"""
Management command to change many user enrollments in many courses using
csv file.
"""
import logging
from os import path
import unicodecsv
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from common.djangoapps.student.models import CourseEnrollment, CourseEnrollmentAttribute, User
from common.djangoapps.student.models import BulkChangeEnrollmentConfiguration
logger = logging.getLogger('common.djangoapps.student.management.commands.bulk_change_enrollment_csv')
class Command(BaseCommand):
"""
Management command to change many user enrollments in many
courses using the csv file
"""
help = """
Change the enrollment status of all the users specified in
the csv file in the specified course to specified course
mode.
Could be used to update effected users by order
placement issues. If large number of students are effected
in different courses.
Similar to bulk_change_enrollment but uses the csv file
input format and can enroll students in multiple courses.
Example:
$ ... bulk_change_enrollment_csv csv_file_path
"""
def add_arguments(self, parser):
""" Add argument to the command parser. """
parser.add_argument(
'--csv_file_path',
required=False,
help='Csv file path'
)
parser.add_argument(
'--file_from_database',
action='store_true',
help='Use file from the BulkChangeEnrollmentConfiguration model instead of the command line.',
)
def get_file_from_database(self):
""" Returns an options dictionary from the current SSPVerificationRetryConfig model. """
enrollment_config = BulkChangeEnrollmentConfiguration.current()
if not enrollment_config.enabled:
raise CommandError('BulkChangeEnrollmentConfiguration is disabled or empty, '
'but --file_from_database from was requested.')
return enrollment_config.csv_file
def handle(self, *args, **options):
""" Main handler for the command."""
file_path = options.get('csv_file_path', None)
file_from_database = options['file_from_database']
if file_from_database:
csv_file = self.get_file_from_database()
self.change_enrollments(csv_file)
elif file_path:
if not path.isfile(file_path):
raise CommandError("File not found.")
with open(file_path, 'rb') as csv_file:
self.change_enrollments(csv_file)
else:
raise CommandError('No file is provided. File is required.')
def change_enrollments(self, csv_file):
""" change the enrollments of the learners. """
course_key = None
user = None
file_reader = unicodecsv.DictReader(csv_file)
headers = file_reader.fieldnames
if not ('course_id' in headers and 'mode' in headers and 'user' in headers):
raise CommandError('Invalid input CSV file.')
for row in list(file_reader):
# Reset per row so a bad course id or username in one row does not
# silently reuse the values parsed from a previous row.
course_key = None
user = None
try:
course_key = CourseKey.from_string(row['course_id'])
except InvalidKeyError:
logger.warning('Invalid or non-existent course id [{}]'.format(row['course_id']))
try:
user = User.objects.get(username=row['user'])
except ObjectDoesNotExist:
logger.warning('Invalid or non-existent user [{}]'.format(row['user']))
if course_key and user:
try:
course_enrollment = self.get_course_enrollment(course_key, user)
if course_enrollment:
mode = row['mode']
self.update_enrollment_mode(course_key, user, mode, course_enrollment)
else:
# if the student enrollment does not exist, enroll directly in the new mode.
CourseEnrollment.enroll(user=user, course_key=course_key, mode=row['mode'])
except Exception as e: # pylint: disable=broad-except
logger.info("Unable to update student [%s] course [%s] enrollment to mode [%s] "
"because of Exception [%s]", row['user'], row['course_id'], row['mode'], repr(e))
def get_course_enrollment(self, course_key, user):
"""
If student is not enrolled in course enroll the student in free mode
"""
course_enrollment = CourseEnrollment.get_enrollment(user, course_key)
# If student is not enrolled in course enroll the student in free mode
if not course_enrollment:
# try to create a enroll user in default course enrollment mode in case of
# professional it will break because of no default course mode.
try:
course_enrollment = CourseEnrollment.get_or_create_enrollment(user=user,
course_key=course_key)
except Exception: # pylint: disable=broad-except
# No free course mode is available.
course_enrollment = None
return course_enrollment
def update_enrollment_mode(self, course_key, user, mode, course_enrollment):
"""
Update the enrollment mode based on the learner's existing enrollment state.
"""
# if the student already has an enrollment and its mode is the same as the provided one
if course_enrollment.mode == mode:
logger.info("Student [%s] is already enrolled in Course [%s] in mode [%s].", user.username,
course_key, course_enrollment.mode)
# set the enrollment to active if it's not already active.
if not course_enrollment.is_active:
course_enrollment.update_enrollment(is_active=True)
else:
# the enrollment exists with a different mode; update it to the new mode.
with transaction.atomic():
course_enrollment.update_enrollment(
mode=mode,
is_active=True,
skip_refund=True
)
course_enrollment.save()
if mode == 'credit':
enrollment_attrs = [{'namespace': 'credit',
'name': 'provider_id',
'value': course_key.org
}]
CourseEnrollmentAttribute.add_enrollment_attr(enrollment=course_enrollment,
data_list=enrollment_attrs)
|
# -*- coding: utf-8 -*-
import re
class Cave(object):
_regex_spaces = r'[ ]{21}'
def __init__(self, price=0, meters=0, description='', url='',
service_url=''):
self.price = price
self.meters = meters
# 'description' is the only possible attribute with 'special' chars
self.description = description.encode('unicode_escape')
self.url = url
self.service_url = service_url
def __str__(self):
message = '''Found a new cave!
Price: {0} euros
Meters: {1} m2
Description: {2}
<a href="{3}">Link</a>
<a href="{4}">Search link</a>
Good luck!'''
message = message.format(self.price, self.meters, self.description,
self.url, self.service_url)
message = re.sub(self._regex_spaces, '', message)
return message
def __unicode__(self):
return self.__str__().decode('unicode-escape')
def __repr__(self):
return self.__str__()
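# Minimal usage sketch (an assumption, not part of the original module). Note that
# this class targets Python 2: description.encode('unicode_escape') returns str
# there, and __unicode__/decode round-trip the escaped text. All values are made up.
#
# cave = Cave(price=500, meters=60, description='Bright cave near the centre',
#             url='http://example.com/ad/1', service_url='http://example.com/search')
# print(cave)  # prints the message built in __str__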
|
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
from IPython import get_ipython
# %%
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# %%
# get_ipython().run_line_magic('load_ext', 'autoreload')
# get_ipython().run_line_magic('autoreload', '2')
# get_ipython().run_line_magic('matplotlib', 'inline')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import cvxportfolio as cp
plotdir = '../portfolio/plots/'
datadir='../data/'
sigmas=pd.read_csv(datadir+'sigmas.csv.gz',index_col=0,parse_dates=[0]).iloc[:,:-1]
returns=pd.read_csv(datadir+'returns.csv.gz',index_col=0,parse_dates=[0])
volumes=pd.read_csv(datadir+'volumes.csv.gz',index_col=0,parse_dates=[0]).iloc[:,:-1]
w_b = pd.Series(index=returns.columns, data=1)
w_b.USDOLLAR = 0.
w_b/=sum(w_b)
start_t="2012-01-01"
end_t="2016-12-29"
simulated_tcost = cp.TcostModel(half_spread=0.0005/2., nonlin_coeff=1., sigma=sigmas, volume=volumes)
simulated_hcost = cp.HcostModel(borrow_costs=0.0001)
simulator = cp.MarketSimulator(returns, costs=[simulated_tcost, simulated_hcost],
market_volumes=volumes, cash_key='USDOLLAR')
return_estimate=pd.read_csv(datadir+'return_estimate.csv.gz',index_col=0,parse_dates=[0]).dropna()
volume_estimate=pd.read_csv(datadir+'volume_estimate.csv.gz',index_col=0,parse_dates=[0]).dropna()
sigma_estimate=pd.read_csv(datadir+'sigma_estimate.csv.gz',index_col=0,parse_dates=[0]).dropna()
optimization_tcost = cp.TcostModel(half_spread=0.0005/2., nonlin_coeff=1.,
sigma=sigma_estimate, volume=volume_estimate)
optimization_hcost=cp.HcostModel(borrow_costs=0.0001)
risk_data = pd.HDFStore(datadir+'risk_model.h5')
risk_model = cp.FactorModelSigma(risk_data.exposures, risk_data.factor_sigma, risk_data.idyos)
# %%
all_return_estimates={}
n_p1=returns.shape[1]
T=returns.shape[0]
for i,t in enumerate(returns.index[:-1]):
all_return_estimates[(t,t)]= return_estimate.loc[t]
tp1=returns.index[i+1]
all_return_estimates[(t,tp1)]=return_estimate.loc[tp1]
returns_forecast = cp.MPOReturnsForecast(all_return_estimates)
results_MPO={}
# %% [markdown]
# ## MPO Coarse search
# %%
import cvxpy as cvx
policies={}
gamma_risks_coarse=[.1,.3,1,3,10,30,100,300,1000]
gamma_tcosts_coarse=[1,2,5,10,20]
for gamma_risk in gamma_risks_coarse:
for gamma_tcost in gamma_tcosts_coarse:
policies[(gamma_risk, gamma_tcost)] = cp.MultiPeriodOpt(return_forecast=returns_forecast,
costs=[gamma_risk*risk_model, gamma_tcost*optimization_tcost, optimization_hcost],
constraints=[cp.LeverageLimit(3)],
trading_times=list(returns.index[(returns.index>=start_t)&(returns.index<=end_t)]),
lookahead_periods=2,
terminal_weights=None)
results_MPO.update({k:v for k,v in zip(policies.keys(),
simulator.run_multiple_backtest(w_b*1e8, start_time = start_t, end_time=end_t,
policies=policies.values(),parallel=True))})
# %%
result_df_coarse=pd.DataFrame()
for k in results_MPO:
if k[0] in gamma_risks_coarse and k[1] in gamma_tcosts_coarse:
result_df_coarse.loc[k[0], k[1]] = results_MPO[k]
result_df = result_df_coarse.loc[sorted(result_df_coarse.index), sorted(result_df_coarse.columns)]
# %%
plt.figure(figsize=(8,5))
for gamma_tcost in result_df.columns:
x=[el.excess_returns.std()*100*np.sqrt(250) for el in result_df[gamma_tcost]]
y=[el.excess_returns.mean()*100*250 for el in result_df[gamma_tcost]]
plt.plot(np.array(x),np.array(y), '.-', label=r'$\gamma^\mathrm{trade} = %g$'%gamma_tcost)
plt.legend(loc='lower right')
plt.xlabel('Risk')
plt.ylabel('Return')
plt.xlim([0,20])
plt.ylim([0,30])
import matplotlib.ticker as mtick
ax = plt.gca()
ax.xaxis.set_major_formatter(mtick.FormatStrFormatter('%.0f%%'))
ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.0f%%'))
plt.savefig(plotdir+'mpo_riskrewardfrontier.png')
# %% [markdown]
# # MPO Pareto search
# %%
results_pareto={}
# %%
policies={}
#gamma_risks_pareto=[int(round(el)) for el in np.logspace(0,3,13)]
gamma_risks_pareto=[1, 2, 3, 6, 10, 18, 32, 56, 100, 178, 316, 562, 1000]
gamma_tcosts_pareto=[7,8,9,10,11,12]
gamma_holdings=[.1,1.,10., 100.,1000.]
for gamma_risk in gamma_risks_pareto:
for gamma_tcost in gamma_tcosts_pareto :
for gamma_holding in gamma_holdings:
policies[(gamma_risk, gamma_tcost, gamma_holding)] = cp.MultiPeriodOpt(alpha_model=returns_forecast,
costs=[gamma_risk*risk_model, gamma_tcost*optimization_tcost,
gamma_holding*optimization_hcost],
constraints=[cp.LeverageLimit(3)],
trading_times=list(returns.index[(returns.index>=start_t)&(returns.index<=end_t)]),
lookahead_periods=2,
terminal_weights=None)
import warnings
warnings.filterwarnings('ignore')
results_pareto.update(dict(zip(policies.keys(), simulator.run_multiple_backtest(1E8*w_b, start_time=start_t,
end_time=end_t,
policies=policies.values(), parallel=True))))
# %%
table=pd.DataFrame()
table[r'$\gamma^\mathrm{risk}$']=[el[0] for el in results_pareto.keys()]
table[r'$\gamma^\mathrm{trade}$']=[el[1] for el in results_pareto.keys()]
table[r'$\gamma^\mathrm{hold}$']=['%g'%el[2] for el in results_pareto.keys()]
table['Return']=[(results_pareto[k].excess_returns.mean()*100*250) for k in results_pareto.keys()]
table['Risk']=[(results_pareto[k].excess_returns.std()*100*np.sqrt(250)) for k in results_pareto.keys()]
table = table.sort_values('Risk', ascending=False).reset_index()
del table['index']
is_pareto = lambda i: table.loc[i,'Return']>=max(table.loc[i:].Return)
table['is_pareto'] = [is_pareto(i) for i in range(len(table))]
table.to_csv(datadir+'mpo_pareto_results.csv', float_format='%g')
# %%
plt.figure(figsize=(8,5))
plt.scatter(table.Risk.values,table.Return.values)
plt.plot(table[table.is_pareto].Risk,table[table.is_pareto].Return, 'C1.-', label='Pareto optimal frontier')
plt.legend( loc='lower right')
plt.xlabel('Risk')
plt.ylabel('Return')
plt.xlim([0,20])
plt.ylim([0,30])
import matplotlib.ticker as mtick
ax = plt.gca()
ax.xaxis.set_major_formatter(mtick.FormatStrFormatter('%.0f%%'))
ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.0f%%'))
plt.savefig(plotdir+'mpo_pareto.png')
# %%
xlim=20
ylim=30
tableprint=table[table.is_pareto]
tableprint=tableprint[tableprint.Risk <= xlim]
tableprint=tableprint[tableprint.Return <= ylim]
del tableprint['is_pareto']
tableprint.Risk=tableprint.Risk.apply(lambda x: '%.2f%%'%x)
tableprint.Return=tableprint.Return.apply(lambda x: '%.2f%%'%x)
print(tableprint.iloc[::-1].to_latex(float_format='%.2f', escape=False, index=False).replace('%',r'\%'))
# %% [markdown]
# # SPO vs MPO
# %%
table_spo = pd.read_csv('spo_pareto_results.csv', index_col=0)
plt.figure(figsize=(8,5))
plt.plot(table[table.is_pareto].Risk,table[table.is_pareto].Return, 'C3.-', label='MPO pareto frontier')
plt.plot(table_spo[table_spo.is_pareto].Risk,
table_spo[table_spo.is_pareto].Return, 'C2.-', label='SPO pareto frontier')
plt.legend(loc='lower right')
plt.xlabel('Risk')
plt.ylabel('Return')
plt.xlim([0,20])
plt.ylim([0,30])
import matplotlib.ticker as mtick
ax = plt.gca()
ax.xaxis.set_major_formatter(mtick.FormatStrFormatter('%.0f%%'))
ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.0f%%'))
plt.savefig(plotdir+'spo_vs_mpo_pareto.png')
|
# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
# This software is distributed under the terms and conditions of the 'Apache-2.0'
# license which can be found in the file 'LICENSE' in this package distribution
# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
from flask import request
from flask_restful import Resource
from moon_utilities.security_functions import check_auth
import logging
LOG = logging.getLogger("moon.orchestrator.api.containers")
class Pods(Resource):
"""
Endpoint for pod requests
"""
__urls__ = (
"/pods",
"/pods/",
"/pods/<string:uuid>",
"/pods/<string:uuid>/",
)
def __init__(self, **kwargs):
self.driver = kwargs.get("driver")
@check_auth
def get(self, uuid=None, user_id=None):
"""Retrieve all pods
:param uuid: uuid of the pod
:param user_id: user ID of the user making the request
:return: {
"pod_id1": {
"name": "...",
"replicas": "...",
"description": "...",
}
}
:internal_api: get_pdp
"""
pods = {}
LOG.info("pods={}".format(self.driver.get_pods()))
for _pod_key, _pod_values in self.driver.get_pods().items():
for _pod_value in _pod_values:
if _pod_value['namespace'] != "moon":
continue
pods[_pod_key] = _pod_value
return {"pods": pods}
@check_auth
def post(self, uuid=None, user_id=None):
"""Create a new pod.
:param uuid: uuid of the pod (not used here)
:param user_id: user ID of the user making the request
:request body: {
"name": "...",
"description": "...",
"type": "plugin_name"
}
:return: {
"pdp_id1": {
"name": "...",
"replicas": "...",
"description": "...",
}
}
"""
return {"pods": None}
@check_auth
def delete(self, uuid=None, user_id=None):
"""Delete a pod
:param uuid: uuid of the pod to delete
:param user_id: user ID of the user making the request
:return: {
"result": "True or False",
"message": "optional message"
}
"""
return {"result": True}
@check_auth
def patch(self, uuid=None, user_id=None):
"""Update a pod
:param uuid: uuid of the pod to update
:param user_id: user ID of the user making the request
:request body: {
"name": "...",
"replicas": "...",
"description": "...",
}
:return: {
"pod_id1": {
"name": "...",
"replicas": "...",
"description": "...",
}
}
:internal_api: update_pdp
"""
return {"pods": None}
|
"""
This script evaluates query interpretations based on the strict evaluation metrics:
macro averaging of precision, recall, and F-measure.
For detailed information see:
F. Hasibi, K. Balog, and S. E. Bratsberg. "Entity Linking in Queries: Tasks and Evaluation",
In Proceedings of ACM SIGIR International Conference on the Theory of Information Retrieval (ICTIR '15), Sep 2015.
DOI: http://dx.doi.org/10.1145/2808194.2809473
Usage:
python evaluation_erd.py <qrel_file> <result_file>
e.g.
python evaluation_erd.py qrels_sets_ERD-dev.txt ERD-dev_MLMcg-GIF.txt
@author: Faegheh Hasibi ([email protected])
"""
from __future__ import division
import sys
from collections import defaultdict
class Evaluator(object):
def __init__(self, qrels, results):
self.qrels_dict = self.__group_by_queries(qrels)
self.results_dict = self.__group_by_queries(results)
qid_overlap = set(self.qrels_dict.keys()) & set(self.results_dict.keys())
if len(qid_overlap) == 0:
print "ERR: Query mismatch between qrel and result file!"
exit(0)
@staticmethod
def __group_by_queries(file_lines):
"""
Groups the lines by query id.
:param file_lines: list of lines [[qid, label, en_id, ...], ...]
:return: {qid: [iset0, iset1, ..], ..}; isets are sets of entity ids
"""
grouped_inters = defaultdict(list)
for cols in file_lines:
if len(cols) > 2:
grouped_inters[cols[0]].append(set(cols[2:]))
elif cols[0] not in grouped_inters:
grouped_inters[cols[0]] = []
# check that identical interpretations are not assigned to a query
for qid, interprets in grouped_inters.iteritems():
q_interprets = set()
for inter in interprets:
if tuple(sorted(inter)) in q_interprets:
print "Err: Identical interpretations for query [" + qid + "]!"
exit(0)
else:
q_interprets.add(tuple(sorted(inter)))
return grouped_inters
def eval(self, eval_query_func):
"""
Evaluates all queries and calculates total precision, recall and F1 (macro averaging).
:param eval_query_func: A function that takes qrel and results for a query and returns evaluation metrics
:return Total precision, recall, and F1 for all queries
"""
queries_eval = {}
total_prec, total_rec, total_f = 0, 0, 0
for qid in sorted(self.qrels_dict):
queries_eval[qid] = eval_query_func(self.qrels_dict[qid], self.results_dict.get(qid, []))
total_prec += queries_eval[qid]['prec']
total_rec += queries_eval[qid]['rec']
n = len(self.qrels_dict) # number of queries
total_prec /= n
total_rec /= n
total_f = (2 * total_rec * total_prec) / (total_rec + total_prec) if total_prec + total_rec != 0 else 0
log = "\n----------------" + "\nEvaluation results:\n" + \
"Prec: " + str(round(total_prec, 4)) + "\n" +\
"Rec: " + str(round(total_rec, 4)) + "\n" + \
"F1: " + str(round(total_f, 4)) + "\n" + \
"all: " + str(round(total_prec, 4)) + ", " + str(round(total_rec, 4)) + ", " + str(round(total_f, 4))
print log
metrics = {'prec': total_prec, 'rec': total_rec, 'f': total_f}
return metrics
def erd_eval_query(query_qrels, query_results):
"""
Evaluates a single query.
:param query_qrels: Query interpretations from Qrel [{en1, en2, ..}, ..]
:param query_results: Query interpretations from result file [{en1, en2, ..}, ..]
:return: precision, recall, and F1 for a query
"""
tp = 0 # correct
fn = 0 # missed
fp = 0 # incorrectly returned
# ----- Query has no interpretation set. ------
if len(query_qrels) == 0:
if len(query_results) == 0:
return {'prec': 1, 'rec': 1, 'f': 1}
return {'prec': 0, 'rec': 0, 'f': 0}
# ----- Query has at least an interpretation set. -----
# Iterate over qrels to calculate TP and FN
for qrel_item in query_qrels:
if find_item(qrel_item, query_results):
tp += 1
else:
fn += 1
# Iterate over results to calculate FP
for res_item in query_results:
if not find_item(res_item, query_qrels): # Finds the result in the qrels
fp += 1
prec = tp / (tp+fp) if tp+fp != 0 else 0
rec = tp / (tp+fn) if tp+fn != 0 else 0
f = (2 * prec * rec) / (prec + rec) if prec + rec != 0 else 0
metrics = {'prec': prec, 'rec': rec, 'f': f}
return metrics
def find_item(item_to_find, items_list):
"""
Returns True if an item is found in the item list.
:param item_to_find: item to be found
:param items_list: list of items to search in
:return boolean
"""
is_found = False
item_to_find = set([en.lower() for en in item_to_find])
for item in items_list:
item = set([en.lower() for en in item])
if item == item_to_find:
is_found = True
return is_found
def parse_file(file_name):
"""
Parses file and returns the positive instances for each query.
:param file_name: Name of file to be parsed
:return list of lines [[qid, label, en_id, ...], ...]
"""
file_lines = []
efile = open(file_name, "r")
for line in efile.readlines():
if line.strip() == "":
continue
cols = line.strip().split("\t")
file_lines.append(cols)
return file_lines
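# Expected input format for both the qrel and the result file, inferred from
# parse_file and __group_by_queries: tab-separated columns per line,
#   <qid> \t <label> \t <entity_id_1> [\t <entity_id_2> ...]
# where a line with fewer than three columns marks a query with no
# interpretation set. The values below are hypothetical:
#   TREC-2009-1    1    <dbpedia:New_York_City>    <dbpedia:Hotel>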
def main(args):
if len(args) < 2:
print "\tUsage: [qrel_file] [result_file]"
exit(0)
qrels = parse_file(args[0])
results = parse_file(args[1])
evaluator = Evaluator(qrels, results)
evaluator.eval(erd_eval_query)
if __name__ == '__main__':
main(sys.argv[1:])
|
from django.conf import settings
CONNECTIONS = getattr(settings, 'NEXUS_REDIS_CONNECTIONS', [
# {'host': '127.0.0.1', 'port': 8930},
# {'host': '127.0.0.1', 'port': 8932, 'password': 'secret', 'db': 2},
])
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 6379
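# Illustrative sketch (an assumption, not part of these settings) of how a
# CONNECTIONS entry could be turned into a client with redis-py:
#
# import redis
#
# def get_client(conn):
#     return redis.Redis(
#         host=conn.get('host', DEFAULT_HOST),
#         port=conn.get('port', DEFAULT_PORT),
#         db=conn.get('db', 0),
#         password=conn.get('password'),
#     )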
|
from ..parsing import * # Module under test
def test_extract_single_deep_nested():
sigil = "[ENV=[USR=[CURRENT].DEFAULT_ENV].APP=[APP=[ACTIVE]]]"
text = f"-- {sigil} --"
assert set(extract(text)) == {sigil}
def test_ignore_whitespace():
sigil = "[ ENV . HELP ]"
text = f"-- {sigil} --"
assert set(extract(text)) == {sigil}
def test_extract_file_stream():
with open("tests/data/sample.txt", "r") as fp:
assert set(extract(fp)) == {"[ENV.HOST]"}
|
from django.conf import settings
from django.test import TestCase
from django.core import mail
from ecommerce import mailer
from ecommerce.models import Order
class TestMailer(TestCase):
"""Test cases for mailer module."""
@classmethod
def setUpClass(cls):
"""Set up testing Order instance."""
super(TestMailer, cls).setUpClass()
cls.order = Order(
name='John Doe',
email='[email protected]',
phone='222222222222'
)
def test_mail_send_works(self):
"""If we send email, Django will put it in mail.outbox collection."""
for i in range(10):
mailer.send_order(
subject='Testing email',
order=self.order,
)
self.assertEqual(len(mail.outbox), i + 1)
def test_mailer_construct_valid_email(self):
"""Saved email contains valid subject and valid body."""
mailer.send_order(
subject='Testing email 1',
order=self.order,
)
sent_mail = mail.outbox[0]
self.assertEqual(sent_mail.subject, 'Testing email 1')
self.assertIn('Thank you, {}'.format(self.order.name), sent_mail.body)
def test_mailer_will_send_order_to_specified_recipients(self):
"""Saved email contains valid set of recipients."""
mailer.send_order(
subject='Testing email 1',
order=self.order,
)
sent_mail = mail.outbox[0]
recipients = [self.order.email, *settings.EMAIL_RECIPIENTS]
self.assertListEqual(recipients, sent_mail.recipients())
mailer.send_order(
subject='Testing email 1',
order=self.order,
to_customer=False
)
sent_mail = mail.outbox[1]
recipients = settings.EMAIL_RECIPIENTS
self.assertListEqual(recipients, sent_mail.recipients())
def test_order_call(self):
"""Mailer module should be able to send mails about ordered calls."""
subject = 'Testing backcall'
mailer.send_backcall(
subject=subject,
phone='22222222',
url='fidals.ru'
)
sent_mail = mail.outbox[0]
recipients = settings.EMAIL_RECIPIENTS
self.assertListEqual(recipients, sent_mail.recipients())
self.assertEqual(sent_mail.subject, subject)
|
import pytest
import matplotlib
from dython.data_utils import split_hist
def test_split_hist_check(iris_df):
result = split_hist(iris_df, 'sepal length (cm)', 'target')
assert isinstance(result, matplotlib.axes.Axes)
|
from __future__ import absolute_import, unicode_literals
from django.contrib import admin
from django.db.models import F
from django.utils.translation import ugettext_lazy as _
from .models import Discipline, TrainingSet
class DisciplineListFilter(admin.SimpleListFilter):
"""
Generic Filter for models that have a direct relationship to disciplines.
Inherits from `admin.SimpleListFilter`.
"""
title = _("disciplines")
# Parameter for the filter that will be used in the URL query.
parameter_name = "disciplines"
def lookups(self, request, model_admin):
"""
Define the lookup values that can be seen in the admin
interface. Returns tuples: the first element is a coded
value, whereas the second one is human-readable.
:param request: current user request
:type request: django.http.request
:param model_admin: admin of current model
:type model_admin: ModelAdmin
:return: list of tuples containing id and title of each discipline
:rtype: list
"""
list_of_disciplines = []
# Only display disciplines that can actually contain training sets, i.e. leaf nodes (lft == rght - 1)
queryset = Discipline.objects.filter(lft=F("rght") - 1)
if request.user.is_superuser:
queryset = queryset.filter(creator_is_admin=True)
else:
queryset = queryset.filter(created_by__in=request.user.groups.all())
for discipline in queryset:
list_of_disciplines.append(
(
str(discipline.id),
" \u2794 ".join(
map(str, discipline.get_ancestors(include_self=True))
),
)
)
return sorted(list_of_disciplines, key=lambda tp: tp[1])
def queryset(self, request, queryset):
"""
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
:param request: current user request
:type request: django.http.request
:param queryset: current queryset
:type queryset: QuerySet
:return: filtered queryset based on the value provided in the query string
:rtype: QuerySet
"""
if self.value():
return queryset.filter(discipline__id=self.value()).distinct()
return queryset
class DocumentDisciplineListFilter(DisciplineListFilter):
"""
Filter for disciplines within document list display.
Inherits from `admin.SimpleListFilter`.
"""
def queryset(self, request, queryset):
"""
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
:param request: current user request
:type request: django.http.request
:param queryset: current queryset
:type queryset: QuerySet
:return: filtered queryset based on the value provided in the query string
:rtype: QuerySet
"""
if self.value():
return queryset.filter(
training_sets__discipline__id=self.value()
).distinct()
return queryset
class DocumentTrainingSetListFilter(admin.SimpleListFilter):
"""
Filter for training sets within document list display.
Inherits from `admin.SimpleListFilter`.
"""
title = _("training sets")
# Parameter for the filter that will be used in the URL query.
parameter_name = "training set"
def lookups(self, request, model_admin):
"""
Define the lookup values that can be seen in the admin
interface. Returns tuples: the first element is a coded
value, whereas the second one is human-readable.
:param request: current user request
:type request: django.http.request
:param model_admin: admin of current model
:type model_admin: ModelAdmin
:return: list of tuples containing id and title of each training set
:rtype: list
"""
list_of_trainingsets = []
if request.user.is_superuser:
queryset = TrainingSet.objects.all().filter(creator_is_admin=True)
else:
queryset = TrainingSet.objects.all().filter(
created_by__in=request.user.groups.all()
)
for trainingset in queryset:
list_of_trainingsets.append((str(trainingset.id), trainingset.title))
return sorted(list_of_trainingsets, key=lambda tp: tp[1])
def queryset(self, request, queryset):
"""
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
:param request: current user request
:type request: django.http.request
:param queryset: current queryset
:type queryset: QuerySet
:return: filtered queryset based on the value provided in the query string
:rtype: QuerySet
"""
if self.value():
return queryset.filter(training_sets__id=self.value()).distinct()
return queryset
class ApprovedImageListFilter(admin.SimpleListFilter):
"""
Filter for approved images within document list display.
Inherits from `admin.SimpleListFilter`.
"""
title = _("approved images")
# Parameter for the filter that will be used in the URL query.
parameter_name = "approvedimages"
default_value = None
def lookups(self, request, model_admin):
"""
Define the lookup values that can be seen in the admin
interface. Returns tuples: the first element is a coded
value, whereas the second one is human-readable.
:param request: current user request
:type request: django.http.request
:param model_admin: admin of current model
:type model_admin: ModelAdmin
:return: list of tuples containing id and title of each discipline
:rtype: list
"""
return (
(1, _("at least one approved image")),
(2, _("at least one pending image")),
(3, _("no images")),
)
def queryset(self, request, queryset):
"""
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
:param request: current user request
:type request: django.http.request
:param queryset: current queryset
:type queryset: QuerySet
:return: filtered queryset based on the value provided in the query string
:rtype: QuerySet
"""
if self.value():
if int(self.value()) == 1:
return queryset.filter(document_image__confirmed=True).distinct()
if int(self.value()) == 2:
return queryset.filter(document_image__confirmed=False).distinct()
if int(self.value()) == 3:
return queryset.filter(document_image__isnull=True).distinct()
return queryset
class AssignedListFilter(admin.SimpleListFilter):
"""
Filter for documents that are either assigned or unassigned to at least one training set.
Inherits from `admin.SimpleListFilter`.
"""
title = _("assigned & unassigned")
# Parameter for the filter that will be used in the URL query.
parameter_name = "assigned"
default_value = None
def lookups(self, request, model_admin):
"""
Define the lookup values that can be seen in the admin
interface. Returns tuples: the first element is a coded
value, whereas the second one is human-readable.
:param request: current user request
:type request: django.http.request
:param model_admin: admin of current model
:type model_admin: ModelAdmin
:return: list of tuples containing id and title of each discipline
:rtype: list
"""
return (
(0, _("unassigned only")),
(1, _("assigned only")),
)
def queryset(self, request, queryset):
"""
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
:param request: current user request
:type request: django.http.request
:param queryset: current queryset
:type queryset: QuerySet
:return: filtered queryset based on the value provided in the query string
:rtype: QuerySet
"""
if self.value():
if int(self.value()) == 0:
return queryset.filter(training_sets__isnull=True).distinct()
if int(self.value()) == 1:
return queryset.filter(training_sets__isnull=False).distinct()
return queryset
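# Illustrative wiring sketch (not part of this module): these filters are meant to
# be listed in a ModelAdmin's list_filter. The Document model and DocumentAdmin
# class below are assumptions.
#
# from django.contrib import admin
# from .models import Document
#
# @admin.register(Document)
# class DocumentAdmin(admin.ModelAdmin):
#     list_filter = (
#         DocumentDisciplineListFilter,
#         DocumentTrainingSetListFilter,
#         ApprovedImageListFilter,
#         AssignedListFilter,
#     )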
|
#!/usr/bin/env python
#
# Copyright (c) 2016 In-Q-Tel, Inc, All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on 17 August 2016
@author: aganeshLab41, tlanham
Test module for machine learning plugin
for classifying ports from tcp packets.
"""
import pytest
import sys
import os
from port_classifier import rabbit_init
from port_classifier import get_path
from port_classifier import get_host
from port_classifier import save_model
def test_get_path():
get_path()
sys.argv = []
get_path()
def test_get_host():
get_host()
os.environ['POSEIDON_HOST'] = '1.1.1.1'
assert get_host() == '1.1.1.1'
class Test:
def __init__(self):
self.s = 'hello world'
def test_save_model():
model = Test()
save_model(model)
assert os.path.isfile('port_class_log_reg_model.pickle')
os.environ['POSEIDON_HOST'] = 'httpbin.org/post'
save_model(model)
@pytest.mark.skip(reason='requires rabbitmq broker, integration test')
def test_rabbit_init():
channel, connection = rabbit_init(host='poseidon-rabbit',
exchange='topic-poseidon-internal',
queue_name='features_flowparser',
rabbit_rec=False)
|
import enum
import logging
class DeepLib(enum.Enum):
Null = -1
Pytorch = 0
Tensorflow = 1
SkLearn = 2
def log_device_setup(deepLib: DeepLib = DeepLib.Null, level=logging.INFO):
import sys
import psutil
import multiprocessing
logging.info(f'__Python VERSION: {sys.version}')
logging.info(f"Number of available cores: {psutil.cpu_count(logical=False)}.")
logging.info(f"Number of available logical processors: {multiprocessing.cpu_count()}.")
setup_func = {
DeepLib.Null: lambda *args: None,
DeepLib.Pytorch: log_pytorch_device_setup,
DeepLib.Tensorflow: log_tensorflow_device_setup,
DeepLib.SkLearn: log_sklearn_device_setup,
}
setup_func[deepLib](level)
def log_pytorch_device_setup(level=logging.INFO):
from subprocess import check_output
import torch
logging.info(f'__pyTorch VERSION:{torch.__version__}')
try:
logging.info(f'__CUDA VERSION:\n{check_output(["nvcc", "--version"]).decode("utf-8")}')
except FileNotFoundError:
logging.info('__CUDA VERSION:Not Found')
try:
logging.info(f'__nvidia-smi:\n{check_output(["nvidia-smi"]).decode("utf-8")}')
except FileNotFoundError:
logging.info('__nvidia-smi: Not Found')
logging.info(f'__CUDNN VERSION:{torch.backends.cudnn.version()}')
logging.info(f'__Number CUDA Devices:{torch.cuda.device_count()}')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logging.info(f"\n{'-' * 25}\nDEVICE: {device}\n{'-' * 25}\n")
# Additional Info when using cuda
if device.type == 'cuda':
logging.info(torch.cuda.get_device_name(0))
logging.info('Memory Usage:')
logging.info(f'Allocated: {round(torch.cuda.memory_allocated(0) / 1024 ** 3, 1)} GB')
logging.info(f'Cached: {round(torch.cuda.memory_reserved(0) / 1024 ** 3, 1)} GB')
logging.info(f"Memory summary: \n{torch.cuda.memory_summary()}")
def log_tensorflow_device_setup(level=logging.INFO):
import tensorflow as tf
from subprocess import check_output
logging.info(f'__tensorflow VERSION:{tf.__version__}')
try:
logging.info(f'__CUDA VERSION:\n{check_output(["nvcc", "--version"]).decode("utf-8")}')
except FileNotFoundError:
logging.info('__CUDA VERSION:Not Found')
try:
logging.info(f'__nvidia-smi:\n{check_output(["nvidia-smi"]).decode("utf-8")}')
except FileNotFoundError:
logging.info('__nvidia-smi: Not Found')
physical_devices = tf.config.list_physical_devices('GPU')
logging.info(f"physical_devices: {physical_devices}")
logical_devices = tf.config.list_logical_devices('GPU')
logging.info(f"logical_devices: {logical_devices}")
set_tf_loglevel(level)
def set_tf_loglevel(level=logging.INFO):
import os
if level >= logging.FATAL:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
elif level >= logging.ERROR:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
elif level >= logging.WARNING:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
else:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'
logging.getLogger('tensorflow').setLevel(level)
def log_sklearn_device_setup(level=logging.INFO):
import sklearn
logging.info(f'__sklearn VERSION:{sklearn.__version__}')
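# Minimal usage sketch (an assumption, not part of the original module):
#
# if __name__ == "__main__":
#     logging.basicConfig(level=logging.INFO)
#     log_device_setup(DeepLib.Pytorch, level=logging.INFO)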
|
import argparse
import datetime
import json
import logging
import pickle
import time
import shutil
from kite.graph_data.data_feeder import EndpointDataFeeder
from kite.graph_data.session import RequestInit
from kite.graph_data.graph_feed import GraphFeedConfig
from kite.infer_expr.config import MetaInfo, Config
from kite.infer_call.request import Request as CallRequest, KwargRequest, ArgTypeRequest, ArgPlaceholderRequest
from kite.infer_expr.request import Request as ExprRequest
from kite.infer_expr.attr_base import Request as AttrBaseRequest
from kite.infer_attr.request import Request as AttrRequest
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s')
def get_filename(cur_sample: int, total: int, timestamp: int) -> str:
n_digits = len(str(total))
format_str = "{{:0{}d}}".format(n_digits) + "-of-{}-{}.pickle"
return format_str.format(cur_sample, total, timestamp)
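# For example (hypothetical values), get_filename(7, 1000, 1612345678901) returns
# "0007-of-1000-1612345678901.pickle": the sample index is zero-padded to the
# width of the total sample count.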
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--endpoint', type=str, default='http://localhost:3039')
parser.add_argument('--random_seed', type=int)
parser.add_argument('--batch', type=int, default=10)
parser.add_argument('--samples', type=int, default=1000, help='number of samples to generate')
parser.add_argument('--meta_info', type=str)
parser.add_argument('--out_dir', type=str, default='data')
parser.add_argument('--samples_per_file', type=int, default=500)
parser.add_argument('--max_samples', type=int)
parser.add_argument('--attr_base_proportion', type=float)
parser.add_argument('--attr_proportion', type=float)
parser.add_argument('--call_proportion', type=float)
parser.add_argument('--arg_type_proportion', type=float)
parser.add_argument('--kwarg_name_proportion', type=float)
parser.add_argument('--arg_placeholder_proportion', type=float)
args = parser.parse_args()
meta_info = MetaInfo.from_json(json.load(open(args.meta_info, 'r')))
config = Config()
req = RequestInit(
config=GraphFeedConfig(edge_set=config.ggnn.edge_set),
random_seed=args.random_seed,
num_batches=args.batch,
max_hops=config.max_hops,
name_subtoken_index=meta_info.name_subtoken_index,
type_subtoken_index=meta_info.type_subtoken_index,
production_index=meta_info.production,
expr=ExprRequest(
max_samples=args.max_samples,
call=CallRequest(
symbols=meta_info.call.dist,
batch_proportion=args.call_proportion,
),
attr=AttrRequest(
symbols=meta_info.attr.dist,
batch_proportion=args.attr_proportion,
parents=meta_info.attr.parents,
),
attr_base=AttrBaseRequest(
symbols=meta_info.attr_base.dist,
batch_proportion=args.attr_base_proportion,
),
arg_type=ArgTypeRequest(
symbols=meta_info.call.dist,
batch_proportion=args.arg_type_proportion,
),
kwarg_name=KwargRequest(
symbols=meta_info.call.dist,
keywords=meta_info.call.keywords,
batch_proportion=args.kwarg_name_proportion,
),
arg_placeholder=ArgPlaceholderRequest(
symbols=meta_info.call.dist,
batch_proportion=args.arg_placeholder_proportion,
)
),
)
logging.info("will write {} samples to {}, random seed = {}".format(
args.samples, args.out_dir, args.random_seed))
feeder = EndpointDataFeeder(args.endpoint, req)
try:
tmp_filename = None
filename = None
file = None
file_samples = 0
start = None
n_names = 0
n_production = 0
def finish_file():
file.close()
shutil.move(tmp_filename, filename)
end = datetime.datetime.now()
logging.info(
"sample {}: saved {} with {} samples ({} name, {} production), took {}".format(
i, filename, args.samples_per_file, n_names, n_production, end - start
))
for i in range(args.samples):
if not file or file_samples >= args.samples_per_file:
if file:
finish_file()
file_samples = 0
ts = int(time.time() * 1000)
filename = "{}/{}".format(args.out_dir, get_filename(i, args.samples, ts))
tmp_filename = "{}.part".format(filename)
file = open(tmp_filename, 'wb')
start = datetime.datetime.now()
logging.info("writing to {}".format(tmp_filename))
sample = feeder.next()
pickle.dump(sample, file)
n_names += len(sample.data.expr.infer_name.prediction_nodes)
n_production += len(sample.data.expr.infer_production.prediction_nodes)
file_samples += 1
if file_samples > 0:
finish_file()
finally:
feeder.stop()
if __name__ == "__main__":
main()
|
import torch
from torch import nn
import torch.optim as optim
import torch.nn.functional as F
import math
import numpy as np
from config import parameters as conf
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
if conf.pretrained_model == "bert":
from transformers import BertModel
elif conf.pretrained_model == "roberta":
from transformers import RobertaModel
elif conf.pretrained_model == "finbert":
from transformers import BertModel
elif conf.pretrained_model == "longformer":
from transformers import LongformerModel
class Bert_model(nn.Module):
def __init__(self, num_decoder_layers, hidden_size, dropout_rate, input_length,
program_length, op_list, const_list, num_char_length, num_emb_dim):
super(Bert_model, self).__init__()
self.op_list_size = len(op_list)
self.const_list_size = len(const_list)
self.reserved_token_size = self.op_list_size + self.const_list_size
self.program_length = program_length
self.hidden_size = hidden_size
self.const_list = const_list
self.op_list = op_list
self.input_length = input_length
self.num_char_length = num_char_length
self.num_emb_dim = num_emb_dim
self.reserved_ind = nn.Parameter(torch.arange(
0, self.reserved_token_size), requires_grad=False)
self.reserved_go = nn.Parameter(torch.arange(op_list.index(
'GO'), op_list.index('GO') + 1), requires_grad=False)
self.reserved_para = nn.Parameter(torch.arange(op_list.index(
')'), op_list.index(')') + 1), requires_grad=False)
# masking for decoding at test time
op_ones = nn.Parameter(torch.ones(
self.op_list_size), requires_grad=False)
op_zeros = nn.Parameter(torch.zeros(
self.op_list_size), requires_grad=False)
other_ones = nn.Parameter(torch.ones(
input_length + self.const_list_size), requires_grad=False)
other_zeros = nn.Parameter(torch.zeros(
input_length + self.const_list_size), requires_grad=False)
self.op_only_mask = nn.Parameter(
torch.cat((op_ones, other_zeros), 0), requires_grad=False)
self.seq_only_mask = nn.Parameter(
torch.cat((op_zeros, other_ones), 0), requires_grad=False)
# for ")"
para_before_ones = nn.Parameter(torch.ones(
op_list.index(')')), requires_grad=False)
para_after_ones = nn.Parameter(torch.ones(
input_length + self.reserved_token_size - op_list.index(')') - 1), requires_grad=False)
para_zero = nn.Parameter(torch.zeros(1), requires_grad=False)
self.para_mask = nn.Parameter(torch.cat(
(para_before_ones, para_zero, para_after_ones), 0), requires_grad=False)
# for step embedding
# self.step_masks = []
all_tmp_list = self.op_list + self.const_list
self.step_masks = nn.Parameter(torch.zeros(
conf.max_step_ind, input_length + self.reserved_token_size), requires_grad=False)
for i in range(conf.max_step_ind):
this_step_mask_ind = all_tmp_list.index("#" + str(i))
self.step_masks[i, this_step_mask_ind] = 1.0
# self.step_mask_eye = torch.eye(conf.max_step_ind)
if conf.pretrained_model == "bert":
self.bert = BertModel.from_pretrained(
conf.model_size, cache_dir=conf.cache_dir)
elif conf.pretrained_model == "roberta":
self.bert = RobertaModel.from_pretrained(
conf.model_size, cache_dir=conf.cache_dir)
elif conf.pretrained_model == "finbert":
self.bert = BertModel.from_pretrained(
conf.model_size, cache_dir=conf.cache_dir)
elif conf.pretrained_model == "longformer":
self.bert = LongformerModel.from_pretrained(
conf.model_size, cache_dir=conf.cache_dir)
self.cls_prj = nn.Linear(hidden_size, hidden_size, bias=True)
self.cls_dropout = nn.Dropout(dropout_rate)
self.seq_prj = nn.Linear(hidden_size, hidden_size, bias=True)
self.seq_dropout = nn.Dropout(dropout_rate)
self.reserved_token_embedding = nn.Embedding(
self.reserved_token_size, hidden_size)
self.num_char_embedding = nn.Embedding(self.num_char_length, num_emb_dim)
# attentions
self.decoder_history_attn_prj = nn.Linear(
hidden_size, hidden_size, bias=True)
self.decoder_history_attn_dropout = nn.Dropout(dropout_rate)
self.question_attn_prj = nn.Linear(hidden_size, hidden_size, bias=True)
self.question_attn_dropout = nn.Dropout(dropout_rate)
self.question_summary_attn_prj = nn.Linear(
hidden_size, hidden_size, bias=True)
self.question_summary_attn_dropout = nn.Dropout(dropout_rate)
if conf.sep_attention:
self.input_embeddings_prj = nn.Linear(
hidden_size*3, hidden_size, bias=True)
else:
self.input_embeddings_prj = nn.Linear(
hidden_size*2, hidden_size, bias=True)
self.input_embeddings_layernorm = nn.LayerNorm([1, hidden_size])
self.option_embeddings_prj = nn.Linear(
hidden_size*2, hidden_size, bias=True)
# decoder lstm
self.rnn = torch.nn.LSTM(input_size=hidden_size, hidden_size=hidden_size,
num_layers=conf.num_decoder_layers, batch_first=True)
# num char encoder
self.num_bilstm = torch.nn.LSTM(input_size=num_emb_dim, hidden_size=hidden_size // 2,
num_layers=conf.num_encoder_layers, bidirectional=True)
self.num_char_prj = nn.Linear(hidden_size, hidden_size, bias=True)
self.num_char_dropout = nn.Dropout(dropout_rate)
# num attention
self.num_attn_prj = nn.Linear(hidden_size, hidden_size, bias=True)
self.num_attn_dropout = nn.Dropout(dropout_rate)
# seq_out_prj
self.seqout_prj = nn.Linear(hidden_size * 2, hidden_size, bias=True)
self.seqout_dropout = nn.Dropout(dropout_rate)
# step vector
self.decoder_step_proj = nn.Linear(
3*hidden_size, hidden_size, bias=True)
self.decoder_step_proj_dropout = nn.Dropout(dropout_rate)
self.step_mix_proj = nn.Linear(
hidden_size*2, hidden_size, bias=True)
def forward(self, is_training, input_ids, input_mask, segment_ids, option_mask, program_ids, program_mask, num_char_ids, number_mask, num_char_mask, device):
bert_outputs = self.bert(
input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids)
# print("="*30)
# print("input_ids.size(), ", input_ids.size()) # [batch, seq_length], [16, 512]
# print("number_mask: ", number_mask.size())
# print("input_mask.size(), ", input_mask.size()) # [batch, seq_length], [16, 512]
# print("segment_ids.size(), ", segment_ids.size()) # [batch, seq_length], [16, 512]
# print("option_mask.size()", option_mask.size()) # [batch, option_length], [16, 556]
# print("program_ids.size()", program_ids.size()) # [batch, program_length], [16, 30]
# print("program_mask.size()", program_mask.size()) # [batch, program_length], [16, 30]
######
# Step 1: get the sequence, including questions and retrieved text: {h_i^e}
######
bert_sequence_output = bert_outputs.last_hidden_state # [batch, seq_length, hidden], [16, 512, 768]
bert_pooled_output = bert_sequence_output[:, 0, :] # [batch, hidden], [16, 768]
batch_size, seq_length, bert_dim = list(bert_sequence_output.size())
pooled_output = self.cls_prj(bert_pooled_output) # if conf.sep_attention is True, the pooled_output will not be used
pooled_output = self.cls_dropout(pooled_output)
option_size = self.reserved_token_size + seq_length # 556
sequence_output = self.seq_prj(bert_sequence_output)
sequence_output = self.seq_dropout(sequence_output) # [batch_size, seq_length, hidden], [16, 512, 768]
if conf.num_char:
######
# Step new1: get number embeddings and number_hidden_state
# the number char_hidden_avg will be concated with the input sequence
# therefore, for the word (not number), we copy the output of the encoder here
######
num_char_embeddings = self.num_char_embedding(num_char_ids)
size_a, size_b, size_c, size_d = num_char_embeddings.size()
num_char_embeddings = num_char_embeddings.reshape(-1, size_c, size_d) # [16 * 512, 10, 300]
# add pad, get bilstm output
num_char_length = num_char_mask.sum(-1).reshape(-1) # [16 * 512]
num_char_length += (num_char_length == 0).long() # add 1 where the length is 0 so pack_padded_sequence accepts it; the mask multiplication below zeroes those positions out again
num_char_length = num_char_length.tolist()
num_char_embeddings_pad = torch.nn.utils.rnn.pack_padded_sequence(input=num_char_embeddings, lengths=num_char_length, batch_first=True, enforce_sorted=False)
num_char_hidden, _ = self.num_bilstm(num_char_embeddings_pad)
num_char_hidden, out_len = pad_packed_sequence(num_char_hidden, batch_first=True)
num_char_hidden = num_char_hidden.reshape(size_a, size_b, size_c, -1) # because bilstm
num_char_mask_repeat = num_char_mask.unsqueeze(-1).repeat(1,1,1,self.hidden_size) # batch, seq_length, max_num_length, hidden
num_char_hidden = num_char_hidden * num_char_mask_repeat # same as above
num_char_hidden_sum = num_char_hidden.sum(-2)
num_char_mask = num_char_mask.sum(-1).unsqueeze(-1).repeat(1,1,self.hidden_size) + 1e-7
num_char_hidden_avg = num_char_hidden_sum / num_char_mask # batch, seq_length, hidden
num_char_output = self.num_char_prj(num_char_hidden_avg)
num_char_output = self.num_char_dropout(num_char_output) # batch, seq_length, hidden
mask = number_mask.unsqueeze(-1).repeat(1,1,self.hidden_size) # batch, seq_length, hidden
concat_num_word_output = num_char_output * mask + sequence_output * (1 - mask) # batch, seq_length, hidden; keep the encoder output for non-number positions
# copy the output of the encoder here
# number_mask: [batch, seq_length]
num_attn_vec = self.num_attn_prj(concat_num_word_output)
num_attn_vec = self.num_attn_dropout(num_attn_vec) # batch, seq_length, hidden
# print("num_attn_vec: ", num_attn_vec.size())
num_attn_w = torch.matmul(concat_num_word_output, torch.transpose(num_attn_vec, 1, 2)) # batch, seq_length, seq_length (len_generated)
# print("num_attn_w: ", num_attn_w.size())
# print("mask: ", mask.size())
attn_mask = number_mask.unsqueeze(-1).repeat(1, 1, num_attn_w.size()[-1]) # batch,
num_attn_w -= 1e6 * (1 - attn_mask)
num_attn_w = F.softmax(num_attn_w, dim=1)
# print("after softmax, num_attn_w.size(): ", num_attn_w.size()) #
num_ctx_out = torch.matmul(
torch.transpose(num_attn_w, 1, 2), concat_num_word_output) # batch, seq_length, hidden
# print("num_ctx_out: ", num_ctx_out.size()) # batch, seq_length, hidden
sequence_output = torch.cat([sequence_output, num_ctx_out], dim=-1)
sequence_output = self.seqout_prj(sequence_output)
sequence_output = self.seqout_dropout(sequence_output)
print("run this???")
# print(sequence_output)
######
# Step 2: get option embeddings: {h_i^s, h_i^m}
# and concat it with sequence_output: H
######
op_embeddings = self.reserved_token_embedding(self.reserved_ind)
op_embeddings = op_embeddings.repeat(batch_size, 1, 1) # [batch_size, reserved_ind_length, hidden], [16, 44, 768], the length of reserved_ind = len(op_list) + len(const_list)
# [batch, op + seq len, hidden]
initial_option_embeddings = torch.cat([op_embeddings, sequence_output], dim=1)
######
# Step 3: init something used for LSTM decoder
######
# for init, only one symbol "GO", so the size of decoder_output is [batch_size, 1, hidden]
init_decoder_output = self.reserved_token_embedding(self.reserved_go) # [1, 768]
decoder_output = init_decoder_output.repeat(batch_size, 1, 1) # [16, 1, 768]
if conf.sep_attention:
decoder_history = decoder_output
else:
decoder_history = torch.unsqueeze(pooled_output, dim=-1)
# initialize the hidden state for the LSTM decoder
decoder_state_h = torch.zeros(1, batch_size, self.hidden_size, device=device)
decoder_state_c = torch.zeros(1, batch_size, self.hidden_size, device=device)
######
# Step 4: prepare something for future use
######
split_program_ids = torch.split(program_ids, 1, dim=1) # len(split_program_ids) = 30, split_program_ids[0].size() = [16, 1]
# What's the float_input_mask for?
float_input_mask = input_mask.float()
float_input_mask = torch.unsqueeze(float_input_mask, dim=-1)
# used for updating option embeddings, adding step embedding
this_step_new_op_emb = initial_option_embeddings # [batch, option_length, hidden]
logits = []
######
# Step 5: generate program
######
for cur_step in range(self.program_length):
######
# Step 5.1: get decoder history attention: att_h
######
decoder_history_attn_vec = self.decoder_history_attn_prj(decoder_output) # [batch, 1, hidden], [16, 1, 768]
decoder_history_attn_vec = self.decoder_history_attn_dropout(decoder_history_attn_vec)
decoder_history_attn_w = torch.matmul(
decoder_history, torch.transpose(decoder_history_attn_vec, 1, 2)) # [batch, cur_step + 1, 1]
decoder_history_attn_w = F.softmax(decoder_history_attn_w, dim=1) # [batch, cur_step + 1, 1]
decoder_history_ctx_embeddings = torch.matmul(
torch.transpose(decoder_history_attn_w, 1, 2), decoder_history) # [batch, 1, hidden],[16, 1, 768]
######
# Step 5.2: get attention for input sequence: att_p
######
if conf.sep_attention:
# input seq att
question_attn_vec = self.question_attn_prj(decoder_output)
question_attn_vec = self.question_attn_dropout(question_attn_vec) #[batch, 1, hidden],[16, 1, 768]
question_attn_w = torch.matmul(
sequence_output, torch.transpose(question_attn_vec, 1, 2))#[batch, seq_length, 1],[16, 512, 1]
question_attn_w -= 1e6 * (1 - float_input_mask)
question_attn_w = F.softmax(question_attn_w, dim=1) # [batch, seq_length, 1], [16, 512, 1]
question_ctx_embeddings = torch.matmul(
torch.transpose(question_attn_w, 1, 2), sequence_output) # [batch, 1, hidden], [16, 1, 768]
######
# Step 5.3: get another input sequence attention: att_p'
######
question_summary_vec = self.question_summary_attn_prj(decoder_output)
question_summary_vec = self.question_summary_attn_dropout(question_summary_vec) # [batch, 1, hidden]
question_summary_w = torch.matmul(
sequence_output, torch.transpose(question_summary_vec, 1, 2)) #[batch, seq_length, 1],[16, 512, 1]
question_summary_w -= 1e6 * (1 - float_input_mask)
question_summary_w = F.softmax(question_summary_w, dim=1) # [batch, seq_length, 1], [16, 512, 1]
question_summary_embeddings = torch.matmul(
torch.transpose(question_summary_w, 1, 2), sequence_output)
######
# Step 5.4: get contextual information C_T
######
if conf.sep_attention:
# [batch, 1, hidden * 3], [16, 1, 2304]
concat_input_embeddings = torch.cat([decoder_history_ctx_embeddings,
question_ctx_embeddings,
decoder_output], dim=-1)
else:
concat_input_embeddings = torch.cat([decoder_history_ctx_embeddings,
decoder_output], dim=-1)
input_embeddings = self.input_embeddings_prj(concat_input_embeddings) #[batch, 1, hidden],[16, 1, 768]
if conf.layer_norm:
input_embeddings = self.input_embeddings_layernorm(
input_embeddings)
######
# Step 5.5: get all token embeddings: H_T'
######
question_option_vec = this_step_new_op_emb * question_summary_embeddings # [batch, option_size, hidden], [16 556, 768]
option_embeddings = torch.cat(
[this_step_new_op_emb, question_option_vec], dim=-1) # [batch, option_size, hidden*2], [16, 556, 1536]
option_embeddings = self.option_embeddings_prj(option_embeddings) # [batch, option_size, hidden], [16, 556, 768]
######
# Step 5.6: get logits
######
option_logits = torch.matmul(
option_embeddings, torch.transpose(input_embeddings, 1, 2)) # batch, option_size, 1],[16, 556, 1]
option_logits = torch.squeeze(option_logits, dim=2) # [batch, op + seq_len],op + seq_len = option_size
option_logits -= 1e6 * (1 - option_mask)
logits.append(option_logits)
######
# Step 6: update state
######
if is_training:
program_index = torch.unsqueeze(split_program_ids[cur_step], dim=1) # [batch, 1, 1], [16, 1, 1]
else:
# constrain decoding
if cur_step % 4 == 0 or (cur_step + 1) % 4 == 0:
# op round
option_logits -= 1e6 * self.seq_only_mask
else:
# number round
option_logits -= 1e6 * self.op_only_mask
if (cur_step + 1) % 4 == 0:
# ")" round
option_logits -= 1e6 * self.para_mask
# print(program_index)
program_index = torch.argmax(option_logits, axis=-1, keepdim=True)
program_index = torch.unsqueeze(program_index, dim=1)
if (cur_step + 1) % 4 == 0:
# update op embeddings
this_step_index = cur_step // 4
this_step_list_index = (
self.op_list + self.const_list).index("#" + str(this_step_index)) # ??? integer
this_step_mask = self.step_masks[this_step_index, :] # [option_size], [556]
decoder_step_vec = self.decoder_step_proj(concat_input_embeddings)
decoder_step_vec = self.decoder_step_proj_dropout(decoder_step_vec)#[batch,1,hidden], [16, 1, 768]
decoder_step_vec = torch.squeeze(decoder_step_vec) # [batch, hidden], [16, 768]
this_step_new_emb = decoder_step_vec # [batch, hidden]
this_step_new_emb = torch.unsqueeze(this_step_new_emb, 1)
this_step_new_emb = this_step_new_emb.repeat(
1, self.reserved_token_size+self.input_length, 1) # [batch, op seq, hidden]
this_step_mask = torch.unsqueeze(this_step_mask, 0) # [1, op seq]
this_step_mask = torch.unsqueeze(this_step_mask, 2) # [1, op seq, 1]
this_step_mask = this_step_mask.repeat(batch_size, 1, self.hidden_size) # [batch, op seq, hidden]
this_step_new_op_emb = torch.where(
this_step_mask > 0, this_step_new_emb, initial_option_embeddings)
program_index = torch.repeat_interleave(program_index, self.hidden_size, dim=2) # [batch, 1, hidden]
input_program_embeddings = torch.gather(option_embeddings, dim=1, index=program_index)
decoder_output, (decoder_state_h, decoder_state_c) = self.rnn(
input_program_embeddings, (decoder_state_h, decoder_state_c))
decoder_history = torch.cat(
[decoder_history, input_program_embeddings], dim=1) # [batch, cur_step + 1, hidden]
logits = torch.stack(logits, dim=1)
return logits
|
import logging
from datawinners.main.database import get_db_manager
from datawinners.project.view_models import ReporterEntity
from datawinners.tasks import app
from mangrove.datastore.entity import get_by_short_code
from mangrove.form_model.form_model import REPORTER
from mangrove.transport.contract.survey_response import SurveyResponse
@app.task(max_retries=3, throw=False)
def update_datasender_on_open_submissions(database_name, reporter_id):
logger = logging.getLogger('datawinners.tasks')
try:
dbm = get_db_manager(database_name)
logger.error(reporter_id)
reporter_entity = ReporterEntity(get_by_short_code(dbm, reporter_id, [REPORTER]))
rows = dbm.load_all_rows_in_view("anonymous_submissions", key=reporter_entity.mobile_number)
for row in rows:
_update_survey_response(dbm, row, reporter_entity.entity.id)
except Exception as e:
logger.exception('Failed for db: %s ,reporter_id: %s' % (database_name, reporter_id))
logger.exception(e)
def _update_survey_response(dbm, row, reporter_id):
survey_response_doc = SurveyResponse.__document_class__.wrap(row['value'])
survey_response = SurveyResponse.new_from_doc(dbm=dbm, doc=survey_response_doc)
survey_response.is_anonymous_submission = False
survey_response.owner_uid = reporter_id
survey_response.save()
@app.task(max_retries=3, throw=False)
def convert_open_submissions_to_registered_submissions(database_name, reporter_ids):
for reporter_id in reporter_ids:
update_datasender_on_open_submissions(database_name, reporter_id) |
class Solution:
def longestLine(self, M):
hor, ver, dig, aDig, mx, m, n = {}, {}, {}, {}, 0, len(M), len(M and M[0])
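        # hor/ver/dig/aDig hold the length of the run of 1s ending at (i, j)
        # along the row, column, diagonal and anti-diagonal respectively.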
for i in range(m):
for j in range(n):
if M[i][j]:
                    hor[(i, j)] = j > 0 and M[i][j - 1] and hor[(i, j - 1)] + 1 or 1
                    ver[(i, j)] = i > 0 and M[i - 1][j] and ver[(i - 1, j)] + 1 or 1
dig[(i, j)] = i > 0 and j > 0 and M[i - 1][j - 1] and dig[(i - 1, j - 1)] + 1 or 1
aDig[(i, j)] = i > 0 and j + 1 < n and M[i - 1][j + 1] and aDig[(i - 1, j + 1)] + 1 or 1
mx = max(mx, ver[(i, j)], hor[(i, j)], dig[(i, j)], aDig[(i, j)])
return mx |
from django.db import models
class PoliceOfficer(models.Model):
"""
An officer of the NYPD.
E.g. "Jake Peralta"
"""
badge_number = models.IntegerField(primary_key=True)
first_name = models.CharField(max_length=200)
surname = models.CharField(max_length=200)
rank = models.CharField(max_length=200)
arrests = models.ManyToManyField(
"Arrest",
related_name="arresting_officers",
help_text='All arrests made by the officer'
)
class PoliceStation(models.Model):
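    """Base model shared by Precinct and Division."""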
officers = models.ForeignKey("PoliceOfficer", on_delete=models.CASCADE)
class Precinct(PoliceStation):
"""
A precinct of officers
E.g. "Brookyln 99"
"""
number = models.IntegerField(primary_key=True)
burrough = models.CharField(max_length=20)
captain = models.OneToOneField(PoliceOfficer, on_delete=models.CASCADE)
class Meta:
unique_together = ("burrough", "number")
def natural_key(self):
return (self.burrough, self.number)
class Division(PoliceStation):
"""
A division of officers, not in the field.
E.g. Major Crimes Unit
"""
name = models.CharField(max_length=200)
class Arrest(models.Model):
alleged_crime = models.CharField(max_length=20)
perp = models.ForeignKey("Perpetrator", on_delete=models.CASCADE)
arrest_date = models.DateField()
processing_date = models.DateField()
class Perpetrator(models.Model):
"""
A person who is accused of a crime.
E.g. Doug Judy, aka. "The Pontiac Bandit"
"""
first_name = models.CharField(max_length=200)
surname = models.CharField(max_length=200)
alias = models.CharField(max_length=200)
|
# CPAC/network_centrality/utils.py
#
# Network centrality utilities
# Method to return recommended block size based on memory restrictions
def calc_blocksize(timeseries, memory_allocated=None,
include_full_matrix=False, sparsity_thresh=0.0):
'''
    Method to calculate the block size for computing the correlation
    matrix, given the memory allocated by the user. By default, the
    block size is 1000 when no memory limit is specified.
    If memory allocated is specified, the block size is the allocated
    memory minus the memory needed for the timeseries and the centrality
    output, divided by the size of one correlation map; in other words,
    the number of correlation maps that can be held in memory at once.
Parameters
----------
timeseries : numpy array
timeseries data: `nvoxs` x `ntpts`
memory_allocated : float
memory allocated in GB for degree centrality
include_full_matrix : boolean
Boolean indicating if we're using the entire correlation matrix
in RAM (needed during eigenvector centrality).
Default is False
sparsity_thresh : float
        a number between 0 and 1 that represents the fraction of
        connections to keep during sparsity thresholding.
Default is 0.0.
Returns
-------
block_size : an integer
size of block for matrix calculation
'''
# Import packages
import numpy as np
# Init variables
block_size = 1000 # default
nvoxs = timeseries.shape[0]
ntpts = timeseries.shape[1]
nbytes = timeseries.dtype.itemsize
# If we need the full matrix for centrality calculation
if include_full_matrix:
memory_for_full_matrix = nvoxs * nvoxs * nbytes
# Otherwise, we're doing it in blocks
else:
memory_for_full_matrix = 0
# Memory variables
memory_for_timeseries = nvoxs * ntpts * nbytes
memory_for_output = 2 * nvoxs * nbytes # bin and wght outputs
needed_memory = memory_for_timeseries + \
memory_for_output + \
memory_for_full_matrix
if memory_allocated:
available_memory = memory_allocated * 1024.0**3 # assume it is in GB
## memory_for_block = # of seed voxels * nvoxs * nbytes
block_size = int( (available_memory - needed_memory)/(nvoxs*nbytes) )
        # If we're doing degree/sparsity thresholding, calculate block_size
if sparsity_thresh:
# k - block_size, v - nvoxs, d - nbytes, m - memory_allocated
# Solve for k: (-d/2 - 20.5)*k^2 + (41*v + d*v -d/2 - 20.5)*k - m = 0
coeffs = np.zeros(3)
coeffs[0] = -nbytes/2 - 20.5
coeffs[1] = 41*nvoxs + nbytes*nvoxs - nbytes/2 - 20.5
coeffs[2] = -(available_memory - needed_memory)
roots = np.roots(coeffs)
# If roots are complex, then the block_size needed to occupy all of
# the available memory is bigger than the number of voxels.
# So set block_size = nvoxs
if np.iscomplex(roots[0]):
block_size = nvoxs
# If the roots are real, test the roots for condition
else:
root = roots[np.where(roots <= nvoxs)]
root = root[np.where(root > 0)]
if len(root) == 1:
block_size = np.floor(root[0])
else:
block_size = 1000
# Test if calculated block size is beyond max/min limits
if block_size > nvoxs:
block_size = nvoxs
elif block_size < 1:
memory_usage = (needed_memory + 2.0*nvoxs*nbytes)/1024.0**3
raise MemoryError('Not enough memory available to perform degree '\
'centrality. Need a minimum of %.2fGB' % memory_usage)
# Convert block_size to an integer before returning
block_size = int(block_size)
# Return memory usage and block size
if sparsity_thresh:
# Calculate RAM usage by blocking algorithm
m = (-nbytes/2 - 20.5)*block_size**2 + \
(41*nvoxs + nbytes*nvoxs - nbytes/2 - 20.5)*block_size
# Calculate RAM usage by sparse matrix at end
max_conns = nvoxs**2-nvoxs
# Max number of connections * i + j (32-bit ints) + w
m2 = np.round(max_conns*sparsity_thresh)*(4 + 4 + nbytes)
if m2 > m:
m = m2
memory_usage = (needed_memory + m)/1024.0**3
else:
memory_usage = (needed_memory + block_size*nvoxs*nbytes)/1024.0**3
# Print information
print 'block_size -> %i voxels' % block_size
print '# of blocks -> %i' % np.ceil(float(nvoxs)/block_size)
print 'expected usage -> %.2fGB' % memory_usage
return block_size
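# Usage sketch (illustrative numbers, not from this module): for a float32 dataset of
# 10,000 voxels x 200 timepoints with 4 GB allocated,
#     block_size = calc_blocksize(timeseries, memory_allocated=4.0)
# where `timeseries` is a numpy array of shape (10000, 200).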
# Method to calculate correlation coefficient from (one or two) datasets
def calc_corrcoef(X, Y=None):
'''
Method to calculate correlation
Each of the columns in X will be correlated
with each of the columns in Y. Each column
represents a variable, with the rows containing the observations.
Parameters
----------
X : numpy array
array of shape x1, x2
Y : numpy array
array of shape y1, y2
Returns
-------
r : numpy array
array containing correlation values of shape x2, y2
'''
# Import packages
import numpy as np
if Y is None:
Y = X
if X.shape[0] != Y.shape[0]:
raise Exception("X and Y must have the same number of rows.")
X = X.astype(float)
Y = Y.astype(float)
X -= X.mean(axis=0)[np.newaxis,...]
Y -= Y.mean(axis=0)
xx = np.sum(X**2, axis=0)
yy = np.sum(Y**2, axis=0)
r = np.dot(X.T, Y)/np.sqrt(np.multiply.outer(xx,yy))
return r
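# Usage sketch: columns are variables and rows are observations, so for a hypothetical
# X of shape (100, 5), calc_corrcoef(X) returns the 5 x 5 matrix of pairwise
# correlations between the columns of X.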
# Method to cluster the data (used in lFCD)
def cluster_data(img, thr, xyz_a, k=26):
    '''Cluster above-threshold voxels of a correlation map into connected components (used in lFCD)'''
# Import packages
from scipy.sparse import coo_matrix, cs_graph_components
import numpy as np
# Threshold the entire correlation map and find connected components, store this in sparse matrix
val_idx = img > thr # store valid indices
xyz_th = xyz_a[val_idx] # find the 3D indices corresponding to the above threshold voxels
i,j,d = graph_3d_grid(xyz_th, k=k) # find the connected components for the above threshold voxels
nvoxs = xyz_th.shape[0] # store the number of correlated voxels in entire network
adj = coo_matrix((d, (i,j)), shape=(nvoxs,nvoxs)) # and store the connected nodes and weights in sparse matrix
# Identify the connected components (clusters) within the graph
nc, labels = cs_graph_components(adj)
# Copy the node labels to their voxel equivalents
lbl_img = np.zeros(img.shape) # init lbl_img - map to store label data
# add 2 so that labels corresponding to unconnected voxels (-2)
# will be zero in lbl_img, and label==0 will now equal 2
lbl_img[val_idx] = labels + 2
return lbl_img
# Convert probability threshold value to correlation threshold
def convert_pvalue_to_r(scans, threshold):
'''
Method to calculate correlation threshold from p_value
Parameters
----------
scans : int
Total number of scans in the data
threshold : float
input p_value
Returns
-------
rvalue : float
correlation threshold value
'''
# Import packages
import scipy.stats as s
import math
print "p_value ->", threshold
x = 1-threshold/2
dof = scans-2
#Inverse Survival Function (Inverse of SF)
tvalue = s.t.isf(x, dof)
rvalue = math.sqrt(math.pow(tvalue, 2)/(dof+ math.pow(tvalue,2)))
return rvalue
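# Usage sketch (hypothetical values): for a run with 200 scans and p_value = 0.001,
#     r_thresh = convert_pvalue_to_r(200, 0.001)
# returns the correlation magnitude corresponding to that significance level.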
# Borrowed from nipy.graph.graph
# https://github.com/nipy/nipy/blob/master/nipy/algorithms/graph/graph.py
def graph_3d_grid(xyz, k=18):
'''
    Utility that computes the neighbors on a 3d grid (6-, 18-, or 26-connectivity)
Parameters
----------
xyz: array of shape (n_samples, 3); grid coordinates of the points
k: neighboring system, equal to 6, 18, or 26
Returns
-------
i, j, d 3 arrays of shape (E),
where E is the number of edges in the resulting graph
(i, j) represent the edges, d their weights
'''
# Import packages
import numpy as np
if np.size(xyz) == 0:
return None
lxyz = xyz - xyz.min(0)
m = 3 * lxyz.max(0).sum() + 2
# six neighbours
n6 = [np.array([1, m, m ** 2]), np.array([m ** 2, 1, m]),
np.array([m, m ** 2, 1])]
# eighteen neighbours
n18 = [np.array([1 + m, 1 - m, m ** 2]),
np.array([1 + m, m - 1, m ** 2]),
np.array([m ** 2, 1 + m, 1 - m]),
np.array([m ** 2, 1 + m, m - 1]),
np.array([1 - m, m ** 2, 1 + m]),
np.array([m - 1, m ** 2, 1 + m])]
# twenty-six neighbours
n26 = [np.array([1 + m + m ** 2, 1 - m, 1 - m ** 2]),
np.array([1 + m + m ** 2, m - 1, 1 - m ** 2]),
np.array([1 + m + m ** 2, 1 - m, m ** 2 - 1]),
np.array([1 + m + m ** 2, m - 1, m ** 2 - 1])]
# compute the edges in each possible direction
def create_edges(lxyz, nn, l1dist=1, left=np.array([]), right=np.array([]),
weights=np.array([])):
q = 0
for nn_row in nn:
v1 = np.dot(lxyz, nn_row)
o1 = np.argsort(v1)
sv1 = v1[o1]
nz = np.squeeze(np.nonzero(sv1[: - 1] - sv1[1:] == - l1dist))
o1z, o1z1 = o1[nz], o1[nz + 1]
left = np.hstack((left, o1z, o1z1))
right = np.hstack((right, o1z1, o1z))
q += 2 * np.size(nz)
weights = np.hstack((weights, np.sqrt(l1dist) * np.ones(q)))
return left, right, weights
i, j, d = create_edges(lxyz, n6, 1.)
if k >= 18:
i, j, d = create_edges(lxyz, n18, 2, i, j, d)
if k == 26:
i, j, d = create_edges(lxyz, n26, 3, i, j, d)
i, j = i.astype(np.int), j.astype(np.int)
# reorder the edges to have a more standard order
order = np.argsort(i + j * (len(i) + 1))
i, j, d = i[order], j[order], d[order]
return i, j, d
# Function to map a centrality matrix to a nifti image
def map_centrality_matrix(centrality_matrix, aff, mask, template_type):
'''
Method to map centrality matrix to a nifti image
Parameters
----------
centrality_matrix : tuple (string, array_like)
tuple containing matrix name and degree/eigenvector centrality matrix
aff : ndarray
Affine matrix of the input data
mask : ndarray
Mask or roi data matrix
template_type : int
type of template: 0 for mask, 1 for roi
Returns
-------
out_file : string (nifti image)
nifti image mapped from the centrality matrix
Raises
------
Exception
'''
import nibabel as nib
import os
import numpy as np
try:
out_file, matrix = centrality_matrix
out_file = os.path.join(os.getcwd(), out_file + '.nii.gz')
sparse_m = np.zeros((mask.shape), dtype=float)
print 'mapping centrality matrix to nifti image...', out_file
if int(template_type) == 0:
cords = np.argwhere(mask)
index=0
for val in cords:
x,y,z=val
sparse_m[x,y,z]= matrix[index]
index+=1
elif int(template_type) == 1:
nodes = np.unique(mask).tolist()
nodes.sort()
index = 0
for n in nodes:
if n> 0:
cords = np.argwhere(mask==n)
for val in cords:
x,y,z = val
if isinstance(matrix[index], list):
sparse_m[x,y,z] = matrix[index][0]
else:
sparse_m[x,y,z]=matrix[index]
index+=1
nifti_img = nib.Nifti1Image(sparse_m, aff)
nifti_img.to_filename(out_file)
return out_file
except:
print 'Error in mapping centrality matrix to nifti image'
raise
# Function to actually do the list merging
def merge_lists(deg_list=[],eig_list=[],lfcd_list=[]):
merged_list = []
merged_list.extend(deg_list)
merged_list.extend(eig_list)
merged_list.extend(lfcd_list)
return merged_list
|
from django.conf import settings
from django.db import connections
from django.test.testcases import LiveServerTestCase, LiveServerThread
from daphne.endpoints import build_endpoint_description_strings
from daphne.server import Server
from ..routing import get_default_application
from ..staticfiles import StaticFilesWrapper
class DaphneServerThread(LiveServerThread):
"""
LiveServerThread subclass that runs Daphne
"""
def __init__(self, host, application, *args, **kwargs):
self.application = application
super().__init__(host, None, *args, **kwargs)
def run(self):
"""
Sets up the live server and databases, and then loops over handling
http requests.
"""
if self.connections_override:
# Override this thread's database connections with the ones
# provided by the main thread.
for alias, conn in self.connections_override.items():
connections[alias] = conn
try:
self.daphne = self._create_server()
self.daphne.run()
except Exception as e:
self.error = e
self.is_ready.set()
finally:
connections.close_all()
@property
def port(self):
# Dynamically fetch real listen port if we were given 0
if self._port == 0:
return self.daphne.listening_addresses[0][1]
return self._port
@port.setter
def port(self, value):
self._port = value
def _create_server(self):
endpoints = build_endpoint_description_strings(host=self.host, port=self._port)
return Server(
application=self.application,
endpoints=endpoints,
signal_handlers=False,
ws_protocols=getattr(settings, "CHANNELS_WS_PROTOCOLS", None),
root_path=getattr(settings, "FORCE_SCRIPT_NAME", "") or "",
ready_callable=lambda: self.is_ready.set(),
)
def terminate(self):
if hasattr(self, "daphne"):
            # Stop the Daphne server
self.daphne.stop()
self.join()
class ChannelsLiveServerTestCase(LiveServerTestCase):
"""
Drop-in replacement for Django's LiveServerTestCase.
In order to serve static files create a subclass with serve_static = True.
"""
server_thread_class = DaphneServerThread
static_wrapper = StaticFilesWrapper
serve_static = False
@classmethod
def _create_server_thread(cls, connections_override):
if cls.serve_static:
application = cls.static_wrapper(get_default_application())
else:
application = get_default_application()
return cls.server_thread_class(
cls.host,
application,
connections_override=connections_override,
port=cls.port,
)
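# Minimal usage sketch (not part of this module): subclassing the test case with static file
# serving enabled. The test body and the assumption that the project serves a page at "/"
# are illustrative only.
class ExampleChannelsLiveTests(ChannelsLiveServerTestCase):
    serve_static = True  # wrap the application in StaticFilesWrapper

    def test_root_page_is_reachable(self):
        import urllib.request
        # live_server_url is provided by Django's LiveServerTestCase machinery
        with urllib.request.urlopen(self.live_server_url) as response:
            self.assertEqual(response.status, 200)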
|
def check_status(status):
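    """Return the standard reason phrase for the given HTTP status code."""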
#2xx Success
if status == 200:
return ("OK")
elif status == 201:
return ("Created")
elif status == 202:
return ("Accepted")
elif status == 203:
return ("Non-Authoritative Information")
elif status == 204:
return ("No Content")
elif status == 205:
return ("Reset Content")
elif status == 206:
return ("Partial Content")
elif status == 207:
return ("Multi-Status")
elif status == 208:
return ("Already Reported")
elif status == 226:
return ("IM Used")
#3xx Redirection
elif status == 300:
return ("Multiple Choices")
elif status == 301:
return ("Moved Permanently")
elif status == 302:
return ("Found")
elif status == 303:
return ("See Other")
elif status == 304:
return ("Not Modified")
elif status == 305:
return ("Use Proxy")
elif status == 306:
return ("Switch Proxy")
elif status == 307:
return ("Temporary Redirect")
elif status == 308:
return ("Permanent Redirect")
#4xx Client Error
elif status == 400:
return ("Bad Request")
elif status == 401:
return ("Unauthorized")
elif status == 402:
return ("Payment Required")
elif status == 403:
return ("Forbidden")
elif status == 404:
return ("Not Found")
elif status == 405:
return ("Method Not Allowed")
elif status == 406:
return ("Not Acceptable")
elif status == 407:
return ("Proxy Authentication Required")
elif status == 408:
return ("Request Timeout")
elif status == 409:
return ("Conflict")
elif status == 410:
return ("Gone")
elif status == 411:
return ("Length Required")
elif status == 412:
return ("Precondition Failed")
elif status == 413:
return ("Payload Too Large")
elif status == 414:
return ("URI Too Long")
elif status == 415:
return ("Unsupported Media Type")
elif status == 416:
return ("Range Not Satisfiable")
elif status == 417:
return ("Expectation Failed")
elif status == 418:
return ("I'm a teapot")
elif status == 421:
return ("Misdirected Request")
elif status == 422:
return ("Unprocessable Entity")
elif status == 423:
return ("Locked")
elif status == 424:
return ("Failed Dependency")
elif status == 426:
return ("Upgrade Required")
elif status == 428:
return ("Precondition Required")
elif status == 429:
return ("Too Many Requests")
elif status == 431:
return ("Request Header Fields Too Large")
elif status == 451:
return ("Unavailable For Legal Reasons")
elif status == 440:
return ("Login Timeout")
elif status == 449:
return ("Retry With")
#5xx Server Error
elif status == 500:
return ("Internal Server Error")
elif status == 501:
return ("Not Implemented")
elif status == 502:
return ("Bad Gateway")
elif status == 503:
return ("Service Unavailable")
elif status == 504:
return ("Gateway Timeout")
elif status == 505:
return ("HTTP Version Not Supported")
elif status == 506:
return ("Variant Also Negotiates")
elif status == 507:
return ("Insufficient Storage")
elif status == 508:
return ("Loop Detected")
elif status == 510:
return ("Not Extended")
elif status == 511:
return ("Network Authentication Required")
#Unofficial codes
elif status == 103:
return ("Checkpoint")
elif status == 450:
return ("Blocked by Windows Parental Controls")
elif status == 509:
return ("Bandwidth Limit Exceeded")
#nginx
elif status == 444:
return ("No Response (nginx)")
elif status == 495:
return ("SSL Certificate Error (nginx)")
elif status == 496:
return ("SSL Certificate Required (nginx)")
elif status == 497:
return ("HTTP Request Sent to HTTPS Port (nginx)")
elif status == 499:
return ("Client Closed Request (nginx)")
#CloudFlare
elif status == 520:
return ("Unknown Error (CloudFlare)")
elif status == 521:
return ("Web Server Is Down (CloudFlare)")
elif status == 522:
return ("Connection Timed Out (CloudFlare)")
elif status == 523:
return ("Origin Is Unreachable (CloudFlare)")
elif status == 524:
return ("A Timeout Occurred (CloudFlare)")
elif status == 525:
return ("SSL Handshake Failed (CloudFlare)")
elif status == 526:
return ("Invalid SSL Certificate (CloudFlare)") |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Topic: function default arguments
Desc :
"""
def spam(a, b=42):
print(a, b)
spam(1) # Ok. a=1, b=42
spam(1, 2) # Ok. a=1, b=2
_no_value = object()
def spam(a, b=_no_value):
if b is _no_value:
print('No b value supplied')
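# Example calls (illustrative):
spam(1)        # prints: No b value supplied
spam(1, None)  # prints nothing; None counts as an explicitly supplied value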
|
import sys
import os
import json
from enum import Enum
from pathlib import Path
from .channel import Channel
from io_fetch_channel import ChannelPerformanceFetcher
from io_channel import IOSharedResourceMap
from regla import channel_factory, ChannelEntity, RuleSerializer
from datetime import datetime
from typing import Any, Dict
class Command(Enum):
campaigns = 'campaigns'
adgroups = 'adgroups'
orgs = 'orgs'
execute_rule = 'execute_rule'
impact_report = 'impact_report'
channel_report = 'channel_report'
    entity_report = 'entity_report'
rule_report = 'rule_report'
report = 'report'
entity_process = 'entity_process'
def prepare_credentials(args: Dict[str, Any]) -> Dict[str, Any]:
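    """Resolve any shared-credential references (looked up via 'shared_credentials_url') in the supplied credentials."""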
shared_credentials_map = IOSharedResourceMap(url_key='shared_credentials_url')
credentials = shared_credentials_map.run(args['credentials'])
return credentials
def run():
args = json.loads(sys.argv[1])
from moda import log
log.set_message_logger(lambda message, end: print(json.dumps({'log': message + end})))
with open(Path(__file__).parent.parent / 'configure.json') as f:
configure = json.load(f)
command = Command(args['command'])
if command is Command.orgs:
channel = channel_factory(channel_identifier=args['channel'])
with channel.connected(credentials=prepare_credentials(args)):
orgs = channel.get_entities(entity_type=ChannelEntity.org)
print(json.dumps({
'data': [
{
**d,
'id': d['id'],
} for d in orgs
]
}))
elif command is Command.campaigns:
channel = channel_factory(channel_identifier=args['channel'])
with channel.connected(credentials=prepare_credentials(args)):
campaigns = [
campaign
for org in args['orgs']
for campaign in channel.get_entities(
entity_type=ChannelEntity.campaign,
parent_ids={ChannelEntity.org: str(org['id'])}
)
]
print(json.dumps({
'data': [
{
**d,
'org_id': d['org_id'],
'id': d['id'],
} for d in campaigns
]
}))
elif command is Command.adgroups:
channel = channel_factory(channel_identifier=args['channel'])
with channel.connected(credentials=prepare_credentials(args)):
ad_groups = channel.get_entities(
entity_type=ChannelEntity.ad_group,
parent_ids={ChannelEntity.org: str(args['orgID']), ChannelEntity.campaign: str(args['campaignID'])}
)
print(json.dumps({
'data': [
{
**d,
'org_id': d['org_id'],
'campaign_id': d['campaign_id'],
'id': d['id'],
} for d in ad_groups
]
}))
elif command is Command.execute_rule:
from io_map import IOMap
IOMap.map_auto_register = True
from .rule_executor import RuleExecutor
date_format = '%Y-%m-%d'
rule_executor = RuleExecutor(options=args['dbConfig'])
rule = rule_executor.get_rule(rule_id=args['ruleID'])
if configure['dry_run_only'] is not False or ('dryRunOnly' in args and args['dryRunOnly']):
if rule._id in configure['non_dry_run_rule_ids']:
log.log(f'Allowing non dry run for rule {rule._id} despite dry_run_only configuration because rule is listed in the non_dry_run_rule_ids configuration.')
elif 'nonDryRunRuleIDs' in args and rule._id in args['nonDryRunRuleIDs']:
log.log(f'Allowing non dry run for rule {rule._id} despite dry_run_only configuration because rule is listed in the nonDryRunRuleIDs argument.')
else:
if not rule.dryRun:
log.log(f'Forcing dry run for rule {rule._id} due to dry_run_only configuration.')
rule.dryRun = True
result = rule_executor.execute(
credentials=prepare_credentials(args),
rule=rule,
granularity=args['granularity'],
start_date=datetime.strptime(args['startDate'], date_format),
end_date=datetime.strptime(args['endDate'], date_format)
)
print(json.dumps({
"result" : result,
}, cls=RuleSerializer))
elif command is Command.impact_report:
from io_map import IOMap
IOMap.map_auto_register = True
from .rule_executor import RuleExecutor
date_format = '%Y-%m-%d'
credentials = prepare_credentials(args)
rule_id = args['ruleID']
report_id = args['reportID']
rule_executor = RuleExecutor(options=args['dbConfig'])
rule = rule_executor.get_rule(rule_id=rule_id)
report_metadata = rule_executor.get_impact_report_metadata(
credentials=credentials,
rule=rule
)
print(json.dumps({'result': {'reportId': report_id, 'granularity': report_metadata.granularity.value}}))
if report_metadata.is_valid:
report = rule_executor.get_impact_report(
credentials=credentials,
rule=rule
)
print(f'{{"result":{{"reportId": "{report_id}", "rows": {report.to_json(orient="records")}}}}}')
input()
elif command is Command.channel_report:
fetcher = ChannelPerformanceFetcher(
raw_channel=args['channel'],
raw_time_granularity=args['time_granularity'],
raw_entity_granularity=args['entity_granularity'],
raw_performance_columns=[]
)
start = datetime.fromtimestamp(args['start'])
end = datetime.fromtimestamp(args['end'])
report = fetcher.run(
credentials=prepare_credentials(args),
start=start,
end=end
)
print(json.dumps({
'data': report.to_csv(index=False),
}))
elif command is Command.report:
from .report import get_metadata_report
result = get_metadata_report(
columns=args['columns'],
filters=args['filters'],
options=args['options'],
credentials=prepare_credentials(args)
)
print(json.dumps({
'result': {
'metadata': result['metadata'],
'report': result['report'].to_csv(index=False),
},
}))
elif command is Command.entity_process:
from .entity import process_entities
result = process_entities(
operations=args['operations'],
context=args['context'],
credentials=prepare_credentials(args)
)
print(json.dumps({
'result': result,
}))
else:
raise ValueError('Unsupported command', command)
if __name__ == '__main__':
run() |
#
# Copyright 2020-2021 Picovoice Inc.
#
# You may not use this file except in compliance with the license. A copy of the license is located in the "LICENSE"
# file accompanying this source.
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
import sys
from gpiozero import LED
from apa102 import APA102
COLORS_RGB = dict(
off=(0, 0, 0),
blue=(0, 0, 255),
green=(0, 255, 0),
orange=(255, 128, 0),
pink=(255, 51, 153),
purple=(128, 0, 128),
red=(255, 0, 0),
white=(255, 255, 255),
yellow=(255, 255, 51),
)
driver = APA102(num_led=12)
power = LED(5)
power.on()
def set_color(color):
for i in range(12):
driver.set_pixel(i, color[0], color[1], color[2])
driver.show()
def main():
set_color(COLORS_RGB[sys.argv[1]])
if __name__ == '__main__':
main()
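# Usage sketch: run with a color name from COLORS_RGB, e.g. `python3 <this script> blue`.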
|
#Copyright (c) 2014, Benjamin Bässler <[email protected]>
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#* Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
#* Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
#FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
#DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from PIL import Image
import numpy as np
## Reads a image from a file converts it to black and white and creates a numpy array
#
# @param filename File to read image from
# @param threshold Threshold to convert greyscale image to black and white. Range 0.0 ... 1.0
# @param max_x is the image size to crop/extend the image in width
# @param max_y is the image size to crop/extend the image in high
# @return Returns the black and white numpy array
def import_img (filename, threshold, max_x, max_y): #convert image to grey scale
img = Image.open(filename)
img_grey = img.convert('L')
img_array = np.array(img_grey)
img_out = np.zeros((max_y, max_x), dtype=np.int)
#convert to black/white
max_val = 255 #np.iinfo(type(img_array[0][0])).max
min_val = 0 #np.iinfo(type(img_array[0][0])).min
threshold = threshold * (max_val-min_val) - min_val
for y in range(0, img_out.shape[0]):
for x in range(0, img_out.shape[1]):
if y < img_array.shape[0] and x < img_array.shape[1] and img_array[y][x] < threshold:
img_out[y][x] = min_val
else:
img_out[y][x] = max_val
del img
del img_grey
del img_array
return img_out
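# Usage sketch (hypothetical file name):
#     bw = import_img('logo.png', threshold=0.5, max_x=640, max_y=480)
#     print(bw.shape)  # -> (480, 640)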
|
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase
from rest_framework import status
from faker import Faker
f = Faker()
# class ServiceAPITestCase(APITestCase):
# def setUp(self):
# pass
#
# def test_get_device_info(self):
# data = {
#
# }
# path = reverse("api")
# res = self.client.post(
# data=data,
# path=path,
# )
# self.assertEqual(res.status_code, status.HTTP_200_OK)
#
# def test_add_device(self):
# data = {
#
# }
# path = reverse("api")
# res = self.client.post(
# data=data,
# path=path,
# )
# self.assertEqual(res.status_code, status.HTTP_200_OK)
#
# def test_remove_device(self):
# data = {
#
# }
# path = reverse("api")
# res = self.client.post(
# data=data,
# path=path,
# )
# self.assertEqual(res.status_code, status.HTTP_200_OK)
#
# def test_get_device_tag(self):
# data = {
#
# }
# path = reverse("api")
# res = self.client.post(
# data=data,
# path=path,
# )
# self.assertEqual(res.status_code, status.HTTP_200_OK)
#
# def test_update_device_tag(self):
# data = {
#
# }
# path = reverse("api")
# res = self.client.post(
# data=data,
# path=path,
# )
# self.assertEqual(res.status_code, status.HTTP_200_OK)
#
# def test_delete_device_tag(self):
# data = {
#
# }
# path = reverse("api")
# res = self.client.post(
# data=data,
# path=path,
# )
# self.assertEqual(res.status_code, status.HTTP_200_OK)
#
# def test_push_message(self):
# data = {
#
# }
# path = reverse("api")
# res = self.client.post(
# data=data,
# path=path,
# )
# self.assertEqual(res.status_code, status.HTTP_200_OK)
#
# def test_push_message_to_all_device(self):
# data = {
#
# }
# path = reverse("api")
# res = self.client.post(
# data=data,
# path=path,
# )
# self.assertEqual(res.status_code, status.HTTP_200_OK)
#
# def test_push_message_to_special_device_by_tag(self):
# data = {
#
# }
# path = reverse("api")
# res = self.client.post(
# data=data,
# path=path,
# )
# self.assertEqual(res.status_code, status.HTTP_200_OK)
|
_g_repo = None
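# Module-level holder for the active repo instance; set()/get() below form a minimal global registry.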
def set(repo):
global _g_repo
_g_repo = repo
def get():
return _g_repo
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. See accompanying LICENSE file.
#
import os
import re
import sys
import errno
import shlex
import logging
import subprocess
import platform
import fileinput
import getpass
import shutil
from xml.etree import ElementTree as ET
from os.path import basename
from subprocess import Popen,PIPE
from datetime import date
from datetime import datetime
from operator import contains
try: input = raw_input
except NameError: pass
os_name = platform.system()
os_name = os_name.upper()
msgPrompt = "Enter the below options"
msgCommand = "Usage : python rolebasedusersearchutil.py -u <userName> -p <password> -r <role>"
msgRoleList = " <role> can be ROLE_USER/ROLE_SYS_ADMIN/ROLE_KEY_ADMIN/ROLE_ADMIN_AUDITOR/ROLE_KEY_ADMIN_AUDITOR"
if os_name == "LINUX":
RANGER_ADMIN_HOME = os.getenv("RANGER_ADMIN_HOME")
if RANGER_ADMIN_HOME is None:
RANGER_ADMIN_HOME = os.getcwd()
elif os_name == "WINDOWS":
RANGER_ADMIN_HOME = os.getenv("RANGER_ADMIN_HOME")
def log(msg,type):
if type == 'info':
logging.info(" %s",msg)
if type == 'debug':
logging.debug(" %s",msg)
if type == 'warning':
logging.warning(" %s",msg)
if type == 'exception':
logging.exception(" %s",msg)
if type == 'error':
logging.error(" %s",msg)
def main(argv):
FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(format=FORMAT, level=logging.DEBUG)
ews_lib = os.path.join(RANGER_ADMIN_HOME,"ews","lib")
app_home = os.path.join(RANGER_ADMIN_HOME,"ews","webapp")
ranger_log = os.path.join(RANGER_ADMIN_HOME,"ews","logs")
if os.environ['JAVA_HOME'] == "":
log("[E] ---------- JAVA_HOME environment property not defined, aborting installation. ----------", "error")
sys.exit(1)
JAVA_BIN=os.path.join(os.environ['JAVA_HOME'],'bin','java')
if os_name == "WINDOWS" :
JAVA_BIN = JAVA_BIN+'.exe'
if os.path.isfile(JAVA_BIN):
pass
else:
while os.path.isfile(JAVA_BIN) == False:
log("Enter java executable path: :","info")
JAVA_BIN=input()
log("[I] Using Java:" + str(JAVA_BIN),"info")
userName = ""
password = ""
userRole = ""
userNameMsgFlag = False
passwordMsgFlag = False
userRoleMsgFlag = False
userroleFlag = False
if len(argv) == 1:
log("[I] " +msgPrompt + " or \n" + msgCommand + "\n " +msgRoleList, "info")
userName = input('Enter a user name: ')
password = getpass.getpass('Enter a user password:')
userRole = input('Enter a role: ')
elif len(argv) > 1 and len(argv) < 8 :
for i in range(1, len(sys.argv)) :
if sys.argv[i] == "-u" :
if len(argv)-1 > i+1 or len(argv)-1 == i+1:
userName = sys.argv[i+1]
continue
if sys.argv[i] == "-p" :
if len(argv)-1 > i+1 or len(argv)-1 == i+1:
password = sys.argv[i+1]
continue
if sys.argv[i] == "-r" :
if len(argv)-1 > i+1 or len(argv)-1 == i+1:
userRole = sys.argv[i+1]
userroleFlag = True
continue
else:
log("[E] Invalid argument list.", "error")
log("[I] " + msgCommand + "\n " + msgRoleList, "info")
sys.exit(1)
if userName == "" :
userNameMsgFlag = True
elif userName != "" :
if userName.lower() == "-p" or userName.lower() == "-r" or userName.lower() == "-u" :
userNameMsgFlag = True
if password == "" :
passwordMsgFlag = True
elif password.lower() == "-p" or password.lower() == "-r" or password.lower() == "-u" :
passwordMsgFlag = True
if userroleFlag == True :
if userRole == "":
userRoleMsgFlag = True
elif userRole != "":
if userRole.lower() == "-p" or userRole.lower() == "-r" or userRole.lower() == "-u":
userRoleMsgFlag = True
if userNameMsgFlag == True or passwordMsgFlag == True or userRoleMsgFlag == True :
log("[I] "+msgPrompt + " or \n" + msgCommand + "\n " +msgRoleList, "info")
if userNameMsgFlag == True :
userName = input('Enter a user name: ')
if passwordMsgFlag == True :
password = getpass.getpass("Enter user password:")
if userRoleMsgFlag == True :
userRole = input('Enter a role: ')
if userName != "" and password != "" :
if os_name == "LINUX":
path = os.path.join("%s","WEB-INF","classes","conf:%s","WEB-INF","classes","lib","*:%s","WEB-INF",":%s","META-INF",":%s","WEB-INF","lib","*:%s","WEB-INF","classes",":%s","WEB-INF","classes","META-INF:%s/*")%(app_home ,app_home ,app_home, app_home, app_home, app_home ,app_home,ews_lib)
elif os_name == "WINDOWS":
path = os.path.join("%s","WEB-INF","classes","conf;%s","WEB-INF","classes","lib","*;%s","WEB-INF",";%s","META-INF",";%s","WEB-INF","lib","*;%s","WEB-INF","classes",";%s","WEB-INF","classes","META-INF" )%(app_home ,app_home ,app_home, app_home, app_home, app_home ,app_home)
if userRole != "" :
get_java_cmd = "%s -Dlogdir=%s -Dlogback.configurationFile=db_patch.log4j.xml -cp %s org.apache.ranger.patch.cliutil.%s %s %s %s"%(JAVA_BIN,ranger_log,path,'RoleBasedUserSearchUtil',userName,password,userRole)
if userRole == "" :
get_java_cmd = "%s -Dlogdir=%s -Dlogback.configurationFile=db_patch.log4j.xml -cp %s org.apache.ranger.patch.cliutil.%s %s %s "%(JAVA_BIN,ranger_log,path,'RoleBasedUserSearchUtil',userName,password)
if os_name == "LINUX":
ret = subprocess.call(shlex.split(get_java_cmd))
elif os_name == "WINDOWS":
ret = subprocess.call(get_java_cmd)
if ret == 0:
log("[I] List fetched successfully","info")
else:
log("[E] Unable to fetch user list of given role ","error")
sys.exit(1)
else:
log("[E] Input Error","error")
main(sys.argv)
|
from spiceypy import spiceypy as spice
import numpy as np
from scipy.linalg import norm
import lincov.horizon as horizon
def sun_spacecraft_angle(body, time, object_id):
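    """Return the angular separation (radians) between the sub-solar and sub-spacecraft points on the given body."""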
if body == 'earth':
frame = 'ITRF93'
elif body == 'moon':
frame = 'MOON_ME'
sun,t1,t2 = spice.subslr('INTERCEPT/ELLIPSOID', body, time, frame, 'NONE', str(object_id))
sc, t1,t2 = spice.subpnt('INTERCEPT/ELLIPSOID', body, time, frame, 'NONE', str(object_id))
sun /= norm(sun)
sc /= norm(sc)
return np.arccos(np.dot(sun, sc))
class State(object):
"""State information for the linear covariance analysis"""
ground_stations = { 'DSS-24': 399024,
'DSS-34': 399034,
'DSS-54': 399054 }
r_station_ecef = {}
def __init__(self, time, loader = None, params = None):
self.loader = loader
self.params = params
self.mu_earth = loader.mu_earth * 1e9
self.mu_moon = loader.mu_moon * 1e9
# Ensure that ground station locations are loaded
if len(State.r_station_ecef) == 0:
for station in State.ground_stations:
gid = self.ground_stations[station]
State.r_station_ecef[station] = spice.spkezr(station, self.loader.start, 'ITRF93', 'NONE', 'earth')[0][0:3] * 1000.0
self.eci = spice.spkez(loader.object_id, time, 'J2000', 'NONE', 399)[0] * 1000.0
self.lci = spice.spkez(loader.object_id, time, 'J2000', 'NONE', 301)[0] * 1000.0
self.T_inrtl_to_body = np.identity(3) # FIXME: Add attitude, eventually
# FIXME: Need measurements here
self.a_meas_inrtl = np.zeros(3)
self.w_meas_inrtl = np.zeros(3)
# Get distance to earth and moon
self.d_earth = norm(self.eci[0:3])
self.d_moon = norm(self.lci[0:3])
# Get angular size of each
self.earth_angle = 2 * np.arctan(self.loader.r_earth[2] * 1000.0 / self.d_earth)
self.moon_angle = 2 * np.arctan(self.loader.r_moon[2] * 1000.0 / self.d_moon)
self.earth_phase_angle = sun_spacecraft_angle('earth', time, loader.object_id)
self.moon_phase_angle = sun_spacecraft_angle('moon', time, loader.object_id)
# We need to be able to clearly see the planet in order to do
# horizon detection.
planet_occult_code = spice.occult('earth', 'ellipsoid', 'ITRF93', 'moon', 'ellipsoid', 'MOON_ME', 'NONE', str(loader.object_id), time)
self.horizon_moon_enabled = False
self.horizon_earth_enabled = False
if planet_occult_code == 0:
if self.earth_angle < self.params.horizon_fov and self.earth_phase_angle < self.params.horizon_max_phase_angle:
self.horizon_earth_enabled = True
if self.moon_angle < self.params.horizon_fov and self.moon_phase_angle < self.params.horizon_max_phase_angle:
self.horizon_moon_enabled = True
else:
self.earth_angle = 0.0
self.moon_angle = 0.0
self.elevation_from = {}
self.visible_from = []
self.r_station_inrtl = {}
for ground_name in self.ground_stations:
obj_str = str(self.loader.object_id)
moon_occult_code = spice.occult(obj_str, 'point', ' ', 'moon', 'ellipsoid', 'MOON_ME', 'NONE', str(self.ground_stations[ground_name]), time)
elevation = float('nan')
if moon_occult_code >= 0:
# get spacecraft elevation
x, lt = spice.spkcpo(obj_str, time, ground_name + '_TOPO', 'OBSERVER', 'NONE', self.r_station_ecef[ground_name] / 1000.0, 'earth', 'ITRF93')
r, lon, lat = spice.reclat(x[0:3])
if lat >= self.params.radiometric_min_elevation:
elevation = lat
self.visible_from.append(ground_name)
# store elevation of spacecraft for logging purposes
self.elevation_from[ground_name] = elevation
self.range_earth = norm(self.eci[0:3])
self.range_moon = norm(self.lci[0:3])
self.time = time
@property
def object_id(self):
return self.loader.object_id
@property
def T_body_to_att(self):
return self.loader.T_body_to_att
@property
def T_body_to_cam(self):
return self.loader.T_body_to_cam
def range(self, rel):
if rel == 'earth':
return self.range_earth
elif rel == 'moon':
return self.range_moon
def radii(self, rel):
if rel == 'earth': return self.loader.r_earth * 1000.0
        elif rel == 'moon': return self.loader.r_moon * 1000.0
def T_pa_to_cam(self, rel):
"""Transformation from cone principal axis frame to opnav camera
frame, where rel tells which planet cone is oriented
towards.
"""
if rel in ('earth', 399):
body_id = 399
elif rel in ('moon', 301):
body_id = 301
else:
            raise NotImplementedError("expected 'moon' or 'earth' based opnav, got '{}'".format(rel))
return horizon.compute_T_pa_to_cam(self.eci, self.time, body_id)
@property
def T_epa_to_cam(self):
return self.T_pa_to_cam('earth')
@property
def T_mpa_to_cam(self):
return self.T_pa_to_cam('moon')
|
#!/usr/bin/python
import os
import sys
import time
import glob
import shutil
import json
import xml.etree.ElementTree as ET
import string
import re
import platform
import time
os_commands = {
'debian': {
'9': [
'echo "deb https://repo.gluu.org/debian/ stretch-stable main" > /etc/apt/sources.list.d/gluu-repo.list',
],
'8': [
'echo "deb https://repo.gluu.org/debian/ stable main" > /etc/apt/sources.list.d/gluu-repo.list',
],
},
'ubuntu': {
'16': [
'echo "deb https://repo.gluu.org/ubuntu/ xenial-devel main" > /etc/apt/sources.list.d/gluu-repo.list',
],
'14': [
'echo "deb https://repo.gluu.org/ubuntu/ trusty-devel main" > /etc/apt/sources.list.d/gluu-repo-devel.list'
],
},
'centos': {
'6': [
'wget https://repo.gluu.org/centos/Gluu-centos-testing.repo -O /etc/yum.repos.d/Gluu.repo'
#change this
],
'7': [
'wget https://repo.gluu.org/centos/Gluu-centos-7-testing.repo -O /etc/yum.repos.d/Gluu.repo' #testing
#'wget https://repo.gluu.org/centos/Gluu-centos7.repo -O /etc/yum.repos.d/Gluu.repo'
],
},
'red': {
'6': [
'wget https://repo.gluu.org/rhel/Gluu-rhel6.repo -O /etc/yum.repos.d/Gluu.repo'
],
'7': [
'wget https://repo.gluu.org/rhel/Gluu-rhel7.repo -O /etc/yum.repos.d/Gluu.repo'
],
},
}
def detect_os_type():
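    """Return (distribution, major_version) of the running system, e.g. ('ubuntu', '16')."""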
try:
p = platform.linux_distribution()
os_type = p[0].split()[0].lower()
os_version = p[1].split('.')[0]
return os_type, os_version
except:
sys.exit('OS type could not be determined, exiting.')
os_type = detect_os_type()
print "Detected OS Type", " ".join(os_type)
try:
commands = os_commands[os_type[0]][os_type[1]]
except:
sys.exit('Unsupported Operating System, exiting.')
if os_type[0] in ('ubuntu', 'debian'):
package_type = 'deb'
install_command = 'apt-get install -y {0}'
elif os_type[0] in ('centos', 'red'):
package_type = 'rpm'
install_command = 'yum install -y {0}'
detected_oxd = 'oxd-server'
stop_gluu_command = {
'ubuntu16': 'service {0} stop',
'ubuntu14': 'service {0} stop',
'centos6': 'service {0} stop',
'centos7': 'service {0} stop',
}
os_type_str = ''.join(os_type)
commands.insert(0, stop_gluu_command[os_type_str].format(detected_oxd))
if package_type == 'deb':
commands += [
'curl https://repo.gluu.org/debian/gluu-apt.key | apt-key add -',
'apt-get update',
'apt-get install -y oxd-server',
]
if not os.path.exists('/usr/lib/apt/methods/https'):
commands.insert(0, 'apt-get install -y apt-transport-https')
elif package_type == 'rpm':
commands += [
'yum install -y epel-release',
'wget https://repo.gluu.org/rhel/RPM-GPG-KEY-GLUU -O /etc/pki/rpm-gpg/RPM-GPG-KEY-GLUU',
'rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-GLUU',
'yum clean all',
'yum install -y oxd-server',
]
add_commands = []
try:
import pip
except:
add_commands.append(install_command.format('python-pip'))
try:
import yaml
except:
add_commands.append(install_command.format('python-yaml'))
if add_commands:
commands += add_commands
def get_by_attrib(elements, attrib, value=True):
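    """Return the 'value' attribute (or, if value=False, the element itself) of the element whose 'name' attribute equals attrib."""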
for element in elements:
if element.get('name') == attrib:
if value:
return element.get('value')
return element
if value:
return ''
def json_load_byteified(file_handle):
return _byteify(
json.load(open(file_handle), object_hook=_byteify),
ignore_dicts=True
)
def _byteify(data, ignore_dicts = False):
# if this is a unicode string, return its string representation
if isinstance(data, unicode):
return data.encode('utf-8')
# if this is a list of values, return list of byteified values
if isinstance(data, list):
return [ _byteify(item, ignore_dicts=True) for item in data ]
# if this is a dictionary, return dictionary of byteified keys and values
# but only if we haven't already byteified it
if isinstance(data, dict) and not ignore_dicts:
tmp_ = {}
for key, value in data.iteritems():
tmp_[_byteify(key, ignore_dicts=True)] = _byteify(value, ignore_dicts=True)
return tmp_
# if it's anything else, return it in its original form
return data
current_version = '4.0'
oxd_base_dir = '/opt/oxd-server'
oxd_data_dir = os.path.join(oxd_base_dir, 'conf')
oxd_conf_dir = '/etc/oxd/oxd-server' if os.path.exists('/etc/oxd/oxd-server') else '/opt/oxd-server/conf'
oxd_backup_dir = os.path.join('/var/oxd-backup')
oxd_data_backup_dir = os.path.join(oxd_backup_dir, 'json_data_backup')
if not os.path.exists(oxd_backup_dir):
os.mkdir(oxd_backup_dir)
if not os.path.exists(oxd_data_backup_dir):
os.mkdir(oxd_data_backup_dir)
oxd_conf_json_fn = os.path.join(oxd_conf_dir, 'oxd-conf.json')
oxd_default_site_config_json_fn = os.path.join(oxd_conf_dir, 'oxd-default-site-config.json')
log4j_xml_fn = os.path.join(oxd_conf_dir, 'log4j.xml')
conf_yaml_template = 'oxd-server.yml.temp'
oxd4_server_yaml_fn = os.path.join(oxd_conf_dir, 'oxd-server.yml')
update_required = False
if os.path.exists(oxd_conf_json_fn):
update_required = True
if os.path.exists(oxd_default_site_config_json_fn):
update_required = True
if update_required:
print """A previous version of oxd-server detected. If you continue,
the current version will be replaced by the latest version available,
and your config/data will be migrated to the new version.
"""
ask = "Do you want to migrate data to oxd-server-{0}? [y|N]: ".format(current_version)
answer = raw_input(ask)
if not answer or answer.lower()[0] != 'y':
sys.exit("Migration cancelled, exiting.")
if os_type[0] in ('ubuntu','debian'):
commands.insert(1,'apt-get purge -y oxd-server')
elif os_type[0] in ('centos','red'):
commands.insert(1,'yum remove -y oxd-server')
if update_required:
oxd_conf_json = json_load_byteified(oxd_conf_json_fn)
current_dbFileLocation = '/opt/oxd-server/data/oxd_db'
if oxd_conf_json.get('storage_configuration') and oxd_conf_json['storage_configuration'].get('dbFileLocation'):
current_dbFileLocation = oxd_conf_json['storage_configuration']['dbFileLocation']
current_dbFile = current_dbFileLocation+'.mv.db'
commands.append('wget https://raw.githubusercontent.com/GluuFederation/oxd/version_4.0/upgrade/oxd-server.yml.temp -O oxd-server.yml.temp')
for b_file in (
oxd_conf_json_fn,
oxd_default_site_config_json_fn,
log4j_xml_fn,
oxd4_server_yaml_fn,
current_dbFile,
):
if os.path.exists(b_file):
shutil.copy2(b_file, oxd_backup_dir)
json_files = glob.glob(os.path.join(oxd_conf_dir,'*.json'))
json_files.remove(os.path.join(oxd_conf_dir,'oxd-conf.json'))
json_files.remove(os.path.join(oxd_conf_dir,'oxd-default-site-config.json'))
for json_file in json_files:
shutil.move(json_file, oxd_data_backup_dir)
print "About to execute following commands:"
print '\n'.join(commands)
ask = "Do you want to continue [y|N]: "
answer = raw_input(ask)
if not answer or answer.lower()[0] != 'y':
sys.exit("Migration cancelled, exiting.")
for cmd in commands:
print "Executing", cmd
os.system(cmd)
import yaml
if update_required:
oxd_conf_json_back_fn = os.path.join(oxd_backup_dir, 'oxd-conf.json')
oxd_default_site_config_json_back_fn = os.path.join(oxd_backup_dir, 'oxd-default-site-config.json')
log4j_xml_back_fn = os.path.join(oxd_backup_dir, 'log4j.xml')
oxd_default_site_config_json = json_load_byteified(oxd_default_site_config_json_back_fn)
if not oxd_default_site_config_json['contacts']:
oxd_default_site_config_json['contacts'] = []
else:
if not type(oxd_default_site_config_json['contacts']) == type([]):
oxd_default_site_config_json['contacts'] = [oxd_default_site_config_json['contacts']]
    log4j_xml_tree = ET.parse(log4j_xml_back_fn)
log4j_xml_root = log4j_xml_tree.getroot()
oxd4_server_yaml_fn = '/opt/oxd-server/conf/oxd-server.yml'
oxd4_server_yaml = yaml.safe_load(open(oxd4_server_yaml_fn).read())
for key in oxd_default_site_config_json:
if key in oxd4_server_yaml['defaultSiteConfig']:
oxd4_server_yaml['defaultSiteConfig'][key] = oxd_default_site_config_json[key]
for key in oxd_conf_json:
if key in oxd4_server_yaml:
oxd4_server_yaml[key] = oxd_conf_json[key]
xml_appenders = log4j_xml_root.findall('appender')
file_attrib = get_by_attrib(xml_appenders, 'FILE', False)
params = file_attrib.findall('param')
currentLogFilename = get_by_attrib(params, 'File')
log_fp, log_e = os.path.splitext(currentLogFilename)
oxd4_server_yaml['logging']['appenders'][1]['currentLogFilename'] = currentLogFilename
DatePattern = log_file_attrib = get_by_attrib(params, 'DatePattern')
archivedLogFilenamePattern = log_fp +'-%d{'+ DatePattern.replace("'.'",'') + '}-%i.log.gz'
oxd4_server_yaml['logging']['appenders'][1]['archivedLogFilenamePattern'] = archivedLogFilenamePattern
categories = log4j_xml_root.findall('category')
org_xdi_attrib = get_by_attrib(categories, 'org.gluu', False)
if not org_xdi_attrib:
org_xdi_attrib = get_by_attrib(categories, 'org.xdi', False)
org_xdi = org_xdi_attrib.find('priority').get('value')
oxd4_server_yaml['logging']['loggers']['org.gluu'] = org_xdi
root = log4j_xml_root.find('root')
priority = root.find('priority').get('value')
oxd4_server_yaml['logging']['level'] = priority
oxd4_server_yaml['migration_source_folder_path'] = oxd_data_backup_dir
yaml_temp = open(conf_yaml_template).read()
sub_vars = re.findall('\{\{(.*?)\}\}', yaml_temp)
for sv in sub_vars:
sv_pattern = sv.split(':')
m = oxd4_server_yaml
for p in sv_pattern:
if '|' in p:
p,n=p.split('|')
m = m[p][int(n)]
else:
m = m[p]
if type(True) == type(m):
m = str(m).lower()
if (type(m) != type([]) and not m):
m="''"
k = '{{'+sv+'}}'
#print sv, m
yaml_temp = yaml_temp.replace(k,str(m))
with open(oxd4_server_yaml_fn,'w') as W:
W.write(yaml_temp)
db_fn_backup = os.path.join(oxd_backup_dir,'oxd_db.mv.db')
if os.path.exists(db_fn_backup):
shutil.copy2(db_fn_backup, '/opt/oxd-server/data/')
os.system('chown jetty:jetty ' + oxd_backup_dir)
print "Migration is finished. Please restart oxd-server"
|
# __main__.py
import subprocess
import sys
def Playdemo():
    # Run the bundled demo when the module is invoked with the "Play-demo" argument
try:
if sys.argv[1] == "Play-demo":
subprocess.call(["..TBG_Example/main.py", "-c"])
else:
print("Invalid keyword: format>> python -m Play-demo")
except IndexError:
print("Invalid argument input: format>> python -m Play-demo")
if __name__ == "__main__":
Playdemo()
|
from playhouse.migrate import *
my_db = SqliteDatabase('../data.db')
migrator = SqliteMigrator(my_db)
multiplier_up = DecimalField(null=True)
avg_price_minutes = DecimalField(null=True)
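# Add the two new nullable columns to the existing 'marketparams' table.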
migrate(
migrator.add_column('marketparams', 'multiplier_up', multiplier_up),
migrator.add_column('marketparams', 'avg_price_minutes', avg_price_minutes),
)
|
from collections import Counter
from io import BytesIO
import requests
from PIL import Image, ImageDraw, ImageSequence
header = {'Authorization': 'Basic YnV0dGVyOmZseQ==', } # 'YnV0dGVyOmZseQ==' is base64.encode('butter:fly')
response = requests.get('http://www.pythonchallenge.com/pc/hex/white.gif', headers=header)
imgs = Image.open(BytesIO(response.content))
brighter_points = []
for img in ImageSequence.Iterator(imgs):
pos = list(img.getdata()).index(img.getextrema()[1])
brighter_points.append((pos % img.width, pos // img.width))
print(brighter_points) # seems like random walk
c = Counter()
for p in brighter_points:
c[p] += 1
num = c[(100, 100)]
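# each visit to (100, 100) starts a new drawing segment in the output image below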
img_new = Image.new('RGB', (50 * num, 50))
draw = ImageDraw.Draw(img_new)
n = 0
points = []
for p in brighter_points:
if p == (100, 100):
if n > 0:
draw.line(points, 'red', 3)
points = [(50 * n + 25, 25)]
n += 1
else:
d = [x - 100 for x in p]
points.append((points[-1][0] + d[0], points[-1][1] + d[1]))
draw.line(points, 'red', 3)
img_new.show() # bonus
|
"""Detect file changes through the windows api
"""
import os
import win32event
import win32file
import win32con
import asyncio
import pywintypes
import threading
ACTIONS = {
1 : "Created",
2 : "Deleted",
3 : "Updated",
4 : "old name",
5 : "new name"
}
# Thanks to Claudio Grondi for the correct set of numbers
FILE_LIST_DIRECTORY = 0x0001
async def main():
p = 'C:/'
await start(p)
# log = print
def log(*a, **kw):
print(" > ", *a, **kw)
def log_callback(e, **kw):
print("log_callback", e, **kw)
l = asyncio.Lock()
check_lock = asyncio.Lock()
keep = {}
_ignore = ['F:\\clients\\strangemother\\backblaze\\.git']
_ignore_dirs = ['F:\\clients\\strangemother\\backblaze\\.git']
top_content = {'step': 0}
async def start(watch_dir=None, config=None, callback=None):
log('start')
global late_task
config = config or {}
if watch_dir is None and isinstance(config, tuple):
watch_dir = config[0]
config = config[1]
# scan = config.get('scan', -1) or 0
#late_task = asyncio.get_running_loop().create_task(late_call())
#watch_dir = watch_dir
hDir = await get_hdir(watch_dir)
await loop(hDir, watch_dir, callback or log_callback, config=config)
log('monitor.start Done')
async def get_hdir(watch_dir):
try:
hDir = win32file.CreateFile(
watch_dir,
FILE_LIST_DIRECTORY,
win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE | win32con.FILE_SHARE_DELETE,
None,
win32con.OPEN_EXISTING,
win32con.FILE_FLAG_BACKUP_SEMANTICS,
None
)
except pywintypes.error as e:
# (2, 'CreateFile', 'The system cannot find the file specified.')
log('monitor.start - FileError', e)
hDir = None
return hDir
async def loop(hDir, root_path, callback, config=None):
log('loop', os.getpid(), hDir)
log('config', config)
run = 1
fails = 0
while run:
log('Back into step', root_path)
try:
should_continue = await step(hDir, root_path, callback, config=config)
fails = 0
except Exception as e:
log('monitor.loop caught step exception:', str(e))
fails += 1
should_continue = False if (fails >= 3) else True
if should_continue is False:
log(f'\nToo many failures: {fails}. Killing with last failure\n')
log(e)
top_content['step'] += 1
if should_continue is False:
log('Result is false; stopping monitor.loop for', root_path)
run = False
continue
log('Loop complete')
def loger(tick):
log('Timer tick', tick, top_content)
top_content['step'] += 1
return True
def ignore(action, file, full_filename):
if file in _ignore:
return True
if full_filename in _ignore:
return True
if file in _ignore:
return True
for _dir in _ignore_dirs:
if file.startswith(_dir): return True
if full_filename.startswith(_dir): return True
async def step(hDir, root_path, callback, config=None):
#
# ReadDirectoryChangesW takes a previously-created
# handle to a directory, a buffer size for results,
# a flag to indicate whether to watch subtrees and
# a filter of what changes to notify.
#
# NB Tim Juchcinski reports that he needed to up
# the buffer size to be sure of picking up all
# events when a large number of files were
# deleted at once.
# results = wait(hDir)
# async for item in results:
# log(item)
# for action, file in item:
# full_filename = os.path.join(path_to_watch, file)
# log(full_filename, ACTIONS.get(action, "Unknown"))
log('>..', end='')
last = keep.get('last', None)
try:
results = await wait(hDir)
except KeyboardInterrupt:
log('Keyboard cancelled')
return False
await asyncio.sleep(.01)
if results is None:
log('- Result is None, This may occur if the file is deleted before analysis')
log(root_path, hDir)
return False
if results is l:
log('Received lock, will wait again')
return True
log('Iterating', len(results), 'results')
clean_actions = ()
for action, file in results:
full_filename = os.path.join(root_path, file)
if ignore(action, file, full_filename):
log('x ', full_filename)
continue
_action = (full_filename, ACTIONS.get(action, "Unknown"))
clean_actions += (_action, )
if _action == last:
log('Drop Duplicate')
continue
try:
keep['last'] = await execute(_action, callback, config)
except Exception as e:
log('monitor.step caught exception.', _action, file)
raise e
if config.get('callback_many'):
config['callback_many'](clean_actions)
log('monitor.step fall to end.')
async def lock_wait(hDir):
async with check_lock:
log('locked' if l.locked() else 'unlocked')
await asyncio.ensure_future(l.acquire())
log('now', 'locked' if l.locked() else 'unlocked')
await wait(hDir)
# await asyncio.sleep(1)
l.release()
return l
async def wait(hDir):
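    """Wait on ReadDirectoryChangesW and return a list of (action, filename) tuples, or None on error/teardown."""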
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
try:
rc = win32event.WaitForSingleObject(overlapped.hEvent, 5000)
results = win32file.ReadDirectoryChangesW(
hDir,
8192, #1024,
True,
win32con.FILE_NOTIFY_CHANGE_FILE_NAME
| win32con.FILE_NOTIFY_CHANGE_DIR_NAME
| win32con.FILE_NOTIFY_CHANGE_ATTRIBUTES
| win32con.FILE_NOTIFY_CHANGE_SIZE
| win32con.FILE_NOTIFY_CHANGE_LAST_WRITE
| win32con.FILE_NOTIFY_CHANGE_SECURITY
| win32con.FILE_NOTIFY_CHANGE_FILE_NAME,
# ~ win32con.FILE_NOTIFY_CHANGE_CREATION |
# ~ win32con.FILE_NOTIFY_CHANGE_LAST_ACCESS |
None,
None
)
#log('watch', results, rc)
if rc == win32event.WAIT_OBJECT_0:
# got some data! Must use GetOverlappedResult to find out
# how much is valid! 0 generally means the handle has
# been closed. Blocking is OK here, as the event has
# already been set.
nbytes = win32file.GetOverlappedResult(hDir, overlapped, True)
if nbytes:
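                # NOTE: 'buf' is not defined in this module; this branch belongs to the
                # asynchronous (overlapped) variant of ReadDirectoryChangesW and would
                # need a buffer from win32file.AllocateReadBuffer to run as written.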
bits = win32file.FILE_NOTIFY_INFORMATION(buf, nbytes)
log('nbytes', nbytes, bits)
else:
# This is "normal" exit - our 'tearDown' closes the
# handle.
# log "looks like dir handle was closed!"
log('teardown')
return
else:
log('Timeout', hDir, rc)
#log('return', results)
return results
except pywintypes.error as e:
log('monitor.start - FileError', e)
return None
async def execute(result, callback, settings):
try:
return callback(result)
except Exception as e:
        log(f'An exception has occurred during callback execution: {e}')
raise e
# await asyncio.sleep(.3)
# return result
async def late_call():
log('late')
if __name__ == '__main__':
asyncio.run(main())
|
#!/usr/bin/env python
#
# Copyright 2009, Ubixum, Inc
#
# This file copied and modified for fx2lib from the GnuRadio project
#
# Copyright 2004,2006 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import re
import sys, struct
import os, os.path
from optparse import OptionParser
def hex_to_bytes (s):
if len (s) & 0x1:
raise ValueError, "Length must be even"
r = []
for i in range (0, len(s), 2):
r.append (int (s[i:i+2], 16))
return r
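# e.g. hex_to_bytes("C0B447") -> [0xC0, 0xB4, 0x47]; odd-length input raises ValueError.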
def msb (x):
return (x >> 8) & 0xff
def lsb (x):
return x & 0xff
class ihx_rec (object):
def __init__ (self, addr, type, data):
self.addr = addr
self.type = type
self.data = data
class ihx_file (object):
def __init__ (self):
self.pat = re.compile (r':[0-9A-F]{10,}')
def read (self, file):
r = []
for line in file:
line = line.strip().upper ()
if not self.pat.match (line):
raise ValueError, "Invalid hex record format"
bytes = hex_to_bytes (line[1:])
sum = reduce (lambda x, y: x + y, bytes, 0) % 256
if sum != 0:
raise ValueError, "Bad hex checksum"
lenx = bytes[0]
addr = (bytes[1] << 8) + bytes[2]
type = bytes[3]
data = bytes[4:-1]
if lenx != len (data):
raise ValueError, "Invalid hex record (bad length)"
if type != 0:
break;
r.append (ihx_rec (addr, type, data))
return r
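# Each record parsed above follows the Intel HEX layout :LLAAAATT<data>CC, where
# LL is the data byte count, AAAA the 16-bit load address, TT the record type
# (00 = data, 01 = end-of-file) and CC a checksum byte chosen so that all bytes
# of the record sum to 0 modulo 256.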
def build_eeprom_image (filename, outfile,vid,pid,devid,cb):
"""Build a ``C0 Load'' EEPROM image.
For details on this format, see section 3.4.3 of
the EZ-USB FX2 Technical Reference Manual
"""
image = [
0xC0, # boot from EEPROM
lsb (vid),
msb (vid),
lsb (pid),
msb (pid),
lsb (devid),
msb (devid),
cb # configuration byte
]
# you could just append all the records..
# but that would most likely cause a lot of
# extra headers/addrs to be written
ihx = ihx_file();
records = ihx.read(open(filename))
# create image map of all values we're writing data too
image_map={}
for r in records:
addr=r.addr
c=0
l=len(r.data)
while c<l:
image_map[addr] = r.data[c]
addr += 1
c += 1
# now create new records based on contiguous image data
max_addr = max(image_map.keys())
records = []
start_addr = 0
while start_addr <= max_addr:
if not image_map.has_key(start_addr):
start_addr += 1
continue
end_addr = start_addr
        # add contiguous data; the record length field is 10 bits,
        # so 0x3ff bytes is the maximum size (TRM 3.4.3)
size=0
while image_map.has_key(end_addr) and size < 0x3ff:
end_addr += 1
size += 1
l = end_addr - start_addr
data = []
for d in range(l):
data.append(image_map [ start_addr + d ])
records.append ( ihx_rec ( start_addr, 0, data ) )
start_addr = end_addr
    # each record gets a 4 byte header that indicates where to load
    # the immediately following code bytes.
for r in records:
image.extend( [
msb (len (r.data)),
lsb (len (r.data)),
msb (r.addr),
lsb (r.addr)
])
image.extend(r.data)
# writes 0 to CPUCS reg (brings FX2 out of reset)
image.extend ( [
0x80,
0x01,
0xe6,
0x00,
0x00
] )
buf=struct.pack ( "B"*len(image), *image )
print "iic Image Size" , len(buf)
out=open( outfile, 'w')
out.write(buf)
out.close();
if __name__ == '__main__':
usage = "usage: %prog [options] firmware.ihx outfile"
parser = OptionParser (usage=usage)
parser.add_option ( "-v", "--vid", type="int", default=0x04b4,
help="Vendor ID for iic c2 image." )
parser.add_option ( "-p", "--pid", type="int", default=0x0082,
help="Product ID for iic c2 image." )
parser.add_option ( "-d", "--devid", type="int", default=0,
help="Device ID for iic c2 image." )
parser.add_option ( "-c", "--configbyte", type="int", default=0x04,
help="Configuration Byte (i2c & disconnect polarity, default 0x04)" )
(options, args) = parser.parse_args ()
if len (args) != 2:
parser.print_help ()
sys.exit (1)
ihx_filename = args[0]
iic_filename = args[1]
build_eeprom_image ( ihx_filename, iic_filename, options.vid, options.pid, options.devid, options.configbyte )
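# Example invocation (script and file names here are illustrative):
#   python build_eeprom.py -v 0x04b4 -p 0x0082 firmware.ihx firmware.iic
# converts the firmware's Intel HEX output into an EEPROM boot image.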
|
from unicodedata import name
import jmespath
from util.logger import get_logger
from util.decoder import decode
class TransactionTransformer:
def __init__(self) -> None:
self.logger = get_logger(__name__)
@staticmethod
def transform(raw_transactions):
payloads = jmespath.search('[*].payload', raw_transactions)
if not payloads:
return []
else:
return [TransactionTransformer.transform_payload(raw_payload) for raw_payload in payloads]
@staticmethod
def transform_payload(raw_payload):
transaction = {}
raw_actions = jmespath.search("data.actions", raw_payload)
if not raw_actions:
return None
raw_header = jmespath.search("header", raw_payload)
transaction["header"] = TransactionTransformer.transform_header(
raw_header)
transaction["data"] = [
TransactionTransformer.transform_actions(a) for a in raw_actions]
return transaction
@staticmethod
def transform_header(raw_header):
channel_header = raw_header["channel_header"]
return {
"channel_id": channel_header["channel_id"],
"smart_contract": jmespath.search("extension.chaincode_id", raw_header),
"transaction_id": channel_header["tx_id"],
"timestamp": channel_header["timestamp"],
"creator": jmespath.search("signature_header.creator.mspid", raw_header)
}
@staticmethod
def transform_actions(raw_action):
# endosments_no = len(jmespath.search(
# "payload.action.endorsements", raw_action))
# chaincode_id = jmespath.search(
# "payload.action.proposal_response_payload.extension.chaincode_id", raw_action)
return {
"input": TransactionTransformer.transform_input(raw_action),
"result": TransactionTransformer.transform_response(raw_action)
}
@staticmethod
def transform_input(raw_action):
creator = jmespath.search("header.creator.mspid", raw_action)
action_input = jmespath.search(
"payload.chaincode_proposal_payload.input.chaincode_spec", raw_action)
if "is_init" in action_input["input"]:
action_input["is_init"] = action_input["input"]["is_init"]
if "args" not in action_input:
action_input["args"] = action_input["input"]["args"]
action_input["args"] = [decode(a) for a in action_input["args"]]
return {
"method": action_input["args"][0],
"args": action_input["args"][1:],
"creator": creator
}
@staticmethod
def transform_response(raw_action):
response = jmespath.search(
"payload.action.proposal_response_payload.extension.response", raw_action)
response["payload"] = decode(response["payload"])
rwset = jmespath.search(
"payload.action.proposal_response_payload.extension.results.ns_rwset", raw_action)
rwset = TransactionTransformer._decode_rwset_values(rwset)
return {
"status": response["status"],
"response": response["payload"],
**rwset
}
@staticmethod
def _decode_rwset_values(rwset):
ret = {"reads": [], "writes": []}
for rws in rwset:
namespace = rws["namespace"]
for rs in rws["rwset"]["reads"]:
value = decode(rs["value"]) if "value" in rs else None
ret["reads"].append({
"key": rs["key"],
"value": value,
"version": rs["version"],
"namespace": namespace
})
for ws in rws["rwset"]["writes"]:
value = decode(ws["value"]) if "value" in ws else None
ret["writes"].append({
"key": ws["key"],
"value": value,
"is_delete": ws["is_delete"] if "is_delete" in ws else False,
"namespace": namespace
})
return ret
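# Usage sketch (assumes `raw_transactions` is the already-decoded list of
# transaction envelopes, each carrying a `payload` with `header` and
# `data.actions` in the shape queried above):
#
#   transactions = TransactionTransformer.transform(raw_transactions)
#   for tx in filter(None, transactions):
#       print(tx["header"]["transaction_id"], tx["data"][0]["input"]["method"])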
|
# Band service skeleton
# (c) Dmitry Rodin 2018
# ---------------------
# this file is a regular python module requirement
# (a required module file for python)
from . import main
|
"""DID Document and resource builders."""
from typing import Iterator, List, Optional, Type, Union
from ..did import DID
from ..did_url import DIDUrl
from ..service import DIDCommService, Service
from ..verification_method import VerificationMethod
from .doc import DIDDocument
def _default_id_generator(base: str, start: int = 0) -> Iterator[str]:
"""Generate ID fragments."""
index = start
while True:
yield "{}-{}".format(base, index)
index += 1
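# e.g. _default_id_generator("key") yields "key-0", "key-1", "key-2", ...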
class VerificationMethodBuilder:
"""VerificationMethod scoped builder."""
def __init__(
self,
did: DID,
*,
id_base: str = None,
methods: Optional[List[VerificationMethod]] = None
):
self._did = did
self.methods = methods or []
self._id_base = id_base or "key"
self._id_generator = self._default_id_generator()
def _default_id_generator(self):
"""Default ID generator."""
yield from _default_id_generator(self._id_base, start=len(self.methods))
def add(
self,
type_: Type[VerificationMethod],
ident: Optional[str] = None,
controller: DID = None,
**kwargs
):
"""Add verification method from parts and context."""
ident = ident or next(self._id_generator)
controller = controller or self._did
vmethod = type_.make(id=self._did.ref(ident), controller=controller, **kwargs)
self.methods.append(vmethod)
return vmethod
def remove(self, vmethod: VerificationMethod):
"""Remove method from builder."""
self.methods.remove(vmethod)
class RelationshipBuilder(VerificationMethodBuilder):
"""Builder for relationships."""
def __init__(
self,
did: DID,
id_base: str,
*,
methods: Optional[List[Union[VerificationMethod, DIDUrl]]] = None
):
super().__init__(did, id_base=id_base)
self.methods = methods or []
def _default_id_generator(self):
"""Default ID generator."""
start = len(
[
vmethod
for vmethod in self.methods
if isinstance(vmethod, VerificationMethod)
]
)
yield from _default_id_generator(self._id_base, start=start)
def reference(self, ref: DIDUrl):
"""Add reference to relationship."""
if not isinstance(ref, DIDUrl):
raise ValueError(
"Reference must be DIDUrl, not {}".format(type(ref).__name__)
)
self.methods.append(ref)
def embed(self, *args, **kwargs):
"""Embed verification method in relationship."""
return super().add(*args, **kwargs)
def remove(self, vmethod: Union[DIDUrl, VerificationMethod]):
"""Remove reference or method from builder."""
self.methods.remove(vmethod)
class ServiceBuilder:
"""Builder for services."""
def __init__(self, did: DID, *, services: Optional[List[Service]] = None):
self._did = did
self.services = services or []
self._id_generator = _default_id_generator("service", start=len(self.services))
def _determine_next_priority(self):
"""Return the next priority after the highest priority currently in services."""
return (
max(
[
service.priority
for service in self.services
if isinstance(service, DIDCommService)
]
)
+ 1
if self.services
else 0
)
def add(
self, type_: str, service_endpoint: str, ident: Optional[str] = None, **extra
):
"""Add service."""
ident = ident or next(self._id_generator)
service = Service.make(
id=self._did.ref(ident),
type=type_,
service_endpoint=service_endpoint,
**extra
)
self.services.append(service)
return service
def add_didcomm(
self,
service_endpoint: str,
recipient_keys: List[VerificationMethod],
routing_keys: List[VerificationMethod] = None,
*,
priority: int = None,
type_: str = None,
ident: Optional[str] = None
):
"""Add DIDComm Service."""
ident = ident or next(self._id_generator)
routing_keys = routing_keys or []
priority = priority or self._determine_next_priority()
service = DIDCommService.make(
id=self._did.ref(ident),
service_endpoint=service_endpoint,
recipient_keys=[vmethod.id for vmethod in recipient_keys],
routing_keys=[vmethod.id for vmethod in routing_keys],
type=type_,
priority=priority,
)
self.services.append(service)
return service
def remove(self, service: Service):
"""Remove service from builder."""
self.services.remove(service)
class DIDDocumentBuilder:
"""Builder for constructing DID Documents programmatically."""
DEFAULT_CONTEXT = ["https://www.w3.org/ns/did/v1"]
def __init__(
self,
id: Union[str, DID],
context: List[str] = None,
*,
also_known_as: List[str] = None,
controller: Union[List[str], List[DID]] = None
):
"""Initliaze builder."""
self.id: DID = DID(id)
self.context = context or self.DEFAULT_CONTEXT
self.also_known_as = also_known_as
self.controller = controller
self.verification_method = VerificationMethodBuilder(self.id)
self.authentication = RelationshipBuilder(self.id, "auth")
self.assertion_method = RelationshipBuilder(self.id, "assert")
self.key_agreement = RelationshipBuilder(self.id, "key-agreement")
self.capability_invocation = RelationshipBuilder(
self.id, "capability-invocation"
)
self.capability_delegation = RelationshipBuilder(
self.id, "capability-delegation"
)
self.service = ServiceBuilder(self.id)
self.extra = {}
@classmethod
def from_doc(cls, doc: DIDDocument) -> "DIDDocumentBuilder":
"""Create a Builder from an existing DIDDocument."""
builder = cls(
id=doc.id,
context=doc.context,
also_known_as=doc.also_known_as,
controller=doc.controller,
)
builder.verification_method = VerificationMethodBuilder(
doc.id, methods=doc.verification_method
)
builder.authentication = RelationshipBuilder(
doc.id, "auth", methods=doc.authentication
)
builder.assertion_method = RelationshipBuilder(
doc.id, "assert", methods=doc.assertion_method
)
builder.key_agreement = RelationshipBuilder(
doc.id, "key-agreement", methods=doc.key_agreement
)
builder.capability_invocation = RelationshipBuilder(
doc.id, "capability-invocation", methods=doc.capability_invocation
)
builder.capability_delegation = RelationshipBuilder(
doc.id, "capability-delegation", methods=doc.capability_delegation
)
builder.service = ServiceBuilder(doc.id, services=doc.service)
return builder
def build(self) -> DIDDocument:
"""Build document."""
return DIDDocument.construct(
id=self.id,
context=self.context,
also_known_as=self.also_known_as,
controller=self.controller,
verification_method=self.verification_method.methods or None,
authentication=self.authentication.methods or None,
assertion_method=self.assertion_method.methods or None,
key_agreement=self.key_agreement.methods or None,
capability_invocation=self.capability_invocation.methods or None,
capability_delegation=self.capability_delegation.methods or None,
service=self.service.services or None,
**self.extra
)
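# Usage sketch (Ed25519VerificationKey2018 is illustrative; use a concrete
# VerificationMethod subclass actually exported by ..verification_method):
#
#   builder = DIDDocumentBuilder("did:example:123")
#   vmethod = builder.verification_method.add(
#       Ed25519VerificationKey2018, public_key_base58="..."
#   )
#   builder.authentication.reference(vmethod.id)
#   builder.service.add(type_="example", service_endpoint="https://example.com")
#   doc = builder.build()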
|
class Solution(object):
def poorPigs(self, buckets, minutesToDie, minutesToTest):
"""
:type buckets: int
:type minutesToDie: int
:type minutesToTest: int
:rtype: int
"""
        # Each pig can run minutesToTest // minutesToDie rounds of testing, so one
        # pig distinguishes rounds + 1 outcomes (the round it died in, or survival).
        # One pig therefore covers e.g. 5 buckets; two pigs (one per row, one per
        # column) cover 25; three pigs cover a 5x5x5 cube, and so on. We need the
        # smallest pigNum with (rounds + 1) ** pigNum >= buckets; starting from 0
        # pigs handles the degenerate case of a single bucket.
pigNum = 0
while (minutesToTest // minutesToDie + 1) ** pigNum < buckets:
pigNum += 1
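        # e.g. buckets=1000, minutesToDie=15, minutesToTest=60: each pig has
        # 60 // 15 + 1 = 5 outcomes, and 5 ** 4 = 625 < 1000 <= 5 ** 5, so 5 pigs.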
return pigNum |
from ToastNotifier import ToastNotifier
import time
import poplib
from email.parser import Parser
from email.header import decode_header
import webbrowser
import os
import base64
_uidl = ''  # leave this empty
email = '[email protected]'  # mailbox address
password = ''  # password
host = 'pop.163.com'  # POP3 server address
sleep = 15 * 60  # polling interval in seconds; the provider requires at least 15 minutes between requests
### Read the latest email
def ReadEmail(server):
global _uidl
print(server.getwelcome().decode('utf-8'))
index = server.stat()[0]
uidl = server.uidl(index)
    if _uidl == uidl.split()[2]:  # same message as last time, skip it
return False
else:
_uidl = uidl.split()[2]
resp, lines, octets = server.retr(index)
msg_content = b'\r\n'.join(lines).decode('utf-8')
msg = Parser().parsestr(msg_content)
From = decode_str(msg.get("From"))
Subject = decode_str(msg.get("Subject"))
CacheEmail(msg)
ShowToast("新邮件提醒","来自%s\n%s"%(From,Subject),"temp.html")
return True
### Cache the new email body to a temp file
def CacheEmail(msg):
data = None
charset = "utf-8"
for message in msg.walk():
if message.get_content_type() == "text/html":
charset = message.get_content_charset()
data = message.get_payload(decode=True)
f = open("temp.html", "w", encoding=charset)
if charset == 'utf-8':
f.write(str(data,charset).encode('utf-8').decode('unicode_escape'))
else:
f.write(str(data,charset))
f.close()
# Decode an email header field
def decode_str(s):
value, charset = decode_header(s)[0]
if charset:
value = value.decode(charset)
return value
# Connect to the POP3 mailbox
def ConnectEmail():
global email
global password
global host
server = poplib.POP3(host)
server.set_debuglevel(0)
server.user(email)
server.pass_(password)
server.utf8()
return server
### Show a toast notification
def ShowToast(title, content, url):
toaster = ToastNotifier()
toaster.show_toast(title, content, duration=2,
callback_on_click=lambda: open_ticket(url))
## Open the cached message in the browser
def open_ticket(url):
webbrowser.open_new_tab("%s\\%s"%(os.getcwd(),url))
if __name__ == "__main__":
while True:
server = ConnectEmail()
ReadEmail(server)
server.quit()
time.sleep(sleep) |
import unittest
import subprocess
import re
def trycredentials(user, pwd):
proc = subprocess.Popen(
['python', '01.py'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
outs, errs = proc.communicate('%s\n%s\n' % (user, pwd))
return outs + errs
class TestEx1(unittest.TestCase):
def test_correct_credentials(self):
res = trycredentials('apple', 'red')
self.assertIn('welcome master', res.lower(),
'No welcome banner in: [%s]' % res)
def test_incorrect_credentials(self):
res = trycredentials('foo', 'bar')
self.assertIn('intruder alert', res.lower(),
'No error banner in: [%s]' % res)
def test_correct_username_wrong_password(self):
res = trycredentials('apple', 'blue')
self.assertIn('intruder alert', res.lower(),
'No error banner in: [%s]' % res)
class TestEx2(unittest.TestCase):
def test_above_avg(self):
res = subprocess.check_output(['python', '02.py', '99', '90', '15', '28', '38', '44', '50', '81', '79', '60', '99', '90', '15', '28', '38', '44', '50', '81', '79', '60'])
self.assertEqual(res, '99 90 81 79 60 99 90 81 79 60\n')
class TestEx3(unittest.TestCase):
def test_3(self):
proc = subprocess.Popen(
['python', '03.py', 'mycar', 'home', 'none'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
outs, errs = proc.communicate()
lines = outs.split('\n')
self.assertIn('10.0.0.5', lines[0], 'mycar should have IP 10.0.0.5')
self.assertIn('194.90.2.1', lines[1], 'home should have IP 194.90.2.1')
self.assertEqual(re.search(r'\d', lines[2]), None,
'none should not have IP')
if __name__ == '__main__':
unittest.main()
|
from guy import Guy,http
class T(Guy):
__doc__="""<script>
async function storage(mode) {
switch(mode) {
case "get":
return localStorage["var"]==42;
case "set":
localStorage["var"]=42;
return true
default:
alert("mode='"+mode+"' ?!?")
}
}
</script>"""
size=(100,100)
def __init__(self,mode):
self.mode=mode
super().__init__()
async def init(self):
ok =await self.js.storage(self.mode)
self.exit(ok)
def test_no_lockPort(runner):
t=T("get")
ok=runner(t)
assert not ok,"localStorage is already present ?!"
t=T("set")
ok=runner(t)
assert ok,"setting localstorage not possible ?!"
t=T("get")
ok=runner(t)
assert not ok,"win has memory ;-("
# CAN't WORK IN pytest, as is
# def test_lockPort(): # app mode only (broken with cef ... coz ioloop/pytests)
# t=T("set")
# ok=t.runCef(one=True)
# assert ok==True
# t=T("get")
# ok=t.runCef(one=True)
# assert ok==True # localStorage is persistent !
|
class SinglyLinkedList:
def __init__(self):
self.head = None
self.len = 0
def add(self, item):
if not self.head:
self.head = ListNode(item)
else:
p = ListNode(item)
p.next = self.head
self.head = p
self.len += 1
    def remove(self, item):
        if not self.head:
            raise ValueError('Can\'t remove from a list with no items.')
        if self.head.val == item:
            self.head = self.head.next
            self.len -= 1
            return
        curr = self.head
        while curr.next:
            if curr.next.val == item:
                curr.next = curr.next.next
                self.len -= 1
                return
            curr = curr.next
def __iter__(self):
self.top = self.head
return self
def __next__(self):
if self.top:
curr = self.top
else:
raise StopIteration()
self.top = self.top.next
return curr.val
def __len__(self):
return self.len
def __str__(self):
if not self.head:
return '[]'
s = '['
curr = self.head
for item in self:
s += str(item)
if curr.next:
s += ','
curr = curr.next
s += ']'
return s
class ListNode:
def __init__(self, val):
self.val = val
self.next = None
if __name__ == '__main__':
slist = SinglyLinkedList()
slist.add(12)
slist.add(13)
slist.add(14)
print(slist)
|
# Copyright (c) 2017, 2018, Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
import pytest
from nose.plugins.skip import SkipTest
import logging
from ansible.modules.cloud.oracle import oci_load_balancer_backend
from ansible.module_utils.oracle import oci_utils, oci_lb_utils
try:
import oci
from oci.util import to_dict
from oci.load_balancer.models import Backend, WorkRequest
from oci.exceptions import ServiceError, ClientError
except ImportError:
raise SkipTest("test_oci_load_balancer_backend.py requires `oci` module")
class FakeModule(object):
def __init__(self, **kwargs):
self.params = kwargs
def fail_json(self, *args, **kwargs):
self.exit_args = args
self.exit_kwargs = kwargs
raise Exception(kwargs["msg"])
def exit_json(self, *args, **kwargs):
self.exit_args = args
self.exit_kwargs = kwargs
@pytest.fixture()
def lb_client(mocker):
mock_lb_client = mocker.patch(
"oci.load_balancer.load_balancer_client.LoadBalancerClient"
)
return mock_lb_client.return_value
@pytest.fixture()
def create_backend_patch(mocker):
return mocker.patch.object(oci_load_balancer_backend, "create_backend")
@pytest.fixture()
def update_backend_patch(mocker):
return mocker.patch.object(oci_load_balancer_backend, "update_backend")
@pytest.fixture()
def create_or_update_lb_resources_and_wait_patch(mocker):
return mocker.patch.object(oci_lb_utils, "create_or_update_lb_resources_and_wait")
@pytest.fixture()
def delete_lb_resources_and_wait_patch(mocker):
return mocker.patch.object(oci_lb_utils, "delete_lb_resources_and_wait")
@pytest.fixture()
def check_and_create_resource_patch(mocker):
return mocker.patch.object(oci_utils, "check_and_create_resource")
@pytest.fixture()
def get_existing_resource_patch(mocker):
return mocker.patch.object(oci_utils, "get_existing_resource")
def setUpModule():
logging.basicConfig(
filename="/tmp/oci_ansible_module.log", filemode="a", level=logging.INFO
)
oci_load_balancer_backend.set_logger(logging)
def test_create_or_update_backend_create(
lb_client, check_and_create_resource_patch, get_existing_resource_patch
):
module = get_module()
backend = get_backend()
get_existing_resource_patch.return_value = None
check_and_create_resource_patch.return_value = {
"backend": to_dict(backend),
"changed": True,
}
result = oci_load_balancer_backend.create_or_update_backend(lb_client, module)
assert result["backend"]["ip_address"] is backend.ip_address
def test_create_or_update_backend_update(
lb_client, update_backend_patch, get_existing_resource_patch
):
module = get_module()
backend = get_backend()
get_existing_resource_patch.return_value = backend
update_backend_patch.return_value = {"backend": to_dict(backend), "changed": True}
result = oci_load_balancer_backend.create_or_update_backend(lb_client, module)
assert result["backend"]["ip_address"] is backend.ip_address
def test_create_or_update_backend_service_error(
lb_client, check_and_create_resource_patch, get_existing_resource_patch
):
module = get_module()
error_message = "Internal Server Error"
check_and_create_resource_patch.side_effect = ServiceError(
500, "InternalServerError", dict(), error_message
)
get_existing_resource_patch.return_value = None
try:
oci_load_balancer_backend.create_or_update_backend(lb_client, module)
except Exception as ex:
assert error_message in ex.args[0]
def test_create_or_update_backend_client_error(
lb_client, check_and_create_resource_patch, get_existing_resource_patch
):
module = get_module()
error_message = "Work Request Failed"
check_and_create_resource_patch.side_effect = ClientError(
Exception("Work Request Failed")
)
get_existing_resource_patch.return_value = None
try:
oci_load_balancer_backend.create_or_update_backend(lb_client, module)
except Exception as ex:
assert error_message in ex.args[0]
def test_create_backend(lb_client, create_or_update_lb_resources_and_wait_patch):
module = get_module()
backend = get_backend()
create_or_update_lb_resources_and_wait_patch.return_value = {
"backend": to_dict(backend),
"changed": True,
}
result = oci_load_balancer_backend.create_backend(
lb_client, module, "ocid1.loadbalancer.oc1.iad.aaaaa", "10.159.34.21:8181"
)
assert result["backend"]["name"] == backend.name
def test_update_backend(lb_client, create_or_update_lb_resources_and_wait_patch):
module = get_module()
backend = get_backend()
create_or_update_lb_resources_and_wait_patch.return_value = {
"backend": to_dict(backend),
"changed": True,
}
result = oci_load_balancer_backend.update_backend(
lb_client,
module,
backend,
"ocid1.loadbalancer.oc1.iad.aaaaa",
"10.159.34.21:8181",
)
assert result["changed"] is True
def test_update_backend_no_update(lb_client):
module = get_module()
backend = get_backend()
backend.offline = False
result = oci_load_balancer_backend.update_backend(
lb_client,
module,
backend,
"ocid1.loadbalancer.oc1.iad.aaaaa",
"10.159.34.21:8181",
)
assert result["changed"] is False
def test_delete_backend(lb_client, delete_lb_resources_and_wait_patch):
module = get_module()
backend = get_backend()
delete_lb_resources_and_wait_patch.return_value = get_response(
204, None, backend, None
)
delete_lb_resources_and_wait_patch.return_value = {
"backend": to_dict(backend),
"changed": True,
}
result = oci_load_balancer_backend.delete_backend(lb_client, module)
assert result["changed"] is True
def get_backend():
backend = Backend()
backend.name = "10.159.34.21:8181"
backend.backup = True
backend.drain = True
backend.offline = True
backend.weight = 5
backend.ip_address = "10.159.34.21"
backend.port = "8181"
return backend
def get_response(status, header, data, request):
return oci.Response(status, header, data, request)
def get_module():
params = {
"load_balancer_id": "ocid1.loadbalancer.oc1.iad.aaaaa",
"backend_set_name": "backend1",
"ip_address": "10.159.34.21",
"port": "8181",
"backup": True,
"offline": False,
"drain": True,
"weight": 5,
}
module = FakeModule(**params)
return module
|
import collections
from typing import List
class Solution:
def snakesAndLadders(self, board: List[List[int]]) -> int:
# this is a bfs
def num_to_rc(num):
N = len(board)
num -= 1
r, c = divmod(num, N)
if r % 2:
c = N - 1 - c
r = N - 1 - r
return r, c
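        # e.g. on a 6x6 board: square 1 -> (5, 0), square 6 -> (5, 5),
        # square 7 -> (4, 5) (rows alternate direction), square 36 -> (0, 0).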
frontier = collections.deque([1])
seen = {1}
target = len(board) * len(board)
step = 0
while frontier:
sz = len(frontier)
for _ in range(sz):
x = frontier.popleft()
if x == target:
return step
for dx in range(1, 7):
nx = x + dx
if nx <= target:
r, c = num_to_rc(nx)
if board[r][c] != -1:
nx = board[r][c]
if nx not in seen:
seen.add(nx)
frontier.append(nx)
step += 1
return -1
|
import sys
__author__ = 'South Mountain'
def p2(n):
"""
Each new term in the Fibonacci sequence is generated by adding the previous
two terms. By starting with 1 and 2, the first 10 terms will be:
1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
By considering the terms in the Fibonacci sequence whose values do not
exceed four million, find the sum of the even-valued terms.
"""
fibonacci(n, 1, 2, 2)
def fibonacci(n, first, second, result):
"""fibonacci"""
if second >= n:
print("Answer of the Problem 2nd is %d" % result)
return
nx = first + second
if nx % 2 == 0:
result += nx
fibonacci(n, second, nx, result)
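# e.g. p2(100) prints 44 = 2 + 8 + 34, the even Fibonacci terms not exceeding 100.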
if __name__ == '__main__':
p2(89)
p2(4*1000000)
sys.exit(0)
|
import os
import df2img
import disnake
import pandas as pd
from PIL import Image
import discordbot.config_discordbot as cfg
from discordbot.config_discordbot import logger
from discordbot.helpers import autocrop_image
from gamestonk_terminal.economy import wsj_model
async def currencies_command(ctx):
"""Currencies overview [Wall St. Journal]"""
try:
# Debug user input
if cfg.DEBUG:
logger.debug("econ-currencies")
# Retrieve data
df = wsj_model.global_currencies()
df = pd.DataFrame.from_dict(df)
# Check for argument
if df.empty:
raise Exception("No available data found")
df["Last"] = pd.to_numeric(df["Last"].astype(float))
df["Chng"] = pd.to_numeric(df["Chng"].astype(float))
df["%Chng"] = pd.to_numeric(df["%Chng"].astype(float))
formats = {"Last": "{:.2f}", "Chng": "{:.2f}", "%Chng": "{:.2f}%"}
for col, value in formats.items():
df[col] = df[col].map(lambda x: value.format(x)) # pylint: disable=W0640
df = df.fillna("")
df.set_index(" ", inplace=True)
# Debug user output
if cfg.DEBUG:
logger.debug(df.to_string())
df = df[
[
"Last",
"Chng",
"%Chng",
]
]
dindex = len(df.index)
fig = df2img.plot_dataframe(
df,
fig_size=(800, (40 + (40 * dindex))),
col_width=[8, 3, 3],
tbl_cells=dict(
align="left",
height=35,
),
template="plotly_dark",
font=dict(
family="Consolas",
size=20,
),
paper_bgcolor="rgba(0, 0, 0, 0)",
)
imagefile = "econ-currencies.png"
df2img.save_dataframe(fig=fig, filename=imagefile)
image = Image.open(imagefile)
image = autocrop_image(image, 0)
image.save(imagefile, "PNG", quality=100)
image = disnake.File(imagefile)
title = "Economy: [WSJ] Currencies"
embed = disnake.Embed(title=title, colour=cfg.COLOR)
embed.set_image(url=f"attachment://{imagefile}")
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
os.remove(imagefile)
await ctx.send(embed=embed, file=image)
except Exception as e:
embed = disnake.Embed(
title="ERROR Economy: [WSJ] Currencies",
colour=cfg.COLOR,
description=e,
)
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
await ctx.send(embed=embed, delete_after=30.0)
|
# Generated by Django 3.1.8 on 2021-04-26 16:02
from django.db import migrations, models
import django.db.models.deletion
def set_primary_groups(apps, schema_editor):
Organization = apps.get_model("organizations", "Organization")
ResponsibleGroup = apps.get_model("permissions", "ResponsibleGroup")
for organization in Organization.objects.all():
created = False
if organization.primary_group is None:
primary_group = ResponsibleGroup.objects.create(
name=organization.name, description=f"Medlemmer av {organization.name}"
)
organization.primary_group = primary_group
created = True
if organization.hr_group is None:
hr_group = ResponsibleGroup.objects.create(
name="HR", description=f"HR-gruppen til {organization.name}. Tillatelser for å se og behandle søknader."
)
organization.hr_group = hr_group
created = True
if created:
organization.save()
class Migration(migrations.Migration):
dependencies = [
("permissions", "0002_auto_20210422_2020"),
("organizations", "0025_auto_20210426_1735"),
]
operations = [
migrations.RemoveField(
model_name="organization",
name="groups",
),
migrations.AddField(
model_name="organization",
name="hr_group",
field=models.OneToOneField(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="hr_organization",
to="permissions.responsiblegroup",
),
),
migrations.AlterField(
model_name="organization",
name="primary_group",
field=models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
related_name="organization",
to="permissions.responsiblegroup",
),
),
migrations.RunPython(set_primary_groups, lambda apps, schema_editor: None),
]
|
from .accuracy import accuracy
from .compose import compose
from .fftconv2d import fft_conv2d, FFTConv2d
|
from cStringIO import StringIO
import collections
import logging
import time
from urlparse import urljoin
from xml.etree import ElementTree
import recurly
from recurly import Account, AddOn, Adjustment, BillingInfo, Coupon, Plan, Redemption, Subscription, SubscriptionAddOn, Transaction
from recurly import Money, NotFoundError, ValidationError, BadRequestError, PageError
from recurlytests import RecurlyTest, xml
recurly.SUBDOMAIN = 'api'
class TestResources(RecurlyTest):
def test_authentication(self):
recurly.API_KEY = None
account_code = 'test%s' % self.test_id
try:
Account.get(account_code)
except recurly.UnauthorizedError, exc:
pass
else:
self.fail("Updating account with invalid email address did not raise a ValidationError")
def test_account(self):
account_code = 'test%s' % self.test_id
with self.mock_request('account/does-not-exist.xml'):
self.assertRaises(NotFoundError, Account.get, account_code)
account = Account(account_code=account_code)
with self.mock_request('account/created.xml'):
account.save()
self.assertEqual(account._url, urljoin(recurly.base_uri(), 'accounts/%s' % account_code))
with self.mock_request('account/list-active.xml'):
active = Account.all_active()
self.assertTrue(len(active) >= 1)
self.assertEqual(active[0].account_code, account_code)
with self.mock_request('account/exists.xml'):
same_account = Account.get(account_code)
self.assertTrue(isinstance(same_account, Account))
self.assertTrue(same_account is not account)
self.assertEqual(same_account.account_code, account_code)
self.assertTrue(same_account.first_name is None)
self.assertEqual(same_account._url, urljoin(recurly.base_uri(), 'accounts/%s' % account_code))
account.username = 'shmohawk58'
account.email = 'larry.david'
account.first_name = u'L\xe4rry'
account.last_name = 'David'
account.company_name = 'Home Box Office'
account.accept_language = 'en-US'
with self.mock_request('account/update-bad-email.xml'):
try:
account.save()
except ValidationError, exc:
self.assertTrue(isinstance(exc.errors, collections.Mapping))
self.assertTrue('account.email' in exc.errors)
suberror = exc.errors['account.email']
self.assertEqual(suberror.symbol, 'invalid_email')
self.assertTrue(suberror.message)
self.assertEqual(suberror.message, str(suberror))
else:
self.fail("Updating account with invalid email address did not raise a ValidationError")
account.email = '[email protected]'
with self.mock_request('account/updated.xml'):
account.save()
with self.mock_request('account/deleted.xml'):
account.delete()
with self.mock_request('account/list-closed.xml'):
closed = Account.all_closed()
self.assertTrue(len(closed) >= 1)
self.assertEqual(closed[0].account_code, account_code)
with self.mock_request('account/list-active-when-closed.xml'):
active = Account.all_active()
self.assertTrue(len(active) < 1 or active[0].account_code != account_code)
# Make sure we can reopen a closed account.
with self.mock_request('account/reopened.xml'):
account.reopen()
try:
with self.mock_request('account/list-active.xml'):
active = Account.all_active()
self.assertTrue(len(active) >= 1)
self.assertEqual(active[0].account_code, account_code)
finally:
with self.mock_request('account/deleted.xml'):
account.delete()
# Make sure numeric account codes work.
if self.test_id == 'mock':
numeric_test_id = 58
else:
numeric_test_id = int(self.test_id)
account = Account(account_code=numeric_test_id)
with self.mock_request('account/numeric-created.xml'):
account.save()
try:
self.assertEqual(account._url, urljoin(recurly.base_uri(), 'accounts/%d' % numeric_test_id))
finally:
with self.mock_request('account/numeric-deleted.xml'):
account.delete()
def test_add_on(self):
plan_code = 'plan%s' % self.test_id
add_on_code = 'addon%s' % self.test_id
plan = Plan(
plan_code=plan_code,
name='Mock Plan',
setup_fee_in_cents=Money(0),
unit_amount_in_cents=Money(1000),
)
with self.mock_request('add-on/plan-created.xml'):
plan.save()
try:
add_on = AddOn(add_on_code=add_on_code, name='Mock Add-On')
with self.mock_request('add-on/need-amount.xml'):
try:
plan.create_add_on(add_on)
except ValidationError, exc:
pass
else:
self.fail("Creating a plan add-on without an amount did not raise a ValidationError")
error = exc.errors['add_on.unit_amount_in_cents']
self.assertEqual(error.symbol, 'blank')
add_on = AddOn(add_on_code=add_on_code, name='Mock Add-On', unit_amount_in_cents=Money(40))
with self.mock_request('add-on/created.xml'):
plan.create_add_on(add_on)
self.assertEqual(add_on.add_on_code, add_on_code)
self.assertEqual(add_on.name, 'Mock Add-On')
try:
with self.mock_request('add-on/exists.xml'):
same_add_on = plan.get_add_on(add_on_code)
self.assertEqual(same_add_on.add_on_code, add_on_code)
self.assertEqual(same_add_on.name, 'Mock Add-On')
self.assertEqual(same_add_on.unit_amount_in_cents['USD'], 40)
finally:
with self.mock_request('add-on/deleted.xml'):
add_on.delete()
finally:
with self.mock_request('add-on/plan-deleted.xml'):
plan.delete()
def test_billing_info(self):
logging.basicConfig(level=logging.DEBUG) # make sure it's init'ed
logger = logging.getLogger('recurly.http.request')
logger.setLevel(logging.DEBUG)
log_content = StringIO()
log_handler = logging.StreamHandler(log_content)
logger.addHandler(log_handler)
account = Account(account_code='binfo%s' % self.test_id)
with self.mock_request('billing-info/account-created.xml'):
account.save()
logger.removeHandler(log_handler)
self.assertTrue('<account' in log_content.getvalue())
try:
# Billing info link won't be present at all yet.
self.assertRaises(AttributeError, getattr, account, 'billing_info')
log_content = StringIO()
log_handler = logging.StreamHandler(log_content)
logger.addHandler(log_handler)
binfo = BillingInfo(
first_name='Verena',
last_name='Example',
address1='123 Main St',
city=u'San Jos\xe9',
state='CA',
zip='94105',
country='US',
type='credit_card',
number='4111 1111 1111 1111',
verification_value='7777',
year='2015',
month='12',
)
with self.mock_request('billing-info/created.xml'):
account.update_billing_info(binfo)
logger.removeHandler(log_handler)
log_content = log_content.getvalue()
self.assertTrue('<billing_info' in log_content)
# See if we redacted our sensitive fields properly.
self.assertTrue('4111' not in log_content)
self.assertTrue('7777' not in log_content)
with self.mock_request('billing-info/account-exists.xml'):
same_account = Account.get('binfo%s' % self.test_id)
with self.mock_request('billing-info/exists.xml'):
same_binfo = same_account.billing_info
self.assertEqual(same_binfo.first_name, 'Verena')
self.assertEqual(same_binfo.city, u'San Jos\xe9')
with self.mock_request('billing-info/deleted.xml'):
binfo.delete()
finally:
with self.mock_request('billing-info/account-deleted.xml'):
account.delete()
log_content = StringIO()
log_handler = logging.StreamHandler(log_content)
logger.addHandler(log_handler)
account = Account(account_code='binfo-%s-2' % self.test_id)
account.billing_info = BillingInfo(
first_name='Verena',
last_name='Example',
address1='123 Main St',
city=u'San Jos\xe9',
state='CA',
zip='94105',
country='US',
type='credit_card',
number='4111 1111 1111 1111',
verification_value='7777',
year='2015',
month='12',
)
with self.mock_request('billing-info/account-embed-created.xml'):
account.save()
try:
logger.removeHandler(log_handler)
log_content = log_content.getvalue()
self.assertTrue('<account' in log_content)
self.assertTrue('<billing_info' in log_content)
self.assertTrue('4111' not in log_content)
self.assertTrue('7777' not in log_content)
with self.mock_request('billing-info/account-embed-exists.xml'):
same_account = Account.get('binfo-%s-2' % self.test_id)
with self.mock_request('billing-info/embedded-exists.xml'):
binfo = same_account.billing_info
self.assertEqual(binfo.first_name, 'Verena')
finally:
with self.mock_request('billing-info/account-embed-deleted.xml'):
account.delete()
def test_charge(self):
account = Account(account_code='charge%s' % self.test_id)
with self.mock_request('adjustment/account-created.xml'):
account.save()
try:
with self.mock_request('adjustment/account-has-no-charges.xml'):
charges = account.adjustments()
self.assertEqual(charges, [])
charge = Adjustment(unit_amount_in_cents=1000, currency='USD', description='test charge', type='charge')
with self.mock_request('adjustment/charged.xml'):
account.charge(charge)
with self.mock_request('adjustment/account-has-adjustments.xml'):
charges = account.adjustments()
self.assertEqual(len(charges), 1)
same_charge = charges[0]
self.assertEqual(same_charge.unit_amount_in_cents, 1000)
self.assertEqual(same_charge.currency, 'USD')
self.assertEqual(same_charge.description, 'test charge')
self.assertEqual(same_charge.type, 'charge')
with self.mock_request('adjustment/account-has-charges.xml'):
charges = account.adjustments(type='charge')
self.assertEqual(len(charges), 1)
with self.mock_request('adjustment/account-has-no-credits.xml'):
credits = account.adjustments(type='credit')
self.assertEqual(len(credits), 0)
finally:
with self.mock_request('adjustment/account-deleted.xml'):
account.delete()
def test_coupon(self):
# Check that a coupon may not exist.
coupon_code = 'coupon%s' % self.test_id
with self.mock_request('coupon/does-not-exist.xml'):
self.assertRaises(NotFoundError, Coupon.get, coupon_code)
# Create a coupon?
coupon = Coupon(
coupon_code=coupon_code,
name='Nice Coupon',
discount_in_cents=Money(1000),
)
with self.mock_request('coupon/created.xml'):
coupon.save()
self.assertTrue(coupon._url)
try:
with self.mock_request('coupon/exists.xml'):
same_coupon = Coupon.get(coupon_code)
self.assertEqual(same_coupon.coupon_code, coupon_code)
self.assertEqual(same_coupon.name, 'Nice Coupon')
discount = same_coupon.discount_in_cents
self.assertEqual(discount['USD'], 1000)
self.assertTrue('USD' in discount)
account_code = 'coupon%s' % self.test_id
account = Account(account_code=account_code)
with self.mock_request('coupon/account-created.xml'):
account.save()
try:
redemption = Redemption(
account_code=account_code,
currency='USD',
)
with self.mock_request('coupon/redeemed.xml'):
real_redemption = coupon.redeem(redemption)
self.assertTrue(isinstance(real_redemption, Redemption))
self.assertEqual(real_redemption.currency, 'USD')
with self.mock_request('coupon/account-with-redemption.xml'):
account = Account.get(account_code)
with self.mock_request('coupon/redemption-exists.xml'):
same_redemption = account.redemption()
self.assertEqual(same_redemption._url, real_redemption._url)
with self.mock_request('coupon/unredeemed.xml'):
real_redemption.delete()
finally:
with self.mock_request('coupon/account-deleted.xml'):
account.delete()
plan = Plan(
plan_code='basicplan',
name='Basic Plan',
setup_fee_in_cents=Money(0),
unit_amount_in_cents=Money(1000),
)
with self.mock_request('coupon/plan-created.xml'):
plan.save()
try:
account_code_2 = 'coupon-%s-2' % self.test_id
sub = Subscription(
plan_code='basicplan',
coupon_code='coupon%s' % self.test_id,
currency='USD',
account=Account(
account_code=account_code_2,
billing_info=BillingInfo(
first_name='Verena',
last_name='Example',
number='4111 1111 1111 1111',
address1='123 Main St',
city='San Francisco',
state='CA',
zip='94105',
country='US',
verification_value='7777',
year='2015',
month='12',
),
),
)
with self.mock_request('coupon/subscribed.xml'):
sub.save()
with self.mock_request('coupon/second-account-exists.xml'):
account_2 = Account.get(account_code_2)
try:
with self.mock_request('coupon/second-account-redemption.xml'):
redemption_2 = account_2.redemption()
self.assertTrue(isinstance(redemption_2, Redemption))
self.assertEqual(redemption_2.currency, 'USD')
with self.mock_request('coupon/exists.xml'):
same_coupon = redemption_2.coupon()
self.assertEqual(same_coupon.coupon_code, coupon_code)
finally:
with self.mock_request('coupon/second-account-deleted.xml'):
account_2.delete()
plan_coupon = Coupon(
coupon_code='plancoupon%s' % self.test_id,
name='Plan Coupon',
discount_in_cents=Money(1000),
applies_to_all_plans=False,
plan_codes=('basicplan',),
)
with self.mock_request('coupon/plan-coupon-created.xml'):
plan_coupon.save()
try:
self.assertTrue(plan_coupon._url)
coupon_plans = list(plan_coupon.plan_codes)
self.assertEqual(len(coupon_plans), 1)
self.assertEqual(coupon_plans[0], 'basicplan')
finally:
with self.mock_request('coupon/plan-coupon-deleted.xml'):
plan_coupon.delete()
finally:
with self.mock_request('coupon/plan-deleted.xml'):
plan.delete()
finally:
with self.mock_request('coupon/deleted.xml'):
coupon.delete()
def test_invoice(self):
account = Account(account_code='invoice%s' % self.test_id)
with self.mock_request('invoice/account-created.xml'):
account.save()
try:
with self.mock_request('invoice/account-has-no-invoices.xml'):
invoices = account.invoices()
self.assertEqual(invoices, [])
with self.mock_request('invoice/error-no-charges.xml'):
try:
account.invoice()
except ValidationError, exc:
error = exc
else:
self.fail("Invoicing an account with no charges did not raise a ValidationError")
self.assertEqual(error.symbol, 'will_not_invoice')
charge = Adjustment(unit_amount_in_cents=1000, currency='USD', description='test charge', type='charge')
with self.mock_request('invoice/charged.xml'):
account.charge(charge)
with self.mock_request('invoice/invoiced.xml'):
account.invoice()
with self.mock_request('invoice/account-has-invoices.xml'):
invoices = account.invoices()
self.assertEqual(len(invoices), 1)
finally:
with self.mock_request('invoice/account-deleted.xml'):
account.delete()
def test_pages(self):
account_code = 'pages-%s-%%d' % self.test_id
all_test_accounts = list()
try:
for i in range(1, 8):
account = Account(account_code=account_code % i)
all_test_accounts.append(account)
with self.mock_request('pages/account-%d-created.xml' % i):
account.save()
self.mock_sleep(1)
with self.mock_request('pages/list.xml'):
accounts = Account.all(per_page=4)
self.assertTrue(isinstance(accounts[0], Account))
self.assertRaises(IndexError, lambda: accounts[4])
# Test errors, since the first page has no first page.
self.assertRaises(PageError, lambda: accounts.first_page())
# Make sure PageError is a ValueError.
self.assertRaises(ValueError, lambda: accounts.first_page())
with self.mock_request('pages/next-list.xml'):
next_accounts = accounts.next_page()
            # We asked for all the accounts, which may include closed accounts
            # from previous tests or data, not just the ones created in this test.
self.assertTrue(isinstance(next_accounts[0], Account))
self.assertRaises(IndexError, lambda: next_accounts[4])
with self.mock_request('pages/list.xml'): # should be just like the first
first_accounts = next_accounts.first_page()
self.assertTrue(isinstance(first_accounts[0], Account))
finally:
for i, account in enumerate(all_test_accounts, 1):
with self.mock_request('pages/account-%d-deleted.xml' % i):
account.delete()
def test_plan(self):
plan_code = 'plan%s' % self.test_id
with self.mock_request('plan/does-not-exist.xml'):
self.assertRaises(NotFoundError, Plan.get, plan_code)
plan = Plan(
plan_code=plan_code,
name='Mock Plan',
setup_fee_in_cents=Money(0),
unit_amount_in_cents=Money(1000),
)
with self.mock_request('plan/created.xml'):
plan.save()
try:
self.assertEqual(plan.plan_code, plan_code)
with self.mock_request('plan/exists.xml'):
same_plan = Plan.get(plan_code)
self.assertEqual(same_plan.plan_code, plan_code)
self.assertEqual(same_plan.name, 'Mock Plan')
plan.plan_interval_length = 2
plan.plan_interval_unit = 'months'
plan.unit_amount_in_cents = Money(USD=2000)
plan.setup_fee_in_cents = Money(USD=0)
with self.mock_request('plan/updated.xml'):
plan.save()
finally:
with self.mock_request('plan/deleted.xml'):
plan.delete()
def test_subscribe(self):
logging.basicConfig(level=logging.DEBUG) # make sure it's init'ed
logger = logging.getLogger('recurly.http.request')
logger.setLevel(logging.DEBUG)
plan = Plan(
plan_code='basicplan',
name='Basic Plan',
setup_fee_in_cents=Money(0),
unit_amount_in_cents=Money(1000),
)
with self.mock_request('subscription/plan-created.xml'):
plan.save()
try:
account = Account(account_code='subscribe%s' % self.test_id)
with self.mock_request('subscription/account-created.xml'):
account.save()
try:
sub = Subscription(
plan_code='basicplan',
currency='USD',
unit_amount_in_cents=1000,
)
with self.mock_request('subscription/error-no-billing-info.xml'):
try:
account.subscribe(sub)
except BadRequestError, exc:
error = exc
else:
self.fail("Subscribing with no billing info did not raise a BadRequestError")
self.assertEqual(error.symbol, 'billing_info_required')
binfo = BillingInfo(
first_name='Verena',
last_name='Example',
address1='123 Main St',
city=u'San Jos\xe9',
state='CA',
zip='94105',
country='US',
type='credit_card',
number='4111 1111 1111 1111',
verification_value='7777',
year='2015',
month='12',
)
with self.mock_request('subscription/update-billing-info.xml'):
account.update_billing_info(binfo)
with self.mock_request('subscription/subscribed.xml'):
account.subscribe(sub)
self.assertTrue(sub._url)
with self.mock_request('subscription/account-subscriptions.xml'):
subs = account.subscriptions()
self.assertTrue(len(subs) > 0)
self.assertEqual(subs[0].uuid, sub.uuid)
with self.mock_request('subscription/all-subscriptions.xml'):
subs = Subscription.all()
self.assertTrue(len(subs) > 0)
self.assertEqual(subs[0].uuid, sub.uuid)
with self.mock_request('subscription/cancelled.xml'):
sub.cancel()
with self.mock_request('subscription/reactivated.xml'):
sub.reactivate()
# Try modifying the subscription.
sub.timeframe = 'renewal'
sub.unit_amount_in_cents = 800
with self.mock_request('subscription/updated-at-renewal.xml'):
sub.save()
pending_sub = sub.pending_subscription
self.assertTrue(isinstance(pending_sub, Subscription))
self.assertEqual(pending_sub.unit_amount_in_cents, 800)
self.assertEqual(sub.unit_amount_in_cents, 1000)
with self.mock_request('subscription/terminated.xml'):
sub.terminate(refund='none')
log_content = StringIO()
log_handler = logging.StreamHandler(log_content)
logger.addHandler(log_handler)
sub = Subscription(
plan_code='basicplan',
currency='USD',
account=Account(
account_code='subscribe%s' % self.test_id,
billing_info=BillingInfo(
first_name='Verena',
last_name='Example',
address1='123 Main St',
city=u'San Jos\xe9',
state='CA',
zip='94105',
country='US',
type='credit_card',
number='4111 1111 1111 1111',
verification_value='7777',
year='2015',
month='12',
),
),
)
with self.mock_request('subscription/subscribed-billing-info.xml'):
account.subscribe(sub)
logger.removeHandler(log_handler)
log_content = log_content.getvalue()
self.assertTrue('<subscription' in log_content)
self.assertTrue('<billing_info' in log_content)
# See if we redacted our sensitive fields properly.
self.assertTrue('4111' not in log_content)
self.assertTrue('7777' not in log_content)
finally:
with self.mock_request('subscription/account-deleted.xml'):
account.delete()
account_code_2 = 'subscribe-%s-2' % self.test_id
sub = Subscription(
plan_code='basicplan',
currency='USD',
account=Account(
account_code=account_code_2,
billing_info=BillingInfo(
first_name='Verena',
last_name='Example',
address1='123 Main St',
city=u'San Jos\xe9',
state='CA',
zip='94105',
country='US',
type='credit_card',
number='4111 1111 1111 1111',
verification_value='7777',
year='2015',
month='12',
),
),
)
with self.mock_request('subscription/subscribe-embedded-account.xml'):
sub.save()
with self.mock_request('subscription/embedded-account-exists.xml'):
acc = Account.get(account_code_2)
self.assertEqual(acc.account_code, account_code_2)
with self.mock_request('subscription/embedded-account-deleted.xml'):
acc.delete()
finally:
with self.mock_request('subscription/plan-deleted.xml'):
plan.delete()
def test_subscribe_add_on(self):
plan = Plan(
plan_code='basicplan',
name='Basic Plan',
setup_fee_in_cents=Money(0),
unit_amount_in_cents=Money(1000),
)
with self.mock_request('subscribe-add-on/plan-created.xml'):
plan.save()
try:
add_on = AddOn(
add_on_code='mock_add_on',
name='Mock Add-On',
unit_amount_in_cents=Money(100),
)
with self.mock_request('subscribe-add-on/add-on-created.xml'):
plan.create_add_on(add_on)
second_add_on = AddOn(
add_on_code='second_add_on',
name='Second Add-On',
unit_amount_in_cents=Money(50),
)
with self.mock_request('subscribe-add-on/second-add-on-created.xml'):
plan.create_add_on(second_add_on)
account_code='sad-on-%s' % self.test_id
sub = Subscription(
plan_code='basicplan',
subscription_add_ons=[
SubscriptionAddOn(
add_on_code='mock_add_on',
),
SubscriptionAddOn(
add_on_code='second_add_on',
),
],
currency='USD',
account=Account(
account_code=account_code,
billing_info=BillingInfo(
first_name='Verena',
last_name='Example',
number='4111 1111 1111 1111',
address1='123 Main St',
city='San Francisco',
state='CA',
zip='94105',
country='US',
verification_value='7777',
year='2015',
month='12',
),
),
)
with self.mock_request('subscribe-add-on/subscribed.xml'):
sub.save()
# Subscription amounts are in one real currency, so they aren't Money instances.
sub_amount = sub.unit_amount_in_cents
self.assertTrue(not isinstance(sub_amount, Money))
self.assertEqual(sub_amount, 1000)
# Test that the add-ons' amounts aren't real Money instances either.
add_on_1, add_on_2 = sub.subscription_add_ons
self.assertIsInstance(add_on_1, SubscriptionAddOn)
amount_1 = add_on_1.unit_amount_in_cents
self.assertTrue(not isinstance(amount_1, Money))
self.assertEqual(amount_1, 100)
with self.mock_request('subscribe-add-on/account-exists.xml'):
account = Account.get(account_code)
with self.mock_request('subscribe-add-on/account-deleted.xml'):
account.delete()
finally:
with self.mock_request('subscribe-add-on/plan-deleted.xml'):
plan.delete()
def test_transaction(self):
logging.basicConfig(level=logging.DEBUG) # make sure it's init'ed
logger = logging.getLogger('recurly.http.request')
logger.setLevel(logging.DEBUG)
account_code = 'transaction%s' % self.test_id
log_content = StringIO()
log_handler = logging.StreamHandler(log_content)
logger.addHandler(log_handler)
transaction = Transaction(
amount_in_cents=1000,
currency='USD',
account=Account(
account_code=account_code,
billing_info=BillingInfo(
first_name='Verena',
last_name='Example',
number='4111-1111-1111-1111',
year='2014',
address1='123 Main St',
city='San Francisco',
state='CA',
zip='94105',
country='US',
month='7',
verification_value='7777',
),
)
)
with self.mock_request('transaction/created.xml'):
transaction.save()
logger.removeHandler(log_handler)
try:
transaction.get_refund_transaction()
except ValueError:
pass
else:
self.fail("Transaction with no refund transaction did not raise a ValueError from get_refund_transaction()")
with self.mock_request('transaction/account-exists.xml'):
account = Account.get(account_code)
try:
log_content = log_content.getvalue()
self.assertTrue('<transaction' in log_content)
self.assertTrue('<billing_info' in log_content)
# See if we redacted our sensitive fields properly.
self.assertTrue('4111' not in log_content)
self.assertTrue('7777' not in log_content)
with self.mock_request('transaction/refunded.xml'):
refunded_transaction = transaction.refund()
transaction_2 = Transaction(
amount_in_cents=1000,
currency='USD',
account=Account(account_code=account_code),
)
with self.mock_request('transaction/created-again.xml'):
transaction_2.save()
self.assertNotEqual(transaction_2.uuid, transaction.uuid)
self.assertTrue(transaction_2.refundable)
with self.mock_request('transaction/partial-refunded.xml'):
refunded_transaction = transaction_2.refund(amount_in_cents=700)
self.assertTrue(refunded_transaction is transaction_2)
self.assertTrue(hasattr(transaction_2, 'get_refund_transaction'))
with self.mock_request('transaction/partial-refunded-transaction.xml'):
refund_transaction = transaction_2.get_refund_transaction()
self.assertTrue(isinstance(refund_transaction, Transaction))
self.assertTrue(not refund_transaction.refundable)
self.assertNotEqual(refund_transaction.uuid, transaction_2.uuid)
finally:
with self.mock_request('transaction/account-deleted.xml'):
account.delete()
def test_transaction_with_balance(self):
transaction = Transaction(
amount_in_cents=1000,
currency='USD',
account=Account(),
)
with self.mock_request('transaction-balance/transaction-no-account.xml'):
try:
transaction.save()
except ValidationError, error:
pass
else:
self.fail("Posting a transaction without an account code did not raise a ValidationError")
# Make sure there really were errors.
self.assertTrue(len(error.errors) > 0)
account_code = 'transbalance%s' % self.test_id
account = Account(account_code=account_code)
with self.mock_request('transaction-balance/account-created.xml'):
account.save()
try:
# Try to charge without billing info, should break.
transaction = Transaction(
amount_in_cents=1000,
currency='USD',
account=account,
)
with self.mock_request('transaction-balance/transaction-no-billing-fails.xml'):
try:
transaction.save()
except ValidationError, error:
pass
else:
self.fail("Posting a transaction without billing info did not raise a ValidationError")
# Make sure there really were errors.
self.assertTrue(len(error.errors) > 0)
binfo = BillingInfo(
first_name='Verena',
last_name='Example',
address1='123 Main St',
city=u'San Jos\xe9',
state='CA',
zip='94105',
country='US',
type='credit_card',
number='4111 1111 1111 1111',
verification_value='7777',
year='2015',
month='12',
)
with self.mock_request('transaction-balance/set-billing-info.xml'):
account.update_billing_info(binfo)
# Try to charge now, should be okay.
transaction = Transaction(
amount_in_cents=1000,
currency='USD',
account=account,
)
with self.mock_request('transaction-balance/transacted.xml'):
transaction.save()
# Give the account a credit.
credit = Adjustment(unit_amount_in_cents=-2000, currency='USD', description='transaction test credit')
with self.mock_request('transaction-balance/credited.xml'):
# TODO: maybe this should be adjust()?
account.charge(credit)
# Try to charge less than the account balance, which should fail (not a CC transaction).
transaction = Transaction(
amount_in_cents=500,
currency='USD',
account=account,
)
with self.mock_request('transaction-balance/transacted-2.xml'):
transaction.save()
# The transaction doesn't actually save.
self.assertTrue(transaction._url is None)
# Try to charge more than the account balance, which should work.
transaction = Transaction(
amount_in_cents=3000,
currency='USD',
account=account,
)
with self.mock_request('transaction-balance/transacted-3.xml'):
transaction.save()
# This transaction should be recorded.
self.assertTrue(transaction._url is not None)
finally:
with self.mock_request('transaction-balance/account-deleted.xml'):
account.delete()
if __name__ == '__main__':
import unittest
unittest.main()
|
from os import walk
from os.path import join
from knot_a_rumor.story import Story, Scene
class Knot:
def __init__(self, path):
self.path = path
def stories(self):
return next(walk(self.path))[1]
def get_story(self, name):
story = Story(self.build_story_path(name))
story.load()
return story
def build_story_path(self, name):
return join(self.path, name)
def init_story(self, name):
story = self.get_story(name)
        player_state = {
            "current_scene": story.scene,
            "story": name,
            "location": {"x": 0, "y": 0},
            "turn": 0,
            "seen": []
        }
return player_state
def play(self, player_state):
scene = self.load_scene(player_state)
player_state["location"] = scene.start
return player_state
def move(self, player_state, direction, times=1):
scene = self.load_scene(player_state)
if not scene.valid_move(player_state["location"], direction, times):
return player_state
if direction == "n":
player_state["location"]["y"] += times
elif direction == "s":
player_state["location"]["y"] -= times
elif direction == "e":
player_state["location"]["x"] += times
elif direction == "w":
player_state["location"]["x"] -= times
player_state["turn"] += 1
return player_state
def load_scene(self, player_state):
story_name = player_state["story"]
return Scene(self.build_story_path(story_name), player_state)
def scene_map(self, player_state):
scene = self.load_scene(player_state)
return scene.build_map(player_state)
def narrate(self, player_state):
scene = self.load_scene(player_state)
narration = scene.view(player_state["location"])
        if narration is None:
return scene.narration
return narration
def look(self, player_state):
scene = self.load_scene(player_state)
player_state, seen = scene.look(player_state)
return (player_state, seen)
def describe(self, player_state, char):
scene = self.load_scene(player_state)
return scene.describe(player_state, char)
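# Illustrative usage sketch (the "stories" directory and "example_story" name are assumptions, not part of this module):
#     knot = Knot("stories")
#     state = knot.init_story("example_story")
#     state = knot.play(state)
#     state = knot.move(state, "n", times=2)
#     print(knot.narrate(state))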
|
import os
import json
import unittest
from unittest import mock
from unittest.mock import ANY
import logging
from logging import handlers, LogRecord
from typing import Tuple, Optional, Dict
from tests.util.config_mixin import ConfigMixin
from backee.model.web_handler import WebHandler
from backee.model.max_level_filter import MaxLevelFilter
from backee.parser.config_parser import parse_config
class LoggersParserTestCase(ConfigMixin, unittest.TestCase):
"""
Tests for `backee/parser/logger_parser.py`.
"""
@unittest.mock.patch("os.mkdir")
def test_file_logger_all_values_parsed(self, mkdir):
"""
All possible values for file logger are set and parsed correctly.
"""
expected_file_logger = self.__create_file_logger(
filename="/folder/log_file1",
format="('%(asctime)s [%(threadName)18s][%(levelname)8s] %(message)s')",
max_bytes=1024,
backup_count=1,
min_level=logging.INFO,
max_level=logging.ERROR,
)
# parse config and get first logger
parsed_config = self._get_parsed_config("file_loggers_config.yml")
parsed_file_logger = parsed_config.loggers[0]
        # make sure the log directory was created
mkdir.assert_called_with("/folder")
result, msg = self.__compare_file_loggers(
expected_file_logger, parsed_file_logger
)
self.assertTrue(
result,
msg=f"Full config is not as expected, following comparison failed: {msg}",
)
@unittest.mock.patch("os.mkdir")
def test_file_logger_default_values(self, mkdir):
"""
Only required values are set and others are default.
"""
expected_file_logger = self.__create_file_logger(
filename="/folder/log_file2",
format="('%(asctime)s %(levelname)s %(message)s')",
max_bytes=1 * 1024 * 1024,
backup_count=0,
min_level=logging.DEBUG,
max_level=logging.CRITICAL,
)
# parse config and get logger
parsed_config = self._get_parsed_config("file_loggers_config.yml")
parsed_file_logger = parsed_config.loggers[1]
        # make sure the log directory was created
mkdir.assert_called_with("/folder")
result, msg = self.__compare_file_loggers(
expected_file_logger, parsed_file_logger
)
self.assertTrue(
result,
msg=f"Default config is not as expected, following comparison failed: {msg}",
)
def test_web_logger_all_values_parsed(self):
"""
All possible values for web logger are set and parsed correctly.
"""
expected_web_logger = self.__create_web_handler(
method="POST",
url="https://some/url1",
headers={"Content-Type": "application/json", "TestHeader1": "Value1"},
body='{"message":"message 1"}',
auth={
"type": "basic",
"username": "admin",
"password": "${WEB_LOGGER_PASSWORD}",
},
min_level=logging.INFO,
max_level=logging.ERROR,
)
# parse config and get logger
parsed_config = self._get_parsed_config("full_config.yml")
parsed_web_logger = parsed_config.loggers[0]
self.assertEqual(
expected_web_logger,
parsed_web_logger,
msg="full web logger is parsed incorrectly",
)
def test_web_logger_default_values(self):
"""
Only required values are set and others are default.
"""
expected_web_logger = self.__create_web_handler(
method="POST", url="https://some/url2", body='{"message":"message 2"}'
)
# parse config and get logger
parsed_config = self._get_parsed_config("default_config.yml")
parsed_web_logger = parsed_config.loggers[0]
self.assertEqual(
expected_web_logger,
parsed_web_logger,
msg="default web logger is parsed incorrectly",
)
@unittest.mock.patch("requests.post")
def test_web_logger_wildcard_replacements_in_post(self, mock_post):
"""
Test that {{ message }} and {{ name }} are replaced in url, headers and body for POST.
"""
parsed_config = self._get_parsed_config("full_config.yml")
parsed_web_logger = parsed_config.loggers[1]
message = "test"
name = parsed_config.name
parsed_web_logger.emit(
LogRecord(
name=None,
level=logging.ERROR,
pathname=None,
lineno=None,
msg=message,
args=None,
exc_info=None,
)
)
# headers, data and URL are updated
data = json.dumps(json.loads(f'{{"message":"{message}","name":"{name}"}}'))
mock_post.assert_called_once_with(
auth=ANY,
data=data,
headers={
"Content-Type": "application/json",
"TestHeader2": f"{name} {message}",
},
url=f"https://some/url2?name={name}&message={message}",
)
@unittest.mock.patch("requests.get")
def test_web_logger_wildcard_replacements_in_get(self, mock_get):
"""
Test that {{ message }} and {{ name }} are replaced in url, headers and body for GET.
"""
parsed_config = self._get_parsed_config("full_config.yml")
parsed_web_logger = parsed_config.loggers[2]
message = "test"
name = parsed_config.name
parsed_web_logger.emit(
LogRecord(
name=None,
level=logging.ERROR,
pathname=None,
lineno=None,
msg=message,
args=None,
exc_info=None,
)
)
# headers, data and URL are updated
mock_get.assert_called_once_with(
auth=ANY,
headers={
"Content-Type": "application/json",
"TestHeader3": f"{name} {message}",
},
url=f"https://some/url3?name={name}&message={message}",
)
@unittest.mock.patch("os.mkdir")
def test_file_sizes_parser(self, mkdir):
"""
Test file sizes parser.
"""
id_file_size = {
2: 1,
3: 1 * 1024,
4: 1 * 1024 * 1024,
5: 1 * 1024 * 1024 * 1024,
}
# parse config and get first logger
parsed_config = self._get_parsed_config("file_loggers_config.yml")
        # make sure the log directory was created
mkdir.assert_called_with("/folder")
        for k, v in id_file_size.items():
            self.assertEqual(v, parsed_config.loggers[k].maxBytes)
def __compare_file_loggers(
self, first: handlers.RotatingFileHandler, second: handlers.RotatingFileHandler
) -> Tuple[bool, str]:
"""
Helper function to compare two handlers.RotatingFileHandler instances.
"""
        if not isinstance(first, handlers.RotatingFileHandler) or not isinstance(
            second, handlers.RotatingFileHandler
        ):
return False, "class instance"
if first.baseFilename != second.baseFilename:
return False, "filename"
if first.maxBytes != second.maxBytes:
return False, "maxBytes"
if first.backupCount != second.backupCount:
return False, "backupCount"
if first.formatter._fmt != second.formatter._fmt:
return False, "formatter"
if first.level != second.level:
return False, "level"
if len(first.filters) != len(second.filters):
return False, "filters"
for x, y in zip(first.filters, second.filters):
if x != y:
return False, "filters items"
return True, None
def __create_file_logger(
self,
filename: str,
format: str,
max_bytes: int = 1048576,
backup_count: int = 0,
min_level: int = logging.DEBUG,
max_level: int = logging.CRITICAL,
) -> handlers.RotatingFileHandler:
with mock.patch("builtins.open", create=True):
handler = handlers.RotatingFileHandler(
filename=filename, maxBytes=max_bytes, backupCount=backup_count
)
handler.setFormatter(logging.Formatter(fmt=format))
handler.setLevel(min_level)
handler.addFilter(MaxLevelFilter(max_level))
return handler
def __create_web_handler(
self,
method: str,
url: str,
headers: Optional[Dict[str, str]] = None,
body: Optional[str] = None,
auth: Optional[Dict[str, str]] = None,
min_level: int = logging.DEBUG,
max_level: int = logging.CRITICAL,
name: str = "",
) -> WebHandler:
web = WebHandler(method, url, headers, body, auth, name)
web.setLevel(min_level)
web.addFilter(MaxLevelFilter(max_level))
return web
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
#
# Copyright 2013 The Flutter Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import sys
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages as pdfp
class BenchmarkResult:
def __init__(self, name, backend, timeUnit, drawCallCount):
self.name = name
self.series = {}
self.seriesLabels = {}
self.backend = backend
self.largeYValues = False
self.yLimit = 200
self.timeUnit = timeUnit
self.drawCallCount = drawCallCount
def __repr__(self):
return 'Name: % s\nBackend: % s\nSeries: % s\nSeriesLabels: % s\n' % (self.name, self.backend, self.series, self.seriesLabels)
def addDataPoint(self, family, x, y):
if family not in self.series:
self.series[family] = { 'x': [], 'y': [] }
self.series[family]['x'].append(x)
self.series[family]['y'].append(y)
if y > self.yLimit:
self.largeYValues = True
def setFamilyLabel(self, family, label):
# I'm not keying the main series dict off the family label
# just in case we get data where the two aren't a 1:1 mapping
if family in self.seriesLabels:
assert self.seriesLabels[family] == label
return
self.seriesLabels[family] = label
def plot(self):
figures = []
figures.append(plt.figure(dpi=1200, frameon=False, figsize=(11, 8.5)))
for family in self.series:
plt.plot(self.series[family]['x'], self.series[family]['y'], label = self.seriesLabels[family])
plt.xlabel('Benchmark Seed')
plt.ylabel('Time (' + self.timeUnit + ')')
title = ''
# Crop the Y axis so that we can see what's going on at the lower end
if self.largeYValues:
plt.ylim((0, self.yLimit))
title = self.name + ' ' + self.backend + ' (Cropped)'
else:
title = self.name + ' ' + self.backend
if self.drawCallCount != -1:
title += '\nDraw Call Count: ' + str(int(self.drawCallCount))
plt.title(title)
plt.grid(which='both', axis='both')
plt.legend(fontsize='xx-small')
plt.plot()
if self.largeYValues:
# Plot again but with the full Y axis visible
figures.append(plt.figure(dpi=1200, frameon=False, figsize=(11, 8.5)))
for family in self.series:
plt.plot(self.series[family]['x'], self.series[family]['y'], label = self.seriesLabels[family])
plt.xlabel('Benchmark Seed')
plt.ylabel('Time (' + self.timeUnit + ')')
title = self.name + ' ' + self.backend + ' (Complete)'
if self.drawCallCount != -1:
title += '\nDraw Call Count: ' + str(int(self.drawCallCount))
plt.title(title)
plt.grid(which='both', axis='both')
plt.legend(fontsize='xx-small')
plt.plot()
return figures
def main():
parser = argparse.ArgumentParser()
parser.add_argument('filename', action='store',
help='Path to the JSON output from Google Benchmark')
parser.add_argument('-o', '--output-pdf', dest='outputPDF', action='store', default='output.pdf',
help='Filename to output the PDF of graphs to.')
args = parser.parse_args()
jsonData = parseJSON(args.filename)
return processBenchmarkData(jsonData, args.outputPDF)
def error(message):
print(message)
exit(1)
def extractAttributesLabel(benchmarkResult):
# Possible attribute keys are:
# AntiAliasing
# HairlineStroke
# StrokedStyle
# FilledStyle
attributes = ['AntiAliasing', 'HairlineStroke', 'StrokedStyle', 'FilledStyle']
label = ''
for attr in attributes:
try:
if benchmarkResult[attr] != 0:
label += attr + ', '
except KeyError:
pass
return label[:-2]
def processBenchmarkData(benchmarkJSON, outputPDF):
benchmarkResultsData = {}
for benchmarkResult in benchmarkJSON:
# Skip aggregate results
if 'aggregate_name' in benchmarkResult:
continue
benchmarkVariant = benchmarkResult['name'].split('/')
# The final split is always `real_time` and can be discarded
benchmarkVariant.remove('real_time')
splits = len(benchmarkVariant)
# First split is always the benchmark function name
benchmarkName = benchmarkVariant[0]
# The last split is always the seeded value into the benchmark
benchmarkSeededValue = benchmarkVariant[splits-1]
# The second last split is always the backend
benchmarkBackend = benchmarkVariant[splits-2]
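    # Example with a hypothetical benchmark name: 'BM_DrawRect/Metal/128/real_time' becomes
    # ['BM_DrawRect', 'Metal', '128'] after dropping 'real_time', i.e. the benchmark function
    # name, the backend, and the seeded value respectively.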
# Time taken (wall clock time) for benchmark to run
benchmarkRealTime = benchmarkResult['real_time']
benchmarkUnit = benchmarkResult['time_unit']
benchmarkFamilyIndex = benchmarkResult['family_index']
benchmarkFamilyLabel = ''
if splits > 3:
for i in range(1, splits-2):
benchmarkFamilyLabel += benchmarkVariant[i] + ', '
benchmarkFamilyAttributes = extractAttributesLabel(benchmarkResult)
if benchmarkFamilyAttributes == '':
benchmarkFamilyLabel = benchmarkFamilyLabel[:-2]
else:
benchmarkFamilyLabel = benchmarkFamilyLabel + benchmarkFamilyAttributes
if 'DrawCallCount' in benchmarkResult:
benchmarkDrawCallCount = benchmarkResult['DrawCallCount']
else:
benchmarkDrawCallCount = -1
if benchmarkName not in benchmarkResultsData:
benchmarkResultsData[benchmarkName] = BenchmarkResult(benchmarkName, benchmarkBackend, benchmarkUnit, benchmarkDrawCallCount)
benchmarkResultsData[benchmarkName].addDataPoint(benchmarkFamilyIndex, benchmarkSeededValue, benchmarkRealTime)
benchmarkResultsData[benchmarkName].setFamilyLabel(benchmarkFamilyIndex, benchmarkFamilyLabel)
pp = pdfp(outputPDF)
for benchmark in benchmarkResultsData:
figures = benchmarkResultsData[benchmark].plot()
for fig in figures:
pp.savefig(fig)
pp.close()
def parseJSON(filename):
  try:
    jsonFile = open(filename, 'r')
  except:
    error('Unable to load file.')
  try:
    jsonData = json.load(jsonFile)
  except json.JSONDecodeError:
    error('Invalid JSON. Unable to parse.')
  return jsonData['benchmarks']
if __name__ == '__main__':
sys.exit(main())
|
"""endpoint"""
BROKER_URL = 'amqp://user:[email protected]:15672/'
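# URL format: amqp://<user>:<password>@<host>:<port>/<vhost>. Note that RabbitMQ's AMQP
# listener conventionally runs on 5672, while 15672 is usually the HTTP management UI port.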
|
""" Calculator module main entry-point.
"""
from calculator.controller import MainController
controller = MainController()
controller.main()
|
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE
from __future__ import absolute_import
import numpy
import pytest
import skhep_testdata
import uproot
def test():
with uproot.open(skhep_testdata.data_path("uproot-Zmumu.root")) as f:
for arrays in f["events"].iterate(
"px1", step_size=1000, cut="px1 > 0", library="np"
):
assert numpy.all(arrays["px1"] > 0)
|
"""
Common tools that are useful for neo.io object tests
"""
import os
import shutil
from neo.core import Block, Segment
from neo.test.generate_datasets import generate_from_supported_objects
def close_object_safe(obj):
"""
Close an object safely, ignoring errors
For some io types the file should be closed before being
opened again in a test. Call this after the test is done to make sure
the file is closed.
"""
try:
obj.close()
except:
pass
def cleanup_test_file(mode, path, directory=None):
"""
Remove test files or directories safely. mode is the mode of the io class,
    either 'file' or 'dir'. It can also be an io class object, or any
other object with a 'mode' attribute. If that is the case, use the
'mode' attribute from the object.
If directory is not None and path is not an absolute path already,
use the file from the given directory.
"""
if directory is not None and not os.path.isabs(path):
path = os.path.join(directory, path)
if hasattr(mode, 'mode'):
mode = mode.mode
if mode == 'file':
if os.path.exists(path):
os.remove(path)
elif mode == 'dir':
if os.path.exists(path):
shutil.rmtree(path)
def get_test_file_full_path(ioclass, filename=None,
directory=None, clean=False):
"""
Get the full path for a file of the given filename.
If filename is None, create a filename.
If filename is a list, get the full path for each item in the list.
If return_path is True, also return the full path to the file.
If directory is not None and path is not an absolute path already,
use the file from the given directory.
If return_path is True, return the full path of the file along with
the io object. return reader, path. Default is False.
If clean is True, try to delete existing versions of the file
before creating the io object. Default is False.
"""
# create a filename if none is provided
if filename is None:
filename = 'Generated0_%s' % ioclass.__name__
if (ioclass.mode == 'file' and len(ioclass.extensions) >= 1):
filename += '.' + ioclass.extensions[0]
elif not hasattr(filename, 'lower'):
return [get_test_file_full_path(ioclass, filename=fname,
directory=directory, clean=clean) for
fname in filename]
# if a directory is provided add it
if directory is not None and not os.path.isabs(filename):
filename = os.path.join(directory, filename)
if clean:
cleanup_test_file(ioclass, filename)
return filename
# prevent this being treated as a test if imported into a test file
get_test_file_full_path.__test__ = False
def create_generic_io_object(ioclass, filename=None, directory=None,
return_path=False, clean=False):
"""
Create an io object in a generic way that can work with both
file-based and directory-based io objects
If filename is None, create a filename.
If return_path is True, also return the full path to the file.
If directory is not None and path is not an absolute path already,
use the file from the given directory.
If return_path is True, return the full path of the file along with
the io object. return reader, path. Default is False.
If clean is True, try to delete existing versions of the file
before creating the io object. Default is False.
"""
filename = get_test_file_full_path(ioclass, filename=filename,
directory=directory, clean=clean)
try:
# actually create the object
if ioclass.mode == 'file':
ioobj = ioclass(filename=filename)
elif ioclass.mode == 'dir':
ioobj = ioclass(dirname=filename)
else:
ioobj = None
except:
print(filename)
raise
# return the full path if requested, otherwise don't
if return_path:
return ioobj, filename
return ioobj
def iter_generic_io_objects(ioclass, filenames, directory=None,
return_path=False, clean=False):
"""
Return an iterable over the io objects created from a list of filenames.
The objects are automatically cleaned up afterwards.
If directory is not None and path is not an absolute path already,
use the file from the given directory.
If return_path is True, yield the full path of the file along with
the io object. yield reader, path. Default is False.
If clean is True, try to delete existing versions of the file
before creating the io object. Default is False.
"""
for filename in filenames:
ioobj, path = create_generic_io_object(ioclass, filename=filename,
directory=directory,
return_path=True,
clean=clean)
if ioobj is None:
continue
if return_path:
yield ioobj, path
else:
yield ioobj
close_object_safe(ioobj)
def create_generic_reader(ioobj, target=None, readall=False):
"""
Create a function that can read the target object from a file.
If target is None, use the first supported_objects from ioobj
If target is False, use the 'read' method.
If target is the Block or Segment class, use read_block or read_segment,
respectively.
If target is a string, use 'read_'+target.
If readall is True, use the read_all_ method instead of the read_ method.
Default is False.
"""
if target is None:
target = ioobj.supported_objects[0].__name__
if target == Block:
if readall:
return ioobj.read_all_blocks
return ioobj.read_block
elif target == Segment:
if readall:
return ioobj.read_all_segments
return ioobj.read_segment
elif not target:
if readall:
raise ValueError('readall cannot be True if target is False')
return ioobj.read
elif hasattr(target, 'lower'):
if readall:
return getattr(ioobj, 'read_all_%ss' % target.lower())
return getattr(ioobj, 'read_%s' % target.lower())
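# Illustrative usage sketch (``ExampleIO`` and ``tmpdir`` are assumptions, not part of this module):
#     ioobj = create_generic_io_object(ExampleIO, directory=tmpdir, clean=True)
#     reader = create_generic_reader(ioobj, target=Block)
#     block = reader(lazy=False)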
def iter_generic_readers(ioclass, filenames, directory=None, target=None,
return_path=False, return_ioobj=False,
clean=False, readall=False):
"""
Iterate over functions that can read the target object from a list of
filenames.
If target is None, use the first supported_objects from ioobj
If target is False, use the 'read' method.
If target is the Block or Segment class, use read_block or read_segment,
respectively.
If target is a string, use 'read_'+target.
If directory is not None and path is not an absolute path already,
use the file from the given directory.
If return_path is True, return the full path of the file along with
the reader object. return reader, path.
If return_ioobj is True, return the io object as well as the reader.
return reader, ioobj. Default is False.
If both return_path and return_ioobj is True,
return reader, path, ioobj. Default is False.
If clean is True, try to delete existing versions of the file
before creating the io object. Default is False.
If readall is True, use the read_all_ method instead of the read_ method.
Default is False.
"""
for ioobj, path in iter_generic_io_objects(ioclass=ioclass,
filenames=filenames,
directory=directory,
return_path=True,
clean=clean):
res = create_generic_reader(ioobj, target=target, readall=readall)
if not return_path and not return_ioobj:
yield res
else:
res = (res,)
if return_path:
res = res + (path,)
if return_ioobj:
res = res + (ioobj,)
yield res
def create_generic_writer(ioobj, target=None):
"""
Create a function that can write the target object to a file using the
neo io object ioobj.
If target is None, use the first supported_objects from ioobj
If target is False, use the 'write' method.
If target is the Block or Segment class, use write_block or write_segment,
respectively.
If target is a string, use 'write_'+target.
"""
if target is None:
target = ioobj.supported_objects[0].__name__
if target == Block:
return ioobj.write_block
elif target == Segment:
return ioobj.write_segment
elif not target:
return ioobj.write
elif hasattr(target, 'lower'):
return getattr(ioobj, 'write_' + target.lower())
def read_generic(ioobj, target=None, lazy=False, readall=False,
return_reader=False):
"""
Read the target object from a file using the given neo io object ioobj.
    If target is None, use the first supported_objects from ioobj
    If target is False, use the 'read' method.
    If target is the Block or Segment class, use read_block or read_segment,
    respectively.
    If target is a string, use 'read_'+target.
    The lazy parameter is passed to the reader. Default is False.
If readall is True, use the read_all_ method instead of the read_ method.
Default is False.
    If return_reader is True, return the io reader function as well as the
    object: return obj, obj_reader. Default is False.
"""
obj_reader = create_generic_reader(ioobj, target=target, readall=readall)
obj = obj_reader(lazy=lazy)
if return_reader:
return obj, obj_reader
return obj
def iter_read_objects(ioclass, filenames, directory=None, target=None,
return_path=False, return_ioobj=False,
return_reader=False, clean=False, readall=False,
lazy=False):
"""
Iterate over objects read from a list of filenames.
If target is None, use the first supported_objects from ioobj
If target is False, use the 'read' method.
If target is the Block or Segment class, use read_block or read_segment,
respectively.
If target is a string, use 'read_'+target.
If directory is not None and path is not an absolute path already,
use the file from the given directory.
If return_path is True, yield the full path of the file along with
the object. yield obj, path.
If return_ioobj is True, yield the io object as well as the object.
yield obj, ioobj. Default is False.
If return_reader is True, yield the io reader function as well as the
object. yield obj, reader. Default is False.
If some combination of return_path, return_ioobj, and return_reader
is True, they are yielded in the order: obj, path, ioobj, reader.
If clean is True, try to delete existing versions of the file
before creating the io object. Default is False.
    The lazy parameter is passed to the reader. Default is False.
If readall is True, use the read_all_ method instead of the read_ method.
Default is False.
"""
for obj_reader, path, ioobj in iter_generic_readers(ioclass, filenames,
directory=directory,
target=target,
return_path=True,
return_ioobj=True,
clean=clean,
readall=readall):
obj = obj_reader(lazy=lazy)
if not return_path and not return_ioobj and not return_reader:
yield obj
else:
obj = (obj,)
if return_path:
obj = obj + (path,)
if return_ioobj:
obj = obj + (ioobj,)
if return_reader:
obj = obj + (obj_reader,)
yield obj
def write_generic(ioobj, target=None, obj=None, return_writer=False):
"""
Write the target object to a file using the given neo io object ioobj.
If target is None, use the first supported_objects from ioobj
If target is False, use the 'write' method.
If target is the Block or Segment class, use write_block or write_segment,
respectively.
If target is a string, use 'write_'+target.
obj is the object to write. If obj is None, an object is created
automatically for the io class.
    If return_writer is True, return the io writer function as well as the
    object: return obj, obj_writer. Default is False.
"""
if obj is None:
supported_objects = ioobj.supported_objects
obj = generate_from_supported_objects(supported_objects)
obj_writer = create_generic_writer(ioobj, target=target)
obj_writer(obj)
if return_writer:
return obj, obj_writer
return obj
|
from base64 import b64decode
from cryptography.hazmat.backends import default_backend
from cryptography.x509 import load_der_x509_certificate
from django.conf import settings
import jwt
from lxml import etree
import requests
from urllib import urlencode
AUTHORITY = getattr(settings, 'AAD_AUTHORITY', 'https://login.microsoftonline.com')
SCOPE = getattr(settings, 'AAD_SCOPE', 'openid')
RESPONSE_TYPE = getattr(settings, 'AAD_RESPONSE_TYPE', 'id_token')
RESPONSE_MODE = getattr(settings, 'AAD_RESPONSE_MODE', 'form_post')
TENANT_ID = getattr(settings, 'AAD_TENANT_ID')
CLIENT_ID = getattr(settings, 'AAD_CLIENT_ID')
def get_login_url(authority=AUTHORITY, response_type=RESPONSE_TYPE, response_mode=RESPONSE_MODE, scope=SCOPE, client_id=CLIENT_ID, redirect_uri=None, nonce=None, state=None):
param_dict = {
'response_type': response_type,
'response_mode': response_mode,
'scope': scope,
'client_id': client_id,
}
if redirect_uri is not None:
param_dict['redirect_uri'] = redirect_uri
if nonce is not None:
param_dict['nonce'] = nonce
    if state is not None:
param_dict['state'] = state
params = urlencode(param_dict)
    return '{authority}/common/oauth2/authorize?{params}'.format(
        authority=authority,
        params=params,
    )
def get_logout_url(redirect_uri, authority=AUTHORITY):
    params = urlencode({
        'post_logout_redirect_uri': redirect_uri,
    })
    return '{authority}/common/oauth2/logout?{params}'.format(
        authority=authority,
        params=params,
    )
def get_federation_metadata_document_url(authority=AUTHORITY, tenant_id=TENANT_ID):
    return '{authority}/{tenant_id}/federationmetadata/2007-06/federationmetadata.xml'.format(
        authority=authority,
        tenant_id=tenant_id,
    )
def parse_x509_DER_list(federation_metadata_document):
document = etree.fromstring(federation_metadata_document)
certificate_elems = document.findall('.//{http://www.w3.org/2000/09/xmldsig#}X509Certificate')
b64encoded_DERs = {certificate_elem.text for certificate_elem in certificate_elems}
return [b64decode(b64encoded_DER) for b64encoded_DER in b64encoded_DERs]
def get_public_keys():
try:
federation_metadata_document_url = get_federation_metadata_document_url()
response = requests.get(federation_metadata_document_url)
        response.raise_for_status()
response.encoding = response.apparent_encoding
x509_DER_list = parse_x509_DER_list(response.text.encode('utf-8'))
keys = [load_der_x509_certificate(x509_DER, default_backend()).public_key() for x509_DER in x509_DER_list]
except:
keys = []
return keys
def get_email_from_token(token=None, audience=CLIENT_ID, nonce=None):
for key in get_public_keys():
try:
payload = jwt.decode(token, key=key, audience=audience)
if payload['nonce'] != nonce:
continue
return payload['upn']
except (jwt.InvalidTokenError, IndexError) as e:
pass
return None
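# Illustrative usage sketch (the redirect URI, nonce and state values below are assumptions):
#     login_url = get_login_url(redirect_uri='https://example.com/auth/callback',
#                               nonce=generated_nonce, state=opaque_state)
#     email = get_email_from_token(token=posted_id_token, nonce=generated_nonce)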
|
from Data.Utils.custom_dataset import CustomDataset as CD
import matplotlib.pyplot as plt
# test of the custom_dataset class
dataset = CD('Data/Datasets','training')
# __getitem__(index)
sample = dataset[4]
print(type(sample['image']),type(sample['label']))
|
import sys
import json
import os
import errno
import subprocess
import platform
import pathlib
def getNeedsUpdate(scriptFile, libraryFile, sourceFile, directory, pattern):
referenceChangeTime = os.path.getmtime(scriptFile)
libraryChangeTime = os.path.getmtime(libraryFile)
if libraryChangeTime > referenceChangeTime:
referenceChangeTime = libraryChangeTime
sourceChangeTime = os.path.getmtime(sourceFile)
if sourceChangeTime > referenceChangeTime:
referenceChangeTime = sourceChangeTime
pathlist = pathlib.Path(directory).glob(pattern)
counter = 0
for path in pathlist:
counter += 1
path_in_str = str(path)
fileChangeTime = os.path.getmtime(path_in_str)
if referenceChangeTime > fileChangeTime:
return True
return counter == 0
def removePermutations(directory, pattern):
pathlist = pathlib.Path(directory).glob(pattern)
for path in pathlist:
path.unlink()
def main():
if len(sys.argv) < 4:
print('Specify shader json file followed by requested formats as comma separated list with no spaces (dxil,cso,spirv,metal), output directory path [and optional resource folder relative path] as parameters')
return
with open(sys.argv[1], 'r') as sourceJsonData:
sourceJson = json.load(sourceJsonData)
if not sourceJson:
print('No data found.')
return
pythonExecutable = sys.executable
enableDebugSymbols = True
outDirName = sys.argv[3]
if not os.path.exists(outDirName):
try:
os.makedirs(outDirName)
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
resourceRelativePath = None
if len(sys.argv) >= 5:
resourceRelativePath = sys.argv[4]
if not resourceRelativePath:
resourceRelativePath = ''
jsonDirectory, jsonFileName = os.path.split(sys.argv[1])
destinationJson = list()
shaderConductorCmdPath = os.path.dirname(sys.argv[0])
supportedFormats = ['dxil', 'cso', 'spirv', 'metal']
shaderConductorExectutableName = 'ShaderConductorCmd'
if platform.system() == 'Darwin':
supportedFormats = ['spirv', 'metal']
elif platform.system() == 'Windows':
preprocessHLSLPath = os.path.join(shaderConductorCmdPath, 'preprocessForHLSL.py')
shaderConductorExectutableName = 'ShaderConductorCmd.exe'
fxcCmdPath = 'C:/Program Files (x86)/Windows Kits/10/bin/x64/fxc.exe'
elif platform.system() == 'Linux':
supportedFormats = ['spirv', 'metal']
else:
print('Script needs to be updated with ShaderConductor path for platform: ' + platform.system())
return
shaderConductorSearchPath = pathlib.Path(os.path.join(shaderConductorCmdPath, 'Vendor/ShaderConductor/Build'))
for path in shaderConductorSearchPath.glob('**/' + shaderConductorExectutableName):
print(path)
shaderConductorCmdPath = path
break
requestedFormats = sys.argv[2].split(',')
outFormats = list()
for request in requestedFormats:
if request in supportedFormats:
outFormats.append(request)
hlslFile = False
for shaderFile in sourceJson:
if not 'file' in shaderFile or not 'shaders' in shaderFile:
continue
sourceFile = shaderFile['file']
shaders = shaderFile['shaders']
filePath, extension = os.path.splitext(sourceFile)
filePath, fileName = os.path.split(filePath)
sourceFile = os.path.join(jsonDirectory, sourceFile)
if 'cso' in outFormats:
hlslFile = os.path.join(outDirName, fileName + '.hlsl')
subprocess.call([pythonExecutable, preprocessHLSLPath, sourceFile, hlslFile])
for shader in shaders:
if not 'name' in shader or not 'type' in shader:
continue
if shader['type'] == 'vertex':
shaderType = 'vs'
elif shader['type'] == 'fragment':
shaderType = 'ps'
elif shader['type'] == 'compute':
shaderType = 'cs'
shaderSignature = None
shaderOptionsList = None
shaderOptionsExcludesList = None
shaderOptionsDependenciesList = None
shaderOptionsDict = None
if 'signature' in shader:
shaderSignature = shader['signature']
if 'options' in shaderSignature:
shaderOptions = shaderSignature['options']
if type(shaderOptions) is list:
shaderOptionsList = shaderOptions
elif type(shaderOptions) is dict:
shaderOptionsDict = shaderOptions
if "defines" in shaderOptionsDict:
shaderOptionsList = shaderOptionsDict["defines"]
if "excludes" in shaderOptionsDict:
shaderOptionsExcludesList = shaderOptionsDict["excludes"]
if "dependencies" in shaderOptionsDict:
shaderOptionsDependenciesList = shaderOptionsDict["dependencies"]
entryName = shader['name']
destinationShaderList = list()
destinationShader = dict()
destinationShader['type'] = shader['type']
destinationShader['name'] = entryName
if shaderSignature:
destinationShader['signature'] = shaderSignature;
destinationShaderList.append(destinationShader)
destinationShaderFile = dict()
destinationShaderFile['shaders'] = destinationShaderList
permutations = list()
if shaderOptionsList:
permutationCount = 2**len(shaderOptionsList)
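                # Every listed option is treated as a binary flag, so k options expand into 2**k
                # candidate permutations; the loop index i doubles as the permutation identifier
                # and its bits select which options are defined as 1.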
for i in range(0, permutationCount):
permutation = dict()
permutation["parameters"] = list()
permutation["identifier"] = i
permutationOptions = list()
for n, option in enumerate(shaderOptionsList):
permutation["parameters"].append('-D')
permutationValue = '0'
if(i & (1 << n)) != 0:
permutationValue = '1'
permutationOptions.append(option)
permutation["parameters"].append(option + '=' + permutationValue)
isValidPermutation = True
if shaderOptionsExcludesList:
for exclude in shaderOptionsExcludesList:
isValidPermutation = False
for check in exclude:
if not check in permutationOptions:
isValidPermutation = True
break
if not isValidPermutation:
print("excluding permutation " + str(i) + ": " + str(permutationOptions))
break
if isValidPermutation and shaderOptionsDependenciesList:
for option in permutationOptions:
if option not in shaderOptionsDependenciesList:
continue
optionHasDependencies = False
while True:
option = shaderOptionsDependenciesList[option]
if option not in permutationOptions:
isValidPermutation = False
break
if option not in shaderOptionsDependenciesList:
break
if not isValidPermutation:
print("excluding permutation " + str(i) + " because of missing dependencies: " + str(permutationOptions))
break
if isValidPermutation:
permutations.append(permutation)
else:
permutation = dict()
permutation["parameters"] = list()
permutation["identifier"] = 0
permutations.append(permutation)
skipShaderCompiling = False
if not getNeedsUpdate(sys.argv[0], sys.argv[1], sourceFile, outDirName, fileName + "." + shaderType + ".*.*"):
print("Shaders for file " + sourceFile + " are already up to date. Skipping.")
skipShaderCompiling = True
for outFormat in outFormats:
if outFormat == 'dxil':
compilerOutFormat = 'dxil'
destinationShaderFile['file~d3d12'] = resourceRelativePath + '/' + fileName + '.' + shaderType + '.' + outFormat
elif outFormat == 'cso':
compilerOutFormat = 'cso'
destinationShaderFile['file~d3d12'] = resourceRelativePath + '/' + fileName + '.' + shaderType + '.' + outFormat
elif outFormat == 'spirv':
compilerOutFormat = 'spirv'
destinationShaderFile['file~vulkan'] = resourceRelativePath + '/' + fileName + '.' + shaderType + '.' + outFormat
elif outFormat == 'metal':
compilerOutFormat = 'msl_macos'
if platform.system() == 'Darwin':
destinationShaderFile['file~metal'] = resourceRelativePath + '/' + fileName + '.' + shaderType + '.metallib'
else:
destinationShaderFile['file~metal'] = resourceRelativePath + '/' + fileName + '.' + shaderType + '.metal'
if not skipShaderCompiling:
if outFormat == 'metal':
removePermutations(outDirName, fileName + "." + shaderType + ".*.metal")
removePermutations(outDirName, fileName + "." + shaderType + ".*.metallib")
else:
removePermutations(outDirName, fileName + "." + shaderType + ".*."+outFormat)
for permutationDict in permutations:
permutation = permutationDict["parameters"]
permutationOutFile = os.path.join(outDirName, fileName + '.' + shaderType + '.' + str(permutationDict["identifier"]) + '.' + outFormat)
if outFormat == 'cso':
parameterList = [fxcCmdPath, '-I', '.', '-Fo', permutationOutFile, '-E', entryName, '-T', shaderType + '_5_1', hlslFile]
else:
parameterList = [shaderConductorCmdPath, '-I', sourceFile, '-O', permutationOutFile, '--minorshadermodel', '1', '-E', entryName, '-S', shaderType, '-T', compilerOutFormat]
if outFormat == 'dxil' or outFormat == 'cso':
parameterList.append("-DRN_RENDERER_D3D12=1")
permutation = [p.replace('RN_USE_MULTIVIEW', '__RN_USE_MULTIVIEW__') for p in permutation] #exclude multiview stuff for d3d12 without effecting the permutation index for now
elif outFormat == 'spirv':
parameterList.append("-DRN_RENDERER_VULKAN=1")
elif outFormat == 'metal':
parameterList.append("-DRN_RENDERER_METAL=1")
if len(permutation) > 0:
parameterList.extend(permutation)
if not skipShaderCompiling:
print(parameterList)
subprocess.call(parameterList)
if outFormat == 'metal' and platform.system() == 'Darwin':
bitcodeOutFile = permutationOutFile + '.air'
libOutFile = os.path.join(outDirName, fileName + '.' + shaderType + '.' + str(permutationDict["identifier"]) + '.metallib')
if enableDebugSymbols:
subprocess.call(['xcrun', '-sdk', 'macosx', 'metal', '-gline-tables-only', '-MO', '-c', permutationOutFile, '-o', bitcodeOutFile])
else:
subprocess.call(['xcrun', '-sdk', 'macosx', 'metal', '-c', permutationOutFile, '-o', bitcodeOutFile])
subprocess.call(['xcrun', '-sdk', 'macosx', 'metallib', bitcodeOutFile, '-o', libOutFile])
os.remove(permutationOutFile)
os.remove(bitcodeOutFile)
destinationJson.append(destinationShaderFile)
if hlslFile:
os.remove(hlslFile)
with open(os.path.join(outDirName, 'Shaders.json'), 'w') as destinationJsonData:
json.dump(destinationJson, destinationJsonData, indent=4, sort_keys=True)
if __name__ == '__main__':
main()
|
from .cord_19_data import CORD19Data
|
from django.contrib.admin.views.main import (
ChangeList, IGNORED_PARAMS as BASE_IGNORED_PARAMS
)
from django.core.paginator import InvalidPage
from django.contrib.admin.options import IncorrectLookupParameters
from django.db.models import Count, F, Q
from django.db.models import Exists
from django.db.models import OuterRef, Subquery
from django.contrib.admin.views.main import (
ALL_VAR, ORDER_VAR, PAGE_VAR, SEARCH_VAR, IS_POPUP_VAR, TO_FIELD_VAR
)
# Additional changelist settings
LAYOUT_VAR = 'sty'
IGNORED_PARAMS = BASE_IGNORED_PARAMS + (LAYOUT_VAR, )
TREE_LAYOUT = 'tree'
LIST_LAYOUT = 'list'
ORDERED_DAG_SEQUENCE_FIELD_NAME = '_sequence'
class DagChangeList(ChangeList):
def allow_node_drag(self, request):
draggable = True
for k, v in self.params.items():
if k in [ALL_VAR, IS_POPUP_VAR, TO_FIELD_VAR, PAGE_VAR]:
continue
elif ORDER_VAR == k:
                if v != '' and self.model.sequence_manager:  # sorted nodes, therefore disable dragging if sorting is enabled
draggable = False
elif SEARCH_VAR == k:
if v != '':
                    draggable = False  # disable moving nodes if filters are active
elif LAYOUT_VAR == k:
if v != TREE_LAYOUT:
                    draggable = False  # disable dragging if not in tree view
return draggable
# Copied from Django "version": "==2.2.19"
# Changed: IGNORED_PARAMS is updated to include LAYOUT_VAR
def get_filters_params(self, params=None):
"""
Return all params except IGNORED_PARAMS.
"""
params = params or self.params
lookup_params = params.copy() # a dictionary of the query string
# Remove all the parameters that are globally and systematically
# ignored.
for ignored in IGNORED_PARAMS:
if ignored in lookup_params:
del lookup_params[ignored]
return lookup_params
def _convert_order_node_to_edge(self, ordering):
for part in ordering:
if isinstance(part, str):
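                # str.startswith returns a bool, so the slice below is part[:1] ('-') for
                # descending fields and part[:0] ('') for ascending ones.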
order_type = part[:part.startswith('-')]
base = part.lstrip('-')
elif isinstance(part, F):
base = part.name
order_type = ''
if base == ORDERED_DAG_SEQUENCE_FIELD_NAME:
yield part
else:
yield "{}child__{}".format(order_type, base)
def get_results_tree(self, request):
qs = self.queryset \
.annotate(
children_count=Count('children', distinct=True),
usage_count=Subquery(
self.model.get_node_model().objects
.filter(pk=OuterRef('id'))
.annotate(usage_count=Count('parents__pk'))
.values('usage_count')
)
)
qs = self.apply_select_related(qs)
ordering = self.get_ordering(request, qs)
if self.model.sequence_manager:
ordering = ['dag_sequence_path', ]
else:
ordering = ['dag_pk_path', ]
qs = qs.order_by(*ordering)
return qs
def get_results_edgetree(self, request):
qs = (
self.model.get_edge_model()
.objects
.filter(Q(child_id__in=self.queryset.values('pk')))
.annotate(
siblings_count=Count('parent__children', distinct=True),
is_child=Count('parent__parents'),
children_count=Count('child__children', distinct=True),
parent_used=Exists(self.queryset.filter(pk=OuterRef('parent_id')))
))
if not qs.query.select_related:
qs = self.apply_select_related(qs, for_edge=True)
if self.model.sequence_manager:
order_component = self.model.sequence_manager \
.get_edge_rel_sort_query_component(self.model, 'child', 'parent')
qs = qs.annotate(**{ORDERED_DAG_SEQUENCE_FIELD_NAME: order_component})
# Set ordering.
ordering = list(self._convert_order_node_to_edge(self.get_ordering(request, qs,)))
qs = qs.order_by(*ordering)
return qs
def get_results_list(self, request):
qs = self.queryset.annotate(
children_count=Count('children', distinct=True),
usage_count=Subquery(
self.model.get_node_model()
.objects
.filter(pk=OuterRef('id'))
.annotate(usage_count=Count('parents__pk'))
.values('usage_count')
)
)
qs = self.apply_select_related(qs)
ordering = self.get_ordering(request, qs)
qs = qs.order_by(*ordering)
return qs
def get_results(self, request):
self.result_list_extra = []
# Add annotation to show detached nodes here
# to self.queryset look for no parent matching path / parents
if self.get_layout_style(request) == LIST_LAYOUT:
qs = self.get_results_list(request)
else:
qs = self.get_results_tree(request)
paginator = self.model_admin.get_paginator(request, qs, self.list_per_page)
# Get the number of objects, with admin filters applied.
result_count = paginator.count
# Get the total number of objects, with no admin filters applied.
if self.model_admin.show_full_result_count:
full_result_count = self.root_queryset.count()
else:
full_result_count = None
can_show_all = result_count <= self.list_max_show_all
multi_page = result_count > self.list_per_page
# Get the list of objects to display on this page.
if (self.show_all and can_show_all) or not multi_page:
result_list = qs._clone()
else:
try:
result_list = paginator.page(self.page_num + 1).object_list
except InvalidPage:
raise IncorrectLookupParameters
self.result_count = result_count
self.show_full_result_count = self.model_admin.show_full_result_count
# Admin actions are shown if there is at least one entry
# or if entries are not counted because show_full_result_count is disabled
self.show_admin_actions = not self.show_full_result_count or bool(full_result_count)
self.full_result_count = full_result_count
self.result_list = result_list
self.can_show_all = can_show_all
self.multi_page = multi_page
self.paginator = paginator
def _get_default_layout_style(self):
layout_style = 'tree'
if hasattr(self.model_admin, 'layout_style'):
layout_style = self.model_admin.layout_style
return layout_style
def get_layout_style(self, request):
params = self.params
return params.get(LAYOUT_VAR, None) or self._get_default_layout_style()
def _get_default_ordering(self):
ordering = []
if self.model_admin.ordering:
ordering = self.model_admin.ordering
elif self.lookup_opts.ordering:
ordering = self.lookup_opts.ordering
return ordering
def apply_select_related(self, qs, for_edge=False):
if self.list_select_related is True:
if for_edge:
return qs.select_related()
return qs.select_related()
if self.list_select_related is False:
if self.has_related_field_in_list_display():
return qs.select_related()
if for_edge:
return qs.select_related()
return qs
if self.list_select_related:
if for_edge:
related = ["child__{}".format(relation) for relation in self.list_select_related]
return qs.select_related(*related)
return qs.select_related(*self.list_select_related)
return qs
|
# A class in Python is a template
# To define a class:
class Name:
pass
# A class must be instantiated. In other words, we must create an instance of the class, in order to breathe life into the schematic.
# Instantiating a class looks a lot like calling a function. We would be able to create an instance of our defined Name as follows:
# A class instance is also called as object
name1 = Name()
# When we want to share data, or make it available to different instances of a class, we can create a class variable (also called a class attribute)
# define a class
class Name:
name = 'anay' # class variable or attribute
# instance of the class
name1 = Name()
print(name1.name)
# Methods are just Python functions, but they are defined inside a class.
# The first argument in a method is always the object that is calling the method. Convention recommends that we name this first argument self.
# Methods always have at least this one argument.
class Name:
name = 'anay' # class variable or attribute
def print_message(self):
        print(Name.name + ' Good Day')
# instance of the class
name1 = Name()
name1.print_message()
# Python constructors or dunder methods
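# A constructor is the special __init__ dunder ("double underscore") method: Python calls it
# automatically when a class is instantiated, letting each object set up its own data.
# A minimal sketch (the Person class and its name argument here are only illustrative):
class Person:
    def __init__(self, name):
        # runs at instantiation time; stores per-instance data on self
        self.name = name
    def greet(self):
        print(self.name + ' says Good Day')
person1 = Person('anay')
person1.greet()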
|
from PIL import Image
import numpy as np
from OpenGL.GL import *
from os import path
def MTL(filename, withtextures=True):
contents = {}
mtl = None
fpath = ""
if filename.rfind("/") != -1:
fpath = filename[:filename.rfind("/")+1]
for line in open(filename, "r"):
if line.startswith('#'): continue
values = line.split()
if not values: continue
if values[0] == 'newmtl':
mtl = contents[values[1]] = {}
elif mtl is None:
raise ValueError("mtl file doesn't start with newmtl stmt")
elif values[0] == 'map_Kd':
# load the texture referred to by this declaration
mtl[values[0]] = values[1]
if not path.isfile(mtl['map_Kd']):
if path.isfile(fpath + mtl['map_Kd']):
mtl['map_Kd'] = fpath + mtl['map_Kd']
if withtextures:
surf = Image.open(mtl['map_Kd']).convert("RGBA")
                img = np.frombuffer(surf.tobytes(), np.uint8)
ix, iy = surf.size
texid = mtl['texture_Kd'] = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, texid)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,
GL_LINEAR)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, ix, iy, 0, GL_RGBA,
GL_UNSIGNED_BYTE, img)
else:
mtl[values[0]] = tuple([float(i) for i in values[1:]])
return contents
class OBJ:
def __init__(self, filename, swapyz=False, withtextures=True):
"""Loads a Wavefront OBJ file. """
self.vertices = []
self.normals = []
self.texcoords = []
self.faces = []
material = None
path = ""
if filename.rfind("/") != -1:
path = filename[:filename.rfind("/")+1]
for line in open(filename, "r"):
if line.startswith('#'): continue
values = line.split()
if not values: continue
if values[0] == 'v':
v = [float(i) for i in values[1:4]]
# print(v)
if swapyz:
v[1], v[2] = v[2], v[1]
# print("Verticies", len(tuple(v)), tuple(v))
self.vertices.append(tuple(v))
elif values[0] == 'vn':
v = [float(i) for i in values[1:4]]
if swapyz:
v[1], v[2] = v[2], v[1]
# print("Normals", len(tuple(v)), tuple(v))
self.normals.append(tuple(v))
elif values[0] == 'vt':
v = [float(i) for i in values[1:3]]
# print("Textures", len(tuple(v)), tuple(v))
self.texcoords.append(tuple(v))
elif values[0] in ('usemtl', 'usemat'):
material = values[1]
elif values[0] == 'mtllib':
self.mtl = MTL(path+values[1], withtextures)
elif values[0] == 'f':
face = []
texcoords = []
norms = []
for v in values[1:]:
w = v.split('/')
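                # OBJ faces index vertices as 'v/vt/vn' (1-based); missing texture or normal
                # indices are stored as 0 here and skipped later when building the display list.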
face.append(int(w[0]))
if len(w) >= 2 and len(w[1]) > 0:
texcoords.append(int(w[1]))
else:
texcoords.append(0)
if len(w) >= 3 and len(w[2]) > 0:
norms.append(int(w[2]))
else:
norms.append(0)
self.faces.append((face, norms, texcoords, material))
self.gl_list = glGenLists(1)
glNewList(self.gl_list, GL_COMPILE)
glEnable(GL_TEXTURE_2D)
glFrontFace(GL_CCW)
self.generateList()
glDisable(GL_TEXTURE_2D)
glEndList()
def generateList(self):
for face in self.faces:
vertices, normals, texture_coords, material = face
mtl = self.mtl[material]
if 'texture_Kd' in mtl:
# use diffuse texmap
glBindTexture(GL_TEXTURE_2D, mtl['texture_Kd'])
else:
# just use diffuse colour
glColor(*mtl['Kd'])
glBegin(GL_POLYGON)
for i in range(len(vertices)):
if normals[i] > 0:
glNormal3fv(self.normals[normals[i] - 1])
if texture_coords[i] > 0:
glTexCoord2fv(self.texcoords[texture_coords[i] - 1])
glVertex3fv(self.vertices[vertices[i] - 1])
glEnd()
def draw(self):
        glCallList(self.gl_list)
|
#!/usr/bin/env python3
# SPDX-FileCopyrightText: 2009 Fermi Research Alliance, LLC
# SPDX-License-Identifier: Apache-2.0
"""
Project:
glideinWMS
Description:
unit test for glideinwms/creation/lib/cvWCreate.py
Author:
Dennis Box <[email protected]>
"""
import os
import unittest
from unittest import mock
import xmlrunner
from glideinwms.creation.lib.cvWCreate import (
create_client_condor_config,
create_client_mapfile,
filter_unwanted_config_attrs,
get_template,
)
class Test_cvWCreate(unittest.TestCase):
def test_create_client_mapfile(self):
mapfile_fname = "condor_mapfile"
my_DN = (
"/DC=org/DC=incommon/C=US/ST=IL/L=Batavia/O=Fermi Research Alliance/OU=Fermilab/CN=fermicloud001.fnal.gov"
)
factory_DNs = [
"/DC=org/DC=incommon/C=US/ST=IL/L=Batavia/O=Fermi Research Alliance/OU=Fermilab/CN=fermicloud010.fnal.gov",
"/DC=org/DC=incommon/C=US/ST=IL/L=Batavia/O=Fermi Research Alliance/OU=Fermilab/CN=fermicloud011.fnal.gov",
]
schedd_DNs = [
"/DC=org/DC=incommon/C=US/ST=IL/L=Batavia/O=Fermi Research Alliance/OU=Fermilab/CN=fermicloud020.fnal.gov",
"/DC=org/DC=incommon/C=US/ST=IL/L=Batavia/O=Fermi Research Alliance/OU=Fermilab/CN=fermicloud021.fnal.gov",
]
collector_DNs = [
"/DC=org/DC=incommon/C=US/ST=IL/L=Batavia/O=Fermi Research Alliance/OU=Fermilab/CN=fermicloud030.fnal.gov"
]
pilot_DNs = []
create_client_mapfile(mapfile_fname, my_DN, factory_DNs, schedd_DNs, collector_DNs, pilot_DNs)
self.assertTrue(os.path.exists(mapfile_fname))
# the first 6 entries of the mapfile should have
# these names, everything after that should be
# named anonymous
m_dat = ["me", "factory0", "factory1", "schedd0", "schedd1", "collector0", "anonymous"]
idx = 0
for line in open(mapfile_fname):
parts = line.split()
self.assertEqual(parts[-1], m_dat[idx])
if idx + 1 < len(m_dat):
idx += 1
os.remove(mapfile_fname)
def test_get_template(self):
# test that we can fetch an existing template
glideinWMS_dir = ".."
template_name = "gwms-factory.service"
tmp = get_template(template_name, glideinWMS_dir)
self.assertNotEqual(tmp, "")
# test that fetching a nonexistent template throws
# the correct Exception
try:
bad = get_template("I-dont-exist", glideinWMS_dir)
assert False
except OSError as ior:
pass
def test_create_client_condor_config(self):
# use mock output from condor_config_val -dump
# to create a condor_mapfile
config_fname = "condor_config"
mapfile_fname = "condor_mapfile"
collector_nodes = ["fermicloud001.fnal.gov", "fermicloud002.fnal.gov"]
classad_proxy = "/tmp/classad_proxy"
with mock.patch("glideinwms.lib.condorExe.exe_cmd") as m_exe_cmd:
with open("fixtures/frontend/ccvd.fixture") as fil:
m_exe_cmd.return_value = fil.readlines()
create_client_condor_config(config_fname, mapfile_fname, collector_nodes, classad_proxy)
self.assertTrue(os.path.exists(config_fname))
os.remove(config_fname)
if __name__ == "__main__":
unittest.main(testRunner=xmlrunner.XMLTestRunner(output="unittests-reports"))
|
"""WSGI script for the marsha project."""
from configurations.wsgi import get_wsgi_application
application = get_wsgi_application() # pylint: disable=invalid-name
|
import http.client
import json
server_key = '<server key>'
client_token = '<token received by client>'
notification = {
'title': 'Hello world',
'body': 'This is a very long message',
'icon': 'firebase-logo.png',
'click_action': 'http://localhost:8081'
}
headers = {
'Authorization': 'key=' + server_key,
'Content-Type': 'application/json'
}
body = {
'notification': notification,
'to': client_token
}
conn = http.client.HTTPSConnection('fcm.googleapis.com')
conn.request('POST', '/fcm/send', json.dumps(body), headers)
response = conn.getresponse()
print(response.status, response.reason)
print(response.read().decode())
|
from hypothesis import given
from hypothesis import example
from hypothesis import assume
from hypothesis import strategies as st
from hypothesis.extra.numpy import arrays
import pytest
import numpy as np
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import os.path
import time
import tempfile
import tensorflow as tf
import StringIO
import random
import string
import mmap
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from google.protobuf import text_format
from syntaxnet import sentence_pb2
from syntaxnet import graph_builder
from syntaxnet import structured_graph_builder
from syntaxnet.ops import gen_parser_ops
from syntaxnet import task_spec_pb2
import BaseHTTPServer
import cgi
from BaseHTTPServer import BaseHTTPRequestHandler
from BaseHTTPServer import HTTPServer
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('task_context', '',
'Path to a task context with inputs and parameters for '
'feature extractors.')
flags.DEFINE_string('resource_dir', '',
'Optional base directory for task context resources.')
flags.DEFINE_string('model_path', '', 'Path to model parameters.')
flags.DEFINE_string('arg_prefix', None, 'Prefix for context parameters.')
flags.DEFINE_string('graph_builder', 'greedy',
'Which graph builder to use, either greedy or structured.')
flags.DEFINE_string('input', 'stdin',
'Name of the context input to read data from.')
flags.DEFINE_string('output', 'stdout',
'Name of the context input to write data to.')
flags.DEFINE_string('hidden_layer_sizes', '200,200',
'Comma separated list of hidden layer sizes.')
flags.DEFINE_integer('batch_size', 32,
'Number of sentences to process in parallel.')
flags.DEFINE_integer('beam_size', 8, 'Number of slots for beam parsing.')
flags.DEFINE_integer('max_steps', 1000, 'Max number of steps to take.')
flags.DEFINE_bool('slim_model', False,
'Whether to expect only averaged variables.')
MODEL_DIR = '/home/ubuntu/models/syntaxnet/models/Chinese'
USE_SLIM_MODEL = True
BATCH_SIZE = 1024
BEAM_SIZE = 8
MAX_STEPS = 1000
TOKENIZER_TASK_CONTEXT = '/home/ubuntu/models/syntaxnet/syntaxnet/models/parsey_universal/context-tokenize-zh.pbtxt'
TOKENIZER_INPUT = 'stdin-untoken'
TOKENIZER_ARG_PREFIX = 'brain_tokenizer_zh'
TOKENIZER_HIDDEN_LAYER = '256,256'
TOKENIZER_OUTPUT = 'stdout-conll'
TOKENIZER_MODEL_PATH = 'tokenizer-params'
TASK_CONTEXT = '/home/ubuntu/models/syntaxnet/syntaxnet/models/parsey_universal/context.pbtxt'
TASK_INPUT = 'stdin-conll'
TASK_OUTPUT = 'stdout-conll'
MORPHER_HIDDEN_LAYER = '64'
MORPHER_ARG_PREFIX = 'brain_morpher'
MORPHER_MODEL_PATH = 'morpher-params'
TAGGER_HIDDEN_LAYER = '64'
TAGGER_ARG_PREFIX = 'brain_tagger'
TAGGER_MODEL_PATH = 'tagger-params'
PARSER_HIDDEN_LAYER = '512,512'
PARSER_ARG_PREFIX = 'brain_parser'
PARSER_MODEL_PATH = 'parser-params'
PORT_NUMBER = 3080
PAGE = u'''
<!DOCTYPE html>
<html>
<body>
<form action="/parse" method="POST">
请输入一个完整句子:<br>
<textarea rows="12" cols="100" type="text" name="sentence" >
</textarea>
<br/>
<input type="submit">
</form>
<br/>
转换结果:
<br/>
<pre>
{0}
</pre>
</body>
</html>
'''
def RewriteContext(task_context, in_corpus_name):
context = task_spec_pb2.TaskSpec()
with gfile.FastGFile(task_context, 'rb') as fin:
text_format.Merge(fin.read(), context)
tf_in = tempfile.NamedTemporaryFile(delete=False)
for resource in context.input:
for part in resource.part:
if part.file_pattern != '-':
part.file_pattern = os.path.join(MODEL_DIR, part.file_pattern)
if resource.name == in_corpus_name:
for part in resource.part:
if part.file_pattern == '-':
part.file_pattern = tf_in.name
  fout = tempfile.NamedTemporaryFile(delete=False)
  fout.write(str(context))
  fout.close()  # flush the rewritten context so it can be read back later
  return fout.name, tf_in.name
def UnderscoreIfEmpty(part):
if not part:
return unicode('_')
return unicode(part)
def GetMorphAttributes(token):
extension = (sentence_pb2.TokenMorphology.morphology)
if not token.HasExtension(extension):
return unicode('_')
morph = token.Extensions[extension]
if not morph:
return unicode('_')
if len(morph.attribute) == 0:
return unicode('_')
attrs = []
for attribute in morph.attribute:
value = attribute.name
if attribute.value != 'on':
value += unicode('=')
value += attribute.value
attrs.append(value)
  return unicode('|').join(attrs)
def ConvertTokenToString(index, token):
fields = []
fields.append(unicode(index + 1))
fields.append(UnderscoreIfEmpty(token.word))
fields.append(unicode('_'))
fields.append(UnderscoreIfEmpty(token.category))
fields.append(UnderscoreIfEmpty(token.tag))
fields.append(GetMorphAttributes(token))
fields.append(unicode(token.head + 1))
fields.append(UnderscoreIfEmpty(token.label))
fields.append(unicode('_'))
fields.append(unicode('_'))
return unicode('\t').join(fields)
def ConvertToString(sentence):
value = unicode('')
lines = []
for index in range(len(sentence.token)):
lines.append(ConvertTokenToString(index, sentence.token[index]))
return unicode('\n').join(lines) + unicode('\n\n')
class ParserEval:
def __init__(self, sess, task_context, arg_prefix, hidden_layer_sizes, model_path, in_corpus_name, out_corpus_name):
self.task_context, self.in_name = RewriteContext(task_context, in_corpus_name)
self.arg_prefix = arg_prefix
self.sess = sess
self.in_corpus_name = in_corpus_name
self.out_corpus_name = out_corpus_name
feature_sizes, domain_sizes, embedding_dims, num_actions = sess.run(
gen_parser_ops.feature_size(task_context=self.task_context,
arg_prefix=self.arg_prefix))
self.feature_sizes = feature_sizes
self.domain_sizes = domain_sizes
self.embedding_dims = embedding_dims
self.num_actions = num_actions
self.hidden_layer_sizes = map(int, hidden_layer_sizes.split(','))
self.parser = structured_graph_builder.StructuredGraphBuilder(
self.num_actions,
self.feature_sizes,
self.domain_sizes,
self.embedding_dims,
self.hidden_layer_sizes,
gate_gradients=True,
arg_prefix=self.arg_prefix,
beam_size=BEAM_SIZE,
max_steps=MAX_STEPS)
self.parser.AddEvaluation(self.task_context,
BATCH_SIZE,
evaluation_max_steps=MAX_STEPS)
self.parser.AddSaver(USE_SLIM_MODEL)
self.sess.run(self.parser.inits.values())
self.parser.saver.restore(self.sess, os.path.join(MODEL_DIR, model_path))
def __del__(self):
os.remove(self.task_context)
# os.remove(self.in_name)
# os.remove(self.out_name)
def Parse(self, sentence):
with open(self.in_name, "w") as f:
f.write(sentence)
self.parser.AddEvaluation(self.task_context,
BATCH_SIZE,
corpus_name=self.in_corpus_name,
evaluation_max_steps=MAX_STEPS)
# tf_documents = self.sess.run([self.parser.evaluation['documents'],])
_, _, tf_documents = self.sess.run([
self.parser.evaluation['epochs'],
self.parser.evaluation['eval_metrics'],
self.parser.evaluation['documents'],
])
assert len(tf_documents) == 1
#print type(tf_documents[len(tf_documents)-1])
doc = sentence_pb2.Sentence()
doc.ParseFromString(tf_documents[len(tf_documents)-1])
#print unicode(doc)
return ConvertToString(doc)
class MyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_HEAD(s):
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
def do_GET(s):
"""Respond to a GET request."""
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(PAGE.format(' ').encode("utf-8"))
def do_POST(s):
form = cgi.FieldStorage(fp=s.rfile,
headers=s.headers,
environ={"REQUEST_METHOD": "POST"})
target_text = ''
for item in form.list:
#print "begin: %s = %s" % (item.name, item.value)
if item.name == 'sentence':
target_text = item.value
if target_text:
target_text = s.parser.Parse(target_text)
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
#print target_text
s.wfile.write(target_text.encode("utf-8"))
def main(unused_argv):
sess = tf.Session()
# parser = ParserEval(sess,
# TOKENIZER_TASK_CONTEXT,
# TOKENIZER_ARG_PREFIX,
# TOKENIZER_HIDDEN_LAYER,
# TOKENIZER_MODEL_PATH,
# TOKENIZER_INPUT,
# TOKENIZER_OUTPUT)
# with tf.Session() as morpher_sess:
# parser = ParserEval(sess,
# TASK_CONTEXT,
# MORPHER_ARG_PREFIX,
# MORPHER_HIDDEN_LAYER,
# MORPHER_MODEL_PATH,
# TASK_INPUT,
# TASK_OUTPUT)
# with tf.Session() as tagger_sess:
parser = ParserEval(sess,
TASK_CONTEXT,
TAGGER_ARG_PREFIX,
TAGGER_HIDDEN_LAYER,
TAGGER_MODEL_PATH,
TASK_INPUT,
TASK_OUTPUT)
# with tf.Session() as parser_sess:
# parser = ParserEval(parser_sess,
# TASK_CONTEXT,
# PARSER_ARG_PREFIX,
# PARSER_HIDDEN_LAYER,
# PARSER_MODEL_PATH,
# TASK_INPUT,
# TASK_OUTPUT)
# result = tokenizer.Parse("俄罗斯最新一艘亚森级核动力潜艇喀山号31日在北德文斯克举行下水礼.")
# result = morpher.Parse(result)
# result = tagger.Parse(result)
# result = parser.Parse(result)
# print result
server_class = BaseHTTPServer.HTTPServer
MyHandler.parser = parser
httpd = server_class(('', PORT_NUMBER), MyHandler)
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
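# Hedged usage sketch (not part of the original service): once the server above is
# listening on PORT_NUMBER, a Python 2 client could POST a sentence to /parse roughly
# like this. The host name and the use of urllib/urllib2 are assumptions.
def ExampleParseRequest(sentence, host='localhost', port=PORT_NUMBER):
  """Illustrative only: send one sentence to /parse and return the response text."""
  import urllib
  import urllib2
  data = urllib.urlencode({'sentence': sentence.encode('utf-8')})
  response = urllib2.urlopen('http://%s:%d/parse' % (host, port), data)
  return response.read().decode('utf-8')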
if __name__ == '__main__':
tf.app.run()
|
# automation package
__version__ = "0.00.02"
|
from django.http import HttpResponseRedirect
def responsive_switch(request,action):
if action=="go":
request.session['desktop_mode']=True
elif action=="leave":
request.session['desktop_mode']=False
return HttpResponseRedirect(request.META.get("HTTP_REFERER", ""))
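# Hedged wiring sketch (assumption, not part of this views module): the view expects
# an `action` of "go" or "leave", so a matching urls.py entry might look like:
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path("responsive/<str:action>/", views.responsive_switch, name="responsive_switch"),
#   ]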
|
import itertools
import numpy as np
from matplotlib.cm import get_cmap
from bridge_sim.model import Config, Point
from bridge_sim.sim import without
from bridge_sim.sim.build import get_bridge_nodes, get_bridge_shells
from bridge_sim.sim.model import BuildContext, Node, Shell, SimParams
from lib.plot import default_cmap, legend_marker_size, parula_cmap, plt
from lib.plot.geometry import top_view_bridge
from lib.plot.geometry.shell import shell_properties_3d, shell_properties_top_view
from lib.plot.geometry.node import node_scatter_3d
from lib.plot.responses import plot_deck_sensors
from bridge_sim.util import flatten, safe_str
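# NOTE: `healthy_and_cracked_scenarios`, used by the functions below, is not imported
# in this file; it is assumed to be provided by the surrounding package (a list of
# damage scenarios exposing `.use(config, sim_params)`).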
def make_shell_properties_3d(original_c: Config):
"""Make plots of the shells in 3D, coloured by material property."""
    # For each damage scenario, build the model and extract the shells.
for damage_scenario in healthy_and_cracked_scenarios:
c, sim_params = damage_scenario.use(original_c, SimParams([]))
for ctx, ctx_name in [
(BuildContext(add_loads=[Point(x=85, y=0, z=0)]), "refined"),
(None, "unrefined"),
]:
bridge_shells = get_bridge_shells(bridge=c.bridge, ctx=ctx)
deck_shells = flatten(bridge_shells[0], Shell)
pier_shells = flatten(bridge_shells[1], Shell)
all_shells = flatten(bridge_shells, Shell)
# For each combination of parameters plot the shells.
for shells_name, shells in [
("pier", pier_shells),
("all", all_shells),
("deck", deck_shells),
]:
for outline, label in itertools.product([True, False], [True, False]):
for prop_name, prop_units, prop_f in [
("Thickness", "m", lambda s: s.thickness),
("Density", "kg/m", lambda s: s.density),
("Poisson's ratio", "m/m", lambda s: s.poissons),
("Young's modulus", "MPa", lambda s: s.youngs),
]:
for cmap in [default_cmap, get_cmap("tab10")]:
shell_properties_3d(
shells=shells,
prop_units=prop_units,
prop_f=prop_f,
cmap=cmap,
outline=outline,
label=label,
colorbar=not label,
)
plt.title(f"{prop_name} of {c.bridge.name}")
plt.savefig(
c.get_image_path(
f"geometry/shells-{ctx_name}-3d",
safe_str(
f"{shells_name}-{prop_name}-outline-{outline}-{cmap.name}"
)
+ ".pdf",
)
)
plt.close()
def make_shell_properties_top_view(
c: Config,
shells_name_: str,
prop_name_: str,
refined_: bool,
outline: bool,
lanes: bool,
):
"""Make plots of the shells in top view, coloured by material property."""
original_c = c
    # For each damage scenario, build the model and extract the shells.
for damage_scenario, damage_name in zip(
healthy_and_cracked_scenarios, [None, "cracked"]
):
c, sim_params = damage_scenario.use(original_c)
for ctx, ctx_name, refined, in [
(
BuildContext(
add_loads=[Point(x=85, y=0, z=0)], refinement_radii=[2, 1, 0.5],
),
"refined",
True,
),
(None, "unrefined", False),
]:
if refined != refined_:
continue
bridge_shells = get_bridge_shells(bridge=c.bridge, ctx=ctx)
deck_shells = flatten(bridge_shells[0], Shell)
pier_shells = flatten(bridge_shells[1], Shell)
all_shells = pier_shells + deck_shells
for shells_name, shells in [
("piers", pier_shells),
("deck", deck_shells),
]:
if shells_name != shells_name_:
continue
for prop_name, prop_units, prop_f in [
("Mesh", "", None),
("Thickness", "m", lambda s: np.around(s.thickness, 3)),
("Density", "kg/m", lambda s: np.around(s.density, 3)),
("Poisson's ratio", "m/m", lambda s: s.poissons),
("Young's modulus", "MPa", lambda s: np.around(s.youngs, 1)),
]:
if prop_name_ not in prop_name.lower():
continue
for cmap in [parula_cmap, default_cmap]:
def top_view():
top_view_bridge(
bridge=c.bridge,
abutments=True,
piers=True,
lanes=lanes,
compass=prop_f is not None,
)
top_view()
shell_properties_top_view(
shells=shells,
prop_f=prop_f,
prop_units=prop_units,
cmap=cmap,
colorbar=prop_f is not None,
# label=prop_f is not None,
outline=outline,
)
top_view()
damage_str = "" if damage_name is None else f" ({damage_name})"
plt.title(
f"{prop_name} of bridge 705's {shells_name}{damage_str}"
)
plt.savefig(
c.get_image_path(
f"geometry/{shells_name}-shells-{ctx_name}-top-view",
safe_str(
f"{prop_name}-{cmap.name}-outline-{outline}-lanes-{lanes}"
)
+ ".pdf",
)
)
plt.close()
if prop_f is None:
break
def make_node_plots(original_c: Config):
"""Make all variations of 3d scatter plots of nodes."""
for damage_scenario in healthy_and_cracked_scenarios:
c, sim_params = damage_scenario.use(original_c, SimParams([]))
for ctx, ctx_name in [
(BuildContext(add_loads=[Point(x=85, y=0, z=0)]), "refined"),
(None, "unrefined"),
]:
bridge_nodes = get_bridge_nodes(bridge=c.bridge, ctx=ctx)
deck_nodes = set(flatten(bridge_nodes[0], Node))
pier_nodes = set(flatten(bridge_nodes[1], Node))
all_nodes = set(flatten(bridge_nodes, Node))
# For each combination of parameters plot the nodes.
for nodes_name, nodes in [
("all", all_nodes),
("deck", deck_nodes),
("pier", pier_nodes),
]:
node_scatter_3d(nodes=nodes)
plt.title(f"Nodes of {c.bridge.name}")
plt.savefig(
c.get_image_path(
f"geometry/nodes-{ctx_name}",
safe_str(f"{nodes_name}") + ".pdf",
)
)
plt.close()
def make_available_sensors_plot(
c: Config, pier_radius: float, track_radius: float, edge_radius: float
):
"""Scatter plot of sensors used for classification."""
top_view_bridge(c.bridge, abutments=True, piers=True, compass=False)
plot_deck_sensors(
c=c,
without=without.points(
c=c,
pier_radius=pier_radius,
track_radius=track_radius,
edge_radius=edge_radius,
),
label=True,
)
for l_i, load in enumerate([Point(x=21, z=-8.4), Point(x=33, z=-4)]):
plt.scatter(
[load.x],
[load.z],
color="red",
marker="o",
s=50,
label="Sensor of interest" if l_i == 0 else None,
)
legend_marker_size(plt.legend(), 50)
plt.title(f"Sensors available for classification on Bridge 705")
plt.tight_layout()
plt.savefig(c.get_image_path("sensors", "unavailable-sensors.pdf"))
plt.close()
def make_boundary_plot(c: Config):
"""Top view of bridge with boundary conditions."""
plt.landscape()
top_view_bridge(c.bridge, abutments=True, piers=True, compass=False)
plt.vlines(
[0, c.bridge.length],
c.bridge.z_min,
c.bridge.z_max,
lw=5,
color="orange",
label=" Y = 1, Z = 1",
)
for p_i, pier in enumerate(c.bridge.supports):
z_min_top, z_max_top = pier.z_min_max_bottom()
x_min, x_max = pier.x_min_max_top()
x_center = x_min + ((x_max - x_min) / 2)
plt.vlines(
[x_center],
z_min_top,
z_max_top,
lw=5,
color="red" if (8 <= p_i <= 15) else "orange",
label="X = 1, Y = 1, Z = 1" if p_i == 8 else None,
)
legend_marker_size(plt.legend(), 50)
plt.title("Bridge 705 boundary conditions of nodal supports")
plt.tight_layout()
plt.savefig(c.get_image_path("sensors", "boundary.pdf"))
plt.close()
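# Hedged usage sketch (assumption): none of the functions above are invoked in this
# module; with a fully configured `Config` they could be driven roughly like this.
def example_geometry_plots(config: Config):
    """Illustrative only: run a representative subset of the plots defined above."""
    make_node_plots(config)
    make_shell_properties_3d(config)
    make_shell_properties_top_view(
        config,
        shells_name_="deck",
        prop_name_="thickness",
        refined_=False,
        outline=True,
        lanes=True,
    )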
|
import json
def getGrch38():
with open("grch38_utr3.json") as infile:
grch38_utr3 = json.load(infile)
return grch38_utr3
def getIdconverter():
with open("/home/fux/fux/miRNASNP3/test_let_7a_3p/ID_convert.jaon") as infile:
id_convert = json.load(infile)
return id_convert
def outNmseq(outch):
with open("nm_grch38_utr3.fa","a") as out:
out.write(outch+'\n')
def outNnmseq(outch):
with open("nnm_grch38_utr3.fa","a") as out:
out.write(outch+'\n')
def outNbiomart(outch):
with open("nbiomart_grch38_utr3.fa","a") as out:
out.write(outch+'\n')
def outStatutr(outch):
with open("stat_grch38_utr3","a") as out:
out.write(outch+'\n')
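# fixId() walks re_grch38_utr3.fa two lines at a time (header line, then sequence),
# maps each GRCh38 UTR id to its Ensembl transcript id via grch38_utr3, and splits
# records by the gene-symbol mapping found in id_convert:
#   - symbol list of length 2            -> nm_grch38_utr3.fa
#   - any other mapped symbol list       -> nnm_grch38_utr3.fa
#   - transcript missing from id_convert -> nbiomart_grch38_utr3.fa
# A per-record summary line is appended to stat_grch38_utr3 in every case.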
def fixId():
with open("re_grch38_utr3.fa") as infile:
line = infile.readline()
while line:
grchid = line.strip()[1:]
utrid = grch38_utr3[grchid]
enst = utrid.split('.')[0]
# print(enst)
if enst in id_convert.keys():
symbol = id_convert[enst]
seq = infile.readline().strip()
seq_len = len(seq)
if len(symbol) == 2:
newid = grchid+'#'+enst+'#'+symbol[0]+'#'+symbol[1]+'#'+str(seq_len)
newid_stat = grchid+'\t'+enst+'\t'+symbol[0]+'\t'+symbol[1]+'\t'+str(seq_len)
outNmseq('>'+newid)
outNmseq(seq)
outStatutr(newid_stat)
else:
newid = grchid+'#'+enst+'#'+symbol[0]+'#'+str(seq_len)
newid_stat = grchid+'\t'+enst+'\t'+symbol[0]+'\t0\t'+str(seq_len)
outStatutr(newid_stat)
outNnmseq('>'+newid)
outNnmseq(seq)
else:
seq = infile.readline().strip()
seq_len = len(seq)
newid = grchid+'#'+enst+'#'+str(seq_len)
newid_stat = grchid+'\t'+enst+'\tno_biomart\t0\t'+str(seq_len)
outStatutr(newid_stat)
outNbiomart('>'+newid)
outNbiomart(seq)
line = infile.readline()
if __name__ == '__main__':
grch38_utr3 = getGrch38()
id_convert = getIdconverter()
fixId()
|
#ex077: Create a program with a tuple containing several words (no accents). Then, for each word, show which vowels it contains.
#I COULD NOT SOLVE THIS ONE BY MYSELF
a = '\033[1;33m'
v = '\033[1;32m'
l = '\033[m'
palavras = ('abacaxi', 'tomate', 'uva', 'morango', 'melancia', 'coco', 'cenoura', 'mirtilo')
for p in palavras:
print(f'\nNa palavra {a}{p}{l} temos: ', end='')
for letra in p:
if letra.lower() in 'aeiou':
print(f'{v}{letra}{l}', end=' ')
#STEP BY STEP TO UNDERSTAND THE SOLUTION
#1. I created a tuple with several words.
#2. The tuple 'palavras' has 8 items: 'abacaxi' is the 1st item (index 0 in memory), and so on.
#3. I wrote a loop: 'p' takes each item of the tuple 'palavras' in turn until it runs out, e.g. abacaxi, tomate, etc.
#4. The first print therefore reads: 'In the word abacaxi (1st item / index 0) we have:'
#5. The loop body is repeated for each item (p).
#6. There is another loop: 'letra' takes each letter of 'p' (the item: 'abacaxi', 'tomate', etc.).
#7. If 'letra' (the letter) is one of 'a e i o u', that vowel is printed.
|
from metaphor.common.cli import cli_main
from .extractor import LookerExtractor
if __name__ == "__main__":
cli_main("Looker metadata extractor", LookerExtractor)
|
import os.path
import tensorflow as tf
from datetime import datetime
import models
import data
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("summaries_dir", "/home/uai/tf_logs/ic_fcn", "Path to the tensorboard logs directory.")
#tf.flags.DEFINE_string("summaries_dir", "C://Temp/tf_logs/ic_fcn", "Path to the tensorboard logs directory.")
tf.flags.DEFINE_string("checkpoint_dir", "/home/uai/tf_chkp/ic_fcn", "Path to the tensorboard logs directory.")
tf.flags.DEFINE_string("dataset_train_dir", 'ic_fcn/data', "Path to the tensorboard logs directory.")
tf.flags.DEFINE_string("dataset_eval_dir", 'ic_fcn/data', "Path to the tensorboard logs directory.")
tf.flags.DEFINE_string("dataset_test_dir", 'ic_fcn/data', "Path to the tensorboard logs directory.")
tf.flags.DEFINE_float("alpha", 0.0004, "Hyperparameter alpha.")
#tf.flags.DEFINE_integer("T_C", 90000, "Hyperparameter T_C which defines how many iteration the MSE pretraining is run.")
tf.flags.DEFINE_integer("T_C", 2160000, "Hyperparameter T_C which defines how many iteration the MSE pretraining is run.")
tf.flags.DEFINE_integer("T_D", 10000, "Hyperparameter T_D which defines how many iteration the discrimitar is run.")
tf.flags.DEFINE_integer("T_Train", 500000, "Hyperparameter T_Train which defines how many iteration the discrimitar and MSE is run together.")
#tf.flags.DEFINE_integer("batch_size", 96, "Size of the input batching.")
tf.flags.DEFINE_integer("batch_size", 4, "Size of the input batching.")
tf.flags.DEFINE_integer("sum_hook_everysec", 10*60, "How often is the tensorboard summary updated (in seconds).")
tf.flags.DEFINE_float("learning_rate", 0.001, "Learning rate for training.")
TIMESTAMP_STRING = datetime.now().isoformat(sep='_').replace('-', '').replace(':', '')[0:15] # example: '20171206_205345'
SUMHOOK_TIME = 100
CNN_NAME = 'ic_fcn'
def train_data_fn():
dataset = data.MaskedImageDataset(FLAGS.dataset_train_dir, random=True).get_tf_dataset()
dataset = dataset.repeat(FLAGS.T_C)
#dataset = dataset.repeat(FLAGS.steps * FLAGS.batch_size + 500)
#dataset = dataset.batch(128)
dataset = dataset.batch(FLAGS.batch_size)
#dataset = dataset.prefetch(20)
# A one-shot iterator automatically initializes itself on first use.
iterator = dataset.make_one_shot_iterator()
# The return value of get_next() matches the dataset element type.
feature, mask, orginal = iterator.get_next()
label = {'mask': mask, 'orginal': orginal}
return feature, label
def mse_estimator(features, labels, mode, params):
# unpack labels
mask = labels['mask']
orginal = labels['orginal']
# Logic to do the following:
# 1. Configure the model via TensorFlow operations
completion_cnn = models.build_completion_fcn(input_features=features)
# 2. Define the loss function for training/evaluation
mse_loss = tf.square(tf.norm(mask * (completion_cnn - orginal)), name="loss_mse")
# 3. Define the training operation/optimizer
optimizer = tf.train.AdadeltaOptimizer(learning_rate=params["learning_rate"])
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)  # important for batch norm!
    with tf.control_dependencies(update_ops):  # important for batch norm: run the update ops before the train op
train_op = optimizer.minimize(loss=mse_loss, global_step=tf.train.get_global_step())
# 4. Generate predictions
predictions = completion_cnn # dict with all predictions
# 5. Return predictions/loss/train_op/eval_metric_ops in EstimatorSpec object
    eval_metric_ops = {"mean_absolute_error": tf.metrics.mean_absolute_error(orginal, completion_cnn)}
# 6. Tensorboard
tf.summary.scalar("loss_mse", mse_loss)
#tf.summary.image('mask', mask, max_outputs=3, collections=None, family='predi')
tf.summary.image('input', features[:,:,:,0:3], max_outputs=3, collections=None, family='predi')
#masked_orginal = tf.multiply(orginal, mask)
#tf.summary.image('masked_orginal', masked_orginal, max_outputs=3, collections=None, family='predi')
predict = mask * completion_cnn
tf.summary.image('predict', predict, max_outputs=3, collections=None, family='predi')
inverted_mask = mask * -1.0 + 1.0
predict_full = predict + inverted_mask * orginal
tf.summary.image('predict_ful', predict_full, max_outputs=3, collections=None, family='predi')
# tf.summary.image('orginal_masked', orginal, max_outputs=3, collections=None, family='predi')
all_summaries = tf.summary.merge_all()
summary_trainhook = tf.train.SummarySaverHook(
save_secs=FLAGS.sum_hook_everysec,
output_dir=os.path.join(FLAGS.summaries_dir, '{}_{}_{}'.format(CNN_NAME, TIMESTAMP_STRING, mode)),
summary_op=tf.summary.merge_all())
#profiler_hook = tf.train.ProfilerHook()
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions, loss=mse_loss,
train_op=train_op, eval_metric_ops=eval_metric_ops, training_hooks=[summary_trainhook], evaluation_hooks=[])
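# Hedged sketch (assumption): FLAGS.dataset_eval_dir is declared above but never used;
# an evaluation input_fn mirroring train_data_fn could look like this and be passed to
# estimator.evaluate(...) from main(). `random=False` is an assumption about
# data.MaskedImageDataset.
def eval_data_fn():
    dataset = data.MaskedImageDataset(FLAGS.dataset_eval_dir, random=False).get_tf_dataset()
    dataset = dataset.batch(FLAGS.batch_size)
    iterator = dataset.make_one_shot_iterator()
    feature, mask, orginal = iterator.get_next()
    return feature, {'mask': mask, 'orginal': orginal}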
def main(unused_argv):
# Settings for the trainings
runconfig = tf.estimator.RunConfig(model_dir=FLAGS.checkpoint_dir)
# Set model params
    model_params = {"learning_rate": FLAGS.learning_rate, "summary_trainhook_rate": SUMHOOK_TIME}
# Instantiate Estimator
estimator = tf.estimator.Estimator(model_fn=mse_estimator, params=model_params, config=runconfig)
estimator.train(input_fn=train_data_fn, steps=FLAGS.T_C)
#estimator.evaluate(input_fn=train_data_fn, steps=500)
if __name__ == "__main__":
#tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
|
import numpy as np
import scipy.sparse as sp
import warnings
import properties
from . import Utils
from . import Maps
from . import Mesh
from . import ObjectiveFunction
from . import Props
from .Utils import mkvc, speye, sdiag, kron3
__all__ = [
"SimpleSmall",
"SimpleSmoothDeriv",
"Simple",
"Small",
"SmoothDeriv",
"SmoothDeriv2",
"Tikhonov",
"SparseSmall",
"SparseDeriv",
"Sparse",
]
###############################################################################
# #
# Regularization Mesh #
# #
###############################################################################
class RegularizationMesh(Props.BaseSimPEG):
"""
**Regularization Mesh**
This contains the operators used in the regularization. Note that these
are not necessarily true differential operators, but are constructed from
a SimPEG Mesh.
:param BaseMesh mesh: problem mesh
:param numpy.array indActive: bool array, size nC, that is True where we have active cells. Used to reduce the operators so we regularize only on active cells
"""
regularization_type = None # or 'Simple', 'Sparse' or 'Tikhonov'
def __init__(self, mesh, **kwargs):
self.mesh = mesh
Utils.setKwargs(self, **kwargs)
indActive = properties.Array("active indices in mesh", dtype=[bool, int])
@properties.validator("indActive")
def _cast_to_bool(self, change):
value = change["value"]
if value is not None:
if value.dtype != "bool": # cast it to a bool otherwise
tmp = value
value = np.zeros(self.mesh.nC, dtype=bool)
value[tmp] = True
change["value"] = value
@property
def vol(self):
"""
reduced volume vector
:rtype: numpy.array
:return: reduced cell volume
"""
if getattr(self, "_vol", None) is None:
self._vol = self.Pac.T * self.mesh.vol
return self._vol
@property
def nC(self):
"""
reduced number of cells
:rtype: int
:return: number of cells being regularized
"""
if self.indActive is not None:
return int(self.indActive.sum())
return self.mesh.nC
@property
def dim(self):
"""
dimension of regularization mesh (1D, 2D, 3D)
:rtype: int
:return: dimension
"""
if getattr(self, "_dim", None) is None:
self._dim = self.mesh.dim
return self._dim
@property
def Pac(self):
"""
projection matrix that takes from the reduced space of active cells to
full modelling space (ie. nC x nindActive)
:rtype: scipy.sparse.csr_matrix
:return: active cell projection matrix
"""
if getattr(self, "_Pac", None) is None:
if self.indActive is None:
self._Pac = Utils.speye(self.mesh.nC)
else:
self._Pac = Utils.speye(self.mesh.nC)[:, self.indActive]
return self._Pac
@property
def Pafx(self):
"""
projection matrix that takes from the reduced space of active x-faces
to full modelling space (ie. nFx x nindActive_Fx )
:rtype: scipy.sparse.csr_matrix
:return: active face-x projection matrix
"""
if getattr(self, "_Pafx", None) is None:
if self.indActive is None:
self._Pafx = Utils.speye(self.mesh.nFx)
else:
# if getattr(self.mesh, 'aveCC2Fx', None) is not None:
if self.mesh._meshType == "TREE":
if self.regularization_type == "Tikhonov":
indActive_Fx = (self.mesh.aveFx2CC.T * self.indActive) >= 1
self._Pafx = Utils.speye(self.mesh.nFx)[:, indActive_Fx]
else:
indActive_Fx = (
self.mesh._aveCC2FxStencil() * self.indActive
) >= 1
self._Pafx = Utils.speye(self.mesh.ntFx)[:, indActive_Fx]
else:
indActive_Fx = self.mesh.aveFx2CC.T * self.indActive >= 1
self._Pafx = Utils.speye(self.mesh.nFx)[:, indActive_Fx]
return self._Pafx
@property
def Pafy(self):
"""
projection matrix that takes from the reduced space of active y-faces
to full modelling space (ie. nFy x nindActive_Fy )
:rtype: scipy.sparse.csr_matrix
:return: active face-y projection matrix
"""
if getattr(self, "_Pafy", None) is None:
if self.indActive is None:
self._Pafy = Utils.speye(self.mesh.nFy)
else:
# if getattr(self.mesh, 'aveCC2Fy', None) is not None:
if self.mesh._meshType == "TREE":
if self.regularization_type == "Tikhonov":
print("Use Tikhonov")
indActive_Fy = (self.mesh.aveFy2CC.T * self.indActive) >= 1
self._Pafy = Utils.speye(self.mesh.nFy)[:, indActive_Fy]
else:
indActive_Fy = (
self.mesh._aveCC2FyStencil() * self.indActive
) >= 1
self._Pafy = Utils.speye(self.mesh.ntFy)[:, indActive_Fy]
else:
indActive_Fy = (self.mesh.aveFy2CC.T * self.indActive) >= 1
self._Pafy = Utils.speye(self.mesh.nFy)[:, indActive_Fy]
return self._Pafy
@property
def Pafz(self):
"""
projection matrix that takes from the reduced space of active z-faces
to full modelling space (ie. nFz x nindActive_Fz )
:rtype: scipy.sparse.csr_matrix
:return: active face-z projection matrix
"""
if getattr(self, "_Pafz", None) is None:
if self.indActive is None:
self._Pafz = Utils.speye(self.mesh.nFz)
else:
# if getattr(self.mesh, 'aveCC2Fz', None) is not None:
if self.mesh._meshType == "TREE":
if self.regularization_type == "Tikhonov":
indActive_Fz = (self.mesh.aveFz2CC.T * self.indActive) >= 1
self._Pafz = Utils.speye(self.mesh.nFz)[:, indActive_Fz]
else:
indActive_Fz = (
self.mesh._aveCC2FzStencil() * self.indActive
) >= 1
self._Pafz = Utils.speye(self.mesh.ntFz)[:, indActive_Fz]
else:
indActive_Fz = (self.mesh.aveFz2CC.T * self.indActive) >= 1
self._Pafz = Utils.speye(self.mesh.nFz)[:, indActive_Fz]
return self._Pafz
@property
def aveFx2CC(self):
"""
averaging from active cell centers to active x-faces
:rtype: scipy.sparse.csr_matrix
:return: averaging from active cell centers to active x-faces
"""
if getattr(self, "_aveFx2CC", None) is None:
if self.mesh._meshType == "TREE":
if self.regularization_type == "Tikhonov":
self._aveFx2CC = self.Pac.T * self.mesh.aveFx2CC * self.Pafx
else:
nCinRow = mkvc((self.aveCC2Fx.T).sum(1))
nCinRow[nCinRow > 0] = 1.0 / nCinRow[nCinRow > 0]
self._aveFx2CC = Utils.sdiag(nCinRow) * self.aveCC2Fx.T
else:
self._aveFx2CC = self.Pac.T * self.mesh.aveFx2CC * self.Pafx
return self._aveFx2CC
@property
def aveCC2Fx(self):
"""
averaging from active x-faces to active cell centers
:rtype: scipy.sparse.csr_matrix
:return: averaging matrix from active x-faces to active cell centers
"""
if getattr(self, "_aveCC2Fx", None) is None:
# if getattr(self.mesh, 'aveCC2Fx', None) is not None:
if self.mesh._meshType == "TREE":
if self.regularization_type == "Tikhonov":
self._aveCC2Fx = (
Utils.sdiag(1.0 / (self.aveFx2CC.T).sum(1)) * self.aveFx2CC.T
)
else:
self._aveCC2Fx = (
self.Pafx.T * self.mesh._aveCC2FxStencil() * self.Pac
)
else:
self._aveCC2Fx = (
Utils.sdiag(1.0 / (self.aveFx2CC.T).sum(1)) * self.aveFx2CC.T
)
return self._aveCC2Fx
@property
def aveFy2CC(self):
"""
averaging from active cell centers to active y-faces
:rtype: scipy.sparse.csr_matrix
:return: averaging from active cell centers to active y-faces
"""
if getattr(self, "_aveFy2CC", None) is None:
if self.mesh._meshType == "TREE":
if self.regularization_type == "Tikhonov":
self._aveFy2CC = self.Pac.T * self.mesh.aveFy2CC * self.Pafy
else:
nCinRow = mkvc((self.aveCC2Fy.T).sum(1))
nCinRow[nCinRow > 0] = 1.0 / nCinRow[nCinRow > 0]
self._aveFy2CC = Utils.sdiag(nCinRow) * self.aveCC2Fy.T
else:
self._aveFy2CC = self.Pac.T * self.mesh.aveFy2CC * self.Pafy
return self._aveFy2CC
@property
def aveCC2Fy(self):
"""
averaging from active y-faces to active cell centers
:rtype: scipy.sparse.csr_matrix
:return: averaging matrix from active y-faces to active cell centers
"""
if getattr(self, "_aveCC2Fy", None) is None:
# if getattr(self.mesh, 'aveCC2Fy', None) is not None:
if self.mesh._meshType == "TREE":
if self.regularization_type == "Tikhonov":
self._aveCC2Fy = (
Utils.sdiag(1.0 / (self.aveFy2CC.T).sum(1)) * self.aveFy2CC.T
)
else:
self._aveCC2Fy = (
self.Pafy.T * self.mesh._aveCC2FyStencil() * self.Pac
)
else:
self._aveCC2Fy = (
Utils.sdiag(1.0 / (self.aveFy2CC.T).sum(1)) * self.aveFy2CC.T
)
return self._aveCC2Fy
@property
def aveFz2CC(self):
"""
averaging from active cell centers to active z-faces
:rtype: scipy.sparse.csr_matrix
:return: averaging from active cell centers to active z-faces
"""
if getattr(self, "_aveFz2CC", None) is None:
if self.mesh._meshType == "TREE":
if self.regularization_type == "Tikhonov":
self._aveFz2CC = self.Pac.T * self.mesh.aveFz2CC * self.Pafz
else:
nCinRow = mkvc((self.aveCC2Fz.T).sum(1))
nCinRow[nCinRow > 0] = 1.0 / nCinRow[nCinRow > 0]
self._aveFz2CC = Utils.sdiag(nCinRow) * self.aveCC2Fz.T
else:
self._aveFz2CC = self.Pac.T * self.mesh.aveFz2CC * self.Pafz
return self._aveFz2CC
@property
def aveCC2Fz(self):
"""
averaging from active z-faces to active cell centers
:rtype: scipy.sparse.csr_matrix
:return: averaging matrix from active z-faces to active cell centers
"""
if getattr(self, "_aveCC2Fz", None) is None:
# if getattr(self.mesh, 'aveCC2Fz', None) is not None:
if self.mesh._meshType == "TREE":
if self.regularization_type == "Tikhonov":
self._aveCC2Fz = (
Utils.sdiag(1.0 / (self.aveFz2CC.T).sum(1)) * self.aveFz2CC.T
)
else:
self._aveCC2Fz = (
self.Pafz.T * self.mesh._aveCC2FzStencil() * self.Pac
)
else:
self._aveCC2Fz = (
Utils.sdiag(1.0 / (self.aveFz2CC.T).sum(1)) * self.aveFz2CC.T
)
return self._aveCC2Fz
@property
def cellDiffx(self):
"""
cell centered difference in the x-direction
:rtype: scipy.sparse.csr_matrix
:return: differencing matrix for active cells in the x-direction
"""
if getattr(self, "_cellDiffx", None) is None:
self._cellDiffx = self.Pafx.T * self.mesh.cellGradx * self.Pac
return self._cellDiffx
@property
def cellDiffy(self):
"""
cell centered difference in the y-direction
:rtype: scipy.sparse.csr_matrix
:return: differencing matrix for active cells in the y-direction
"""
if getattr(self, "_cellDiffy", None) is None:
self._cellDiffy = self.Pafy.T * self.mesh.cellGrady * self.Pac
return self._cellDiffy
@property
def cellDiffz(self):
"""
cell centered difference in the z-direction
:rtype: scipy.sparse.csr_matrix
:return: differencing matrix for active cells in the z-direction
"""
if getattr(self, "_cellDiffz", None) is None:
self._cellDiffz = self.Pafz.T * self.mesh.cellGradz * self.Pac
return self._cellDiffz
@property
def faceDiffx(self):
"""
x-face differences
:rtype: scipy.sparse.csr_matrix
:return: differencing matrix for active faces in the x-direction
"""
if getattr(self, "_faceDiffx", None) is None:
self._faceDiffx = self.Pac.T * self.mesh.faceDivx * self.Pafx
return self._faceDiffx
@property
def faceDiffy(self):
"""
y-face differences
:rtype: scipy.sparse.csr_matrix
:return: differencing matrix for active faces in the y-direction
"""
if getattr(self, "_faceDiffy", None) is None:
self._faceDiffy = self.Pac.T * self.mesh.faceDivy * self.Pafy
return self._faceDiffy
@property
def faceDiffz(self):
"""
z-face differences
:rtype: scipy.sparse.csr_matrix
:return: differencing matrix for active faces in the z-direction
"""
if getattr(self, "_faceDiffz", None) is None:
self._faceDiffz = self.Pac.T * self.mesh.faceDivz * self.Pafz
return self._faceDiffz
@property
def cellDiffxStencil(self):
"""
        cell centered difference stencil (no cell lengths included) in the
        x-direction
:rtype: scipy.sparse.csr_matrix
:return: differencing matrix for active cells in the x-direction
"""
if getattr(self, "_cellDiffxStencil", None) is None:
self._cellDiffxStencil = (
self.Pafx.T * self.mesh._cellGradxStencil * self.Pac
)
return self._cellDiffxStencil
@property
def cellDiffyStencil(self):
"""
        cell centered difference stencil (no cell lengths included) in the
        y-direction
:rtype: scipy.sparse.csr_matrix
:return: differencing matrix for active cells in the y-direction
"""
if self.dim < 2:
return None
if getattr(self, "_cellDiffyStencil", None) is None:
self._cellDiffyStencil = (
self.Pafy.T * self.mesh._cellGradyStencil * self.Pac
)
return self._cellDiffyStencil
@property
def cellDiffzStencil(self):
"""
        cell centered difference stencil (no cell lengths included) in the
        z-direction
        :rtype: scipy.sparse.csr_matrix
        :return: differencing matrix for active cells in the z-direction
"""
if self.dim < 3:
return None
if getattr(self, "_cellDiffzStencil", None) is None:
self._cellDiffzStencil = (
self.Pafz.T * self.mesh._cellGradzStencil * self.Pac
)
return self._cellDiffzStencil
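# Hedged usage sketch (assumption): the reduced operators above are built lazily on
# first access. The helper below is illustrative only and not part of the API; `mesh`
# is any SimPEG mesh and `active_cells` a boolean array of length mesh.nC.
def _example_regularization_mesh(mesh, active_cells):
    """Return the reduced cell volumes and the x-direction stencil for active cells."""
    regmesh = RegularizationMesh(mesh, indActive=active_cells)
    return regmesh.vol, regmesh.cellDiffxStencil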
###############################################################################
# #
# Base Regularization #
# #
###############################################################################
class BaseRegularization(ObjectiveFunction.BaseObjectiveFunction):
"""
Base class for regularization. Inherit this for building your own
regularization. The base regularization assumes a weighted l2 style of
regularization. However, if you wish to employ a different norm, the
methods :meth:`__call__`, :meth:`deriv` and :meth:`deriv2` can be
over-written
:param BaseMesh mesh: SimPEG mesh
"""
def __init__(self, mesh=None, **kwargs):
super().__init__()
self.regmesh = RegularizationMesh(mesh)
Utils.setKwargs(self, **kwargs)
counter = None
mrefInSmooth = properties.Bool(
"include mref in the smoothness calculation?", default=None
)
# Properties
mref = Props.Array("reference model")
indActive = properties.Array(
"indices of active cells in the mesh", dtype=(bool, int)
)
cell_weights = properties.Array(
"regularization weights applied at cell centers", dtype=float
)
regmesh = properties.Instance(
"regularization mesh", RegularizationMesh, required=True
)
mapping = properties.Instance(
"mapping which is applied to model in the regularization",
Maps.IdentityMap,
default=Maps.IdentityMap(),
)
# Observers and Validators
@properties.validator("indActive")
def _cast_to_bool(self, change):
value = change["value"]
if value is not None:
if value.dtype != "bool": # cast it to a bool otherwise
tmp = value
value = np.zeros(self.regmesh.nC, dtype=bool)
value[tmp] = True
change["value"] = value
# update regmesh indActive
if getattr(self, "regmesh", None) is not None:
self.regmesh.indActive = Utils.mkvc(value)
@properties.observer("indActive")
def _update_regmesh_indActive(self, change):
# update regmesh indActive
if getattr(self, "regmesh", None) is not None:
self.regmesh.indActive = change["value"]
@properties.validator("cell_weights")
def _validate_cell_weights(self, change):
if change["value"] is not None:
# todo: residual size? we need to know the expected end shape
if self._nC_residual != "*":
assert (
len(change["value"]) == self._nC_residual
), "cell_weights must be length {} not {}".format(
self._nC_residual, len(change["value"])
)
# Other properties and methods
@property
def nP(self):
"""
number of model parameters
"""
if getattr(self.mapping, "nP") != "*":
return self.mapping.nP
elif getattr(self.regmesh, "nC") != "*":
return self.regmesh.nC
else:
return "*"
@property
def _nC_residual(self):
"""
Shape of the residual
"""
if getattr(self, "mapping", None) != "*":
return self.mapping.shape[0]
elif getattr(self.regmesh, "nC", None) != "*":
return self.regmesh.nC
else:
return self.nP
def _delta_m(self, m):
if self.mref is None:
return m
return -self.mref + m # in case self.mref is Zero, returns type m
@Utils.timeIt
def __call__(self, m):
"""
We use a weighted 2-norm objective function
.. math::
r(m) = \\frac{1}{2}
"""
r = self.W * (self.mapping * (self._delta_m(m)))
return 0.5 * r.dot(r)
@Utils.timeIt
def deriv(self, m):
"""
The regularization is:
.. math::
R(m) = \\frac{1}{2}\\mathbf{(m-m_\\text{ref})^\\top W^\\top
W(m-m_\\text{ref})}
        So the derivative is straightforward:
        .. math::
            \\frac{\\partial R}{\\partial \\mathbf{m}} = \\mathbf{W^\\top W (m-m_\\text{ref})}
"""
mD = self.mapping.deriv(self._delta_m(m))
r = self.W * (self.mapping * (self._delta_m(m)))
return mD.T * (self.W.T * r)
@Utils.timeIt
def deriv2(self, m, v=None):
"""
Second derivative
:param numpy.array m: geophysical model
:param numpy.array v: vector to multiply
:rtype: scipy.sparse.csr_matrix
:return: WtW, or if v is supplied WtW*v (numpy.ndarray)
The regularization is:
.. math::
R(m) = \\frac{1}{2}\\mathbf{(m-m_\\text{ref})^\\top W^\\top
W(m-m_\\text{ref})}
        So the second derivative is straightforward:
        .. math::
            \\frac{\\partial^2 R}{\\partial \\mathbf{m}^2} = \\mathbf{W^\\top W}
"""
mD = self.mapping.deriv(self._delta_m(m))
if v is None:
return mD.T * self.W.T * self.W * mD
return mD.T * (self.W.T * (self.W * (mD * v)))
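# Hedged sketch (assumption): as the docstring above notes, a custom weighted-l2 term
# only needs to supply `W`; __call__, deriv and deriv2 are inherited. The class below
# is illustrative only and not part of the module's API.
class _ExampleUnitSmall(BaseRegularization):
    """Identity weighting over the active cells."""

    _multiplier_pair = "alpha_s"

    @property
    def W(self):
        return Utils.speye(self.regmesh.nC)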
###############################################################################
# #
# Base Combo Regularization #
# #
###############################################################################
class BaseComboRegularization(ObjectiveFunction.ComboObjectiveFunction):
def __init__(self, mesh, objfcts=[], **kwargs):
super().__init__(objfcts=objfcts, multipliers=None)
self.regmesh = RegularizationMesh(mesh)
Utils.setKwargs(self, **kwargs)
# link these attributes
linkattrs = ["regmesh", "indActive", "cell_weights", "mapping"]
for attr in linkattrs:
val = getattr(self, attr)
if val is not None:
[setattr(fct, attr, val) for fct in self.objfcts]
# Properties
alpha_s = Props.Float("smallness weight")
alpha_x = Props.Float("weight for the first x-derivative")
alpha_y = Props.Float("weight for the first y-derivative")
alpha_z = Props.Float("weight for the first z-derivative")
alpha_xx = Props.Float("weight for the second x-derivative")
alpha_yy = Props.Float("weight for the second y-derivative")
alpha_zz = Props.Float("weight for the second z-derivative")
counter = None
mref = Props.Array("reference model")
mrefInSmooth = properties.Bool(
"include mref in the smoothness calculation?", default=False
)
indActive = properties.Array(
"indices of active cells in the mesh", dtype=(bool, int)
)
cell_weights = properties.Array(
"regularization weights applied at cell centers", dtype=float
)
scale = properties.Float("General nob for scaling", default=1.0)
regmesh = properties.Instance(
"regularization mesh", RegularizationMesh, required=True
)
mapping = properties.Instance(
"mapping which is applied to model in the regularization",
Maps.IdentityMap,
default=Maps.IdentityMap(),
)
# Other properties and methods
@property
def nP(self):
"""
number of model parameters
"""
if getattr(self.mapping, "nP") != "*":
return self.mapping.nP
elif getattr(self.regmesh, "nC") != "*":
return self.regmesh.nC
else:
return "*"
@property
def _nC_residual(self):
"""
Shape of the residual
"""
if getattr(self.regmesh, "nC", None) != "*":
return self.regmesh.nC
elif getattr(self, "mapping", None) != "*":
return self.mapping.shape[0]
else:
return self.nP
def _delta_m(self, m):
if self.mref is None:
return m
return -self.mref + m # in case self.mref is Zero, returns type m
@property
def multipliers(self):
"""
Factors that multiply the objective functions that are summed together
to build to composite regularization
"""
return [getattr(self, f"{objfct._multiplier_pair}") for objfct in self.objfcts]
# Observers and Validators
@properties.validator("indActive")
def _cast_to_bool(self, change):
value = change["value"]
if value is not None:
if value.dtype != "bool": # cast it to a bool otherwise
tmp = value
value = np.zeros(self.regmesh.nC, dtype=bool)
value[tmp] = True
change["value"] = value
# update regmesh indActive
if getattr(self, "regmesh", None) is not None:
self.regmesh.indActive = Utils.mkvc(value)
@properties.observer("indActive")
def _update_regmesh_indActive(self, change):
# update regmesh indActive
if getattr(self, "regmesh", None) is not None:
self.regmesh.indActive = change["value"]
@properties.validator("cell_weights")
def _validate_cell_weights(self, change):
if change["value"] is not None:
# todo: residual size? we need to know the expected end shape
if self._nC_residual != "*":
assert (
len(change["value"]) == self._nC_residual
), "cell_weights must be length {} not {}".format(
self._nC_residual, len(change["value"])
)
@properties.observer("mref")
def _mirror_mref_to_objfctlist(self, change):
for fct in self.objfcts:
if getattr(fct, "mrefInSmooth", None) is not None:
if self.mrefInSmooth is False:
fct.mref = Utils.Zero()
else:
fct.mref = change["value"]
else:
fct.mref = change["value"]
@properties.observer("mrefInSmooth")
def _mirror_mrefInSmooth_to_objfctlist(self, change):
for fct in self.objfcts:
if getattr(fct, "mrefInSmooth", None) is not None:
fct.mrefInSmooth = change["value"]
@properties.observer("indActive")
def _mirror_indActive_to_objfctlist(self, change):
value = change["value"]
if value is not None:
if value.dtype != "bool":
tmp = value
value = np.zeros(self.mesh.nC, dtype=bool)
value[tmp] = True
change["value"] = value
if getattr(self, "regmesh", None) is not None:
self.regmesh.indActive = value
for fct in self.objfcts:
fct.indActive = value
@properties.observer("cell_weights")
def _mirror_cell_weights_to_objfctlist(self, change):
for fct in self.objfcts:
fct.cell_weights = change["value"]
@properties.observer("mapping")
def _mirror_mapping_to_objfctlist(self, change):
for fct in self.objfcts:
fct.mapping = change["value"]
###############################################################################
# #
# Simple Regularization (no volume contribution) #
# #
###############################################################################
class SimpleSmall(BaseRegularization):
"""
Simple Small regularization - L2 regularization on the difference between a
model and a reference model. Cell weights may be included. This does not
include a volume contribution.
.. math::
r(m) = \\frac{1}{2}(\\mathbf{m} - \\mathbf{m_ref})^\top \\mathbf{W}^T
\\mathbf{W} (\\mathbf{m} - \\mathbf{m_{ref}})
where :math:`\\mathbf{m}` is the model, :math:`\\mathbf{m_{ref}}` is a
reference model and :math:`\\mathbf{W}` is a weighting
matrix (default Identity). If cell weights are provided, then it is
:code:`diag(np.sqrt(cell_weights))`)
**Optional Inputs**
:param BaseMesh mesh: SimPEG mesh
:param int nP: number of parameters
:param IdentityMap mapping: regularization mapping, takes the model from model space to the space you want to regularize in
:param numpy.ndarray mref: reference model
:param numpy.ndarray indActive: active cell indices for reducing the size of differential operators in the definition of a regularization mesh
:param numpy.ndarray cell_weights: cell weights
"""
_multiplier_pair = "alpha_s"
def __init__(self, mesh=None, **kwargs):
super().__init__(mesh=mesh, **kwargs)
@property
def W(self):
"""
Weighting matrix
"""
if self.cell_weights is not None:
return Utils.sdiag(np.sqrt(self.cell_weights))
elif self._nC_residual != "*":
return sp.eye(self._nC_residual)
else:
return Utils.Identity()
class SimpleSmoothDeriv(BaseRegularization):
"""
Base Simple Smooth Regularization. This base class regularizes on the first
spatial derivative, not considering length scales, in the provided
orientation
**Optional Inputs**
:param BaseMesh mesh: SimPEG mesh
:param int nP: number of parameters
:param IdentityMap mapping: regularization mapping, takes the model from model space to the space you want to regularize in
:param numpy.ndarray mref: reference model
:param numpy.ndarray indActive: active cell indices for reducing the size of differential operators in the definition of a regularization mesh
:param numpy.ndarray cell_weights: cell weights
:param bool mrefInSmooth: include the reference model in the smoothness computation? (eg. look at Deriv of m (False) or Deriv of (m-mref) (True))
:param numpy.ndarray cell_weights: vector of cell weights (applied in all terms)
"""
def __init__(self, mesh, orientation="x", **kwargs):
self.length_scales = None
self.orientation = orientation
assert self.orientation in [
"x",
"y",
"z",
], "Orientation must be 'x', 'y' or 'z'"
self.gradientType = gradientType
assert self.gradientType in [
"orthogonal",
"total",
], "gradientType must be 'orthogonal' or 'total'"
if self.orientation == "y":
assert mesh.dim > 1, (
"Mesh must have at least 2 dimensions to regularize along the "
"y-direction"
)
elif self.orientation == "z":
assert mesh.dim > 2, (
"Mesh must have at least 3 dimensions to regularize along the "
"z-direction"
)
super().__init__(mesh=mesh, **kwargs)
mrefInSmooth = properties.Bool(
"include mref in the smoothness calculation?", default=False
)
@property
def _multiplier_pair(self):
return f"alpha_{self.orientation}"
@property
def W(self):
"""
Weighting matrix that takes the first spatial difference (no
length scales considered) in the specified orientation
"""
Ave = getattr(self.regmesh, f"aveCC2F{self.orientation}")
W = getattr(self.regmesh, f"cellDiff{self.orientation}Stencil",)
if self.cell_weights is not None:
W = Utils.sdiag((Ave * (self.cell_weights)) ** 0.5) * W
else:
W = Utils.sdiag((Ave * (self.regmesh.vol)) ** 0.5) * W
return W
@property
def length_scales(self):
if getattr(self, "_length_scales", None) is None:
index = "xyz".index(self.orientation)
length_scales = (
self.regmesh.Pac.T * self.regmesh.mesh.h_gridded[:, index]
) ** 2.0
self._length_scales = length_scales / length_scales.min()
return self._length_scales
@length_scales.setter
def length_scales(self, value):
self._length_scales = value
class Simple(BaseComboRegularization):
"""
Simple regularization that does not include length scales in the
derivatives.
.. math::
r(\\mathbf{m}) = \\alpha_s \\phi_s + \\alpha_x \\phi_x +
\\alpha_y \\phi_y + \\alpha_z \\phi_z
where:
    - :math:`\\phi_s` is a :class:`SimPEG.Regularization.SimpleSmall` instance
- :math:`\\phi_x` is a :class:`SimPEG.Regularization.SimpleSmoothDeriv` instance, with :code:`orientation='x'`
- :math:`\\phi_y` is a :class:`SimPEG.Regularization.SimpleSmoothDeriv` instance, with :code:`orientation='y'`
- :math:`\\phi_z` is a :class:`SimPEG.Regularization.SimpleSmoothDeriv` instance, with :code:`orientation='z'`
**Required Inputs**
:param BaseMesh mesh: a SimPEG mesh
**Optional Inputs**
:param IdentityMap mapping: regularization mapping, takes the model from model space to the space you want to regularize in
:param numpy.ndarray mref: reference model
:param numpy.ndarray indActive: active cell indices for reducing the size of differential operators in the definition of a regularization mesh
:param numpy.ndarray cell_weights: cell weights
:param bool mrefInSmooth: include the reference model in the smoothness computation? (eg. look at Deriv of m (False) or Deriv of (m-mref) (True))
:param numpy.ndarray cell_weights: vector of cell weights (applied in all terms)
**Weighting Parameters**
:param float alpha_s: weighting on the smallness (default 1.)
:param float alpha_x: weighting on the x-smoothness (default 1.)
:param float alpha_y: weighting on the y-smoothness (default 1.)
:param float alpha_z: weighting on the z-smoothness(default 1.)
"""
def __init__(
self, mesh, alpha_s=1.0, alpha_x=1.0, alpha_y=1.0, alpha_z=1.0, **kwargs
):
objfcts = [
SimpleSmall(mesh=mesh, **kwargs),
SimpleSmoothDeriv(mesh=mesh, orientation="x", **kwargs),
]
if mesh.dim > 1:
objfcts.append(SimpleSmoothDeriv(mesh=mesh, orientation="y", **kwargs))
if mesh.dim > 2:
objfcts.append(SimpleSmoothDeriv(mesh=mesh, orientation="z", **kwargs))
super().__init__(
mesh=mesh,
objfcts=objfcts,
alpha_s=alpha_s,
alpha_x=alpha_x,
alpha_y=alpha_y,
alpha_z=alpha_z,
**kwargs,
)
###############################################################################
# #
# Tikhonov-Style Regularization (includes volume contribution) #
# #
###############################################################################
class Small(BaseRegularization):
"""
Small regularization - L2 regularization on the difference between a
model and a reference model. Cell weights may be included. A volume
contribution is included
.. math::
r(m) = \\frac{1}{2}(\\mathbf{m} - \\mathbf{m_ref})^\top \\mathbf{W}^T
\\mathbf{W} (\\mathbf{m} - \\mathbf{m_{ref}})
where :math:`\\mathbf{m}` is the model, :math:`\\mathbf{m_{ref}}` is a
reference model and :math:`\\mathbf{W}` is a weighting
matrix (default :code:`diag(np.sqrt(vol))`. If cell weights are provided, then it is
:code:`diag(np.sqrt(vol * cell_weights))`)
**Optional Inputs**
:param BaseMesh mesh: SimPEG mesh
:param int nP: number of parameters
:param IdentityMap mapping: regularization mapping, takes the model from model space to the space you want to regularize in
:param numpy.ndarray mref: reference model
:param numpy.ndarray indActive: active cell indices for reducing the size of differential operators in the definition of a regularization mesh
:param numpy.ndarray cell_weights: cell weights
"""
_multiplier_pair = "alpha_s"
def __init__(self, mesh=None, **kwargs):
super().__init__(mesh=mesh, **kwargs)
@property
def W(self):
"""
Weighting matrix
"""
if self.cell_weights is not None:
return Utils.sdiag(np.sqrt(self.regmesh.vol * self.cell_weights))
return Utils.sdiag(np.sqrt(self.regmesh.vol))
class SmoothDeriv(BaseRegularization):
"""
Base Smooth Regularization. This base class regularizes on the first
spatial derivative in the provided orientation
**Optional Inputs**
:param BaseMesh mesh: SimPEG mesh
:param int nP: number of parameters
:param IdentityMap mapping: regularization mapping, takes the model from model space to the space you want to regularize in
:param numpy.ndarray mref: reference model
:param numpy.ndarray indActive: active cell indices for reducing the size of differential operators in the definition of a regularization mesh
:param numpy.ndarray cell_weights: cell weights
:param bool mrefInSmooth: include the reference model in the smoothness computation? (eg. look at Deriv of m (False) or Deriv of (m-mref) (True))
:param numpy.ndarray cell_weights: vector of cell weights (applied in all terms)
"""
mrefInSmooth = properties.Bool(
"include mref in the smoothness calculation?", default=False
)
def __init__(self, mesh, orientation="x", **kwargs):
self.orientation = orientation
assert orientation in ["x", "y", "z"], "Orientation must be 'x', 'y' or 'z'"
if self.orientation == "y":
assert mesh.dim > 1, (
"Mesh must have at least 2 dimensions to regularize along the "
"y-direction"
)
elif self.orientation == "z":
assert mesh.dim > 2, (
"Mesh must have at least 3 dimensions to regularize along the "
"z-direction"
)
super().__init__(mesh=mesh, **kwargs)
if self.mrefInSmooth is False:
self.mref = Utils.Zero()
@property
def _multiplier_pair(self):
return f"alpha_{self.orientation}"
@property
def W(self):
"""
Weighting matrix that constructs the first spatial derivative stencil
in the specified orientation
"""
vol = self.regmesh.vol.copy()
if self.cell_weights is not None:
vol *= self.cell_weights
D = getattr(self.regmesh, f"cellDiff{self.orientation}")
Ave = getattr(self.regmesh, f"aveCC2F{self.orientation}")
return Utils.sdiag(np.sqrt(Ave * vol)) * D
class SmoothDeriv2(BaseRegularization):
"""
Base Smooth Regularization. This base class regularizes on the second
spatial derivative in the provided orientation
**Optional Inputs**
:param BaseMesh mesh: SimPEG mesh
:param int nP: number of parameters
:param IdentityMap mapping: regularization mapping, takes the model from model space to the space you want to regularize in
:param numpy.ndarray mref: reference model
:param numpy.ndarray indActive: active cell indices for reducing the size of differential operators in the definition of a regularization mesh
:param numpy.ndarray cell_weights: cell weights
:param bool mrefInSmooth: include the reference model in the smoothness computation? (eg. look at Deriv of m (False) or Deriv of (m-mref) (True))
:param numpy.ndarray cell_weights: vector of cell weights (applied in all terms)
"""
def __init__(self, mesh, orientation="x", **kwargs):
self.orientation = orientation
if self.orientation == "y":
assert mesh.dim > 1, (
"Mesh must have at least 2 dimensions to regularize along the "
"y-direction"
)
elif self.orientation == "z":
assert mesh.dim > 2, (
"Mesh must have at least 3 dimensions to regularize along the "
"z-direction"
)
super().__init__(mesh=mesh, **kwargs)
@property
def _multiplier_pair(self):
return "alpha_{orientation}{orientation}".format(orientation=self.orientation)
@property
def W(self):
"""
Weighting matrix that takes the second spatial derivative in the
specified orientation
"""
vol = self.regmesh.vol.copy()
if self.cell_weights is not None:
vol *= self.cell_weights
W = (
Utils.sdiag(vol ** 0.5)
* getattr(self.regmesh, f"faceDiff{self.orientation}",)
* getattr(self.regmesh, f"cellDiff{self.orientation}",)
)
return W
class Tikhonov(BaseComboRegularization):
"""
L2 Tikhonov regularization with both smallness and smoothness (first order
derivative) contributions.
.. math::
\\phi_m(\\mathbf{m}) = \\alpha_s \\| W_s (\\mathbf{m} - \\mathbf{m_{ref}} ) \\|^2
+ \\alpha_x \\| W_x \\frac{\\partial}{\\partial x} (\\mathbf{m} - \\mathbf{m_{ref}} ) \\|^2
+ \\alpha_y \\| W_y \\frac{\\partial}{\\partial y} (\\mathbf{m} - \\mathbf{m_{ref}} ) \\|^2
+ \\alpha_z \\| W_z \\frac{\\partial}{\\partial z} (\\mathbf{m} - \\mathbf{m_{ref}} ) \\|^2
    Note that if the keyword argument `mrefInSmooth` is False, then mref is not
    included in the smoothness contribution.
:param BaseMesh mesh: SimPEG mesh
:param IdentityMap mapping: regularization mapping, takes the model from model space to the thing you want to regularize
:param numpy.ndarray indActive: active cell indices for reducing the size of differential operators in the definition of a regularization mesh
:param bool mrefInSmooth: (default = False) put mref in the smoothness component?
:param float alpha_s: (default 1e-6) smallness weight
:param float alpha_x: (default 1) smoothness weight for first derivative in the x-direction
:param float alpha_y: (default 1) smoothness weight for first derivative in the y-direction
:param float alpha_z: (default 1) smoothness weight for first derivative in the z-direction
:param float alpha_xx: (default 1) smoothness weight for second derivative in the x-direction
:param float alpha_yy: (default 1) smoothness weight for second derivative in the y-direction
:param float alpha_zz: (default 1) smoothness weight for second derivative in the z-direction
"""
def __init__(
self,
mesh,
alpha_s=1e-6,
alpha_x=1.0,
alpha_y=1.0,
alpha_z=1.0,
alpha_xx=0.0,
alpha_yy=0.0,
alpha_zz=0.0,
**kwargs,
):
objfcts = [
Small(mesh=mesh, **kwargs),
SmoothDeriv(mesh=mesh, orientation="x", **kwargs),
# SmoothDeriv2(mesh=mesh, orientation='x', **kwargs)
]
if mesh.dim > 1:
objfcts += [
SmoothDeriv(mesh=mesh, orientation="y", **kwargs),
SmoothDeriv2(mesh=mesh, orientation="y", **kwargs),
]
if mesh.dim > 2:
objfcts += [
SmoothDeriv(mesh=mesh, orientation="z", **kwargs),
SmoothDeriv2(mesh=mesh, orientation="z", **kwargs),
]
super().__init__(
mesh,
alpha_s=alpha_s,
alpha_x=alpha_x,
alpha_y=alpha_y,
alpha_z=alpha_z,
alpha_xx=alpha_xx,
alpha_yy=alpha_yy,
alpha_zz=alpha_zz,
objfcts=objfcts,
**kwargs,
)
self.regmesh.regularization_type = "Tikhonov"
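# A hedged usage sketch for the Tikhonov regularization above (assumption: the
# `discretize` package provides TensorMesh and this objective is evaluated like any
# SimPEG objective function through __call__ and deriv):
def _example_tikhonov_usage():
    import discretize
    mesh = discretize.TensorMesh([8, 8, 8])
    reg = Tikhonov(mesh, alpha_s=1e-4, alpha_x=1.0, alpha_y=1.0, alpha_z=1.0)
    m = np.random.rand(mesh.nC)  # one model value per cell
    return reg(m), reg.deriv(m)  # scalar regularization value and its gradient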
class BaseSparse(BaseRegularization):
"""
Base class for building up the components of the Sparse Regularization
"""
def __init__(self, mesh, **kwargs):
self._stashedR = None
super().__init__(mesh=mesh, **kwargs)
model = properties.Array("current model", dtype=float)
gamma = properties.Float(
"Model norm scaling to smooth out convergence", default=1.0
)
epsilon = properties.Float(
"Threshold value for the model norm", default=1e-3, required=True
)
norm = properties.Array("norm used", dtype=float)
    space = properties.String("By default inherit the objective", default="linear")
gradientType = properties.String("type of gradient", default="total")
    scale = properties.Array("General knob for scaling", dtype=float)
# Give the option to scale or not
scaledIRLS = properties.Bool("Scale the gradients of the IRLS norms", default=True)
@properties.validator("scale")
def _validate_scale(self, change):
if change["value"] is not None:
# todo: residual size? we need to know the expected end shape
if self._nC_residual != "*":
assert (
len(change["value"]) == self._nC_residual
), "scale must be length {} not {}".format(
self._nC_residual, len(change["value"])
)
@property
def stashedR(self):
return self._stashedR
@stashedR.setter
def stashedR(self, value):
self._stashedR = value
class SparseSmall(BaseSparse):
"""
Sparse smallness regularization
**Inputs**
:param int norm: norm on the smallness
"""
_multiplier_pair = "alpha_s"
def __init__(self, mesh, **kwargs):
super().__init__(mesh=mesh, **kwargs)
# Give the option to scale or not
scaledIRLS = properties.Bool("Scale the gradients of the IRLS norms", default=True)
@property
def f_m(self):
return self.mapping * self._delta_m(self.model)
@property
def W(self):
if getattr(self, "model", None) is None:
R = Utils.speye(self.mapping.shape[0])
else:
r = self.R(self.f_m)
R = Utils.sdiag(r)
if self.scale is None:
self.scale = np.ones(self.mapping.shape[0])
if self.cell_weights is not None:
return Utils.sdiag((self.scale * self.cell_weights) ** 0.5) * R
return Utils.sdiag((self.scale * self.regmesh.vol) ** 0.5) * R
def R(self, f_m):
# if R is stashed, return that instead
if getattr(self, "stashedR") is not None:
return self.stashedR
# Default to 1 for zero gradients
eta = np.ones_like(f_m)
if self.scaledIRLS:
# Eta scaling is important for mix-norms...do not mess with it
maxVal = np.ones_like(f_m) * np.abs(f_m).max()
maxVal[self.norm < 1] = self.epsilon / np.sqrt(
1.0 - self.norm[self.norm < 1]
)
maxGrad = maxVal / (maxVal ** 2.0 + self.epsilon ** 2.0) ** (
1.0 - self.norm / 2.0
)
eta[maxGrad != 0] = np.abs(f_m).max() / maxGrad[maxGrad != 0]
r = (eta / (f_m ** 2.0 + self.epsilon ** 2.0) ** (1.0 - self.norm / 2.0)) ** 0.5
        # Theoretical full deriv for testing
# r = (
# eta * (
# 1. / (f_m**2. + self.epsilon**2.)**(1.-self.norm/2.) +
# (self.norm/2. - 1.) * f_m**2. / (f_m**2. + self.epsilon**2.)**(2.-self.norm/2.)
# )
# )**0.5
self.stashedR = r # stash on the first calculation
return r
@Utils.timeIt
def deriv(self, m):
"""
The regularization is:
.. math::
R(m) = \\frac{1}{2}\\mathbf{(m-m_\\text{ref})^\\top W^\\top
W(m-m_\\text{ref})}
        So the derivative is straightforward:
        .. math::
            \\frac{\\partial R(m)}{\\partial m} = \\mathbf{W^\\top W (m-m_\\text{ref})}
"""
mD = self.mapping.deriv(self._delta_m(m))
r = self.gamma * self.W * (self.mapping * (self._delta_m(m)))
return mD.T * (self.W.T * r)
class SparseDeriv(BaseSparse):
"""
Base Class for sparse regularization on first spatial derivatives
"""
def __init__(self, mesh, orientation="x", **kwargs):
self.length_scales = None
self.ratio = 1.0
self.orientation = orientation
super().__init__(mesh=mesh, **kwargs)
mrefInSmooth = properties.Bool(
"include mref in the smoothness calculation?", default=False
)
# Give the option to scale or not
scaledIRLS = properties.Bool("Scale the gradients of the IRLS norms", default=True)
@Utils.timeIt
def __call__(self, m):
"""
We use a weighted 2-norm objective function
.. math::
r(m) = \\frac{1}{2}
"""
if self.mrefInSmooth:
f_m = self._delta_m(m)
else:
f_m = m
if self.scale is None:
self.scale = np.ones(self.mapping.shape[0])
if self.space == "spherical":
Ave = getattr(self.regmesh, f"aveCC2F{self.orientation}")
if getattr(self, "model", None) is None:
R = Utils.speye(self.cellDiffStencil.shape[0])
else:
r = self.R(self.f_m)
R = Utils.sdiag(r)
if self.cell_weights is not None:
W = Utils.sdiag((Ave * (self.scale * self.cell_weights)) ** 0.5) * R
else:
W = Utils.sdiag((Ave * (self.scale * self.regmesh.vol)) ** 0.5) * R
dmdx = self.cellDiffStencil * (self.mapping * f_m)
dmdx = Utils.sdiag(self.length_scales) * coterminal(dmdx)
r = W * dmdx
else:
r = self.W * (self.mapping * f_m)
return 0.5 * r.dot(r)
def R(self, f_m):
# if R is stashed, return that instead
if getattr(self, "stashedR") is not None:
return self.stashedR
# Ave = getattr(self.regmesh, 'aveCC2F{}'.format(self.orientation))
eta = np.ones_like(f_m)
if self.scaledIRLS:
# Eta scaling is important for mix-norms...do not mess with it
maxVal = np.ones_like(f_m) * np.abs(f_m).max()
maxVal[self.norm < 1] = self.epsilon / np.sqrt(
1.0 - self.norm[self.norm < 1]
)
maxGrad = maxVal / (maxVal ** 2.0 + (self.epsilon) ** 2.0) ** (
1.0 - self.norm / 2.0
)
eta[maxGrad != 0] = np.abs(f_m).max() / maxGrad[maxGrad != 0]
r = (
eta / (f_m ** 2.0 + (self.epsilon) ** 2.0) ** (1.0 - self.norm / 2.0)
) ** 0.5
        # Theoretical full deriv for testing
# r = (
# eta * (
# 1. / (f_m**2. + self.epsilon**2.)**(1.-self.norm/2.) +
# (self.norm/2. - 1.) * f_m**2. / (f_m**2. + self.epsilon**2.)**(2.-self.norm/2.)
# )
# )**0.5
# print(eta)
self.stashedR = r # stash on the first calculation
return r
@Utils.timeIt
def deriv(self, m):
"""
The regularization is:
.. math::
R(m) = \\frac{1}{2}\\mathbf{(m-m_\\text{ref})^\\top W^\\top
W(m-m_\\text{ref})}
        So the derivative is straightforward:
        .. math::
            \\frac{\\partial R(m)}{\\partial m} = \\mathbf{W^\\top W (m-m_\\text{ref})}
"""
if self.mrefInSmooth:
f_m = self._delta_m(m)
else:
f_m = m
if self.scale is None:
self.scale = np.ones(self.mapping.shape[0])
if self.space == "spherical":
Ave = getattr(self.regmesh, f"aveCC2F{self.orientation}")
if getattr(self, "model", None) is None:
R = Utils.speye(self.cellDiffStencil.shape[0])
else:
r = self.R(self.f_m)
R = Utils.sdiag(r)
if self.cell_weights is not None:
W = Utils.sdiag((Ave * (self.scale * self.cell_weights)) ** 0.5) * R
else:
W = Utils.sdiag((Ave * (self.scale * self.regmesh.vol)) ** 0.5) * R
dmdx = self.cellDiffStencil * (self.mapping * f_m)
dmdx = Utils.sdiag(self.length_scales) * coterminal(dmdx)
r = self.gamma * W * dmdx
else:
r = self.gamma * self.W * (self.mapping * f_m)
mD = self.mapping.deriv(f_m)
return mD.T * (self.W.T * r)
@property
def _multiplier_pair(self):
return f"alpha_{self.orientation}"
@property
def f_m(self):
if self.mrefInSmooth:
f_m = self._delta_m(self.model)
else:
f_m = self.model
if self.scale is None:
self.scale = np.ones(self.mapping.shape[0])
if self.gradientType == "total":
Ave = getattr(self.regmesh, f"aveCC2F{self.orientation}")
dmdx = self.regmesh.cellDiffxStencil * (self.mapping * f_m)
if self.space == "spherical":
dmdx = coterminal(dmdx)
dmdx = self.regmesh.aveFx2CC * (Utils.sdiag(self.length_x) * dmdx)
if self.regmesh.dim > 1:
dmdy = self.regmesh.cellDiffyStencil * (self.mapping * f_m)
if self.space == "spherical":
dmdy = coterminal(dmdy)
dmdy = self.regmesh.aveFy2CC * (Utils.sdiag(self.length_y) * dmdy)
if self.regmesh.dim > 2:
dmdz = self.regmesh.cellDiffzStencil * (self.mapping * f_m)
if self.space == "spherical":
dmdz = coterminal(dmdz)
dmdz = self.regmesh.aveFz2CC * (Utils.sdiag(self.length_z) * dmdz)
if self.regmesh.dim == 2:
dmdx = (dmdx ** 2.0 + dmdy ** 2.0) ** 0.5
elif self.regmesh.dim == 3:
dmdx = (dmdx ** 2.0 + dmdy ** 2.0 + dmdz ** 2.0) ** 0.5
dmdx = Ave * dmdx
else:
dmdx = self.cellDiffStencil * (self.mapping * f_m)
if self.space == "spherical":
dmdx = Utils.sdiag(self.length_scales) * coterminal(dmdx)
return dmdx
@property
def cellDiffStencil(self):
return getattr(self.regmesh, f"cellDiff{self.orientation}Stencil")
@property
def W(self):
Ave = getattr(self.regmesh, f"aveCC2F{self.orientation}")
if getattr(self, "model", None) is None:
R = Utils.speye(self.cellDiffStencil.shape[0])
else:
r = self.R(self.f_m)
R = Utils.sdiag(r)
if self.scale is None:
self.scale = np.ones(self.mapping.shape[0])
if self.cell_weights is not None:
return (
Utils.sdiag((Ave * (self.scale * self.cell_weights)) ** 0.5)
* R
* Utils.sdiag(self.length_scales)
* self.cellDiffStencil
)
return (
Utils.sdiag((Ave * (self.scale * self.regmesh.vol)) ** 0.5)
* R
* Utils.sdiag(self.length_scales)
* self.cellDiffStencil
)
@property
def length_scales(self):
if getattr(self, "_length_scales", None) is None:
Ave = getattr(self.regmesh, f"aveCC2F{self.orientation}")
index = "xyz".index(self.orientation)
length_scales = Ave * (
self.regmesh.Pac.T * self.regmesh.mesh.h_gridded[:, index]
)
# print(self.orientation, self.ratio)
self._length_scales = self.ratio * length_scales.min() / length_scales
return self._length_scales
@length_scales.setter
def length_scales(self, value):
self._length_scales = value
@property
def length_x(self):
if getattr(self, "_length_x", None) is None:
Ave = getattr(self.regmesh, "aveCC2Fx")
length_scales = Ave * (
self.regmesh.Pac.T * self.regmesh.mesh.h_gridded[:, 0]
)
self._length_x = self.ratio * length_scales.min() / length_scales
return self._length_x
@property
def length_y(self):
if getattr(self, "_length_y", None) is None:
Ave = getattr(self.regmesh, "aveCC2Fy")
length_scales = Ave * (
self.regmesh.Pac.T * self.regmesh.mesh.h_gridded[:, 1]
)
self._length_y = self.ratio * length_scales.min() / length_scales
return self._length_y
@property
def length_z(self):
if getattr(self, "_length_z", None) is None:
Ave = getattr(self.regmesh, "aveCC2Fz")
length_scales = Ave * (
self.regmesh.Pac.T * self.regmesh.mesh.h_gridded[:, 2]
)
self._length_z = self.ratio * length_scales.min() / length_scales
return self._length_z
class Sparse(BaseComboRegularization):
"""
The regularization is:
.. math::
R(m) = \\frac{1}{2}\\mathbf{(m-m_\\text{ref})^\\top W^\\top R^\\top R
W(m-m_\\text{ref})}
where the IRLS weight
.. math::
        R_{ii} = \\sqrt{ \\frac{\\eta_i}{\\left( f_i^2 + \\epsilon^2 \\right)^{1 - p_i/2}} }
    So the derivative is straightforward:
    .. math::
        \\frac{\\partial R(m)}{\\partial m} = \\mathbf{W^\\top R^\\top R W (m-m_\\text{ref})}
    The IRLS weights are recomputed after each beta solve.
    It is strongly recommended to do a few Gauss-Newton iterations
    before updating them.
"""
def __init__(
self, mesh, alpha_s=1.0, alpha_x=1.0, alpha_y=1.0, alpha_z=1.0, **kwargs
):
objfcts = [
SparseSmall(mesh=mesh, **kwargs),
SparseDeriv(mesh=mesh, orientation="x", **kwargs),
]
max_h = np.max(np.hstack(mesh.h))
if mesh.dim > 1:
objfcts.append(SparseDeriv(mesh=mesh, orientation="y", **kwargs))
objfcts[1].ratio = max_h / mesh.hx.min()
objfcts[2].ratio = max_h / mesh.hy.min()
if mesh.dim > 2:
objfcts.append(SparseDeriv(mesh=mesh, orientation="z", **kwargs))
objfcts[3].ratio = max_h / mesh.hz.min()
super().__init__(
mesh=mesh,
objfcts=objfcts,
alpha_s=alpha_s,
alpha_x=alpha_x,
alpha_y=alpha_y,
alpha_z=alpha_z,
**kwargs,
)
self.mesh = mesh
# Utils.setKwargs(self, **kwargs)
# Properties
norms = properties.Array(
"Norms used to create the sparse regularization",
default=np.c_[2.0, 2.0, 2.0, 2.0],
shape={("*", "*")},
)
eps_p = properties.Float("Threshold value for the model norm", required=True)
eps_q = properties.Float(
"Threshold value for the model gradient norm", required=True
)
model = properties.Array("current model", dtype=float)
gamma = properties.Float(
"Model norm scaling to smooth out convergence", default=1.0
)
space = properties.String("type of model", default="linear")
gradientType = properties.String("type of gradient", default="components")
    scales = properties.Array(
        "General knob for scaling", default=np.c_[1.0, 1.0, 1.0, 1.0], shape={("*", "*")}
    )
# Give the option to scale or not
scaledIRLS = properties.Bool("Scale the gradients of the IRLS norms", default=True)
# Save the l2 result during the IRLS
l2model = None
@properties.validator("norms")
def _validate_norms(self, change):
if change["value"].shape[0] == 1:
change["value"] = np.kron(
np.ones((self.regmesh.Pac.shape[1], 1)), change["value"]
)
elif change["value"].shape[0] > 1:
            assert change["value"].shape[0] == self.regmesh.Pac.shape[1], (
                "Vector of norms must be the size of active model parameters ({}). "
                "The provided vector has length "
                "{}".format(self.regmesh.Pac.shape[1], len(change["value"]))
            )
# Observers
@properties.observer("norms")
def _mirror_norms_to_objfcts(self, change):
self.objfcts[0].norm = change["value"][:, 0]
for i, objfct in enumerate(self.objfcts[1:]):
Ave = getattr(objfct.regmesh, f"aveCC2F{objfct.orientation}")
objfct.norm = Ave * change["value"][:, i + 1]
@properties.observer("model")
def _mirror_model_to_objfcts(self, change):
for objfct in self.objfcts:
objfct.model = change["value"]
@properties.observer("gamma")
def _mirror_gamma_to_objfcts(self, change):
for objfct in self.objfcts:
objfct.gamma = change["value"]
@properties.observer("eps_p")
def _mirror_eps_p_to_smallness(self, change):
for objfct in self.objfcts:
if isinstance(objfct, SparseSmall):
objfct.epsilon = change["value"]
@properties.observer("eps_q")
def _mirror_eps_q_to_derivs(self, change):
for objfct in self.objfcts:
if isinstance(objfct, SparseDeriv):
objfct.epsilon = change["value"]
@properties.observer("space")
def _mirror_space_to_objfcts(self, change):
for objfct in self.objfcts:
objfct.space = change["value"]
@properties.observer("scaledIRLS")
def _mirror_scaledIRLS_to_objfcts(self, change):
for objfct in self.objfcts:
objfct.scaledIRLS = change["value"]
@properties.observer("gradientType")
def _mirror_gradientType_to_objfcts(self, change):
for objfct in self.objfcts:
objfct.gradientType = change["value"]
@properties.validator("scales")
def _validate_scales(self, change):
if change["value"].shape[0] == 1:
change["value"] = np.kron(
np.ones((self.regmesh.Pac.shape[1], 1)), change["value"]
)
elif change["value"].shape[0] > 1:
            assert change["value"].shape[0] == self.regmesh.Pac.shape[1], (
                "Vector of scales must be the size of active model parameters ({}). "
                "The provided vector has length "
                "{}".format(self.regmesh.Pac.shape[1], len(change["value"]))
            )
# Observers
@properties.observer("scales")
def _mirror_scale_to_objfcts(self, change):
for i, objfct in enumerate(self.objfcts):
objfct.scale = change["value"][:, i]
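# A hedged sketch (assumption-based, not part of the original module) of how the Sparse
# regularization above is typically configured; the IRLS weights themselves are driven
# by directives outside this module:
def _example_sparse_usage():
    import discretize
    mesh = discretize.TensorMesh([16, 16])
    reg = Sparse(mesh)
    reg.norms = np.c_[0.0, 1.0, 1.0]   # approximate l0 smallness, l1 smoothness in x and y
    reg.eps_p, reg.eps_q = 1e-2, 1e-2  # thresholds for the model and gradient norms
    m = np.random.rand(mesh.nC)
    reg.model = m                      # mirrored to each component so IRLS weights can be built
    return reg(m)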
def coterminal(theta):
    """Compute the coterminal angle so that each angle lies in [-pi, pi]."""
sub = theta[np.abs(theta) >= np.pi]
sub = -np.sign(sub) * (2 * np.pi - np.abs(sub))
theta[np.abs(theta) >= np.pi] = sub
return theta
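# For example (a quick, hedged check of the wrapping behaviour):
#   coterminal(np.array([3 * np.pi / 2, -3 * np.pi / 2]))  # -> approximately [-pi/2, +pi/2]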
def ddx(n, vals):
    """Build an n x n tridiagonal operator with the given values on the -1, 0, +1 diagonals."""
ddx = sp.spdiags((np.ones((n, 1)) * vals).T, [-1, 0, 1], n, n, format="csr")
return ddx
def getDiffOpRot(mesh, psi, theta, phi, vec, forward=True):
import scipy as sp
assert mesh.dim > 1, "Only for mesh 2D and 3D"
def getCellNeighbors(mesh):
Dx = mesh._cellGradxStencil
Dy = mesh._cellGradyStencil
# Get the current IJ of the stencil derive
Ix, Jx, _ = sp.sparse.find(Dx)
Iy, Jy, _ = sp.sparse.find(Dy)
jx = np.sort(Jx[np.argsort(Ix)].reshape((int(Ix.shape[0] / 2), 2)), axis=1)
jy = np.sort(Jy[np.argsort(Iy)].reshape((int(Iy.shape[0] / 2), 2)), axis=1)
jx_bck = np.c_[jx[:, 1], jx[:, 0]]
jy_bck = np.c_[jy[:, 1], jy[:, 0]]
maxInd = np.max([jx.max(), jy.max()])
if mesh.dim == 3:
Dz = mesh._cellGradzStencil
Iz, Jz, _ = sp.sparse.find(Dz)
jz = np.sort(Jz[np.argsort(Iz)].reshape((int(Iz.shape[0] / 2), 2)), axis=1)
jz_bck = np.c_[jz[:, 1], jz[:, 0]]
maxInd = np.max([jz.max(), maxInd])
# Cycle through the gradients forward and backward to deal with multiple
# levels on Tree mesh
# Pre-allocate index arrays
jAll = [] # Store
div_xy = np.ones(maxInd + 1, dtype="int") * -1
div_yx = np.ones(maxInd + 1, dtype="int") * -1
div_xyb = np.ones(maxInd + 1, dtype="int") * -1
div_yxb = np.ones(maxInd + 1, dtype="int") * -1
div_xy[jy[:, 0]] = jy[:, 1] # Find y neigbour of x adjacent
div_yx[jx[:, 1]] = jx[:, 0] # Find x neigbour of y adjacent
div_xyb[jy_bck[:, 0]] = jy_bck[:, 1] # Find y neigbour of x adjacent backward
div_yxb[jx_bck[:, 1]] = jx_bck[:, 0] # Find x neigbour of y adjacent backward
jAll += [jx]
jAll += [jy]
jAll += [np.c_[jx[:, 0], div_xy[jx[:, 1]]]]
jAll += [np.c_[jx[:, 1], div_xy[jx[:, 0]]]]
jAll += [np.c_[div_yx[jy[:, 0]], jy[:, 1]]]
jAll += [np.c_[div_yx[jy[:, 1]], jy[:, 0]]]
# Repeat backward for Treemesh
jAll += [jx_bck]
jAll += [jy_bck]
jAll += [np.c_[jx_bck[:, 0], div_xyb[jx_bck[:, 1]]]]
jAll += [np.c_[jx_bck[:, 1], div_xyb[jx_bck[:, 0]]]]
# Stack all and keep only unique pairs
jAll = np.vstack(jAll)
jAll = np.unique(jAll, axis=0)
# Remove all the -1 for TreeMesh
jAll = jAll[(jAll[:, 0] != -1) & (jAll[:, 1] != -1), :]
# Use all the neighbours on the xy plane to find neighbours in z
if mesh.dim == 3:
jAllz = []
div_z = np.ones(maxInd + 1, dtype="int") * -1
div_zb = np.ones(maxInd + 1, dtype="int") * -1
div_z[jz[:, 0]] = jz[:, 1]
div_zb[jz_bck[:, 0]] = jz_bck[:, 1]
jAllz += [jz]
jAllz += [jz_bck]
jAllz += [np.c_[jAll[:, 0], div_z[jAll[:, 1]]]]
jAllz += [np.c_[jAll[:, 1], div_z[jAll[:, 0]]]]
jAllz += [np.c_[jAll[:, 0], div_zb[jAll[:, 1]]]]
jAllz += [np.c_[jAll[:, 1], div_zb[jAll[:, 0]]]]
# Stack all and keep only unique pairs
jAll = np.vstack([jAll, np.vstack(jAllz)])
jAll = np.unique(jAll, axis=0)
# Remove all the -1 for TreeMesh
jAll = jAll[(jAll[:, 0] != -1) & (jAll[:, 1] != -1), :]
return jAll
hx = mesh.h_gridded[:, 0]
hy = mesh.h_gridded[:, 1]
if isinstance(phi, float):
phi = np.ones(mesh.nC) * phi
phi = np.arctan2((np.sin(phi) / hy), (np.cos(phi) / hx))
if mesh.dim == 3:
hz = mesh.h_gridded[:, 2]
if isinstance(theta, float):
theta = np.ones(mesh.nC) * theta
theta = np.arctan2((np.sin(theta) / hz), (np.cos(theta) / hx))
if isinstance(psi, float):
psi = np.ones(mesh.nC) * psi
psi = np.arctan2((np.sin(psi) / hz), (np.cos(psi) / hy))
if forward:
ind = 1
else:
ind = -1
if mesh.dim == 2:
if vec == "X":
px = np.kron(np.ones(mesh.nC), np.c_[ind, 0])
elif vec == "Y":
px = np.kron(np.ones(mesh.nC), np.c_[0, ind])
if mesh.dim == 3:
if vec == "X":
px = np.kron(np.ones(mesh.nC), np.c_[ind, 0, 0])
elif vec == "Y":
px = np.kron(np.ones(mesh.nC), np.c_[0, ind, 0])
else:
px = np.kron(np.ones(mesh.nC), np.c_[0, 0, ind])
if mesh.dim == 2:
rza = mkvc(np.c_[np.cos(phi), np.cos(phi)].T)
rzb = mkvc(np.c_[np.sin(phi), np.zeros(mesh.nC)].T)
rzc = mkvc(np.c_[-np.sin(phi), np.zeros(mesh.nC)].T)
Rz = sp.sparse.diags([rzb[:-1], rza, rzc[:-1]], [-1, 0, 1])
rx = (Rz * px.T).reshape((mesh.nC, 2))
else:
# Create sparse rotation operators
rxa = mkvc(np.c_[np.ones(mesh.nC), np.cos(psi), np.cos(psi)].T)
rxb = mkvc(np.c_[np.zeros(mesh.nC), np.sin(psi), np.zeros(mesh.nC)].T)
rxc = mkvc(np.c_[np.zeros(mesh.nC), -np.sin(psi), np.zeros(mesh.nC)].T)
Rx = sp.sparse.diags([rxb[:-1], rxa, rxc[:-1]], [-1, 0, 1])
rya = mkvc(np.c_[np.cos(theta), np.ones(mesh.nC), np.cos(theta)].T)
ryb = mkvc(np.c_[np.sin(theta), np.zeros(mesh.nC), np.zeros(mesh.nC)].T)
ryc = mkvc(np.c_[-np.sin(theta), np.zeros(mesh.nC), np.zeros(mesh.nC)].T)
Ry = sp.sparse.diags([ryb[:-2], rya, ryc[:-2]], [-2, 0, 2])
rza = mkvc(np.c_[np.cos(phi), np.cos(phi), np.ones(mesh.nC)].T)
rzb = mkvc(np.c_[np.sin(phi), np.zeros(mesh.nC), np.zeros(mesh.nC)].T)
rzc = mkvc(np.c_[-np.sin(phi), np.zeros(mesh.nC), np.zeros(mesh.nC)].T)
Rz = sp.sparse.diags([rzb[:-1], rza, rzc[:-1]], [-1, 0, 1])
# Rotate all cell vectors
rx = (Rz * (Ry * (Rx * px.T))).reshape((mesh.nC, 3))
jd = getCellNeighbors(mesh)
# Move the bottom-SW and top-NE nodes of stencil cell
nBSW = (
mesh.gridCC[jd[:, 0], :]
- mesh.h_gridded[jd[:, 0], :] / 2
+ rx[jd[:, 0], :] * mesh.h_gridded[jd[:, 0], :]
)
nTNE = (
mesh.gridCC[jd[:, 0], :]
+ mesh.h_gridded[jd[:, 0], :] / 2
+ rx[jd[:, 0], :] * mesh.h_gridded[jd[:, 0], :]
)
# Get corners for neighbours
sBSW = mesh.gridCC[jd[:, 1], :] - mesh.h_gridded[jd[:, 1], :] / 2
sTNE = mesh.gridCC[jd[:, 1], :] + mesh.h_gridded[jd[:, 1], :] / 2
# Compute fractional volumes with base stencil
V = np.max(
[
np.min([sTNE[:, 0], nTNE[:, 0]], axis=0)
- np.max([sBSW[:, 0], nBSW[:, 0]], axis=0),
np.zeros(jd.shape[0]),
],
axis=0,
) * np.max(
[
np.min([sTNE[:, 1], nTNE[:, 1]], axis=0)
- np.max([sBSW[:, 1], nBSW[:, 1]], axis=0),
np.zeros(jd.shape[0]),
],
axis=0,
)
if mesh.dim == 3:
V *= np.max(
[
np.min([sTNE[:, 2], nTNE[:, 2]], axis=0)
- np.max([sBSW[:, 2], nBSW[:, 2]], axis=0),
np.zeros(jd.shape[0]),
],
axis=0,
)
# Remove all rows of zero
ind = (V > 0) * (jd[:, 0] != jd[:, 1])
jd = jd[ind, :]
V = V[ind]
Dx2 = sp.sparse.csr_matrix((V, (jd[:, 0], jd[:, 1])), shape=(mesh.nC, mesh.nC))
# Normalize rows
V = mkvc(sp.sum(Dx2, axis=1))
V[V > 0] = 1.0 / V[V > 0]
Dx2 = -sdiag(V) * Dx2
diag = np.ones(mesh.nC)
diag[V == 0] = 0
Dx = sdiag(diag) + Dx2
return Dx
|
from flask import Flask, jsonify, request
from flask_restful import Api, Resource
from pymongo import MongoClient
import bcrypt
import logging
logging.basicConfig(level=logging.DEBUG,format='%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] [%(thread)d] - %(message)s',datefmt='%d/%m/%Y %H:%M:%S',filename='flask.log')
from logging.handlers import TimedRotatingFileHandler
app = Flask(__name__)
api = Api(app)
client = MongoClient("mongodb://my_db:27017")
db = client.projectDB
users = db["Users"]
"""
HELPER FUNCTIONS
"""
def userExist(username):
    return users.count_documents({"Username": username}) > 0
def verifyUser(username, password):
if not userExist(username):
return False
    user_hashed_pw = users.find_one({"Username": username})["Password"]
    return bcrypt.checkpw(password.encode('utf8'), user_hashed_pw)
def getUserMessages(username):
# get the messages
    return users.find_one({"Username": username})["Messages"]
"""
RESOURCES
"""
class Hello(Resource):
def get(self):
app.logger.info("Info message")
app.logger.warning("Warning msg")
app.logger.error("Error msg!!!")
return "Hello World!"
class Register(Resource):
def post(self):
# Get posted data from request
data = request.get_json()
# get data
username = data["username"]
password = data["password"]
# check if user exists
if userExist(username):
retJson = {
"status": 301,
"msg": "Invalid Username"
}
return jsonify(retJson)
# encrypt password
hashed_pw = bcrypt.hashpw(password.encode('utf8'), bcrypt.gensalt())
# Insert record
        users.insert_one({
"Username": username,
"Password": hashed_pw,
"Messages": []
})
# Return successful result
        retJson = {
"status": 200,
"msg": "Registration successful"
}
        return jsonify(retJson)
class Retrieve(Resource):
def post(self):
# Get posted data from request
data = request.get_json()
# get data
username = data["username"]
password = data["password"]
# check if user exists
if not userExist(username):
retJson = {
"status": 301,
"msg": "Invalid Username"
}
return jsonify(retJson)
# check password
correct_pw = verifyUser(username, password)
if not correct_pw:
retJson = {
"status": 302,
"msg": "Invalid password"
}
return jsonify(retJson)
# get the messages
messages = getUserMessages(username)
# Build successful response
retJson = {
"status": 200,
"obj": messages
}
return jsonify(retJson)
class Save(Resource):
def post(self):
# Get posted data from request
data = request.get_json()
# get data
username = data["username"]
password = data["password"]
message = data["message"]
# check if user exists
if not userExist(username):
retJson = {
"status": 301,
"msg": "Invalid Username"
}
return jsonify(retJson)
# check password
correct_pw = verifyUser(username, password)
if not correct_pw:
retJson = {
"status": 302,
"msg": "Invalid password"
}
return jsonify(retJson)
if not message:
retJson = {
"status": 303,
"msg": "Please supply a valid message"
}
return jsonify(retJson)
# get the messages
messages = getUserMessages(username)
# add new message
messages.append(message)
# save the new user message
        users.update_one({
"Username": username
}, {
"$set": {
"Messages": messages
}
})
retJson = {
"status": 200,
"msg": "Message has been saved successfully"
}
return jsonify(retJson)
api.add_resource(Hello, '/hello')
api.add_resource(Register, '/register')
api.add_resource(Retrieve, '/retrieve')
api.add_resource(Save, '/save')
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=False)
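# Hedged usage sketch for the API above with the `requests` library (assumption: the
# service is running locally on Flask's default port 5000):
#
#   import requests
#   base = "http://localhost:5000"
#   requests.post(base + "/register", json={"username": "alice", "password": "s3cret"})
#   requests.post(base + "/save", json={"username": "alice", "password": "s3cret", "message": "hello"})
#   r = requests.post(base + "/retrieve", json={"username": "alice", "password": "s3cret"})
#   print(r.json())  # {"status": 200, "obj": ["hello"]}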
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Implements the Generalized R-CNN framework
"""
import torch
from torch import nn
from maskrcnn_benchmark.structures.image_list import to_image_list
from ..backbone import build_backbone
from ..rpn.rpn import build_rpn
from ..roi_heads.roi_heads import build_roi_heads
class GeneralizedRCNN(nn.Module):
"""
Main class for Generalized R-CNN. Currently supports boxes and masks.
It consists of three main parts:
- backbone
- rpn
- heads: takes the features + the proposals from the RPN and computes
detections / masks from it.
"""
def __init__(self, cfg):
super(GeneralizedRCNN, self).__init__()
self.backbone = build_backbone(cfg)
self.rpn = build_rpn(cfg, self.backbone.out_channels)
self.roi_heads = build_roi_heads(cfg, self.backbone.out_channels)
def forward(self, template_images, template_targets, search_images, search_targets=None):
"""
Arguments:
            template_images, search_images (list[Tensor] or ImageList): images to be processed
            template_targets, search_targets (list[BoxList]): ground-truth boxes present in the images (search_targets is optional at inference)
Returns:
result (list[BoxList] or dict[Tensor]): the output from the model.
During training, it returns a dict[Tensor] which contains the losses.
During testing, it returns list[BoxList] contains additional fields
like `scores`, `labels` and `mask` (for Mask R-CNN models).
"""
if self.training and search_targets is None:
raise ValueError("In training mode, targets should be passed")
template_images = to_image_list(template_images)
template_features = self.backbone(template_images.tensors)
template_feature = template_features[-1] # 7*7
search_images = to_image_list(search_images)
search_features = self.backbone(search_images.tensors) # list of tensors
proposals, proposal_losses = self.rpn(template_images, template_feature, template_targets,
search_images, search_features, search_targets)
if self.roi_heads:
x, result, detector_losses = self.roi_heads(template_feature, search_features, proposals, search_targets)
else:
# RPN-only models don't have roi_heads
            x = search_features
result = proposals
detector_losses = {}
if self.training:
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
return losses
return result
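# Hedged usage sketch (assumption: `cfg` is a maskrcnn_benchmark config node and the
# template/search inputs follow the conventions documented in forward()):
#
#   model = GeneralizedRCNN(cfg)
#   model.train()
#   losses = model(template_images, template_targets, search_images, search_targets)
#   total_loss = sum(loss for loss in losses.values())   # dict[Tensor] during training
#
#   model.eval()
#   with torch.no_grad():
#       detections = model(template_images, template_targets, search_images)   # list[BoxList]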
|
from selenium.webdriver.common.by import By
class LoginPageLocators:
USER_NAME_INPUT = (By.NAME, "email")
PASSWORD_INPUT = (By.NAME, "password")
H1 = (By.TAG_NAME, "H1")
LOGIN_BUTTON = (By.NAME, "login")
class NavigationLocators:
LOG_OUT_LINK = (By.XPATH, "//button[text()='Sign out']")
class HeaderLocators:
SEARCH_INPUT = (By.ID, "site-search-text")
SEARCH_SUBMIT = (By.CSS_SELECTOR, ".eff-search__input--submit")
class FooterLinkLocators:
CMS_LINK = (By.LINK_TEXT, "CMS")
class PageLinkLocators:
HOME_BREADCRUMB = (By.LINK_TEXT, "Home")
NEW_MEASURE = (By.LINK_TEXT, "Add a measure")
@staticmethod
def page_link(link_text):
return By.LINK_TEXT, link_text
@staticmethod
def breadcrumb_link(page):
return By.LINK_TEXT, page.title
class MeasureActionLocators:
@staticmethod
def view_link(measure):
print("measure_action__view-%s" % measure.guid)
return By.ID, "measure_action__view-%s" % measure.guid
@staticmethod
def delete_link(measure):
return By.ID, "measure_action__delete-%s" % measure.guid
@staticmethod
def title_link(measure):
return By.LINK_TEXT, measure.title
class CreateMeasureLocators:
TITLE_INPUT = (By.NAME, "title")
SAVE_BUTTON = (By.NAME, "save")
class EditMeasureLocators:
@staticmethod
def lowest_level_of_geography_radio_button(index_value):
# index_value should be in the range 0 to 8 - (as per `lowest_level_of_geography` table per 2018-11-19)
return (By.ID, "lowest_level_of_geography_id-%s" % str(index_value))
STATUS_LABEL = (By.ID, "status")
LOWEST_LEVEL_OF_GEOGRAPHY_RADIO = (By.XPATH, "//*[@type='radio']")
SAVE_BUTTON = (By.NAME, "save")
SAVE_AND_REVIEW_BUTTON = (By.NAME, "save-and-review")
SEND_TO_DEPARTMENT_REVIEW_BUTTON = (By.ID, "send-to-department-review")
REJECT_BUTTON = (By.ID, "reject-measure")
SEND_TO_DRAFT_BUTTON = (By.ID, "send-back-to-draft")
CREATE_PRIMARY_DATA_SOURCE = (By.XPATH, "//a[normalize-space(.)='Add primary data source information']")
CREATE_SECONDARY_DATA_SOURCE = (By.XPATH, "//a[normalize-space(.)='Add secondary data source information']")
REMOVE_PRIMARY_DATA_SOURCE = (By.XPATH, "//button[normalize-space(.)='Remove primary data source']")
REMOVE_SECONDARY_DATA_SOURCE = (By.XPATH, "//button[normalize-space(.)='Remove secondary data source']")
SEND_TO_APPROVED = (By.ID, "send-to-approved")
UPDATE_MEASURE = (By.LINK_TEXT, "Update")
DEPARTMENT_REVIEW_LINK = (By.ID, "review-link-url")
PREVIEW_LINK = (By.NAME, "preview")
ADD_DIMENSION_LINK = (By.LINK_TEXT, "Add dimension")
ADD_SOURCE_DATA_LINK = (By.LINK_TEXT, "Add data file (CSV)")
PUBLISHED_AT_DATE_PICKER = (By.NAME, "published_at")
PUBLISHED_LABEL = (By.NAME, "published")
TITLE_INPUT = (By.NAME, "title")
DESCRIPTION_TEXTAREA = (By.NAME, "description")
MEASURE_SUMMARY_TEXTAREA = (By.NAME, "measure_summary")
SUMMARY_TEXTAREA = (By.NAME, "summary")
GEOGRAPHIC_COVERAGE_TEXTAREA = (By.NAME, "geographic_coverage")
LOWEST_LEVEL_OF_GEOGRAPHY_TEXTAREA = (By.NAME, "lowest_level_of_geography")
TIME_COVERED_TEXTAREA = (By.NAME, "time_covered")
NEED_TO_KNOW_TEXTAREA = (By.NAME, "need_to_know")
ETHNICITY_DEFINITION_DETAIL_TEXTAREA = (By.NAME, "ethnicity_definition_detail")
ETHNICITY_SUMMARY_DETAIL_TEXTAREA = (By.NAME, "ethnicity_definition_summary")
RELATED_PUBLICATIONS_TEXTAREA = (By.NAME, "related_publications")
METHODOLOGY_TEXTAREA = (By.NAME, "methodology")
DATA_TYPE_INPUT = (By.NAME, "data_type")
SUPPRESSION_RULES_TEXTAREA = (By.NAME, "suppression_rules")
DISCLOSURE_CONTROLS_TEXTAREA = (By.NAME, "disclosure_controls")
ESTIMATION_TEXTAREA = (By.NAME, "estimation")
TYPE_OF_STATISTIC_INPUT = (By.NAME, "type_of_statistic")
QMI_URL_INPUT = (By.NAME, "qmi_url")
FURTHER_TECHNICAL_INFORMATION_INPUT = (By.NAME, "further_technical_information")
UPDATE_CORRECTS_DATA_MISTAKE = (By.NAME, "update_corrects_data_mistake")
    UPDATE_CORRECTS_MEASURE_VERSION = (By.NAME, "update_corrects_measure_version")
EXTERNAL_EDIT_SUMMARY = (By.ID, "external_edit_summary")
class DimensionPageLocators:
TITLE_INPUT = (By.NAME, "title")
TIME_PERIOD_INPUT = (By.NAME, "time_period")
SUMMARY_TEXTAREA = (By.NAME, "summary")
SUPPRESSION_RULES_TEXTAREA = (By.NAME, "suppression_rules")
DISCLOSURE_CONTROL_TEXTAREA = (By.NAME, "disclosure_control")
TYPE_OF_STATISTIC_INPUT = (By.NAME, "type_of_statistic")
LOCATION_INPUT = (By.NAME, "location")
SOURCE_INPUT = (By.NAME, "source")
SAVE_BUTTON = (By.NAME, "save")
UPDATE_BUTTON = (By.NAME, "update")
CREATE_CHART = (By.ID, "create_chart")
CREATE_TABLE = (By.ID, "create_table")
class DataSourceSearchPageLocations:
SEARCH_BUTTON = (By.XPATH, "//button[normalize-space(.)='Search']")
SELECT_BUTTON = (By.XPATH, "//button[normalize-space(.)='Select']")
class CreateDataSourcePageLocators:
SAVE_BUTTON = (By.XPATH, "//button[normalize-space(.)='Save']")
TITLE_TEXTAREA = (By.NAME, "title")
SOURCE_URL_INPUT = (By.NAME, "source_url")
PUBLISHER_ID_INPUT = (By.ID, "publisher_id")
PUBLICATION_DATE_INPUT = (By.NAME, "publication_date")
FREQUENCY_INPUT = (By.NAME, "frequency_of_release_id")
PURPOSE_TEXTAREA = (By.NAME, "purpose")
@staticmethod
def frequency_radio_button(index_value):
# index_value should be in the range 0 to 11 - (as per `frequency_of_release` table per 2018-11-19)
return By.ID, f"frequency_of_release_id-{index_value}"
@staticmethod
def type_of_data_checkbox(index_value):
# index_value should be in the range 0 to 1 - (as per application.cms.models.TypeOfData per 2018-11-19)
return By.ID, f"type_of_data-{index_value}"
@staticmethod
def type_of_statistic_radio_button(index_value):
# index_value should be in the range 0 to 4 - (as per `type_of_statistic` table per 2018-11-19)
return By.ID, f"type_of_statistic_id-{index_value}"
class SourceDataPageLocators:
FILE_UPLOAD_INPUT = (By.NAME, "upload")
TITLE_INPUT = (By.NAME, "title")
DESCRIPTION_TEXTAREA = (By.NAME, "description")
SAVE_BUTTON = (By.NAME, "save")
class ChartBuilderPageLocators:
DATA_TEXT_AREA = (By.ID, "data_text_area")
CHART_TYPE_SELECTOR = (By.ID, "chart_type_selector")
BAR_CHART_PRIMARY = (By.ID, "primary_column")
BAR_CHART_SECONDARY = (By.ID, "secondary_column")
BAR_CHART_ORDER = (By.ID, "order_column")
OPTIONS_CHART_TITLE = (By.ID, "chart_title")
OPTIONS_X_AXIS = (By.ID, "x_axis_label")
OPTIONS_Y_AXIS = (By.ID, "y_axis_label")
OPTIONS_NUMBER_FORMAT = (By.ID, "number_format")
CHART_PREVIEW = (By.ID, "preview")
CHART_SAVE = (By.ID, "save")
CHART_BACK = (By.ID, "exit")
CHART_DATA_OK = (By.ID, "confirm-data")
CHART_DATA_CANCEL = (By.ID, "cancel-edit-data")
CHART_EDIT_DATA = (By.ID, "edit-data")
PANEL_BAR_CHART_PRIMARY = (By.ID, "panel_primary_column")
PANEL_BAR_CHART_SECONDARY = (By.ID, "panel_grouping_column")
CHART_ETHNICITY_SETTINGS = (By.ID, "ethnicity_settings")
CUSTOM_CLASSIFICATION_PANEL = (By.ID, "custom_classification__panel")
CHART_LINE_X_AXIS = (By.ID, "line__x-axis_column")
CHART_GROUPED_BAR_DATA_STYLE = (By.ID, "grouped-bar__data_style")
CHART_GROUPED_BAR_COLUMN = (By.ID, "grouped-bar__bar_column")
CHART_GROUPED_GROUPS_COLUMN = (By.ID, "grouped-bar__groups_column")
CHART_COMPONENT_DATA_STYLE = (By.ID, "component__data_style")
CHART_COMPONENT_SECTION_COLUMN = (By.ID, "component__section_column")
CHART_COMPONENT_BAR_COLUMN = (By.ID, "component__bar_column")
CHART_PANEL_DATA_STYLE = (By.ID, "panel-bar__data_style")
CHART_PANEL_BAR_COLUMN = (By.ID, "panel-bar__bar_column")
CHART_PANEL_PANEL_COLUMN = (By.ID, "panel-bar__panel_column")
CHART_PANEL_X_AXIS_COLUMN = (By.ID, "panel-line__x-axis_column")
class TableBuilderPageLocators:
DATA_TEXT_AREA = (By.ID, "data_text_area")
TABLE_TITLE_BOX = (By.ID, "table_title")
ROWS_SELECTOR = (By.ID, "table_category_column")
GROUPING_SELECTOR = (By.ID, "table_group_column")
TABLE_PREVIEW = (By.ID, "preview")
TABLE_SAVE = (By.ID, "save")
TABLE = (By.ID, "container")
TABLE_ERROR_CONTAINER = (By.ID, "error_container")
COLUMN_SELECTOR_1 = (By.ID, "table_column_1")
COLUMN_SELECTOR_2 = (By.ID, "table_column_2")
COLUMN_SELECTOR_3 = (By.ID, "table_column_3")
COLUMN_SELECTOR_4 = (By.ID, "table_column_4")
COLUMN_SELECTOR_5 = (By.ID, "table_column_5")
INDEX_COLUMN_NAME = (By.ID, "index_column_name")
TABLE_DATA_OK = (By.ID, "confirm-data")
TABLE_DATA_CANCEL = (By.ID, "cancel-edit-data")
TABLE_DATA_EDIT = (By.ID, "edit-data")
TABLE_ETHNICITY_SETTINGS = (By.ID, "ethnicity_settings")
CUSTOM_CLASSIFICATION_PANEL = (By.ID, "custom_classification__panel")
COMPLEX_TABLE_DATA_STYLE = (By.ID, "complex-table__data-style")
COMPLEX_TABLE_COLUMNS = (By.ID, "ethnicity-as-row__columns")
COMPLEX_TABLE_ROWS = (By.ID, "ethnicity-as-column__rows")
class TopicPageLocators:
@staticmethod
def get_accordion(data_event_text):
return By.XPATH, "//h2[contains(., '%s')]" % data_event_text
@staticmethod
def get_add_measure_link(link_text):
return By.XPATH, "//a[contains(., 'Create a new page')]"
@staticmethod
def get_measure_link(measure):
return By.LINK_TEXT, measure.title
@staticmethod
def get_measure_edit_link(measure):
return By.ID, "measure-action-section__edit_button-%s" % measure.id
@staticmethod
def get_measure_view_form_link(measure):
return By.ID, "measure-action-section__view_form_link-%s" % measure.id
@staticmethod
def get_measure_create_new_link(measure):
return By.ID, "measure-action-section__create_new_link-%s" % measure.id
@staticmethod
def get_measure_delete_link(measure):
return By.ID, "measure-action-section__delete_button-%s" % measure.id
@staticmethod
def get_measure_confirm_yes_radio(measure):
return By.ID, "delete-radio-yes-%s" % measure.id
@staticmethod
def get_measure_confirm_no_radio(measure):
        return By.ID, "delete-radio-no-%s" % measure.id
@staticmethod
def get_measure_confirm_delete_button(measure):
return By.ID, "delete-confirm-button-%s" % measure.id
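# Hedged usage sketch: each locator is a (By, value) tuple meant to be unpacked into
# Selenium's finder methods (assumption: a `driver` WebDriver instance exists):
#
#   driver.find_element(*LoginPageLocators.USER_NAME_INPUT).send_keys("user@example.com")
#   driver.find_element(*LoginPageLocators.PASSWORD_INPUT).send_keys("password")
#   driver.find_element(*LoginPageLocators.LOGIN_BUTTON).click()
#   driver.find_element(*EditMeasureLocators.lowest_level_of_geography_radio_button(0)).click()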
|
from .alexnet import AlexNet
from .lenet import LeNet5
from .mobilenet_v2 import MobileNetV2
from .mobilenet_v3 import MobileNetv3
from .regnet import RegNet
from .resnest import ResNeSt
from .resnet import ResNet, ResNetV1d
from .resnet_cifar import ResNet_CIFAR
from .resnext import ResNeXt
from .seresnet import SEResNet
from .seresnext import SEResNeXt
from .shufflenet_v1 import ShuffleNetV1
from .shufflenet_v2 import ShuffleNetV2
from .res2net import Res2Net
from .sk2net import SK2Net
from .vgg import VGG
from .sk2net_cifar import SK2Net_CIFAR
from .resnext_cifar import *
from .res2next_cifar import Res2NeXt29
from .sk2next_cifar import SK2NeXt29
from .seres2next_cifar import SERes2NeXt29
from .sk2resnest import SK2ResNeSt
from .cyclenet import CycLeNet
from .repvggnet import RepVGGNet
from .skresnest import SKResNeSt
from .sk2resnet import SK2ResNet
from .sacresnet import SACResNet
from .skresnet import SKResNet
__all__ = [
'LeNet5', 'AlexNet', 'VGG', 'RegNet', 'ResNet', 'ResNeXt', 'ResNetV1d',
'ResNeSt', 'ResNet_CIFAR', 'SEResNet', 'SEResNeXt', 'ShuffleNetV1',
'ShuffleNetV2', 'MobileNetV2', 'MobileNetv3', 'SK2Net', 'Res2Net',
'SK2Net_CIFAR'
]
|
"""
Service-related Mongoengine document definitions
"""
import mongoengine
from mongoengine import (
EmbeddedDocumentField,
EmbeddedDocumentListField,
ListField,
ReferenceField,
StringField,
UUIDField,
)
from gatekeeper.models.base import TimestampsDocument, UserIdMixin, UuidDocument
from gatekeeper.models.descriptors import DescriptorSnapshot
from gatekeeper.util.mongoengine_custom_json import CustomJsonRules
class ServiceInstance(UuidDocument, TimestampsDocument, UserIdMixin):
status = StringField(required=True)
message = StringField()
correlationId = UUIDField(required=True, custom_json=CustomJsonRules.HIDDEN)
internalId = UUIDField(custom_json=CustomJsonRules.HIDDEN)
class Service(UuidDocument, TimestampsDocument, UserIdMixin):
"""
Document class for services. A `Service` contains snapshots of all descriptors
required to instantiate it, as well as information on its service instances.
"""
descriptor = EmbeddedDocumentField(DescriptorSnapshot, required=True)
functionDescriptors = EmbeddedDocumentListField(DescriptorSnapshot, required=True)
vendor = StringField(required=True)
name = StringField(required=True)
version = StringField(required=True)
instances = ListField(
ReferenceField(ServiceInstance, reverse_delete_rule=mongoengine.PULL),
custom_json=CustomJsonRules.HIDDEN,
)
|
import os
from os import path
from zipfile import ZipFile
import cv2
from glob import glob
from tqdm import tqdm
import numpy as np
import shutil
FOLDER_NAME = "DATA/"
def __is_picture(file):
"""
It returns whether the processed file is a jpg picture or not.
    :param file: path of the file to check
:return: A boolean indicating whether the file is a jpg image or not
"""
    return file.lower().endswith(("jpg", "jpeg"))
def __unzip_folders(path):
"""
    It unzips all the compressed files inside a directory
    and removes the zipped folders
    :param path: directory containing the zipped folders
:return: None
"""
print("Unzipping folders...")
# Unzip folders
all_zip_files = os.listdir(path)
    for zip_file in all_zip_files:
        if zip_file.endswith("zip"):
            shutil.unpack_archive(path + zip_file)
print("Removing ZIP folders...")
# Remove Zipped folders
    all_files = os.listdir(path)
for file_name in all_files:
if file_name.endswith("zip"):
os.remove(path + file_name)
def load_datasets(path="./", im_size=(128, 128)):
"""
High-level function for creating Numpy Arrays from Zip Files
:param path: path where the zip files are stored
:param im_size: Image size of the loaded picures. Width and height
must be positive.
:return: Numpy arrays for both the images (X) and the labels (y)
=========
Example:
=========
X, y = load_datasets(".", (256, 256))
"""
if im_size[0] <= 0 or im_size[1] <= 0:
return -1
__unzip_folders(path)
print("Loading Datasets...")
# Create index for transform labels into class numbers
tag2idx = {tag.split("/")[1]: i for i, tag in enumerate(glob(path + "*"))}
im_path = path + "*/*"
print("Loading data...")
# Create X array from images with OpenCV
X = np.array(
[
cv2.resize(cv2.imread(file_path), im_size)
for file_path in tqdm(glob(im_path))
if __is_picture(file_path)
]
)
# Create y array from index
y = [
tag2idx[file_path.split("/")[1]]
for file_path in glob(im_path)
if __is_picture(file_path)
]
# Transform y array into a categorical array
y = np.eye(len(np.unique(y)))[y].astype(np.uint8)
return X, y
|
import unittest
class TestImport(unittest.TestCase):
def test_import_general(self):
import jellylamp
def test_import_version(self):
from jellylamp import __version__
print(__version__)
if __name__ == '__main__':
unittest.main()
|
import numpy as np
n = tuple(map(int, input().strip().split()))
print(np.zeros(n, int))
print(np.ones(n, int)) |
def update_dict(params, keys, value):
if len(keys) == 1:
params[keys[0]] = value
else:
update_dict(params[keys[0]], keys[1:], value)
def flatten(nested_list):
    return [item for sublist in nested_list for item in sublist]
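# A small, hedged usage sketch for the helpers above (not part of the original module):
def _example_usage():
    params = {"model": {"encoder": {"dropout": 0.1}}}
    update_dict(params, ["model", "encoder", "dropout"], 0.5)
    assert params["model"]["encoder"]["dropout"] == 0.5
    assert flatten([[1, 2], [3], []]) == [1, 2, 3]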
|
"""Test hook for verifying the consistency and integrity of collection and index data."""
import os.path
from buildscripts.resmokelib.testing.hooks import jsfile
class ValidateCollections(jsfile.PerClusterDataConsistencyHook):
"""Run full validation.
This will run on all collections in all databases on every stand-alone
node, primary replica-set node, or primary shard node.
"""
IS_BACKGROUND = False
def __init__( # pylint: disable=super-init-not-called
self, hook_logger, fixture, shell_options=None):
"""Initialize ValidateCollections."""
description = "Full collection validation"
js_filename = os.path.join("jstests", "hooks", "run_validate_collections.js")
jsfile.JSHook.__init__( # pylint: disable=non-parent-init-called
self, hook_logger, fixture, js_filename, description, shell_options=shell_options)
|
from __future__ import annotations
from typing import Dict, Union, Any, Sequence
import collections
import boto3
from .enums import BILLING_MODE, RETURN_VALUES, DATATYPE, STREAM_VIEW, SSE_TYPE
dynamodb = boto3.resource("dynamodb")
class KeyDefinition:
DATATYPE = DATATYPE
name: str
datatype: DATATYPE
def __init__(self, name: str, datatype: DATATYPE = DATATYPE.STRING):
self.name = name
self.datatype = datatype
def __eq__(self, other):
if not isinstance(other, KeyDefinition):
return False
return self.name == other.name and self.datatype == other.datatype
def __hash__(self):
return hash(self.name + self.datatype)
def export(self) -> dict:
return {"AttributeName": self.name, "AttributeType": self.datatype}
@classmethod
def schema(cls, hash_key: KeyDefinition, range_key: KeyDefinition = None) -> list:
schema = [{"AttributeName": hash_key.name, "KeyType": "HASH"}]
if range_key:
schema.append({"AttributeName": range_key.name, "KeyType": "RANGE"})
return schema
class Stream:
VIEW = STREAM_VIEW
def __init__(self, view_type: VIEW):
self.view_type = view_type
def export(self) -> dict:
return {"StreamEnabled": True, "StreamViewType": self.view_type}
class SSESpecification:
SSE_TYPE = SSE_TYPE
def __init__(self, sse_type: SSE_TYPE, kms_master_key: str = None):
self.sse_type = sse_type
self.kms_master_key = kms_master_key
def export(self) -> dict:
spec = {"Enabled": True, "SSEType": self.sse_type}
if self.sse_type == SSE_TYPE.KMS:
spec["KMSMasterKeyId"] = self.kms_master_key
return spec
class ProvisionedThroughput:
def __init__(self, read_capacity: int, write_capacity: int):
self.read_capacity = read_capacity
self.write_capacity = write_capacity
def export(self) -> dict:
return {
"ReadCapacityUnits": self.read_capacity,
"WriteCapacityUnits": self.write_capacity,
}
class BaseTable:
BILLING_MODE = BILLING_MODE
RETURN_VALUES = RETURN_VALUES
resource = dynamodb
name: str
partition_key: KeyDefinition = KeyDefinition("pk")
sort_key: KeyDefinition = None
billing_mode: BILLING_MODE = BILLING_MODE.PROVISIONED
throughput: ProvisionedThroughput = ProvisionedThroughput(5, 5)
stream: Stream = None
sse: SSESpecification = None
tags: Dict = {}
def __init__(self, **kwargs):
self._local_secondary_indexes = []
self._global_secondary_indexes = []
self.name = kwargs.get("name") or self.name
self.billing_mode = kwargs.get("billing_mode") or self.billing_mode
self.throughput = kwargs.get("throughput") or self.throughput
self.stream = kwargs.get("stream") or self.stream
self.sse = kwargs.get("sse") or self.sse
self.resource = kwargs.get("resource") or self.resource
if kwargs.get("tags"):
self.tags.update(kwargs["tags"])
def get_table(self):
return self.resource.Table(self.name)
    def convert_key(self, key: Union[Any, Sequence[Any]]) -> dict:
if isinstance(key, str) or not isinstance(key, collections.abc.Sequence):
key = (key,)
converted = {self.partition_key.name: key[0]}
if self.sort_key and len(key) > 1:
converted[self.sort_key.name] = key[1]
return converted
def serialize_attributes(self, attributes: Sequence[str]) -> dict:
attribute_names = {}
attribute_tokens = []
counter = 0
for attribute in attributes:
token = f"#ref{counter}"
attribute_names[token] = attribute
attribute_tokens.append(token)
counter += 1
return {
"ProjectionExpression": ", ".join(attribute_tokens),
"ExpressionAttributeNames": attribute_names,
}
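# A hedged usage sketch for BaseTable (assumptions: a table named "example-table" with
# partition key "pk" and sort key "sk"; nothing below talks to DynamoDB):
def _example_base_table_usage():
    class ExampleTable(BaseTable):
        name = "example-table"
        partition_key = KeyDefinition("pk")
        sort_key = KeyDefinition("sk")
    table = ExampleTable()
    assert table.convert_key("user#1") == {"pk": "user#1"}
    assert table.convert_key(("user#1", "2021")) == {"pk": "user#1", "sk": "2021"}
    assert table.serialize_attributes(["name", "status"]) == {
        "ProjectionExpression": "#ref0, #ref1",
        "ExpressionAttributeNames": {"#ref0": "name", "#ref1": "status"},
    }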
|
import argparse
import json
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-dev', type=str, default='../data/dev_toy.tsv')
parser.add_argument('-res', type=str, default='../results/cknrm_ca.trec')
parser.add_argument('-k', type=int, default=2)
args = parser.parse_args()
score_dic = {}
for i in range(args.k):
with open('f' + str(i+1) + '.score', 'r') as r:
for line in r:
line = line.strip('\n').split('\t')
score_dic[line[0] + '$' + line[1]] = line[2]
outs = {}
with open(args.dev, 'r') as r:
qid = ''
cnt = 0
for line in r:
line = line.strip().split('\t')
if line[3] != qid:
qid = line[3]
cnt = 0
outs[line[3]] = {}
outs[line[3]][line[4]] = float(score_dic[line[3]+'$'+str(cnt)])
cnt += 1
f = open(args.res, 'w')
for qid in outs:
ps = {}
out_idx = sorted(outs[qid].items(), key=lambda x:x[1], reverse=True)
for i, out in enumerate(out_idx):
if out[0] not in ps:
ps[out[0]] = 1
f.write(' '.join([qid, 'Q0', out[0], str(len(ps)), str(out[1]), 'default']) + '\n')
f.close()
if __name__ == "__main__":
main()
|
from onegov.agency import AgencyApp
from onegov.agency.layout import PageLayout
from onegov.core.security import Public
from onegov.org.models import Topic
from onegov.org.views.page import view_topic as view_topic_base
@AgencyApp.html(model=Topic, template='topic.pt', permission=Public)
def view_topic(self, request):
layout = PageLayout(self, request)
return view_topic_base(self, request, layout)
|
################################################################################
from relative_to_absolute_path import get_absolute_path
from squares_ids import get_id_to_square_dict
from os.path import exists
import numpy as np
import tkinter as tk
from PIL import Image, ImageTk
################################################################################
################################################################################
# Set paths:
dataset_squares: str = "../datasets/squares.csv"
################################################################################
################################################################################
def get_square_name_from_id(identifier: int) -> str:
return get_id_to_square_dict()[identifier]
################################################################################
################################################################################
def load_data() -> np.ndarray:
absolute_path = get_absolute_path(dataset_squares, __file__)
    assert exists(absolute_path), "'%s' must be a valid file path" % absolute_path
return np.genfromtxt(absolute_path, delimiter=",", dtype=np.uint8)
################################################################################
################################################################################
class Application(tk.Frame):
def __init__(self, root, *args, **kwargs):
tk.Frame.__init__(self, root, *args, **kwargs)
self.parent = root
squares_data = load_data()
self.X = squares_data[:, 1:].reshape((-1, 32, 32))
self.Y = squares_data[:, 0]
self.root = root
self.root.title("Squares")
self.root.geometry("300x200")
# entry
self.entry_widget = tk.Entry(root)
self.entry_widget.place(x=140, y=70, width=100)
# button
self.show_image_button = tk.Button(
self.root,
text="Show Image",
command=lambda: self.show_image(self.entry_widget.get())
)
self.show_image_button.place(x=140, y=100, width=100)
# image
self.canvas = tk.Canvas(self.root, width=32, height=32)
self.canvas.place(x=10, y=10)
self.image = ImageTk.PhotoImage(
image=Image.fromarray(self.X[0, :], "L")
)
self.image_on_canvas = self.canvas.create_image(
0, 0, anchor="nw", image=self.image
)
# label
self.label = tk.Label(
self.root, text="Square id:\n" + get_square_name_from_id(self.Y[0])
)
self.label.place(x=70, y=10)
def show_image(self, index_str: str):
m: int = self.X.shape[0]
if not (str.isdecimal(index_str) and 0 <= int(index_str) < m):
print("'%s' must be a decimal string between 0 and %d" % (index_str, m-1))
return
index_int = int(index_str)
self.image = ImageTk.PhotoImage(
image=Image.fromarray(
self.X[index_int, :],
"L"
)
)
self.canvas.itemconfig(self.image_on_canvas, image=self.image)
self.label["text"] = "Square id:\n" + get_square_name_from_id(self.Y[index_int])
################################################################################
################################################################################
def main():
root = tk.Tk()
Application(root)
root.mainloop()
################################################################################
################################################################################
main()
|
## These are the mask bits in ANYMASK / ALLMASK.
#
# From: http://www.noao.edu/noao/staff/fvaldes/CPDocPrelim/PL201_3.html
# 1 -- detector bad pixel InstCal
# 1 -- detector bad pixel/no data Resampled
# 1 -- No data Stacked
# 2 -- saturated InstCal/Resampled
# 4 -- interpolated InstCal/Resampled
# 16 -- single exposure cosmic ray InstCal/Resampled
# 64 -- bleed trail InstCal/Resampled
# 128 -- multi-exposure transient InstCal/Resampled
DQ_BITS = dict(badpix = 1,
satur = 2,
interp = 4,
cr = 16, # 0x 10
bleed = 64, # 0x 40
trans = 128, # 0x 80
edge = 256, # 0x100
edge2 = 512, # 0x200
# Added by our stage_outliers rejection
outlier = 2048, # 0x800
)
# Bit codes for why a CCD got cut (survey-ccds file, ccd_cuts column)
CCD_CUTS = dict(
err_legacyzpts = 0x1,
not_grz = 0x2,
not_third_pix = 0x4, # Mosaic3 one-third-pixel interpolation problem
exptime = 0x8,
ccdnmatch = 0x10,
zpt_diff_avg = 0x20,
zpt_small = 0x40,
zpt_large = 0x80,
sky_is_bright = 0x100,
badexp_file = 0x200,
phrms = 0x400,
radecrms = 0x800,
seeing_bad = 0x1000,
early_decam = 0x2000,
depth_cut = 0x4000,
too_many_bad_ccds = 0x8000,
flagged_in_des = 0x10000,
phrms_s7 = 0x20000,
)
FITBITS = dict(
FORCED_POINTSOURCE = 0x1,
FIT_BACKGROUND = 0x2,
HIT_RADIUS_LIMIT = 0x4,
HIT_SERSIC_LIMIT = 0x8,
FROZEN = 0x10, # all source parameters were frozen at ref-cat values
BRIGHT = 0x20,
MEDIUM = 0x40,
GAIA = 0x80,
TYCHO2 = 0x100,
LARGEGALAXY = 0x200,
WALKER = 0x400,
RUNNER = 0x800,
GAIA_POINTSOURCE = 0x1000,
ITERATIVE = 0x2000,
)
# Outlier mask bit values
OUTLIER_POS = 1
OUTLIER_NEG = 2
# Bits in the "maskbits" data product
MASKBITS = dict(
NPRIMARY = 0x1, # not PRIMARY
BRIGHT = 0x2,
SATUR_G = 0x4,
SATUR_R = 0x8,
SATUR_Z = 0x10,
ALLMASK_G = 0x20,
ALLMASK_R = 0x40,
ALLMASK_Z = 0x80,
WISEM1 = 0x100, # WISE masked
WISEM2 = 0x200,
BAILOUT = 0x400, # bailed out of processing
MEDIUM = 0x800, # medium-bright star
GALAXY = 0x1000, # SGA large galaxy
CLUSTER = 0x2000, # Cluster catalog source
SATUR_I = 0x4000,
ALLMASK_I = 0x8000,
)
# Bits in the "brightblob" bitmask
IN_BLOB = dict(
BRIGHT = 0x1, # "bright" star
MEDIUM = 0x2, # "medium-bright" star
CLUSTER = 0x4, # Globular cluster
GALAXY = 0x8, # large SGA galaxy
)
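# A hedged usage sketch (not part of the original module): bitmask columns such as
# MASKBITS are combined with bitwise operators, e.g. to drop objects flagged as
# BRIGHT or masked in g-band:
def _example_maskbits_usage():
    import numpy as np
    maskbits = np.array([0x0, 0x2, 0x20, 0x22])
    bad = MASKBITS['BRIGHT'] | MASKBITS['ALLMASK_G']
    keep = (maskbits & bad) == 0
    return keep  # array([ True, False, False, False])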
|
import math
import numpy as np
import calculator
class ball:
'''
class used to define the balls involved in the simulation.
This class is used by all of the other classes:
- calculator.py uses the update functions (euler_update, cromer_update, and runge_kutta2) to update the positions and velocities of the balls based on the accelerations it calculated
- calculator.py requires the position, velocity, mass, and anchor points of the balls to calculate the change in acceleration
- calculator.py uses the overlap function to determine when a collision between balls has taken place
- plotter.py uses the position attribute of the ball object to plot the balls position over time
- plotter.py uses the energy functions - kinetic and potential - to plot the energy of the system over time
'''
def __init__(self, position = np.array([0,-1], dtype = float), velocity = np.array([0,0], dtype = float), radius = 1, mass = 1, anchor = np.array([0,0])):
'''
Initialisation function of the class:
position, radius, mass and velocity attributes all describe the ball itself
anchor attribute describe where the 'string' is attached, from which the length of the string is calculated
radius, mass, and anchor should not be changed at any point
all of the inputs that can be entered as lists are turned into numpy arrays for easier manipulation
'''
self.anchor = np.array(anchor)
self.position = np.array(position)
self.radius = radius
self.mass = mass
self.velocity = np.array(velocity)
self.length = np.linalg.norm(self.anchor - self.position)
def euler_update(self, dv, dt):
'''
updates the position of the ball after a time dt, using the euler approximation
dt = the timestep between iterations used by the calculator
dv = the change in velocity that has been calculated using the calculator
'''
self.position += self.velocity * dt
self.velocity += dv
def cromer_update(self, dv, dt):
'''
updates the position of the ball after a time dt, using the euler-cromer approximation
dt = the timestep between iterations used by the calculator
dv = the change in velocity that has been calculated using the calculator
'''
self.velocity += dv
self.position += self.velocity * dt
def runge_kutta2_prep(self, acceleration_start, dt):
'''
in order to complete the runge kutta approximation of motion, midpoints of position, velocity and acceleration must be used
this function is used to update the ball to the midpoint position and velocity, so that the midpoint acceleration can be calculated
'''
start_position = self.position
start_velocity = self.velocity
position_midpoint = self.position + 0.5 * dt * self.velocity
velocity_midpoint = self.velocity + 0.5 * dt * acceleration_start
self.position = position_midpoint
self.velocity = velocity_midpoint
return [start_position, start_velocity]
def runge_kutta2(self, start_position, start_velocity, acceleration_start, acceleration_mid, dt):
'''
updates the position of the ball after a time dt, using the Runge-Kutta, second order approximation
dt = the timestep between iterations used by the calculator
acceleration = the acceleration at a given moment that has been calculated using the calculator
'''
velocity_midpoint = self.velocity + 0.5 * dt * acceleration_start
self.position = start_position + velocity_midpoint * dt
self.velocity = start_velocity + dt * acceleration_mid
def overlap(self, incident):
'''
input incident must also be a ball object
returns true if an incident ball overlaps with the current ball.
'''
return np.hypot(*(self.position - incident.position)) <= self.radius + incident.radius
def kinetic(self):
'''
returns kinetic energy of the ball at a given moment
'''
        ke = 0.5 * self.mass * np.dot(self.velocity, self.velocity)
return ke
def potential(self):
'''
returns potential energy of the ball at a given moment
Assumes constant acceleration due to gravity, and that the simulation is taking place on Earth's surface.
'''
lowest_point = self.anchor[1] - self.length
h = self.position[1] - lowest_point
mgh = self.mass * 9.81 * h
return mgh
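# A hedged usage sketch (not part of the original module): one Euler-Cromer step of a
# pendulum bob under constant gravity, with dv supplied directly instead of by calculator.py:
def _example_ball_step():
    dt = 0.01
    b = ball(position=[0.5, -1.0], velocity=[0.0, 0.0], radius=0.1, mass=1.0, anchor=[0, 0])
    dv = np.array([0.0, -9.81]) * dt  # change in velocity over one timestep
    b.cromer_update(dv, dt)
    return b.position, b.kinetic() + b.potential()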
|
class Deque:
def __init__(self):
self.items = []
def empty(self):
return len(self.items) == 0
def append(self, item):
self.items.append(item)
def pop(self):
if self.empty(): raise Exception
return self.items.pop()
def append_left(self, item):
self.items.insert(0, item)
def pop_left(self):
if self.empty(): raise Exception
return self.items.pop(0)
def __len__(self):
return len(self.items)
# ----------------------------------------------------
class Node:
def __init__(self, item):
self.item = item
self.next = None
self.prev = None
class Deque:
def __init__(self):
self.front = None
self.back = None
def empty(self):
return self.front is None and self.back is None
def append(self, item):
new_node = Node(item)
new_node.prev = self.back
if not self.empty():
self.back.next = new_node
else:
self.front = new_node
self.back = new_node
def pop(self):
if self.empty(): raise Exception
current_node = self.back
item = current_node.item
self.back = current_node.prev
if self.back is None: self.front = None
else: self.back.next = None
del current_node
return item
def append_left(self, item):
new_node = Node(item)
new_node.next = self.front
        if not self.empty():
self.front.prev = new_node
else:
self.back = new_node
self.front = new_node
def pop_left(self):
        if self.empty(): raise IndexError("pop_left from an empty deque")
current_node = self.front
item = current_node.item
self.front = current_node.next
if self.front is None:
self.back = None
else:
self.front.prev = None
del current_node
return item
def __del__(self):
while self.front is not None:
current_node = self.front
self.front = self.front.next
del current_node
self.back = None
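# --- Illustrative usage (not part of the original module) ---
# A small sanity check of the linked-list Deque above; the values are
# arbitrary. Note that this second Deque class shadows the list-based one
# defined earlier in the file, so the node-based version is exercised here.
if __name__ == "__main__":
    d = Deque()
    d.append(1)
    d.append(2)
    d.append_left(0)      # deque is now 0, 1, 2 (front to back)
    assert d.pop() == 2
    assert d.pop_left() == 0
    assert d.pop() == 1
    assert d.empty()
    print("Deque sanity check passed")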
|
#!/usr/bin/env python
# coding: utf-8
import gzip
import json
import logging
import os
import tarfile
import time
from datetime import datetime
from functools import partial, reduce
from glob import glob
from typing import Callable, Dict, List, Type
import numpy as np
import pandas as pd
import seaborn as sns
import torch
import tqdm
from elasticsearch import Elasticsearch, helpers
from es_pandas import es_pandas
from sentence_transformers import (InputExample, LoggingHandler,
SentenceTransformer, util)
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CERerankingEvaluator
from sklearn.metrics import balanced_accuracy_score
from torch.utils.data import DataLoader
pd.set_option("display.max_rows", 200)
es_host = 'localhost:9200'
bi_model_path = os.path.join(os.path.dirname(__file__), os.path.pardir, "bi_encoder_save/")
bi_model = SentenceTransformer(bi_model_path, device = "cpu")
cross_model_path = "output/training_ms-marco_cross-encoder-xlm-roberta-base-2021-01-17_14-43-23_map-train-eval"
cross_model = CrossEncoder(cross_model_path, num_labels=1, max_length=512, device = "cpu")
class es_pandas_edit(es_pandas):
@staticmethod
def serialize(row, columns, use_pandas_json, iso_dates):
if use_pandas_json:
return json.dumps(dict(zip(columns, row)), iso_dates=iso_dates)
        def _is_missing(value):
            # sequences (other than strings) count as missing only when every element is NaN
            if hasattr(value, "__len__") and not isinstance(value, str):
                return all(pd.isna(value))
            return pd.isna(value)
        return dict(zip(columns, [None if _is_missing(r) else r for r in row]))
def to_pandas_iter(self, index, query_rule=None, heads=[], dtype={}, infer_dtype=False, show_progress=True,
chunk_size = None, **kwargs):
if query_rule is None:
query_rule = {'query': {'match_all': {}}}
count = self.es.count(index=index, body=query_rule)['count']
if count < 1:
raise Exception('Empty for %s' % index)
query_rule['_source'] = heads
anl = helpers.scan(self.es, query=query_rule, index=index, **kwargs)
source_iter = self.get_source(anl, show_progress = show_progress, count = count)
print(source_iter)
if chunk_size is None:
df = pd.DataFrame(list(source_iter)).set_index('_id')
if infer_dtype:
dtype = self.infer_dtype(index, df.columns.values)
if len(dtype):
df = df.astype(dtype)
yield df
return
        assert isinstance(chunk_size, int)
def map_list_of_dicts_into_df(list_of_dicts, set_index = "_id"):
from collections import defaultdict
req = defaultdict(list)
for dict_ in list_of_dicts:
for k, v in dict_.items():
req[k].append(v)
req = pd.DataFrame.from_dict(req)
if set_index:
assert set_index in req.columns.tolist()
t_df = req.set_index(set_index)
if infer_dtype:
dtype = self.infer_dtype(index, t_df.columns.values)
if len(dtype):
t_df = t_df.astype(dtype)
return t_df
list_of_dicts = []
for dict_ in source_iter:
list_of_dicts.append(dict_)
if len(list_of_dicts) >= chunk_size:
yield map_list_of_dicts_into_df(list_of_dicts)
list_of_dicts = []
if list_of_dicts:
yield map_list_of_dicts_into_df(list_of_dicts)
ep = es_pandas_edit(es_host)
ep.ic.get_alias("*")
chunk_size = 1000
valid_part_from_es_iter = ep.to_pandas_iter(index = "valid_part", chunk_size = chunk_size)
valid_part_tiny = None
for ele in valid_part_from_es_iter:
valid_part_tiny = ele
break
del valid_part_from_es_iter
if ep.ic.exists("valid_part_tiny"):
ep.ic.delete(index = "valid_part_tiny")
ep.init_es_tmpl(valid_part_tiny, "valid_part_tiny_doc_type", delete=True)
valid_part_tmp = ep.es.indices.get_template("valid_part_tiny_doc_type")
es_index = valid_part_tmp["valid_part_tiny_doc_type"]
es_index["mappings"]["properties"]["question_emb"] = {
"type": "dense_vector",
"dims": 768
}
es_index["mappings"]["properties"]["answer_emb"] = {
"type": "dense_vector",
"dims": 768
}
es_index["mappings"]["properties"]["question"] = {
"type": "text",
}
es_index["mappings"]["properties"]["answer"] = {
"type": "text",
}
es_index = {"mappings": es_index["mappings"]}
ep.es.indices.create(index='valid_part_tiny', body=es_index, ignore=[400])
question_embeddings = bi_model.encode(valid_part_tiny["question"].tolist(), convert_to_tensor=True, show_progress_bar=True)
answer_embeddings = bi_model.encode(valid_part_tiny["answer"].tolist(), convert_to_tensor=True, show_progress_bar=True)
valid_part_tiny["question_emb"] = question_embeddings.cpu().numpy().tolist()
valid_part_tiny["answer_emb"] = answer_embeddings.cpu().numpy().tolist()
ep.to_es(valid_part_tiny, "valid_part_tiny")
chunk_size = 1000
valid_part_tiny = list(ep.to_pandas_iter(index = "valid_part_tiny", chunk_size = None))[0]
def search_by_embedding_in_es(index = "valid_part" ,embedding = np.asarray(valid_part_tiny["question_emb"].iloc[0]), on_column = "answer_emb"):
vector_search_one = ep.es.search(index=index, body={
"query": {
"script_score": {
"query": {
"match_all": {}
},
"script": {
"source": "cosineSimilarity(params.queryVector, doc['{}']) + 1.0".format(on_column),
"params": {
"queryVector": embedding
}
}
}
}
}, ignore = [400])
req = list(map(lambda x: (x["_source"]["question"], x["_source"]["answer"], x["_score"]) ,vector_search_one["hits"]["hits"]))
req_df = pd.DataFrame(req, columns = ["question", "answer", "score"])
return req_df
def search_by_text_in_es(index = "valid_part" ,text = valid_part_tiny["question"].iloc[0], on_column = "answer",
analyzer = "smartcn"):
if analyzer is not None:
        bm25 = ep.es.search(index = index,
body={"query":
{
"match": {on_column:{"query" :text, "analyzer": analyzer} },
}
},
)
else:
bm25 = ep.es.search(index=index, body={"query": {"match": {on_column: text}}})
req = list(map(lambda x: (x["_source"]["question"], x["_source"]["answer"], x["_score"]) ,bm25["hits"]["hits"]))
req_df = pd.DataFrame(req, columns = ["question", "answer", "score"])
return req_df
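# --- Illustrative usage (not part of the original notebook flow) ---
# A sketch of calling the two retrieval helpers above. It assumes the
# Elasticsearch instance at es_host is reachable and that the
# "valid_part_tiny" index built earlier holds the question/answer fields and
# dense-vector embeddings; analyzer=None falls back to a plain match query,
# which avoids requiring the smartcn analyzer plugin.
example_question = valid_part_tiny["question"].iloc[0]
example_embedding = np.asarray(valid_part_tiny["question_emb"].iloc[0])
dense_hits = search_by_embedding_in_es(index="valid_part_tiny",
                                       embedding=example_embedding,
                                       on_column="answer_emb")
bm25_hits = search_by_text_in_es(index="valid_part_tiny",
                                 text=example_question,
                                 on_column="answer",
                                 analyzer=None)
print(dense_hits.head())
print(bm25_hits.head())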
def valid_two_model(cross_model, ep, index, question, question_embedding, on_column = "answer_emb", size = 10):
def search_by_embedding(ep ,index = "valid_part" ,embedding = np.asarray(valid_part_tiny["question_emb"].iloc[0]), on_column = "answer_emb"):
vector_search_one = ep.es.search(index=index, body={
"size": size,
"query": {
"script_score": {
"query": {
"match_all": {}
},
"script": {
"source": "cosineSimilarity(params.queryVector, doc['{}']) + 1.0".format(on_column),
"params": {
"queryVector": embedding
}
}
}
}
}, ignore = [400])
req = list(map(lambda x: (x["_source"]["question"], x["_source"]["answer"], x["_score"]) ,vector_search_one["hits"]["hits"]))
req_df = pd.DataFrame(req, columns = ["question", "answer", "score"])
return req_df
search_by_emb = search_by_embedding(ep ,index = index, embedding = question_embedding, on_column = on_column)
print("question : {}".format(question))
preds = cross_model.predict(search_by_emb.apply(lambda r: [question, r["answer"]], axis = 1).tolist())
search_by_emb["cross_score"] = preds.tolist()
return search_by_emb
def produce_df(question, size = 10):
question, question_embedding = valid_part_tiny[valid_part_tiny["question"] == question].iloc[0][["question", "question_emb"]]
valid_df = valid_two_model(cross_model, ep, index = "valid_part_tiny", question = question, question_embedding = question_embedding, size = size)
return valid_df
class ScoreCalculator(object):
def __init__(self,
queries_ids,
relevant_docs,
mrr_at_k: List[int] = [10],
ndcg_at_k: List[int] = [10],
accuracy_at_k: List[int] = [1, 3, 5, 10],
precision_recall_at_k: List[int] = [1, 3, 5, 10],
map_at_k: List[int] = [100],
):
"queries_ids list of query, relevant_docs key query value set or list of relevant_docs"
self.queries_ids = queries_ids
self.relevant_docs = relevant_docs
self.mrr_at_k = mrr_at_k
self.ndcg_at_k = ndcg_at_k
self.accuracy_at_k = accuracy_at_k
self.precision_recall_at_k = precision_recall_at_k
self.map_at_k = map_at_k
def compute_metrics(self, queries_result_list: List[object]):
# Init score computation values
num_hits_at_k = {k: 0 for k in self.accuracy_at_k}
precisions_at_k = {k: [] for k in self.precision_recall_at_k}
recall_at_k = {k: [] for k in self.precision_recall_at_k}
MRR = {k: 0 for k in self.mrr_at_k}
ndcg = {k: [] for k in self.ndcg_at_k}
AveP_at_k = {k: [] for k in self.map_at_k}
# Compute scores on results
        #### each element of queries_result_list is a list of hits; one hit is a dict: {"corpus_id": corpus_text, "score": score}
        #### here "corpus_id" holds the corpus text itself rather than an integer id
for query_itr in range(len(queries_result_list)):
query_id = self.queries_ids[query_itr]
# Sort scores
top_hits = sorted(queries_result_list[query_itr], key=lambda x: x['score'], reverse=True)
query_relevant_docs = self.relevant_docs[query_id]
            # Accuracy@k - count the result as correct if at least one relevant doc appears among the top-k documents
for k_val in self.accuracy_at_k:
for hit in top_hits[0:k_val]:
if hit['corpus_id'] in query_relevant_docs:
num_hits_at_k[k_val] += 1
break
# Precision and Recall@k
for k_val in self.precision_recall_at_k:
num_correct = 0
for hit in top_hits[0:k_val]:
if hit['corpus_id'] in query_relevant_docs:
num_correct += 1
precisions_at_k[k_val].append(num_correct / k_val)
recall_at_k[k_val].append(num_correct / len(query_relevant_docs))
# MRR@k
for k_val in self.mrr_at_k:
for rank, hit in enumerate(top_hits[0:k_val]):
if hit['corpus_id'] in query_relevant_docs:
MRR[k_val] += 1.0 / (rank + 1)
#break
# NDCG@k
for k_val in self.ndcg_at_k:
predicted_relevance = [1 if top_hit['corpus_id'] in query_relevant_docs else 0 for top_hit in top_hits[0:k_val]]
true_relevances = [1] * len(query_relevant_docs)
ndcg_value = self.compute_dcg_at_k(predicted_relevance, k_val) / self.compute_dcg_at_k(true_relevances, k_val)
ndcg[k_val].append(ndcg_value)
# MAP@k
for k_val in self.map_at_k:
num_correct = 0
sum_precisions = 0
for rank, hit in enumerate(top_hits[0:k_val]):
if hit['corpus_id'] in query_relevant_docs:
num_correct += 1
sum_precisions += num_correct / (rank + 1)
avg_precision = sum_precisions / min(k_val, len(query_relevant_docs))
AveP_at_k[k_val].append(avg_precision)
# Compute averages
for k in num_hits_at_k:
#num_hits_at_k[k] /= len(self.queries)
num_hits_at_k[k] /= len(queries_result_list)
for k in precisions_at_k:
precisions_at_k[k] = np.mean(precisions_at_k[k])
for k in recall_at_k:
recall_at_k[k] = np.mean(recall_at_k[k])
for k in ndcg:
ndcg[k] = np.mean(ndcg[k])
for k in MRR:
#MRR[k] /= len(self.queries)
MRR[k] /= len(queries_result_list)
for k in AveP_at_k:
AveP_at_k[k] = np.mean(AveP_at_k[k])
return {'accuracy@k': num_hits_at_k, 'precision@k': precisions_at_k, 'recall@k': recall_at_k, 'ndcg@k': ndcg, 'mrr@k': MRR, 'map@k': AveP_at_k}
@staticmethod
def compute_dcg_at_k(relevances, k):
dcg = 0
for i in range(min(len(relevances), k)):
dcg += relevances[i] / np.log2(i + 2) #+2 as we start our idx at 0
return dcg
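# --- Illustrative usage (not part of the original notebook flow) ---
# A hand-sized check of ScoreCalculator: one query ("q1") with two relevant
# answers and three returned hits; every name and score below is made up.
# By hand: accuracy@1 = 0.0, accuracy@3 = 1.0, precision@3 = 2/3,
# recall@3 = 1.0, and (because the break in the MRR loop above is commented
# out) mrr@3 accumulates to 1/2 + 1/3.
_toy_calculator = ScoreCalculator(
    queries_ids=["q1"],
    relevant_docs={"q1": {"a1", "a2"}},
    mrr_at_k=[3], ndcg_at_k=[3],
    accuracy_at_k=[1, 3], precision_recall_at_k=[1, 3], map_at_k=[3],
)
_toy_results = [[{"corpus_id": "a3", "score": 0.9},
                 {"corpus_id": "a1", "score": 0.8},
                 {"corpus_id": "a2", "score": 0.1}]]
print(_toy_calculator.compute_metrics(_toy_results))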
def map_dev_samples_to_score_calculator_format(dev_samples):
if isinstance(dev_samples, dict):
dev_samples = list(dev_samples.values())
queries_ids = list(map(lambda x: x["query"] ,dev_samples))
relevant_docs = dict(map(lambda idx: (dev_samples[idx]["query"], dev_samples[idx]["positive"]), range(len(dev_samples))))
return ScoreCalculator(queries_ids, relevant_docs)
def map_valid_df_to_score_calculator_format(query ,valid_df):
queries_ids = [query]
relevant_docs = {query: valid_df[valid_df["question"] == query]["answer"].tolist()}
return ScoreCalculator(queries_ids, relevant_docs)
def df_to_mrr_score(df, query, score_col, mrr_at_k = 10):
#model_input = [[query, doc] for doc in docs]
#pred_scores = model.predict(model_input, convert_to_numpy=True, show_progress_bar=False)
is_relevant = list(map(lambda t2: True if t2[1]["question"] == query else False, df.iterrows()))
pred_scores = df[score_col].values
pred_scores_argsort = np.argsort(-pred_scores) #Sort in decreasing order
mrr_score = 0
for rank, index in enumerate(pred_scores_argsort[0:mrr_at_k]):
if is_relevant[index]:
mrr_score = 1 / (rank+1)
#mrr_score += 1 / (rank+1)
break
return mrr_score
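# --- Illustrative usage (not part of the original notebook flow) ---
# What df_to_mrr_score expects: a DataFrame of retrieved (question, answer)
# rows with a score column, where a row counts as relevant when its
# "question" equals the query. The values are made up; the best-scoring row
# is irrelevant and the first relevant row sits at rank 2, so MRR = 1/2.
_toy_df = pd.DataFrame({
    "question": ["other question", "my question", "my question"],
    "answer": ["a", "b", "c"],
    "score": [0.9, 0.7, 0.2],
})
assert df_to_mrr_score(_toy_df, "my question", "score") == 0.5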
question_list = valid_part_tiny["question"].value_counts().index.tolist()
valid_df = produce_df(question_list[10], size = 100)
def produce_score_dict(query ,valid_df, column = "score"):
queries_result_list = valid_df[["answer", column]].apply(lambda x: {"corpus_id": x["answer"], "score": x[column]}, axis = 1).tolist()
score_dict = map_valid_df_to_score_calculator_format(query, valid_df).compute_metrics([queries_result_list])
return score_dict
produce_score_dict(question_list[10] ,valid_df, "score")
produce_score_dict(question_list[10] ,valid_df, "cross_score")
produce_score_dict(question_list[10] ,valid_df.head(20), "score")
produce_score_dict(question_list[10] ,valid_df.head(20), "cross_score")
valid_df.head(20)
valid_df.head(20).sort_values(by = "cross_score", ascending = False)
valid_df.sort_values(by = "cross_score", ascending = False).head(10)
sns.distplot(valid_df["cross_score"])
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class NodePoolOptions(object):
"""
Options for creating or updating node pools.
"""
def __init__(self, **kwargs):
"""
Initializes a new NodePoolOptions object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param kubernetes_versions:
The value to assign to the kubernetes_versions property of this NodePoolOptions.
:type kubernetes_versions: list[str]
:param shapes:
The value to assign to the shapes property of this NodePoolOptions.
:type shapes: list[str]
:param images:
The value to assign to the images property of this NodePoolOptions.
:type images: list[str]
:param sources:
The value to assign to the sources property of this NodePoolOptions.
:type sources: list[oci.container_engine.models.NodeSourceOption]
"""
self.swagger_types = {
'kubernetes_versions': 'list[str]',
'shapes': 'list[str]',
'images': 'list[str]',
'sources': 'list[NodeSourceOption]'
}
self.attribute_map = {
'kubernetes_versions': 'kubernetesVersions',
'shapes': 'shapes',
'images': 'images',
'sources': 'sources'
}
self._kubernetes_versions = None
self._shapes = None
self._images = None
self._sources = None
@property
def kubernetes_versions(self):
"""
Gets the kubernetes_versions of this NodePoolOptions.
Available Kubernetes versions.
:return: The kubernetes_versions of this NodePoolOptions.
:rtype: list[str]
"""
return self._kubernetes_versions
@kubernetes_versions.setter
def kubernetes_versions(self, kubernetes_versions):
"""
Sets the kubernetes_versions of this NodePoolOptions.
Available Kubernetes versions.
:param kubernetes_versions: The kubernetes_versions of this NodePoolOptions.
:type: list[str]
"""
self._kubernetes_versions = kubernetes_versions
@property
def shapes(self):
"""
Gets the shapes of this NodePoolOptions.
Available shapes for nodes.
:return: The shapes of this NodePoolOptions.
:rtype: list[str]
"""
return self._shapes
@shapes.setter
def shapes(self, shapes):
"""
Sets the shapes of this NodePoolOptions.
Available shapes for nodes.
:param shapes: The shapes of this NodePoolOptions.
:type: list[str]
"""
self._shapes = shapes
@property
def images(self):
"""
Gets the images of this NodePoolOptions.
Deprecated. See sources.
When creating a node pool using the `CreateNodePoolDetails` object, only image names contained in this
property can be passed to the `nodeImageName` property.
:return: The images of this NodePoolOptions.
:rtype: list[str]
"""
return self._images
@images.setter
def images(self, images):
"""
Sets the images of this NodePoolOptions.
Deprecated. See sources.
When creating a node pool using the `CreateNodePoolDetails` object, only image names contained in this
property can be passed to the `nodeImageName` property.
:param images: The images of this NodePoolOptions.
:type: list[str]
"""
self._images = images
@property
def sources(self):
"""
Gets the sources of this NodePoolOptions.
Available source of the node.
:return: The sources of this NodePoolOptions.
:rtype: list[oci.container_engine.models.NodeSourceOption]
"""
return self._sources
@sources.setter
def sources(self, sources):
"""
Sets the sources of this NodePoolOptions.
Available source of the node.
:param sources: The sources of this NodePoolOptions.
:type: list[oci.container_engine.models.NodeSourceOption]
"""
self._sources = sources
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
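# --- Illustrative usage (not part of the generated model) ---
# NodePoolOptions is normally returned by the container engine client rather
# than built by hand, but the kwargs-based constructor documented above can be
# exercised directly; the version and shape strings below are placeholders,
# not real OKE values.
if __name__ == "__main__":
    options = NodePoolOptions(
        kubernetes_versions=["v1.21.5"],
        shapes=["VM.Standard2.1"],
        images=[],
        sources=[],
    )
    print(repr(options))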
|
"""
Functions for performing numerical differentiation.
"""
import numpy as np
from scipy import signal
def differentiate(
X, t, diff_method="forward difference", smoother=None, window_length=35
):
"""
Approximate the derivative of set of 1D data points using finite
difference methods
Parameters
----------
X : Numpy 1d or 2d array
Array of data from which to approximate the derivative. Rows
correspond to different variables and columns correspond to
measurements at different times.
t : scalar or 1d array
Times when measurements in X were taken. If scalar, we assume a uniform
time step.
    diff_method (optional) : str
Which finite difference method to use. Options include
'forward difference'
'centered difference'
smoother (optional) : str
Which smoothing method to use. Options include
'savgol' for a Savitzky-Golay filter
'median' for a median filter
'wiener' for a wiener filter
window_length (optional) : int
The length of the window the smoother should use.
"""
# Require an odd window length
if np.mod(window_length, 2) == 0:
window_length -= 1
# Apply smoothing
if smoother:
X = smooth_data(X, smoother, window_length=window_length)
if diff_method == "forward difference":
return forward_difference(X, t)
else:
return centered_difference(X, t)
#
# Helper functions
#
def smooth_data(X, smoother="savgol", window_length=35):
"""Apply smoothing to data."""
# Axis along which to smooth
axis = np.ndim(X) - 1
# Check that window_length was appropriately set
if window_length > X.shape[axis]:
print(
"Window length {} larger than size of array {}.".format(
window_length, X.shape[axis]
)
)
window_length = X.shape[axis]
if np.mod(window_length, 2) == 0:
window_length -= 1
print("Shrinking window to size {}.".format(window_length))
if smoother == "savgol":
if window_length <= 3:
raise ValueError(
"window_length must be larger than 3, currently set to {}".format(
window_length
)
)
return signal.savgol_filter(X, window_length, polyorder=3, axis=axis)
if smoother == "median":
if axis == 0:
return signal.medfilt(X, kernel_size=window_length)
else:
return np.vstack([signal.medfilt(x, kernel_size=window_length) for x in X])
if smoother == "wiener":
if axis == 0:
return signal.wiener(X, mysize=window_length)
else:
return np.vstack([signal.wiener(x, mysize=window_length) for x in X])
def forward_difference(X, t=1):
"""
First order forward difference (and 2nd order backward difference for final
point)
"""
# Check whether data is 1D
if np.ndim(X) == 1:
# Uniform timestep
if np.isscalar(t):
X_diff = (X[1:] - X[:-1]) / t
backward_diff = np.array([(3 * X[-1] / 2 - 2 * X[-2] + X[-3] / 2) / t])
return np.concatenate((X_diff, backward_diff))
# Variable timestep
else:
t_diff = t[1:] - t[:-1]
X_diff = (X[1:] - X[:-1]) / t_diff
backward_diff = np.array(
[(3 * X[-1] / 2 - 2 * X[-2] + X[-3] / 2) / t_diff[-1]]
)
return np.concatenate((X_diff, backward_diff))
# Otherwise assume data is 2D
else:
# Uniform timestep
if np.isscalar(t):
X_diff = (X[:, 1:] - X[:, :-1]) / t
backward_diff = (
(3 * X[:, -1] / 2 - 2 * X[:, -2] + X[:, -3] / 2) / t
).reshape(X.shape[0], 1)
return np.concatenate((X_diff, backward_diff), axis=1)
# Variable timestep
else:
t_diff = t[1:] - t[:-1]
X_diff = (X[:, 1:] - X[:, :-1]) / t_diff
backward_diff = (
(3 * X[:, -1] / 2 - 2 * X[:, -2] + X[:, -3] / 2) / t_diff[-1]
).reshape(X.shape[0], 1)
return np.concatenate((X_diff, backward_diff), axis=1)
def centered_difference(X, t):
"""
Second order centered difference with third order forward/backward
difference at endpoints.
Warning: Sometimes has trouble with nonuniform grid spacing near boundaries
"""
# Check whether data is 1D
if np.ndim(X) == 1:
# Uniform timestep
if np.isscalar(t):
X_diff = (X[2:] - X[:-2]) / (2 * t)
forward_diff = np.array(
[(-11 / 6 * X[0] + 3 * X[1] - 3 / 2 * X[2] + X[3] / 3) / t]
)
backward_diff = np.array(
[(11 / 6 * X[-1] - 3 * X[-2] + 3 / 2 * X[-3] - X[-4] / 3) / t]
)
return np.concatenate((forward_diff, X_diff, backward_diff))
# Variable timestep
else:
t_diff = t[2:] - t[:-2]
X_diff = (X[2:] - X[:-2]) / (t_diff)
forward_diff = np.array(
[(-11 / 6 * X[0] + 3 * X[1] - 3 / 2 * X[2] + X[3] / 3) / (t[1] - t[0])]
)
backward_diff = np.array(
[
(11 / 6 * X[-1] - 3 * X[-2] + 3 / 2 * X[-3] - X[-4] / 3)
/ (t[-1] - t[-2])
]
)
return np.concatenate((forward_diff, X_diff, backward_diff))
# Otherwise assume data is 2D
else:
# Uniform timestep
if np.isscalar(t):
X_diff = (X[:, 2:] - X[:, :-2]) / (2 * t)
forward_diff = (
(-11 / 6 * X[:, 0] + 3 * X[:, 1] - 3 / 2 * X[:, 2] + X[:, 3] / 3) / t
).reshape(X.shape[0], 1)
backward_diff = (
(11 / 6 * X[:, -1] - 3 * X[:, -2] + 3 / 2 * X[:, -3] - X[:, -4] / 3) / t
).reshape(X.shape[0], 1)
return np.concatenate((forward_diff, X_diff, backward_diff), axis=1)
# Variable timestep
else:
t_diff = t[2:] - t[:-2]
X_diff = (X[:, 2:] - X[:, :-2]) / t_diff
forward_diff = (
(-11 / 6 * X[:, 0] + 3 * X[:, 1] - 3 / 2 * X[:, 2] + X[:, 3] / 3)
/ (t_diff[0] / 2)
).reshape(X.shape[0], 1)
backward_diff = (
(11 / 6 * X[:, -1] - 3 * X[:, -2] + 3 / 2 * X[:, -3] - X[:, -4] / 3)
/ (t_diff[-1] / 2)
).reshape(X.shape[0], 1)
return np.concatenate((forward_diff, X_diff, backward_diff), axis=1)
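# --- Illustrative usage (not part of the original module) ---
# A quick self-check of `differentiate` on a sine wave, whose exact derivative
# is cos(t); the sample count is arbitrary and no smoothing is applied.
if __name__ == "__main__":
    t = np.linspace(0, 2 * np.pi, 1000)
    x = np.sin(t)
    dx = differentiate(x, t[1] - t[0], diff_method="centered difference")
    print("max abs error vs cos(t):", np.max(np.abs(dx - np.cos(t))))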
|
"""
Censor 3 ----> CENSOR 2
Designed to be run by the evaluator.
TCP Censor that synchronizes on first SYN only, works 100% of the time, sends 5 RSTs to
server AND client.
"""
import logging
import netifaces
import layers.packet
# Disable scapy ::1 warnings
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import IP, TCP, wrpcap
from censors.censor import Censor
class Censor3(Censor):
"""
TCP Censor that synchronizes on first SYN only, works 100% of the time, sends 5 RSTs to
server AND client.
"""
def __init__(self, environment_id, forbidden, log_dir, log_level, port, queue_num):
Censor.__init__(self, environment_id, log_dir, log_level, port, queue_num)
self.forbidden = forbidden
self.enabled = True
self.tcb = 0
self.drop_all_from = None
self.num = 0
self.censor_interfaces = netifaces.interfaces()
        if len(self.censor_interfaces) > 1 and 'eth0' in self.censor_interfaces:
self.censor_ip = netifaces.ifaddresses('eth0')[netifaces.AF_INET][0]['addr']
def check_censor(self, packet):
"""
Check if the censor should run against this packet. Returns true or false.
"""
try:
self.num += 1
# Only censor TCP packets for now
self.logger.debug("Inbound packet to censor: " + layers.packet.Packet._str_packet(packet))
if "TCP" not in packet:
return False
if packet["TCP"].sprintf('%TCP.flags%') == "S":
self.tcb = packet["TCP"].seq + 1
self.logger.debug("Synchronizing TCB on packet " + layers.packet.Packet._str_packet(packet))
return False
if packet["TCP"].seq == self.tcb:
self.tcb += len(self.get_payload(packet))
else:
self.logger.debug("Ignoring packet: " + layers.packet.Packet._str_packet(packet))
return False
for keyword in self.forbidden:
if keyword in self.get_payload(packet):
self.logger.debug("Packet triggered censor: " + layers.packet.Packet._str_packet(packet))
return True
return False
except Exception:
self.logger.exception("Censor 3 Error caught.")
return False
def censor(self, scapy_packet):
"""
Send 5 resets to the client and the server.
"""
client_ip_rst = IP(src=scapy_packet[IP].dst, dst=scapy_packet[IP].src)
client_tcp_rst = TCP(
dport=scapy_packet[TCP].sport,
sport=scapy_packet[TCP].dport,
ack=scapy_packet[TCP].seq+len(str(scapy_packet[TCP].payload)),
seq=scapy_packet[TCP].ack,
flags="R"
)
client_rst = client_ip_rst / client_tcp_rst
server_ip_rst = IP(src=self.censor_ip, dst=scapy_packet[IP].dst)
server_tcp_rst = TCP(
dport=scapy_packet[TCP].dport,
sport=scapy_packet[TCP].sport,
ack=scapy_packet[TCP].ack,
seq=scapy_packet[TCP].seq,
flags="R"
)
server_tcp_rst.show()
server_rst = server_ip_rst / server_tcp_rst
for _ in range(0, 5):
self.mysend(client_rst)
self.mysend(server_rst)
|
import pandas as pd
import numpy as np
from python_nw import newey
from scipy.stats import f
from scipy.stats import chi2
def ap_FMB(mr,mf):
# Initialize
dT, dN = mr.shape
dT, dK = mf.shape
valpha = np.empty((dN,1))
mbeta = np.empty((dN,dK))
mresid =np.empty((dT,dN))
mlambda_t = np.empty((dT,dK))
malpha_t = np.empty((dT,dN))
# Pass 1: Time-series regressions
vones = np.ones((dT,1))
for i in range(0,dN):
vres = newey(mr[:,i],np.hstack((vones, mf)).reshape(dT,dK+1),0)
valpha[i] = vres.beta[0]
mbeta[i,:] = vres.beta[1:].transpose()
mresid[:,i] = vres.resid
    # Pass 2: Cross-sectional regressions
for t in range(0,dT):
vres = newey(mr[t,:].transpose(),mbeta,0)
mlambda_t[t,:] = vres.beta.transpose()
malpha_t[t,:] = vres.resid.transpose()
valpha = np.mean(malpha_t,0).transpose()
vlambda = np.mean(mlambda_t,0).transpose()
# Compute standard errors
mcov_alpha =1/dT*np.cov(malpha_t,rowvar=0)
mcov_lambda = 1/dT*np.cov(mlambda_t,rowvar=0)
valpha_t = valpha / np.sqrt(np.diag(mcov_alpha))
vlambda_t = vlambda / np.sqrt(np.diag(mcov_lambda))
# Asset pricing test
dmodel_test_stat = valpha.transpose()@np.linalg.pinv(mcov_alpha)@valpha
dmodel_p = 1-chi2.cdf(dmodel_test_stat,dN-dK)
return vlambda, vlambda_t, valpha, valpha_t, dmodel_test_stat, dmodel_p
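# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of running the Fama-MacBeth procedure on simulated data:
# dN test-asset returns generated from dK factors plus noise. All numbers
# below (factor moments, loadings, noise scale) are arbitrary.
if __name__ == "__main__":
    np.random.seed(0)
    dT, dN, dK = 600, 10, 2
    mf_sim = np.random.normal(0.005, 0.02, size=(dT, dK))        # factor returns
    mbeta_true = np.random.uniform(0.5, 1.5, size=(dN, dK))      # true loadings
    mr_sim = mf_sim @ mbeta_true.T + np.random.normal(0, 0.01, size=(dT, dN))
    vlambda, vlambda_t, valpha, valpha_t, stat, pval = ap_FMB(mr_sim, mf_sim)
    print("risk premia:", vlambda)
    print("chi2 test on pricing errors:", stat, "p-value:", pval)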
|
"""
zeep.xsd
--------
"""
from zeep.xsd.const import SkipValue # noqa
from zeep.xsd.elements import * # noqa
from zeep.xsd.schema import Schema # noqa
from zeep.xsd.types import * # noqa
from zeep.xsd.types.builtins import * # noqa
from zeep.xsd.valueobjects import * # noqa
|
"""
Main script for training and evaluating a TCN on a multi-step forecasting task.
You can choose between:
- Running a simple experiment
- Running multiple experiments trying out different combinations of hyperparameters (grid search)
"""
import warnings
warnings.filterwarnings(action='ignore')
from keras.callbacks import EarlyStopping
from keras.optimizers import Adam
from dts import config
from dts.datasets import uci_single_households
from dts.datasets import gefcom2014
from dts import logger
from dts.utils.plot import plot
from dts.utils import metrics
from dts.utils import run_grid_search, run_single_experiment
from dts.utils import DTSExperiment, log_metrics, get_args
from dts.utils.decorators import f_main
from dts.utils.split import *
from dts.models.TCN import *
import time
import os
args = get_args()
@f_main(args=args)
def main(_run):
################################
    # Load Experiment's parameters #
################################
params = vars(args)
logger.info(params)
################################
# Load Dataset #
################################
dataset_name = params['dataset']
if dataset_name == 'gefcom':
dataset = gefcom2014
else:
dataset = uci_single_households
data = dataset.load_data(fill_nan='median',
preprocessing=True,
split_type='simple',
is_train=params['train'],
detrend=params['detrend'],
exogenous_vars=params['exogenous'],
use_prebuilt=True)
scaler, train, test, trend = data['scaler'], data['train'], data['test'], data['trend']
if not params['detrend']:
trend = None
X_train, y_train = get_rnn_inputs(train,
window_size=params['input_sequence_length'],
horizon=params['output_sequence_length'],
shuffle=True,
multivariate_output=params['exogenous'])
################################
# Build & Train Model #
################################
tcn = TCNModel(layers=params['layers'],
filters=params['out_channels'],
kernel_size=params['kernel_size'],
kernel_initializer='glorot_normal',
kernel_regularizer=l2(params['l2_reg']),
bias_regularizer=l2(params['l2_reg']),
dilation_rate=params['dilation'],
use_bias=False,
return_sequence=True,
tcn_type=params['tcn_type'])
if params['exogenous']:
exog_var_train = y_train[:, :, 1:] # [n_samples, horizon, n_features]
y_train = y_train[:, :, 0] # [n_samples, horizon]
conditions_shape = (exog_var_train.shape[1], exog_var_train.shape[-1])
X_test, y_test = get_rnn_inputs(test,
window_size=params['input_sequence_length'],
horizon=params['output_sequence_length'],
shuffle=False,
multivariate_output=True)
exog_var_test = y_test[:, :, 1:] # [n_samples, horizon, n_features]
y_test = y_test[:, :, 0] # [n_samples, horizon]
else:
X_test, y_test = get_rnn_inputs(test,
window_size=params['input_sequence_length'],
horizon=params['output_sequence_length'],
shuffle=False)
exog_var_train = None
exog_var_test = None
conditions_shape = None
# IMPORTANT: Remember to pass the trend values through the same ops as the inputs values
if params['detrend']:
X_trend_test, y_trend_test = get_rnn_inputs(trend[1],
window_size=params['input_sequence_length'],
horizon=params['output_sequence_length'],
shuffle=False)
trend = y_trend_test
model = tcn.build_model(input_shape=(X_train.shape[1], X_train.shape[-1]),
horizon=params['output_sequence_length'],
conditions_shape=conditions_shape,
use_final_dense=True)
if params['load'] is not None:
logger.info("Loading model's weights from disk using {}".format(params['load']))
model.load_weights(params['load'])
optimizer = Adam(params['learning_rate'])
model.compile(optimizer=optimizer, loss=['mse'], metrics=metrics)
callbacks = [EarlyStopping(patience=50, monitor='val_loss')]
if params['exogenous'] and params['tcn_type'] == 'conditional_tcn':
history = model.fit([X_train, exog_var_train], y_train,
validation_split=0.1,
batch_size=params['batch_size'],
epochs=params['epochs'],
callbacks=callbacks,
verbose=2)
else:
history = model.fit(X_train, y_train,
validation_split=0.1,
batch_size=params['batch_size'],
epochs=params['epochs'],
callbacks=callbacks,
verbose=2)
################################
# Save weights #
################################
model_filepath = os.path.join(
config['weights'],'{}_{}_{}'
.format(params['tcn_type'], params['dataset'], time.time()))
model.save_weights(model_filepath)
logger.info("Model's weights saved at {}".format(model_filepath))
#################################
# Evaluate on Validation & Test #
#################################
fn_inverse_val = lambda x: dataset.inverse_transform(x, scaler=scaler, trend=None)
fn_inverse_test = lambda x: dataset.inverse_transform(x, scaler=scaler, trend=trend)
fn_plot = lambda x: plot(x, dataset.SAMPLES_PER_DAY, save_at=None)
if params['exogenous'] and params['tcn_type'] == 'conditional_tcn':
val_scores = tcn.evaluate(history.validation_data[:-1], fn_inverse=fn_inverse_val)
test_scores = tcn.evaluate([[X_test, exog_var_test], y_test], fn_inverse=fn_inverse_test, fn_plot=fn_plot)
else:
val_scores = tcn.evaluate(history.validation_data[:-1], fn_inverse=fn_inverse_val)
test_scores = tcn.evaluate([X_test, y_test], fn_inverse=fn_inverse_test, fn_plot=fn_plot)
metrics_names = [m.__name__ if not isinstance(m, str) else m for m in model.metrics]
return dict(zip(metrics_names, val_scores)), \
dict(zip(metrics_names, test_scores)), \
model_filepath
if __name__ == '__main__':
grid_search = args.grid_search
if grid_search:
run_grid_search(
experimentclass=DTSExperiment,
f_config=args.add_config,
db_name=config['db'],
ex_name='tcn_grid_search',
f_main=main,
f_metrics=log_metrics,
observer_type=args.observer)
else:
run_single_experiment(
experimentclass=DTSExperiment,
db_name=config['db'],
ex_name='tcn',
f_main=main,
f_config=args.add_config,
f_metrics=log_metrics,
observer_type=args.observer) |