max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
gshiw/quotes_web/quotes/adminx.py | superlead/gsw | 0 | 12785951 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from quotes_web.adminx import BaseAdmin
import xadmin
from .models import Quotes, Categories, Works, Writers, Speakers, Topics
class QuotesAdmin(BaseAdmin):
exclude = ('owner', 'view_nums', 'dig_nums')
xadmin.site.register(Quotes, QuotesAdmin)
class CategoryAdmin(BaseAdmin):
exclude = ('owner', 'view_nums')
xadmin.site.register(Categories, CategoryAdmin)
class WorkAdmin(BaseAdmin):
exclude = ('owner', 'view_nums')
xadmin.site.register(Works, WorkAdmin)
class WriterAdmin(BaseAdmin):
exclude = ('owner', 'view_nums')
xadmin.site.register(Writers, WriterAdmin)
class SpeakerAdmin(BaseAdmin):
exclude = ('owner', 'view_nums')
xadmin.site.register(Speakers, SpeakerAdmin)
class TopicAdmin(BaseAdmin):
exclude = ('owner', 'view_nums')
xadmin.site.register(Topics, TopicAdmin)
| 2.015625 | 2 |
lyapas_to_json.py | tsu-iscd/lyapas-lcc | 4 | 12785952 | <filename>lyapas_to_json.py
#!/usr/bin/env python2.7
import json
import argparse
import codecs
import sys
def main(args):
data = args.in_lyapas.read()
data = json.dumps(data, ensure_ascii=False, encoding='utf-8')
json_data = '{"file": "' + args.in_lyapas.name + '",' + ' "source": ' + data +'}'
args.out_filename.write(json_data)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Getting json from lyapas sources')
parser.add_argument('in_lyapas', help='Path in filesystem for input lyapas-file', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
parser.add_argument('-out_filename', help='Name of output file', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
args = parser.parse_args()
main(args)
| 3.0625 | 3 |
src/python/toolchain/plans.py | andyjost/Sprite | 1 | 12785953 | <filename>src/python/toolchain/plans.py
import collections, itertools
from . import _curry2icurry, _icurry2json, _json2py
Stage = collections.namedtuple('Stage', ['suffixes', 'step'])
class Plan(object):
'''
Represents a compilation plan.
Describes the sequence of steps that must be performed and the functions that
implement them.
'''
def __init__(self, kwds={}):
self.do_py = kwds.pop('py', False)
self.do_json = kwds.pop('json', True) or self.do_py
self.do_icy = kwds.pop('icy', True) or self.do_json
self.enabled = [self.do_icy, self.do_json, self.do_py]
do_zip = kwds.get('zip', True)
self.stages = list(self.get_stages(do_zip))
assert len(self.enabled) == len(self.stages) - 1
self.n_steps = sum(1 for _ in itertools.takewhile(lambda a:a, self.enabled))
@staticmethod
def get_stages(zip_json):
yield Stage(['.curry'] , _curry2icurry.curry2icurry)
yield Stage(['.icy'] , _icurry2json.icurry2json)
if zip_json:
yield Stage(['.json.z'], _json2py.json2py)
else:
yield Stage(['.json'] , _json2py.json2py)
yield Stage(['.py'] , None)
@property
def suffixes(self):
'''Returns the sequence of file suffixes in this plan.'''
def seq():
for en, stage in zip([True] + self.enabled, self.stages):
if en:
for suffix in stage.suffixes:
yield suffix
else:
break
return list(seq())
def __len__(self):
'''Gives the number of steps in the plan.'''
return self.n_steps
def position(self, filename):
'''Gives the current position in the plan.'''
for i,(suffixes,_) in enumerate(self.stages):
if any(filename.endswith(suffix) for suffix in suffixes):
return i
assert False
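# Quick illustration of the defaults (a sketch, not part of the original module):
# with kwds={}, the icy and json steps are enabled but the py step is not, so
#   Plan().suffixes  -> ['.curry', '.icy', '.json.z']
#   len(Plan())      -> 2
# while passing kwds={'py': True} also enables the '.py' suffix and a third step.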
| 2.609375 | 3 |
air_ticket/views/airline_staff.py | X-czh/Air-Ticket-Reservation-System | 0 | 12785954 | from flask import Blueprint, render_template, request, session, redirect, url_for
from pymysql import MySQLError
from datetime import date, datetime, timedelta
from dateutil.relativedelta import relativedelta
from air_ticket import conn
from air_ticket.utils import requires_login_airline_staff
mod = Blueprint('airline_staff', __name__, url_prefix='/airline_staff')
# Define route for homepage
@mod.route('/')
@requires_login_airline_staff
def homepage():
return render_template('airline_staff/index.html')
# Define route for update
@mod.route('/update')
@requires_login_airline_staff
def update():
return render_template('airline_staff/update.html')
# Define route for view
@mod.route('/view')
@requires_login_airline_staff
def view():
return render_template('airline_staff/view.html')
# Define route for compare
@mod.route('/compare')
@requires_login_airline_staff
def compare():
return render_template('airline_staff/compare.html')
# View my flights in the next 30 days
@mod.route('/viewMyFlights', methods=['POST'])
@requires_login_airline_staff
def viewMyFlights():
# grabs information
airline_name = session['airline_name']
# cursor used to send queries
cursor = conn.cursor()
# executes query
query = '''
SELECT *
FROM flight
WHERE airline_name = %s AND
departure_time BETWEEN CURDATE() AND DATE_ADD(NOW(), INTERVAL 30 DAY)
ORDER BY departure_time '''
cursor.execute(query, (airline_name))
# stores the results in a variable
data = cursor.fetchall()
cursor.close()
# check data
if data:
return render_template('airline_staff/index.html', result_viewMyFlights=data)
else:
msg = 'No records are found!'
return render_template('airline_staff/index.html', message=msg)
# View my flights option - specifying departure/arrival airport and a range of departure dates
@mod.route('/viewMyFlightsOption', methods=['POST'])
@requires_login_airline_staff
def viewMyFlightsOption():
# grabs information
airline_name = session['airline_name']
start_date = request.form['start_date']
end_date = request.form['end_date']
departure_airport = request.form['departure_airport']
arrival_airport = request.form['arrival_airport']
# check consistence of dates
if start_date > end_date:
error = 'Error: end date is earlier than start date!'
return render_template('airline_staff/index.html', message=error)
# cursor used to send queries
cursor = conn.cursor()
# executes query
query = '''
SELECT *
FROM flight
WHERE airline_name = %s AND departure_airport = %s
AND arrival_airport = %s AND departure_time BETWEEN %s AND %s
ORDER BY departure_time DESC '''
cursor.execute(query, (airline_name, departure_airport, arrival_airport,
start_date, end_date))
# stores the results in a variable
data = cursor.fetchall()
cursor.close()
# check data
if data:
return render_template('airline_staff/index.html', result_viewMyFlights=data)
else:
msg = 'No records are found!'
return render_template('airline_staff/index.html', message=msg)
# View all customers of a flight, sub module for view my flights
@mod.route('/viewAllCustomers', methods=['POST'])
@requires_login_airline_staff
def viewAllCustomers():
# grabs information
airline_name = session['airline_name']
flight_num = request.form['flight_num']
# cursor used to send queries
cursor = conn.cursor()
# executes query
query = '''
SELECT ticket_id, customer_email, booking_agent_id, purchase_date
FROM ticket NATURAL JOIN purchases
WHERE airline_name = %s AND flight_num = %s
ORDER by purchase_date DESC '''
cursor.execute(query, (airline_name, flight_num))
data = cursor.fetchall()
# check data
if data:
return render_template('airline_staff/index.html', airline_name=airline_name,
flight_num=flight_num, result_viewAllCustomers=data)
else:
msg = 'No customers yet!'
return render_template('airline_staff/index.html', message=msg)
@mod.route('/createNewFlights', methods=['POST'])
@requires_login_airline_staff
def createNewFlights():
# grabs information
airline_name = session['airline_name']
flight_num = request.form['flight_num']
departure_airport = request.form['departure_airport']
departure_time = request.form['departure_time']
arrival_airport = request.form['arrival_airport']
arrival_time = request.form['arrival_time']
price = request.form['price']
status = request.form['status']
airplane_id = request.form['airplane_id']
# check consistence of time
if departure_time >= arrival_time:
error = 'Error: wrong time format or inconsistent departure and arrival time!'
return render_template('airline_staff/update.html', result=error)
try:
msg = 'Create successfully!'
with conn.cursor() as cursor:
ins = 'INSERT INTO flight VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)'
cursor.execute(ins, (airline_name, flight_num, departure_airport, departure_time,
arrival_airport, arrival_time, price, status, airplane_id))
conn.commit()
except MySQLError as e:
msg = 'Got error {!r}, errno is {}'.format(e, e.args[0])
return render_template('airline_staff/update.html', result=msg)
# Change status of flights
@mod.route('/changeFlightStatus', methods=['POST'])
@requires_login_airline_staff
def changeFlightStatus():
# grabs information
airline_name = session['airline_name']
flight_num = request.form['flight_num']
status = request.form['status']
try:
msg = "Update successfully!"
with conn.cursor() as cursor:
query = '''
UPDATE flight
SET status = %s
WHERE airline_name = %s AND flight_num = %s '''
cursor.execute(query, (status, airline_name, flight_num))
conn.commit()
except MySQLError as e:
msg = 'Got error {!r}, errno is {}'.format(e, e.args[0])
return render_template('airline_staff/update.html', result=msg)
# Add new airplane
@mod.route('/addNewAirplane', methods=['POST'])
@requires_login_airline_staff
def addNewAirplane():
# grabs information
airline_name = session['airline_name']
airplane_id = request.form['airplane_id']
seats = request.form['seats']
try:
msg = 'Add successfully!'
with conn.cursor() as cursor:
ins = 'INSERT INTO airplane VALUES(%s, %s, %s)'
cursor.execute(ins, (airline_name, airplane_id, seats))
conn.commit()
except MySQLError as e:
msg = 'Got error {!r}, errno is {}'.format(e, e.args[0])
return render_template('airline_staff/update.html', result=msg)
# Add new airport
@mod.route('/addNewAirport', methods=['POST'])
@requires_login_airline_staff
def addNewAirport():
# grabs information
airport_name = request.form['airport_name']
airport_city = request.form['airport_city']
try:
msg = 'Add successfully!'
with conn.cursor() as cursor:
ins = 'INSERT INTO airport VALUES(%s, %s)'
cursor.execute(ins, (airport_name, airport_city))
conn.commit()
except MySQLError as e:
msg = 'Got error {!r}, errno is {}'.format(e, e.args[0])
return render_template('airline_staff/update.html', result=msg)
# View top5 booking agent
@mod.route('/viewTop5BookingAgent', methods=['POST'])
@requires_login_airline_staff
def viewTop5BookingAgent():
# grabs information
airline_name = session['airline_name']
# cursor used to send queries
cursor = conn.cursor()
# executes query
query = '''
SELECT booking_agent_id, COUNT(ticket_id) as count
FROM ticket NATURAL JOIN purchases
WHERE airline_name = %s AND booking_agent_id IS NOT NULL AND
purchase_date BETWEEN DATE_SUB(NOW(), INTERVAL 1 MONTH) AND CURDATE()
GROUP BY booking_agent_id
ORDER by count DESC
LIMIT 5 '''
cursor.execute(query, (airline_name))
top5bycount_past_month = cursor.fetchall()
query = '''
SELECT booking_agent_id, COUNT(ticket_id) as count
FROM ticket NATURAL JOIN purchases
WHERE airline_name = %s AND booking_agent_id IS NOT NULL AND
purchase_date BETWEEN DATE_SUB(NOW(), INTERVAL 1 YEAR) AND CURDATE()
GROUP BY booking_agent_id
ORDER by count DESC
LIMIT 5 '''
cursor.execute(query, (airline_name))
top5bycount_past_year = cursor.fetchall()
query = '''
SELECT booking_agent_id, SUM(price) * 0.1 as commission
FROM ticket NATURAL JOIN purchases NATURAL JOIN flight
WHERE airline_name = %s AND booking_agent_id IS NOT NULL AND
purchase_date BETWEEN DATE_SUB(NOW(), INTERVAL 1 YEAR) AND CURDATE()
GROUP BY booking_agent_id
ORDER by commission DESC
LIMIT 5 '''
cursor.execute(query, (airline_name))
top5bycommission_past_year = cursor.fetchall()
cursor.close()
# check data
msg = None
if top5bycount_past_year == None or top5bycount_past_year == ():
msg = 'No records in the last year!'
elif top5bycount_past_month == None or top5bycount_past_month == ():
msg = 'No records in the last month!'
return render_template('airline_staff/view.html',
top5bycount_past_month=top5bycount_past_month,
top5bycount_past_year=top5bycount_past_year,
top5bycommission_past_year=top5bycommission_past_year,
message_viewTop5BookingAgent=msg)
# View frequent customers
@mod.route('/viewFrequentCustomers', methods=['POST'])
@requires_login_airline_staff
def viewFrequentCustomers():
# grabs information
airline_name = session['airline_name']
# cursor used to send queries
cursor = conn.cursor()
# executes query
query = '''
SELECT customer_email, COUNT(ticket_id) AS count
FROM ticket NATURAL JOIN purchases
WHERE airline_name = %s AND
purchase_date BETWEEN DATE_SUB(NOW(), INTERVAL 1 YEAR) AND CURDATE()
GROUP BY customer_email
ORDER by count DESC '''
cursor.execute(query, (airline_name))
data = cursor.fetchall()
if data != None and data != ():
return render_template('airline_staff/view.html', result_viewFrequentCustomers=data)
else:
msg = 'No records are found!'
return render_template('airline_staff/view.html', message_viewFrequentCustomers=msg)
# View flights taken, sub module for view frequent customers
@mod.route('/viewFlightsTaken', methods=['POST'])
@requires_login_airline_staff
def viewFlightsTaken():
# grabs information
airline_name = session['airline_name']
customer_email = request.form['customer_email']
# cursor used to send queries
cursor = conn.cursor()
# executes query
query = '''
SELECT customer_email, flight_num, purchase_date
FROM ticket NATURAL JOIN purchases
WHERE airline_name = %s AND customer_email = %s
ORDER by purchase_date DESC '''
cursor.execute(query, (airline_name, customer_email))
data = cursor.fetchall()
return render_template('airline_staff/view.html', result_viewFlightsTaken=data)
# View reports
@mod.route('/viewReports', methods=['POST'])
@requires_login_airline_staff
def viewReports():
# grabs information
airline_name = session['airline_name']
start_month = request.form['start_month']
end_month = request.form['end_month']
# check consistence of months
if start_month > end_month:
error = 'Error: end month is earlier than start month!'
return render_template('airline_staff/view.html', message_viewReports=error)
# computes date
start_date = datetime.strptime(start_month, '%Y-%m').date()
start_date_str = start_date.strftime('%Y-%m-%d')
end_date = datetime.strptime(end_month, '%Y-%m').date() + relativedelta(months=+1)
end_date_str = end_date.strftime('%Y-%m-%d')
diff = (end_date.year - start_date.year) * 12 + (end_date.month - start_date.month)
# cursor used to send queries
cursor = conn.cursor()
# query
query = '''
SELECT COUNT(ticket_id) as total
FROM purchases NATURAL JOIN ticket
WHERE airline_name = %s AND purchase_date >= %s AND purchase_date < %s '''
# total
cursor.execute(query, (airline_name, start_date_str, end_date_str))
data = cursor.fetchone()
total = data['total'] if data['total'] != None else 0
# monthwise
monthwise_label = []
monthwise_total = []
end_date = start_date + relativedelta(months=+1)
for _ in range(diff):
start_date_str = start_date.strftime('%Y-%m-%d')
end_date_str = end_date.strftime('%Y-%m-%d')
cursor.execute(query, (airline_name, start_date_str, end_date_str))
monthwise = cursor.fetchone()
monthwise_label.append(start_date.strftime('%y/%m'))
monthwise_total.append(monthwise['total'] if monthwise['total'] != None else 0)
start_date += relativedelta(months=+1)
end_date += relativedelta(months=+1)
cursor.close()
return render_template('airline_staff/view.html', total=total,
monthwise_label=monthwise_label, monthwise_total=monthwise_total)
# Compare revenue
@mod.route('/compareRevenue', methods=['POST'])
@requires_login_airline_staff
def compareRevenue():
# grabs information
airline_name = session['airline_name']
# cursor used to send queries
cursor = conn.cursor()
# revenue in the last month
query = '''
SELECT SUM(price) as revenue
FROM flight NATURAL JOIN ticket NATURAL JOIN purchases
WHERE airline_name = %s AND booking_agent_id IS NULL AND
purchase_date BETWEEN DATE_SUB(NOW(), INTERVAL 1 MONTH) AND CURDATE() '''
cursor.execute(query, (airline_name))
data = cursor.fetchone()
if data == None:
revenue_direct_sale_last_month = 0
elif data['revenue'] == None:
revenue_direct_sale_last_month = 0
else:
revenue_direct_sale_last_month = data['revenue']
query = '''
SELECT SUM(price) as revenue
FROM flight NATURAL JOIN ticket NATURAL JOIN purchases
WHERE airline_name = %s AND booking_agent_id IS NOT NULL AND
purchase_date BETWEEN DATE_SUB(NOW(), INTERVAL 1 MONTH) AND CURDATE() '''
cursor.execute(query, (airline_name))
data = cursor.fetchone()
if data == None:
revenue_indirect_sale_last_month = 0
elif data['revenue'] == None:
revenue_indirect_sale_last_month = 0
else:
revenue_indirect_sale_last_month = data['revenue']
# revenue in the last year
query = '''
SELECT SUM(price) as revenue
FROM flight NATURAL JOIN ticket NATURAL JOIN purchases
WHERE airline_name = %s AND booking_agent_id IS NULL AND
purchase_date BETWEEN DATE_SUB(NOW(), INTERVAL 1 YEAR) AND CURDATE() '''
cursor.execute(query, (airline_name))
data = cursor.fetchone()
if data == None:
revenue_direct_sale_last_year = 0
elif data['revenue'] == None:
revenue_direct_sale_last_year = 0
else:
revenue_direct_sale_last_year = data['revenue']
query = '''
SELECT SUM(price) as revenue
FROM flight NATURAL JOIN ticket NATURAL JOIN purchases
WHERE airline_name = %s AND booking_agent_id IS NOT NULL AND
purchase_date BETWEEN DATE_SUB(NOW(), INTERVAL 1 YEAR) AND CURDATE() '''
cursor.execute(query, (airline_name))
data = cursor.fetchone()
if data == None:
revenue_indirect_sale_last_year = 0
elif data['revenue'] == None:
revenue_indirect_sale_last_year = 0
else:
revenue_indirect_sale_last_year = data['revenue']
# check data
msg = None
if revenue_direct_sale_last_year * revenue_indirect_sale_last_year == 0:
msg = 'No sale in the last year!'
elif revenue_direct_sale_last_month * revenue_indirect_sale_last_month == 0:
msg = 'No sale in the last month!'
return render_template('airline_staff/compare.html',
revenue_direct_sale_last_month=revenue_direct_sale_last_month,
revenue_indirect_sale_last_month=revenue_indirect_sale_last_month,
revenue_direct_sale_last_year=revenue_direct_sale_last_year,
revenue_indirect_sale_last_year=revenue_indirect_sale_last_year,
message=msg)
# View top3 destinations
@mod.route('/viewTop3Destinations', methods=['POST'])
@requires_login_airline_staff
def viewTop3Destinations():
#grabs information
airline_name = session['airline_name']
# cursor used to send queries
cursor = conn.cursor()
# executes query
query = '''
SELECT arrival_airport, airport_city, COUNT(ticket_id) as count
FROM flight NATURAL JOIN ticket NATURAL JOIN purchases, airport
WHERE airline_name = %s AND arrival_airport = airport_name AND
purchase_date BETWEEN DATE_SUB(NOW(), INTERVAL 3 MONTH) AND CURDATE()
GROUP BY arrival_airport
ORDER BY count DESC
LIMIT 3 '''
cursor.execute(query, (airline_name))
top3_past3month = cursor.fetchall()
query = '''
SELECT arrival_airport, airport_city, COUNT(ticket_id) as count
FROM flight NATURAL JOIN ticket NATURAL JOIN purchases, airport
WHERE airline_name = %s AND arrival_airport = airport_name AND
purchase_date BETWEEN DATE_SUB(NOW(), INTERVAL 1 YEAR) AND CURDATE()
GROUP BY arrival_airport
ORDER BY count DESC
LIMIT 3 '''
cursor.execute(query, (airline_name))
top3_past1year = cursor.fetchall()
cursor.close()
# check data
msg = None
if top3_past1year == None or top3_past1year == ():
msg = 'No records in the last year!'
elif top3_past3month == None or top3_past3month == ():
msg = 'No records in the last 3 months!'
return render_template('airline_staff/view.html',
top3_past3month=top3_past3month,
top3_past1year=top3_past1year,
message_viewTop3Destinations=msg)
# Define route for logout
@mod.route('/logout')
@requires_login_airline_staff
def logout():
session.pop('username')
session.pop('usertype')
session.pop('airline_name')
return redirect('/')
| 2.40625 | 2 |
sbr_ui/services/gateway_authentication_service.py | ajorpheus/sbr-ui | 1 | 12785955 | <reponame>ajorpheus/sbr-ui
import logging
import requests
from structlog import wrap_logger
from typing import Tuple
from flask import current_app
from sbr_ui.models.exceptions import ApiError
from sbr_ui.utilities.helpers import log_api_error, base_64_encode
logger = wrap_logger(logging.getLogger(__name__))
class GatewayAuthenticationService:
Role = str
Token = str # A uuid from the API Gateway
TokenAndRole = Tuple[Token, Role]
@staticmethod
def login(username: str, password: str) -> TokenAndRole:
gateway_auth_url = current_app.config['AUTH_URL']
timeout = current_app.config['AUTH_TIMEOUT']
logger.debug("Logging user in", username=username)
headers = {'content-type': 'application/json', 'Authorization': str(base_64_encode(f'{username}:{password}'))}
response = requests.post(gateway_auth_url, headers=headers, timeout=timeout)
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
log_api_error(response.status_code, 'Failed to authorize via the API Gateway', gateway_auth_url)
raise ApiError(response)
json = response.json()
token = json.get('token')
role = json.get('role')
if token is None or role is None:
logger.error("Returned Gateway JSON is in the wrong format")
raise ValueError(response)
return token, role
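# Rough usage sketch (assumes an active Flask application context so that
# current_app.config supplies AUTH_URL and AUTH_TIMEOUT; the credentials below
# are placeholders):
#
#   token, role = GatewayAuthenticationService.login('some.user', 'secret')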
| 1.992188 | 2 |
containers/mobilenet/dataset/scripts/tar_to_record.py | AustinShalit/ml-react-app | 18 | 12785956 | <reponame>AustinShalit/ml-react-app<gh_stars>10-100
import sys, os, shutil, tarfile, argparse, zipfile
import json_to_csv, generate_tfrecord, parse_meta, parse_hyperparams
from os.path import join, splitext, split
def main(dataset_paths, percent_eval, directory):
ROOT_PATH, PATH_EXT = os.path.splitext(dataset_paths)
DATASET_NAME = ROOT_PATH.split('/')[-1]
OUTPUT_PATH = directory
EXTRACT_PATH = "/home"
TMP_PATH = "/home/tmp"
# Placeholder for enum, here 1 is tar, 0 is ZIP
NORMAL_MODE = 1 # Assume this is a tar
if not os.path.exists(TMP_PATH):
os.makedirs(TMP_PATH)
if not os.path.exists(EXTRACT_PATH):
os.makedirs(EXTRACT_PATH)
if PATH_EXT == '.zip':
print('.zip file extension found, interpreting as tensorflow object detection csv zip')
NORMAL_MODE = 0 # Not a tar file
if NORMAL_MODE: # Perform working tar code
print("normal mode")
try:
shutil.copy(dataset_paths, join(EXTRACT_PATH, 'data.tar'))
except:
print('unable to retrieve a dataset tar file:')
sys.exit(1)
with tarfile.open(dataset_paths) as tar_file:
tar_file.extractall(join(EXTRACT_PATH, 'out'))
if percent_eval > 100 or percent_eval < 0:
percent_eval = 30
json_to_csv.main(percent_eval)
try:
generate_tfrecord.main(TMP_PATH + "/train.csv", join(OUTPUT_PATH, 'train.record'), NORMAL_MODE, "/home/")
generate_tfrecord.main(TMP_PATH + "/eval.csv", join(OUTPUT_PATH, 'eval.record'), NORMAL_MODE, "/home/")
parse_meta.main(join(OUTPUT_PATH, 'map.pbtxt'), NORMAL_MODE, TMP_PATH + "/eval.csv")
print(".\nRecords generated")
except ValueError:
print("The datasets provided do not have the same class labels. Please make sure that labels are spelt the same in both datasets, or label the same objects for both datasets.")
if not NORMAL_MODE:
print('treating as zip of tf obj detect')
#Pseudocode
#Unzip the zip in correct dir
with zipfile.ZipFile(dataset_paths, 'r') as zip_file: # Unzip the file (Assuming 1 zip at this time)
namelist = zip_file.namelist()[-1]
if any([namelist.startswith(i) for i in ["valid", "train", "test"]]):
zip_file.extractall(EXTRACT_PATH+"/"+DATASET_NAME)
else:
zip_file.extractall(EXTRACT_PATH)
from fnmatch import fnmatch
pattern = "*.csv"
for path, subdirs, files in os.walk(EXTRACT_PATH):
for name in files:
if fnmatch(name, pattern):
print("CSV:",os.path.join(path, name))
#Generate the records
# try:
print(EXTRACT_PATH + "/" + DATASET_NAME + "/test/_annotations.csv")
generate_tfrecord.main(EXTRACT_PATH + "/" + DATASET_NAME + "/test/_annotations.csv", join(OUTPUT_PATH, 'eval.record'), NORMAL_MODE, EXTRACT_PATH + "/" + DATASET_NAME + "/test/")
generate_tfrecord.main(EXTRACT_PATH + "/" + DATASET_NAME + "/train/_annotations.csv", join(OUTPUT_PATH, 'train.record'), NORMAL_MODE, EXTRACT_PATH + "/" + DATASET_NAME + "/train/")
print('main records generated')
parse_meta.main(join(OUTPUT_PATH, 'map.pbtxt'), NORMAL_MODE, EXTRACT_PATH + "/" + DATASET_NAME + "/train/_annotations.csv") # Edge case of missing label in one csv
print(".\nRecords generated")
# except ValueError:
# print("The datasets provided do not have the same class labels. Please make sure that labels are spelt the same in both datasets, or label the same objects for both datasets.")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--dir', type=str, help='Path of the folder to train in.')
DIRECTORY = parser.parse_args().dir
data = parse_hyperparams.parse(os.path.join(DIRECTORY,"hyperparameters.json"))
DATASET_PATHS = data["dataset-path"]
PERCENT_EVAL = data["percent-eval"]
main(DATASET_PATHS, PERCENT_EVAL, DIRECTORY)
| 2.3125 | 2 |
setup.py | caozhichongchong/traits_finder | 1 | 12785957 | from setuptools import setup
setup(
name="traits_finder",
packages=['traits_finder'],
version="1.6",
description="search and summarize traits in genomes and metagenomes",
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/caozhichongchong/traits_finder',
keywords=['metagenomes', 'genomes', 'function', 'traits'],
license='MIT',
install_requires=[
'biopython',
'argparse',
'glob2',
'statistics'
],
include_package_data=True,
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
package_dir={'traits_finder': 'traits_finder'},
package_data={'traits_finder': ['scripts/*','database/*','example/*','*.py']},
entry_points={'console_scripts': ['traits_finder = traits_finder.__main__:main']},
classifiers=[
#'Development Status :: 1 - Alpha',
#'Intended Audience :: Bioinformatics and Researchers',
#'License :: MIT',
#'Operating System :: MacOS',
#'Operating System :: Microsoft :: Windows',
#'Operating System :: LINUX',
'Programming Language :: Python :: 3',
#'Topic :: Antibiotic resistance :: risk ranking',
#'Topic :: Metagenomes :: Antibiotic resistance',
]
)
| 1.3125 | 1 |
lstm4backend/train.py | cap-ntu/Autocomplete | 1 | 12785958 |
import just
import json
import pandas as pd
from pathlib import Path
pd.set_option('max_colwidth',300)
from encoder_decoder import TextEncoderDecoder, text_tokenize
from model import LSTMBase
TRAINING_TEST_CASES = ["from keras.layers import"]
columns_long_list = ['repo', 'path', 'url', 'code',
'code_tokens', 'docstring', 'docstring_tokens',
'language', 'partition']
def jsonl_list_to_dataframe(file_list, columns=columns_long_list):
return pd.concat([pd.read_json(f,
orient='records',
compression='gzip',
lines=True)[columns]
for f in file_list], sort=False)
def get_data():
print("loading data... \n")
python_files = sorted(Path('./data/python/').glob('**/*.gz'))
pydf = jsonl_list_to_dataframe(python_files)
code_data = pydf["code"].to_numpy()
# code_data = list(just.multi_read("data/**/*.py").values())
print(len(code_data), "\n =====> Sample code as training data: \n", code_data[0])
# quick test: use only the first 30 training samples
return code_data[:30]
def train(ted, model_name):
lb = LSTMBase(model_name, ted)
try:
lb.train(test_cases=TRAINING_TEST_CASES)
except KeyboardInterrupt:
pass
print("saving")
lb.save()
def train_char(model_name):
data = get_data()
# list makes a str "str" into a list ["s","t","r"]
ted = TextEncoderDecoder(data, tokenize=list, untokenize="".join, padding=" ",
min_count=1, maxlen=40)
train(ted, model_name)
def train_token(model_name):
data = get_data()
# text tokenize splits source code into python tokens
ted = TextEncoderDecoder(data, tokenize=text_tokenize, untokenize="".join, padding=" ",
min_count=1, maxlen=20)
# print("[Token Training] Loading data...")
# python_files = sorted(Path('./data/python/').glob('**/*.gz'))
# pydf = jsonl_list_to_dataframe(python_files)
# tokens = pydf["code_tokens"]
train(ted, model_name)
def get_model(model_name):
return LSTMBase(model_name)
def complete(model, text, diversities):
predictions = [model.predict(text, diversity=d, max_prediction_steps=80,
break_at_token="\n")
for d in diversities]
# returning the latest sentence, + prediction
suggestions = [text.split("\n")[-1] + x.rstrip("\n") for x in predictions]
return suggestions
if __name__ == "__main__":
import sys
if len(sys.argv) != 3:
raise Exception(
"expecting model name, such as 'neural' and type (either 'char' or 'token'")
model_name = "_".join(sys.argv[1:])
if sys.argv[2] == "char":
train_char(model_name)
elif sys.argv[2] == "token":
train_token(model_name)
else:
msg = "The second argument cannot be {}, but should be either 'char' or 'token'"
raise Exception(msg.format(sys.argv[2]))
| 2.53125 | 3 |
uclasm/utils.py | cfld/uclasm | 0 | 12785959 | """Miscellaneous functions and helpers for the uclasm package."""
import numpy as np
def one_hot(idx, length):
"""Return a 1darray of zeros with a single one in the idx'th entry."""
one_hot = np.zeros(length, dtype=np.bool)
one_hot[idx] = True
return one_hot
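# Small example of the helper above (a sanity check, not part of the original module):
#   one_hot(2, 5) -> array([False, False,  True, False, False])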
def index_map(args):
"""Return a dict mapping elements to their indices.
Parameters
----------
args : Iterable[str]
Strings to be mapped to their indices.
"""
return {elm: idx for idx, elm in enumerate(args)}
# TODO: change the name of this function
def invert(dict_of_sets):
"""TODO: Docstring."""
new_dict = {}
for k, v in dict_of_sets.items():
for x in v:
new_dict[x] = new_dict.get(x, set()) | set([k])
return new_dict
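# For instance (an illustrative case), invert({'a': {1, 2}, 'b': {2}})
# returns {1: {'a'}, 2: {'a', 'b'}}.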
def values_map_to_same_key(dict_of_sets):
"""TODO: Docstring."""
matches = {}
# get the sets of candidates
for key, val_set in dict_of_sets.items():
frozen_val_set = frozenset(val_set)
matches[frozen_val_set] = matches.get(frozen_val_set, set()) | {key}
return matches
def apply_index_map_to_cols(df, cols, values):
"""Replace df[cols] with their indexes as taken from names.
Parameters
----------
df : DataFrame
To be modified inplace.
cols : Iterable[str]
Columns of df to operate on.
values : Iterable[str]
Values expected to be present in df[cols] to be replaced with their
corresponding indexes.
"""
val_to_idx = index_map(values)
df[cols] = df[cols].applymap(val_to_idx.get)
| 3.21875 | 3 |
tools/simulator.py | zhongxinghong/Botzone-Tank2 | 11 | 12785960 | # -*- coding: utf-8 -*-
# @Author: Administrator
# @Date: 2019-04-30 11:25:35
# @Last Modified by: Administrator
# @Last Modified time: 2019-05-26 01:25:58
"""
A GUI-less game simulator that can replay match records.
"""
import sys
sys.path.append("../")
from core import const as game_const
import os
import time
import json
import subprocess
import multiprocessing
from _lib.utils import json_load
from _lib.simulator.const import BLUE_INPUT_JSON_FILENAME, RED_INPUT_JSON_FILENAME,\
DATASET_DIR, CONFIG_JSON_FILE
from _lib.simulator.utils import cut_by_turn
from _lib.simulator.stream import SimulatorConsoleOutputStream, SimulatorTextInputStream
try:
config = json_load(CONFIG_JSON_FILE)
except json.JSONDecodeError as e: # the config file is malformed
raise e
## Environment settings ##
game_const.DEBUG_MODE = config["environment"]["debug"] # whether DEBUG mode is enabled
game_const.LONG_RUNNING_MODE = config["environment"]["long_running"] # whether LONG_RUNNING mode is enabled
game_const.SIMULATOR_ENV = config["environment"]["simulator"] # whether this runs in the simulator environment
game_const.COMPACT_MAP = config["debug"]["compact_map"] # whether to print the map in compact form
game_const.SIMULATOR_PRINT = config["simulator"]["print"] # whether to print simulator logs
## Game settings ##
MATCH_ID = config["game"]["match_id"] # match ID
SIDE = config["game"]["side"] # which side we play; this decides which data source is used:
# 0 means blue.input.json, 1 means red.input.json
INITIAL_TURN = config["game"]["initial_turn"] # which turn to start from
## Simulator settings ##
TURN_INTERVAL = config["simulator"]["turn_interval"] # pause between turns when auto-playing
PAUSE_PER_TURN = config["simulator"]["pause"] # disable auto-play; wait for a keypress after each turn
HIDE_DATA = config["simulator"]["hide_data"] # whether to hide the data and globaldata fields of the game's output json
def main():
from main import main as run_game
if SIDE == 0:
INPUT_JSON = os.path.join(DATASET_DIR, MATCH_ID, BLUE_INPUT_JSON_FILENAME)
elif SIDE == 1:
INPUT_JSON = os.path.join(DATASET_DIR, MATCH_ID, RED_INPUT_JSON_FILENAME)
else:
raise Exception("unknown side %s" % SIDE)
wholeInputJSON = json_load(INPUT_JSON)
totalTurn = len(wholeInputJSON["responses"])
data = None
globaldata = None
parentConnection, childrenConnection = multiprocessing.Pipe()
for turn in range(INITIAL_TURN, totalTurn+2):
CUT_OFF_RULE = "-" * 30
inputJSON = cut_by_turn(wholeInputJSON, turn)
if data is not None:
inputJSON["data"] = data
if globaldata is not None:
inputJSON["globaldata"] = globaldata
istream = SimulatorTextInputStream(json.dumps(inputJSON))
ostream = SimulatorConsoleOutputStream(connection=childrenConnection, hide_data=HIDE_DATA)
p = multiprocessing.Process( target=run_game, args=(istream, ostream) )
p.daemon = True
p.start()
output = parentConnection.recv()
p.join()
if p.exitcode != 0:
break
outputJSON = json.loads(output)
data = outputJSON.get("data")
globaldata = outputJSON.get("globaldata")
print(CUT_OFF_RULE)
print("End Turn %s" % turn)
if PAUSE_PER_TURN:
#subprocess.call("pause",shell=True)
os.system('pause')
else:
time.sleep(TURN_INTERVAL)
if __name__ == '__main__':
main() | 1.703125 | 2 |
{{ cookiecutter.repo_name }}/test/__init__.py | jakebrinkmann/waldo-jakebrinkmann | 0 | 12785961 | <gh_stars>0
"""Common testing configuration.
""" | 1.015625 | 1 |
neural_cdes/ode_with_context.py | jb-c/dissertation | 0 | 12785962 | <reponame>jb-c/dissertation<filename>neural_cdes/ode_with_context.py
import numpy as np
from F import F
#------------------------ ODE MODEL With Context ---------------------------
# The idea is to use a neural ODE with context for sequence-to-sequence forecasting
#---------------------------------------------------------------------------
| 1.859375 | 2 |
src/illumideskdummyauthenticator/tests/test_authenticator.py | IllumiDesk/illumidesk | 41 | 12785963 | import json
from unittest.mock import Mock
from unittest.mock import patch
import pytest
from illumideskdummyauthenticator.authenticator import IllumiDeskDummyAuthenticator
from illumideskdummyauthenticator.validators import IllumiDeskDummyValidator
from tornado.web import RequestHandler
@pytest.mark.asyncio
async def test_authenticator_returns_auth_state(make_dummy_authentication_request_args):
"""
Ensure we get a valid authentication dictionary.
"""
with patch.object(
IllumiDeskDummyValidator, "validate_login_request", return_value=True
):
authenticator = IllumiDeskDummyAuthenticator()
handler = Mock(
spec=RequestHandler,
get_secure_cookie=Mock(return_value=json.dumps(["key", "secret"])),
request=Mock(
arguments=make_dummy_authentication_request_args(),
headers={},
items=[],
),
)
result = await authenticator.authenticate(handler, None)
expected = {
"name": "foobar",
"auth_state": {
"assignment_name": "lab101",
"course_id": "intro101",
"lms_user_id": "abc123",
"user_role": "Student",
},
}
assert result == expected
| 2.265625 | 2 |
atcoder/abc/abc140_d.py | knuu/competitive-programming | 1 | 12785964 | N, K = map(int, input().split())
S = input()
intervals = []
idx = 0
dirs = []
while idx < N:
start = idx
dirs.append([0, 1][S[idx] == "R"])
while idx < N and S[idx] == S[start]:
idx += 1
intervals.append(idx - start)
assert(sum(intervals) == N)
def calc_ans(result_dirs):
ans = N
if result_dirs[0] == 0:
ans -= 1
for i in range(len(result_dirs)-1):
now_dir, next_dir = result_dirs[i], result_dirs[i+1]
if now_dir == 1 and next_dir == 0:
ans -= 2
if result_dirs[-1] == 1:
ans -= 1
return ans
rest = K
result_dirs = [dirs[0]]
for now_dir in dirs[1:]:
if rest > 0 and now_dir == 1:
result_dirs.append(0)
rest -= 1
else:
result_dirs.append(now_dir)
if rest > 0 and dirs[0] == 1:
result_dirs[0] = 0
left_ans = calc_ans(result_dirs)
rest = K
result_dirs = [dirs[0]]
for now_dir in dirs[1:]:
if rest > 0 and now_dir == 0:
result_dirs.append(1)
rest -= 1
else:
result_dirs.append(now_dir)
if rest > 0 and dirs[0] == 0:
result_dirs[0] = 1
right_ans = calc_ans(result_dirs)
print(max(left_ans, right_ans))
| 2.703125 | 3 |
Hack-Message/hack_message.py | yashk2810/Alert-on-Intrusion | 5 | 12785965 | #!/usr/bin/env python
import urllib2,re,pygeoip,json
import os
from time import strftime
import twilio
from twilio.rest import TwilioRestClient
import pygame
import pygame.camera
TWILIO_ACCOUNT_SID="YOUR_ACCOUNT_SID"
TWILIO_AUTH_TOKEN="<PASSWORD>"
def main ():
new=open("/home/yash/auth",'r')
for i in new:
length1=len(i)
new.close()
new_file=open("/home/yash/auth",'w')
f=open("/var/log/auth.log",'r')
text=f.read()
failed=re.findall(r'failed|FAILED',text)
new_file.write(str(failed))
new_file.close()
new2=open("/home/yash/auth",'r')
for i in new2:
length2=len(i)
new2.close()
if length2>length1:
ip_url=urllib2.urlopen("http://whatismyip.org").read()
ip_search=re.search(r'\d+.\d+.\d+.\d+',ip_url)
ip=ip_search.group()
url="http://www.freegeoip.net/json/"+ip
location=urllib2.urlopen(url).read()
data=json.loads(location)
hack_message="City: "+data['city']+\
", Country: "+data['country_name']+\
", IP: "+data['ip']+\
", Latitude: "+str(data['latitude'])+\
", longitude: "+str(data['longitude'])+\
", zipcode: "+str(data['zip_code'])
pygame.camera.init()
pygame.camera.list_cameras()
cam = pygame.camera.Camera("/dev/video0", (640, 480))
cam.start()
img = cam.get_image()
f=open("/home/yash/logs/count",'r')
c=f.read()
pygame.image.save(img, "/home/yash/logs/intruder"+c+".jpg")
cam.stop()
file_write=open("/home/yash/logs/count",'w')
c=str(int(c)+1)
file_write.write(c)
file_write.close()
f.close()
client = TwilioRestClient(TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN)
message=client.messages.create(
body="Someone is trying to break into your laptop. The details are: "+hack_message,
to="YOUR_CONTACT_NUMBER",
from_="YOUR_TWILIO_PHONE_NUMBER")
try:
logs=open('logs/file.txt', 'a')
except IOError as e:
# dir & file don't exist; create them
os.mkdir('logs')
logs=open('logs/file.txt', 'a')
except Exception as e:
print e
else:
pass
# log it
logs.write("Message with sid {} sent at {}".format(message.sid,strftime("%a, %d %b %Y %H:%M:%S") + "\n"))
logs.close()
if __name__ == '__main__':
main()
| 2.921875 | 3 |
ticketbase/codebase/migrations/0005_ticket_is_closed.py | Hipo/codebase-ultra | 0 | 12785966 | # Generated by Django 2.2.4 on 2019-09-27 09:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('codebase', '0004_ticket_status'),
]
operations = [
migrations.AddField(
model_name='ticket',
name='is_closed',
field=models.BooleanField(default=True),
),
]
| 1.578125 | 2 |
python/html-color-converter.py | ModestTG/scripts | 0 | 12785967 | <reponame>ModestTG/scripts<filename>python/html-color-converter.py
from __future__ import print_function, unicode_literals, division
def rgb_to_hex(value):
rgb_values = value.split(",")
rgb_values = [int(i) for i in rgb_values]
print(rgb_values)
converted_values = []
for component in rgb_values:
# format each component as a two-digit hex value (no '0x' prefix)
converted_values.append("{:02x}".format(component))
print("The converted color value is: #{}{}{}".format(*converted_values))
print("Convert HTML Colors to and from both RGB and Hex.\nHex format: #000000\nRGB format: ###,###,### (don't need leading zeroes but commas are important)\n")
try:
color = raw_input("Enter an RGB combination, or Hex value to have it converted. Hex must start with a \'#\': ")
except NameError:
color = input("Enter an RGB combination, or Hex value to have it converted. Hex must start with a \'#\': ")
if color[0] == '#':
pass
else:
rgb_to_hex(color)
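# Sample run of the RGB branch (illustrative only): entering "255,0,16" prints
#   [255, 0, 16]
#   The converted color value is: #ff0010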
| 4.0625 | 4 |
puck_install.py | drsooch/puck-python | 0 | 12785968 | """
This is the main setup file for Puck.
"""
from pathlib import Path
import subprocess
import json
import psycopg2 as pg
PUCK = Path.home().joinpath('.puck/')
print('Creating Configuration file...')
if Path.exists(PUCK):
for file in Path.iterdir(PUCK):
Path.unlink(file)
Path.rmdir(PUCK)
Path.mkdir(PUCK)
Path.touch(PUCK.joinpath('config.json'))
print(
"""NOTE: Please make sure you have set up a database for puck.
I have not been able to get Postgres to cooperate to allow for generic \
database and user creation."""
)
connected = False
with open(PUCK.joinpath('config.json'), 'w') as f:
while not connected:
db_name = input('Please enter the name of the database created\n> ')
db_user = input(
'Please enter the name of the user associated with the DB\n> '
)
try:
pg.connect(database=db_name, user=db_user)
except pg.OperationalError as err:
if db_name in str(err):
print(f'{db_name} is not a valid database.')
elif db_user in str(err):
print(f'{db_user} is not a valid username.')
else:
connected = True
json.dump({'dbName': db_name, 'dbUser': db_user}, f)
| 2.5 | 2 |
src/pyff/exceptions.py | dnmvisser/pyFF | 15 | 12785969 | <filename>src/pyff/exceptions.py
__author__ = 'leifj'
import six
class PyffException(BaseException):
def __init__(self, msg, wrapped=None, data=None):
self._wrapped = wrapped
self._data = data
if six.PY2:
super(self.__class__, self).__init__(msg)
else:
super().__init__(msg)
def raise_wrapped(self):
raise self._wrapped
class ResourceException(PyffException):
pass
class MetadataException(ResourceException):
pass
class MetadataExpiredException(MetadataException):
pass
| 2.59375 | 3 |
mc_pdf.py | shclem/mcdeck | 5 | 12785970 | <filename>mc_pdf.py
import os.path
from fpdf import FPDF
from mc_args import MCArgs
from mc_progress import MCProgress
class MCPdf(FPDF):
__args: MCArgs = None
__progress: MCProgress = None
def __init__(self, args, progress):
FPDF.__init__(self, orientation=args.pageOrientation, unit='mm', format=args.pageFormat)
self.__args = args
self.__progress = progress
self.pageMarginWidth = 10
self.pageMarginHeight = 10
self.cardIndexX = 0
self.cardIndexY = 0
self.cardWidth = 62
self.cardHeight = 88
self.columlIndexX = 0
self.columnWidth = 31
self.columnMarginTop = 9
self.columnMarginStart = 2
self.set_fill_color(230)
self.set_font("Arial", size = 6)
self.x = self.pageMarginWidth
self.y = self.pageMarginHeight
self.nbCardOnPageWidth = int((self.w - 2*self.pageMarginWidth) // self.cardWidth)
self.nbCardOnPageHeight = int((self.h - 2*self.pageMarginHeight) // self.cardHeight)
self.nbCardOnPage = self.nbCardOnPageWidth * self.nbCardOnPageHeight
def __drawCutLines(self):
self.set_draw_color(128)
for xIndex in range(self.nbCardOnPageWidth+1):
x = self.pageMarginWidth + xIndex * self.cardWidth
self.line(x,0, x, self.h)
for yIndex in range(self.nbCardOnPageHeight+1):
y = self.pageMarginHeight + yIndex * self.cardHeight
self.line(0,y,self.w,y)
def __drawDeckSection(self,title,cards,count):
if count > 0:
self.x = self.pageMarginWidth + self.cardIndexX*self.cardWidth + self.columlIndexX*self.columnWidth
if (self.y-self.pageMarginHeight-self.cardIndexY*self.cardHeight) > self.cardHeight - 12:
self.columlIndexX = 1
self.y = self.pageMarginHeight + self.cardIndexY*self.cardHeight + self.columnMarginTop
self.x = self.pageMarginWidth + self.cardIndexX*self.cardWidth + self.columlIndexX*self.columnWidth
self.set_font("Arial", size = 6, style = 'B')
self.cell(20, 3, txt = f"{title} ({count})",ln = 2, align = 'L')
self.x = self.pageMarginWidth + self.cardIndexX*self.cardWidth + self.columlIndexX*self.columnWidth + self.columnMarginStart
self.y += 0.7
self.set_font("Arial", size = 6, style = self.__args.itemFontStyle)
cards.sort(key=lambda card: 'a' if card['icon']=='resources/basic.png' else card['icon'])
for card in cards:
if (self.y-self.pageMarginHeight-self.cardIndexY*self.cardHeight) > self.cardHeight - 8:
self.columlIndexX = 1
self.y = self.pageMarginHeight + self.cardIndexY*self.cardHeight + self.columnMarginTop
self.x = self.pageMarginWidth + self.cardIndexX*self.cardWidth + self.columlIndexX*self.columnWidth + self.columnMarginStart
x = self.x
y = self.y
self.image(card['icon'], w=2, h=2)
self.y = y
self.x += 2
self.cell(4, 2.8, txt = f"{card['quantity']}X",ln = 0, align = 'L')
self.multi_cell(22, 2.8, txt = card['name'], align = 'L', border = 0)
self.x = x
self.y += 0.2
self.y += 0.7
def __drawDeck(self,deck):
totalCount = 0
for section in deck['sections'].values():
totalCount += section["count"]
deckName = deck["name"]
heroName = deck["hero"]
self.x = self.pageMarginWidth + self.cardIndexX * self.cardWidth
self.y = self.pageMarginHeight + self.cardIndexY * self.cardHeight
if self.__args.background:
dir = self.__args.backgroundDir
imagePath = f"{dir}/{heroName}.png"
if os.path.exists(imagePath):
self.image(imagePath, w=self.cardWidth, h=self.cardHeight)
self.x = self.pageMarginWidth + self.cardIndexX * self.cardWidth
self.y = self.pageMarginHeight + self.cardIndexY * self.cardHeight
self.set_font("Arial", size = 6, style = 'UB')
self.cell(62, 6, txt = f"{deckName}", ln = 0, align = 'C', border = 0)
self.x = self.pageMarginWidth + self.cardIndexX*self.cardWidth
self.y = self.pageMarginHeight + self.cardIndexY*self.cardHeight + 4
self.set_font("Arial", size = 6, style = 'I')
self.cell(62, 4, txt = f"{heroName} ({totalCount})", ln = 0, align = 'C', border = 0)
self.columlIndexX = 0
self.y = self.pageMarginHeight + self.cardIndexY*self.cardHeight + self.columnMarginTop
for sectionType, section in deck['sections'].items():
self.__drawDeckSection(sectionType, section["cards"], section["count"])
self.x = self.pageMarginWidth + self.cardIndexX * self.cardWidth
self.y = self.pageMarginHeight + (self.cardIndexY+1) * self.cardHeight - 5
self.set_font("Arial", size = 5, style = 'I')
self.cell(2, 6, txt = f"(v{deck['version']})",ln = 0, align = 'L', border = 0)
self.cell(60, 6, txt = deck['url'],ln = 0, align = 'R', border = 0, link = deck['url'])
def drawDecks(self,decks):
count = 1
for deck in self.__progress.apply(decks,desc="Create Pdf "):
if count > self.nbCardOnPage:
self.__drawCutLines() # Draw cut lines before to add new one
count = 1
if count==1:
self.add_page()
self.cardIndexX = (count-1)%self.nbCardOnPageWidth
self.cardIndexY = (count-1)//self.nbCardOnPageWidth
self.rect(
self.pageMarginWidth + self.cardIndexX*self.cardWidth,
self.pageMarginHeight + self.cardIndexY*self.cardHeight,
self.cardWidth,
self.cardHeight,
style = 'F'
)
self.__drawDeck(deck)
count+=1
if count>1:
self.__drawCutLines()
| 2.875 | 3 |
URI/1-Beginner/1759.py | vicenteneto/online-judge-solutions | 0 | 12785971 | # -*- coding: utf-8 -*-
n = int(raw_input())
result = ''.join(['Ho ' for x in range(n)])
print result[:-1] + '!'
| 3.734375 | 4 |
src/signer/secret20/signer.py | RainbowNetwork/RainbowBridge | 14 | 12785972 | <gh_stars>10-100
import json
from collections import namedtuple
from threading import Thread, Event
from typing import Dict, Union
from mongoengine import signals
from mongoengine import OperationError
from src.contracts.ethereum.multisig_wallet import MultisigWallet
from src.db.collections.commands import Commands
from src.db.collections.eth_swap import Swap, Status
from src.db.collections.signatures import Signatures
from src.util.common import temp_file
from src.util.config import Config
from src.util.logger import get_logger
from src.util.secretcli import sign_tx as secretcli_sign, decrypt, account_info
SecretAccount = namedtuple('SecretAccount', ['address', 'name'])
class Secret20Signer(Thread):
"""Signs on the SCRT side, after verifying Ethereum tx stored in the db"""
def __init__(self, multisig: SecretAccount, contract: MultisigWallet, config: Config, **kwargs):
self.multisig = multisig
self.contract = contract
self.config = config
self.stop_event = Event()
self.logger = get_logger(
db_name=config.db_name,
loglevel=config.log_level,
logger_name=config.logger_name or f"SecretSigner-{self.multisig.name}"
)
super().__init__(group=None, name=f"SecretSigner-{self.multisig.name}", target=self.run, **kwargs)
self.setDaemon(True) # so tests don't hang
self.account_num, _ = self._account_details()
signals.post_save.connect(self._sign_add_token, sender=Commands)
def running(self):
return self.is_alive()
def stop(self):
self.logger.info("Stopping..")
self.stop_event.set()
def run(self):
"""Scans the db for unsigned swap tx and signs them"""
self.logger.info("Starting..")
while not self.stop_event.is_set():
failed = False
for tx in Swap.objects(status=Status.SWAP_UNSIGNED):
# if there are 2 transactions that depend on each other (sequence number), and the first fails we mark
# the next as "retry"
if failed:
tx.status = Status.SWAP_RETRY
continue
self.logger.info(f"Found new unsigned swap event {tx}")
try:
self._validate_and_sign(tx)
self.logger.info(
f"Signed transaction successfully id:{tx.id}")
except ValueError as e:
self.logger.error(f'Failed to sign transaction: {tx} error: {e}')
failed = True
for tx in Commands.objects(status=Status.SWAP_UNSIGNED):
self.logger.info(f"Found new unsigned swap event {tx}")
try:
self._sign_add_token(sender="", document=tx)
self.logger.info(
f"Signed transaction successfully id:{tx.id}")
except ValueError as e:
self.logger.error(f'Failed to sign transaction: {tx} error: {e}')
self.stop_event.wait(self.config.sleep_interval)
def _validate_and_sign_command(self, tx: Commands):
"""
Makes sure that the tx is valid and signs it
:raises: ValueError
"""
if self._is_signed(tx):
self.logger.debug(f"This signer already signed this transaction. Waiting for other signers... id:{tx.id}")
return
self.sign(tx)
def _validate_and_sign(self, tx: Union[Commands, Swap]):
"""
Makes sure that the tx is valid and signs it
:raises: ValueError
"""
if self._is_signed(tx):
self.logger.debug(f"This signer already signed this transaction. Waiting for other signers... id:{tx.id}")
return
if not self._is_valid(tx):
self.logger.error(f"Validation failed. Signer: {self.multisig.name}. Tx id:{tx.id}.")
tx.status = Status.SWAP_FAILED
tx.save()
raise ValueError
self.sign(tx)
def sign(self, tx: Union[Commands, Swap]):
try:
signed_tx = self._sign_with_secret_cli(tx.unsigned_tx, tx.sequence)
except RuntimeError as e:
tx.status = Status.SWAP_FAILED
tx.save()
raise ValueError from e
try:
Signatures(tx_id=tx.id, signer=self.multisig.name, signed_tx=signed_tx).save()
except OperationError as e:
self.logger.error(f'Failed to save tx in database: {tx}')
raise ValueError from e
def _is_signed(self, tx: Union[Swap, Commands]) -> bool:
""" Returns True if tx was already signed by us, else False """
return Signatures.objects(tx_id=tx.id, signer=self.multisig.name).count() > 0
def _sign_add_token(self, sender, document: Commands, **kwargs): # pylint: disable=unused-argument
decrypted_data = self.decrypt_tx(document)
if not decrypted_data['add_token']:
raise ValueError('Tried to get a signature for a different command than add_token!')
self._validate_and_sign_command(document)
def _is_valid(self, tx: Swap) -> bool:
"""Assert that the data in the unsigned_tx matches the tx on the chain"""
log = self.contract.get_events_by_tx(tx.src_tx_hash)
if not log: # because for some reason event_log can return None???
return False
try:
decrypted_data = self.decrypt_tx(tx)
except json.JSONDecodeError:
self.logger.error(f'Tried to load tx with hash: {tx.src_tx_hash} {tx.id}'
f'but got data as invalid json, or failed to decrypt')
return False
# extract address and value from unsigned transaction
try:
tx_amount = int(decrypted_data['mint_from_ext_chain']['amount'])
tx_address = decrypted_data['mint_from_ext_chain']['address']
except KeyError:
self.logger.error(f"Failed to validate tx data: {tx}, {decrypted_data}, "
f"failed to get amount or destination address from tx")
return False
# extract amount from on-chain swap tx
try:
eth_on_chain_amount = self.contract.extract_amount(log)
eth_on_chain_address = self.contract.extract_addr(log)
except AttributeError:
self.logger.error(f"Failed to validate tx data: {tx}, {log}, "
f"failed to get amount or address from on-chain eth tx")
return False
# check that amounts on-chain and in the db match the amount we're minting
if tx_amount != eth_on_chain_amount or tx_amount != int(tx.amount):
self.logger.error(f"Failed to validate tx data: {tx} ({tx_amount}, {eth_on_chain_amount}, {int(tx.amount)})"
f" amounts do not match")
return False
# check that the address we're minting to matches the target from the TX
if tx_address != eth_on_chain_address:
self.logger.error(f"Failed to validate tx data: {tx}, ({tx_address}, {eth_on_chain_address}),"
f" addresses do not match")
return False
return True
def decrypt_tx(self, tx: Union[Commands, Swap]):
unsigned_tx = json.loads(tx.unsigned_tx)
res = self._decrypt(unsigned_tx)
self.logger.debug(f'Decrypted unsigned tx successfully {res}')
json_start_index = res.find('{')
json_end_index = res.rfind('}') + 1
decrypted_data = json.loads(res[json_start_index:json_end_index])
return decrypted_data
def _sign_with_secret_cli(self, unsigned_tx: str, sequence: int) -> str:
with temp_file(unsigned_tx) as unsigned_tx_path:
res = secretcli_sign(unsigned_tx_path, self.multisig.address, self.multisig.name,
self.account_num, sequence)
return res
@staticmethod
def _decrypt(unsigned_tx: Dict):
msg = unsigned_tx['value']['msg'][0]['value']['msg']
return decrypt(msg)
def _account_details(self):
details = account_info(self.multisig.address)
return details["value"]["account_number"], details["value"]["sequence"]
| 2.171875 | 2 |
src/fractal/world/state.py | jedhsu/fractal | 0 | 12785973 | """
World State
===========
A type of world state described by fractums.
# [TODO] clarify
"""
class WorldState:
pass
| 1.90625 | 2 |
INBa/2015/Mitin_D_S/task_8_15.py | YukkaSarasti/pythonintask | 0 | 12785974 | # Задача 8. Вариант 15.
# Доработайте игру "Анаграммы" (см. М.Доусон Программируем на Python. Гл.4) так, чтобы к каждому слову полагалась подсказка.
# Игрок должен получать право на подсказку в том случае, если у него нет никаких предположений.
# Разработайте систему начисления очков, по которой бы игроки, отгадавшие слово без подсказки, получали больше тех, кто запросил подсказку.
# <NAME>.
# 19.04.2016, 11:08
import random
ochki = 500000
slova = ("питон", "программирование", "компьютер", "университет", "россия", "безопасность", "информатика")
zagadka=random.choice(slova)
proverka = zagadka
i=0
jumble = ""
while zagadka:
bykva = random.randrange(len(zagadka))
jumble += zagadka[bykva]
zagadka = zagadka[:bykva] + zagadka[(bykva+1):]
print("Вы попали в передачу 'Анаграммы'")
print("Загаданное слово: ", jumble)
slovo = input ("Ваш ответ: ")
while (slovo != proverka):
if(slovo == "не знаю"):
print(i,"буква: ",proverka[i])
i+=1
if ochki <= 0:
break
slovo=input("Неправильно. Попробуй еще раз: ")
ochki-=50000
if slovo == proverka:
print("\nПравильно! Это слово: ", proverka)
print("Вы набрали",ochki," очков! Поздравляем!")
else:
print("К сожалению, у вас 0 очков, и вы проиграли :( Загаданное слово:",proverka)
input ("Нажмите ENTER для продолжения")
| 3.09375 | 3 |
iserver.py | DreamVB/httpServer | 1 | 12785975 | <filename>iserver.py<gh_stars>1-10
# A very simple web server to serve static pages
# By <NAME> a.k.a DreamVB
import servfunc
import os.path
import cfgread
from http.server import BaseHTTPRequestHandler, HTTPServer
class testHTTPServer_RequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
# Send response status code
self.send_response(200)
# Get home root
root = cfgread.readVal("HOME_DIR")
# Get the request line
b = self.raw_requestline.decode("utf-8")
# Split the line by the spaces
items = b.split(" ")
# Get the second item; it should be the requested page
page = items[1].replace("/","\\")
if page == "\\":
page = cfgread.readVal("DEFAULT_PAGE")
# Get the file ext
file_ext = servfunc.getFileExt(page)
# Get the full path to the page to serve
full_file = servfunc.fixPath(root) + page
# Get the file in question mimetype
mime_type = cfgread.readVal(file_ext)
# Make sure the full path filename is found
if os.path.exists(full_file):
# Send headers
self.send_header('Content-type', mime_type)
self.end_headers()
self.wfile.write(servfunc.get_bytes_from_file(full_file))
else:
# Send 404 error page
self.send_header('Content-type', "text/html")
self.end_headers()
self.wfile.write(servfunc.get_bytes_from_file("err_pages\\404.html"))
return
def start_server():
print('Starting Server...')
# Load server config
cfgread.loadCfg("server.cfg")
server_address = ("", int(cfgread.readVal("PORT")))
httpd = HTTPServer(server_address, testHTTPServer_RequestHandler)
print('Running Server...')
# Keep the server running
httpd.serve_forever()
if __name__ == '__main__':
# Start the web server
start_server()
| 3.171875 | 3 |
Exercises/Exercises Chapter 04/Collect Digits.py | tonysulfaro/CSE-231 | 2 | 12785976 | <filename>Exercises/Exercises Chapter 04/Collect Digits.py
s = input("Input a string: ")
digits = ""
for i,ch in enumerate(s):
if ch.isdigit():
digits += ch
print(digits) | 3.984375 | 4 |
chcd.py | JohanAR/chcd | 0 | 12785977 | <reponame>JohanAR/chcd<filename>chcd.py
#!/usr/bin/python
# CHCD by <NAME>
import os
import os.path
import getpass
USERNAME = getpass.getuser()
def meFirstGen(iterable):
if USERNAME in iterable:
yield USERNAME
for item in iterable:
if item != USERNAME:
yield item
def recfind(curpath, dirs, lastdir):
# print(curpath, dirs, lastdir)
try:
files = os.listdir(curpath)
except OSError:
return None
if not dirs:
if lastdir in files:
finalpath = os.path.join(curpath, lastdir)
if os.path.isdir(finalpath):
return finalpath
else:
for filename in meFirstGen(files):
if filename.startswith(dirs[0]):
nextpath = os.path.join(curpath, filename)
if os.path.isdir(nextpath):
res = recfind(nextpath, dirs[1:], lastdir)
if res:
return res
return None
def chcd(shortpath):
dirs = shortpath.split('/')
lastdir = dirs[-1]
dirs = dirs[:-1]
if not dirs:
return os.getcwd()
elif not dirs[0]:
curpath = '/'
dirs = dirs[1:]
elif dirs[0] == '~':
curpath = os.path.expanduser(dirs[0])
dirs = dirs[1:]
elif dirs[0] == '.':
curpath = os.getcwd()
dirs = dirs[1:]
else:
curpath = os.getcwd()
return recfind(curpath, dirs, lastdir)
if __name__ == '__main__':
import sys
if len(sys.argv) == 1:
print('')
else:
print(chcd(sys.argv[1]))
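
# --- Hedged usage note (added by the editor; not part of the original script) ---
# Each path component is treated as a prefix to match, so (paths illustrative)
#
#   python chcd.py ~/pr/sp/chcd
#
# could print /home/<user>/projects/spare-time/chcd, which a small shell wrapper
# can then cd into.  When several entries share a prefix, an entry equal to the
# current username is tried first (see meFirstGen above).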
| 2.765625 | 3 |
recommender/core/models.py | abhishekpathak/recommendation-system | 0 | 12785978 | <gh_stars>0
# -*- coding: utf-8 -*-
import json
from collections import Generator
from server import config
from server.extensions import redis_conn
""" A simple, quickly-prototyped ORM layer on top of Redis.
Supports all the functions needed by the serving layer, and nothing more.
The implementation details are hackish, and best left alone.
"""
class Users(object):
""" A model that represents the user.
Supports the following queries:
* get a user from id
* get all users in the system
* get the ratings given by a user
* persist the given ratings for a user
* get all the products used by a user
* get the recommendations for a user
"""
redis = redis_conn
def __init__(self, id: int, data_partition: str):
self.id = id
self.data_partition = data_partition
@classmethod
def get(cls, id: int, data_partition: str):
if id in config.ALLOWED_USER_IDS:
return cls(id=id, data_partition=data_partition)
else:
raise KeyError("user with id: {} does not exist.".format(id))
@classmethod
def get_all(cls, data_partition: str) -> list:
return [cls.get(id=user_id, data_partition=data_partition) for user_id
in config.ALLOWED_USER_IDS]
def get_ratings(self) -> list:
key = '{}_ratings_{}'.format(self.data_partition, self.id)
ratings_hash = self.redis.hgetall(key)
ratings = []
for key, value in ratings_hash.items():
ratings.append({
'product_id': int(key),
'rating': int(value)
})
return ratings
def get_products_used(self) -> list:
ratings = self.get_ratings()
return [item['product_id'] for item in ratings]
def get_recommendations(self) -> list:
key = '{}_recommendations_{}'.format(self.data_partition, self.id)
return self._get_recommendations_for_key(key)
def set_recommendations(self, recommendations: list) -> None:
key = '{}_recommendations_{}'.format(self.data_partition, self.id)
value = json.dumps(recommendations)
self.redis.set(key, value)
@classmethod
def _get_recommendations_for_key(cls, key):
value = cls.redis.get(key)
recommendations = json.loads(value) if value else []
return recommendations
@classmethod
def get_default_recommendations(cls, data_partition: str):
key = '{}_recommendations_-1'.format(data_partition)
return cls._get_recommendations_for_key(key)
def set_ratings(self, ratings: list) -> None:
key = '{}_ratings_{}'.format(self.data_partition, self.id)
for rating in ratings:
self.redis.hset(key, rating['product_id'], rating['rating'])
def has_rated(self):
return self.get_ratings() != []
class Products(object):
""" A model that represents the Product.
Supports the following queries:
* get a product from id
* get all products in the system
* add a new product to the system. If the product already exists, update it.
"""
redis = redis_conn
def __init__(self, id: int, name: str, desc: str):
self.id = id
self.name = name
self.desc = desc
@classmethod
def get(cls, id: int, data_partition: str):
key = '{}_products_{}'.format(data_partition, id)
meta = cls.redis.get(key)
if meta:
meta = json.loads(meta)
return cls(id=id, name=meta['name'], desc=meta['desc'])
@classmethod
def get_all(cls, data_partition: str) -> Generator:
# products catalog can be large, use a generator
return (cls.get(id=key, data_partition=data_partition) for key in
cls.redis.keys('{}_products_*'.format(data_partition)))
@classmethod
def upsert(cls, id, name, desc, data_partition: str) -> None:
key = '{}_products_{}'.format(data_partition, id)
value = json.dumps({
'name': name,
'desc': desc
})
cls.redis.set(key, value)
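
# --- Hedged usage sketch (added by the editor; not part of the original module) ---
# Shows how the Users/Products models above might be exercised.  The data
# partition name "movies" and the rating payload are made-up examples, and the
# user id is assumed to be listed in config.ALLOWED_USER_IDS.
def _example_usage():
    user = Users.get(id=1, data_partition='movies')
    user.set_ratings([{'product_id': 42, 'rating': 5}])
    Products.upsert(id=42, name='Example', desc='An example product', data_partition='movies')
    print(user.get_products_used())
    print(user.get_recommendations() or Users.get_default_recommendations('movies'))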
| 2.9375 | 3 |
post/migrations/0002_auto_20180224_1857.py | bozburak/Blog_With_Django | 0 | 12785979 | <filename>post/migrations/0002_auto_20180224_1857.py<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-02-24 15:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('post', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='post',
name='image',
field=models.ImageField(default=1, upload_to=b''),
preserve_default=False,
),
migrations.AlterField(
model_name='post',
name='content',
field=models.TextField(verbose_name='\u0130\xe7erik'),
),
migrations.AlterField(
model_name='post',
name='publishing_date',
field=models.DateTimeField(verbose_name='Yay\u0131nlanma Tarihi'),
),
migrations.AlterField(
model_name='post',
name='title',
field=models.CharField(max_length=100, verbose_name='Ba\u015fl\u0131k'),
),
]
| 1.492188 | 1 |
pyecore/__init__.py | 4ekin/pyecore | 0 | 12785980 | <filename>pyecore/__init__.py
"""
"""
__version__ = "0.11.6"
| 0.964844 | 1 |
omsdk/sdkunits.py | DanielFroehlich/omsdk | 61 | 12785981 | <gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
#
# Copyright © 2018 Dell Inc. or its subsidiaries. All rights reserved.
# Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
# Other trademarks may be trademarks of their respective owners.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: <NAME>
#
import math
class iUnitsFactory(object):
def __init__(self):
self.units_spec = {
"Bytes": [
("B", 1024),
("KB", 1024),
("MB", 1024),
("GB", 1024),
("TB", 1024)
],
"Voltage": [
("V", 1000),
("KV", 1000),
],
"Bandwidth": [
("Bps", 1024),
("KBps", 1024),
("MBps", 1024),
("GBps", 1024),
("TBps", 1024)
],
"Watts": [
("W", 1000),
("KW", 1000),
("MW", 1000),
("GW", 1000),
("TW", 1000)
],
"ClockSpeed": [
("Hz", 1000),
("KHz", 1000),
("MHz", 1000),
("GHz", 1000),
("THz", 1000)
],
"MetricDistance": [
("MM", 10),
("CM", 100),
("M", 1000),
("KM", 1000),
]
}
def Convert(self, rjson):
for field in ['Type', 'InUnits', 'Value']:
if not field in rjson:
raise Exception("No " + field + " in the json")
if not (isinstance(rjson['Value'], int) or \
isinstance(rjson['Value'], float)):
raise Exception("invalid value type!")
if not rjson['Type'] in self.units_spec:
raise Exception("Units for " + rjson['Type'] + " not defined")
uspec = self.units_spec[rjson['Type']]
cur_index = -1
for i in range(0, len(uspec)):
if rjson['InUnits'] == uspec[i][0]:
cur_index = i
if cur_index < 0:
raise Exception("Invalid Value Units Specified")
tgt_index = -1
expected_units = None
if 'OutUnits' in rjson:
expected_units = rjson['OutUnits']
if 'Metrics' in rjson:
expected_units = rjson['Metrics']
if not expected_units:
tgt_index = len(uspec)
else:
for i in range(0, len(uspec)):
if expected_units == uspec[i][0]:
tgt_index = i
if tgt_index < 0:
tgt_index = len(uspec)
if tgt_index < 0:
raise Exception("Invalid Value Units Specified for target")
final_value = rjson['Value']
found = False
if tgt_index == cur_index:
found = True
final_spec = uspec[tgt_index][0]
elif tgt_index > cur_index:
k = rjson['Value']
for i in range(cur_index, tgt_index + 1):
k1 = k / uspec[i][1]
if (expected_units == None and k1 < 1) or (i == tgt_index):
found = True
final_value = k
final_spec = uspec[i][0]
break
k = k1
elif tgt_index < cur_index:
k = rjson['Value']
for i in range(tgt_index, cur_index):
k = k * uspec[i][1]
found = True
final_value = k
final_spec = uspec[i][0]
final_value = round(final_value, 2)
if not found:
return rjson['Value']
if 'Metrics' in rjson:
return final_value
else:
return str(final_value) + " " + final_spec
def append_sensors_unit(self, reading, unitmodifier, unitstr):
if reading :
if str(reading).isdigit() :
retval = float(reading) * math.pow(10, int(unitmodifier))
s = str(retval).rstrip('0').rstrip('.')
if unitstr:
s = s + " " + unitstr
return s
return "Not Available"
UnitsFactory = iUnitsFactory()
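
# --- Hedged usage sketch (added by the editor; not part of the original module) ---
# Illustrates the request shape expected by UnitsFactory.Convert(); the values
# are purely illustrative.
def _example_convert():
    # 2048 MB expressed in GB -> "2.0 GB"
    print(UnitsFactory.Convert({'Type': 'Bytes', 'InUnits': 'MB', 'Value': 2048, 'OutUnits': 'GB'}))
    # Omitting OutUnits lets the factory pick a human-friendly unit (here GHz).
    print(UnitsFactory.Convert({'Type': 'ClockSpeed', 'InUnits': 'Hz', 'Value': 3500000000}))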
| 1.976563 | 2 |
basis_selector/nwchem.py | adewyer/Exabyte | 0 | 12785982 | <gh_stars>0
"""
Author: <NAME>
Date: September 7, 2020
Class to create nwchem input file for basis set testing
"""
import re
import os
import sys
import logging
import subprocess
import pkg_resources
import basis_selector
from basis_selector import basis_sets as basis
from basis_selector import molecule as mol
from basis_selector.parameters import Parameters
class Nwchem:
"""
Class creates an NWChem object.
NWChem objects are used to:
1. Create NWChem input files
2. Run NWChem calculations from input files
3. Check the status of NWChem calculations (Running or Finished)
4. Pull data about properties from NWChem output files
"""
def __init__(self, param):
self.param = param
self.molname = param.par['molecule_name']
self.jobDescription = param.par['job_description']
self.charge = param.par['charge']
self.mult = param.par['multiplicity']
self.qcMethod = param.par['qc_method'] # Default set to DFT
self.dft = param.par['dft_method'] # Default set to B3LYP
self.optThreshold = param.par['opt_threshold']
self.refType = param.par['reference_type']
self.refVal = param.par['reference_value']
self.propertyThreshold = param.par['property_threshold']
self.structure = param.par['structure']
def get_nwchem_args(self, basisSet):
# Function to grab keyword arguments for NWChem input
kwargs = {
'molname': self.molname,
'title': self.jobDescription,
'charge': self.charge,
'basis': basisSet,
'functional': self.dft,
'mult': self.mult,
'thresh': self.optThreshold
}
return kwargs
def write_nwchem_input(self, basis):
"""
Creation of a single point calculation input file for NWChem.
Input files with varying basis sets are created for the same molecule
as defined by within the 'self' parameter.
"""
# molecule object created based on the 'self' parameter
molecule = mol.Molecule(self.charge, self.mult, self.structure)
kwargs = self.get_nwchem_args(basis)
molName = kwargs.get('molname')
calcName = molName + '_' + basis # Filename for NWChem input
"""
Formatting of geometry for NWChem input so that
each atom label and corresponding XYZ coordinates
on an individual line within the NWChem input file.
"""
geometry = molecule.reshape_geom(self.structure)
formattedGeom = []
sep = '\t'
for line in geometry:
newLine = sep.join(line)
formattedGeom.append(newLine)
sep2 = '\n '
formattedGeom = sep2.join(formattedGeom)
# Use of NWChem template file to create NWChem input files
nwChemInpFi = pkg_resources.resource_filename(__name__, 'templates/nwchem_energy.tpl')
with open(nwChemInpFi, 'r') as f:
self.nwChemInpFi = f.read()
nwChemInput = self.nwChemInpFi.format(molname=calcName,
description=kwargs.get('title'),
charge=self.charge,
structure=formattedGeom,
basis=basis,
functional=kwargs.get('functional'),
mult=self.mult,
method=self.qcMethod)
# Creation of input files inside of the singlePoints dir.
# SinglePoints Dir is where all single point calculations are stored
with open('./singlePoints/' + calcName + '.inp', 'w') as nwChemFile:
nwChemFile.write(nwChemInput)
logging.info("NWChem input file created for {}.".format(calcName))
return calcName
def nwchem_submit(self, calcName):
# Function that submits NWChem calculations to the computer for completion
os.chdir('./singlePoints')
cmd = 'nwchem ' + calcName + '.inp' + ' >> ' + calcName + '.out 2>' + calcName + '.err'
nwchemRun = os.popen(cmd)
os.chdir('../')
return None
def check_nwchem(self, calcName):
"""
Function to check whether an NWChem calculation is running or not.
Calculation status is checked by looking at the output on the
final line of the output file.
0 = running, 1 = done
"""
status = 0
os.chdir('./singlePoints')
with open(calcName + '.out', 'r') as fi:
line = fi.read().splitlines()
sep=''
finalLine = sep.join(line[-1])
if 'Total times' in finalLine:
status = 1 # calc done, finished successfully
elif "For further details see manual section:" in finalLine:
status = -1 # calc done, failed
logging.info("Calculation failed for {}.".format(calcName))
os.chdir('../')
return status
def get_nwchem_energy(self, calcName):
# Function grabs and returns the energy from an NWChem output file.
energyKeywords = 'Total DFT energy'
os.chdir('./singlePoints')
with open(calcName + '.out', 'r') as fi:
lines = fi.read().splitlines()
if "For further details see manual section:" in lines[-1]:
energy = 0 # Set energy to 0 if calculation failed
else:
for line in lines:
if energyKeywords in line:
energy = line[34::] # Location of energy in file
os.chdir('../')
return energy
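
# --- Hedged usage sketch (added by the editor; not part of the original module) ---
# Assumes `param` is a Parameters object exposing the .par dictionary used above;
# the basis set name is illustrative.
def _example_run(param):
    import time
    nw = Nwchem(param)
    calc_name = nw.write_nwchem_input('6-31G')
    nw.nwchem_submit(calc_name)
    # Poll until the calculation finishes (1) or fails (-1).
    while nw.check_nwchem(calc_name) == 0:
        time.sleep(5)
    return nw.get_nwchem_energy(calc_name)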
| 2.796875 | 3 |
pytest_sqlalchemy.py | crowdcomms/pytest-sqlalchemy | 3 | 12785983 | # -*- coding: utf-8 -*-
import os
from sqlalchemy import create_engine
from sqlalchemy.engine.url import make_url
from sqlalchemy.exc import ProgrammingError
import logging
import pytest
logger = logging.getLogger(__name__)
def pytest_addoption(parser):
group = parser.getgroup('sqlalchemy')
group.addoption(
'--test-db-prefix',
action='store',
dest='test_db_prefix',
default='test',
help='Define a prefix for the test database that is created'
)
parser.addini('test_db_prefix', 'Prefix for test database')
parser.addini('drop_existing_test_db', 'Drop existing test database for each session')
@pytest.fixture(scope='session')
def test_db_prefix():
return 'test_'
@pytest.fixture(scope='session')
def database_url():
return os.environ['DATABASE_URL']
@pytest.fixture(scope='session')
def test_database_url(test_db_prefix, database_url):
test_url = make_url(database_url)
test_url.database = test_db_prefix + test_url.database
return test_url
@pytest.fixture(scope='session')
def test_db(database_url, test_database_url):
engine = create_engine(database_url)
conn = engine.connect()
conn.execution_options(autocommit=False)
conn.execute('ROLLBACK')
try:
conn.execute("DROP DATABASE {}".format(test_database_url.database))
except ProgrammingError:
pass
finally:
conn.execute('ROLLBACK')
logger.debug('Creating Test Database {}'.format(test_database_url.database))
conn.execute("CREATE DATABASE {}".format(test_database_url.database))
conn.close()
engine.dispose()
@pytest.fixture(scope='session')
def sqlalchemy_base():
raise ValueError('Please supply sqlalchemy_base fixture')
@pytest.fixture(scope='session')
def sqlalchemy_session_class():
raise ValueError('Please supply sqlalchemy_session_class fixture')
@pytest.fixture(scope='session')
def engine(test_database_url):
return create_engine(test_database_url)
@pytest.yield_fixture(scope='session')
def tables(engine, sqlalchemy_base, test_db):
sqlalchemy_base.metadata.create_all(engine)
yield
sqlalchemy_base.metadata.drop_all(engine)
@pytest.yield_fixture(scope='function')
def db_session(engine, tables, sqlalchemy_session_class):
sqlalchemy_session_class.remove()
with engine.connect() as connection:
transaction = connection.begin_nested()
sqlalchemy_session_class.configure(bind=connection)
session = sqlalchemy_session_class()
session.begin_nested()
yield session
session.close()
transaction.rollback()
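
# --- Hedged usage sketch (added by the editor; not part of the original plugin) ---
# A consuming project is expected to override the two placeholder fixtures above
# in its conftest.py, for example (module names are illustrative):
#
#   import pytest
#   from myapp.db import Base, Session   # declarative base and scoped_session
#
#   @pytest.fixture(scope='session')
#   def sqlalchemy_base():
#       return Base
#
#   @pytest.fixture(scope='session')
#   def sqlalchemy_session_class():
#       return Session
#
# DATABASE_URL must also be set in the environment for the database_url fixture.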
| 2.421875 | 2 |
spaghetti/__init__.py | boris-hanin/spaghetti | 0 | 12785984 | # -*- coding: utf-8 -*-
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
| 1.054688 | 1 |
Retrieve Comments/ConvertText.py | zhoudanxie/analyzing-public-comments | 2 | 12785985 | #--------------------------------------Convert Attachment (DOC & PDF) Comments to Text---------------------------------#
#---------------------------------------------The GW Regulatory Studies Center-----------------------------------------#
#--------------------------------------------------Author: <NAME>-------------------------------------------------#
# Import packages
import sys
import os
import comtypes.client
from PIL import Image
import pytesseract
import sys
from pdf2image import convert_from_path
import fitz
import json
filePath="Retrieve Comments/Comment Attachments/" #! Specify the path of the folder where the comment attachments are saved
#-------------------------------------------Convert DOC files to PDF----------------------------------------------------
# Define a function to convert doc to pdf
def docToPdf(filePath,fileName):
wdFormatPDF = 17
in_file = os.path.abspath(filePath+fileName+'.doc')
out_file = os.path.abspath(filePath+fileName+'.pdf')
word = comtypes.client.CreateObject('Word.Application')
word.Visible = False
doc = word.Documents.Open(in_file)
doc.SaveAs(out_file, FileFormat=wdFormatPDF)
doc.Close()
word.Quit()
# Convert DOC comments to PDF
for file in os.listdir(filePath):
if file.endswith(".doc"):
fileName = str(file).split('.doc')[0]
if os.path.isfile(filePath + fileName + ".pdf"):
pass
else:
docToPdf(filePath,fileName)
#---------------------------------------------Convert PDF files to text-------------------------------------------------
# Define a function to convert scanned PDF to text
def convertScanPDF(file):
## Part 1 : Converting PDF to images
# Store all the pages of the PDF in a variable
pages = convert_from_path(file, 500)
# Counter to store images of each page of PDF to image
image_counter = 1
# Iterate through all the pages stored above
for page in pages:
# Declaring filename for each page of PDF as JPG
# For each page, filename will be:
# PDF page 1 -> page_1.jpg
# ....
# PDF page n -> page_n.jpg
filename = "page_" + str(image_counter) + ".jpg"
# Save the image of the page in system
page.save(filename, 'JPEG')
# Increment the counter to update filename
image_counter = image_counter + 1
##Part 2 - Recognizing text from the images using OCR
# Variable to get count of total number of pages
filelimit = image_counter - 1
text=''
# Iterate from 1 to total number of pages
for i in range(1, filelimit + 1):
# Set filename to recognize text from
# Again, these files will be:
# page_1.jpg
# page_2.jpg
# ....
# page_n.jpg
filename = "page_" + str(i) + ".jpg"
        # Recognize the text as a string in the image using pytesseract
new_text = str(((pytesseract.image_to_string(Image.open(filename)))))
# The recognized text is stored in variable text.
# Any string processing may be applied on text
# Here, basic formatting has been done: In many PDFs, at line ending, if a word can't be written fully,
# a 'hyphen' is added. The rest of the word is written in the next line. Eg: This is a sample text this
# word here GeeksF-orGeeks is half on first line, remaining on next. To remove this, we replace every '-\n' to ''.
new_text = new_text.replace('-\n', '')
# Finally, write the processed text to the file.
text += new_text
return text
# Convert PDF comments to text
dic_pdfComments={}
notConverted=[]
for file in os.listdir(filePath):
if file.endswith(".pdf"):
doc = fitz.open(filePath+file)
fileName=str(file).split('.pdf')[0]
num_pages = doc.pageCount
count = 0
text = ""
while count < num_pages:
page = doc[count]
count += 1
text += page.getText('text')
if text != "":
text=text.replace('\n',' ')
dic_pdfComments.update({fileName: text})
else:
try:
text = convertScanPDF(filePath+file)
text = text.replace('\n', ' ')
dic_pdfComments.update({fileName: text})
except:
notConverted.append(file)
doc.close
print("The number of PDF files that have been converted to text is:", len(dic_pdfComments))
if len(notConverted)>0:
print("The following PDF files could not be converted:")
print(notConverted)
print("END")
# Print an example
print(dic_pdfComments.keys())
for key, value in dic_pdfComments.items():
if key=="<KEY>": #! Print the text of a specified document
print(key, ":", value)
#---------------------------------------------Export converted text-------------------------------------------------
# Export to JSON
## Output file will include text from all converted comments in one file
js_pdfComments=json.dumps(dic_pdfComments)
with open('Retrieve Comments/Attachment Comments Example.json', 'w', encoding='utf-8') as f: #! Specify the file to which you want to export the JSON
json.dump(js_pdfComments, f, ensure_ascii=False, indent=4)
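
# --- Hedged usage sketch (added by the editor; not part of the original script) ---
# Because the dictionary is serialized with json.dumps() before being passed to
# json.dump(), the output file holds a JSON-encoded string; reading it back
# therefore takes two decoding steps.
def load_attachment_comments(path='Retrieve Comments/Attachment Comments Example.json'):
    with open(path, encoding='utf-8') as f:
        return json.loads(json.load(f))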
| 3.03125 | 3 |
day_16/main.py | orfeasa/advent-of-code-2021 | 29 | 12785986 | <gh_stars>10-100
from dataclasses import dataclass, field
@dataclass
class Packet:
version: int = 0
type_id: int = 0
value: int = None
subpackets: list = field(default_factory=list)
def parse_transmission(
transmission: str, length=None, number_of_packets=None
) -> list[Packet]:
if len(transmission) == 0:
return []
first_bit = 0
packet = Packet()
packet.version = int(transmission[first_bit : first_bit + 3], 2)
packet.type_id = int(transmission[first_bit + 3 : first_bit + 6], 2)
last_bit = first_bit + 6
# literal
if packet.type_id == 4:
keep_reading = True
number = ""
while keep_reading:
keep_reading = bool(int(transmission[last_bit : last_bit + 1]))
number += transmission[last_bit + 1 : last_bit + 5]
last_bit += 5
packet.value = int(number, 2)
else:
length_type_id = int(transmission[last_bit : last_bit + 1])
last_bit += 1
if length_type_id == 0:
total_length = int(transmission[last_bit : last_bit + 15], 2)
last_bit += 15
packet.subpackets = parse_transmission(
transmission[last_bit : last_bit + total_length]
)
last_bit += total_length
else:
number_of_subpackets = int(transmission[last_bit : last_bit + 11], 2)
last_bit += 11
packet.subpackets = parse_transmission(
transmission[last_bit:], number_of_packets=number_of_subpackets
)
if len(packet.subpackets) == number_of_subpackets:
return [packet]
if (
len(transmission) == last_bit
or transmission[last_bit:] == len(transmission[last_bit:]) * "0"
# or length is not None and last_bit == length
):
return [packet]
return [packet] + parse_transmission(transmission[last_bit:])
def sum_version(packets: list) -> int:
sum = 0
for packet in packets:
sum += packet.version if packet.version else 0
sum += sum_version(packet.subpackets) if packet.subpackets else 0
return sum
def part_one(filename: str) -> int:
with open(filename) as f:
transmission = f.read().strip()
    # transmission = "620080001611562C8802118E34"  # leftover sample input; kept commented out so the file contents are actually used
transmission = str(bin(int(transmission, 16)))[2:].zfill(len(transmission) * 4)
packets = parse_transmission(transmission)
return sum_version(packets)
def part_two(filename: str) -> int:
return 0
if __name__ == "__main__":
input_path = "./day_16/input.txt"
print("---Part One---")
print(part_one(input_path))
print("---Part Two---")
print(part_two(input_path))
| 2.765625 | 3 |
neural_compilers/utils/config.py | jordiae/neural-compilers | 4 | 12785987 | from enum import Enum
from dataclasses import dataclass
from typing import Optional
class TokenizerType(str, Enum):
PYGMENTS = 'pygments'
class SubwordType(str, Enum):
SUBWORD_NMT = 'subword-nmt'
@dataclass
class TokenizerConfig:
tokenizer_type: TokenizerType
subword_tokenizer: SubwordType
subword_vocab_size: int
shared_vocab: bool
@dataclass
class DataGenConfig:
input_path: str
output_path: str
min_tokens: int
max_tokens: int
supervised: bool
valid_test_size: int
seed: int
tokenizer_config: TokenizerConfig
just_func: bool = False
config_path: Optional[str] = None
max_train_data: Optional[int] = None
@classmethod
def from_dict(cls, d):
res = cls(**d)
res.tokenizer_config = TokenizerConfig(**d['tokenizer_config'])
return res
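
# --- Hedged usage sketch (added by the editor; not part of the original module) ---
# Example of building a DataGenConfig from a plain dict; all values are
# illustrative.
def _example_config() -> DataGenConfig:
    return DataGenConfig.from_dict({
        'input_path': 'data/raw',
        'output_path': 'data/processed',
        'min_tokens': 10,
        'max_tokens': 512,
        'supervised': True,
        'valid_test_size': 1000,
        'seed': 42,
        'tokenizer_config': {
            'tokenizer_type': TokenizerType.PYGMENTS,
            'subword_tokenizer': SubwordType.SUBWORD_NMT,
            'subword_vocab_size': 32000,
            'shared_vocab': True,
        },
    })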
| 2.953125 | 3 |
data/scripts/compile_acs_data.py | datamade/just-spaces | 6 | 12785988 | <filename>data/scripts/compile_acs_data.py
import csv
import sys
from django.conf import settings
# Set up a temporary settings file to ensure that we can import the pldp app
settings.configure(INSTALLED_APPS=['pldp'], USE_I18N=False)
from pldp import forms as pldp_forms
from fobi_custom.plugins.form_elements.fields.intercept import forms as plugin_forms
def display(choices, slug):
"""
Get the display name for a form choice based on its slug. We need this function
because we want to be able to store ACS data using the human-readable display
name for each field, but in the code we want to reference the fields using their
slugs, which are easier to change.
:param choices: A list of tuples representing Django-style form choices.
:param slug: The slug of the choice to select.
:return: The display name for the given slug.
"""
for choice_slug, display_name in choices:
if choice_slug == slug:
return display_name
raise NameError('No choice for for slug {} in {}'.format(slug, str(choices)))
# Create mappings for compiled ACS tables based on the input variables that we can
# sum to generate those tables. For example, the value for `age_complex.under_5`
# is the sum of the ACS variables `male_under_5` and `female_under_5`.
FIELD_MAPPINGS = {
# Ages don't need display names because their slugs are the same as their
# human-readable names.
'age_basic': {
'0-14': [
'male_under_5', 'male_5_to_9', 'male_10_to_14',
'female_under_5', 'female_5_to_9', 'female_10_to_14',
],
'15-24': [
'male_15_to_17', 'male_18_to_19', 'male_20', 'male_21', 'male_22_to_24',
'female_15_to_17', 'female_18_to_19', 'female_20', 'female_21', 'female_22_to_24',
],
'25-64': [
'male_25_to_29', 'male_30_to_34', 'male_35_to_39', 'male_40_to_44',
'male_45_to_49', 'male_50_to_54', 'male_55_to_59', 'male_60_to_61', 'male_62_to_64',
'female_25_to_29', 'female_30_to_34', 'female_35_to_39', 'female_40_to_44',
'female_45_to_49', 'female_50_to_54', 'female_55_to_59', 'female_60_to_61', 'female_62_to_64',
],
'65+': [
'male_65_to_66', 'male_67_to_69', 'male_70_to_74', 'male_75_to_79', 'male_80_to_84', 'male_85_plus',
'female_65_to_66', 'female_67_to_69', 'female_70_to_74', 'female_75_to_79', 'female_80_to_84', 'female_85_plus',
]
},
'age_detailed': {
'0-4': ['male_under_5', 'female_under_5'],
'5-14': ['male_5_to_9', 'male_10_to_14', 'female_5_to_9', 'female_10_to_14'],
'15-24': [
'male_15_to_17', 'male_18_to_19', 'male_20', 'male_21', 'male_22_to_24',
'female_15_to_17', 'female_18_to_19', 'female_20', 'female_21', 'female_22_to_24',
],
'25-44': [
'male_25_to_29', 'male_30_to_34', 'male_35_to_39', 'male_40_to_44',
'female_25_to_29', 'female_30_to_34', 'female_35_to_39', 'female_40_to_44',
],
'45-64': [
'male_45_to_49', 'male_50_to_54', 'male_55_to_59', 'male_60_to_61', 'male_62_to_64',
'female_45_to_49', 'female_50_to_54', 'female_55_to_59', 'female_60_to_61', 'female_62_to_64',
],
'65-74': [
'male_65_to_66', 'male_67_to_69', 'male_70_to_74',
'female_65_to_66', 'female_67_to_69', 'female_70_to_74',
],
'75+': [
'male_75_to_79', 'male_80_to_84', 'male_85_plus',
'female_75_to_79', 'female_80_to_84', 'female_85_plus',
],
},
'age_complex': {
'0-4': ['male_under_5', 'female_under_5'],
'5-9': ['male_5_to_9', 'female_5_to_9'],
'10-14': ['male_10_to_14', 'female_10_to_14'],
'15-17': ['male_15_to_17', 'female_15_to_17'],
'18-24': [
'male_18_to_19', 'male_20', 'male_21', 'male_22_to_24',
'female_18_to_19', 'female_20', 'female_21', 'female_22_to_24',
],
'25-34': ['male_25_to_29', 'male_30_to_34', 'female_25_to_29', 'female_30_to_34'],
'35-44': ['male_35_to_39', 'male_40_to_44', 'female_35_to_39', 'female_40_to_44'],
'45-54': ['male_45_to_49', 'male_50_to_54', 'female_45_to_49', 'female_50_to_54'],
'55-64': [
'male_55_to_59', 'male_60_to_61', 'male_62_to_64',
'female_55_to_59', 'female_60_to_61', 'female_62_to_64',
],
'65-74': [
'male_65_to_66', 'male_67_to_69', 'male_70_to_74',
'female_65_to_66', 'female_67_to_69', 'female_70_to_74',
],
'75+': [
'male_75_to_79', 'male_80_to_84', 'male_85_plus',
'female_75_to_79', 'female_80_to_84', 'female_85_plus',
],
},
'gender_observational': {
display(pldp_forms.GENDER_BASIC_CHOICES, 'male'): ['male_total'],
display(pldp_forms.GENDER_BASIC_CHOICES, 'female'): ['female_total'],
display(pldp_forms.GENDER_BASIC_CHOICES, 'unknown'): [],
},
'gender_intercept': {
display(plugin_forms.GENDER_INTERCEPT_CHOICES, 'male'): ['male_total'],
display(plugin_forms.GENDER_INTERCEPT_CHOICES, 'female'): ['female_total'],
display(plugin_forms.GENDER_INTERCEPT_CHOICES, 'non_binary'): [],
display(plugin_forms.GENDER_INTERCEPT_CHOICES, 'no_answer'): [],
},
'income': {
display(plugin_forms.INCOME_CHOICES, 'under_20k'): ['under_10k', '10k_to_15k', '15k_to_20k'],
display(plugin_forms.INCOME_CHOICES, '20_40k'): ['20k_to_25k', '25k_to_30k', '30k_to_35k', '35k_to_40k'],
display(plugin_forms.INCOME_CHOICES, '40_60k'): ['40k_to_45k', '45k_to_50k', '50k_to_60k'],
display(plugin_forms.INCOME_CHOICES, '60_75k'): ['60k_to_75k'],
display(plugin_forms.INCOME_CHOICES, '75_100k'): ['75k_to_100k'],
display(plugin_forms.INCOME_CHOICES, '100k_plus'): ['100k_to_125k', '125k_to_150k', '150k_to_200k', '200k_plus'],
},
'education': {
display(plugin_forms.EDUCATION_CHOICES, 'no_high_school'): [
'no_schooling', 'nursery', 'kindergarten', 'first_grade', 'second_grade',
'third_grade', 'fourth_grade', 'fifth_grade', 'sixth_grade', 'seventh_grade',
'eighth_grade', 'ninth_grade', 'tenth_grade', 'eleventh_grade', 'twelfth_grade',
],
display(plugin_forms.EDUCATION_CHOICES, 'high_school'): [
'high_school_diploma', 'ged', 'less_than_1_year_college',
'one_or_more_years_college'
],
display(plugin_forms.EDUCATION_CHOICES, 'associate'): ['associates'],
display(plugin_forms.EDUCATION_CHOICES, 'bachelor'): ['bachelors'],
display(plugin_forms.EDUCATION_CHOICES, 'graduate'): ['masters', 'professional', 'doctorate'],
},
'race': {
display(plugin_forms.RACE_CHOICES, 'black'): ['black'],
display(plugin_forms.RACE_CHOICES, 'asian'): ['asian'],
display(plugin_forms.RACE_CHOICES, 'white'): ['white'],
display(plugin_forms.RACE_CHOICES, 'hispanic_latino'): [], # ACS doesn't include Hispanic as a race
display(plugin_forms.RACE_CHOICES, 'native'): ['american_indian'],
display(plugin_forms.RACE_CHOICES, 'hawaiian'): ['pacific_islander'],
display(plugin_forms.RACE_CHOICES, 'multiple'): ['two_or_more'],
display(plugin_forms.RACE_CHOICES, 'other'): ['other'],
},
'household_tenure': {
# Household tenure is a free response intercept question, so it doesn't have
# official choices in its form.
'0-1979': ['owner_1979_or_earlier', 'renter_1979_or_earlier'],
'1980-1989': ['owner_1980_to_1989', 'renter_1980_to_1989'],
'1990-1999': ['owner_1990_to_1999', 'renter_1990_to_1999'],
'2000-2009': ['owner_2000_to_2009', 'renter_2000_to_2009'],
'2010-2014': ['owner_2010_to_2014', 'renter_2010_to_2014'],
'2015+': ['owner_2015_plus', 'renter_2015_plus'],
},
'employment': {
display(plugin_forms.EMPLOYMENT_CHOICES, 'employed'): ['employed'],
display(plugin_forms.EMPLOYMENT_CHOICES, 'seeking'): ['unemployed'],
display(plugin_forms.EMPLOYMENT_CHOICES, 'not_seeking'): ['not_in_labor_force'],
display(plugin_forms.EMPLOYMENT_CHOICES, 'in_armed_forces'): ['in_armed_forces'],
},
'own_or_rent': {
display(plugin_forms.OWN_OR_RENT_CHOICES, 'owner'): ['owner_total'],
display(plugin_forms.OWN_OR_RENT_CHOICES, 'renter'): ['renter_total'],
display(plugin_forms.OWN_OR_RENT_CHOICES, 'other'): [],
},
'transportation': {
display(plugin_forms.TRANSPORTATION_CHOICES, 'walked'): ['walked'],
display(plugin_forms.TRANSPORTATION_CHOICES, 'bicycle'): ['bicycle'],
display(plugin_forms.TRANSPORTATION_CHOICES, 'car_truck_van'): ['car_truck_van'],
display(plugin_forms.TRANSPORTATION_CHOICES, 'motorcycle'): ['motorcycle'],
display(plugin_forms.TRANSPORTATION_CHOICES, 'train'): ['subway', 'railroad'],
display(plugin_forms.TRANSPORTATION_CHOICES, 'bus'): ['bus'],
display(plugin_forms.TRANSPORTATION_CHOICES, 'trolley'): ['trolley'],
display(plugin_forms.TRANSPORTATION_CHOICES, 'ferryboat'): ['ferryboat'],
display(plugin_forms.TRANSPORTATION_CHOICES, 'taxicab'): ['taxicab'],
display(plugin_forms.TRANSPORTATION_CHOICES, 'other_means'): ['other_means'],
},
}
if __name__ == '__main__':
# Parse input files from the command line
variable_name = sys.argv[1]
if len(variable_name) == 0:
raise NameError(
'Script requires an argument representing the name of the output variable'
)
field_mapping = FIELD_MAPPINGS[variable_name]
reader = csv.DictReader(sys.stdin)
writer = csv.DictWriter(sys.stdout, fieldnames=['fips'] + list(field_mapping.keys()))
writer.writeheader()
for row in reader:
output_row = {'fips': row['fips']}
for output_field, input_fields in field_mapping.items():
output_row[output_field] = 0
for input_field in input_fields:
output_row[output_field] += float(row[input_field])
writer.writerow(output_row)
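
# --- Hedged usage note (added by the editor; not part of the original script) ---
# The script reads the raw ACS CSV on stdin and writes the compiled table to
# stdout; file names below are illustrative:
#
#   python compile_acs_data.py age_basic < acs_raw.csv > age_basic_compiled.csv
#
# The input CSV must provide a 'fips' column plus every input variable listed in
# FIELD_MAPPINGS for the chosen output variable.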
| 2.75 | 3 |
tests/util.py | Codesflow-Simon/Poker-framework | 0 | 12785989 | import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../src')))
from card import Card, suit_num_dict, rank_num_dict
from itertools import product
deck = []
suits = []
ranks = []
for suit, rank in product(suit_num_dict.keys(),rank_num_dict.keys()):
deck.append(Card(suit, rank))
suits.append(suit)
ranks.append(rank)
| 2.6875 | 3 |
tensorflow_privacy/privacy/privacy_tests/membership_inference_attack/models_test.py | SoaringChicken/tensorflow-privacy | 0 | 12785990 | # Copyright 2020, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
import numpy as np
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack import models
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import AttackInputData
class TrainedAttackerTest(absltest.TestCase):
def test_base_attacker_train_and_predict(self):
base_attacker = models.TrainedAttacker()
self.assertRaises(NotImplementedError, base_attacker.train_model, [], [])
self.assertRaises(AssertionError, base_attacker.predict, [])
def test_predict_before_training(self):
lr_attacker = models.LogisticRegressionAttacker()
self.assertRaises(AssertionError, lr_attacker.predict, [])
def test_create_attacker_data_loss_only(self):
attack_input = AttackInputData(
loss_train=np.array([1, 3]), loss_test=np.array([2, 4]))
attacker_data = models.create_attacker_data(attack_input, 2)
self.assertLen(attacker_data.features_all, 4)
def test_create_attacker_data_loss_and_logits(self):
attack_input = AttackInputData(
logits_train=np.array([[1, 2], [5, 6], [8, 9]]),
logits_test=np.array([[10, 11], [14, 15]]),
loss_train=np.array([3, 7, 10]),
loss_test=np.array([12, 16]))
attacker_data = models.create_attacker_data(attack_input, balance=False)
self.assertLen(attacker_data.features_all, 5)
self.assertLen(attacker_data.fold_indices, 5)
self.assertEmpty(attacker_data.left_out_indices)
def test_unbalanced_create_attacker_data_loss_and_logits(self):
attack_input = AttackInputData(
logits_train=np.array([[1, 2], [5, 6], [8, 9]]),
logits_test=np.array([[10, 11], [14, 15]]),
loss_train=np.array([3, 7, 10]),
loss_test=np.array([12, 16]))
attacker_data = models.create_attacker_data(attack_input, balance=True)
self.assertLen(attacker_data.features_all, 5)
self.assertLen(attacker_data.fold_indices, 4)
self.assertLen(attacker_data.left_out_indices, 1)
self.assertIn(attacker_data.left_out_indices[0], [0, 1, 2])
def test_balanced_create_attacker_data_loss_and_logits(self):
attack_input = AttackInputData(
logits_train=np.array([[1, 2], [5, 6], [8, 9]]),
logits_test=np.array([[10, 11], [14, 15], [17, 18]]),
loss_train=np.array([3, 7, 10]),
loss_test=np.array([12, 16, 19]))
attacker_data = models.create_attacker_data(attack_input)
self.assertLen(attacker_data.features_all, 6)
self.assertLen(attacker_data.fold_indices, 6)
self.assertEmpty(attacker_data.left_out_indices)
if __name__ == '__main__':
absltest.main()
| 2 | 2 |
challonge.py | garsh0p/garpr-goog | 2 | 12785991 | <gh_stars>1-10
import iso8601
import os
import requests
from requests_toolbelt.adapters import appengine
appengine.monkeypatch()
BASE_CHALLONGE_API_URL = 'https://api.challonge.com/v1/tournaments'
URLS = {
'tournament': os.path.join(BASE_CHALLONGE_API_URL, '%s.json'),
'participants': os.path.join(
BASE_CHALLONGE_API_URL, '%s', 'participants.json'),
'matches': os.path.join(BASE_CHALLONGE_API_URL, '%s', 'matches.json'),
}
# http://api.challonge.com/v1
class ChallongeScraper(object):
def __init__(self, tournament_id):
self.api_key = os.environ.get('CHALLONGE_KEY')
self.api_key_dict = {'api_key': self.api_key}
self.tournament_id = tournament_id
self.raw_dict = None
self.get_raw()
def get_raw(self):
if self.raw_dict is not None:
return self.raw_dict
self.raw_dict = {}
for key, url in URLS.items():
url = url % self.tournament_id
self.raw_dict[key] = self._check_for_200(
requests.get(url, params=self.api_key_dict)).json()
return self.raw_dict
def get_url(self):
return self.get_raw()['tournament']['tournament']['full_challonge_url']
def get_name(self):
return self.get_raw()['tournament']['tournament']['name'].strip()
def get_date(self):
return iso8601.parse_date(self.get_raw()['tournament']['tournament']['created_at'])
def _human_round_names(self, matches):
"""Convert round names from numbers into strings like WQF and LF."""
last_round = matches[-1]['round']
SUFFIXES = ['GF', 'F', 'SF', 'QF']
rounds = {}
for i, finals in enumerate(SUFFIXES):
rounds[last_round-i] = finals
for i, finals in enumerate(SUFFIXES[1:]):
rounds[-(last_round-i)-1] = finals
reset = matches[-1]['round'] == matches[-2]['round']
reset_count = 1
for m in matches:
r = m['round']
name = 'W' if r > 0 else 'L'
if r not in rounds:
name = '{}R{}'.format(name, abs(r))
else:
if rounds[r] != 'GF':
name += rounds[r]
else:
name = 'GF'
if reset:
name += str(reset_count)
reset_count += 1
m['round'] = name
def get_matches(self):
# sometimes challonge seems to use the "group_player_ids" parameter of "participant" instead
# of the "id" parameter of "participant" in the "matches" api.
# not sure exactly when this happens, but the following code checks for both
player_map = dict()
for p in self.get_raw()['participants']:
if p['participant'].get('name'):
player_name = p['participant']['name'].strip()
else:
player_name = p['participant'].get('username', '<unknown>').strip()
player_map[p['participant'].get('id')] = player_name
if p['participant'].get('group_player_ids'):
for gpid in p['participant']['group_player_ids']:
player_map[gpid] = player_name
matches = []
for m in self.get_raw()['matches']:
m = m['match']
set_count = m['scores_csv']
winner_id = m['winner_id']
loser_id = m['loser_id']
round_num = m['round']
if winner_id is not None and loser_id is not None:
winner = player_map[winner_id]
loser = player_map[loser_id]
# TODO db object here?
match_result = {'winner': winner, 'loser': loser, 'round': round_num}
matches.append(match_result)
self._human_round_names(matches)
return matches
def get_players(self):
return [p['participant']['name'].strip()
if p['participant']['name'] else p['participant']['username'].strip()
for p in self.get_raw()['participants']]
def _check_for_200(self, response):
response.raise_for_status()
return response
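
# --- Hedged usage sketch (added by the editor; not part of the original module) ---
# Requires the CHALLONGE_KEY environment variable to be set; the tournament id
# below is a placeholder.
def _example_scrape():
    scraper = ChallongeScraper('mytourney123')
    print(scraper.get_name(), scraper.get_date())
    for match in scraper.get_matches():
        print(match['round'], match['winner'], 'beat', match['loser'])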
| 2.453125 | 2 |
RecoJets/JetAnalyzers/test/DijetRatioPlotExample_cfg.py | ckamtsikis/cmssw | 852 | 12785992 | <gh_stars>100-1000
# PYTHON configuration file.
# Description: Example of dijet ratio plot
# with corrected and uncorrected jets
# Author: <NAME>
# Date: 22 - November - 2009
import FWCore.ParameterSet.Config as cms
process = cms.Process("Ana")
process.load("FWCore.MessageService.MessageLogger_cfi")
############# Set the number of events #############
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1000)
)
############# Define the source file ###############
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'/store/mc/Summer09/QCDFlat_Pt15to3000/GEN-SIM-RECO/MC_31X_V9_7TeV-v1/0000/FABD2A94-C0D3-DE11-B6FD-00237DA13C2E.root')
)
process.source.inputCommands = cms.untracked.vstring("keep *","drop *_MEtoEDMConverter_*_*")
############# Include the jet corrections ##########
process.load("JetMETCorrections.Configuration.L2L3Corrections_Summer09_7TeV_ReReco332_cff")
# set the record's IOV. Must be defined once. Choose ANY correction service. #
process.prefer("L2L3JetCorrectorAK5Calo")
############# User analyzer (calo jets) ##
process.DijetRatioCaloJets = cms.EDAnalyzer("DijetRatioCaloJets",
# Uncorrected CaloJets
UnCorrectedJets = cms.string('ak5CaloJets'),
# Corrected CaloJets
CorrectedJets = cms.string('L2L3CorJetAK5Calo'),
# Name of the output ROOT file containing the histograms
HistoFileName = cms.untracked.string('DijetRatioCaloJets.root')
)
############# User analyzer (PF jets) ##
process.DijetRatioPFJets = cms.EDAnalyzer("DijetRatioPFJets",
# Uncorrected PFJets
UnCorrectedJets = cms.string('ak5PFJets'),
# Corrected PFJets
CorrectedJets = cms.string('L2L3CorJetAK5PF'),
# Name of the output ROOT file containing the histograms
HistoFileName = cms.untracked.string('DijetRatioPFJets.root')
)
############# User analyzer (gen jets) ##
# ak5GenJets are NOT there: First load the needed modules
process.load("RecoJets.Configuration.GenJetParticles_cff")
process.load("RecoJets.JetProducers.ak5GenJets_cfi")
process.DijetRatioGenJets = cms.EDAnalyzer("DijetRatioGenJets",
# Uncorrected GenJets
UnCorrectedJets = cms.string('ak5GenJets'),
# Corrected GenJets == Uncorrected GenJets
CorrectedJets = cms.string('ak5GenJets'),
# Name of the output ROOT file containing the histograms
HistoFileName = cms.untracked.string('DijetRatioGenJets.root')
)
############# Path ###########################
process.p = cms.Path(process.L2L3CorJetAK5Calo * process.DijetRatioCaloJets)
process.p2 = cms.Path(process.L2L3CorJetAK5PF * process.DijetRatioPFJets)
process.p3 = cms.Path(process.genParticlesForJets *
process.ak5GenJets * process.DijetRatioGenJets)
############# Format MessageLogger #################
process.MessageLogger.cerr.FwkReport.reportEvery = 10
| 1.765625 | 2 |
bin/bucmup.py | aelzenaar/bucephalus | 0 | 12785993 | <reponame>aelzenaar/bucephalus
import sys
import argparse
import json
import dbops
import config
from pathlib import Path
parser = argparse.ArgumentParser(description='Bucephalus: Update Metadata')
parser.add_argument('id', metavar='IDENT', type=str, nargs=1,
help='article ID')
parser.add_argument('-t', metavar='TITLE', type=str, nargs=1,
help='set title', default=None)
parser.add_argument('-a', metavar='AUTHOR', type=str, nargs=1,
help='set author name', default=None)
parser.add_argument('-T', metavar='TAGS', type=str, nargs='+',
help='set tags', default=None)
args = vars(parser.parse_args())
record = dbops.get_record_by_id(args['id'][0])
didSomething = False
if(args['a'] != None):
record['Buc_author'] = args['a'][0]
didSomething = True
if(args['t'] != None):
record['Buc_title'] = args['t'][0]
didSomething = True
if(args['T'] != None):
record['Buc_tags'] = args['T']
didSomething = True
if didSomething:
dbops.write_metadata(record)
| 2.421875 | 2 |
005/011.py | gerssivaldosantos/MeuGuru | 1 | 12785994 | <gh_stars>1-10
""" Write a function that reads a valid move from the user.  In 2048, the valid
moves are 'a', 's', 'd' and 'w'; do not accept any other input.
Note 2: the function receives the information from the shell (stdin),
not through function parameters. """


def receber_jogada():
    """ Reads a move and checks that it is valid """
    # List of the valid moves
    validas = ["a", "s", "d", "w"]
    while True:
        # Check whether the move belongs to the set of valid moves
        jogada = str(input("Enter a move: "))
        if jogada in validas:
            break
        else:
            print("Invalid move. ", end="")
    return jogada
| 3.46875 | 3 |
slnee_quality/slnee_quality/doctype/strategic_plan/strategic_plan.py | erpcloudsystems/slnee_quality | 0 | 12785995 | # Copyright (c) 2021, erpcloud.systems and contributors
# For license information, please see license.txt
# import frappe
from __future__ import unicode_literals
import frappe
from frappe.utils import getdate, nowdate
from frappe import _
from frappe.model.document import Document
from frappe.utils import cstr, get_datetime, formatdate
class StrategicPlan(Document):
def validate(self):
self.validate_duplicate_record()
def validate_duplicate_record(self):
res = frappe.db.sql("""
select name from `tabStrategic Plan`
where workflow_state NOT IN ("Approved","Rejected","Completed")
and name != %s
and docstatus != 2
""", (self.name))
if res:
frappe.throw(_("You Can't Create A New Strategic Plan While Another Plan Is Still In Progress").format(
frappe.bold(self.name)))
#pass
| 2.0625 | 2 |
addons/azureblobstorage/provider.py | tsukaeru/RDM-osf.io | 11 | 12785996 | # -*- coding: utf-8 -*-
from osf.models.external import BasicAuthProviderMixin
class AzureBlobStorageProvider(BasicAuthProviderMixin):
"""An alternative to `ExternalProvider` not tied to OAuth"""
name = 'Azure Blob Storage'
short_name = 'azureblobstorage'
def __init__(self, account=None):
super(AzureBlobStorageProvider, self).__init__()
# provide an unauthenticated session by default
self.account = account
def __repr__(self):
return '<{name}: {status}>'.format(
name=self.__class__.__name__,
status=self.account.display_name if self.account else 'anonymous'
)
| 2.484375 | 2 |
GermanOK/Book.py | romainledru/GermanOK | 0 | 12785997 | <reponame>romainledru/GermanOK
import random
from Save import Save
class Word:
def __init__(self):
"""Initialisation: download the actual version of data.json
"""
d = Save("")
self.dico = d.download()
def getDico(self):
return self.dico
def pickWord(self):
print("A Word is picked")
key = random.choice(list(self.dico))
return key ,self.dico[key]
    def compareWord(self, key, word):
        """Loosely compare a guess with the stored translation, tolerating up to two differing letters."""
        word = word.lower()
        target = self.dico[key][0]
        counter = 0
        # Only compare over the overlapping length to avoid an IndexError when
        # the guess is longer than the stored word.
        for letter in range(min(len(word), len(target))):
            if target[letter] == word[letter]:
                counter += 1
        return counter >= len(target) - 2
def updateWord(self,word,point):
word = word.lower()
if point:
self.dico[word][1] += 1
else:
self.dico[word][2] += 1
d = Save(self.getDico())
d.upload()
def deleteWord(self,word):
word = word.lower()
try:
self.dico.pop(word)
except KeyError:
print("Word does not exist on database")
pass
d = Save(self.getDico())
d.upload()
def newWord(self,de,fr):
de = de.lower()
fr = fr.lower()
print("New Word learned: {} for {}".format(de,fr))
try:
if self.dico[de]:
print("Word Already Exist")
pass
except KeyError:
print("Creating New Word")
self.dico[de] = [fr,0,0]
d = Save(self.getDico())
d.upload()
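
# --- Hedged usage sketch (added by the editor; not part of the original module) ---
# Assumes data.json (managed by the Save class) already holds entries of the form
# {german_word: [french_translation, times_right, times_wrong]}.
def _example_round():
    w = Word()
    german, info = w.pickWord()          # info is [translation, hits, misses]
    guess = input("Translate '{}': ".format(german))
    w.updateWord(german, w.compareWord(german, guess))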
| 3.53125 | 4 |
_note_/_xml_.py | By2048/_python_ | 2 | 12785998 | <reponame>By2048/_python_
from xml.etree.ElementTree import parse
def main():
with open("T:\\keymap.xml", 'r', encoding='utf-8') as file:
data = parse(file).getroot()
for action in data.findall('action'):
if not action.findall('keyboard-shortcut'):
continue
print(action.attrib['id'].lstrip('$'))
for keystroke in action.findall('keyboard-shortcut'):
first = keystroke.attrib['first-keystroke']
second = keystroke.attrib.get('second-keystroke')
print(f'{first}{" " + second if second else ""}')
print()
if __name__ == '__main__':
main()
| 3.078125 | 3 |
Camera/AutoDriving.py | maroneal/MircoITS | 0 | 12785999 | <reponame>maroneal/MircoITS
# import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
from time import sleep
import cv2
import numpy as np
import socket
import time
#Variables for the communication with the C program
TCP_IP = "127.0.0.1"
TCP_PORT = 4200
print("TCP target IP:", TCP_IP)
print("TCP target port:", TCP_PORT)
#Variables sent according to the position on the line
DataOnLine = 0xAC #Robot on the line
DataNoLine = 0xAF #No line seen
DataRight = 0xAE #Turn Right
DataLeft = 0xAD #Turn Left
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
rawCapture = PiRGBArray(camera)
i=1;
ThreshBin = 90 #binarization level
RightLim = 380 #Limit when a point is consider on the right
LeftLim = 260 #Limit when a point is consider on the left
ThreshObjSize = 27000 #limit area when an object is considered an obstacle
ObjectDetected = 0; #Object detected or not
# allow the camera to warmup
sleep(0.1)
# capture frames from the camera
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
# grab the raw NumPy array representing the image - this array
# will be 3D, representing the width, height, and # of channels
image = frame.array
#IMAGE PROCESSING FOR THE OBJECT DETECTION
crop_img_high = image[120:320, 0:640] #The top of the camera is selected
gray = cv2.cvtColor(crop_img_high, cv2.COLOR_BGR2GRAY) #Color to grayscale
blur=cv2.GaussianBlur(gray,(5,5),0) #remove noise
ret, thresh = cv2.threshold(blur,ThreshBin,255,cv2.THRESH_BINARY_INV) #Binarization
# Erode and dilate to remove accidental line detections
mask = cv2.erode(thresh, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)
# Find the contours of the frame
_,contours,hierarchy = cv2.findContours(mask, 1, cv2.CHAIN_APPROX_NONE)
# Find the biggest contour (if detected)
if len(contours) > 0:
#We take informations about the object detected
c = max(contours, key=cv2.contourArea)
M = cv2.moments(c)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
cv2.line(crop_img_high,(cx,0),(cx,720),(255,0,0),1)
cv2.line(crop_img_high,(0,cy),(1280,cy),(255,0,0),1)
cv2.drawContours(crop_img_high, contours, -1, (0,255,0), 1)
#If the object is small enough, we consider it as the line
if (M['m00']<ThreshObjSize):
ObjectDetected = 0;
else:
#sending the data to the C program
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((TCP_IP, TCP_PORT))
print("I see an obstacle")
MESSAGE = TCP_IP+";0;0;0;0;0;0;0;0;"+chr(DataNoLine)+";"
sock.send((MESSAGE).encode())
sock.close()
ObjectDetected = 1;
#IMAGE PROCESSING FOR THE LINE FOLLOWING
crop_img_low = image[200:640, 0:640] #We only consider the low part of the camera
gray2 = cv2.cvtColor(crop_img_low, cv2.COLOR_BGR2GRAY)
blur2=cv2.GaussianBlur(gray2,(5,5),0)
ret, thresh2 = cv2.threshold(blur2,ThreshBin,255,cv2.THRESH_BINARY_INV)
# Erode and dilate to remove accidental line detections
mask2 = cv2.erode(thresh2, None, iterations=2)
mask2 = cv2.dilate(mask2, None, iterations=2)
# Find the contours of the frame
_,contours2,_ = cv2.findContours(mask2, 1, cv2.CHAIN_APPROX_NONE)
# Find the biggest contour (if detected)
if ((len(contours2) > 0) and ObjectDetected==0):
c = max(contours2, key=cv2.contourArea)
M = cv2.moments(c)
cv2.imshow("Line", mask2) #Show the frame of the line
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
cv2.line(crop_img_low,(cx,0),(cx,720),(255,0,0),1)
cv2.line(crop_img_low,(0,cy),(1280,cy),(255,0,0),1)
cv2.drawContours(crop_img_low, contours2, -1, (0,255,0), 1)
if cx >= RightLim:
#sending the data to the C program
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((TCP_IP, TCP_PORT))
print ("Turn Right")
MESSAGE = TCP_IP+";0;0;0;0;0;0;0;0;"+chr(DataRight)+";"
sock.send((MESSAGE).encode())
sock.close()
if cx < RightLim and cx > LeftLim:
#sending the data to the C program
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((TCP_IP, TCP_PORT))
print ("On Track!")
MESSAGE =TCP_IP+";0;0;0;0;0;0;0;0;"+chr(DataOnLine)+";"
sock.send((MESSAGE).encode())
sock.close()
if cx <= LeftLim:
#sending the data to the C program
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((TCP_IP, TCP_PORT))
print ("Turn Left")
MESSAGE = TCP_IP+";0;0;0;0;0;0;0;0;"+chr(DataLeft)+";"
sock.send((MESSAGE).encode())
sock.close()
else:
#sending the data to the C program
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((TCP_IP, TCP_PORT))
print("I don't see the line")
MESSAGE = TCP_IP+";0;0;0;0;0;0;0;0;"+chr(DataNoLine)+";"
sock.send((MESSAGE).encode())
sock.close()
# show the frame
cv2.imshow("Obstacle", mask)
#cv2.imshow("Camera", image)
key = cv2.waitKey(1) & 0xFF
# clear the stream in preparation for the next frame
rawCapture.truncate(0)
#Press "r" to start a record and "s" to stop
if key == ord("r"):
camera.start_recording('/home/pi/Desktop/MicroITS/Camera/Server/video.h264')
if key == ord("s"):
camera.stop_recording()
# if the `p` key was pressed, take a picture
if key == ord("p"):
camera.capture('/home/pi/Desktop/MicroITS/Camera/Server/capture%s.jpg' %i)
i= i+1
# if the `q` key was pressed, break from the loop
if key == ord("q"):
cv2.destroyAllWindows()
break
| 2.703125 | 3 |
pdiff/__main__.py | nkouevda/pdiff | 5 | 12786000 | <gh_stars>1-10
import os
import sys
from . import argument_parser
from . import diff_formatter
def main():
parser = argument_parser.get_parser()
args = parser.parse_args()
for filename in (args.old_filename, args.new_filename):
if not os.path.exists(filename):
sys.stderr.write('error: file does not exist: %s\n' % filename)
return 1
elif os.path.isdir(filename):
sys.stderr.write('error: path is a directory: %s\n' % filename)
return 1
formatter = diff_formatter.DiffFormatter(
args.old_filename,
args.new_filename,
args.context,
args.width,
args.tab_size,
args.signs,
args.line_numbers,
args.background)
for line in formatter.get_lines():
sys.stdout.write(line)
return 0
if __name__ == '__main__':
sys.exit(main())
| 2.875 | 3 |
src/data_processing/Dallas/dallas_data.py | cyrusneary/multiscaleLockdownCovid19 | 0 | 12786001 | # %%
import sys, os
import pandas as pd
import networkx as nx
# import matplotlib.pyplot as plt
import numpy as np
import pickle
base_file_path = os.path.abspath(os.path.join(os.curdir, '..','..', '..')) # should point to the level above the src directory
data_path = os.path.join(base_file_path, 'data', 'Intercity_Dallas')
# (grocery_demand, fitness_demand, pharmacy_demand, physician_demand, hotel_demand, religion_demand, restaurant_demand)
# Entity indexes
# 0 - groceries
# 1 - fitness
# 2 - pharmacy
# 3 - physician
# 4 - hotel
# 5 - religion
# 6 - restaurant
# Data processing parameters
fitness_freq = 94/12 # visits per unique visitor per month
pharmacy_freq = 35/12 # visits per unique visitor per month
physician_freq = 1 # visits per unique visitor per month
hotel_freq = 1 # visits per unique visitor per month
# religion_freq = 25/12 # visits per unique visitor per month
grocery_freq = 2 # visits per unique visitor per month
restaurant_freq = 1 # Assume each restaurant-goer only visits a given restaurant once per month (if at all)
month_day_time_conversion = 1/30 # months/day
min_demand_val = 5
# %%
# First get a list of the counties in Dallas MSA
county_fitness = pd.read_excel(os.path.join(data_path,'TX_Fitness_County.xlsx'))
counties = list(county_fitness.CNTY_NM.unique())
num_counties = len(counties)
print(counties)
county_data = dict()
for county in counties:
county_data[county] = {'index' : counties.index(county)}
# %%
# In county data, save a list of the block groups belonging to each county.
for county in counties:
county_data[county]['bg_list'] = set()
# Load and store block-group statistics
bg_info = dict()
# Save population data by county
print('Processing population data...')
population_data = pd.read_excel(os.path.join(data_path, 'Population_bg_Dallas.xlsx'))
for index, row in population_data.iterrows():
county = row['NAME']
if county in counties:
bg_id = row['GEO_ID']
population = row['Population']
bg_info[bg_id] = dict()
bg_info[bg_id]['county'] = county
bg_info[bg_id]['population'] = population
county_data[county]['bg_list'].add(bg_id)
# Save devices data by county
print('Processing device data...')
device_data = pd.read_excel(os.path.join(data_path, 'TX_Devices_bg.xlsx'))
for index, row in device_data.iterrows():
bg_id = row['census_block_group']
if bg_id in bg_info.keys():
devices = row['number_devices_residing']
bg_info[bg_id]['devices'] = devices
# %%
# Create arrays to store population and related data
devices = np.zeros((num_counties,))
populations = np.zeros((num_counties,))
# Now save populations and device counts by county
for county in counties:
county_data[county]['population'] = 0
county_data[county]['devices'] = 0
# Iterate over the block groups in each county and add the population and device count
for bg_id in county_data[county]['bg_list']:
county_data[county]['population'] = county_data[county]['population'] + bg_info[bg_id]['population']
county_data[county]['devices'] = county_data[county]['devices'] + bg_info[bg_id]['devices']
devices[county_data[county]['index']] = county_data[county]['devices']
populations[county_data[county]['index']] = county_data[county]['population']
# %%
# Create a map from safegraph ID to county
sgid_to_county = dict()
fitness_county = pd.read_excel(os.path.join(data_path, 'TX_Fitness_County.xlsx'))
for index, row in fitness_county.iterrows():
sgid = row['safegraph_']
county = row['CNTY_NM']
sgid_to_county[sgid] = county
grocery_county = pd.read_excel(os.path.join(data_path, 'TX_Grocery_County.xlsx'))
for index, row in grocery_county.iterrows():
sgid = row['safegraph_']
county = row['CNTY_NM']
sgid_to_county[sgid] = county
hmotel_county = pd.read_excel(os.path.join(data_path, 'TX_HMotel_County.xlsx'))
for index, row in hmotel_county.iterrows():
sgid = row['safegraph_']
county = row['CNTY_NM']
sgid_to_county[sgid] = county
pharmacy_county = pd.read_excel(os.path.join(data_path, 'TX_Pharmacy_County.xlsx'))
for index, row in pharmacy_county.iterrows():
sgid = row['safegraph_']
county = row['CNTY_NM']
sgid_to_county[sgid] = county
physician_county = pd.read_excel(os.path.join(data_path, 'TX_Physician_County.xlsx'))
for index, row in physician_county.iterrows():
sgid = row['safegraph_']
county = row['CNTY_NM_1']
sgid_to_county[sgid] = county
restaurant_county = pd.read_excel(os.path.join(data_path, 'TX_Restaurant_County.xlsx'))
for index, row in restaurant_county.iterrows():
sgid = row['safegraph_']
county = row['CNTY_NM']
sgid_to_county[sgid] = county
# %%
# Create arrays to store demand data
fitness_demand = np.zeros((num_counties,1))
pharmacy_demand = np.zeros((num_counties,1))
physician_demand = np.zeros((num_counties,1))
hotel_demand = np.zeros((num_counties,1))
religion_demand = np.zeros((num_counties,1))
grocery_demand = np.zeros((num_counties,1))
restaurant_demand = np.zeros((num_counties,1))
# %%
# Process grocery data
print('Processing grocery data...')
grocery_data = pd.read_excel(os.path.join(data_path, 'Intercity_Dallas_Grocery.xlsx'))
grocery_demand_dest_mat = np.zeros((num_counties, num_counties))
for indexDF, rowDF in grocery_data.iterrows():
sgid = rowDF['safegraph_place_id']
destination_county = sgid_to_county[sgid]
origin_county = bg_info[rowDF['visitor_home_cbgs']]['county']
count = rowDF['Count']
destination_ind = county_data[destination_county]['index']
origin_ind = county_data[origin_county]['index']
grocery_demand_dest_mat[origin_ind, destination_ind] = \
int(grocery_demand_dest_mat[origin_ind, destination_ind] + (count * grocery_freq))
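# Scale device-based visit counts up to the full county population and convert monthly totals to daily rates.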
for i in range(num_counties):
for j in range(num_counties):
grocery_demand_dest_mat[i,j] = grocery_demand_dest_mat[i,j] * populations[i] / devices[i] * month_day_time_conversion
county_data[counties[i]]['grocery_demand_dest'] = grocery_demand_dest_mat[i, :]
for i in range(num_counties):
grocery_demand[i] = np.sum(grocery_demand_dest_mat[i,:])
if grocery_demand[i] <= min_demand_val:
grocery_demand[i] = min_demand_val
county_data[counties[i]]['grocery_demand'] = grocery_demand[i]
# %%
# Process fitness data
print('Processing fitness data...')
fitness_data = pd.read_excel(os.path.join(data_path, 'Intercity_Dallas_Fitness.xlsx'))
fitness_demand_dest_mat = np.zeros((num_counties, num_counties))
for indexDF, rowDF in fitness_data.iterrows():
sgid = rowDF['safegraph_place_id']
destination_county = sgid_to_county[sgid]
origin_county = bg_info[rowDF['visitor_home_cbgs']]['county']
count = rowDF['Count']
destination_ind = county_data[destination_county]['index']
origin_ind = county_data[origin_county]['index']
fitness_demand_dest_mat[origin_ind, destination_ind] = \
int(fitness_demand_dest_mat[origin_ind, destination_ind] + (count * fitness_freq))
for i in range(num_counties):
for j in range(num_counties):
fitness_demand_dest_mat[i,j] = fitness_demand_dest_mat[i,j] * populations[i] / devices[i] * month_day_time_conversion
county_data[counties[i]]['fitness_demand_dest'] = fitness_demand_dest_mat[i, :]
for i in range(num_counties):
fitness_demand[i] = np.sum(fitness_demand_dest_mat[i,:])
if fitness_demand[i] <= min_demand_val:
fitness_demand[i] = min_demand_val
county_data[counties[i]]['fitness_demand'] = fitness_demand[i]
# %%
# Process pharmacy data
print('Processing pharmacy data...')
pharmacy_data = pd.read_excel(os.path.join(data_path, 'Intercity_Dallas_Pharmacy.xlsx'))
pharmacy_demand_dest_mat = np.zeros((num_counties, num_counties))
for indexDF, rowDF in pharmacy_data.iterrows():
sgid = rowDF['safegraph_place_id']
destination_county = sgid_to_county[sgid]
origin_county = bg_info[rowDF['visitor_home_cbgs']]['county']
count = rowDF['Count']
destination_ind = county_data[destination_county]['index']
origin_ind = county_data[origin_county]['index']
pharmacy_demand_dest_mat[origin_ind, destination_ind] = \
int(pharmacy_demand_dest_mat[origin_ind, destination_ind] + (count * pharmacy_freq))
for i in range(num_counties):
for j in range(num_counties):
pharmacy_demand_dest_mat[i,j] = pharmacy_demand_dest_mat[i,j] * populations[i] / devices[i] * month_day_time_conversion
county_data[counties[i]]['pharmacy_demand_dest'] = pharmacy_demand_dest_mat[i, :]
for i in range(num_counties):
pharmacy_demand[i] = np.sum(pharmacy_demand_dest_mat[i,:])
if pharmacy_demand[i] <= min_demand_val:
pharmacy_demand[i] = min_demand_val
county_data[counties[i]]['pharmacy_demand'] = pharmacy_demand[i]
# %%
# Process physician data
print('Processing physician data...')
physician_data = pd.read_excel(os.path.join(data_path, 'Intercity_Dallas_Physician.xlsx'))
physician_demand_dest_mat = np.zeros((num_counties, num_counties))
for indexDF, rowDF in physician_data.iterrows():
sgid = rowDF['safegraph_place_id']
destination_county = sgid_to_county[sgid]
origin_county = bg_info[rowDF['visitor_home_cbgs']]['county']
count = rowDF['Count']
destination_ind = county_data[destination_county]['index']
origin_ind = county_data[origin_county]['index']
physician_demand_dest_mat[origin_ind, destination_ind] = \
int(physician_demand_dest_mat[origin_ind, destination_ind] + (count * physician_freq))
for i in range(num_counties):
for j in range(num_counties):
physician_demand_dest_mat[i,j] = physician_demand_dest_mat[i,j] * populations[i] / devices[i] * month_day_time_conversion
county_data[counties[i]]['physician_demand_dest'] = physician_demand_dest_mat[i, :]
for i in range(num_counties):
physician_demand[i] = np.sum(physician_demand_dest_mat[i,:])
if physician_demand[i] <= min_demand_val:
physician_demand[i] = min_demand_val
county_data[counties[i]]['physician_demand'] = physician_demand[i]
# %%
# Process hotel data
print('Processing hotel data...')
hotel_data = pd.read_excel(os.path.join(data_path, 'Intercity_Dallas_HotelMotel.xlsx'))
hotel_demand_dest_mat = np.zeros((num_counties, num_counties))
for indexDF, rowDF in hotel_data.iterrows():
sgid = rowDF['safegraph_place_id']
destination_county = sgid_to_county[sgid]
origin_county = bg_info[rowDF['visitor_home_cbgs']]['county']
count = rowDF['Count']
destination_ind = county_data[destination_county]['index']
origin_ind = county_data[origin_county]['index']
hotel_demand_dest_mat[origin_ind, destination_ind] = \
int(hotel_demand_dest_mat[origin_ind, destination_ind] + (count * hotel_freq))
for i in range(num_counties):
for j in range(num_counties):
hotel_demand_dest_mat[i,j] = hotel_demand_dest_mat[i,j] * populations[i] / devices[i] * month_day_time_conversion
county_data[counties[i]]['hotel_demand_dest'] = hotel_demand_dest_mat[i, :]
for i in range(num_counties):
hotel_demand[i] = np.sum(hotel_demand_dest_mat[i,:])
if hotel_demand[i] <= min_demand_val:
hotel_demand[i] = min_demand_val
county_data[counties[i]]['hotel_demand'] = hotel_demand[i]
# %%
# Process restaurant data
print('Processing restaurant data...')
restaurant_data = pd.read_excel(os.path.join(data_path, 'Intercity_Dallas_Restaurant.xlsx'))
restaurant_demand_dest_mat = np.zeros((num_counties, num_counties))
for indexDF, rowDF in restaurant_data.iterrows():
sgid = rowDF['safegraph_place_id']
destination_county = sgid_to_county[sgid]
origin_county = bg_info[rowDF['visitor_home_cbgs']]['county']
count = rowDF['Count']
destination_ind = county_data[destination_county]['index']
origin_ind = county_data[origin_county]['index']
restaurant_demand_dest_mat[origin_ind, destination_ind] = \
int(restaurant_demand_dest_mat[origin_ind, destination_ind] + (count * restaurant_freq))
for i in range(num_counties):
for j in range(num_counties):
restaurant_demand_dest_mat[i,j] = restaurant_demand_dest_mat[i,j] * populations[i] / devices[i] * month_day_time_conversion
county_data[counties[i]]['restaurant_demand_dest'] = restaurant_demand_dest_mat[i, :]
for i in range(num_counties):
restaurant_demand[i] = np.sum(restaurant_demand_dest_mat[i,:])
if restaurant_demand[i] <= min_demand_val:
restaurant_demand[i] = min_demand_val
county_data[counties[i]]['restaurant_demand'] = restaurant_demand[i]
# %%
# Save the results
# First check if the save directory exists
if not os.path.isdir(os.path.join(data_path, 'data_processing_outputs')):
os.mkdir(os.path.join(data_path, 'data_processing_outputs'))
demand_array=np.concatenate((grocery_demand, fitness_demand, pharmacy_demand, physician_demand, hotel_demand, restaurant_demand), axis=1)
demand_array.shape
print(demand_array)
np.save(os.path.join(data_path, 'data_processing_outputs', 'demand_array_dallas.npy'), demand_array)
np.save(os.path.join(data_path, 'data_processing_outputs', 'populations_array_dallas.npy'), populations)
pickle.dump(county_data, open(os.path.join(data_path, 'data_processing_outputs', 'county_data.p'), 'wb'))
# %%
| 2.6875 | 3 |
quizzes/00.organize.me/Cracking the Coding Interview/18-8.py | JiniousChoi/encyclopedia-in-code | 2 | 12786002 | #!/usr/bin/env python3
''' Given a string s and an array T of strings each shorter than s,
    write a method that searches s for every string in T.'''
import unittest
class TreeRoot:
def __init__(self, s):
self.root = SuffixTreeNode()
root = self.root
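        # Insert every suffix of s, tagged with its starting index, so substring search can return all match positions.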
for i in range(len(s)):
root.insertString(s[i:], i)
def search(self, s):
return self.root.search(s)
class SuffixTreeNode:
def __init__(self):
self.indexes = []
        # The node's character value is stored as the key in its parent's children dict.
self.children = {}
def insertString(self, s, i):
        ''' Build the sub-tree (children) for the characters of `s`.
            `i` is the starting index of this suffix in the original string. '''
if not s:
return
first = s[0]
remainder = s[1:]
if first not in self.children:
child = SuffixTreeNode()
self.children[first] = child
child = self.children[first]
child.indexes.append(i)
child.insertString(remainder, i)
def search(self, s):
        ''' Follow the child nodes along the path spelled by `s`.
            Return the stored indexes if the full path exists,
            otherwise None.'''
#invariant: there is a path in the tree so far.
if not s:
return self.indexes
first = s[0]
remainder = s[1:]
if first in self.children:
return self.children[first].search(remainder)
#invariant: Path cuts here.
return None
#def search(self, s):
# ''' follow through sub-nodes for `s` path.
# Return indexes of the path if there was.
# Otherwise, None'''
# assert s
# #invariant: there is a path in the tree so far.
# first = s[0]
# remainder = s[1:]
# if first not in self.children:
# return None
# child = self.children[first]
# if remainder:
# return child.search(remainder)
# else:
# return child.indexes
class SuffixTreeTest(unittest.TestCase):
def test_sample(self):
root = TreeRoot("bibs")
#self.assertEqual(root.search(""), [])
self.assertEqual(root.search("b"), [0,2])
self.assertEqual(root.search("bi"), [0])
self.assertEqual(root.search("bib"), [0])
self.assertEqual(root.search("bibs"), [0])
self.assertEqual(root.search("i"), [1])
self.assertEqual(root.search("ib"), [1])
self.assertEqual(root.search("ibs"), [1])
self.assertEqual(root.search("bs"), [2])
self.assertEqual(root.search("s"), [3])
self.assertEqual(root.search("not-exist"), None)
if __name__=="__main__":
unittest.main()
| 3.84375 | 4 |
app/senders/queries.py | mcherdakov/herodotus | 0 | 12786003 | from time import time
from uuid import UUID
import asyncpg
from app.senders.models import (EmailConfInDb, EmailStatus, Message,
MessageStatus, TelegramConfInDb,
TelegramStatus)
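# Thin asyncpg helpers: each coroutine runs a single query against the sender configuration, message, and status tables.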
async def insert_email_conf(conn: asyncpg.Connection, conf: EmailConfInDb):
await conn.execute(
"INSERT INTO email_conf(uuid, project_uuid, email) VALUES ($1, $2, $3)",
conf.uuid,
conf.project_uuid,
conf.email,
)
async def insert_telegram_conf(conn: asyncpg.Connection, conf: TelegramConfInDb):
await conn.execute(
"INSERT INTO telegram_conf(uuid, project_uuid, chat_id) VALUES ($1, $2, $3)",
conf.uuid,
conf.project_uuid,
conf.chat_id,
)
async def get_email_conf(
conn: asyncpg.Connection, conf_uuid: UUID
) -> EmailConfInDb | None:
raw: asyncpg.Record = await conn.fetchrow(
"SELECT * FROM email_conf WHERE uuid = $1",
conf_uuid,
)
if raw is None:
return None
return EmailConfInDb(**raw)
async def get_telegram_conf(
conn: asyncpg.Connection, conf_uuid: UUID
) -> TelegramConfInDb | None:
raw: asyncpg.Record = await conn.fetchrow(
"SELECT * FROM telegram_conf WHERE uuid = $1",
conf_uuid,
)
if raw is None:
return None
return TelegramConfInDb(**raw)
async def get_project_confs(
conn: asyncpg.Connection, project_uuid: UUID
) -> list[EmailConfInDb | TelegramConfInDb]:
raw_email_confs: list[asyncpg.Record] = await conn.fetch(
"SELECT * FROM email_conf WHERE project_uuid = $1", project_uuid
)
email_confs = [EmailConfInDb(**c) for c in raw_email_confs]
raw_telegram_confs: list[asyncpg.Record] = await conn.fetch(
"SELECT * FROM telegram_conf WHERE project_uuid = $1", project_uuid
)
telegram_confs = [TelegramConfInDb(**c) for c in raw_telegram_confs]
return [*email_confs, *telegram_confs]
async def insert_message(conn: asyncpg.Connection, message: Message):
await conn.execute(
"""
INSERT INTO messages(uuid, project_uuid, title, text, sync, scheduled_ts, status, attempts)
VALUES($1, $2, $3, $4, $5, $6, $7, $8);
""",
message.uuid,
message.project_uuid,
message.title,
message.text,
message.sync,
message.scheduled_ts,
message.status,
message.attempts,
)
async def get_message(conn: asyncpg.Connection, message_uuid: UUID) -> Message | None:
raw: asyncpg.Record = await conn.fetchrow(
"SELECT * FROM messages WHERE uuid = $1", message_uuid
)
if raw is None:
return None
return Message(**raw)
async def insert_email_statuses(
conn: asyncpg.Connection, email_statuses: list[EmailStatus]
):
await conn.executemany(
"""
INSERT INTO email_status(uuid, message_uuid, email_conf_uuid, status)
VALUES ($1, $2, $3, $4);
""",
[
(
email_status.uuid,
email_status.message_uuid,
email_status.email_conf_uuid,
email_status.status,
)
for email_status in email_statuses
],
)
async def update_email_status(conn: asyncpg.Connection, email_status: EmailStatus):
await conn.execute(
"""
UPDATE email_status SET (message_uuid, email_conf_uuid, status) =
($1, $2, $3)
WHERE uuid = $4;
""",
email_status.message_uuid,
email_status.email_conf_uuid,
email_status.status,
email_status.uuid,
)
async def insert_telegram_statuses(
conn: asyncpg.Connection, telegram_statuses: list[TelegramStatus]
):
await conn.executemany(
"""
INSERT INTO telegram_status(uuid, message_uuid, telegram_conf_uuid, status)
VALUES ($1, $2, $3, $4);
""",
[
(
telegram_status.uuid,
telegram_status.message_uuid,
telegram_status.telegram_conf_uuid,
telegram_status.status,
)
for telegram_status in telegram_statuses
],
)
async def update_telegram_status(
conn: asyncpg.Connection, telegram_status: TelegramStatus
):
await conn.execute(
"""
UPDATE telegram_status SET (message_uuid, telegram_conf_uuid, status) =
($1, $2, $3)
WHERE uuid = $4;
""",
telegram_status.message_uuid,
telegram_status.telegram_conf_uuid,
telegram_status.status,
telegram_status.uuid,
)
async def get_statuses_for_message(
conn: asyncpg.Connection, message_uuid: UUID
) -> list[EmailStatus | TelegramStatus]:
email_raw = await conn.fetch(
"SELECT * FROM email_status WHERE message_uuid = $1", message_uuid
)
email_statuses = [EmailStatus(**s) for s in email_raw]
telegram_raw = await conn.fetch(
"SELECT * FROM telegram_status WHERE message_uuid = $1", message_uuid
)
telegram_statuses = [TelegramStatus(**s) for s in telegram_raw]
return [*email_statuses, *telegram_statuses]
async def get_unprocessed_messages(
conn: asyncpg.Connection, limit: int = 100
) -> list[Message]:
raw = await conn.fetch(
"""
SELECT * FROM messages
WHERE sync = false AND status = $1 AND scheduled_ts <= $2
ORDER BY scheduled_ts
LIMIT $3
""",
MessageStatus.scheduled,
time(),
limit,
)
return [Message(**m) for m in raw]
async def update_message(conn: asyncpg.Connection, message: Message):
await conn.execute(
"""
UPDATE messages SET (project_uuid, title, text, sync, scheduled_ts, status) =
($1, $2, $3, $4, $5, $6)
WHERE uuid = $7;
""",
message.project_uuid,
message.title,
message.text,
message.sync,
message.scheduled_ts,
message.status,
message.uuid,
)
| 2.34375 | 2 |
back-end/legacy/cp_pytorch_ts.py | yenchiah/deep-smoke-machine | 88 | 12786004 | <reponame>yenchiah/deep-smoke-machine
import torch
import torch.nn as nn
import torch.nn.functional as F
class MotionCNN(nn.Module):
def __init__(self):
super().__init__()
self.model = nn.Sequential(
nn.Conv2d(in_channels=72, out_channels=96, kernel_size=7, stride=2),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.ReLU(),
nn.LocalResponseNorm(size=2),
nn.Conv2d(in_channels=96, out_channels=256, kernel_size=5,
stride=2),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.ReLU(),
nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3,
stride=1),
nn.ReLU(),
nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3,
stride=1),
nn.ReLU(),
#nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3,
# stride=1),
#nn.MaxPool2d(kernel_size=3, stride=2),
#nn.ReLU(),
)
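        # Dummy forward pass to infer the flattened feature size that feeds the first fully connected layer.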
mock_input = torch.randn(4, 72, 224, 224)
mock_output = self.model(mock_input)
flattened_output = torch.flatten(mock_output, start_dim=1)
fc_in_dim = flattened_output.shape[1] # Get number of nodes from flattened value's size, then convert 0 dim tensor to integer
self.full_conn1 = nn.Linear(in_features=fc_in_dim, out_features=4096)
#self.full_conn2 = nn.Linear(in_features=4096, out_features=2048)
#self.full_conn3 = nn.Linear(in_features=2048, out_features=2)
self.full_conn3 = nn.Linear(in_features=4096, out_features=2)
def forward(self, x):
x = self.model(x)
x = torch.flatten(x, start_dim=1) # Flattens layers without losing batches
x = self.full_conn1(x)
#x = F.dropout(x)
#x = self.full_conn2(x)
#x = F.dropout(x)
x = self.full_conn3(x)
        return F.softmax(x, dim=1)  # normalize over the class dimension (dim=0 would normalize across the batch)
class SpatialCNN(nn.Module):
def __init__(self):
super().__init__()
self.model = nn.Sequential(
nn.Conv2d(in_channels=108, out_channels=96, kernel_size=7, stride=2),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.ReLU(),
nn.LocalResponseNorm(size=2),
nn.Conv2d(in_channels=96, out_channels=256, kernel_size=5,
stride=2),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.ReLU(),
nn.LocalResponseNorm(size=2),
nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3,
stride=1),
nn.ReLU(),
nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3,
stride=1),
nn.ReLU(),
nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3,
stride=1),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.ReLU(),
)
mock_input = torch.randn(32, 108, 224, 224)
mock_output = self.model(mock_input)
flattened_output = torch.flatten(mock_output, start_dim=1)
fc_in_dim = flattened_output.shape[1] # Get number of nodes from flattened value's size, then convert 0 dim tensor to integer
self.full_conn1 = nn.Linear(in_features=fc_in_dim, out_features=4096)
self.full_conn2 = nn.Linear(in_features=4096, out_features=2048)
self.full_conn3 = nn.Linear(in_features=2048, out_features=2)
#self.full_conn3 = nn.Linear(in_features=4096, out_features=2)
def forward(self, x):
x = self.model(x)
x = torch.flatten(x, start_dim=1) # Flattens layers without losing batches
x = self.full_conn1(x)
x = F.dropout(x)
x = self.full_conn2(x)
x = F.dropout(x)
x = self.full_conn3(x)
        return F.softmax(x, dim=1)  # normalize over the class dimension (dim=0 would normalize across the batch)
| 2.3125 | 2 |
tools/SMZ-NNP/gatherRMSE-GaN350.py | s-okugawa/HDNNP-tools | 0 | 12786005 | # coding: utf-8
import matplotlib.pyplot as plt
import csv
"""
This script is for gathering force/RMSE data from training result of
GaN 350 sample and plot them
"""
if __name__ == '__main__':
GaN350folder="/home/okugawa/NNP-F/GaN/SMZ-200901/training_2element/350smpl/"
outfile=GaN350folder+"result/RMSE.csv"
pltfile=GaN350folder+"result/fRMSE.png"
pltdata=[[] for i in range(10)]
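    # pltdata[j] collects the test-set force RMSE of trial j for the two Energy:Force weightings (runs 1-10 vs 11-20).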
with open(outfile, 'w') as outf:
writer1 = csv.writer(outf, lineterminator='\n')
for i in range(1,21):
testjobfile= GaN350folder+str(i)+"/testjob.dat"
with open(testjobfile, 'r') as testjob:
for line in testjob:
if "Total number of data:" in line:
totnum=int(line.split()[4])
elif "Number of training data:" in line:
trnum=int(line.split()[4])
elif "Number of test data:" in line:
tsnum=int(line.split()[4])
elif "# RMSE of training:" in line:
if "eV/atom" in line:
etrn=float(line.split()[4])*1000
elif "eV/ang" in line:
ftrn=float(line.split()[4])*1000
elif "# RMSE of test:" in line:
if "eV/atom" in line:
etstdt=line.split()[4]
if etstdt=="NaN":
etst=etstdt
else:
etst=float(etstdt)*1000
elif "eV/ang" in line:
ftstdt=line.split()[4]
if ftstdt=="NaN":
ftst=ftstdt
else:
ftst=float(ftstdt)*1000
if i<11:
pltdata[i-1].append(ftst)
else:
pltdata[i-11].append(ftst)
wrdata= [i,totnum,trnum,tsnum,etrn,ftrn,etst,ftst]
writer1.writerow(wrdata)
#Plot force/RMSE data
xlbl=["2:8","5:5"]
clr=["b","green"]
fig = plt.figure()
ax1 = fig.add_subplot(111)
plt.title("GaN 350sample force/RMSE")
ax1.set_xlabel("Loss-F Energy:Force")
ax1.set_ylabel("force/RMSE (meV/ang)")
ax1.grid(True)
for j in range(10):
ax1.scatter(xlbl,pltdata[j],c=clr,marker='.')
plt.savefig(pltfile)
    plt.close()
 | 2.6875 | 3 |
tests/event_demo/event_demo.py | erikgqp8645/vnpy | 0 | 12786006 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 13 13:50:45 2018
@author: 18665
"""
# encoding: UTF-8
import sys
from datetime import datetime
from threading import *
#sys.path.append('D:\\works\\TestFile')
#print(sys.path)
from .eventManager import *
# Event name: new article
EVENT_ARTICAL = "Event_Artical"
# Event source: the public (official) account
class PublicAccounts:
def __init__(self,eventManager):
self.__eventManager = eventManager
def WriteNewArtical(self):
        # Event object: a new article was written
        event = Event(type_=EVENT_ARTICAL)
        event.dict["artical"] = u'How to write more elegant code\n'
        # Send the event
        self.__eventManager.SendEvent(event)
        print(u'The public account published a new article\n')
# Listener (subscriber)
class Listener:
def __init__(self,username):
self.__username = username
    # Listener handler: read the article
def ReadArtical(self,event):
        print(u'%s received a new article' % self.__username)
        print(u'Reading the new article: %s' % event.dict["artical"])
"""测试函数"""
#--------------------------------------------------------------------
def test():
    # Instantiate the listeners
    listner1 = Listener("thinkroom") # subscriber 1
    listner2 = Listener("steve")    # subscriber 2
    # Instantiate the event manager
eventManager = EventManager()
    # Bind the event to the listeners' handler functions (new article)
eventManager.AddEventListener(EVENT_ARTICAL, listner1.ReadArtical)
eventManager.AddEventListener(EVENT_ARTICAL, listner2.ReadArtical)
    # Start the event manager (this starts the event-processing thread)
eventManager.Start()
publicAcc = PublicAccounts(eventManager)
timer = Timer(2, publicAcc.WriteNewArtical)
timer.start()
if __name__ == '__main__':
test()
| 2.390625 | 2 |
pyb4ml/inference/factored/factored_algorithm.py | ax-va/PyB4ML | 0 | 12786007 | <gh_stars>0
import copy
from pyb4ml.modeling.categorical.variable import Variable
from pyb4ml.modeling.factor_graph.factor import Factor
from pyb4ml.modeling.factor_graph.factor_graph import FactorGraph
class FactoredAlgorithm:
"""
    Abstract base class for factored algorithms. Concrete algorithms such as
    Belief Propagation or Bucket Elimination inherit from it; it defines the
    attributes and methods they have in common.
"""
def __init__(self, model: FactorGraph):
# Inner model not specified
self._inner_model = None
# Outer model not specified
self._outer_model = None
# Specify the outer and inner models
self._set_model(model)
# Query not specified
self._query = ()
# Evidential variables not specified
self._evidence = ()
# Evidence tuples of (var, val) not specified
self._evidence_tuples = ()
# Probability distribution P(query) or P(query|evidence) not specified
self._distribution = None
@property
def elimination_variables(self):
"""
Returns the non-query and non-evidential algorithm variables
"""
if self._query:
if self._evidence:
return tuple(var for var in self.variables if var not in self._query and var not in self._evidence)
else:
return tuple(var for var in self.variables if var not in self._query)
else:
if self._evidence:
return tuple(var for var in self.variables if var not in self._evidence)
else:
return self.variables
@property
def evidential(self):
"""
Returns the evidential algorithm variables
"""
return self._evidence
@property
def factors(self):
return self._inner_model.factors
@property
def non_evidential(self):
return tuple(var for var in self.variables if not var.is_evidential())
@property
def pd(self):
"""
Returns the probability distribution
P(Q_1, ..., Q_s)
or if an evidence is set then
P(Q_1, ..., Q_s | E_1 = e_1, ..., E_k = e_k)
as a function of q_1, ..., q_s, where q_1, ..., q_s are in the value domains
of random variable Q_1, ..., Q_s, respectively.
The order of values must correspond to the order of variables in the query. For example,
if algorithm.set_query(difficulty, intelligence) sets random variables Difficulty and
Intelligence as the query, then algorithm.pd('d0', 'i1') returns a probability
corresponding to Difficulty = 'd0' and Intelligence = 'i1' from a pre-calculated
probability distribution.
"""
if self._distribution is not None:
def distribution(*values):
if len(values) != len(self._query):
raise ValueError(
f'the number {len(values)} of given values does not match '
f'the number {len(self._query)} of query variables'
)
for variable, value in zip(self._query, values):
if value not in variable.domain:
raise ValueError(f'value {value!r} not in domain {variable.domain} of {variable.name}')
return self._distribution[values]
return distribution
else:
raise AttributeError('distribution not computed')
@property
def query(self):
return self._query
@property
def variables(self):
return self._inner_model.variables
def check_non_empty_query(self):
if not self._query:
raise AttributeError('query not specified')
def check_one_variable_query(self):
if len(self._query) > 1:
raise ValueError('the query contains more than one variable')
if len(self._query) < 1:
raise ValueError('the query contains less than one variable')
def check_query_and_evidence_intersection(self):
if self._evidence:
query_set = set(self._query)
evidence_set = set(self._evidence)
if not query_set.isdisjoint(evidence_set):
raise ValueError(f'query variables {tuple(var.name for var in self._query)} and '
f'evidential variables {tuple(var.name for var in self._evidence)} must be disjoint')
def print_evidence(self):
if self._evidence is not None:
print('Evidence: ' + ', '.join(f'{var.name} = {var.domain[0]!r}' for var in self._evidence))
else:
print('No evidence')
def print_pd(self):
"""
Prints the complete probability distribution of the query variables
"""
if self._distribution is not None:
evidence_str = ' | ' + ', '.join(f'{var.name} = {var.domain[0]!r}' for var in self._evidence) \
if self._evidence \
else ''
for values in Variable.evaluate_variables(self._query):
query_str = 'P(' + ', '.join(f'{var.name} = {val!r}' for var, val in zip(self._query, values))
value_str = str(self.pd(*values))
equal_str = ') = '
print(query_str + evidence_str + equal_str + value_str)
else:
raise AttributeError('distribution not computed')
def print_query(self):
if self._query is not None:
print('Query: ' + ', '.join(variable.name for variable in self.query))
else:
print('No query')
def set_evidence(self, *evidence):
"""
Sets the evidence. For example,
algorithm.set_evidence((difficulty, 'd0'), (intelligence, 'i1')) assigns the
evidential values 'd0' and 'i1' to random variables Difficulty and Intelligence,
respectively.
In fact, the domain of a variable is reduced to one evidential value.
The variable is encapsulated in the algorithm (in the inner model) and the domain
of the corresponding model variable (in the outer model) is not changed.
"""
# Return the original domains of evidential variables and delete the evidence in factors
self._delete_evidence()
if evidence[0]:
self._set_evidence(*evidence)
else:
self._evidence = ()
self._set_evidence_tuples()
def set_query(self, *variables):
"""
Sets the query. For example, algorithm.set_query(difficulty, intelligence)
sets the random variables Difficulty and Intelligence as the query. The values of
variables in a computed probability distribution must have the same order. For example,
algorithm.pd('d0', 'i1') returns a probability corresponding to Difficulty = 'd0' and
Intelligence = 'i1'.
"""
if variables[0]:
self._set_query(*variables)
else:
self._query = ()
def _clear_evidence(self):
self._evidence = ()
for inner_factor in self._inner_model.factors:
inner_factor.clear_evidence()
def _delete_evidence(self):
for var in self._evidence:
var.set_domain(self._inner_to_outer_variables[var].domain)
for factor in var.factors:
factor.delete_evidence(var)
del self._evidence
self._evidence = ()
def _print_start(self):
if self._print_info:
print('*' * 40)
print(f'{self._name} started')
def _print_stop(self):
if self._print_info:
print(f'\n{self._name} stopped')
print('*' * 40)
def _set_evidence(self, *evidence_tuples):
evidence_variables = tuple(var_val[0] for var_val in evidence_tuples)
if len(evidence_variables) != len(set(evidence_variables)):
raise ValueError(f'evidence must not contain duplicates')
for outer_var, val in evidence_tuples:
try:
inner_var = self._outer_to_inner_variables[outer_var]
except KeyError:
# Also clear the evidence in the factors
self._clear_evidence()
raise ValueError(f'no model variable corresponds to evidential variable {outer_var.name}')
try:
inner_var.check_value(val)
except ValueError as exception:
# Also clear the evidence in the factors
self._clear_evidence()
raise exception
# Set the new domain containing only one value
inner_var.set_domain({val})
# Add the evidence into its factors
for inner_factor in inner_var.factors:
inner_factor.add_evidence(inner_var)
self._evidence = tuple(
sorted(
(self._outer_to_inner_variables[outer_var] for outer_var in evidence_variables),
key=lambda x: x.name
)
)
def _set_evidence_tuples(self):
self._evidence_tuples = tuple((var, var.domain[0]) for var in self._evidence)
def _set_query(self, *query_variables):
# Check whether the query has duplicates
if len(query_variables) != len(set(query_variables)):
raise ValueError(f'query must not contain duplicates')
for outer_var in query_variables:
try:
self._outer_to_inner_variables[outer_var]
except KeyError:
self._query = ()
raise ValueError(f'no model variable corresponds to query variable {outer_var.name}')
self._query = tuple(
sorted(
(self._outer_to_inner_variables[outer_var] for outer_var in query_variables),
key=lambda x: x.name
)
)
def _set_model(self, model: FactorGraph):
self._outer_model = model
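        # Work on deep copies ("inner" variables/factors) so evidence can shrink domains without mutating the user's model.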
# Create algorithm variables (inner variables)
self._inner_to_outer_variables = {}
self._outer_to_inner_variables = {}
for outer_variable in self._outer_model.variables:
inner_variable = Variable(
domain=outer_variable.domain,
name=copy.deepcopy(outer_variable.name)
)
self._inner_to_outer_variables[inner_variable] = outer_variable
self._outer_to_inner_variables[outer_variable] = inner_variable
# Create algorithm factors (inner factors)
self._inner_to_outer_factors = {}
self._outer_to_inner_factors = {}
for outer_factor in self._outer_model.factors:
inner_factor = Factor(
variables=tuple(self._outer_to_inner_variables[outer_var] for outer_var in outer_factor.variables),
function=copy.deepcopy(outer_factor.function),
name=copy.deepcopy(outer_factor.name)
)
self._inner_to_outer_factors[inner_factor] = outer_factor
self._outer_to_inner_factors[outer_factor] = inner_factor
# Create an algorithm model (an inner model)
self._inner_model = FactorGraph(factors=self._inner_to_outer_factors.keys())
| 2.796875 | 3 |
utilities/HSV_detection.py | jlittek/Anki-Vector | 0 | 12786008 | from cv2 import cv2
import numpy as np
import anki_vector
from anki_vector.util import distance_mm, speed_mmps, degrees
def empty(a):
pass
robot=anki_vector.Robot()
robot.connect()
robot.camera.init_camera_feed()
robot.behavior.set_lift_height(0.0)
robot.behavior.set_head_angle(degrees(0))
cv2.namedWindow("TrackBars")
cv2.resizeWindow("TrackBars", 640, 600)
cv2.createTrackbar("Hue Min", "TrackBars", 10, 179, empty)
cv2.createTrackbar("Hue Max", "TrackBars", 47, 179, empty)
cv2.createTrackbar("Sat Min", "TrackBars", 66, 255, empty)
cv2.createTrackbar("Sat Max", "TrackBars", 186, 255, empty)
cv2.createTrackbar("Val Min", "TrackBars", 171, 255, empty)
cv2.createTrackbar("Val Max", "TrackBars", 255, 255, empty)
while True:
h_min = cv2.getTrackbarPos("Hue Min", "TrackBars")
h_max = cv2.getTrackbarPos("Hue Max", "TrackBars")
s_min = cv2.getTrackbarPos("Sat Min", "TrackBars")
s_max = cv2.getTrackbarPos("Sat Max", "TrackBars")
v_min = cv2.getTrackbarPos("Val Min", "TrackBars")
v_max = cv2.getTrackbarPos("Val Max", "TrackBars")
img = np.array(robot.camera.latest_image.raw_image)
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
imgBlur = cv2.GaussianBlur(img, (3,3), 1)
imgHSV = cv2.cvtColor(imgBlur, cv2.COLOR_BGR2HSV)
print(h_min, h_max, s_min, s_max, v_min, v_max)
lower = np.array([h_min, s_min, v_min])
upper = np.array([h_max, s_max, v_max])
mask = cv2.inRange(imgHSV, lower, upper)
# Alternative method to find the Ball: Approximation of the area with a Polygon.
contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
peri = cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, 0.02*peri,True)
objCor = len(approx) # Number of corners
print(objCor)
x, y, w, h = cv2.boundingRect(approx)
if objCor > 6:
cv2.circle(img, center=(int(x+w/2), int(y+h/2)), radius=int((h)/2), color=(0, 255, 0), thickness=3)
cv2.imshow("Camera", img)
cv2.imshow("Mask", mask)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
| 2.921875 | 3 |
tests/test_lock_apis.py | pjz/etcd3-py | 96 | 12786009 | <filename>tests/test_lock_apis.py
import time
from threading import Thread
import pytest
from etcd3.client import Client
from tests.docker_cli import docker_run_etcd_main
from .envs import protocol, host
from .etcd_go_cli import NO_ETCD_SERVICE, etcdctl
@pytest.fixture(scope='module')
def client():
"""
init Etcd3Client, close its connection-pool when teardown
"""
_, p, _ = docker_run_etcd_main()
c = Client(host, p, protocol)
yield c
c.close()
class context:
def __init__(self):
self.exit = False
def clear():
etcdctl('del', '--from-key', '')
KEY = 'test-lock'
@pytest.mark.timeout(60)
def test_lock_flow(client):
clear()
holds = {}
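    # hold() acquires the shared lock under KEY on behalf of `name` and keeps it until ctx.exit is set, then unlocks.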
def hold(name, ctx):
lock = None
try:
lock = holds[name] = client.lock(KEY)
print("%s is holding the lock" % name)
while not ctx.exit:
time.sleep(0.5)
finally:
if lock:
client.unlock(lock.key)
holds[name] = None
ctx1 = context()
ctx2 = context()
t1 = Thread(target=lambda: hold('User1', ctx1))
t2 = Thread(target=lambda: hold('User2', ctx2))
t1.setDaemon(True)
t2.setDaemon(True)
t1.start()
time.sleep(1)
assert holds['User1'].key
key1 = holds['User1'].key
keys1 = client.range(key1)
assert keys1.kvs[0].lease
assert client.lease_time_to_live(keys1.kvs[0].lease).TTL > 0
assert 'User2' not in holds
t2.start()
time.sleep(1)
assert holds['User1'].key
key1 = holds['User1'].key
keys1 = client.range(key1)
assert keys1.kvs[0].lease
assert client.lease_time_to_live(keys1.kvs[0].lease).TTL > 0
assert 'User2' not in holds
print("killing lock1")
ctx1.exit = True
t1.join()
time.sleep(1)
assert holds['User1'] is None
# https://github.com/etcd-io/etcd/blob/3546c4868cec93e1587471b42fd815684a7dd439/clientv3/concurrency/mutex.go#L82
# only key been deleted not the lease
assert client.range(key1).kvs is None
assert holds['User2'].key
key2 = holds['User2'].key
keys2 = client.range(key2)
assert keys2.kvs[0].lease
assert client.lease_time_to_live(keys2.kvs[0].lease).TTL > 0
ctx2.exit = True
t2.join()
assert holds['User1'] is None
assert holds['User2'] is None
assert client.range(key2).kvs is None
# with lease
lease1 = client.Lease(5)
lease1.grant()
lock1 = client.lock('test_lock', lease1.ID)
assert lock1.key.startswith(b'test_lock/')
lease2 = client.Lease(15)
lease2.grant()
start_lock_ts = time.time()
client.lock('test_lock', lease2.ID)
assert (time.time() - start_lock_ts) > 3
lease2.revoke()
lease3 = client.Lease(5)
lease3.grant()
start_lock_ts = time.time()
lock3 = client.lock('test_lock', lease3.ID)
assert (time.time() - start_lock_ts) < 2
client.unlock(lock3.key)
| 2.015625 | 2 |
src/snpahoy/utilities.py | asp8200/snpahoy | 5 | 12786010 | from statistics import mean
from typing import List
from snpahoy.core import SNP
def count_heterozygotes(snps: List[SNP]) -> int:
"""Exactly as advertized. Counts the number of heterozygote sites."""
return len([snp for snp in snps if snp.is_heterozygote()])
def mean_minor_allele_frequency(snps: List[SNP]) -> float:
"""Computes the mean minor allele frequency SNPs."""
if not snps:
return 0.0
return mean([snp.minor_allele_frequency() for snp in snps])
def mean_off_genotype_frequency(snps: List[SNP]) -> float:
"""Compues the mean off genotype frequency of SNPs."""
if not snps:
return 0.0
return mean([snp.off_genotype_frequency() for snp in snps])
| 3.671875 | 4 |
tests/unit/task/contexts/network/test_existing_network.py | jogeo/rally-openstack | 0 | 12786011 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally_openstack.task.contexts.network import existing_network
from tests.unit import test
CTX = "rally_openstack.task.contexts.network"
class ExistingNetworkTestCase(test.TestCase):
def setUp(self):
super(ExistingNetworkTestCase, self).setUp()
self.config = {"foo": "bar"}
self.context = test.get_test_context()
self.context.update({
"users": [
{"id": 1,
"tenant_id": "tenant1",
"credential": mock.Mock(tenant_name="tenant_1")},
{"id": 2,
"tenant_id": "tenant2",
"credential": mock.Mock(tenant_name="tenant_2")},
],
"tenants": {
"tenant1": {},
"tenant2": {},
},
"config": {
"existing_network": self.config
},
})
@mock.patch("rally_openstack.common.osclients.Clients")
def test_setup(self, mock_clients):
clients = {
# key is tenant_name
"tenant_1": mock.MagicMock(),
"tenant_2": mock.MagicMock()
}
mock_clients.side_effect = lambda cred: clients[cred.tenant_name]
networks = {
# key is tenant_id
"tenant_1": [mock.Mock(), mock.Mock()],
"tenant_2": [mock.Mock()]
}
subnets = {
# key is tenant_id
"tenant_1": [mock.Mock()],
"tenant_2": [mock.Mock()]
}
neutron1 = clients["tenant_1"].neutron.return_value
neutron2 = clients["tenant_2"].neutron.return_value
neutron1.list_networks.return_value = {
"networks": networks["tenant_1"]}
neutron2.list_networks.return_value = {
"networks": networks["tenant_2"]}
neutron1.list_subnets.return_value = {"subnets": subnets["tenant_1"]}
neutron2.list_subnets.return_value = {"subnets": subnets["tenant_2"]}
context = existing_network.ExistingNetwork(self.context)
context.setup()
mock_clients.assert_has_calls([
mock.call(u["credential"]) for u in self.context["users"]])
neutron1.list_networks.assert_called_once_with()
neutron1.list_subnets.assert_called_once_with()
neutron2.list_networks.assert_called_once_with()
neutron2.list_subnets.assert_called_once_with()
self.assertEqual(
self.context["tenants"],
{
"tenant1": {"networks": networks["tenant_1"],
"subnets": subnets["tenant_1"]},
"tenant2": {"networks": networks["tenant_2"],
"subnets": subnets["tenant_2"]},
}
)
def test_cleanup(self):
# NOTE(stpierre): Test that cleanup is not abstract
existing_network.ExistingNetwork({"task": mock.MagicMock()}).cleanup()
| 1.851563 | 2 |
train.py | DheerajRacha/Image-Segmentation | 1 | 12786012 | <filename>train.py
import os
import sys
import csv
import numpy as np
import tensorflow as tf
from arguments import flags
from DataLoader import DataLoader
from models.DeepLabV3 import build_deeplabv3
from utils import evaluate_segmentation
def train(args):
image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name="InputImage")
annotation = tf.placeholder(tf.int32, shape=[None, None, None, args.num_class], name="Annotation")
output_logits, init_fn = build_deeplabv3(inputs=image,
num_classes=args.num_class,
pretrained_dir=args.resnet_ckpt)
loss_fn = tf.reduce_mean((tf.nn.softmax_cross_entropy_with_logits(logits=output_logits,
labels=annotation, name="Loss")))
theta = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
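    # Clip gradients to a global norm of 5 before applying the Adam update.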
grads, _ = tf.clip_by_global_norm(tf.gradients(loss_fn, theta), 5)
optimizer = tf.train.AdamOptimizer(args.learning_rate, beta1=0.5, beta2=0.99)
train_op = optimizer.apply_gradients(zip(grads, theta))
sess = tf.Session()
saver = tf.train.Saver(max_to_keep=10)
init_fn(sess)
sess.run(tf.global_variables_initializer())
for epoch in range(args.num_epochs):
print("Epoch: {}".format(epoch))
''' Train for one epoch '''
data_loader = DataLoader(args.train_dir, "train")
load_batch = data_loader.load_data(args.batch_size)
num_batches = int(data_loader.dataset_size / args.batch_size)
for batch in range(num_batches):
x_batch, y_batch = next(load_batch)
feed_dict = {image: x_batch, annotation: y_batch}
sess.run([train_op], feed_dict=feed_dict)
if batch % 1000 == 0:
sys.stdout.write("\tBatch: " + str(batch) + "\r")
sys.stdout.flush()
if epoch == 0:
eval_file = open("evaluation.txt", "w")
eval_file.write("Epoch, avg_accuracy, precision, recall, f1 score, mean iou\n")
writer = csv.writer(eval_file)
else:
eval_file = open("evaluation.txt", "a")
writer = csv.writer(eval_file)
''' Evaluate on Training Dataset '''
loss_train = 0
load_batch = data_loader.load_data(args.batch_size)
print("\n\tLoss on Train Dataset")
for batch in range(num_batches):
x_batch, y_batch = next(load_batch)
feed_dict = {image: x_batch, annotation: y_batch}
loss_train += sess.run(loss_fn, feed_dict=feed_dict)
if batch % 1000 == 0:
sys.stdout.write("\t\tBatch: %d\r" % batch)
sys.stdout.flush()
print("\tTrain Loss: " + str(loss_train))
''' Evaluate on Validation Dataset '''
loss_val = 0
scores_list, class_scores_list, precision_list, recall_list, f1_list, iou_list = [], [], [], [], [], []
data_loader = DataLoader(args.val_dir, "train")
load_batch = data_loader.load_data(args.batch_size)
num_batches = int(data_loader.dataset_size / args.batch_size)
print("\n\tLoss on Validation Dataset")
for batch in range(num_batches):
x_batch, y_batch = next(load_batch)
feed_dict = {image: x_batch, annotation: y_batch}
            # Fetch the network's predictions (not the fed annotation placeholder) so the metrics score real output.
            prediction_batch, loss = sess.run([output_logits, loss_fn], feed_dict=feed_dict)
loss_val += loss
for pred, annot in zip(prediction_batch, y_batch):
accuracy, class_accuracies, prec, rec, f1, iou = evaluate_segmentation(pred=pred,
label=annot,
num_classes=args.num_class)
scores_list.append(accuracy)
class_scores_list.append(class_accuracies)
precision_list.append(prec)
recall_list.append(rec)
f1_list.append(f1)
iou_list.append(iou)
if batch % 100 == 0:
sys.stdout.write("\t\tBatch: %d\r" % batch)
sys.stdout.flush()
avg_accuracy = np.mean(scores_list)
avg_prec = np.mean(precision_list)
avg_rec = np.mean(recall_list)
avg_f1 = np.mean(f1_list)
avg_iou = np.mean(iou_list)
fields = [epoch, avg_accuracy, avg_prec, avg_rec, avg_f1, avg_iou]
writer.writerow(fields)
print("\tValidation Loss: " + str(loss_val))
''' Save Checkpoints for every 10 epochs '''
if epoch % 10 == 0:
checkpoint_path = os.path.join(args.ckpt_dir, str(epoch))
os.makedirs(checkpoint_path)
checkpoint_path = os.path.join(checkpoint_path, "checkpoints.ckpt")
saver.save(sess, checkpoint_path)
fields = [loss_train, loss_val]
if epoch == 0:
with open("losses.txt", "w") as f:
writer = csv.writer(f)
writer.writerow(fields)
else:
with open("losses.txt", "a") as f:
writer = csv.writer(f)
writer.writerow(fields)
latest = os.path.join(args.ckpt_dir, "latest_checkpoints")
if not os.path.isdir(latest):
os.makedirs(latest)
saver.save(sess, latest)
if __name__ == "__main__":
train(flags())
| 2.640625 | 3 |
app/core/parser/parser.py | SLB974/GrandPyBot-dev | 0 | 12786013 | # coding: utf-8
from unidecode import unidecode
import re
from .utils import stop_words
class Parser:
"""Parse user's query"""
def __init__(self, user_query):
self.user_query = user_query
def clean_string(self):
"""remove accents, upper and punctuation
and split into list
compare to stop_words reference and remove found items"""
cleaned = unidecode(self.user_query).lower()
cleaned = re.compile("\w+").findall(cleaned)
return [item for item in cleaned if item not in stop_words]
| 3.578125 | 4 |
python/tools/cepton_georeference.py | frank-qcd-qk/cepton_sdk_redist | 23 | 12786014 | <gh_stars>10-100
#!/usr/bin/env python3
"""
Sample script to combine LiDAR data to generate point cloud.
"""
import argparse
import datetime
import json
import os.path
import matplotlib.pyplot
import numpy
import pytz
import scipy.interpolate
import scipy.spatial.transform
import utm
from mpl_toolkits.mplot3d import Axes3D
import cepton_sdk.export
import cepton_sdk.load
import cepton_sdk.plot
from cepton_sdk.common import *
from cepton_util.common import *
def from_gps_time(weeks, seconds):
d = datetime.datetime(1980, 1, 6) + \
datetime.timedelta(weeks=weeks, seconds=seconds)
# leapseconds
d -= datetime.timedelta(seconds=18)
return pytz.utc.localize(d).timestamp()
class Transforms(StructureOfArrays):
def __init__(self, n=0):
super().__init__(n)
self.timestamps = numpy.zeros([n])
self.translations = numpy.zeros([n, 3])
self.quaternions = numpy.zeros([n, 4])
@classmethod
def _get_array_member_names(cls):
return ["timestamps", "translations", "quaternions"]
@property
def rotations(self):
return scipy.spatial.transform.Rotation(self.quaternions)
def main():
parser = argparse.ArgumentParser(usage="%(prog)s [OPTIONS]",)
parser.add_argument("--downsample", action="store_true")
parser.add_argument("--duration", default="-1",
help="Export duration (if negative, export entire file).")
all_file_types = [x.name for x in cepton_sdk.export.PointsFileType]
parser.add_argument("--format", default="LAS", choices=all_file_types,
type=str.upper, help="Output file format.")
parser.add_argument("-o", "--output_path", help="Output path.")
parser.add_argument("--raw", action="store_true")
parser.add_argument("--plot", action="store_true")
parser.add_argument("--points_path", help="Path to points", required=True)
parser.add_argument(
"--serial_path", help="Path to serial data", required=True)
parser.add_argument("--version", action="version",
version="cepton_sdk {}".format(cepton_sdk.__version__))
args = parser.parse_args()
duration = parse_time_hms(args.duration)
file_type = cepton_sdk.export.PointsFileType[args.format]
assert (file_type in [cepton_sdk.export.PointsFileType.LAS,
cepton_sdk.export.PointsFileType.PLY])
timer = SimpleTimer()
    # Resolve the input paths first; they are needed below before loading.
    points_path = fix_path(args.points_path)
    serial_path = fix_path(args.serial_path)
    # Load odometry transform
    settings_dir = os.path.dirname(points_path)
    transforms_path = os.path.join(settings_dir, "cepton_transforms.json")
    if not os.path.isfile(transforms_path):
        transforms_path = None
    transform_manager = cepton_sdk.load.load_transforms(transforms_path)
    odometry_transform = transform_manager.transforms[1]
    # Load points
    points = cepton_sdk.export.load_points_las(points_path)[0]
if duration > 0:
is_valid = points.timestamps < points.timestamps[0] + duration
points = points[is_valid]
odometry_path = os.path.join(os.path.dirname(serial_path), "odometry.txt")
if os.path.isfile(odometry_path):
# Load odometry
with open(odometry_path, "r") as f:
lines = f.readlines()
transforms = Transforms(len(lines))
for i, line in enumerate(lines):
transform_dict = json.loads(line)
transforms.timestamps[i] = transform_dict["timestamp"]
transforms.translations[i, :] = transform_dict["translation"]
transforms.quaternions[i, :] = transform_dict["rotation"]
else:
# Load serial
serial_path = fix_path(args.serial_path)
with open(serial_path, "r") as f:
serial_lines = f.readlines()
transforms = Transforms(len(serial_lines))
i_transform = 0
for line in serial_lines:
if line.startswith("#INSPVA"):
# Novatel
line = line.split("*")[0]
header, data = line.split(";")
header = header.split(",")
data = [None, None] + data.split(",")
if len(data) != 14:
continue
if data[13] != "INS_SOLUTION_GOOD":
continue
transforms.timestamps[i_transform] = \
from_gps_time(float(data[2]), float(data[3]))
transforms.translations[i_transform, :2] = utm.from_latlon(
float(data[4]), float(data[5]))[:2]
if i_transform == 0:
print("UTM: {}".format(utm.from_latlon(
float(data[4]), float(data[5]))[2:]))
transforms.translations[i_transform, 2] = float(data[6])
transforms.quaternions[i_transform, :] = \
scipy.spatial.transform.Rotation.from_euler(
"zxy",
[-float(data[12]), float(data[11]), float(data[10])],
degrees=True).as_quat()
i_transform += 1
transforms = transforms[:i_transform]
assert (numpy.all(numpy.diff(transforms.timestamps) > 0))
# Save odometry
with open(odometry_path, "w") as f:
for i in range(len(transforms)):
transform_dict = {
"timestamp": transforms.timestamps[i],
"translation": transforms.translations[i, :].tolist(),
"rotation": transforms.quaternions[i, :].tolist(),
}
f.write(json.dumps(transform_dict) + "\n")
# DEBUG
# print(datetime.datetime.utcfromtimestamp(transforms.timestamps[0]))
# print(datetime.datetime.utcfromtimestamp(points.timestamps[0]))
# print(datetime.datetime.utcfromtimestamp(points.timestamps[-1]))
# print(datetime.datetime.utcfromtimestamp(transforms.timestamps[-1]))
# Plot point timestamps
# matplotlib.pyplot.plot(transforms.timestamps)
# # matplotlib.pyplot.plot(points.timestamps)
# matplotlib.pyplot.show()
# return
# Plot 3d trajectory
# fig = matplotlib.pyplot.figure()
# ax = fig.add_subplot(projection="3d")
# matplotlib.pyplot.plot(
# transforms.translations[:, 0], transforms.translations[:, 1],
# transforms.translations[:, 2], 'o')
# matplotlib.pyplot.show()
# return
# Plot 2d trajectory with directions
# matplotlib.pyplot.axis("equal")
# matplotlib.pyplot.plot(
# transforms.translations[:, 0], transforms.translations[:, 1])
# directions = numpy.zeros([len(transforms), 3])
# directions[:, 1] = 1.0
# directions = transforms.rotations.apply(directions)
# matplotlib.pyplot.quiver(
# transforms.translations[::10, 0], transforms.translations[::10, 1],
# directions[::10, 0], directions[::10, 1])
# matplotlib.pyplot.show()
# return
indices = numpy.arange(0, len(points))
# Apply pose
if not args.raw:
is_valid = numpy.logical_and(
points.timestamps > transforms.timestamps[0],
points.timestamps < transforms.timestamps[-1])
indices = indices[is_valid]
translations_tmp = scipy.interpolate.interp1d(
transforms.timestamps, transforms.translations, axis=0)(
points.timestamps[indices])
rotations_tmp = \
scipy.spatial.transform.Slerp(
transforms.timestamps, transforms.rotations)(
points.timestamps[indices])
odometry_transform_inv = numpy.linalg.inv(
odometry_transform.to_matrix())
points.positions[indices, :] = numpy.matmul(
points.positions[indices, :], odometry_transform_inv[:3, :3].transpose()) + \
odometry_transform_inv[:3, 3]
points.positions[indices, :] = \
rotations_tmp.apply(points.positions[indices, :]) + \
translations_tmp
# Grid downsample
if args.downsample:
grid_lb = numpy.amin(points.positions, axis=0)
grid_ub = numpy.amax(points.positions, axis=0)
grid_spacing = numpy.full([3], 0.01)
grid_shape = ((grid_ub - grid_lb) / grid_spacing).astype(int)
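        # Keep at most one point per 1 cm voxel: hash each position to a flat grid index and drop duplicate cells.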
def get_flat_grid_indices(positions):
grid_indices = ((positions - grid_lb) / grid_spacing).astype(int)
is_valid = numpy.logical_and(
numpy.all(grid_indices >= 0, axis=1),
numpy.all(grid_indices < grid_shape, axis=1))
flat_grid_indices = numpy.full(grid_indices.shape[0], -1)
flat_grid_indices[is_valid] = numpy.ravel_multi_index(
grid_indices[is_valid, :].transpose(), grid_shape)
return flat_grid_indices
grid_indices = get_flat_grid_indices(points.positions[indices, :])
is_valid = grid_indices >= 0
indices = indices[is_valid]
grid_indices = grid_indices[is_valid]
is_valid = numpy.unique(grid_indices, return_index=True)[1]
indices = indices[is_valid]
grid_indices = grid_indices[is_valid]
assert (len(indices) > 0)
points = points[indices]
# Save
if args.output_path is not None:
if file_type is not cepton_sdk.export.PointsFileType.LAS:
points.positions[:, :] -= numpy.mean(points.positions, axis=0)
output_ext = \
cepton_sdk.export.get_points_file_type_extension(file_type)
output_path = set_extension(fix_path(args.output_path), output_ext)
cepton_sdk.export.save_points(points, output_path, file_type=file_type)
# Check
points_tmp = cepton_sdk.export.load_points(output_path)[0]
assert (numpy.max(numpy.abs(points.positions - points_tmp.positions)) < 1e-3)
points = points_tmp
# Plot
if args.plot:
cepton_sdk.plot.plot_points(points)
if __name__ == "__main__":
main()
| 2.296875 | 2 |
generator/storage_spider_template.django.py | chongiadung/choinho | 0 | 12786015 | <reponame>chongiadung/choinho<filename>generator/storage_spider_template.django.py
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "{{ spider.xpath.name|safe }}",
'price' : "{{ spider.xpath.price|safe }}",
'category' : "{{ spider.xpath.category|safe}}",
'description' : "{{ spider.xpath.description|safe}}",
'images' : "{{ spider.xpath.images|safe}}",
'canonical' : "{{spider.xpath.canonical|safe}}",
'base_url' : "{{spider.xpath.base_url|safe}}",
'brand' : "{{spider.xpath.brand|safe}}",
'in_stock' : "{{spider.xpath.in_stock|safe}}",
'guarantee' : "{{spider.xpath.guarantee|safe}}",
'promotion' : "{{spider.xpath.promotion|safe}}"
}
name = '{{ spider.spider }}'
allowed_domains = ['{{ spider.allowed_domain }}']
start_urls = ['{{ spider.start_url|safe }}']
tracking_url = '{{ spider.tracking_url|safe }}'
sitemap_urls = ['{{ spider.sitemap_urls|safe }}']
sitemap_rules = [('{{ spider.sitemap_rules|safe }}', 'parse_item')]
sitemap_follow = ['{{ spider.sitemap_follow|safe }}']
rules = [
{{ spider.hashtag_no_rule|safe }}Rule(LinkExtractor({{ spider.item_url_pattern|safe }}), 'parse_item'),
{{ spider.hashtag_no_rule|safe }}Rule(LinkExtractor({{ spider.follow_link_pattern|safe }}), 'parse'),
{{ spider.hashtag_all_rule|safe }}Rule(LinkExtractor({{ spider.all_links_pattern|safe }}), 'parse_item_and_links'),
]
| 1.789063 | 2 |
km_api/rest_order/mixins.py | knowmetools/km-api | 4 | 12786016 | from rest_framework.response import Response
class SortModelMixin(object):
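    """Mixin that reorders a parent's child collection based on a sort request.
    Subclasses are expected to set ``sort_child_name``, ``sort_parent`` and ``sort_serializer``.
    """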
sort_child_name = None
sort_parent = None
sort_serializer = None
def get_sort_serializer(self, *args, **kwargs):
serializer_class = self.sort_serializer
kwargs["context"] = self.get_serializer_context()
return serializer_class(*args, **kwargs)
def sort(self, request, *args, **kwargs):
parent_pk = kwargs.get("pk", None)
parent = self.sort_parent.objects.get(pk=parent_pk)
serializer = self.get_sort_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save(parent)
collection = getattr(parent, self.sort_child_name).all()
serializer = self.get_serializer(collection, many=True)
return Response(serializer.data)
| 2.15625 | 2 |
app/audit/tasks.py | getmetamapper/metamapper | 53 | 12786017 | # -*- coding: utf-8 -*-
from metamapper.celery import app
from datetime import timedelta
from django.utils.timezone import now
from app.audit.models import Activity
@app.task(bind=True)
def audit(self,
actor_id,
workspace_id,
verb,
old_values,
new_values,
extras=None,
target_object_id=None,
target_content_type_id=None,
action_object_object_id=None,
action_object_content_type_id=None):
"""Task to commit an audit activity to a database.
"""
activity_kwargs = {
'actor_id': actor_id,
'workspace_id': workspace_id,
'verb': verb,
'target_object_id': target_object_id,
'target_content_type_id': target_content_type_id,
'action_object_object_id': action_object_object_id,
'action_object_content_type_id': action_object_content_type_id,
}
defaults = {
'extras': extras or {},
'timestamp': now(),
'old_values': old_values,
'new_values': new_values,
}
datefrom = now() - timedelta(minutes=15)
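    # Collapse repeated edits to the same fields within the last 15 minutes into a single activity record.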
queryset = (
Activity.objects
.filter(**activity_kwargs)
.filter(timestamp__gte=datefrom)
)
for field in old_values.keys():
queryset = queryset.filter(old_values__has_key=field)
activity = queryset.first()
if activity:
activity.update_attributes(**defaults)
else:
activity_kwargs.update(defaults)
activity = Activity.objects.create(**activity_kwargs)
return activity.pk
| 2.015625 | 2 |
test/swift_project_test.py | Dan2552/SourceKittenSubl | 163 | 12786018 | from src import swift_project
from helpers import path_helper
import unittest
class TestSourceKitten(unittest.TestCase):
# Test with a simple project directory
# (i.e. without xcodeproj)
def test_source_files_simple_project(self):
project_directory = path_helper.monkey_example_directory()
output = swift_project.source_files(project_directory)
expectation = [
project_directory + "/Banana.swift",
project_directory + "/Monkey.swift"
]
self.assertEqual(sorted(list(output)), sorted(expectation))
| 2.625 | 3 |
pymodaq_plugins/daq_viewer_plugins/plugins_2D/daq_2Dviewer_AndorCCD.py | SofyMeu/pymodaq_plugins | 0 | 12786019 | from pymodaq_plugins.hardware.andor.daq_AndorSDK2 import DAQ_AndorSDK2
class DAQ_2DViewer_AndorCCD(DAQ_AndorSDK2):
"""
=============== ==================
=============== ==================
See Also
--------
utility_classes.DAQ_Viewer_base
"""
control_type = "camera" #could be "camera", "shamrock" or "both"
hardware_averaging = False
def __init__(self, *args, **kwargs):
super(DAQ_2DViewer_AndorCCD, self).__init__(*args, control_type=self.control_type, **kwargs)
| 2.28125 | 2 |
Cura/Cura/plugins/VersionUpgrade/VersionUpgrade33to34/VersionUpgrade33to34.py | TIAO-JI-FU/3d-printing-with-moveo-1 | 0 | 12786020 | <reponame>TIAO-JI-FU/3d-printing-with-moveo-1<filename>Cura/Cura/plugins/VersionUpgrade/VersionUpgrade33to34/VersionUpgrade33to34.py
# Copyright (c) 2018 <NAME>.
# Cura is released under the terms of the LGPLv3 or higher.
import configparser #To parse preference files.
import io #To serialise the preference files afterwards.
from typing import Dict, List, Tuple
from UM.VersionUpgrade import VersionUpgrade #We're inheriting from this.
_renamed_settings = {
"infill_hollow": "infill_support_enabled"
} # type: Dict[str, str]
## Upgrades configurations from the state they were in at version 3.3 to the
# state they should be in at version 3.4.
class VersionUpgrade33to34(VersionUpgrade):
## Gets the version number from a CFG file in Uranium's 3.3 format.
#
# Since the format may change, this is implemented for the 3.3 format only
# and needs to be included in the version upgrade system rather than
# globally in Uranium.
#
# \param serialised The serialised form of a CFG file.
# \return The version number stored in the CFG file.
# \raises ValueError The format of the version number in the file is
# incorrect.
# \raises KeyError The format of the file is incorrect.
def getCfgVersion(self, serialised: str) -> int:
parser = configparser.ConfigParser(interpolation = None)
parser.read_string(serialised)
format_version = int(parser.get("general", "version")) #Explicitly give an exception when this fails. That means that the file format is not recognised.
setting_version = int(parser.get("metadata", "setting_version", fallback = "0"))
return format_version * 1000000 + setting_version
## Upgrades instance containers to have the new version
# number.
def upgradeInstanceContainer(self, serialized: str, filename: str) -> Tuple[List[str], List[str]]:
parser = configparser.ConfigParser(interpolation = None)
parser.read_string(serialized)
# Update version number.
parser["general"]["version"] = "4"
if "values" in parser:
#If infill_hollow was enabled and the overhang angle was adjusted, copy that overhang angle to the new infill support angle.
if "infill_hollow" in parser["values"] and parser["values"]["infill_hollow"] and "support_angle" in parser["values"]:
parser["values"]["infill_support_angle"] = parser["values"]["support_angle"]
#Renamed settings.
for original, replacement in _renamed_settings.items():
if original in parser["values"]:
parser["values"][replacement] = parser["values"][original]
del parser["values"][original]
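        # Illustrative example (added, not in the original plugin): a 3.3
        # profile whose [values] section contains
        #     infill_hollow = True
        #     support_angle = 60
        # ends up with infill_support_enabled = True and
        # infill_support_angle = 60, while infill_hollow itself is removed.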
result = io.StringIO()
parser.write(result)
return [filename], [result.getvalue()] | 2.390625 | 2 |
general.py | SoniramSotirach/SecSerres1 | 0 | 12786021 | import pandas as pd
file = r'file.log'
cols=['host','1','userid','date','tz','endpoint','status','data','referer','user_agent']
df = pd.read_csv(file, delim_whitespace=True, names=cols).drop(columns='1')
print (df.head())
unique_ip=df.host.unique()
print(unique_ip)
total = df['data'].sum()
print('the total server traffic (bytes) is:', total)
status_freq = pd.DataFrame(columns=['status', 'Frequency'])
status_freq['Frequency'] = df.groupby('status').size()
# The group key is already the status code, so take it from the index instead
# of re-aggregating (the original lambda broke for groups with a single row).
status_freq['status'] = status_freq.index
ap = status_freq[status_freq['status'] >= 500].sum()
print('requests that generated a 5xx server error:', ap['Frequency'])
print('distinct IPs that visited the server:', len(unique_ip)) | 3.046875 | 3 |
doc/rexi/strategies_for_solving_rexi_terms/helmholtz_problem_fault_tolerance.py | valentinaschueller/sweet | 6 | 12786022 | <gh_stars>1-10
#! /usr/bin/env python3
import math
a_list = [ -0.86304 + 0j,
-0.86304 + 1j,
-0.86304 + 10j,
-0.86304 + 100j,
-0.86304 + 1000j
]
f = 1.0
g = 1.0
eta0_hat = 1.0
u0_hat = 1.0
v0_hat = 0.5
eta_bar = 1.0
for a in a_list:
for k in [0.01, 0.1, 1.0]:
# for k in [0.1]:
lhs = (a*a+f*f)+g*eta_bar*(2.0*math.pi*k)**2
rhs = (f*f+a*a)/a * eta0_hat - eta_bar *1.0j*2.0*math.pi*k*(u0_hat + v0_hat) - (f * eta_bar / a) *1.0j*2.0*math.pi*k*(v0_hat - u0_hat)
eta_hat = rhs / lhs
print("a="+str(a)+", k="+str(k)+" > "+str(eta_hat))
| 2.875 | 3 |
till_looping/4_3.py | mdazharuddin1011999/IoT_Assignment_2 | 0 | 12786023 | <filename>till_looping/4_3.py
from math import factorial
x = int(input("Enter x: "))
n = int(input("Enter n: "))
print(1+sum([x**i/factorial(i) for i in range(2,n+1, 2)])) | 3.46875 | 3 |
tests/test_persistence.py | DiscoverAI/pungi | 0 | 12786024 | <reponame>DiscoverAI/pungi
import pungi.persistence as persistence
from collections import defaultdict
import os
import shutil
def test_save_simple_q_table():
test_q_table = defaultdict(lambda: (5, 5, 5, 5, 5))
test_q_table[0] = (4, 5, 6, 7, 8)
test_q_table[(1, 2)] = (7, 5, 4, 3, 2)
test_dir = "./tests/out/"
filename = "model.pkl"
if not os.path.exists(test_dir):
os.makedirs(test_dir)
persistence.save_q_table(test_q_table, test_dir + filename)
loaded = persistence.load_q_table(test_dir + filename)
assert test_q_table == loaded
assert (5, 5, 5, 5, 5) == loaded["non_existing_key"]
shutil.rmtree(test_dir)
| 2.453125 | 2 |
algorithms/python/leetcode/x3Sum.py | ytjia/coding-pratice | 0 | 12786025 | # -*- coding: utf-8 -*-
# Authors: <NAME> <<EMAIL>>
"""
Given an array S of n integers, are there elements a, b, c in S such that a + b + c = 0? Find all
unique triplets in the array which gives the sum of zero.
https://leetcode.com/problems/3sum/description/
"""
class Solution(object):
def threeSum(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
triplets_list = list()
if nums is None or len(nums) < 3:
return triplets_list
nums.sort()
i = 0
pre_i_value = None
while i < len(nums) - 2:
if nums[i] == pre_i_value:
i += 1
continue
else:
pre_i_value = nums[i]
l, j = i + 1, len(nums) - 1
while l < j:
s = nums[i] + nums[j] + nums[l]
if s < 0:
l += 1
elif s > 0:
j -= 1
else:
triplets_list.append([nums[i], nums[j], nums[l]])
while l < j and nums[l] == nums[l + 1]:
l += 1
while l < j and nums[j] == nums[j - 1]:
j -= 1
l += 1
j -= 1
i += 1
return triplets_list
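# Illustrative usage (added sketch, not part of the original submission): the
# classic sample input yields the two unique triplets {-1, -1, 2} and
# {-1, 0, 1}; the element order inside each triplet follows the i/j/l scan
# order used above.
if __name__ == "__main__":
    print(Solution().threeSum([-1, 0, 1, 2, -1, -4]))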
| 3.4375 | 3 |
cc_backend_lib/models/country.py | prio-data/cc_backend_lib | 0 | 12786026 | <reponame>prio-data/cc_backend_lib
from typing import List, Optional
from pydantic import BaseModel
from geojson_pydantic import features, geometries
class Country(features.Feature):
class Meta:
QUERY_ORDER = ["gwno","name","iso2c","shape"]
properties: "CountryProperties"
@classmethod
def from_row(cls,
gwno: int,
name: str,
iso2c: str,
shape: geometries.Geometry):
return cls(
geometry = shape,
properties = CountryProperties(
gwno = gwno,
name = name,
iso2c = iso2c
),
id = gwno
)
class CountryIdentity(BaseModel):
gwno: int
name: str
class CountryProperties(CountryIdentity):
iso2c: Optional[str] = None
predictions: Optional[int] = None
participants:Optional[int] = None
class CountryPropertiesList(BaseModel):
countries: List[CountryProperties]
class CountryList(BaseModel):
countries: List[CountryIdentity]
Country.update_forward_refs()
| 2.4375 | 2 |
Ago-Dic-2021/fernandez-salinas-cristian-alejandro/Practica3/calculator_test.py | CristianF50/DAS_Sistemas | 41 | 12786027 | import unittest
from calculator import *
class CalculatorTest(unittest.TestCase):
def test_suma_dos_numeros(self):
calc = Calculator(5, 10)
self.assertEqual(15, calc.suma())
def test_resta_dos_numeros(self):
calc = Calculator(19, 8)
self.assertEqual(11, calc.resta())
def test_multiplica_dos_numeros(self):
calc = Calculator(42, 2)
self.assertEqual(84, calc.multiplicacion())
def test_divide_dos_numeros(self):
calc = Calculator(18, 3)
self.assertEqual(6, calc.division())
def test_potencia_de_un_numero(self):
calc = Calculator(3, 3)
self.assertEqual(27, calc.potencia())
def test_raiz_de_un_numero(self):
calc = Calculator(216, 3)
self.assertEqual(6, calc.raiz())
def test_dividir_entre_cero(self):
calc = Calculator(25, 0)
self.assertEqual(0, calc.division())
def test_dividir_entre_cero_2(self):
calc = Calculator(0, 25)
self.assertEqual(0, calc.division())
def test_raiz_num_negativo(self):
calc = Calculator(-8,2)
self.assertEqual(0, calc.raiz())
if __name__ == "__main__":
unittest.main() | 3.71875 | 4 |
tests/cmdline/test_plot.py | JuDFTteam/masci-tools | 15 | 12786028 | <filename>tests/cmdline/test_plot.py
# -*- coding: utf-8 -*-
"""
Test of the plot commands in the cli
Here we do not test the actual content of the plot but only that the
commands work without error
"""
from pathlib import Path
import os
import pytest
def test_fleur_dos():
"""
Test of the fleur-dos routine without options
"""
from masci_tools.cmdline.commands.plot import plot_fleur_banddos_dos
from click.testing import CliRunner
TEST_FILE = Path(__file__).parent.resolve() / Path('../files/hdf5_reader/banddos_dos.hdf')
runner = CliRunner()
args = [os.fspath(TEST_FILE), '--save']
with runner.isolated_filesystem():
result = runner.invoke(plot_fleur_banddos_dos, args)
print(result.output)
        assert result.exception is None, f'An unexpected exception occurred: {result.exception}'
assert os.path.isfile('dos_plot.png')
def test_fleur_dos_recipe():
"""
Test of the fleur-dos routine with options
"""
from masci_tools.cmdline.commands.plot import plot_fleur_banddos_dos
from click.testing import CliRunner
TEST_FILE = Path(__file__).parent.resolve() / Path('../files/hdf5_reader/banddos_spinpol_dos.hdf')
runner = CliRunner()
args = [os.fspath(TEST_FILE), '--save', '--recipe', 'FleurJDOS', '--l_resolved', 'all', '--interstitial', 'False']
with runner.isolated_filesystem():
with pytest.warns(UserWarning):
result = runner.invoke(plot_fleur_banddos_dos, args)
print(result.output)
        assert result.exception is None, f'An unexpected exception occurred: {result.exception}'
assert os.path.isfile('dos_plot.png')
def test_fleur_bands():
"""
Test of the fleur-bands routine without options
"""
from masci_tools.cmdline.commands.plot import plot_fleur_banddos_bands
from click.testing import CliRunner
TEST_FILE = Path(__file__).parent.resolve() / Path('../files/hdf5_reader/banddos_bands.hdf')
runner = CliRunner()
args = [os.fspath(TEST_FILE), '--save']
with runner.isolated_filesystem():
result = runner.invoke(plot_fleur_banddos_bands, args)
print(result.output)
        assert result.exception is None, f'An unexpected exception occurred: {result.exception}'
assert os.path.isfile('bandstructure.png')
def test_fleur_bands_recipe():
"""
Test of the fleur-bands routine with options
"""
from masci_tools.cmdline.commands.plot import plot_fleur_banddos_bands
from click.testing import CliRunner
TEST_FILE = Path(__file__).parent.resolve() / Path('../files/hdf5_reader/banddos_bands.hdf')
runner = CliRunner()
args = [os.fspath(TEST_FILE), '--save', '--weight', 'MT:1s']
with runner.isolated_filesystem():
result = runner.invoke(plot_fleur_banddos_bands, args)
print(result.output)
        assert result.exception is None, f'An unexpected exception occurred: {result.exception}'
assert os.path.isfile('bandstructure.png')
| 2.15625 | 2 |
vietnamese_utils.py | khiemdoan/aivivn_vietnamese_tone_prediction | 1 | 12786029 | <filename>vietnamese_utils.py
import re
import string
uni_chars_l = 'áàảãạâấầẩẫậăắằẳẵặđèéẻẽẹêếềểễệíìỉĩịóòỏõọôốồổỗộơớờởỡợúùủũụưứừửữựýỳỷỹỵ'
uni_chars_u = 'ÁÀẢÃẠÂẤẦẨẪẬĂẮẰẲẴẶĐÈÉẺẼẸÊẾỀỂỄỆÍÌỈĨỊÓÒỎÕỌÔỐỒỔỖỘƠỚỜỞỠỢÚÙỦŨỤƯỨỪỬỮỰÝỲỶỸỴ'
AEIOUYD = ['a', 'e', 'i', 'o', 'u', 'y', 'd']
A_FAMILY = list('aáàảãạăắằẳẵặâấầẩẫậ')
E_FAMILY = list('eéèẻẽẹêếềểễệ')
I_FAMILY = list('iíìỉĩị')
O_FAMILY = list('oóòỏõọôốồổỗộơớờởỡợ')
U_FAMILY = list('uúùủũụưứừửữự')
Y_FAMILY = list('yýỳỷỹỵ')
D_FAMILY = list('dđ')
tones_l = [
'1', '2', '3', '4', '5',
'6', '61', '62', '63', '64', '65',
'8', '81', '82', '83', '84', '85',
'9', '1', '2', '3', '4', '5',
'6', '61', '62', '63', '64', '65',
'1', '2', '3', '4', '5',
'1', '2', '3', '4', '5',
'6', '61', '62', '63', '64', '65',
'7', '71', '72', '73', '74', '75',
'1', '2', '3', '4', '5',
'7', '71', '72', '73', '74', '75',
'1', '2', '3', '4', '5'
]
tones_u = [
'1', '2', '3', '4', '5',
'6', '61', '62', '63', '64', '65',
'8', '81', '82', '83', '84', '85',
'9', '1', '2', '3', '4', '5',
'6', '61', '62', '63', '64', '65',
'1', '2', '3', '4', '5',
'1', '2', '3', '4', '5',
'6', '61', '62', '63', '64', '65',
'7', '71', '72', '73', '74', '75',
'1', '2', '3', '4', '5',
'7', '71', '72', '73', '74', '75',
'1', '2', '3', '4', '5'
]
no_tone_chars_l = 'a'*17 + 'd' + 'e'*11 + 'i'*5 + 'o'*17 + 'u'*11 + 'y'*5
no_tone_chars_u = 'A'*17 + 'D' + 'E'*11 + 'I'*5 + 'O'*17 + 'U'*11 + 'Y'*5
tones_dict = dict(zip(uni_chars_l + uni_chars_u, tones_l + tones_u))
no_tone_dict = dict(zip(uni_chars_l + uni_chars_u, no_tone_chars_l + no_tone_chars_u))
def decompose_predicted(text: str) -> str:
text = [c for c in text if c in tones_dict]
if len(text) == 0:
return '0'
text = [tones_dict[c] for c in text]
return ''.join(text)
def remove_vietnamese_tone(text: str) -> str:
text = [no_tone_dict.get(c, c) for c in text]
return ''.join(text)
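# Illustrative example (added, not part of the original module), based on the
# mapping tables above:
#     remove_vietnamese_tone("Tiếng Việt")  ->  "Tieng Viet"
# decompose_predicted returns a string of tone codes for the accented letters
# only, or "0" when the input contains no accented letter.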
def is_vietnamese_word(word: str) -> bool:
word = remove_vietnamese_tone(word)
word = word.lower()
pattern = r'^[bcdghklmnpqrstvx]{0,3}[aeiouy]{1,3}[bcdghklmnpqrstvx]{0,2}$'
return bool(re.match(pattern, word))
def get_vietnamese_alphabet() -> str:
return string.ascii_lowercase + tones_l
| 2.3125 | 2 |
nlpipe/Tools/alpino.py | ccs-amsterdam/nlpipe | 0 | 12786030 | <gh_stars>0
"""
Wrapper around the RUG Alpino Dependency parser
The module expects either ALPINO_HOME to point at the alpino installation dir
or an alpino server to be running at ALPINO_SERVER (default: localhost:5002)
You can use the following command to get the server running: (see github.com/vanatteveldt/alpino-server)
docker run -dp 5002:5002 vanatteveldt/alpino-server
If running alpino locally, note that the module needs the dependencies end_hook, which seems to be missing in
some builds. See: http://www.let.rug.nl/vannoord/alp/Alpino
"""
import csv
import json
import logging
import os
import subprocess
import requests
import tempfile
from io import StringIO
from nlpipe.Tools.toolsInterface import Tool
log = logging.getLogger(__name__)
CMD_PARSE = ["bin/Alpino", "end_hook=dependencies", "-parse"]
CMD_TOKENIZE = ["Tokenization/tok"]
class AlpinoParser(Tool):
name = "alpino"
def check_status(self): # check if the alpino server is running
if 'ALPINO_HOME' in os.environ:
alpino_home = os.environ['ALPINO_HOME']
if not os.path.exists(alpino_home):
raise Exception("Alpino not found at ALPINO_HOME={alpino_home}".format(**locals()))
else:
alpino_server = os.environ.get('ALPINO_SERVER', 'http://localhost:5002') # server runs on port 5002
r = requests.get(alpino_server) # check on the server
if r.status_code != 200:
raise Exception("No server found at {alpino_server} and ALPINO_HOME not set".format(**locals()))
def process(self, text, **kwargs): # process the test using alpino
if 'ALPINO_HOME' in os.environ: # run using command line (not using server API)
tokens = tokenize(text) # tokenize the text
return parse_raw(tokens)
else:
alpino_server = os.environ.get('ALPINO_SERVER', 'http://localhost:5002')
url = "{alpino_server}/parse".format(**locals())
body = {"text": text, "output": "dependencies"}
r = requests.post(url, json=body)
if r.status_code != 200:
raise Exception("Error calling Alpino at {alpino_server}: {r.status_code}:\n{r.content!r}"
.format(**locals()))
return r.text
def convert(self, doc_id, result, return_format):
"""
convert the text to an indicated return_format
:param doc_id: id of the document
:result: text/output to convert
:return_format: e.g., csv
:return: converted format
"""
assert return_format in ["csv"]
s = StringIO()
w = csv.writer(s) # write in csv
w.writerow(["doc", "doc_id", "sentence", "offset", "word", "lemma", "pos", "rel", "parent"]) # for each row
for line in interpret_parse(result): # read line by line and format the results
w.writerow((doc_id,) + line)
return s.getvalue()
AlpinoParser.register() # register alpino in the known_tools
def _call_alpino(command, input_text):
"""
Calls alpino given the command and input text
"""
alpino_home = os.environ['ALPINO_HOME']
p = subprocess.Popen(command, shell=False, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=alpino_home) # start a subprocess with the command
out, err = [x.decode("utf-8") for x in p.communicate(input_text.encode("utf-8"))]
if not out:
with tempfile.NamedTemporaryFile(suffix=".txt", delete=False, mode="wb") as f:
f.write(input_text.encode("utf-8"))
logging.exception("Error calling Alpino, input file written to {f.name}, command was {command}"
.format(**locals()))
raise Exception("Problem calling {command}, output was empty. Error: {err!r}".format(**locals()))
return out
def tokenize(text: str) -> str: # tokenize and replace "|"
return _call_alpino(CMD_TOKENIZE, text).replace("|", "")
def parse_raw(tokens): # parse the tokens
return _call_alpino(CMD_PARSE, tokens)
def get_fields(parse):
if parse.strip().startswith("{"):
parse = json.loads(parse)
for sid in parse:
for row in parse[sid]['triples']:
yield row + [sid]
else:
for line in parse.split("\n"):
if line.strip():
yield line.strip().split("|")
def interpret_parse(parse):
rels = {} # child: (rel, parent)
for line in get_fields(parse):
assert len(line) == 16
sid = int(line[-1])
func, rel = line[7].split("/")
child = interpret_token(sid, *line[8:15])
if func == "top":
parent = None
else:
parent = interpret_token(sid, *line[:7])
rels[child] = (rel, parent)
# get tokenid for each token, preserving order
tokens = sorted(rels.keys(), key=lambda token: token[:2])
tokenids = {token: i for (i, token) in enumerate(tokens)}
for token in tokens:
(rel, parent) = rels[token]
tokenid = tokenids[token]
parentid = tokenids[parent] if parent is not None else None
yield (tokenid, ) + token + (rel, parentid)
def interpret_token(sid, lemma, word, begin, _end, major_pos, _pos, full_pos):
"""Convert to raw alpino token into a (word, lemma, begin, pos1) tuple"""
if major_pos not in POSMAP:
        logging.warning("UNKNOWN POS: {major_pos}".format(**locals()))
pos1 = POSMAP.get(major_pos, '?') # simplified POSMAP
return sid, int(begin), word, lemma, pos1
POSMAP = {"pronoun": 'O', "pron": 'O',
"verb": 'V',
"noun": 'N',
"preposition": 'P', "prep": 'P',
"determiner": "D", "det": "D",
"comparative": "C", "comp": "C",
"adverb": "B",
'adv': 'B',
"adj": "A",
"complementizer": "C",
"punct": ".",
"conj": "C",
"vg": 'C', "prefix": 'C', # not quite sure what vg stands for, sorry
"tag": "?",
"particle": "R", "fixed": 'R',
"name": "M",
"part": "R",
"intensifier": "B",
"number": "Q", "num": 'Q',
"cat": "Q",
"n": "Q",
"reflexive": 'O',
"conjunct": 'C',
"pp": 'P',
'anders': '?',
'etc': '?',
'enumeration': '?',
'np': 'N',
'p': 'P',
'quant': 'Q',
'sg': '?',
'zo': '?',
'max': '?',
'mogelijk': '?',
'sbar': '?',
'--': '?',
}
| 2.296875 | 2 |
thenewboston_node/business_logic/models/signed_change_request/base.py | thenewboston-developers/thenewboston-node | 30 | 12786031 | import copy
import logging
from dataclasses import dataclass
from typing import Any, Optional, Type, TypeVar
from thenewboston_node.business_logic.exceptions import ValidationError
from thenewboston_node.business_logic.models.base import BaseDataclass
from thenewboston_node.core.logging import validates
from thenewboston_node.core.utils.cryptography import derive_public_key
from thenewboston_node.core.utils.dataclass import cover_docstring, revert_docstring
from thenewboston_node.core.utils.types import hexstr
from ..mixins.signable import SignableMixin
from ..signed_change_request_message import SignedChangeRequestMessage
T = TypeVar('T', bound='SignedChangeRequest')
logger = logging.getLogger(__name__)
@revert_docstring
@dataclass
@cover_docstring
class SignedChangeRequest(SignableMixin, BaseDataclass):
message: SignedChangeRequestMessage
@classmethod
def deserialize_from_dict(cls, dict_, complain_excessive_keys=True, override: Optional[dict[str, Any]] = None):
from . import SIGNED_CHANGE_REQUEST_TYPE_MAP
# TODO(dmu) MEDIUM: This polymorphic deserializer duplicates the logic in Block/BlockMessage.
# Consider keeping only this serializer
# TODO(dmu) MEDIUM: Maybe we do not really need to subclass SignedChangeRequest, but
# subclassing of SignedChangeRequestMessage is enough
signed_change_request_type = (dict_.get('message') or {}).get('signed_change_request_type')
if cls == SignedChangeRequest:
class_ = SIGNED_CHANGE_REQUEST_TYPE_MAP.get(signed_change_request_type)
if class_ is None:
raise ValidationError('message.signed_change_request_type must be provided')
return class_.deserialize_from_dict(dict_, complain_excessive_keys=complain_excessive_keys) # type: ignore
if signed_change_request_type:
class_ = SIGNED_CHANGE_REQUEST_TYPE_MAP.get(signed_change_request_type)
if class_ is None:
raise ValidationError(f'Unsupported signed_change_request_type: {signed_change_request_type}')
if not issubclass(cls, class_):
raise ValidationError(
f'{cls} does not match with signed_change_request_type: {signed_change_request_type}'
)
return super().deserialize_from_dict(dict_, complain_excessive_keys=complain_excessive_keys)
@classmethod
def create_from_signed_change_request_message(
cls: Type[T], message: SignedChangeRequestMessage, signing_key: hexstr
) -> T:
request = cls(signer=derive_public_key(signing_key), message=copy.deepcopy(message))
request.sign(signing_key)
return request
@validates('signed request')
def validate(self, blockchain, block_number: int):
self.validate_message()
with validates('block signature'):
self.validate_signature()
@validates('signed request message')
def validate_message(self):
self.message.validate()
def get_updated_account_states(self, blockchain):
raise NotImplementedError('Must be implemented in subclass')
| 1.9375 | 2 |
src/harness/grpc_pb2.py | vmagamedov/harness | 6 | 12786032 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: harness/grpc.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from harness import net_pb2 as harness_dot_net__pb2
from harness import wire_pb2 as harness_dot_wire__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='harness/grpc.proto',
package='harness.grpc',
syntax='proto3',
serialized_options=None,
serialized_pb=b'\n\x12harness/grpc.proto\x12\x0charness.grpc\x1a\x11harness/net.proto\x1a\x12harness/wire.proto\"6\n\x07\x43hannel\x12+\n\x07\x61\x64\x64ress\x18\x01 \x01(\x0b\x32\x13.harness.net.SocketB\x05\x92}\x02\x08\x02\"2\n\x06Server\x12(\n\x04\x62ind\x18\x01 \x01(\x0b\x32\x13.harness.net.SocketB\x05\x92}\x02\x08\x02\x62\x06proto3'
,
dependencies=[harness_dot_net__pb2.DESCRIPTOR,harness_dot_wire__pb2.DESCRIPTOR,])
_CHANNEL = _descriptor.Descriptor(
name='Channel',
full_name='harness.grpc.Channel',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='address', full_name='harness.grpc.Channel.address', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\222}\002\010\002', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=75,
serialized_end=129,
)
_SERVER = _descriptor.Descriptor(
name='Server',
full_name='harness.grpc.Server',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='bind', full_name='harness.grpc.Server.bind', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\222}\002\010\002', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=131,
serialized_end=181,
)
_CHANNEL.fields_by_name['address'].message_type = harness_dot_net__pb2._SOCKET
_SERVER.fields_by_name['bind'].message_type = harness_dot_net__pb2._SOCKET
DESCRIPTOR.message_types_by_name['Channel'] = _CHANNEL
DESCRIPTOR.message_types_by_name['Server'] = _SERVER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Channel = _reflection.GeneratedProtocolMessageType('Channel', (_message.Message,), {
'DESCRIPTOR' : _CHANNEL,
'__module__' : 'harness.grpc_pb2'
# @@protoc_insertion_point(class_scope:harness.grpc.Channel)
})
_sym_db.RegisterMessage(Channel)
Server = _reflection.GeneratedProtocolMessageType('Server', (_message.Message,), {
'DESCRIPTOR' : _SERVER,
'__module__' : 'harness.grpc_pb2'
# @@protoc_insertion_point(class_scope:harness.grpc.Server)
})
_sym_db.RegisterMessage(Server)
_CHANNEL.fields_by_name['address']._options = None
_SERVER.fields_by_name['bind']._options = None
# @@protoc_insertion_point(module_scope)
| 1.117188 | 1 |
src/configure/Flink-demo/genes.py | HICAS-ChameLeon/Chameleon | 4 | 12786033 | <gh_stars>1-10
import random
import numpy as np
import joblib  # sklearn.externals.joblib was removed in newer scikit-learn releases
from deap import base
from deap import creator
from deap import tools
from itertools import repeat
from collections import Sequence
clf=joblib.load('./data/models/Jmodel.pkl')
importance = clf.feature_importances_
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", np.ndarray, fitness=creator.FitnessMin)
IND_SIZE = 28  # number of genes per individual (chromosome length)
toolbox = base.Toolbox()
toolbox.register("attr_float", random.uniform, -1, 1)
# toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_float, n=IND_SIZE)
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_float, n=IND_SIZE)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
# ind1 = toolbox.individual()
# print(ind1)
# print(ind1.fitness.valid)
def evaluate(individual):
return clf.predict(individual.reshape(1,-1)),
'''Decide, per gene, whether an individual mutates based on each feature's importance.'''
def mutate(individual,low,up,indpbs):
"""Mutate an individual by replacing attributes, with probability *indpb*,
by a integer uniformly drawn between *low* and *up* inclusively.
:param individual: :term:`Sequence <sequence>` individual to be mutated.
:param low: The lower bound or a :term:`python:sequence` of
of lower bounds of the range from wich to draw the new
integer.
:param up: The upper bound or a :term:`python:sequence` of
of upper bounds of the range from wich to draw the new
integer.
:param indpbs: Independent probability for each attribute to be mutated.
:returns: A tuple of one individual.
"""
size = len(individual)
if not isinstance(low, Sequence):
low = repeat(low, size)
elif len(low) < size:
raise IndexError("low must be at least the size of individual: %d < %d" % (len(low), size))
if not isinstance(up, Sequence):
up = repeat(up, size)
elif len(up) < size:
raise IndexError("up must be at least the size of individual: %d < %d" % (len(up), size))
    for i, xl, xu, indpb in zip(range(size - 1), low, up, indpbs):  # note: the last gene is never visited here
if random.random() < indpb*10:
individual[i] = individual[i]+random.uniform(xl, xu)/3
return individual,
LOW=0.0
UP=1.0
toolbox.register("evaluate", evaluate)
toolbox.register("mate", tools.cxTwoPoint) #交叉
toolbox.register("mutate",mutate,low=LOW,up=UP, indpbs=importance)
toolbox.register("select", tools.selTournament, tournsize=3)
def main(confs):
random.seed(64)
NGEN=10
    CXPB, MUTPB = 0.5, 0.2  # crossover probability, mutation probability
pop = toolbox.population(n=0)
for i in confs:
pop.append(creator.Individual(i))
fitnesses = map(toolbox.evaluate, pop)
# print(fitnesses)
for ind, fit in zip(pop, fitnesses):
ind.fitness.values = fit
print(" Evaluated %i individuals" % len(pop))
print("-- Iterative %i times --" % NGEN)
for g in range(NGEN):
if g % 10 == 0:
print("-- Generation %i --" % g)
# Select the next generation individuals
offspring = toolbox.select(pop, len(pop))
# Clone the selected individuals
offspring = list(map(toolbox.clone, offspring))
# Apply crossover and mutation on the offspring
for child1, child2 in zip(offspring[::2], offspring[1::2]):
if random.random() < CXPB:
toolbox.mate(child1, child2)
del child1.fitness.values
del child2.fitness.values
for mutant in offspring:
if random.random() < MUTPB:
toolbox.mutate(mutant)
del mutant.fitness.values
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# The population is entirely replaced by the offspring
pop[:] = offspring
print("-- End of (successful) evolution --")
best_ind = tools.selBest(pop, 1)[0]
    return best_ind, best_ind.fitness.values  # return the best individual and its fitness value
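# Hedged usage sketch (not part of the original script): main() expects an
# iterable of IND_SIZE-element configuration vectors used to seed the
# population; the seed count and value range below are made up.
#
#     seeds = [[random.uniform(0, 1) for _ in range(IND_SIZE)] for _ in range(50)]
#     best_individual, best_fitness = main(seeds)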
| 2.6875 | 3 |
src/service/setup.py | MrMonkeyPi/monkey-pi | 0 | 12786034 | import os
import shelve
APP_SETTING_FILE = os.path.join(os.getcwd(), 'instance', "data", "app")
CACHE_DIR = os.path.join(os.getcwd(), 'instance', 'cache')
try:
os.makedirs(CACHE_DIR)
os.makedirs(os.path.dirname(APP_SETTING_FILE))
except OSError:
pass
# for item, value in os.environ.items():
# print(f"{item} > {value}")
MEDIA_HOME = os.environ.get('MEDIA_HOME')
if MEDIA_HOME is not None:
with shelve.open(APP_SETTING_FILE) as db:
db['MEDIA_HOME'] = list(map(lambda x: os.path.abspath(x), MEDIA_HOME.split(':')))
with shelve.open(APP_SETTING_FILE) as db:
print(dict(db))
| 2.546875 | 3 |
meal_plan_optimizer/__init__.py | ilSommo/meal-plan-optimizer | 2 | 12786035 | <filename>meal_plan_optimizer/__init__.py
__version__ = '1.3.0'
__author__ = '<NAME>'
| 0.996094 | 1 |
webcheck/webcheck.py | sintrb/webcheck | 0 | 12786036 | # -*- coding: UTF-8 -*-
from __future__ import print_function
__version__ = "1.2.0"
def get_certificate(hostname, port, sername=None):
import idna
from socket import socket
from OpenSSL import SSL
sock = socket()
sock.setblocking(True)
sock.connect((hostname, port), )
ctx = SSL.Context(SSL.SSLv23_METHOD)
ctx.check_hostname = False
ctx.verify_mode = SSL.VERIFY_NONE
sock_ssl = SSL.Connection(ctx, sock)
sock_ssl.set_tlsext_host_name(idna.encode(sername or hostname))
sock_ssl.set_connect_state()
sock_ssl.do_handshake()
cert = sock_ssl.get_peer_certificate()
sock_ssl.close()
sock.close()
return cert
_last_line = ''
def _print_status(s):
import sys
global _last_line
if not sys.stdout.isatty():
return
if _last_line:
print('\b' * len(_last_line), end='')
sys.stdout.flush()
print(' ' * len(_last_line), end='')
sys.stdout.flush()
print(u'\r%s' % s, end='')
_last_line = s
sys.stdout.flush()
def main():
import io
import sys
import time
import socket
import argparse
import datetime
from collections import OrderedDict
import ssl
try:
import urlparse as parse
import urllib2
urlopen = urllib2.urlopen
except:
from urllib import parse, request
urlopen = request.urlopen
ssl._create_default_https_context = ssl._create_unverified_context
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('-f', '--file', help='the text file(or uri) to read URLs')
parser.add_argument('-e', '--expire', help='the expire days for ssl certificate', type=int, default=7)
parser.add_argument('-c', '--code', help='the http response status code', type=int, default=[200], nargs='*')
parser.add_argument('-t', '--timeout', help='the timeout to check', type=int, default=10)
parser.add_argument('urls', help='the URLs what will be check', default=[], type=str, nargs='*')
args = parser.parse_args()
start = time.time()
rawurls = [] + args.urls
if args.file:
if '://' in args.file:
# uri
_print_status('fetch urls file from %s...' % args.file)
r = urlopen(args.file)
for l in r.readlines():
if type(l) != type(''):
l = l.decode()
rawurls.append(l)
else:
rawurls += list(io.open(args.file, encoding='utf-8').readlines())
urls = []
for l in rawurls:
if '://' not in l:
continue
ls = l.split('#')
if not ls:
continue
u = ls[0].strip()
if not u or u in urls:
continue
ud = {
'url': u
}
urls.append(ud)
if not urls:
_print_status('')
print('no url to check', file=sys.stderr)
exit(1)
today = datetime.datetime.today()
results = []
socket.setdefaulttimeout(args.timeout)
errct = 0
for ix, ud in enumerate(urls):
url = ud['url']
_print_status(u'%s/%d/%d %s...' % (errct, ix + 1, len(urls), url))
rs = parse.urlparse(url)
res = OrderedDict()
if args.expire and rs.scheme == 'https':
# ssl check
err = ''
try:
cert = get_certificate(rs.hostname, int(rs.port or 443))
es = cert.get_notAfter()[:-1]
if type(es) != type(''):
es = es.decode()
expdate = datetime.datetime.strptime(es, '%Y%m%d%H%M%S')
offdays = (expdate - today).days
if offdays <= args.expire:
err = 'days %s' % offdays
except Exception as e:
err = str(e) or str(type(e).__name__)
res['ssl'] = {
'title': 'ssl',
'error': err
}
if args.code:
# check http status
err = ''
try:
code = urlopen(url, timeout=args.timeout).getcode()
if code not in args.code:
err = 'code %s' % code
except Exception as e:
err = str(e)
res['http'] = {
'title': 'http',
'error': err
}
errors = list([u'%s(%s)' % (r['title'], r['error']) for r in res.values() if r['error']])
results.append({
'title': ud.get('title', url),
'url': url,
'result': res,
'error': u'/'.join(errors) if errors else ''
})
if errors:
errct += 1
# print(results)
_print_status('')
errors = list(['%s [%s]' % (r['title'], r['error']) for r in results if r['error']])
print('TIME:%ds CHECKED:%d ERROR:%s' % (int(time.time() - start), len(results), len(errors)))
if errors:
print('\n'.join(errors))
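# Hedged command-line sketch (not part of the original file); the flags match
# the argparse definitions in main(), while the URLs and file name are made up:
#
#     python webcheck.py -e 14 -t 5 https://example.com https://example.org
#     python webcheck.py -f urls.txt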
if __name__ == '__main__':
main()
| 2.984375 | 3 |
goodman_pipeline/wcs/tests/test_functional.py | SunilSimha/goodman_pipeline | 6 | 12786037 | from __future__ import absolute_import
from unittest import TestCase, skip
from ..wcs import WCS
import numpy as np
import os
import re
import sys
from astropy.io import fits
from astropy.modeling import (models, fitting, Model)
import matplotlib.pyplot as plt
from ccdproc import CCDData
class TestWCSBase(TestCase):
def setUp(self):
self.data_path = os.path.join(
os.path.dirname(sys.modules['goodman_pipeline'].__file__),
'data/test_data/wcs_data')
self.wcs = WCS()
@staticmethod
def _recover_lines(ccd):
lines_pixel = []
lines_angstrom = []
pixel_keywords = ccd.header['GSP_P*']
for pixel_key in pixel_keywords:
if re.match(r'GSP_P\d{3}', pixel_key) is not None:
angstrom_key = re.sub('GSP_P', 'GSP_A', pixel_key)
if int(ccd.header[angstrom_key]) != 0:
lines_pixel.append(float(ccd.header[pixel_key]))
lines_angstrom.append(float(ccd.header[angstrom_key]))
return lines_pixel, lines_angstrom
class TestWCS(TestWCSBase):
# def test_wcs__call__(self):
# self.assertRaisesRegex(SystemExit, '1', self.wcs)
# self.assertRaises(SystemExit, self.wcs)
def test_fit_chebyshev(self):
test_file = os.path.join(self.data_path,
'goodman_comp_400M1_HgArNe.fits')
ccd = CCDData.read(test_file, unit='adu')
pixel, angstrom = self._recover_lines(ccd=ccd)
model = self.wcs.fit(physical=pixel, wavelength=angstrom)
self.assertIsInstance(model, Model)
self.assertEqual(model.__class__.__name__, ccd.header['GSP_FUNC'])
self.assertEqual(model.degree, ccd.header['GSP_ORDR'])
for i in range(model.degree + 1):
self.assertAlmostEqual(model.__getattribute__('c{:d}'.format(i)).value,
ccd.header['GSP_C{:03d}'.format(i)])
def test_fit_linear(self):
test_file = os.path.join(self.data_path,
'goodman_comp_400M1_HgArNe.fits')
ccd = CCDData.read(test_file, unit='adu')
pixel, angstrom = self._recover_lines(ccd=ccd)
model = self.wcs.fit(physical=pixel,
wavelength=angstrom,
model_name='linear')
self.assertIsInstance(model, Model)
def test_fit_invalid(self):
test_file = os.path.join(self.data_path,
'goodman_comp_400M1_HgArNe.fits')
ccd = CCDData.read(test_file, unit='adu')
pixel, angstrom = self._recover_lines(ccd=ccd)
self.assertRaisesRegex(NotImplementedError,
'The model invalid is not implemented',
self.wcs.fit,
pixel,
angstrom,
'invalid')
self.assertRaises(NotImplementedError,
self.wcs.fit,
pixel,
angstrom,
'invalid')
def test_fit__unable_to_fit(self):
pixel = [0, 1, 2, 3]
angstrom = [20, 30, 40]
# self.assertRaisesRegex(ValueError,
# 'x and y should have the same shape',
# self.wcs.fit, pixel, angstrom)
self.assertRaises(ValueError, self.wcs.fit, pixel, angstrom)
def test_read__linear(self):
test_file = os.path.join(self.data_path,
'linear_fits_solution.fits')
self.assertTrue(os.path.isfile(test_file))
ccd = CCDData.read(test_file, unit='adu')
result = self.wcs.read(ccd=ccd)
self.assertIsInstance(result, list)
self.assertEqual(len(result), 2)
self.assertIsInstance(self.wcs.get_model(), Model)
def test_read__log_linear(self):
test_file = os.path.join(self.data_path,
'log-linear_fits_solution.fits')
self.assertTrue(os.path.isfile(test_file))
ccd = CCDData.read(test_file, unit='adu')
#
# result = self.wcs.read(ccd=ccd)
#
# self.assertIsInstance(result, list)
# self.assertEqual(len(result), 2)
# self.assertIsInstance(self.wcs.get_model(), Model)
self.assertRaises(NotImplementedError, self.wcs.read, ccd)
def test_read__non_linear_chebyshev(self):
test_file = os.path.join(self.data_path,
'non-linear_fits_solution_cheb.fits')
self.assertTrue(os.path.isfile(test_file))
ccd = CCDData.read(test_file, unit='adu')
result = self.wcs.read(ccd=ccd)
self.assertIsInstance(self.wcs.model, Model)
self.assertEqual(self.wcs.model.__class__.__name__, 'Chebyshev1D')
def test_read__non_linear_legendre(self):
test_file = os.path.join(self.data_path,
'non-linear_fits_solution_legendre.fits')
self.assertTrue(os.path.isfile(test_file))
ccd = CCDData.read(test_file, unit='adu')
result = self.wcs.read(ccd=ccd)
self.assertIsInstance(self.wcs.model, Model)
self.assertEqual(self.wcs.model.__class__.__name__, 'Legendre1D')
def test_read__non_linear_lspline(self):
test_file = os.path.join(self.data_path,
'non-linear_fits_solution_linear-spline.fits')
self.assertTrue(os.path.isfile(test_file))
ccd = CCDData.read(test_file, unit='adu')
# self.wcs.read(ccd=ccd)
self.assertRaises(NotImplementedError, self.wcs.read, ccd)
self.assertRaisesRegex(NotImplementedError,
'Linear spline is not implemented',
self.wcs.read, ccd)
def test_read__non_linear_cspline(self):
test_file = os.path.join(self.data_path,
'non-linear_fits_solution_cubic-spline.fits')
self.assertTrue(os.path.isfile(test_file))
ccd = CCDData.read(test_file, unit='adu')
self.assertRaises(NotImplementedError, self.wcs.read, ccd)
self.assertRaisesRegex(NotImplementedError,
'Cubic spline is not implemented',
self.wcs.read, ccd)
def test_write_fits_wcs(self):
self.assertRaises(NotImplementedError, self.wcs.write_fits_wcs,
None,
None)
def test_read__invalid(self):
test_file = os.path.join(self.data_path,
'linear_fits_solution.fits')
self.assertTrue(os.path.isfile(test_file))
ccd = CCDData.read(test_file, unit='adu')
ccd.wcs.wcs.ctype[0] = 'INVALID'
self.assertRaisesRegex(NotImplementedError,
'CTYPE INVALID is not recognized',
self.wcs.read,
ccd)
self.assertRaises(NotImplementedError, self.wcs.read, ccd)
def test_write_gsp_wcs(self):
test_file = os.path.join(self.data_path,
'goodman_comp_400M1_HgArNe.fits')
ccd = CCDData.read(test_file, unit='adu')
pixel, angstrom = self._recover_lines(ccd=ccd)
model = self.wcs.fit(physical=pixel, wavelength=angstrom)
self.assertIsInstance(model, Model)
blank_ccd = CCDData(data=np.ones(ccd.data.shape),
meta=fits.Header(),
unit='adu')
blank_ccd.header.set('GSP_WREJ', value=None, comment='empty')
new_ccd = self.wcs.write_gsp_wcs(ccd=blank_ccd, model=model)
self.assertEqual(new_ccd.header['GSP_FUNC'], ccd.header['GSP_FUNC'])
self.assertEqual(new_ccd.header['GSP_ORDR'], ccd.header['GSP_ORDR'])
self.assertEqual(new_ccd.header['GSP_NPIX'], ccd.header['GSP_NPIX'])
for i in range(model.degree + 1):
self.assertAlmostEqual(new_ccd.header['GSP_C{:03d}'.format(i)],
ccd.header['GSP_C{:03d}'.format(i)])
def test_read_gsp_wcs(self):
test_file = os.path.join(self.data_path,
'goodman_comp_400M1_HgArNe.fits')
self.assertTrue(os.path.isfile(test_file))
ccd = CCDData.read(test_file, unit='adu')
result = self.wcs.read_gsp_wcs(ccd=ccd)
self.assertIsInstance(result, list)
self.assertEqual(len(result), 2)
self.assertIsInstance(self.wcs.get_model(), Model)
def test_get_model_is_None(self):
self.wcs.model = None
self.assertIsNone(self.wcs.get_model())
def test_get_model_is_not_None(self):
self.wcs.model = models.Chebyshev1D(degree=3)
self.assertIsInstance(self.wcs.get_model(), Model)
def test_pm_none(self):
# test_file = os.path.join(self.data_path,
# 'non-linear_fits_solution_cheb.fits')
# self.assertTrue(os.path.isfile(test_file))
#
# ccd = CCDData.read(test_file, unit='adu')
#
# WAT2_001 = 'wtype = multispec spec1 = "1 1 2 1. 1.5114461210693 4096 0. 834.39 864'
# WAT2_002 = '.39 1. 0. 1 3 1616.37 3259.98 5115.64008185559 535.515983711607 -0.7'
# WAT2_003 = '79265625182385"'
#
# dtype = -1
self.assertRaises(NotImplementedError, self.wcs._none)
| 2.03125 | 2 |
Guizero/05.Widgets.py | sarincr/Python-modules-for-GUI-Dev | 0 | 12786038 | from guizero import App, TextBox, Text
def count():
character_count.value = len(entered_text.value)
app = App()
entered_text = TextBox(app, command=count)
character_count = Text(app)
app.display()
| 2.71875 | 3 |
warehouse/utils/admin_flags.py | jw/warehouse | 0 | 12786039 | <filename>warehouse/utils/admin_flags.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy import Column, Boolean, Text
from warehouse import db
class AdminFlag(db.Model):
__tablename__ = "warehouse_admin_flag"
id = Column(Text, primary_key=True, nullable=False)
description = Column(Text, nullable=False)
enabled = Column(Boolean, nullable=False)
@classmethod
def is_enabled(cls, session, flag_name):
flag = (session.query(cls)
.filter(cls.id == flag_name)
.first())
if flag is None:
return False
return flag.enabled
| 2.171875 | 2 |
mobile/views/mobile_data_tool_view.py | invinst/CPDB | 16 | 12786040 | from django.views.generic import RedirectView
from mobile.constants import DEFAULT_REDIRECT_URL, DEFAULT_REDIRECTORS
from mobile.services.mobile_redirector_service import DesktopToMobileRedirectorService
from share.models import Session
class MobileDataToolView(RedirectView):
def get_redirect_url(self, *args, **kwargs):
hash_id = kwargs.get('hash_id', '')
try:
session_id = Session.id_from_hash(hash_id)[0]
session = Session.objects.get(id=session_id)
self.filters = session.query.get('filters', {})
except (IndexError, Session.DoesNotExist):
self.filters = {}
redirect_urls = DesktopToMobileRedirectorService(DEFAULT_REDIRECTORS).perform(self.filters)
if len(redirect_urls) == 1:
return redirect_urls[0]
return DEFAULT_REDIRECT_URL
| 1.992188 | 2 |
lib/exaproxy/icap/parser.py | oriolarcas/exaproxy | 124 | 12786041 | <reponame>oriolarcas/exaproxy
#!/usr/bin/env python
# encoding: utf-8
from .request import ICAPRequestFactory
from .response import ICAPResponseFactory
from .header import ICAPResponseHeaderFactory
class ICAPParser (object):
ICAPResponseHeaderFactory = ICAPResponseHeaderFactory
ICAPRequestFactory = ICAPRequestFactory
ICAPResponseFactory = ICAPResponseFactory
VERSIONS = ('ICAP/1.0',)
METHODS = ('REQMOD', 'OPTIONS')
HEADERS = ('cache-control', 'connection', 'date', 'trailer', 'upgrade', 'via',
'authorization','allow','from','host','referer','user-agent', 'preview',
'encapsulated','proxy-authenticate','proxy-authorization', 'istag')
def __init__ (self, configuration):
self.configuration = configuration
self.header_factory = self.ICAPResponseHeaderFactory(configuration)
self.request_factory = self.ICAPRequestFactory(configuration)
self.response_factory = self.ICAPResponseFactory(configuration)
def parseRequestLine (self, request_line):
request_parts = request_line.split() if request_line else []
if len(request_parts) == 3:
method, url, version = request_parts
method = method.upper()
version = version.upper()
else:
method, url, version = None, None, None
return method, url, version
def parseResponseLine (self, response_line):
response_parts = response_line.split(' ', 2) if response_line else []
if len(response_parts) == 3:
version, code, status = response_parts
if code.isdigit():
code = int(code)
else:
version, code, status = None, None, None
else:
version, code, status = None, None, None
return version, code, status
def readHeaders (self, request_lines):
headers = {}
for line in request_lines:
if not line:
break
if ':' not in line:
headers = None
break
key, value = line.split(':', 1)
key = key.lower().strip()
value = value.strip()
if key in self.HEADERS or key.startswith('x-'):
headers[key] = value
if key == 'pragma' and ':' in value:
pkey, pvalue = value.split(':', 1)
pkey = pkey.lower().strip()
pvalue = pvalue.strip()
headers.setdefault(key, {})[pkey] = pvalue
return headers
def parseRequest (self, icap_string, http_string):
request_lines = (p for ss in icap_string.split('\r\n') for p in ss.split('\n'))
try:
request_line = request_lines.next()
except StopIteration:
request_line = None
method, url, version = self.parseRequestLine(request_line)
if method in self.METHODS and version in self.VERSIONS:
headers = self.readHeaders(request_lines)
site_name = url.rsplit(',',1)[-1] if ',' in url else 'default'
headers['x-customer-name'] = site_name
else:
headers = None
offsets = self.getOffsets(headers) if headers is not None else []
length, complete = self.getBodyLength(offsets)
if set(('res-hdr', 'res-body')).intersection(dict(offsets)):
headers = None
return self.request_factory.create(method, url, version, headers, icap_string, http_string, offsets, length, complete) if headers else None
def getOffsets (self, headers):
encapsulated_line = headers.get('encapsulated', '')
parts = (p.strip() for p in encapsulated_line.split(',') if '=' in p)
pairs = (p.split('=',1) for p in parts)
offsets = ((k,int(v)) for (k,v) in pairs if v.isdigit())
return sorted(offsets, lambda (_,a), (__,b): 1 if a >= b else -1)
def getBodyLength (self, offsets):
final, offset = offsets[-1] if offsets else ('null-body', 0)
return offset, final == 'null-body'
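    # Illustrative note (added, not in the original source): for an ICAP header
    # such as "Encapsulated: req-hdr=0, req-body=412", getOffsets returns
    # [('req-hdr', 0), ('req-body', 412)] and getBodyLength reports length 412
    # with complete == False; with "req-hdr=0, null-body=412" instead, the same
    # length is reported with complete == True.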
def splitResponseParts (self, offsets, body_string):
final, offset = offsets[-1] if offsets else (None, None)
if final != 'null-body':
offsets = offsets + [('null-body', len(body_string))]
names = [name for name,offset in offsets]
positions = [offset for name,offset in offsets]
blocks = ((positions[i], positions[i+1]) for i in xrange(len(positions)-1))
strings = (body_string[start:end] for start,end in blocks)
return dict(zip(names, strings))
def parseResponseHeader (self, header_string):
response_lines = (p for ss in header_string.split('\r\n') for p in ss.split('\n'))
try:
response_line = response_lines.next()
except StopIteration:
response_line = None
version, code, status = self.parseResponseLine(response_line)
if version in self.VERSIONS:
headers = self.readHeaders(response_lines)
headers['server'] = 'EXA Proxy 1.0'
else:
headers = {}
offsets = self.getOffsets(headers) if headers is not None else []
length, complete = self.getBodyLength(offsets)
return self.header_factory.create(version, code, status, headers, header_string, offsets, length, complete)
def continueResponse (self, response_header, body_string):
version, code, status = response_header.info
headers = response_header.headers
header_string = response_header.header_string
# split the body string into components
parts = self.splitResponseParts(response_header.offsets, body_string)
response_string = parts.get('res-hdr', '')
request_string = parts.get('req-hdr', '')
if request_string.startswith('CONNECT'):
intercept_string, new_request_string = self.splitResponse(request_string)
if headers.get('x-intercept', '') != 'active' and not new_request_string:
intercept_string = None
else:
request_string = new_request_string
else:
intercept_string = None
body_string = parts.get('res-body', None) if response_string else parts.get('req-body', None)
return self.response_factory.create(version, code, status, headers, header_string, request_string, response_string, body_string, intercept_string)
def splitResponse (self, response_string):
for delimiter in ('\n\n', '\r\n\r\n'):
if delimiter in response_string:
header_string, subheader_string = response_string.split(delimiter, 1)
break
else:
header_string, subheader_string = response_string, ''
return header_string, subheader_string
| 2.125 | 2 |
objs/objs.py | n-hachi/raytrace | 0 | 12786042 | <reponame>n-hachi/raytrace<gh_stars>0
from abc import ABCMeta, abstractmethod
import numpy as np
from .ray import Ray
class AbsObject(metaclass=ABCMeta):
@abstractmethod
def normal(self, ray):
raise NotImplementedError()
@abstractmethod
def reflect(self, ray):
raise NotImplementedError()
@abstractmethod
def intersect(self, ray):
raise NotImplementedError()
# surface
# | o(origin)
# | ,`
# | ,` |
# | ,` | ov(orthographic vector)
# |,` v
# * ----->*---> n(normal)
# |`,rn(resized normal)
# | `,
# | `,
# | `,
# | r(reflect)
#
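    # Net effect (sketch of the algebra, added for clarity): with d = ray.direction()
    # and n the unit normal, rd = d + 2*rn = d - 2*(d.n)*n, i.e. the standard
    # mirror-reflection formula.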
def reflect(self, ray):
n = self.normal(ray)
cos = np.dot(ray.direction(), n.direction()) * -1 # Cosine
rn = np.multiply(n.direction(), cos) # Resize normal
ov = np.add(ray.direction(), rn) # Vector between origin ray and normal ray.
rd = rn + ov # Reflect vector direction.
# Create new ray.
_, p = self.intersect(ray)
p2 = np.add(p, np.multiply(rd, 1e-5)) # Move the origin of ray slightly forward.
return Ray(p2, rd) # Reflect ray.
def color(self, ray, objects, lights):
r = self.reflect(ray)
n = self.normal(ray)
diffues = 0
specular = 0
# Generate ray between intersect point and light source.
for l in lights:
# Ray to light source
r2l = Ray(r.origin(), l.center() - r.origin())
o, _, _ = r2l.intersect(objects)
if o is not None:
continue
diffues += max(0, np.dot(r2l.direction(), n.direction()))
specular += max(0, np.dot(r2l.direction(), r.direction())) ** 50 * l.intensity()
coefficient = (diffues + specular) / 2
return np.multiply(self._surface_color, coefficient)
| 2.40625 | 2 |
applications/camera_calibration/scripts/derive_jacobians.py | lingbo-yu/camera_calibration | 474 | 12786043 | import math
import sys
import time
from sympy import *
from sympy.solvers.solveset import nonlinsolve
from optimizer_builder import *
# ### Math functions ###
# Simple model for the fractional-part function used for bilinear interpolation
# which leaves the function un-evaluated. Ignores the discontinuities when
# computing the derivative. They do not matter.
class frac(Function):
# Returns the first derivative of the function.
# A simple model for the function within the range between two discontinuities is:
# f(x) = x - c, with a constant c. So f'(x) = 1.
def fdiff(self, argindex=1):
if argindex == 1:
return S.One
else:
raise ArgumentIndexError(self, argindex)
def UnitQuaternionRotatePoint(q, pt):
t2 = q[0] * q[1]
t3 = q[0] * q[2]
t4 = q[0] * q[3]
t5 = -q[1] * q[1]
t6 = q[1] * q[2]
t7 = q[1] * q[3]
t8 = -q[2] * q[2]
t9 = q[2] * q[3]
t1 = -q[3] * q[3]
return Matrix([[2 * ((t8 + t1) * pt[0] + (t6 - t4) * pt[1] + (t3 + t7) * pt[2]) + pt[0]],
[2 * ((t4 + t6) * pt[0] + (t5 + t1) * pt[1] + (t9 - t2) * pt[2]) + pt[1]],
[2 * ((t7 - t3) * pt[0] + (t2 + t9) * pt[1] + (t5 + t8) * pt[2]) + pt[2]]])
# Transformation is a 7-vector [quaternion, translation].
def TransformPoint(transformation, point):
point_out = UnitQuaternionRotatePoint(transformation, point)
point_out[0] += transformation[4];
point_out[1] += transformation[5];
point_out[2] += transformation[6];
return point_out
# Both transformations are 7-vectors [quaternion, translation].
def RigTransformPoint(camera_tr_rig, rig_tr_global, global_point):
point_rig = UnitQuaternionRotatePoint(rig_tr_global, global_point)
point_rig[0] += rig_tr_global[4];
point_rig[1] += rig_tr_global[5];
point_rig[2] += rig_tr_global[6];
point_out = UnitQuaternionRotatePoint(camera_tr_rig, point_rig)
point_out[0] += camera_tr_rig[4];
point_out[1] += camera_tr_rig[5];
point_out[2] += camera_tr_rig[6];
return point_out
# 3-Vector dot product:
def DotProduct3(vector1, vector2):
return vector1[0] * vector2[0] + vector1[1] * vector2[1] + vector1[2] * vector2[2]
def CubicHermiteSpline(p0, p1, p2, p3, x):
a = (0.5) * (-p0 + (3.0) * p1 - (3.0) * p2 + p3)
b = (0.5) * ((2.0) * p0 - (5.0) * p1 + (4.0) * p2 - p3)
c = (0.5) * (-p0 + p2)
d = p1
return d + x * (c + x * (b + x * a))
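# Quick sanity check (illustrative, not part of the original script): with the
# coefficients above, CubicHermiteSpline(p0, p1, p2, p3, 0) == p1 and
# CubicHermiteSpline(p0, p1, p2, p3, 1) == p2, i.e. the Catmull-Rom curve
# interpolates the two middle control points over the unit interval used for
# the fractional pixel coordinates below.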
def EvalUniformCubicBSpline(a, b, c, d, x):
# x must be in [3, 4[.
# i == 3
x_for_d = x - 3
d_factor = 1./6. * x_for_d * x_for_d * x_for_d
# i == 2
c_factor = -1./2.*x*x*x + 5*x*x - 16*x + 50./3.
# i == 1
b_factor = 1./2.*x*x*x - 11./2.*x*x + (39./2.)*x - 131./6.
# i == 0
a_factor = -1./6. * (x - 4) * (x - 4) * (x - 4)
return a_factor * a + b_factor * b + c_factor * c + d_factor * d
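# Quick sanity check (illustrative, not part of the original script): the four
# basis weights above form a partition of unity on [3, 4[; at x = 3.5 they are
# 1/48, 23/48, 23/48 and 1/48, so constant control values are reproduced
# exactly by EvalUniformCubicBSpline.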
def NoncentralGenericBicubicModelUnprojection(
l00, l01, l02, l03, l10, l11, l12, l13, l20, l21, l22, l23, l30, l31, l32, l33, #camera_intrinsics
frac_x, frac_y):
f0 = CubicHermiteSpline(l00, l01, l02, l03, frac_x)
f1 = CubicHermiteSpline(l10, l11, l12, l13, frac_x)
f2 = CubicHermiteSpline(l20, l21, l22, l23, frac_x)
f3 = CubicHermiteSpline(l30, l31, l32, l33, frac_x)
unprojection = CubicHermiteSpline(f0, f1, f2, f3, frac_y);
direction = Matrix([[unprojection[0]],
[unprojection[1]],
[unprojection[2]]])
direction = direction.normalized()
return Matrix([[direction[0]],
[direction[1]],
[direction[2]],
[unprojection[3]],
[unprojection[4]],
[unprojection[5]]])
def NoncentralGenericBSplineModelUnprojection(
l00, l01, l02, l03, l10, l11, l12, l13, l20, l21, l22, l23, l30, l31, l32, l33, #camera_intrinsics
frac_x, frac_y):
f0 = EvalUniformCubicBSpline(l00, l01, l02, l03, frac_x)
f1 = EvalUniformCubicBSpline(l10, l11, l12, l13, frac_x)
f2 = EvalUniformCubicBSpline(l20, l21, l22, l23, frac_x)
f3 = EvalUniformCubicBSpline(l30, l31, l32, l33, frac_x)
unprojection = EvalUniformCubicBSpline(f0, f1, f2, f3, frac_y);
direction = Matrix([[unprojection[0]],
[unprojection[1]],
[unprojection[2]]])
direction = direction.normalized()
return Matrix([[direction[0]],
[direction[1]],
[direction[2]],
[unprojection[3]],
[unprojection[4]],
[unprojection[5]]])
def CentralGenericBicubicModelUnprojection(
p00, p01, p02, p03, p10, p11, p12, p13, p20, p21, p22, p23, p30, p31, p32, p33, #camera_intrinsics
frac_x, frac_y):
f0 = CubicHermiteSpline(p00, p01, p02, p03, frac_x)
f1 = CubicHermiteSpline(p10, p11, p12, p13, frac_x)
f2 = CubicHermiteSpline(p20, p21, p22, p23, frac_x)
f3 = CubicHermiteSpline(p30, p31, p32, p33, frac_x)
unprojection = CubicHermiteSpline(f0, f1, f2, f3, frac_y);
unprojection = unprojection.normalized()
return Matrix([[unprojection[0]],
[unprojection[1]],
[unprojection[2]]])
def CentralGenericBicubicModelFittingProblemError(
p00, p01, p02, p03, p10, p11, p12, p13, p20, p21, p22, p23, p30, p31, p32, p33, #camera_intrinsics
frac_x, frac_y, measurement_x, measurement_y, measurement_z):
# Interpolation data points:
# col
# p00 p01 p02 p03
# row p10 p11 p12 p13
# p20 p21 p22 p23
# p30 p31 p32 p33
f0 = CubicHermiteSpline(p00, p01, p02, p03, frac_x)
f1 = CubicHermiteSpline(p10, p11, p12, p13, frac_x)
f2 = CubicHermiteSpline(p20, p21, p22, p23, frac_x)
f3 = CubicHermiteSpline(p30, p31, p32, p33, frac_x)
unprojection = CubicHermiteSpline(f0, f1, f2, f3, frac_y);
unprojection = unprojection.normalized()
return Matrix([[unprojection[0] - measurement_x],
[unprojection[1] - measurement_y],
[unprojection[2] - measurement_z]])
def CentralGenericBSplineModelUnprojection(
p00, p01, p02, p03, p10, p11, p12, p13, p20, p21, p22, p23, p30, p31, p32, p33, #camera_intrinsics
frac_x, frac_y):
a = EvalUniformCubicBSpline(p00, p01, p02, p03, frac_x)
b = EvalUniformCubicBSpline(p10, p11, p12, p13, frac_x)
c = EvalUniformCubicBSpline(p20, p21, p22, p23, frac_x)
d = EvalUniformCubicBSpline(p30, p31, p32, p33, frac_x)
unprojection = EvalUniformCubicBSpline(a, b, c, d, frac_y)
unprojection = unprojection.normalized()
return Matrix([[unprojection[0]],
[unprojection[1]],
[unprojection[2]]])
def CentralGenericBSplineModelFittingProblemError(
p00, p01, p02, p03, p10, p11, p12, p13, p20, p21, p22, p23, p30, p31, p32, p33, #camera_intrinsics
frac_x, frac_y, measurement_x, measurement_y, measurement_z):
a = EvalUniformCubicBSpline(p00, p01, p02, p03, frac_x)
b = EvalUniformCubicBSpline(p10, p11, p12, p13, frac_x)
c = EvalUniformCubicBSpline(p20, p21, p22, p23, frac_x)
d = EvalUniformCubicBSpline(p30, p31, p32, p33, frac_x)
unprojection = EvalUniformCubicBSpline(a, b, c, d, frac_y)
unprojection = unprojection.normalized()
return Matrix([[unprojection[0] - measurement_x],
[unprojection[1] - measurement_y],
[unprojection[2] - measurement_z]])
def CentralGenericBilinearModelUnprojection(
p00, p01, p10, p11, #camera_intrinsics
frac_x, frac_y):
unprojection = ((1 - frac_x) * (1 - frac_y) * p00 +
( frac_x) * (1 - frac_y) * p01 +
(1 - frac_x) * ( frac_y) * p10 +
( frac_x) * ( frac_y) * p11)
unprojection = unprojection.normalized()
return Matrix([[unprojection[0]],
[unprojection[1]],
[unprojection[2]]])
def CentralGenericBilinearModelFittingProblemError(
p00, p01, p10, p11, #camera_intrinsics
frac_x, frac_y, measurement_x, measurement_y, measurement_z):
unprojection = ((1 - frac_x) * (1 - frac_y) * p00 +
( frac_x) * (1 - frac_y) * p01 +
(1 - frac_x) * ( frac_y) * p10 +
( frac_x) * ( frac_y) * p11)
unprojection = unprojection.normalized()
return Matrix([[unprojection[0] - measurement_x],
[unprojection[1] - measurement_y],
[unprojection[2] - measurement_z]])
def ConvertDirectionToLocalUpdate(base_direction, target_direction, tangent1, tangent2):
factor = 1 / DotProduct3(base_direction, target_direction)
offset = (factor * target_direction) - base_direction
return Matrix([[DotProduct3(tangent1, offset)],
[DotProduct3(tangent2, offset)]])
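# Note (illustrative): assuming base_direction has unit length and
# (tangent1, tangent2) span its tangent plane, scaling target_direction by
# 1 / dot(base, target) places it on the plane {v : dot(base_direction, v) = 1};
# `offset` is then orthogonal to base_direction, and the returned 2-vector is
# simply its coordinates in the tangent basis.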
# For quaternion layout: (w, x, y, z).
def QuaternionMultiplication(z, w):
return Matrix([[z[0] * w[0] - z[1] * w[1] - z[2] * w[2] - z[3] * w[3]],
[z[0] * w[1] + z[1] * w[0] + z[2] * w[3] - z[3] * w[2]],
[z[0] * w[2] - z[1] * w[3] + z[2] * w[0] + z[3] * w[1]],
[z[0] * w[3] + z[1] * w[2] - z[2] * w[1] + z[3] * w[0]]])
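# Illustrative check (not from the original file): with the (w, x, y, z) layout
# above, the identity quaternion is (1, 0, 0, 0), and multiplying any q by it
# must return q unchanged. The helper returns the symbolic difference, which
# should simplify to the zero vector.
def CheckQuaternionIdentityMultiplication(q):
  identity = Matrix([[1], [0], [0], [0]])
  return QuaternionMultiplication(q, identity) - q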
# For quaternion layout: (w, x, y, z).
def QuaternionLocalUpdate(delta, q):
norm_delta = sqrt(delta[0] * delta[0] +
delta[1] * delta[1] +
delta[2] * delta[2])
sin_delta_by_delta = sin(norm_delta) / norm_delta
delta_q = Matrix([[cos(norm_delta)],
[sin_delta_by_delta * delta[0]],
[sin_delta_by_delta * delta[1]],
[sin_delta_by_delta * delta[2]]])
return QuaternionMultiplication(delta_q, q)
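# Note (illustrative): sin(norm_delta) / norm_delta in the update above is
# singular at norm_delta == 0 (its small-angle limit is 1). This is why the
# main block below remarks that subs() must be replaced by limit() when the
# Jacobian of this update is taken at delta = 0.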
def ComputeTangentsForLine_ForSmallAbsX(direction):
other_vector = Matrix([[1], [0], [0]])
t1 = direction.cross(other_vector).normalized()
t2 = direction.cross(t1)
return t1.col_join(t2)
def ComputeTangentsForLine_ForLargeAbsX(direction):
other_vector = Matrix([[0], [1], [0]])
t1 = direction.cross(other_vector).normalized()
t2 = direction.cross(t1)
return t1.col_join(t2)
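# Note (illustrative): the two variants above exist because the cross product
# with a fixed axis degenerates when `direction` is nearly parallel to that
# axis. For small |direction.x| the cross with (1, 0, 0) is well conditioned;
# otherwise (0, 1, 0) is used instead.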
def DirectionBorderRegularization(outer, inner1, inner2):
  proj = inner1.dot(inner2) * inner1
  mirror = proj + (proj - inner2)
return mirror - outer
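# Note (illustrative): assuming inner1 has unit length, `proj` is the
# projection of inner2 onto inner1 and `mirror` = 2 * proj - inner2 is inner2
# reflected about the inner1 direction, so the residual penalizes the border
# direction `outer` for deviating from this mirrored extrapolation of the two
# inner grid directions.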
def CentralThinPrismFisheyeProjection(
px, py, pz,
fx, fy, cx, cy,
k1, k2, k3, k4,
p1, p2, sx1, sy1,
fisheye_case):
nx = px / pz
ny = py / pz
r = sqrt(nx * nx + ny * ny)
if fisheye_case:
theta_by_r = atan(r) / r
fisheye_x = theta_by_r * nx
fisheye_y = theta_by_r * ny
else:
fisheye_x = nx
fisheye_y = ny
x2 = fisheye_x * fisheye_x
xy = fisheye_x * fisheye_y
y2 = fisheye_y * fisheye_y
r2 = x2 + y2
r4 = r2 * r2
r6 = r4 * r2
r8 = r6 * r2
radial = k1 * r2 + k2 * r4 + k3 * r6 + k4 * r8
dx = 2 * p1 * xy + p2 * (r2 + 2 * x2) + sx1 * r2
dy = 2 * p2 * xy + p1 * (r2 + 2 * y2) + sy1 * r2
distorted_x = fisheye_x + radial * fisheye_x + dx
distorted_y = fisheye_y + radial * fisheye_y + dy
return Matrix([[fx * distorted_x + cx],
[fy * distorted_y + cy]])
def CentralOpenCVProjection(
px, py, pz,
fx, fy, cx, cy,
k1, k2, k3, k4,
k5, k6, p1, p2):
nx = px / pz
ny = py / pz
x2 = nx * nx
xy = nx * ny
y2 = ny * ny
r2 = x2 + y2
r4 = r2 * r2
r6 = r4 * r2
radial = (1 + k1 * r2 + k2 * r4 + k3 * r6) / (1 + k4 * r2 + k5 * r4 + k6 * r6)
dx = 2 * p1 * xy + p2 * (r2 + 2 * x2)
dy = 2 * p2 * xy + p1 * (r2 + 2 * y2)
distorted_x = nx * radial + dx
distorted_y = ny * radial + dy
return Matrix([[fx * distorted_x + cx],
[fy * distorted_y + cy]])
def CentralRadialProjection(
spline_resolution, spline_param0, spline_param1, spline_param2, spline_param3,
fx, fy, cx, cy, p1, p2, sx1, sy1,
lx, ly, lz):
local_point = Matrix([[lx],
[ly],
[lz]])
# Radial part
  original_angle = acos(local_point.normalized()[2])
  pos_in_spline = 1. + (spline_resolution - 3.) / (math.pi / 2) * original_angle
# chunk = std::max(1, std::min(spline_resolution() - 3, static_cast<int>(pos_in_spline)));
fraction = frac(pos_in_spline) # - chunk;
radial_factor = EvalUniformCubicBSpline(
spline_param0,
spline_param1,
spline_param2,
spline_param3,
    fraction + 3.)
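  # Note (illustrative): the C++ clamping of `chunk` quoted above is left out
  # of this symbolic version; `fraction + 3.` just maps the position into the
  # [3, 4) interval that EvalUniformCubicBSpline expects.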
# Parametric part
nx = lx / lz
ny = ly / lz
x2 = nx * nx
xy = nx * ny
y2 = ny * ny
r2 = x2 + y2
dx = 2 * p1 * xy + p2 * (r2 + 2 * x2) + sx1 * r2
dy = 2 * p2 * xy + p1 * (r2 + 2 * y2) + sy1 * r2
distorted_x = nx + radial_factor * nx + dx
distorted_y = ny + radial_factor * ny + dy
return Matrix([[fx * distorted_x + cx],
[fy * distorted_y + cy]])
if __name__ == '__main__':
p00 = Matrix(3, 1, lambda i,j:Symbol('p00_%d' % (i), real=True))
p01 = Matrix(3, 1, lambda i,j:Symbol('p01_%d' % (i), real=True))
p02 = Matrix(3, 1, lambda i,j:Symbol('p02_%d' % (i), real=True))
p03 = Matrix(3, 1, lambda i,j:Symbol('p03_%d' % (i), real=True))
p10 = Matrix(3, 1, lambda i,j:Symbol('p10_%d' % (i), real=True))
p11 = Matrix(3, 1, lambda i,j:Symbol('p11_%d' % (i), real=True))
p12 = Matrix(3, 1, lambda i,j:Symbol('p12_%d' % (i), real=True))
p13 = Matrix(3, 1, lambda i,j:Symbol('p13_%d' % (i), real=True))
p20 = Matrix(3, 1, lambda i,j:Symbol('p20_%d' % (i), real=True))
p21 = Matrix(3, 1, lambda i,j:Symbol('p21_%d' % (i), real=True))
p22 = Matrix(3, 1, lambda i,j:Symbol('p22_%d' % (i), real=True))
p23 = Matrix(3, 1, lambda i,j:Symbol('p23_%d' % (i), real=True))
p30 = Matrix(3, 1, lambda i,j:Symbol('p30_%d' % (i), real=True))
p31 = Matrix(3, 1, lambda i,j:Symbol('p31_%d' % (i), real=True))
p32 = Matrix(3, 1, lambda i,j:Symbol('p32_%d' % (i), real=True))
p33 = Matrix(3, 1, lambda i,j:Symbol('p33_%d' % (i), real=True))
l00 = Matrix(6, 1, lambda i,j:Symbol('l00_%d' % (i), real=True))
l01 = Matrix(6, 1, lambda i,j:Symbol('l01_%d' % (i), real=True))
l02 = Matrix(6, 1, lambda i,j:Symbol('l02_%d' % (i), real=True))
l03 = Matrix(6, 1, lambda i,j:Symbol('l03_%d' % (i), real=True))
l10 = Matrix(6, 1, lambda i,j:Symbol('l10_%d' % (i), real=True))
l11 = Matrix(6, 1, lambda i,j:Symbol('l11_%d' % (i), real=True))
l12 = Matrix(6, 1, lambda i,j:Symbol('l12_%d' % (i), real=True))
l13 = Matrix(6, 1, lambda i,j:Symbol('l13_%d' % (i), real=True))
l20 = Matrix(6, 1, lambda i,j:Symbol('l20_%d' % (i), real=True))
l21 = Matrix(6, 1, lambda i,j:Symbol('l21_%d' % (i), real=True))
l22 = Matrix(6, 1, lambda i,j:Symbol('l22_%d' % (i), real=True))
l23 = Matrix(6, 1, lambda i,j:Symbol('l23_%d' % (i), real=True))
l30 = Matrix(6, 1, lambda i,j:Symbol('l30_%d' % (i), real=True))
l31 = Matrix(6, 1, lambda i,j:Symbol('l31_%d' % (i), real=True))
l32 = Matrix(6, 1, lambda i,j:Symbol('l32_%d' % (i), real=True))
l33 = Matrix(6, 1, lambda i,j:Symbol('l33_%d' % (i), real=True))
frac_x = Symbol("frac_x", real=True)
frac_y = Symbol("frac_y", real=True)
measurement_x = Symbol("measurement_x", real=True)
measurement_y = Symbol("measurement_y", real=True)
measurement_z = Symbol("measurement_z", real=True)
# For pose and geometry optimization:
# Local point Jacobian wrt. image_tr_global, pattern_point
image_tr_global = Matrix(7, 1, lambda i,j:Symbol('itg_%d' % (i), real=True))
pattern_point = Matrix(3, 1, lambda i,j:Symbol('p_%d' % (i), real=True))
parameters = image_tr_global.col_join(pattern_point)
functions = [lambda variables : TransformPoint(variables.extract([0, 1, 2, 3, 4, 5, 6], [0]), variables.extract([7, 8, 9], [0]))]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=True, simplify_residual=False)
# For rig pose and geometry optimization:
# Local point Jacobian wrt. camera_tr_rig, rig_tr_global, pattern_point
camera_tr_rig = Matrix(7, 1, lambda i,j:Symbol('ctr_%d' % (i), real=True))
rig_tr_global = Matrix(7, 1, lambda i,j:Symbol('rtg_%d' % (i), real=True))
pattern_point = Matrix(3, 1, lambda i,j:Symbol('p_%d' % (i), real=True))
parameters = rig_tr_global.col_join(camera_tr_rig).col_join(pattern_point)
functions = [lambda variables : RigTransformPoint(
variables.extract([7, 8, 9, 10, 11, 12, 13], [0]),
variables.extract([0, 1, 2, 3, 4, 5, 6], [0]),
variables.extract([14, 15, 16], [0]))]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=True, simplify_residual=False)
# Tangents Jacobian wrt. direction:
direction = Matrix(3, 1, lambda i,j:Symbol('dir_%d' % (i), real=True))
OptimizerBuilder([lambda variables : ComputeTangentsForLine_ForSmallAbsX(variables)],
direction,
direction,
simplify_function_jacobian=[True],
simplify_jacobian=True, simplify_residual=True)
OptimizerBuilder([lambda variables : ComputeTangentsForLine_ForLargeAbsX(variables)],
direction,
direction,
simplify_function_jacobian=[True],
simplify_jacobian=True, simplify_residual=True)
# Jacobian for CentralGenericBilinear unprojection wrt. pixel x, y
# (CentralGenericBilinear_UnprojectFromPixelCornerConv_ComputeResidualAndJacobian()):
parameters = Matrix([[frac_x],
[frac_y]])
functions = [lambda variables : CentralGenericBilinearModelUnprojection(
p00, p01, p10, p11,
variables[0], variables[1])]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=False, simplify_residual=False)
# CentralGenericBilinearDirectionCostFunction_ComputeResidualAndJacobian():
# Residual: grid.InterpolateBilinearVector(model->PixelCornerConvToGridPoint(x + 0.5f, y + 0.5f)) - measurement
# Variables are p00 .. p33
parameters = p00.col_join(
p01.col_join(
p10.col_join(
p11)))
functions = [lambda variables : CentralGenericBilinearModelFittingProblemError(
variables.extract([0, 1, 2], [0]),
variables.extract([3, 4, 5], [0]),
variables.extract([6, 7, 8], [0]),
variables.extract([9, 10, 11], [0]),
frac_x, frac_y, measurement_x, measurement_y, measurement_z)]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=False, simplify_residual=False)
# CentralGenericBSplineDirectionCostFunction_ComputeResidualAndJacobian():
# Residual: grid.InterpolateBSplineVector(model->PixelCornerConvToGridPoint(x + 0.5f, y + 0.5f)) - measurement
# Variables are p00 .. p33
parameters = p00.col_join(
p01.col_join(
p02.col_join(
p03.col_join(
p10.col_join(
p11.col_join(
p12.col_join(
p13.col_join(
p20.col_join(
p21.col_join(
p22.col_join(
p23.col_join(
p30.col_join(
p31.col_join(
p32.col_join(
p33)))))))))))))))
functions = [lambda variables : CentralGenericBSplineModelFittingProblemError(
variables.extract([0, 1, 2], [0]),
variables.extract([3, 4, 5], [0]),
variables.extract([6, 7, 8], [0]),
variables.extract([9, 10, 11], [0]),
variables.extract([12, 13, 14], [0]),
variables.extract([15, 16, 17], [0]),
variables.extract([18, 19, 20], [0]),
variables.extract([21, 22, 23], [0]),
variables.extract([24, 25, 26], [0]),
variables.extract([27, 28, 29], [0]),
variables.extract([30, 31, 32], [0]),
variables.extract([33, 34, 35], [0]),
variables.extract([36, 37, 38], [0]),
variables.extract([39, 40, 41], [0]),
variables.extract([42, 43, 44], [0]),
variables.extract([45, 46, 47], [0]),
frac_x, frac_y, measurement_x, measurement_y, measurement_z)]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=False, simplify_residual=False)
# Jacobian for CentralGenericBSpline unprojection wrt. pixel x, y
# (CentralGenericBSpline_UnprojectFromPixelCornerConv_ComputeResidualAndJacobian()):
parameters = Matrix([[frac_x],
[frac_y]])
functions = [lambda variables : CentralGenericBSplineModelUnprojection(
p00, p01, p02, p03, p10, p11, p12, p13, p20, p21, p22, p23, p30, p31, p32, p33,
variables[0], variables[1])]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=False, simplify_residual=False)
# Jacobian for direction grid border regularization:
outer = Matrix(3, 1, lambda i,j:Symbol('o_%d' % (i), real=True))
inner1 = Matrix(3, 1, lambda i,j:Symbol('i1_%d' % (i), real=True))
inner2 = Matrix(3, 1, lambda i,j:Symbol('i2_%d' % (i), real=True))
parameters = outer.col_join(inner1.col_join(inner2))
OptimizerBuilder([lambda variables : DirectionBorderRegularization(
variables.extract([0, 1, 2], [0]),
variables.extract([3, 4, 5], [0]),
variables.extract([6, 7, 8], [0]))],
parameters,
parameters,
simplify_function_jacobian=[True],
simplify_jacobian=True, simplify_residual=True)
# Derive Jacobian of local update to quaternions (as in ceres)
# TODO: This only works if replacing subs() by limit() in optimizer_builder's
# ComputeValueAndJacobian(). However, it seems that this gave wrong results in other cases ...
q = Matrix(4, 1, lambda i,j:Symbol('q_%d' % (i), real=True))
delta_q = Matrix(3, 1, lambda i,j:Symbol('dq_%d' % (i), real=True))
OptimizerBuilder([lambda variables : QuaternionLocalUpdate(variables, q)],
delta_q,
Matrix([[0], [0], [0]]),
simplify_function_jacobian=[True],
simplify_jacobian=True, simplify_residual=True)
# Derivation of LocalUpdateJacobianWrtDirection():
target_direction = Matrix(3, 1, lambda i,j:Symbol('t_%d' % (i), real=True))
base_direction = Matrix(3, 1, lambda i,j:Symbol('d_%d' % (i), real=True))
tangent1 = Matrix(3, 1, lambda i,j:Symbol('t1_%d' % (i), real=True))
tangent2 = Matrix(3, 1, lambda i,j:Symbol('t2_%d' % (i), real=True))
parameters = target_direction
parameter_values = base_direction # Taking Jacobian at base_direction
functions = [lambda target_dir : ConvertDirectionToLocalUpdate(base_direction, target_dir, tangent1, tangent2)]
OptimizerBuilder(functions,
parameters, parameter_values,
simplify_function_jacobian=[False],
simplify_jacobian=False, simplify_residual=False)
# Jacobian for NoncentralGenericBicubic unprojection wrt. pixel x, y
# (NoncentralGenericBicubic_UnprojectFromPixelCornerConv_ComputeResidualAndJacobian()):
parameters = Matrix([[frac_x],
[frac_y]])
functions = [lambda variables : NoncentralGenericBicubicModelUnprojection(
l00, l01, l02, l03, l10, l11, l12, l13, l20, l21, l22, l23, l30, l31, l32, l33,
variables[0], variables[1])]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=False, simplify_residual=False)
# Jacobian for CentralGenericBicubic unprojection wrt. pixel x, y
# (CentralGenericBicubic_UnprojectFromPixelCornerConv_ComputeResidualAndJacobian()):
parameters = Matrix([[frac_x],
[frac_y]])
functions = [lambda variables : CentralGenericBicubicModelUnprojection(
p00, p01, p02, p03, p10, p11, p12, p13, p20, p21, p22, p23, p30, p31, p32, p33,
variables[0], variables[1])]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=False, simplify_residual=False)
# CentralGenericBicubicDirectionCostFunction_ComputeResidualAndJacobian():
# Residual: grid.InterpolateBicubicVector(model->PixelCornerConvToGridPoint(x + 0.5f, y + 0.5f)) - measurement
# Variables are p00 .. p33
parameters = p00.col_join(
p01.col_join(
p02.col_join(
p03.col_join(
p10.col_join(
p11.col_join(
p12.col_join(
p13.col_join(
p20.col_join(
p21.col_join(
p22.col_join(
p23.col_join(
p30.col_join(
p31.col_join(
p32.col_join(
p33)))))))))))))))
functions = [lambda variables : CentralGenericBicubicModelFittingProblemError(
variables.extract([0, 1, 2], [0]),
variables.extract([3, 4, 5], [0]),
variables.extract([6, 7, 8], [0]),
variables.extract([9, 10, 11], [0]),
variables.extract([12, 13, 14], [0]),
variables.extract([15, 16, 17], [0]),
variables.extract([18, 19, 20], [0]),
variables.extract([21, 22, 23], [0]),
variables.extract([24, 25, 26], [0]),
variables.extract([27, 28, 29], [0]),
variables.extract([30, 31, 32], [0]),
variables.extract([33, 34, 35], [0]),
variables.extract([36, 37, 38], [0]),
variables.extract([39, 40, 41], [0]),
variables.extract([42, 43, 44], [0]),
variables.extract([45, 46, 47], [0]),
frac_x, frac_y, measurement_x, measurement_y, measurement_z)]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=False, simplify_residual=False)
# Jacobian for NoncentralGenericBSpline unprojection wrt. pixel x, y
# (NoncentralGenericBicubic_UnprojectFromPixelCornerConv_ComputeResidualAndJacobian()):
parameters = Matrix([[frac_x],
[frac_y]])
functions = [lambda variables : NoncentralGenericBSplineModelUnprojection(
l00, l01, l02, l03, l10, l11, l12, l13, l20, l21, l22, l23, l30, l31, l32, l33,
variables[0], variables[1])]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=False, simplify_residual=False)
# Jacobian for CentralThinPrismFisheyeModel::ProjectPointToPixelCornerConv() wrt. the 12 camera model parameters.
fx = Symbol("fx", real=True)
fy = Symbol("fy", real=True)
cx = Symbol("cx", real=True)
cy = Symbol("cy", real=True)
k1 = Symbol("k1", real=True)
k2 = Symbol("k2", real=True)
k3 = Symbol("k3", real=True)
k4 = Symbol("k4", real=True)
p1 = Symbol("p1", real=True)
p2 = Symbol("p2", real=True)
sx1 = Symbol("sx1", real=True)
sy1 = Symbol("sy1", real=True)
local_point = Matrix(3, 1, lambda i,j:Symbol('p_%d' % (i), real=True))
parameters = Matrix([[fx],
[fy],
[cx],
[cy],
[k1],
[k2],
[k3],
[k4],
[p1],
[p2],
[sx1],
[sy1]])
print('Fisheye case:')
functions = [lambda variables : CentralThinPrismFisheyeProjection(
local_point[0], local_point[1], local_point[2],
variables[0], variables[1], variables[2], variables[3],
variables[4], variables[5], variables[6], variables[7],
variables[8], variables[9], variables[10], variables[11], True)]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=False, simplify_residual=False)
print('Non-fisheye case:')
functions = [lambda variables : CentralThinPrismFisheyeProjection(
local_point[0], local_point[1], local_point[2],
variables[0], variables[1], variables[2], variables[3],
variables[4], variables[5], variables[6], variables[7],
variables[8], variables[9], variables[10], variables[11], False)]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=False, simplify_residual=False)
# Jacobian for CentralOpenCVModel::ProjectPointToPixelCornerConv() wrt. the 12 camera model parameters.
fx = Symbol("fx", real=True)
fy = Symbol("fy", real=True)
cx = Symbol("cx", real=True)
cy = Symbol("cy", real=True)
k1 = Symbol("k1", real=True)
k2 = Symbol("k2", real=True)
k3 = Symbol("k3", real=True)
k4 = Symbol("k4", real=True)
k5 = Symbol("k5", real=True)
k6 = Symbol("k6", real=True)
p1 = Symbol("p1", real=True)
p2 = Symbol("p2", real=True)
local_point = Matrix(3, 1, lambda i,j:Symbol('p_%d' % (i), real=True))
parameters = Matrix([[fx],
[fy],
[cx],
[cy],
[k1],
[k2],
[k3],
[k4],
[k5],
[k6],
[p1],
[p2]])
functions = [lambda variables : CentralOpenCVProjection(
local_point[0], local_point[1], local_point[2],
variables[0], variables[1], variables[2], variables[3],
variables[4], variables[5], variables[6], variables[7],
variables[8], variables[9], variables[10], variables[11])]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=False, simplify_residual=False)
# Jacobian of CentralRadialModel::Project() wrt. the local point.
fx = Symbol("fx", real=True)
fy = Symbol("fy", real=True)
cx = Symbol("cx", real=True)
cy = Symbol("cy", real=True)
p1 = Symbol("p1", real=True)
p2 = Symbol("p2", real=True)
sx1 = Symbol("sx1", real=True)
sy1 = Symbol("sy1", real=True)
spline_resolution = Symbol("spline_resolution", real=True)
spline_param0 = Symbol("spline_param0", real=True)
spline_param1 = Symbol("spline_param1", real=True)
spline_param2 = Symbol("spline_param2", real=True)
spline_param3 = Symbol("spline_param3", real=True)
local_point = Matrix(3, 1, lambda i,j:Symbol('p_%d' % (i), real=True))
parameters = Matrix([[local_point[0]],
[local_point[1]],
[local_point[2]]])
functions = [lambda variables : CentralRadialProjection(
spline_resolution, spline_param0, spline_param1, spline_param2, spline_param3,
fx, fy, cx, cy, p1, p2, sx1, sy1,
variables[0], variables[1], variables[2])]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=False, simplify_residual=False)
# Jacobian of CentralRadialModel::Project() wrt. the camera model parameters.
fx = Symbol("fx", real=True)
fy = Symbol("fy", real=True)
cx = Symbol("cx", real=True)
cy = Symbol("cy", real=True)
p1 = Symbol("p1", real=True)
p2 = Symbol("p2", real=True)
sx1 = Symbol("sx1", real=True)
sy1 = Symbol("sy1", real=True)
spline_resolution = Symbol("spline_resolution", real=True)
spline_param0 = Symbol("spline_param0", real=True)
spline_param1 = Symbol("spline_param1", real=True)
spline_param2 = Symbol("spline_param2", real=True)
spline_param3 = Symbol("spline_param3", real=True)
local_point = Matrix(3, 1, lambda i,j:Symbol('p_%d' % (i), real=True))
parameters = Matrix([[fx],
[fy],
[cx],
[cy],
[p1],
[p2],
[sx1],
[sy1],
[spline_param0],
[spline_param1],
[spline_param2],
[spline_param3]])
functions = [lambda variables : CentralRadialProjection(
spline_resolution, variables[8], variables[9], variables[10], variables[11],
variables[0], variables[1], variables[2], variables[3],
variables[4], variables[5], variables[6], variables[7],
local_point[0], local_point[1], local_point[2])]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=False, simplify_residual=False)
| 2.640625 | 3 |
generate_doc_src.py | epaulson/brick-website | 0 | 12786044 | from util import generate_doc_src, auto_dict
from rdflib import Graph
from urllib.error import URLError
# Pull the latest Brick.ttl to /static/schema
try:
g = Graph()
g.parse("https://github.com/brickschema/Brick/releases/latest/download/Brick.ttl", format="turtle")
g.serialize("static/schema/Brick.ttl", format="turtle")
except URLError as e:
print("[WARN]: Unable to pull the latest version of Brick!")
# Doc config
doc_spec = auto_dict()
# Brick v1.0.3
doc_spec["1.0.3"]["input"] = ["static/schema/1.0.3"]
doc_spec["1.0.3"]["ns_restriction"] = [
"https://brickschema.org/schema/1.0.3/Brick#",
"https://brickschema.org/schema/1.0.3/BrickFrame#",
]
doc_spec["1.0.3"]["classes"]["type_restriction"] = [
"http://www.w3.org/2002/07/owl#Class"
]
doc_spec["1.0.3"]["relationships"]["type_restriction"] = [
"http://www.w3.org/2002/07/owl#ObjectProperty"
]
# Brick v1.1
doc_spec["1.1"]["input"] = ["static/schema/1.1"]
doc_spec["1.1"]["ns_restriction"] = ["https://brickschema.org/schema/1.1/Brick#"]
doc_spec["1.1"]["classes"]["type_restriction"] = ["http://www.w3.org/2002/07/owl#Class"]
doc_spec["1.1"]["relationships"]["type_restriction"] = [
"http://www.w3.org/2002/07/owl#ObjectProperty"
]
# Brick v1.2
doc_spec["1.2"]["input"] = ["static/schema/1.2"]
doc_spec["1.2"]["ns_restriction"] = ["https://brickschema.org/schema/Brick#"]
doc_spec["1.2"]["classes"]["type_restriction"] = ["http://www.w3.org/2002/07/owl#Class"]
doc_spec["1.2"]["relationships"]["type_restriction"] = [
"http://www.w3.org/2002/07/owl#ObjectProperty"
]
if __name__ == "__main__":
generate_doc_src(doc_spec)
# Structure
# doc_spec = {
# "1.0.3": {
# "ns_restriction": ["https://brickschema.org/schema/1.0.3/Brick#", "https://brickschema.org/schema/1.0.3/BrickFrame#"]
# "classes" : {
# "roots": [],
# "type_restriction": ["http://www.w3.org/2002/07/owl#Class"]
# "ns_restriction": [
# "https://brickschema.org/schema/1.0.3/Brick#",
# "https://brickschema.org/schema/1.0.3/BrickFrame#"
# ],
# "parent_restriction": [],
# "no_expansion": [],
# "exclusions": []
# }
# }
# }
| 2.453125 | 2 |
menus/IBook/Chapter4 Digital-Filter/filter_plgs.py | Image-Py/IBook | 2 | 12786045 | from sciapp.action import Free
import scipy.ndimage as ndimg
import numpy as np, wx
# from imagepy import IPy
#matplotlib.use('WXAgg')
import matplotlib.pyplot as plt
def block(arr):
img = np.zeros((len(arr),30,30), dtype=np.uint8)
img.T[:] = arr
return np.hstack(img)
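# Note (illustrative): block() renders a 1-D signal as a horizontal strip
# image; each value becomes a 30x30 gray square, e.g. block(np.array([0, 128, 255]))
# yields a (30, 90) uint8 image with three squares of increasing brightness.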
class Temperature(Free):
title = 'Temperature Difference'
asyn = False
def run(self, para = None):
xs = np.array([1,2,3,4,5,6,7,8,9,10,11,12])
ys = np.array([1,2,1,2,2,3,8,9,8,10,9,10], dtype=np.float32)
ds = ndimg.convolve1d(ys, [0,1,-1])
lbs = ['Jan','Feb','Mar','Apr','May','June',
'Jul','Aug','Sep','Oct','Nov','Dec']
plt.xticks(xs, lbs)
plt.plot(xs, ys, '-o', label='Temperature')
plt.plot(xs, ds, '-o', label='Difference')
plt.grid()
plt.gca().legend()
plt.title('Temperature in XX')
plt.xlabel('Month')
plt.ylabel('Temperature (C)')
plt.show()
        self.app.show_img([block((ys-ys.min())*(180/(ys.max()-ys.min())))], 'Temperature')
        self.app.show_img([block((ds-ds.min())*(180/(ds.max()-ds.min())))], 'Difference')
class Shake(Free):
title = 'Shake Damping'
asyn = False
def run(self, para = None):
xs = np.array([1,2,3,4,5,6,7,8,9,10])
ys = np.array([10,-9,8,-7,6,-5,4,-3,2,-1], dtype=np.float32)
ds = ndimg.convolve1d(ys, [1/3,1/3,1/3])
print(ds)
plt.plot(xs, ys, '-o', label='Shake')
plt.plot(xs, ds, '-o', label='Damping')
plt.grid()
plt.gca().legend()
plt.title('Shake Damping')
plt.xlabel('Time')
plt.ylabel('Amplitude')
plt.show()
self.app.show_img([block(ys*10+128)], 'Shake')
self.app.show_img([block(ds*10+128)], 'Damping')
class Inertia(Free):
title = 'Psychological Inertia'
asyn = False
def run(self, para = None):
xs = np.array([1,2,3,4,5,6,7,8,9,10])
ys = np.array([90,88,93,95,91,70,89,92,94,89], dtype=np.float32)
ds = ndimg.convolve1d(ys, [1/3,1/3,1/3])
print(ds)
plt.plot(xs, ys, '-o', label='Psychological')
plt.plot(xs, ds, '-o', label='Inertia')
plt.grid()
plt.gca().legend()
plt.title('Psychological Inertia')
plt.xlabel('Time')
plt.ylabel('Score')
plt.show()
self.app.show_img([block((ys-80)*3+80)], 'Psychological')
self.app.show_img([block((ds-80)*3+80)], 'Inertia')
class GaussCore(Free):
title = 'Gaussian Core'
asyn = False
def run(self, para = None):
x, y = np.ogrid[-3:3:10j, -3:3:10j]
z = np.exp(-(x ** 2 + y ** 2)/1)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(x, y, z)
z = np.exp(-(x ** 2 + y ** 2)/4)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(x, y, z)
plt.show()
class LoGCore(Free):
title = 'Laplace of Gaussian Core'
asyn = False
def run(self, para = None):
plt.figure()
x = np.linspace(-3,3,50)
y = np.exp(-x**2)
dy = np.exp(-x**2)*(4*x**2-2)
plt.plot(x, y, label='Gauss')
plt.plot(x, -dy, label="Gauss''")
plt.grid()
plt.legend()
x, y = np.ogrid[-3:3:20j, -3:3:20j]
z = (4*x**2-2)*np.exp(-y**2-x**2)+(4*y**2-2)*np.exp(-x**2-y**2)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(x, y, -z)
plt.show()
class DogCore(Free):
title = 'Difference of Gaussian Core'
asyn = False
def run(self, para = None):
plt.figure()
x = np.linspace(-3,3,50)
y = np.exp(-x**2)
yy = np.exp(-x**2/4)/2
plt.plot(x, y, label='sigma = 1')
plt.plot(x, yy, label='sigma = 2')
plt.plot(x, y-yy, 'r', lw=3, label="Difference")
plt.grid()
plt.legend()
x, y = np.ogrid[-3:3:20j, -3:3:20j]
z = np.exp(-(x ** 2 + y ** 2)/1)-np.exp(-(x ** 2 + y ** 2)/4)/2
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(x, y, z)
plt.show()
class LaplaceSharp(Free):
title = 'Show how to Laplace Sharp'
asyn = False
def run(self, para = None):
x = np.linspace(-10,10,300)
y = np.arctan(x)
fig, axes = plt.subplots(nrows=2, ncols=2)
ax0, ax1, ax2, ax3 = axes.flatten()
ax0.set_title('y = arctan(x)')
ax0.plot(x, y)
ax0.grid()
ax1.set_title("y = arctan(x)'")
ax1.plot(x, y)
ax1.plot(x, 1/(x**2+1))
ax1.grid()
ax2.set_title("y = arctan(x)''")
ax2.plot(x, y)
ax2.plot(x, (2*x)/(x**4+2*x**2+1))
ax2.grid()
ax3.set_title("y = arctan(x) + arctan(x)''")
ax3.plot(x, y)
ax3.plot(x, y+(2*x)/(x**4+2*x**2+1))
ax3.grid()
fig.tight_layout()
plt.show()
self.app.show_img([(((y*70)+128)*np.ones((30,1))).astype(np.uint8)], 'tan(x)')
self.app.show_img([((100/(x**2+1))*np.ones((30,1))).astype(np.uint8)], "tan(x)'")
self.app.show_img([((((2*x)/(x**4+2*x**2+1)*70)+128)*
            np.ones((30,1))).astype(np.uint8)], "tan(x)''")
self.app.show_img([((((y+(2*x)/(x**4+2*x**2+1))*70)+128)*
np.ones((30,1))).astype(np.uint8)], "tan(x)+tan(x)''")
class UnSharp(Free):
title = 'Show how to Unsharp Mask'
asyn = False
def run(self, para = None):
x = np.linspace(-10,10,300)
y = np.arctan(x)
fig, axes = plt.subplots(nrows=2, ncols=2)
ax0, ax1, ax2, ax3 = axes.flatten()
gy = ndimg.gaussian_filter1d(y, 30)
ax0.set_title('y = arctan(x)')
ax0.plot(x, y)
ax0.grid()
ax1.set_title("gaussian")
ax1.plot(x, y)
ax1.plot(x, gy)
ax1.grid()
ax2.set_title("y = arctan(x) - gaussian")
ax2.plot(x, y)
ax2.plot(x, y-gy)
ax2.grid()
ax3.set_title("y = arctan(x) + diff")
ax3.plot(x, y)
ax3.plot(x, y+2*(y-gy))
ax3.grid()
fig.tight_layout()
plt.show()
self.app.show_img([((y*70+128)*np.ones((30,1))).astype(np.uint8)], 'tan(x)')
self.app.show_img([((gy*70+128)*np.ones((30,1))).astype(np.uint8)], 'gaussian')
self.app.show_img([(((y-gy)*100+128)*np.ones((30,1))).astype(np.uint8)], 'arctan(x) - gaussian')
self.app.show_img([(((y+2*(y-gy))*70+128)*np.ones((30,1))).astype(np.uint8)], "arctan(x) + diff")
plgs = [Temperature, Shake, Inertia, GaussCore, LoGCore, DogCore, LaplaceSharp, UnSharp] | 2.546875 | 3 |
tests/test_telemetry_full.py | cruigo93/client | 0 | 12786046 | <gh_stars>0
"""
telemetry full tests.
"""
import platform
import pytest
import wandb
try:
from unittest import mock
except ImportError: # TODO: this is only for python2
import mock
def test_telemetry_finish(live_mock_server, parse_ctx):
run = wandb.init()
run.finish()
ctx_util = parse_ctx(live_mock_server.get_ctx())
telemetry = ctx_util.telemetry
# finish()
assert telemetry and 2 in telemetry.get("3", [])
def test_telemetry_imports_hf(live_mock_server, parse_ctx):
run = wandb.init()
with mock.patch.dict("sys.modules", {"transformers": mock.Mock()}):
import transformers
run.finish()
ctx_util = parse_ctx(live_mock_server.get_ctx())
telemetry = ctx_util.telemetry
# hf in finish modules but not in init modules
assert telemetry and 11 not in telemetry.get("1", [])
assert telemetry and 11 in telemetry.get("2", [])
@pytest.mark.skipif(
platform.system() == "Windows", reason="test suite does not build jaxlib on windows"
)
def test_telemetry_imports_jax(live_mock_server, parse_ctx):
import jax
wandb.init()
wandb.finish()
ctx_util = parse_ctx(live_mock_server.get_ctx())
telemetry = ctx_util.telemetry
# jax in finish modules but not in init modules
assert telemetry and 12 in telemetry.get("1", [])
assert telemetry and 12 in telemetry.get("2", [])
def test_telemetry_run_organizing_init(live_mock_server, parse_ctx):
wandb.init(name="test_name", tags=["my-tag"], config={"abc": 123}, id="mynewid")
wandb.finish()
ctx_util = parse_ctx(live_mock_server.get_ctx())
telemetry = ctx_util.telemetry
assert telemetry and 13 in telemetry.get("3", []) # name
assert telemetry and 14 in telemetry.get("3", []) # id
assert telemetry and 15 in telemetry.get("3", []) # tags
assert telemetry and 16 in telemetry.get("3", []) # config
def test_telemetry_run_organizing_set(live_mock_server, parse_ctx):
run = wandb.init()
run.name = "test-name"
run.tags = ["tag1"]
wandb.config.update = True
wandb.finish()
ctx_util = parse_ctx(live_mock_server.get_ctx())
telemetry = ctx_util.telemetry
assert telemetry and 17 in telemetry.get("3", []) # name
assert telemetry and 18 in telemetry.get("3", []) # tags
assert telemetry and 19 in telemetry.get("3", []) # config update
| 2.25 | 2 |
setup.py | istreamlabs/httpie-esni-auth | 1 | 12786047 | <filename>setup.py
from setuptools import setup
try:
import multiprocessing
except ImportError:
pass
setup(
name='httpie-esni-auth',
description='ESNI auth plugin for HTTPie.',
long_description=open('README.md').read().strip(),
version='1.0.0',
author='<NAME>',
author_email='<EMAIL>',
license='Apache2',
url='https://github.com/pd/httpie-esni-auth',
download_url='https://github.com/pd/httpie-esni-auth',
py_modules=['httpie_esni_auth'],
zip_safe=False,
entry_points={
'httpie.plugins.auth.v1': [
'httpie_esni_auth = httpie_esni_auth:EsniAuthPlugin'
]
},
install_requires=[
'httpie>=0.7.0'
],
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Intended Audience :: Developers',
'Environment :: Plugins',
'License :: OSI Approved :: MIT License',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Utilities'
],
)
| 1.476563 | 1 |
census_response.py | dannmorr/greater-chicago-food-despository | 0 | 12786048 | from api_keys import CENSUS_KEY
import json
import requests
def getCensusResponse(table_url,get_ls,geo):
'''
Concatenates url string and returns response from census api query
input:
table_url (str): census api table url
get_ls (ls): list of tables to get data from
geo (str): geographic area and filter
output:
response (requests.response): api response
'''
get = 'NAME,' + ",".join(get_ls)
url = f'{table_url}get={get}&for={geo}&key={CENSUS_KEY}'
response = requests.get(url)
    return response
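# Illustrative usage (the table URL and variable code are placeholders, not
# taken from this project):
#   getCensusResponse('https://api.census.gov/data/2019/acs/acs5?',
#                     ['B01001_001E'], 'county:*')
# requests .../acs5?get=NAME,B01001_001E&for=county:*&key=<CENSUS_KEY> and
# returns the raw requests response for that query.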
def searchTable(table_json_ls, keyword_ls=list(), filter_function_ls=list()):
'''
Filters variable tables by keyword and filter
input:
table_json_ls (response.json() object): list of lists from census variable table api
keyword_ls (list): list of keyword strings
keyword filter applied to the third element of the input list (concept column)
filter_function_ls (list): list of functions that filter table_json_ls with filter method
output:
return_json_ls (list): list, same format as table_json_ls, filtered
'''
#verifies parameters are lists
assert (type(table_json_ls)==type(keyword_ls)==type(filter_function_ls)==list), "searchTable Parameters must be lists"
return_json_ls = list()
#runs filter for each function in filter_function_ls
for f in filter_function_ls:
table_json_ls = list(filter(f, table_json_ls))
#adds rows with keyword(s) in concept column to return_json_ls
for d in table_json_ls:
try:
for k in keyword_ls:
#d[2] is the concept column, d[1] is the label column
if k.lower() in d[2].lower() or k.lower() in d[1].lower():
continue
else:
break
else:
return_json_ls.append(d)
        except Exception:
continue
return return_json_ls | 3.453125 | 3 |
coqconn/client.py | liyi-david/coqconn | 0 | 12786049 | <gh_stars>0
from .conn import CoqConnection, CoqConnectionError
from .call import Add
class CoqClient:
def __init__(self, coqtop=None, args=[], timeout=2):
self.conn = CoqConnection.connect(coqtop, args, timeout)
def add(self, code):
self.conn.call(Add(code))
resps = self.conn.read_until(lambda resps: resps[-1].is_value())
v = resps[-1]
if v.succeed():
self.conn.state_id = v.data.fst
else:
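            # Note: failed responses are currently ignored here;
            # CoqConnectionError is imported above but not raised yet.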
pass
| 2.53125 | 3 |
mci/config/__init__.py | brighthive/master-client-index | 2 | 12786050 | <reponame>brighthive/master-client-index
from mci.config.config import ConfigurationFactory, Config
| 1.054688 | 1 |