max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
Forms/index.py | vgeorgo/courses-python-udemy-create-websites-using-flask | 1 | 12787251 | from flask import Flask, render_template,request,session,redirect,url_for,flash
from flask_wtf import FlaskForm
from wtforms import (StringField,SubmitField,BooleanField,DateTimeField,
RadioField,SelectField,TextField,TextAreaField)
from wtforms.validators import DataRequired
app = Flask(__name__)
# TODO improve secret_key
app.config['SECRET_KEY'] = 'MY_SECRET_KEY'
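# One common approach (sketch; assumes `import os` and an environment variable being set):
# app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY', 'MY_SECRET_KEY')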
# TODO move to separate file
class InfoForm(FlaskForm):
breed = StringField('What breed are you?', validators=[DataRequired()])
neutered = BooleanField('Have you been neutered?')
mood = RadioField('Choose a mood:', choices=[('mood_one', 'Happy'), ('mood_two', 'Excited')])
food_choice = SelectField(u'Favorite food:', choices=[('chi', 'Chicken'), ('bf', 'Beef'), ('fish', 'Fish')])
feedback = TextAreaField('Feedback')
submit = SubmitField('Submit')
@app.route('/', methods=['GET', 'POST'])
def index():
form = InfoForm()
if form.validate_on_submit():
# using session only because we haven't covered databases yet
session['breed'] = form.breed.data
session['neutered'] = form.neutered.data
session['mood'] = form.mood.data
session['food'] = form.food_choice.data
session['feedback'] = form.feedback.data
flash('Thanks for answering the form.')
return redirect(url_for('thank_you'))
return render_template('home.html', form = form)
@app.route('/thank_you')
def thank_you():
return render_template('thankyou.html')
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
if __name__ == '__main__':
app.run(debug=True)
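# Usage sketch (assuming this module is saved as index.py):
#   $ python index.py
# then open http://127.0.0.1:5000/ to fill in the form.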
| 2.796875 | 3 |
src/core.py | DrAtomic/ECE_Team9_Capstone | 0 | 12787252 | <gh_stars>0
# Project: An Algorithmic Teaching Practices and Classroom Activities Tool to Improve Education
#
# Authors: <NAME>, <NAME>, <NAME>, and <NAME>
#
# Sponsor: <NAME> and teuscher-lab.com
#
# Ownership: See https://github.com/jb-codemaker/ECE_Team9_Capstone for license details
#
#
# This file: Takes in arguments and splits, models, analyses, visualizes, email.
import os
import sys
import ray
import split_video # Splits .mp4 file
import audio_model # Diarizes and counts
import utils
import face_analyzer
import slide_analyzer
from visualize_data import visualize
import truncate_data
def main(file_name_1, file_name_2):
ray.init()
@ray.remote
def split_student(file_path1):
"""this is just for concurrency
Args:
file_path1: file_path for students
Returns:
the split function for students
"""
return split_video.split(file_path1, "students")
@ray.remote
def split_teacher(file_path2):
"""this is just for concurrency
Args:
file_path2: file_path for teachers
Returns:
the split function for teachers
"""
return split_video.split(file_path2, "teacher")
# start the process
split_funcs = [split_student.remote(file_name_1), split_teacher.remote(file_name_2)]
# block before next section
[ray.get(x) for x in split_funcs]
@ray.remote(num_gpus=1)
def student_call():
"""this is just for concurrency
Returns:
the student attentiveness function
"""
return face_analyzer.student_attentiveness()
@ray.remote
def slide_call():
"""this is just for concurrency
Returns:
the slide analyzer function
"""
return slide_analyzer.analyze_lecture()
@ray.remote
def audio_call():
"""this is just for concurrency
Returns:
the audio analyzer function
"""
return audio_model.audio_analyze()
# start the process
call_funcs = [student_call.remote(), slide_call.remote(), audio_call.remote()]
# block before next section
[ray.get(x) for x in call_funcs]
ray.shutdown()
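# Note: the @ray.remote wrappers above exist only so the split/analysis stages
# run concurrently; each ray.get() call blocks until its stage has finished.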
# TODO(#23): Clone https://github.com/tyiannak/pyAudioAnalysis.git
# Audio analysis tool
if __name__ == '__main__':
# Take args and split video/audio into separate files
import time
start_time = time.time()
try:
file_path1 = sys.argv[1]
file_path2 = sys.argv[2]
except:
raise Exception("needs files for students: file_name_1, and teacher: file_name_2 \n python core.py \"path/to/student_lecture.mov\" \"path/to/teacher_lecture.mov\"")
main(file_path1, file_path2)
print("ALL DONE")
print((time.time() - start_time) / 60)
truncate_data.truncate()
data_dir = utils.get_data_dir()
delimiter = utils.get_delimiter()
file_name = data_dir + delimiter + "all_data.csv"
visualize(file_name)
| 2.6875 | 3 |
synthraw/__init__.py | crowsonkb/synthraw | 1 | 12787253 | <filename>synthraw/__init__.py
from .synthraw import DNG, DNGError
| 1.195313 | 1 |
fill_and_upscale.py | erinxi/chika | 0 | 12787254 | import cv2
import glob
import os
# Fill in the output with upscaled frames.
# Make sure to match scale and interpolation mode.
SCALE = 2
if __name__ == "__main__":
frames = sorted(glob.glob("frames/*.jpg"))
for frame_index, frame in enumerate(frames):
output_frame = "output/{:05d}.png".format(frame_index)
if os.path.exists(output_frame):
continue
img = cv2.imread(frame, cv2.IMREAD_COLOR)
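# cv2.resize expects (width, height), hence shape[1] (width) comes before shape[0] (height)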
simg = cv2.resize(img, (img.shape[1] * SCALE, img.shape[0] * SCALE), interpolation=cv2.INTER_LANCZOS4)
cv2.imwrite(output_frame, simg)
| 3 | 3 |
analyze/routes.py | GustavoBoaz/aganalyze | 0 | 12787255 | import flask
from flask import Flask, session, render_template, redirect, url_for
from run import server
@server.route('/')
def index():
return render_template("tbases/t_index.html", startpage=True)
#@server.route('/dashboard/')
#def dashboard():
# return render_template("tbases/t_index.html", startpage=True) | 2.546875 | 3 |
label_studio/data_export/urls.py | cdpath/label-studio | 3 | 12787256 | <filename>label_studio/data_export/urls.py
"""This file and its contents are licensed under the Apache License 2.0. Please see the included NOTICE for copyright information and LICENSE for a copy of the license.
"""
from django.urls import path, include
from . import api
from . import views
app_name = 'data_export'
_api_urlpatterns = [
# export api
path('<int:pk>/export', api.ExportAPI.as_view(), name='project-export'),
path('<int:pk>/export/formats', api.ExportFormatsListAPI.as_view(), name='project-export-formats'),
# Previously exported results
path('<int:pk>/export/files', api.ProjectExportFiles.as_view(), name='project-export-files'),
]
urlpatterns = [
path('api/projects/', include((_api_urlpatterns, app_name), namespace='api-projects')),
path('api/auth/export/', api.ProjectExportFilesAuthCheck.as_view(), name='project-export-files-auth-check'),
]
| 1.921875 | 2 |
client.py | RaghuA06/ChatBox-Application | 1 | 12787257 | <reponame>RaghuA06/ChatBox-Application
#<NAME>
#May 2021
import socket
import sys
import time
socket_server = socket.socket()
server_host = socket.gethostname()
ip = socket.gethostbyname(server_host)
sport = 8000
print("This is your IP address:{}".format(ip))
server_host = input("Enter server's IP address:")
name = input("Enter your name:")
socket_server.connect((server_host, sport))
socket_server.send(name.encode())
server_name = socket_server.recv(1024)
server_name = server_name.decode()
print('\n{} has joined...'.format(server_name))
while True:
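# strictly turn-based chat: block until the server sends a message, then reply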
message = (socket_server.recv(1024)).decode()
print("\n{}:{}".format(server_name, message))
message = str(input("\nMe:"))
socket_server.send(message.encode())
| 3.4375 | 3 |
175 _labs/lab4_q1.py | lzeeorno/Python-practice175 | 0 | 12787258 | <filename>175 _labs/lab4_q1.py
from lab4_input_Stack import Stack
# assign
forwardStack = Stack()
backwardStack = Stack()
current_page = "www.cs.ualberta.ca"
print(current_page)
while(True):
user_command = input()
if(user_command == '>'):
# forward
if (forwardStack.is_empty()):
print("Error command")
continue
backwardStack.push(current_page)
current_page = forwardStack.pop()
elif(user_command=='<'):
# backward
if (backwardStack.is_empty()):
print("Error command")
continue
forwardStack.push(current_page)
current_page = backwardStack.pop()
elif(user_command=='='):
# enter any website
if (backwardStack.is_empty()):
print("the current page is ", current_page)
continue
else:
# visit new website
backwardStack.push(current_page)
current_page = user_command
forwardStack = Stack()
print(forwardStack)
print(backwardStack)
print("The current page is: ", current_page)
| 3.421875 | 3 |
utils/communication_benchmark.py | beomyeol/baechi | 4 | 12787259 | # Copyright 2020 University of Illinois Board of Trustees. All Rights Reserved.
# Author: <NAME>, DPRG (https://dprg.cs.uiuc.edu)
# This file is part of Baechi, which is released under specific terms. See file License.txt file for full license details.
# ==============================================================================
import argparse
import json
import time
import numpy as np
import tensorflow as tf
from sklearn.linear_model import LinearRegression
from tensorflow.python.client import timeline
from utils import logger
_LOGGER = logger.get_logger(__file__)
def run_benchmark(tensor_size, from_gpu_id, to_gpu_id,
warmup_count=5, num_measurements=20):
with tf.Graph().as_default():
with tf.device('/device:GPU:%d' % from_gpu_id):
x1 = tf.get_variable("var1a", [tensor_size, 1])
x2 = tf.get_variable("var1b", [1, 1])
from_op = tf.matmul(x1, x2, name='from_op')
with tf.device('/device:GPU:%d' % to_gpu_id):
y = tf.get_variable("var2", [1, 1])
out = tf.matmul(from_op, y, name='to_op')
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# warm up
for _ in range(warmup_count):
sess.run(out)
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata_list = []
for _ in range(num_measurements):
run_metadata = tf.RunMetadata()
sess.run(out, options=run_options, run_metadata=run_metadata)
run_metadata_list.append(run_metadata)
return run_metadata_list
def get_transfer_time(timeline_json, from_op_name='from_op',
to_op_name='to_op'):
data = timeline_json['traceEvents']
end_ts = start_ts = None
for dic in data:
for key, value in dic.items():
if key == 'cat' and value == 'Op':
for key, value in dic.items():
if key == "args" and value['name'] == from_op_name:
new_end_ts = dic['ts'] + dic['dur']
end_ts = max(end_ts or new_end_ts, new_end_ts)
if key == "args" and value['name'] == to_op_name:
new_start_ts = dic['ts']
start_ts = min(start_ts or new_start_ts, new_start_ts)
transfer_time = start_ts - end_ts
assert transfer_time > 0
return transfer_time
def generate_dataset(results):
transfer_times_by_size = []
for tensor_size, run_metadata_list in results:
transfer_times = []
for run_metadata in run_metadata_list:
chrome_trace_str = timeline.Timeline(
run_metadata.step_stats).generate_chrome_trace_format()
timeline_json = json.loads(chrome_trace_str)
transfer_times.append(get_transfer_time(timeline_json))
transfer_times_by_size.append((tensor_size, transfer_times))
X = []
Y = []
for x, ys in transfer_times_by_size:
for y in ys:
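# each tensor element is a float32, so multiply the element count by 4 to get bytes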
X.append(x * 4)
Y.append(y)
return X, Y
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--from_gpu_id', type=int, default=0,
help='From GPU ID')
parser.add_argument('--to_gpu_id', type=int, default=1,
help='To GPU ID')
parser.add_argument('--exponent', type=int, default=30,
help='Max tensor size. 2^(exponent).')
args = parser.parse_args()
tensor_sizes = [2 ** i for i in range(0, args.exponent)]
_LOGGER.info('Running benchmark to measure communication costs')
results = []
for tensor_size in tensor_sizes:
run_metadata_list = run_benchmark(
tensor_size, args.from_gpu_id, args.to_gpu_id)
results.append((tensor_size, run_metadata_list))
X, Y = generate_dataset(results)
reg = LinearRegression().fit([[x] for x in X], [[y] for y in Y])
print('Communication cost function: {} x + {}'.format(
reg.coef_[0][0], reg.intercept_[0]))
if __name__ == "__main__":
main()
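# Usage sketch (assumes at least two visible GPUs):
#   $ python communication_benchmark.py --from_gpu_id 0 --to_gpu_id 1 --exponent 24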
| 2.078125 | 2 |
bourbaki/application/paths.py | bourbaki-py/application | 0 | 12787260 | # coding:utf-8
import os
from pathlib import Path
from io import StringIO, BytesIO, IOBase
from typing import Union
FileTypes = (IOBase,)
FileType = Union[FileTypes]
DEFAULT_FILENAME_DATE_FMT = (
"%Y-%m-%d_%H:%M:%S"
) # Format for dates appended to files or dirs.
# This will lexsort in temporal order.
DEFAULT_FILENAME_N_DIGITS = 6
def ensure_dir(dir_):
try:
if not os.path.exists(dir_):
os.mkdir(dir_)
return dir_
except FileExistsError:
if os.path.isfile(dir_):
raise NotADirectoryError("{} exists but is not a directory".format(dir_))
return None
def get_file(file_or_path, mode=None, allow_dir=False):
"""if a file object is passed, return it unaltered, with a flag indicating that the file should not be closed
by the caller (the opener may have other uses for it). In this case, if mode is also passed, it is checked against
the existing file's mode and a ValueError is thrown if they disagree.
If a string is passed, it is treated as a path and a file at that location is opened and returned with a flag
indicating that the file should be closed by the caller.
"""
if not isinstance(file_or_path, FileTypes):
if isinstance(file_or_path, (str, Path)) and os.path.isdir(file_or_path):
if not allow_dir:
raise IsADirectoryError(
"allow_dir=False but {} is a directory".format(file_or_path)
)
else:
close = False
file = Path(file_or_path)
else:
close = True
if mode:
file = open(file_or_path, mode)
else:
file = open(file_or_path)
else:
close = False
file = file_or_path
if mode is not None:
if hasattr(file, "mode") and mode != file.mode:
raise ValueError(
"mode {} was requested, but the given file has mode {}".format(
mode, file.mode
)
)
elif isinstance(file, StringIO) and "b" in mode:
raise ValueError(
"mode {} was requested, but the given file is a {}, which supports only text IO".format(
mode, type(file)
)
)
elif isinstance(file, BytesIO) and "b" not in mode:
raise ValueError(
"mode {} was requested, but the given file is a {}, which supports only binary IO".format(
mode, type(file)
)
)
return file, close
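# Example (sketch):
#   fh, close = get_file("results.txt", mode="w")
#   try:
#       fh.write("...")
#   finally:
#       if close:
#           fh.close()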
def is_newer(file1, file2):
if file2 is None:
return True
mtime1 = os.stat(file1).st_mtime
if isinstance(file2, (str, bytes, Path)):
mtime2 = os.stat(file2).st_mtime
elif isinstance(file2, (float, int)):
mtime2 = file2
else:
raise TypeError(
"file2 must be str, pathlib.Path, None, float, or int; got {}".format(
type(file2)
)
)
return mtime1 > mtime2
def dir_prefix_and_ext(prefix, ext=None):
dir_ = os.path.dirname(prefix)
if ext is None:
prefix, ext = os.path.splitext(os.path.basename(prefix))
if prefix.startswith(".") and ext == "":
prefix, ext = ext, prefix
else:
prefix = os.path.basename(prefix)
return dir_, prefix, ext
def path_with_ext(file_path, ext=None, disambiguate=False):
file_path, ext_ = _path_with_ext(file_path, ext)
if not ext_ and not ext:
if disambiguate:
file_path, ext_ = _path_with_ext(disambiguate_path(file_path), ext)
else:
raise ValueError(
"no extension specified for file path {}; try passing one manually via the "
"`ext` arg or specify `disambiguate=True`".format(file_path)
)
else:
ext_ = ext or ext_
return file_path, ext_
def _path_with_ext(path, ext=None):
name, ext_ = os.path.splitext(path)
if ext_:
if ext is not None and ext_ != ext:
raise ValueError(
"ambiguous extension; config_file has extension {} while ext is {}".format(
ext_, ext
)
)
ext_ = ext or ext_
p, e = name + ext_, ext_
return p, e
def disambiguate_path(file_path):
"""Find the unique file with path `file_path`, excluding extensions. If there is no such file, raise
FileNotFoundError"""
dir_, name, ext = dir_prefix_and_ext(file_path)
dir_ = dir_ or None # don't allow empty string for dir
paths = [path for path in os.listdir(dir_) if os.path.splitext(path)[0] == name]
if len(paths) == 0:
raise FileNotFoundError(
"No file with any extension found at {}".format(file_path)
)
elif len(paths) != 1:
raise FileNotFoundError(
"Amiguous config path {}; multiple matches found: {}".format(
file_path, paths
)
)
p = os.path.join(dir_ or "", paths[0])
return p
| 3.1875 | 3 |
roengine/net/cUDP.py | ROTARTSI82/RoEngine | 1 | 12787261 | <gh_stars>1-10
# -*- coding: UTF-8 -*-
from __future__ import print_function
import rencode
import logging
import socket
from twisted.internet import reactor
from twisted.internet.protocol import DatagramProtocol
from roengine.config import LOG_NO_HANDLERS, LOG_GETS, LOG_SENDS
__all__ = ['ServerUDP', 'EnqueUDPClient', 'UDPServerFactory', 'adopt_udp_port']
load = rencode.loads
dump = rencode.dumps
cUDPServerLogger = logging.getLogger('cUDP.ServFac')
cUDPClientLogger = logging.getLogger('cUDP.CliFac')
cUDPServProtLogger = logging.getLogger('cUDP.ServProt')
def adopt_udp_port(cls, addr=('127.0.0.1', 3000), args=(), kwargs={}):
"""
From https://twistedmatrix.com/documents/15.1.0/core/howto/udp.html
:param cls:
:param addr:
:return: None
"""
portSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Make the port non-blocking and start it listening.
portSocket.setblocking(False)
portSocket.bind(addr)
# Now pass the port file descriptor to the reactor
port = reactor.adoptDatagramPort(portSocket.fileno(), socket.AF_INET, cls(*args, **kwargs))
# The portSocket should be cleaned up by the process that creates it.
return portSocket, port
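# Example (sketch; assumes the caller starts the reactor separately):
#   sock, port = adopt_udp_port(UDPServerFactory, ('127.0.0.1', 3000),
#                               args=('127.0.0.1', 3000))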
class ServerUDP(object):
factory = None
address = None
def __init__(self):
self.send_que = []
def enque(self, msg):
self.send_que.append(msg)
def network_ping(self, msg):
print ("PING from UDPServerProtocol")
def empty_que(self):
try:
if self.send_que and self.factory is not None:
self.factory.transport.write(dump(self.send_que), self.address)
if LOG_SENDS:
cUDPServProtLogger.debug("%s SEND %s --> %s", self.address, self.send_que, self.address)
self.send_que = []
except:
cUDPServProtLogger.exception("%s empty_que() failed.", self.address)
class UDPServerFactory(DatagramProtocol):
protocol = ServerUDP
def __init__(self, host, port, maxClients=1):
self.max_clients = maxClients
self.clients = []
self.client_protocols = {}
self.arrivals_confirmed = {}
self.host, self.port = host, port
self.address = host, port
def _send(self, msg, addr):
try:
self.transport.write(dump([msg, ]), addr)
if LOG_SENDS:
cUDPServerLogger.debug("%s SEND %s --> %s", self.address, msg, addr)
except:
cUDPServerLogger.exception("%s _send() failed.", self.address)
def empty_all(self):
for cp in self.client_protocols.values():
cp.empty_que()
def load(self):
reactor.listenUDP(self.port, self, interface=self.host)
cUDPServerLogger.info("Loading UDPServerFactory%s", self.address)
def datagramReceived(self, message, address):
if address not in self.clients:
cUDPServerLogger.critical("%s Got packet from unknown Client%s.", self.address, address)
cUDPServerLogger.critical("%s Attempting to build...", self.address)
self.build_protocol(address)
try:
message = load(message)
for packet in message:
try:
if hasattr(self.client_protocols[address], "network_" + packet["action"]):
getattr(self.client_protocols[address], "network_" + packet["action"])(packet)
elif LOG_NO_HANDLERS:
cUDPServerLogger.critical('%s Got packet without handler: %s', self.address, packet)
except Exception as e:
cUDPServerLogger.exception("%s Protocol's network_ failed on: %s", self.address, packet)
try:
if hasattr(self, "network_" + packet["action"]):
getattr(self, "network_" + packet["action"])(packet, address)
elif LOG_NO_HANDLERS:
cUDPServerLogger.critical('%s Got packet without handler: %s', self.address, packet)
except Exception as e:
cUDPServerLogger.exception("%s Self's network_ failed on: %s", self.address, packet)
except Exception as e:
cUDPServerLogger.exception("%s Invalid packet from %s: %s", self.address, address, message)
if LOG_GETS:
cUDPServerLogger.debug("%s GET %s <-- %s", self.address, message, address)
def send_to_all(self, message):
[self.send_to_addr(message, addr) for addr in self.clients]
def verify_send_to_all(self, message):
[self.verify_send(message, addr) for addr in self.clients]
def send_to_addr(self, message, addr):
if self.transport is not None:
if addr in self.client_protocols:
self.client_protocols[addr].enque(message)
else:
cUDPServerLogger.critical('%s send_to_addr%s failed. Trying _send()',
self.address, (message, addr))
self._send(message, addr)
# self.transport.write(dump(message), addr)
else:
cUDPServerLogger.critical('%s send_to_addr%s failed due to null transport. Trying _send()',
self.address, (message, addr))
self._send(message, addr)
def network_connect_notify(self, message, address):
if address not in self.clients:
cUDPServerLogger.info("%s Got new Client%s", self.address, address)
self.build_protocol(address)
def network_ping(self, msg, addr):
print ("PING! from UDPServerFactory")
def build_protocol(self, addr):
if addr not in self.clients:
np = self.protocol()
np.factory = self
np.address = addr
if len(self.clients) < self.max_clients:
self.clients.append(addr)
self.client_protocols[addr] = np
cUDPServerLogger.info('%s Successfully built Client%s', self.address, addr)
return True
else:
cUDPServerLogger.info('%s Kicking Client%s: Game already full', self.address, addr)
np.enque({'action': 'kick', 'reason': 'Game already full'})
np.empty_que()
else:
cUDPServerLogger.critical('%s Client%s is already built!', self.address, addr)
return False
def network_verify_send(self, message, address):
message['action'] = 'confirm_arrival'
self.send_to_addr(message, address)
cUDPServerLogger.info("%s verify_send%s", self.address, (message, address))
'''
if message['id'] in self.handled_ids:
cUDPServerLogger.critical("%s Got repeat verify_send! Ignoring...", self.address)
return
self.handled_ids.append(message['id'])
'''
try:
if hasattr(self.client_protocols[address], "network_" + message['data']["action"]):
getattr(self.client_protocols[address], "network_" + message['data']["action"])(message['data'])
elif LOG_NO_HANDLERS:
cUDPServerLogger.critical('%s Got packet without handler: %s', self.address, message)
except Exception as e:
cUDPServerLogger.exception("%s Protocol's network_%s(%s) failed:", self.address,
"?" if 'action' not in message else message['data']['action'], message)
try:
if hasattr(self, "network_" + message['data']["action"]):
getattr(self, "network_" + message['data']["action"])(message['data'], address)
elif LOG_NO_HANDLERS:
cUDPServerLogger.critical('%s Got packet without handler: %s', self.address, message)
except Exception as e:
cUDPServerLogger.exception("%s Self's network_%s(%s) failed:", self.address,
"?" if 'action' not in message else message['data']['action'], message)
def confirm_arrivals(self, message, address, retry):
if not self.arrivals_confirmed[message['id']]:
cUDPServerLogger.critical("%s Cannot confirm packet: %s Retrying in %s", self.address, message, retry)
self.send_to_addr(message, address)
reactor.callLater(retry, self.confirm_arrivals, message, address, retry)
else:
cUDPServerLogger.info("%s Packet was confirmed: %s", self.address, message)
def verify_send(self, message, address, retry=1):
keys = sorted(self.arrivals_confirmed.keys())
pid = keys[-1] + 1 if keys else 0  # allocate a fresh packet id
message = {"action": "verify_send", "id": pid, "data": message}
self.send_to_addr(message, address)
self.arrivals_confirmed[pid] = False
reactor.callLater(retry, self.confirm_arrivals, message, address, retry)
def network_confirm_arrival(self, message, address):
if self.arrivals_confirmed[message['id']]:
cUDPServerLogger.critical("%s Got repeat confirm_arrival! Ignoring...", self.address)
return
self.arrivals_confirmed[message['id']] = True
try:
if hasattr(self, "verified_" + message["data"]["action"]):
getattr(self, "verified_" + message["data"]["action"])(message['data'], address)
elif LOG_NO_HANDLERS:
cUDPServerLogger.critical('%s Got packet without verified_ handler: %s', self.address, message)
except Exception as e:
cUDPServerLogger.exception("%s Self's verified_ failed on: ", self.address, message)
try:
if hasattr(self.client_protocols[address], "verified_" + message["data"]["action"]):
getattr(self.client_protocols[address],
"verified_" + message["data"]["action"])(message['data'], address)
elif LOG_NO_HANDLERS:
cUDPServerLogger.critical('%s Got packet without verified_ handler: %s', self.address, message)
except Exception as e:
cUDPServerLogger.exception("%s Protocol's verified_ failed on: %s", self.address, message)
class EnqueUDPClient(DatagramProtocol):
def __init__(self, host, port):
self.host, self.port = host, port
self.address = host, port
self.connection_success = False
self.send_que = []
self.handled_ids = []
self.arrivals_confirmed = {}
def load(self):
reactor.listenUDP(0, self, interface=self.host)
cUDPClientLogger.info("Loading EnqueUDPClient%s", self.address)
def startProtocol(self):
self.transport.connect(self.host, self.port)
self.verify_send({"action": "connect_notify"})
def datagramReceived(self, message, address):
try:
message = load(message)
for packet in message:
try:
if hasattr(self, "network_" + packet["action"]):
getattr(self, "network_" + packet["action"])(packet, address)
elif LOG_NO_HANDLERS:
cUDPClientLogger.critical('%s Got packet without handler: %s', self.address, message)
except Exception as e:
cUDPClientLogger.exception("%s network_ failed on: %s", self.address, message)
except Exception as e:
cUDPClientLogger.exception("%s Invalid packet from %s: %s", self.address, address, message)
if LOG_GETS:
cUDPClientLogger.debug("%s GET %s <-- %s", self.address, message, address)
def enque(self, message):
self.send_que.append(message)
def network_ping(self, msg, addr):
print ("PING! from Client")
def empty_que(self):
try:
if self.send_que:
self.transport.write(dump(self.send_que))
if LOG_SENDS:
cUDPClientLogger.debug("%s SEND %s --> %s", self.address, self.send_que, self.address)
self.send_que = []
except:
cUDPClientLogger.exception("%s empty_que() failed.", self.address)
# reactor.callLater(retry, self.empty_que, retry)
def _send(self, message):
if self.transport is not None:
self.transport.write(dump([message, ]))
if LOG_SENDS:
cUDPClientLogger.debug("%s SEND %s --> %s", self.address, message, self.address)
else:
cUDPClientLogger.critical("%s _send() failed due to null transport", self.address)
def confirm_arrivals(self, message, retry=1):
if not self.arrivals_confirmed[message['id']]:
cUDPClientLogger.critical("%s Cannot confirm packet: %s Retrying in %s", self.address, message, retry)
self.enque(message)
reactor.callLater(retry, self.confirm_arrivals, message, retry)
else:
cUDPClientLogger.info("%s Packet was confirmed: %s", self.address, message)
def verify_send(self, message, retry=1):
keys = sorted(self.arrivals_confirmed.keys())
pid = keys[-1] + 1 if keys else 0  # allocate a fresh packet id
message = {"action": "verify_send", "id": pid, "data": message}
self.enque(message)
self.arrivals_confirmed[pid] = False
reactor.callLater(retry, self.confirm_arrivals, message, retry)
def network_kick(self, message, address):
cUDPClientLogger.info("%s Was kicked: '%s'.", self.address,
message['reason'] if 'reason' in message else '?')
reactor.stop()
def network_confirm_arrival(self, message, address):
if self.arrivals_confirmed[message['id']]:
cUDPClientLogger.critical("%s Got repeat confirm_arrival! Ignoring...", self.address)
return
self.arrivals_confirmed[message['id']] = True
try:
if hasattr(self, "verified_" + message["data"]["action"]):
getattr(self, "verified_" + message["data"]["action"])(message['data'], address)
elif LOG_NO_HANDLERS:
cUDPClientLogger.critical('%s Got packet without handler: %s', self.address, message)
except Exception as e:
cUDPClientLogger.exception("%s verify_ failed on: %s", self.address, message)
def network_verify_send(self, message, address):
message['action'] = 'confirm_arrival'
self.enque(message)
'''
if message['id'] in self.handled_ids:
cUDPClientLogger.critical("%s Got repeat verify_send! Ignoring...", self.address)
return
self.handled_ids.append(message['id'])
'''
try:
if hasattr(self, "network_" + message['data']["action"]):
getattr(self, "network_" + message['data']["action"])(message['data'], address)
elif LOG_NO_HANDLERS:
cUDPClientLogger.critical('%s Got packet without handler: %s', self.address, message)
except Exception as e:
cUDPClientLogger.exception("%s network_ failed on: %s", self.address, message)
| 2.40625 | 2 |
arcade_solutions/the_core/timed_reading.py | nickaigi/automatic-dollop | 0 | 12787262 | <filename>arcade_solutions/the_core/timed_reading.py
import string
def timed_reading(max_length, text):
count = 0
text = text.translate(str.maketrans('', '', string.punctuation))
for w in text.split(' '):
if w and len(w) <= max_length:
count += 1
return count
if __name__ == '__main__':
max_length = 4
text = "The Fox asked the stork, 'How is the soup?'"
print(timed_reading(max_length, text))
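# prints 7 for the sample sentence above (words of length <= 4 after stripping punctuation)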
| 3.3125 | 3 |
PythonVSCode/DP_GFG/LongestPalindromicSubsequence.py | porcelainruler/InterviewPrep | 0 | 12787263 | <filename>PythonVSCode/DP_GFG/LongestPalindromicSubsequence.py<gh_stars>0
from sys import stdin
'''
import java.util.*;
import java.lang.*;
import java.io.*;
class GFG
{
public static int dp(String str1, String str2, int n) {
int[][] dp = new int[n+1][n+1];
for(int i=0 ; i<n+1 ; i++) {
for(int j=0 ; j<n+1 ; j++) {
if(i == 0 || j == 0) {
dp[i][j] = 0;
} else if(str1.charAt(i-1) == str2.charAt(j-1)) {
dp[i][j] = dp[i-1][j-1] + 1;
} else {
dp[i][j] = Integer.max(dp[i-1][j], dp[i][j-1]);
}
}
}
return dp[n][n];
}
public static void main (String[] args)
{
Scanner sc = new Scanner(System.in);
int t = sc.nextInt();
sc.nextLine();
for(int i=0 ; i<t ; i++) {
String str1 = sc.nextLine();
StringBuilder st = new StringBuilder(str1);
st = st.reverse();
String str2 = st.toString();
int ans = dp(str1, str2, str1.length());
System.out.println(ans);
}
}
}
'''
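# LPS(s) is computed as LCS(s, reverse(s)); dp[i][j] holds the LCS length of the
# first i characters of sarr1 and the first j characters of sarr2.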
def dp(sarr1: str, sarr2: str, n: int):
dp = [[0]*(n+1) for i in range(n+1)]
for i in range(n+1):
for j in range(n+1):
if i==0 or j==0:
dp[i][j] = 0
elif sarr1[i-1] == sarr2[j-1]:
dp[i][j] = dp[i-1][j-1] + 1
else:
dp[i][j] = max(dp[i-1][j], dp[i][j-1])
# for i in range(n+1):
# for j in range(n+1):
# print(dp[i][j], end = ' ')
# print()
# print()
return dp[n][n]
def main():
t = int(input())
for i in range(t):
sarr1 = list(map(str, stdin.readline().split()))[0]
ans = dp(sarr1, sarr1[::-1], len(sarr1))
print(ans)
if __name__ == '__main__':
main() | 2.546875 | 3 |
Web/field/api/views.py | Pancras-Zheng/Graduation-Project | 37 | 12787264 | import json
from django.shortcuts import render
from django.http import HttpResponse
import config.config as config
import user.models as database
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.csrf import requires_csrf_token
from django.views.decorators.csrf import ensure_csrf_cookie
# Create your views here.
HTTP_RESULT_SUCCESS = 1
HTTP_RESULT_PERMISSION_DENIED = 0
HTTP_RESULT_NO_SUCH_API = -1
HTTP_RESULT_PARAMS_ERROR = -2
HTTP_RESULT_LOGIN_REQUIRE = -3
HTTP_RESULT_UNKNOWN_ERROR = -4
JSON_PERMISSION_DENIED = '{"result":0}'
JSON_NO_SUCH_API = '{"result":-1}'
JSON_PARAMS_INCORRENCT = '{"result":-2}'
JSON_LOGIN_REQUIRE = '{"result":-3}'
JSON_UNKNOWN_ERROR = '{"result":-4}'
DICT_RESULT_CODE = {
HTTP_RESULT_PERMISSION_DENIED: JSON_PERMISSION_DENIED,
HTTP_RESULT_NO_SUCH_API: JSON_NO_SUCH_API,
HTTP_RESULT_PARAMS_ERROR: JSON_PARAMS_INCORRENCT,
HTTP_RESULT_LOGIN_REQUIRE: JSON_LOGIN_REQUIRE,
HTTP_RESULT_UNKNOWN_ERROR: JSON_UNKNOWN_ERROR,
}
KEY_RESULT = 'result'
KEY_TYPE = 'type'
KEY_OPERATION_RESULT = 'operation_result'
TYPE_DEVICE = 1
TYPE_LIGHT = 2
TYPE_TEMPERATURE = 3
TYPE_HUMIDITY = 4
TYPE_DIRT_HUMIDITY = 5
TYPE_FERTILIZATION = 6
TYPE_WATER = 7
TYPE_USER = 8
# Handler functions
def __checkUser(request):
if request.user.is_authenticated:
return HTTP_RESULT_SUCCESS
return HTTP_RESULT_LOGIN_REQUIRE
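# Every view below returns a JSON body whose "result" field is one of the HTTP_RESULT_* codes above.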
@csrf_exempt
def device(request):
request_result = __checkUser(request)
if request_result == HTTP_RESULT_SUCCESS:
custom_device_config = config.get_device_config()
if request.method == 'POST': # if this is a POST request
if config.KEY_DEVICE_STATE_1 in request.POST:
custom_device_config.device1State = int(request.POST.get(config.KEY_DEVICE_STATE_1, config.STATE_OFF))
print('自定义设备1被设置为 ', custom_device_config.device1State)
if config.KEY_DEVICE_STATE_2 in request.POST:
custom_device_config.device2State = int(request.POST.get(config.KEY_DEVICE_STATE_2, config.STATE_OFF))
print('自定义设备2被设置为 ', custom_device_config.device2State)
if config.KEY_DEVICE_STATE_3 in request.POST:
custom_device_config.device3State = int(request.POST.get(config.KEY_DEVICE_STATE_3, config.STATE_OFF))
print('自定义设备3被设置为 ', custom_device_config.device3State)
if config.KEY_DEVICE_STATE_4 in request.POST:
custom_device_config.device4State = int(request.POST.get(config.KEY_DEVICE_STATE_4, config.STATE_OFF))
print('自定义设备4被设置为 ', custom_device_config.device4State)
config.set_device_config_obj(custom_device_config)
json_obj = {
KEY_RESULT: HTTP_RESULT_SUCCESS,
config.KEY_DEVICE_STATE_1: int(custom_device_config.device1State),
config.KEY_DEVICE_STATE_2: int(custom_device_config.device2State),
config.KEY_DEVICE_STATE_3: int(custom_device_config.device3State),
config.KEY_DEVICE_STATE_4: int(custom_device_config.device4State),
}
return HttpResponse(json.dumps(json_obj))
else:
return HttpResponse(DICT_RESULT_CODE.get(request_result, JSON_UNKNOWN_ERROR))
@csrf_exempt
def light(request):
request_result = __checkUser(request)
if request_result == HTTP_RESULT_SUCCESS:
light_config = config.get_light_config()
if request.method == 'POST':
if config.KEY_IS_LIGHT_ON in request.POST:
light_config.isLightOn = request.POST.get(config.KEY_IS_LIGHT_ON, config.STATE_OFF)
print('环境光设置为', light_config.isLightOn)
if config.KEY_LIGHT_COLOR in request.POST:
light_config.lightColor = request.POST.get(config.KEY_LIGHT_COLOR, config.STATE_OFF)
print('环境光颜色设置为', light_config.lightColor)
if config.KEY_LIGHT_LEVEL in request.POST:
light_config.lightLevel = request.POST.get(config.KEY_LIGHT_LEVEL, config.STATE_OFF)
print('环境光亮度设置为', light_config.lightLevel)
config.set_light_config_obj(light_config)
json_obj = {
KEY_RESULT: HTTP_RESULT_SUCCESS,
config.KEY_IS_LIGHT_ON: light_config.isLightOn,
config.KEY_LIGHT_COLOR: light_config.lightColor,
config.KEY_LIGHT_LEVEL: light_config.lightLevel,
}
return HttpResponse(json.dumps(json_obj))
else:
return HttpResponse(DICT_RESULT_CODE.get(request_result, JSON_UNKNOWN_ERROR))
@csrf_exempt
def temperature(request):
request_result = __checkUser(request)
if request_result == HTTP_RESULT_SUCCESS:
temperature_config = config.get_temperature_config()
if request.method == 'POST':
if config.KEY_IS_AUTO_CONTROL in request.POST:
temperature_config.isAutoControl = request.POST.get(config.KEY_IS_AUTO_CONTROL, config.STATE_OFF)
print('自动温度控制被设置为 ' + temperature_config.isAutoControl)
if config.KEY_UPPER_LIMIT in request.POST:
temperature_config.upperLimit = request.POST.get(config.KEY_UPPER_LIMIT, 30)
print('自动温度控制温度上限被设置为 ' + temperature_config.upperLimit)
if config.KEY_LOWER_LIMIT in request.POST:
temperature_config.lowerLimit = request.POST.get(config.KEY_LOWER_LIMIT, 0)
print('自动温度控制温度下限被设置为 ' + temperature_config.lowerLimit)
if config.KEY_UPPER_ACTION in request.POST:
temperature_config.upperActions = request.POST.get(config.KEY_UPPER_ACTION, config.EMPTY_ACTION)
print('自动温度控制温度上限执行动作被设置为 ' + temperature_config.upperActions)
if config.KEY_LOWER_ACTION in request.POST:
temperature_config.lowerActions = request.POST.get(config.KEY_LOWER_ACTION, config.EMPTY_ACTION)
print('自动温度控制温度下限执行动作被设置为 ' + temperature_config.lowerActions)
config.set_temperature_config_obj(temperature_config)
json_obj = {
KEY_RESULT: HTTP_RESULT_SUCCESS,
KEY_TYPE: TYPE_TEMPERATURE,
config.KEY_IS_AUTO_CONTROL: temperature_config.isAutoControl,
config.KEY_UPPER_LIMIT: temperature_config.upperLimit,
config.KEY_LOWER_LIMIT: temperature_config.lowerLimit,
config.KEY_UPPER_ACTION: temperature_config.upperActions,
config.KEY_LOWER_ACTION: temperature_config.lowerActions,
}
return HttpResponse(json.dumps(json_obj))
else:
return HttpResponse(DICT_RESULT_CODE.get(request_result, JSON_UNKNOWN_ERROR))
@csrf_exempt
def humidity(request):
request_result = __checkUser(request)
if request_result == HTTP_RESULT_SUCCESS:
humidity_config = config.get_humidity_config()
if request.method == 'POST':
if config.KEY_IS_AUTO_CONTROL in request.POST:
humidity_config.isAutoControl = request.POST.get(config.KEY_IS_AUTO_CONTROL, config.STATE_OFF)
print('自动湿度控制被设置为 ' + humidity_config.isAutoControl)
if config.KEY_UPPER_LIMIT in request.POST:
humidity_config.upperLimit = request.POST.get(config.KEY_UPPER_LIMIT,
config.DEFAULT_HUMIDITY_UPPER_LIMIT)
print('自动湿度控制温度上限被设置为 ' + humidity_config.upperLimit)
if config.KEY_LOWER_LIMIT in request.POST:
humidity_config.lowerLimit = request.POST.get(config.KEY_LOWER_LIMIT,
config.DEFAULT_HUMIDITY_LOWER_LIMIT)
print('自动湿度控制湿度下限被设置为 ' + humidity_config.lowerLimit)
if config.KEY_UPPER_ACTION in request.POST:
humidity_config.upperActions = request.POST.get(config.KEY_UPPER_ACTION, config.DEFAULT_ACTIONS)
print('自动湿度动作上限被设置为 ', humidity_config.upperActions)
if config.KEY_LOWER_ACTION in request.POST:
humidity_config.lowerActions = request.POST.get(config.KEY_LOWER_ACTION, config.DEFAULT_ACTIONS)
print('自动湿度动作下限被设置为 ', humidity_config.upperActions)
config.set_dumidity_config_obj(humidity_config)
json_obj = {
KEY_RESULT: HTTP_RESULT_SUCCESS,
KEY_TYPE: TYPE_HUMIDITY,
config.KEY_IS_AUTO_CONTROL: humidity_config.isAutoControl,
config.KEY_UPPER_LIMIT: humidity_config.upperLimit,
config.KEY_LOWER_LIMIT: humidity_config.lowerLimit,
config.KEY_UPPER_ACTION: humidity_config.upperActions,
config.KEY_LOWER_ACTION: humidity_config.lowerActions,
}
return HttpResponse(json.dumps(json_obj))
else:
return HttpResponse(DICT_RESULT_CODE.get(request_result, JSON_UNKNOWN_ERROR))
@csrf_exempt
def dirt_humidity(request):
request_result = __checkUser(request)
if request_result == HTTP_RESULT_SUCCESS:
dirt_humidity_config = config.get_dirt_humidity_config()
if request.method == 'POST':
if config.KEY_IS_AUTO_CONTROL in request.POST:
dirt_humidity_config.isAutoControl = request.POST.get(config.KEY_IS_AUTO_CONTROL, config.STATE_OFF)
print('自动土壤湿度控制被设置为 ' + dirt_humidity_config.isAutoControl)
if config.KEY_UPPER_LIMIT in request.POST:
dirt_humidity_config.upperLimit = request.POST.get(config.KEY_UPPER_LIMIT,
config.DEFAULT_HUMIDITY_UPPER_LIMIT)
print('自动土壤湿度控制温度上限被设置为 ' + dirt_humidity_config.upperLimit)
if config.KEY_LOWER_LIMIT in request.POST:
dirt_humidity_config.lowerLimit = request.POST.get(config.KEY_LOWER_LIMIT,
config.DEFAULT_HUMIDITY_LOWER_LIMIT)
print('自动土壤湿度控制湿度下限被设置为 ' + dirt_humidity_config.lowerLimit)
if config.KEY_UPPER_ACTION in request.POST:
dirt_humidity_config.upperActions = request.POST.get(config.KEY_UPPER_ACTION, config.DEFAULT_ACTIONS)
print('自动土壤湿度动作上限被设置为 ', dirt_humidity_config.upperActions)
if config.KEY_LOWER_ACTION in request.POST:
dirt_humidity_config.lowerActions = request.POST.get(config.KEY_LOWER_ACTION, config.DEFAULT_ACTIONS)
print('自动土壤湿度动作下限被设置为 ', dirt_humidity_config.upperActions)
config.set_dirt_humidity_config_obj(dirt_humidity_config)
json_obj = {
KEY_RESULT: HTTP_RESULT_SUCCESS,
KEY_TYPE: TYPE_DIRT_HUMIDITY,
config.KEY_IS_AUTO_CONTROL: dirt_humidity_config.isAutoControl,
config.KEY_UPPER_LIMIT: dirt_humidity_config.upperLimit,
config.KEY_LOWER_LIMIT: dirt_humidity_config.lowerLimit,
config.KEY_UPPER_ACTION: dirt_humidity_config.upperActions,
config.KEY_LOWER_ACTION: dirt_humidity_config.lowerActions,
}
return HttpResponse(json.dumps(json_obj))
else:
return HttpResponse(DICT_RESULT_CODE.get(request_result, JSON_UNKNOWN_ERROR))
@csrf_exempt
def fertilization(request):
request_result = __checkUser(request)
if request_result == HTTP_RESULT_SUCCESS:
fertilization_config = config.get_fertilization_config()
if request.method == 'POST':
if config.KEY_IS_AUTO_CONTROL in request.POST:
fertilization_config.isAutoControl = request.POST.get(config.KEY_IS_AUTO_CONTROL,
config.DEFAULT_AUTO_CONTROL)
print('自动施肥被设置为', fertilization_config.isAutoControl)
if config.KEY_REPEAT_TYPE in request.POST:
fertilization_config.repeatType = request.POST.get(config.KEY_REPEAT_TYPE, config.DEFAULT_REPEAT_TYPE)
print('自动施肥重复类型被设置为', fertilization_config.repeatType)
if config.KEY_REPEAT_CIRCLE in request.POST:
fertilization_config.repeatCircle = request.POST.get(config.KEY_REPEAT_CIRCLE,
config.DEFAULT_REPEAT_CIRCLE)
print('自动施肥周期被设置为', fertilization_config.repeatCircle)
if config.KEY_HOUR in request.POST:
fertilization_config.hour = request.POST.get(config.KEY_HOUR, config.DEFAULT_REPEAT_HOUR)
print('自动施肥周期小时被设置为', fertilization_config.hour)
if config.KEY_MINUTE in request.POST:
fertilization_config.minute = request.POST.get(config.KEY_MINUTE, config.DEFAULT_REPEAT_MINUTE)
print('自动施肥周期分钟被设置为', fertilization_config.minute)
config.set_fertilization_config_obj(fertilization_config)
json_obj = {
KEY_RESULT: HTTP_RESULT_SUCCESS,
KEY_TYPE: TYPE_FERTILIZATION,
config.KEY_IS_AUTO_CONTROL: fertilization_config.isAutoControl,
config.KEY_REPEAT_TYPE: fertilization_config.repeatType,
config.KEY_REPEAT_CIRCLE: fertilization_config.repeatCircle,
config.KEY_HOUR: fertilization_config.hour,
config.KEY_MINUTE: fertilization_config.minute
}
return HttpResponse(json.dumps(json_obj))
else:
return HttpResponse(DICT_RESULT_CODE.get(request_result, JSON_UNKNOWN_ERROR))
@csrf_exempt
def water(request):
request_result = __checkUser(request)
if request_result == HTTP_RESULT_SUCCESS:
water_config = config.get_water_config()
if request.method == 'POST':
if config.KEY_IS_AUTO_CONTROL in request.POST:
water_config.isAutoControl = request.POST.get(config.KEY_IS_AUTO_CONTROL, config.DEFAULT_AUTO_CONTROL)
print('自动浇水被设置为', water_config.isAutoControl)
if config.KEY_REPEAT_TYPE in request.POST:
water_config.repeatType = request.POST.get(config.KEY_REPEAT_TYPE, config.DEFAULT_REPEAT_TYPE)
print('自动浇水重复类型被设置为', water_config.repeatType)
if config.KEY_REPEAT_CIRCLE in request.POST:
water_config.repeatCircle = request.POST.get(config.KEY_REPEAT_CIRCLE, config.DEFAULT_REPEAT_CIRCLE)
print('自动浇水周期被设置为', water_config.repeatCircle)
if config.KEY_HOUR in request.POST:
water_config.hour = request.POST.get(config.KEY_HOUR, config.DEFAULT_REPEAT_HOUR)
print('自动浇水周期小时被设置为', water_config.hour)
if config.KEY_MINUTE in request.POST:
water_config.minute = request.POST.get(config.KEY_MINUTE, config.DEFAULT_REPEAT_MINUTE)
print('自动浇水周期分钟被设置为', water_config.minute)
config.set_water_config_obj(water_config)
json_obj = {
KEY_RESULT: HTTP_RESULT_SUCCESS,
KEY_TYPE: TYPE_WATER,
config.KEY_IS_AUTO_CONTROL: water_config.isAutoControl,
config.KEY_REPEAT_TYPE: water_config.repeatType,
config.KEY_REPEAT_CIRCLE: water_config.repeatCircle,
config.KEY_HOUR: water_config.hour,
config.KEY_MINUTE: water_config.minute
}
return HttpResponse(json.dumps(json_obj))
else:
return HttpResponse(DICT_RESULT_CODE.get(request_result, JSON_UNKNOWN_ERROR))
TYPE_USER_NO = -1
TYPE_USER_CREATE = 0
TYPE_USER_EDIT = 1
TYPE_USER_DELETE = 2
JSON_NO_SUCH_USER = '{"result":-5}'
JSON_USER_EXIST = '{"result":-6}'
ACTION_TYPE_NONE = -1
ACTION_TYPE_USER_CREATE = 0
ACTION_TYPE_USER_EDIT = 1
ACTION_TYPE_USER_DEL = 2
VALUE_OPERATION_RESULT_FAIL = 'fail'
VALUE_OPERATION_RESULT_SUCCESS = 'success'
VALUE_OPERATION_RESULT_USER_EXIST = 'user_exist'
VALUE_OPERATION_RESULT_USER_NOT_EXIST = 'user_not_exist'
VALUE_BOOL_TRUE = 1
VALUE_BOOL_FALSE = 0
@csrf_exempt
def user(request):
request_result = __checkUser(request)
if request_result == HTTP_RESULT_SUCCESS: # if the request passed the auth check
if request.method == 'GET': # if this is a GET request for data
print('Get请求')
if {config.KEY_PAGE_NUM, config.KEY_PAGE_COUNT}.intersection(request.GET): # page number specified
print('请求参数符合条件')
user_list = database.User.objects.all()
return HttpResponse(user_list) # unfinished
elif request.method == 'POST':
print('收到的API的关于用户的POST的请求\n'+str(request.POST))
if {config.KEY_USER_ACTION_TYPE, config.KEY_USERNAME, config.KEY_PASSWORD, config.KEY_IS_STUFF,
config.KEY_IS_ACTIVE, config.KEY_NICKNAME, config.KEY_INFO}.issubset(request.POST):
print('用户的参数校验正确')
user_username = request.POST[config.KEY_USERNAME]
user_password = request.POST[config.KEY_PASSWORD]
user_is_stuff = int(request.POST[config.KEY_IS_STUFF]) == VALUE_BOOL_TRUE
user_is_active = int(request.POST[config.KEY_IS_ACTIVE]) == VALUE_BOOL_TRUE
user_action_type = int(request.POST[config.KEY_USER_ACTION_TYPE])
user_info = request.POST[config.KEY_INFO]
user_nickname = request.POST[config.KEY_NICKNAME]
if user_nickname == '':
user_nickname = user_username
if user_info == '':
user_info = '暂无简介'
user_operation_result = VALUE_OPERATION_RESULT_FAIL
if user_action_type == ACTION_TYPE_USER_CREATE: # create user
print('创建用户 是否工作人员 '+str(user_is_stuff)+' 是否活动 '+str(user_is_active))
if database.User.objects.filter(username=user_username): # if the username is already registered
user_operation_result = VALUE_OPERATION_RESULT_USER_EXIST
print("同户名 %s 已被注册"%user_username)
else: # if the username is available
temp_user = database.User()
temp_user.username = user_username
temp_user.nickname = user_nickname
temp_user.set_password(<PASSWORD>)
temp_user.is_staff = user_is_stuff
temp_user.is_active = user_is_active
temp_user.info = user_info
temp_user.save()
print("同户名 %s 创建成功"%user_username)
elif user_action_type == ACTION_TYPE_USER_EDIT: # edit user; the password is not changed
print('编辑用户')
if database.User.objects.filter(username=user_username): # if the user exists
temp_user = database.User.objects.get(username=user_username)
temp_user.username = user_username
temp_user.nickname = user_nickname
temp_user.is_staff = user_is_stuff
temp_user.is_active = user_is_active
temp_user.info = user_info
temp_user.save()
print("同户 %s 编辑成功"%user_username)
else: # if the user does not exist
user_operation_result = VALUE_OPERATION_RESULT_USER_NOT_EXIST
print("同户 %s 不存在,不能编辑"%user_username)
elif user_action_type == ACTION_TYPE_USER_DEL: # delete user account
print('删除用户')
if database.User.objects.filter(username=user_username): # if the user exists
temp_user = database.User.objects.get(username=user_username)
print("同户 %s 删除成功"%user_username)
temp_user.delete()
else: # if the user does not exist
user_operation_result = VALUE_OPERATION_RESULT_USER_NOT_EXIST
print("同户 %s 不存在,不能删除"%user_username)
else:
print('传递的用户操作类型不正确')
user_action_type = ACTION_TYPE_NONE
json_obj = {
KEY_RESULT: HTTP_RESULT_SUCCESS,
KEY_TYPE: TYPE_USER,
KEY_OPERATION_RESULT: user_operation_result,
config.KEY_USERNAME: user_username,
config.KEY_NICKNAME: user_nickname,
config.KEY_IS_STUFF: user_is_stuff,
config.KEY_IS_ACTIVE: user_is_active,
config.KEY_INFO: user_info,
config.KEY_USER_ACTION_TYPE: user_action_type
}
return HttpResponse(json.dumps(json_obj))
return HttpResponse(JSON_PARAMS_INCORRENCT)
else:
return HttpResponse(DICT_RESULT_CODE.get(request_result, JSON_UNKNOWN_ERROR))
@csrf_exempt
def schedule(request):
return HttpResponse(JSON_LOGIN_REQUIRE)
@csrf_exempt
def noSuchApi(request):
return HttpResponse(JSON_NO_SUCH_API)
| 2.0625 | 2 |
tests/test_which_cam.py | waider/gopro-py-api | 1 | 12787265 | import http
from socket import timeout
from goprocam import GoProCamera
from .conftest import GoProCameraTest
class WhichCamTest(GoProCameraTest):
def setUp(self):
super().setUp()
# disable this so we can test it separately
self.monkeypatch.setattr(GoProCamera.GoPro, '_prepare_gpcontrol',
lambda self: self)
def test_already_detected(self):
assert self.goprocam.whichCam() == 'gpcontrol'
def test_detection_exceptions(self):
# this would, of course, be better as a parametrised test
for firmware_version in ['HX', 'FS', 'HD3.02', 'H18', 'HD3.22.01.50']:
self.goprocam._camera = ''
self.responses['/gp/gpControl']['info']['firmware_version'] = \
firmware_version
assert self.goprocam.whichCam() == 'gpcontrol'
def test_auth_detection(self):
self.goprocam._camera = ''
self.responses['/camera/cv'] = 'Hero3'
for firmware_version in ['HD2', '4', 'HD3.1']:
self.responses['/gp/gpControl']['info']['firmware_version'] = \
firmware_version
assert self.goprocam.whichCam() == 'auth'
def test_auth_detection_not_hero3(self):
self.goprocam._camera = ''
self.responses['/camera/cv'] = 'Hero2'
for firmware_version in ['HD2', '4', 'HD3.1']:
self.responses['/gp/gpControl']['info']['firmware_version'] = \
firmware_version
assert self.goprocam.whichCam() == ''
def test_auth_detection_without_gpcontrol(self):
self.goprocam._camera = ''
self.responses = {'/camera/cv': 'Hero3'}
assert self.goprocam.whichCam() == 'auth'
def test_gpcontrol_detection_without_gpcontrol_not_hero3(self):
self.goprocam._camera = ''
del(self.responses['/gp/gpControl'])
self.responses['/camera/cv'] = 'Hero2'
assert self.goprocam.whichCam() == ''
def test_gpcontrol_detection_without_gpcontrol(self):
# this will attempt to power on the camera - which we have intercepted
self.goprocam._camera = ''
self.responses = {}
assert self.goprocam.whichCam() == ''
def test_cv_timeout_while_detecting(self):
self.goprocam._camera = ''
self.responses = {'/camera/cv': timeout()}
assert self.goprocam.whichCam() == ''
def test_gpcontrol_timeout_while_detecting_hero3(self):
self.goprocam._camera = ''
self.responses['/gp/gpControl'] = timeout()
# this copes poorly with errors, so help it along
self.responses['/camera/cv'] = 'Hero3'
assert self.goprocam.whichCam() == 'auth'
def test_gpcontrol_timeout_while_detecting_hero2(self):
self.goprocam._camera = ''
self.responses['/gp/gpControl'] = timeout()
# this copes poorly with errors, so help it along
self.responses['/camera/cv'] = 'Hero2'
assert self.goprocam.whichCam() == ''
def test_gpcontrol_exception_while_detecting(self):
self.goprocam._camera = ''
self.responses['/gp/gpControl'] = http.client.HTTPException()
# this copes poorly with errors, so help it along
self.responses['/camera/cv'] = 'Hero3'
# different power-on!
with self.monkeypatch.context() as m:
def print_verify(args):
assert isinstance(args, http.client.HTTPException) or \
args == 'HERO3/3+'
m.setattr('builtins.print', print_verify)
m.setattr(GoProCamera.GoPro, 'power_on_auth', lambda self: self)
assert self.goprocam.whichCam() == 'auth'
def test_gpcontrol_exception_while_detecting_not_hero3(self):
self.goprocam._camera = ''
self.responses['/gp/gpControl'] = http.client.HTTPException()
# this copes poorly with errors, so help it along
self.responses['/camera/cv'] = 'Hero2'
# different power-on!
with self.monkeypatch.context() as m:
def print_verify(args):
assert isinstance(args, http.client.HTTPException)
m.setattr('builtins.print', print_verify)
m.setattr(GoProCamera.GoPro, 'power_on_auth', lambda self: self)
assert self.goprocam.whichCam() == 'auth'
| 2.546875 | 3 |
sendbird/api_endpoints/channel.py | jpbullalayao/sendbird-python | 4 | 12787266 | <reponame>jpbullalayao/sendbird-python
# All endpoint constants that both the OpenChannel and GroupChannel
# resources share
CHANNEL_BAN_USER = "/ban"
CHANNEL_FREEZE = "/freeze"
CHANNEL_LIST_BANNED_USERS = "/ban"
CHANNEL_LIST_MUTED_USERS = "/mute"
CHANNEL_MUTE_USER = "/mute"
CHANNEL_UNBAN_USER = "/ban/{banned_user_id}"
CHANNEL_UNMUTE_USER = "/mute/{muted_user_id}"
CHANNEL_UPDATE_BAN = "/ban/{banned_user_id}"
CHANNEL_VIEW_BAN = "/ban/{banned_user_id}"
CHANNEL_VIEW_MUTE = "/mute/{muted_user_id}"
CHANNEL_DELETE_MESSAGE = "/messages/{message_id}"
CHANNEL_LIST_MESSAGES = "/messages"
CHANNEL_MARK_AS_READ = "/messages/mark_as_read"
CHANNEL_SEND_MESSAGE = "/messages"
CHANNEL_UPDATE_MESSAGE = "/messages/{message_id}"
CHANNEL_VIEW_MEMBER_UNNREAD_COUNT = "/messages/unread_count"
CHANNEL_VIEW_MESSAGE = "/messages/{message_id}"
CHANNEL_VIEW_MESSAGE_COUNT = "/messages/total_count"
CHANNEL_CREATE_METADATA = "/metadata"
CHANNEL_DELETE_METADATA = "/metadata"
CHANNEL_UPDATE_METADATA = "/metadata"
CHANNEL_VIEW_METADATA = "/metadata"
CHANNEL_CREATE_METACOUNTER = "/metacounter"
CHANNEL_DELETE_METACOUNTER = "/metacounter"
CHANNEL_UPDATE_METACOUNTER = "/metacounter"
CHANNEL_VIEW_METACOUNTER = "/metacounter"
| 1.273438 | 1 |
genart/torch/train_gan.py | dyf/genart | 0 | 12787267 | <reponame>dyf/genart<gh_stars>0
import torch
import os
import numpy as np
import skimage.io
from data import GenartDataSet
from model import GenartGenerator, GenartDiscriminator
from torch.autograd import Variable
from torchvision.utils import save_image
Tensor = torch.FloatTensor
latent_size = 10
n_epochs = 100
img_shape = (256, 256, 3)
save_interval = 5
lr = 0.0002
save_path = "./out"
ds = GenartDataSet('./circles.h5')
loader = torch.utils.data.DataLoader(ds, batch_size=10, shuffle=True, num_workers=0)
generator = GenartGenerator(latent_size, img_shape)
discriminator = GenartDiscriminator(img_shape)
optimizer_g = torch.optim.Adam(generator.parameters(), lr=lr, betas=(0.5,0.999))
optimizer_d = torch.optim.Adam(discriminator.parameters(), lr=lr, betas=(0.5,0.999))
adversarial_loss = torch.nn.BCELoss()
for ni, epoch in enumerate(range(n_epochs)):
for bi, (imgs,_) in enumerate(loader):
# ground truths
valid = Variable(Tensor(imgs.size(0),1).fill_(1.0), requires_grad=False)
fake = Variable(Tensor(imgs.size(0),1).fill_(0.0), requires_grad=False)
real_imgs = Variable(imgs.type(Tensor))
# train generator
# ---------------
optimizer_g.zero_grad()
# noise input
z = Variable(Tensor(np.random.normal(0, 1, (real_imgs.shape[0], latent_size))))
fake_imgs = generator(z)
gen_loss = adversarial_loss(discriminator(fake_imgs), valid)
gen_loss.backward()
optimizer_g.step()
# train discriminator
# -------------------
optimizer_d.zero_grad()
real_vals = discriminator(real_imgs)
real_loss = adversarial_loss(real_vals, valid)
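# detach() keeps gradients from this discriminator update from flowing back into the generator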
fake_vals = discriminator(fake_imgs.detach())
fake_loss = adversarial_loss(fake_vals, fake)
d_loss = (real_loss + fake_loss) * 0.5
d_loss.backward()
optimizer_d.step()
if bi % save_interval == 0:
print(f'Epoch {ni}, Batch {bi} - saving')
save_image(fake_imgs.data[:9],
os.path.join(save_path, f'images_{ni:04d}_{bi:04d}.png'),
nrow=3, range=[0,1])
save_image(real_imgs.data[:9],
os.path.join(save_path, f'real_images_{ni:04d}_{bi:04d}.png'),
nrow=3, range=[0,1])
print("done")
| 2.1875 | 2 |
Unsupervised Machine Learning/Unsupervised Learning/02 - Image clustering K-means.py | Piraato/Learn-Python | 0 | 12787268 | <filename>Unsupervised Machine Learning/Unsupervised Learning/02 - Image clustering K-means.py<gh_stars>0
# Clustering is not actually good for image recognition,
# but this is a good way of showing how clustering works
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Just turning off the annoying messages in the console
tf.logging.set_verbosity(tf.logging.ERROR)
import matplotlib
import matplotlib.pyplot as plt
mnist = input_data.read_data_sets("mnist_data/")
training_digits, training_labels = mnist.train.next_batch(2000)
test_digits, test_labels = mnist.test.next_batch(5)
def display_digit(digit):
plt.imshow(digit.reshape(28, 28), cmap="Greys", interpolation='nearest')
display_digit(training_digits[0])
plt.show()
from tensorflow.contrib.learn.python.learn.estimators import kmeans
def input_fn(digits):
input_t = tf.convert_to_tensor(digits, dtype=tf.float32)
return (input_t, None)
k_means_estimator = kmeans.KMeansClustering(num_clusters=10)
# 100 steps results in 0% for me. I don't dare to try more steps because my computer might explode then...
fit = k_means_estimator.fit(input_fn=lambda: input_fn(training_digits), steps=100)
clusters = k_means_estimator.clusters()
for i in range(10):
plt.subplot(2, 5, i + 1)
display_digit(clusters[i])
cluster_labels = [9, 1, 6, 8, 0, 3, 0, 6, 1, 9]
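# manual mapping from cluster index to the digit each centroid resembles, read off the centroid plot above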
for i in range(5):
plt.subplot(1, 5, i + 1)
display_digit(test_digits[i])
predict = k_means_estimator.predict(input_fn=lambda: input_fn(test_digits), as_iterable=False)
print([cluster_labels[i] for i in predict['cluster_idx']])
for i in range(5):
plt.subplot(1, 5, i + 1)
display_digit(test_digits[i])
predict_train = k_means_estimator.predict(input_fn=lambda: input_fn(test_digits), as_iterable=False)
def display_accuracy(cluster_labels, cluster_idx, actual_labels):
predict_labels = [cluster_labels[i] for i in cluster_idx]
num_accurate_predictions = (list(predict_labels == actual_labels)).count(True)
print("Number of accurate predictions: ", num_accurate_predictions)
pctAccuracy = float(num_accurate_predictions) / float(len(actual_labels))
print("% accurate predictions: ", pctAccuracy)
display_accuracy(cluster_labels, predict_train['cluster_idx'], test_labels)
plt.show() | 3.640625 | 4 |
aiobook/core/facebook/types/__init__.py | Valenookua/aiobook | 4 | 12787269 | from .buttons import \
UrlButton, CallButton, LogOutButton, LogInButton, PostbackButton, GamePlayButton
from .elements import Element, MediaElement, OpenGraphElement
from .quick_replies import QuickReply
from .templates import\
ButtonTemplate, MediaTemplate, GenericTemplate, OpenGraphTemplate, ListTemplate
| 0.808594 | 1 |
source/data_preparation/vim_label.py | jackie930/yolov3 | 0 | 12787270 | # -*- coding: utf-8 -*-
"""
@File : yolo_label.py
@Author : Jackie
@Description :
"""
import json
import os
from shutil import copyfile
import sys
from sys import exit
sets = ['train', 'valid']
classes = ["nie es8","maybach s650","toyota gt8","tesla modelx"] #
def load_vim_label(labelfile):
with open(labelfile, "r") as f:
annotations = json.load(f, encoding='unicode-escape')
image_list = annotations['_via_image_id_list']
print ("<<<image ls: ", image_list)
print (annotations)
def preprocess(imgfolder,targetfolder):
image_list = os.listdir(imgfolder)
print ('total number:', len(image_list))
if not os.path.isdir(targetfolder):
os.makedirs(targetfolder)
for i in range(len(image_list)):
#print(image_list[i])
        # iterate over all image files
source = os.path.join(imgfolder, image_list[i])
target = os.path.join(targetfolder, str(i)+'.jpg')
# adding exception handling
try:
copyfile(source, target)
except IOError as e:
print("Unable to copy file. %s" % e)
exit(1)
except:
print("Unexpected error:", sys.exc_info())
exit(1)
print ("<<<< finish rename imgs!")
if __name__ == "__main__":
# first make sure your images is preprocessed before labeling!
imgfolder = '/Users/liujunyi/Desktop/spottag/summit-training/道路/pics/imgs'
preprocess(imgfolder,'../data/custom/images')
#load_vim_label('../data/custom/labels/car-type.json')
'''
# sets generation
image_list = os.listdir('../data/custom/images')
label_list = os.listdir('../data/custom/labels')
images = [i[:-4] for i in image_list]
print ("<<<<< length before", len(images))
xml_images = [i[:-4] for i in label_list]
print ("<<< images: ", images)
print ("<<< xml_images: ", xml_images)
images = [val for val in images if val in xml_images]
print ("<<<<< length after", len(images))
image_len = len(images)
num_train = image_len - int(image_len * 0.2)
num_test = int(image_len * 0.2)
print ("<<<< NUM TRAIN: ", num_train)
print ("<<<< NUM TEST: ", num_test)
print ("<<<< check if exisits")
if not os.path.exists('./data/custom'):
os.makedirs('./data/custom')
train_file = open('../data/custom/train.txt', 'w')
test_file = open('../data/custom/valid.txt', 'w')
i = 0
for image_id in image_list:
if i < num_train:
# print (">>> images for train: ",image_id)
train_file.write('%s\n' % ('data/custom/images/' + image_id))
else:
# print (">>> images for valid: ",image_id)
test_file.write('%s\n' % ('data/custom/images/' + image_id))
i = i + 1
train_file.close()
test_file.close()
'''
| 2.5 | 2 |
boxmetrics/core/info/sensors.py | Laurent-PANEK/boxmetrics-cli | 0 | 12787271 | <gh_stars>0
import psutil
from .base import Info
class Sensors(Info):
def __init__(self, *args):
super(Sensors, self).__init__(*args)
def temp(self):
if psutil.WINDOWS:
return dict()
temp = psutil.sensors_temperatures()
for key, data in temp.items():
temp[key] = data._asdict()
return temp
def fans(self):
if psutil.WINDOWS:
return dict()
fans = psutil.sensors_fans()
for key, data in fans.items():
fans[key] = data._asdict()
return fans
def battery(self):
battery = psutil.sensors_battery()
battery = battery._asdict()
battery["secsleft"] = self.__convert_remaining_time(battery["secsleft"])
return battery
def __convert_remaining_time(self, secs):
value = {
psutil.POWER_TIME_UNKNOWN: "?",
psutil.POWER_TIME_UNLIMITED: "unlimited",
}
if secs in value.keys():
return value[secs]
mm, ss = divmod(secs, 60)
hh, mm = divmod(mm, 60)
return "%dh%02dm%02ds" % (hh, mm, ss)
def all(self):
temp = self.temp()
fans = self.fans()
battery = self.battery()
return dict(temperature=temp, fans=fans, battery=battery)
SensorsInst = Sensors()
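# Usage sketch (illustrative; the printed values are made up and depend on the host):
#   print(SensorsInst.battery())  # e.g. {"percent": 80.0, "secsleft": "2h15m00s", "power_plugged": False}
#   print(SensorsInst.all())      # {"temperature": {...}, "fans": {...}, "battery": {...}}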
| 2.8125 | 3 |
web/app/models.py | henryjr1/DelterAirlinesTeam3 | 0 | 12787272 | # models.py
from app import db
class Passenger(db.Model):
__tablename__ = "passengers"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(30), nullable=False)
dob = db.Column(db.Date, nullable=False)
email = db.Column(db.String(30), unique=True, nullable=False)
address = db.Column(db.Text, nullable=False)
def __repr__(self):
return '<name {}>'.format(self.name)
class Plane(db.Model):
__tablename__ = "planes"
id = db.Column(db.Integer, primary_key=True)
model = db.Column(db.String(30), unique=True, nullable=False)
capacity = db.Column(db.Integer, nullable=False)
flight_number = db.Column(db.String(30), unique=True, nullable=False)
def __repr__(self):
return '<model = {} --- capacity = {} --- flight number = {}>'.format(self.model, self.capacity, self.flight_number)
class Flight(db.Model):
__tablename__ = 'flights'
id = db.Column(db.Integer, primary_key=True)
source = db.Column(db.String(200), nullable=False)
destination = db.Column(db.String(200), nullable=False)
plane_id = db.Column(db.Integer, db.ForeignKey('planes.id'))
plane = db.relationship("Plane", backref=db.backref('flights', lazy=True))
departure_time = db.Column(db.DateTime, nullable=False)
departure_zip_code = db.Column(db.Integer, nullable=False)
arrival_time = db.Column(db.DateTime, nullable=False)
arrival_zip_code = db.Column(db.Integer, nullable=False)
locale = db.Column(db.String(50), nullable=False)
tickets = db.relationship("Ticket", backref="flights", lazy="dynamic", order_by='Ticket.id')
class Ticket(db.Model):
__tablename__ = 'tickets'
id = db.Column(db.Integer, primary_key=True)
seat_number = db.Column(db.String(4), nullable=False)
price = db.Column(db.Float, nullable=False)
available = db.Column(db.Boolean, nullable=False)
flight_id = db.Column(db.Integer, db.ForeignKey('flights.id'), nullable=False)
flight = db.relationship("Flight")
def __repr__(self):
return '<seat_number = {} --- available = {} ----'.format(self.seat_number, self.available)
class AirFare(db.Model):
__tablename__ = 'airfares'
id = db.Column(db.Integer, primary_key=True)
amount = db.Column(db.Numeric, nullable=False)
description = db.Column(db.String(100), nullable=True)
class Transaction(db.Model):
__tablename__ = 'transactions'
id = db.Column(db.Integer, primary_key=True)
# booking_date_time = db.Column(db.DateTime, nullable=False)
passenger_id = db.Column(db.Integer, db.ForeignKey('passengers.id'))
passenger = db.relationship('Passenger', backref='transactions', cascade='save-update')
ticket_id = db.Column(db.Integer, db.ForeignKey('tickets.id'))
ticket = db.relationship('Ticket', backref='transactions', cascade='save-update')
# airfare_id = db.Column(db.Integer, db.ForeignKey('airfares.id'))
# airfare = db.relationship('AirFare', backref='transactions')
| 2.65625 | 3 |
utils.py | Fork-for-Modify/MetaSCI-CVPR2021 | 0 | 12787273 | <reponame>Fork-for-Modify/MetaSCI-CVPR2021<filename>utils.py
"""
@author : <NAME>, <NAME>
@Email : <EMAIL> <EMAIL>
Description:
Citation:
The code prepares for ECCV 2020
Contact:
<NAME>
<EMAIL>
Xidian University, Xi'an, China
<NAME>
<EMAIL>
Xidian University, Xi'an, China
LICENSE
=======================================================================
The code is for research purpose only. All rights reserved.
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
Copyright (c), 2020, <NAME>
<EMAIL>
"""
import scipy.io as scio
import numpy as np
def generate_masks(mask_path):
mask = scio.loadmat(mask_path + '/mask.mat')
mask = mask['mask']
mask_s = np.sum(mask, axis=2)
index = np.where(mask_s == 0)
mask_s[index] = 1
return mask.astype(np.float32), mask_s.astype(np.float32)
# def generate_masks_metaTest(mask_path):
# mask = scio.loadmat(mask_path + '/Mask.mat')
# mask = mask['mask']
# mask_s = np.sum(mask, axis=2)
# index = np.where(mask_s == 0)
# mask_s[index] = 1
# mask = np.transpose(mask, [3, 0, 1, 2])
# mask_s = np.transpose(mask_s, [2, 0, 1])
# mask = mask[3]
# mask_s = mask_s[3]
# return mask.astype(np.float32), mask_s.astype(np.float32)
# def generate_masks_metaTest_v2(mask_path):
# mask = scio.loadmat(mask_path + '/Mask.mat')
# mask = mask['mask']
# mask_s = np.sum(mask, axis=2)
# index = np.where(mask_s == 0)
# mask_s[index] = 1
# mask = np.transpose(mask, [3, 0, 1, 2])
# mask_s = np.transpose(mask_s, [2, 0, 1])
# return mask.astype(np.float32), mask_s.astype(np.float32)
# def generate_masks_metaTest_v3(mask_path):
# mask = scio.loadmat(mask_path)
# mask = mask['mask']
# mask_s = np.sum(mask, axis=2)
# index = np.where(mask_s == 0)
# mask_s[index] = 1
# return mask.astype(np.float32), mask_s.astype(np.float32)
def generate_masks_MAML(mask_path, picked_task):
# generate mask and mask_sum form given picked task index
# input data:
# mask_path->mask: [H,W,Cr,num_task]
# picked_task: list
# output data:
# mask: [num_task,H,W,Cr]
# mask_s: [num_task,H,W], mask sum
mask = scio.loadmat(mask_path)
mask = mask['mask']
if mask.ndim==3:
mask = np.expand_dims(mask,-1)
mask_s = np.sum(mask, axis=2)
index = np.where(mask_s == 0)
mask_s[index] = 1
mask = np.transpose(mask, [3, 0, 1, 2])
mask_s = np.transpose(mask_s, [2, 0, 1])
    assert max(picked_task) < mask.shape[0], 'ERROR: picked task index exceeds maximum limit'
mask = mask[picked_task]
mask_s = mask_s[picked_task]
return mask.astype(np.float32), mask_s.astype(np.float32)
def generate_meas(gt, mask):
"""
    generate_meas: generate coded measurements from masks and original frames (extra original frames that do not fill a complete group of Cr are discarded)
Args:
gt [H,W,num_frame]: orig frames
mask [H,W,Cr]: masks
Returns:
meas [num_batch,H,W]: coded measurement, each meas is a batch here
used_gt [num_batch,H,W,Cr]: used orig frames
"""
# data type convert
mask = mask.astype(np.float32)
gt = gt.astype(np.float32)
# rescale to 0-1
mask_maxv = np.max(mask)
if mask_maxv > 1:
mask = mask/mask_maxv
# calculate meas
# meas = np.sum(mask*gt,2)
Cr = mask.shape[2] # num of masks
used_gt = np.zeros([gt.shape[2] // Cr, gt.shape[0], gt.shape[1], Cr])
    for jj in range(gt.shape[2] // Cr * Cr):  # only iterate over frames that form complete groups of Cr
if jj % Cr == 0:
meas_t = np.zeros(gt.shape[0:2])
n = 0
pic_t = gt[:, :, jj]
mask_t = mask[:, :, n]
used_gt[jj // Cr, :, :, n] = pic_t
n += 1
meas_t = meas_t + np.multiply(mask_t, pic_t)
if jj == Cr-1:
meas_t = np.expand_dims(meas_t, 0)
meas = meas_t
elif (jj + 1) % Cr == 0: #zzh
meas_t = np.expand_dims(meas_t, 0)
meas = np.concatenate((meas, meas_t), axis=0)
return meas, used_gt
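# Minimal usage sketch (illustrative): './dataset/mask.mat' is a hypothetical path;
# array shapes follow the docstrings above.
if __name__ == '__main__':
    demo_mask, demo_mask_s = generate_masks_MAML('./dataset/mask.mat', picked_task=[0])
    # demo_mask: [1, H, W, Cr], demo_mask_s: [1, H, W]
    demo_gt = np.random.rand(demo_mask.shape[1], demo_mask.shape[2], 2 * demo_mask.shape[3])
    demo_meas, demo_used_gt = generate_meas(demo_gt, demo_mask[0])
    print(demo_meas.shape, demo_used_gt.shape)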
| 1.882813 | 2 |
mqtt_performance_tester/analyze.py | lucasimone/mqtt-performance-tester | 0 | 12787274 | import json
import traceback
import sys
from mqtt_performance_tester.mqtt_utils import *
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
### CLASS FOR STORE AN MQTT MESSAGE
class packet():
counter = 0
def __init__(self):
self.protocol = None
self.frame_id = None
self.type = None
self.size = -1
self.payload_size = 0
self.delta_time = -1
self.epoc_time = -1
self.mid = -1
def __repr__(self):
return "--- mid:%s \t%s \tType:%s \tSize:%s \tTime:%s \tEpoc:%s" \
%(self.mid, self.protocol, self.type, self.size, self.delta_time, self.epoc_time)
### CLASS FOR COMPUTE ALL THE PERFORMANCE PARAMS
class mqtt_performance():
def __init__(self, data, num_request, qos=1):
self.num_request = num_request
self.qos = qos
self.data = data
self.packets = []
self.size_tcp = 0
self.size_mqtt = 0
self.size_udp = 0
self.size_others = 0
self.counter = 0
self.num_mqtt = 0
self.num_tcp = 0
self.num_upd = 0
self.num_others = 0
self.mqtt_types = []
self.mqtt_ids = []
self._parse_json()
def _parse_json(self):
self.counter = 0
index = 0 # start_counter
msg = None
for pkt in self.data:
msg = packet()
try:
msg.frame_id = extract_field(pkt, 'frame_id')
msg.size = int(extract_field(pkt, "frame_size"))
# Read TIME
msg.delta_time = extract_field(pkt, "time_delta")
msg.epoc_time = extract_field(pkt, "time_epoch")
for layer in pkt["_source"]['layers']:
if layer == 'mqtt':
logger.debug ("---- Packet: {0}".format(pkt["_source"]['layers'][layer]))
if 'mqtt' in pkt["_source"]['layers']:
self.counter += 1
msg.type = extract_field(pkt, "mqtt_type")
msg.payload_size = extract_field(pkt, "mqtt_size", msg.type)
msg.mid = extract_field(pkt, "mqtt_id", msg.type)
msg.protocol = "mqtt"
logger.debug("MQTT Message Type {0} - ID:{1}".format(msg.type, msg.mid))
logger.debug("Numero di messaggi MQTT: {0}".format(len(pkt["_source"]['layers']['mqtt'])))
if msg.type not in self.mqtt_types:
self.mqtt_types.append(msg.type)
if msg.mid not in self.mqtt_ids or msg.mid == 'NA':
if msg.mid != 'NA':
self.mqtt_ids.append(msg.mid)
self.mqtt_ids.append(msg.mid)
else:
logger.debug("DUP packet %s" %repr(msg))
self.num_mqtt += 1
self.size_mqtt += msg.size
self.packets.append(msg)
elif 'udp' in pkt["_source"]['layers']:
msg.protocol = "udp"
msg.size = extract_field(pkt, "udp_size")
self.payload_size += msg.size
self.num_upd += 1
elif 'tcp' in pkt["_source"]['layers']:
msg.protocol = "tcp"
                    msg.payload_size = int(extract_field(pkt, "tcp_size"))
self.size_tcp += msg.size
self.num_tcp += 1
else:
msg.protocol = extract_field(pkt, "protocols")
self.size_others += msg.size
self.num_others += 1
except Exception as error:
logger.debug(" >>>>> ERROR PARSING Packets %s " %pkt, error)
traceback.print_exc(file=sys.stdout)
        ## PRINT RESULT
total = 0
logger.debug("Detected %d MQTT packets" %len(self.packets))
for t in self.mqtt_types:
num = len(self.filter_by(t))
logger.debug("--- %d %s " % (num, t))
total += num
logger.debug("--- TOTAL %d" % (total))
logger.debug('#######################################')
logger.debug('--- Total Message: %d' % self.counter)
logger.debug("--- TCP Message: %s " % self.num_tcp)
logger.debug('--- MQTT Message: %d' % self.num_mqtt)
logger.debug('--- UDP Message: %d' % self.num_upd)
logger.debug('--- OTHER Message: %d' % self.num_others)
logger.debug('#######################################')
logger.debug('--- TCP packets size: %d' % self.size_tcp)
logger.debug('--- MQTT packets size: %d' % self.size_mqtt)
logger.debug('--- UPD packets size: %d' % self.size_udp)
logger.debug('--- OTHERS packets size: %d' % self.size_others)
logger.debug('--- TOTAL packets size: %d' % (self.size_mqtt + self.size_tcp+ self.size_udp + self.size_others))
logger.debug('#######################################')
def get_num(self, msg_type):
return len(self.filter_by(msg_type))
def filter_by(self, filter):
output = []
for pkt in self.packets:
if pkt.type == filter:
output.append(pkt)
return output
def find_msg_with_id(self, mid, msg_type):
data = self.filter_by(msg_type)
for msg in data:
if msg.mid == mid:
return msg
return -1
def get_e2e(self):
min = 100000
max = -1
msg_type = MQTT_PUB_ACK
if self.qos == 2:
msg_type = MQTT_PUB_COM
avg_time = 0
counter = 0
data = self.filter_by(msg_type)
for msg in data:
msg_pub = self.find_msg_with_id(msg.mid, MQTT_PUB)
mqtt_time = (float(msg.epoc_time) - float(msg_pub.epoc_time))
if mqtt_time > max:
max = mqtt_time
if mqtt_time < min:
min = mqtt_time
avg_time += mqtt_time
# logger.debug ("%s -- %s " % (repr(msg), repr(msg_pub)))
counter += 1
logger.debug("[E2E] TOTAL TIME: %s " % avg_time)
if counter == 0:
avg_time = 0
else:
avg_time /= counter
logger.debug("[E2E] MIN TIME: %s - MAX TIME: %s" % (min, max))
logger.debug("[E2E] The E2E delay for %s is :%f [N. Pkt=%d]" %(msg_type, avg_time, counter))
return avg_time
def get_pdr(self, num):
filter = MQTT_PUB_ACK
if self.qos == 2:
filter = MQTT_PUB_COM
data = self.filter_by(filter)
counter = len(data)
pdr = (counter *1.0 / self.num_request) * 100
logger.debug("[PDR] The PDR for is %f [n. %d %s Pkt / Pkt sent %d] - REQUEST: %d" % (pdr, counter, filter, self.num_request, num))
return pdr
def get_size(self, protocol):
if protocol == TCP:
return self.size_tcp
elif protocol == MQTT:
return self.size_mqtt
else:
return 0
def get_packet_drop(self, paylod_size):
if self.qos == 1:
num_ack = self.get_num(MQTT_PUB_ACK)
ack_type = MQTT_PUB_ACK
else:
num_ack = self.get_num(MQTT_PUB_COM)
ack_type = MQTT_PUB_COM
size = self.size_tcp + self.size_mqtt
if float(size) == 0:
return 0
pdrop = (num_ack * paylod_size * 1.0) / float(size)
logger.debug("[PDROP] The Packet Drop is %f [n. %s: %d dim: %d] " % (pdrop, ack_type, num_ack, size))
return pdrop
def get_tcp_overhead(self):
size = self.size_tcp + self.size_mqtt
if float(size) == 0:
return 0
overhead = (self.size_tcp*1.0)/size
logger.debug("[TCP_OVERHEAD] TCP[%d] /TOTAL[%d] = %f " % (self.size_tcp, size, overhead))
return overhead
def computeTime(json_file, num_test, qos):
with open(json_file) as file:
pkts = json.load(file)
file.close()
return mqtt_performance(pkts, num_test, qos)
if __name__ == '__main__':
logger.debug('#######################################################')
logger.debug("#")
logger.debug("# Analyze Wireshark data for MQTT Performance analysis")
logger.debug("#")
logger.debug('#######################################################')
sh = logging.StreamHandler()
sh.setLevel(logging.DEBUG)
LOG_FORMAT = '%(levelname)-7s | %(asctime)s | %(name)40s:%(lineno)-3d| %(message)s'
formatter = logging.Formatter(LOG_FORMAT)
sh.setFormatter(formatter)
logger.addHandler(sh)
json_file = "backup/data_1507099161.54/mqtt_qos_1_payload_128_num_req_500.json"
with open(json_file) as file:
pkts = json.load(file)
file.close()
demo = mqtt_performance(pkts, 500, 1)
demo.get_e2e()
demo.get_pdr(500)
demo.get_packet_drop(256)
demo.get_tcp_overhead()
| 2.546875 | 3 |
ephios/core/models/users.py | ephios-dev/ephios | 14 | 12787275 | import datetime
import functools
import secrets
import uuid
from datetime import date
from itertools import chain
import guardian.mixins
from django.contrib.auth import get_user_model
from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager
from django.contrib.auth.models import Group, PermissionsMixin
from django.db import models, transaction
from django.db.models import (
BooleanField,
CharField,
DateField,
EmailField,
ExpressionWrapper,
F,
ForeignKey,
Max,
Model,
Q,
Sum,
)
from django.db.models.functions import TruncDate
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from ephios.extra.fields import EndOfDayDateTimeField
from ephios.extra.json import CustomJSONDecoder, CustomJSONEncoder
from ephios.extra.widgets import CustomDateInput
from ephios.modellogging.log import (
ModelFieldsLogConfig,
add_log_recorder,
register_model_for_logging,
)
from ephios.modellogging.recorders import FixedMessageLogRecorder, M2MLogRecorder
class UserProfileManager(BaseUserManager):
def create_user(
self,
email,
first_name,
last_name,
date_of_birth,
        password=None,
):
# pylint: disable=unexpected-keyword-arg,no-value-for-parameter
user = self.model(
email=email,
first_name=first_name,
last_name=last_name,
date_of_birth=date_of_birth,
)
user.set_password(password)
user.save()
return user
def create_superuser(
self,
email,
first_name,
last_name,
date_of_birth,
password=None,
):
user = self.create_user(
email=email,
password=password,
first_name=first_name,
last_name=last_name,
date_of_birth=date_of_birth,
)
user.is_superuser = True
user.is_staff = True
user.save()
return user
class UserProfile(guardian.mixins.GuardianUserMixin, PermissionsMixin, AbstractBaseUser):
email = EmailField(_("email address"), unique=True)
is_active = BooleanField(default=True, verbose_name=_("Active"))
is_staff = BooleanField(default=False, verbose_name=_("Staff user"))
first_name = CharField(_("first name"), max_length=254)
last_name = CharField(_("last name"), max_length=254)
date_of_birth = DateField(_("date of birth"))
phone = CharField(_("phone number"), max_length=254, blank=True)
calendar_token = CharField(_("calendar token"), max_length=254, default=secrets.token_urlsafe)
USERNAME_FIELD = "email"
REQUIRED_FIELDS = [
"first_name",
"last_name",
"date_of_birth",
]
objects = UserProfileManager()
class Meta:
verbose_name = _("user profile")
verbose_name_plural = _("user profiles")
db_table = "userprofile"
def get_full_name(self):
return self.first_name + " " + self.last_name
def __str__(self):
return self.get_full_name()
def get_short_name(self):
return self.first_name
@property
def age(self):
today, born = date.today(), self.date_of_birth
return today.year - born.year - ((today.month, today.day) < (born.month, born.day))
@property
def is_minor(self):
return self.age < 18
def as_participant(self):
from ephios.core.signup import LocalUserParticipant
return LocalUserParticipant(
first_name=self.first_name,
last_name=self.last_name,
qualifications=self.qualifications,
date_of_birth=self.date_of_birth,
email=self.email if self.is_active else None,
user=self,
)
@property
def qualifications(self):
return Qualification.objects.filter(
pk__in=self.qualification_grants.unexpired().values_list("qualification_id", flat=True)
).annotate(
expires=Max(F("grants__expires"), filter=Q(grants__user=self)),
)
def get_shifts(self, with_participation_state_in):
from ephios.core.models import Shift
shift_ids = self.localparticipation_set.filter(
state__in=with_participation_state_in
).values_list("shift", flat=True)
return Shift.objects.filter(pk__in=shift_ids).select_related("event")
def get_workhour_items(self):
from ephios.core.models import AbstractParticipation
participations = (
self.localparticipation_set.filter(state=AbstractParticipation.States.CONFIRMED)
.annotate(
duration=ExpressionWrapper(
(F("shift__end_time") - F("shift__start_time")),
output_field=models.DurationField(),
),
date=ExpressionWrapper(TruncDate(F("shift__start_time")), output_field=DateField()),
reason=F("shift__event__title"),
)
.values("duration", "date", "reason")
)
workinghours = self.workinghours_set.annotate(duration=F("hours")).values(
"duration", "date", "reason"
)
hour_sum = (
participations.aggregate(Sum("duration"))["duration__sum"] or datetime.timedelta()
) + datetime.timedelta(
hours=float(workinghours.aggregate(Sum("duration"))["duration__sum"] or 0)
)
return hour_sum, list(sorted(chain(participations, workinghours), key=lambda k: k["date"]))
register_model_for_logging(
UserProfile,
ModelFieldsLogConfig(
unlogged_fields={"id", "password", "calendar_token", "last_login"},
),
)
register_model_for_logging(
Group,
ModelFieldsLogConfig(
unlogged_fields={"id", "permissions"},
initial_recorders_func=lambda group: [
M2MLogRecorder(UserProfile.groups.field, reverse=True, verbose_name=_("Users")),
],
),
)
class QualificationCategoryManager(models.Manager):
def get_by_natural_key(self, category_uuid, *args):
return self.get(uuid=category_uuid)
class QualificationCategory(Model):
uuid = models.UUIDField("UUID", unique=True, default=uuid.uuid4)
title = CharField(_("title"), max_length=254)
objects = QualificationCategoryManager()
class Meta:
verbose_name = _("qualification track")
verbose_name_plural = _("qualification tracks")
db_table = "qualificationcategory"
def __str__(self):
return str(self.title)
def natural_key(self):
return (self.uuid, self.title)
class QualificationManager(models.Manager):
def get_by_natural_key(self, qualification_uuid, *args):
return self.get(uuid=qualification_uuid)
class Qualification(Model):
uuid = models.UUIDField(unique=True, default=uuid.uuid4, verbose_name="UUID")
title = CharField(_("title"), max_length=254)
abbreviation = CharField(max_length=254)
category = ForeignKey(
QualificationCategory,
on_delete=models.CASCADE,
related_name="qualifications",
verbose_name=_("category"),
)
includes = models.ManyToManyField(
"self", related_name="included_by", symmetrical=False, blank=True
)
is_imported = models.BooleanField(verbose_name=_("imported"), default=True)
objects = QualificationManager()
def __eq__(self, other):
return self.uuid == other.uuid if other else False
def __hash__(self):
return hash(self.uuid)
class Meta:
verbose_name = _("qualification")
verbose_name_plural = _("qualifications")
db_table = "qualification"
def __str__(self):
return str(self.title)
def natural_key(self):
return (self.uuid, self.title)
natural_key.dependencies = ["core.QualificationCategory"]
@classmethod
def collect_all_included_qualifications(cls, given_qualifications) -> set:
"""We collect using breadth first search with one query for every layer of inclusion."""
all_qualifications = set(given_qualifications)
current = set(given_qualifications)
while current:
new = (
Qualification.objects.filter(included_by__in=current)
.exclude(id__in=(q.id for q in all_qualifications))
.distinct()
)
all_qualifications |= set(new)
current = new
return all_qualifications
class CustomQualificationGrantQuerySet(models.QuerySet):
# Available on both Manager and QuerySet.
def unexpired(self):
return self.exclude(expires__isnull=False, expires__lt=timezone.now())
class ExpirationDateField(models.DateTimeField):
"""
A model datetime field whose formfield is an EndOfDayDateTimeField
"""
def formfield(self, **kwargs):
return super().formfield(
**{
"widget": CustomDateInput,
"form_class": EndOfDayDateTimeField,
**kwargs,
}
)
class QualificationGrant(Model):
qualification = ForeignKey(
Qualification,
on_delete=models.CASCADE,
verbose_name=_("qualification"),
related_name="grants",
)
user = ForeignKey(
get_user_model(),
related_name="qualification_grants",
on_delete=models.CASCADE,
verbose_name=_("user profile"),
)
expires = ExpirationDateField(_("expiration date"), blank=True, null=True)
objects = CustomQualificationGrantQuerySet.as_manager()
def __str__(self):
return f"{self.qualification!s} {_('for')} {self.user!s}"
class Meta:
unique_together = [["qualification", "user"]] # issue #218
db_table = "qualificationgrant"
verbose_name = _("Qualification grant")
register_model_for_logging(
QualificationGrant,
ModelFieldsLogConfig(attach_to_func=lambda grant: (UserProfile, grant.user_id)),
)
class Consequence(Model):
slug = models.CharField(max_length=255)
data = models.JSONField(default=dict, encoder=CustomJSONEncoder, decoder=CustomJSONDecoder)
user = models.ForeignKey(
get_user_model(),
on_delete=models.CASCADE,
verbose_name=_("affected user"),
null=True,
related_name="affecting_consequences",
)
class States(models.TextChoices):
NEEDS_CONFIRMATION = "needs_confirmation", _("needs confirmation")
EXECUTED = "executed", _("executed")
FAILED = "failed", _("failed")
DENIED = "denied", _("denied")
state = models.TextField(
max_length=31,
choices=States.choices,
default=States.NEEDS_CONFIRMATION,
verbose_name=_("State"),
)
class Meta:
db_table = "consequence"
verbose_name = _("Consequence")
@property
def handler(self):
from ephios.core import consequences
return consequences.consequence_handler_from_slug(self.slug)
def confirm(self, user):
from ephios.core.consequences import ConsequenceError
if self.state not in {
self.States.NEEDS_CONFIRMATION,
self.States.DENIED,
self.States.FAILED,
}:
raise ConsequenceError(_("Consequence was executed already."))
try:
with transaction.atomic():
self.handler.execute(self)
from ephios.core.services.notifications.types import ConsequenceApprovedNotification
if user != self.user:
ConsequenceApprovedNotification.send(self)
except Exception as e: # pylint: disable=broad-except
self.state = self.States.FAILED
add_log_recorder(
self,
FixedMessageLogRecorder(
label=_("Reason"),
message=str(e),
),
)
raise ConsequenceError(str(e)) from e
else:
self.state = self.States.EXECUTED
finally:
self.save()
def deny(self, user):
from ephios.core.consequences import ConsequenceError
if self.state not in {self.States.NEEDS_CONFIRMATION, self.States.FAILED}:
raise ConsequenceError(_("Consequence was executed or denied already."))
self.state = self.States.DENIED
self.save()
from ephios.core.services.notifications.types import ConsequenceDeniedNotification
if user != self.user:
ConsequenceDeniedNotification.send(self)
def render(self):
return self.handler.render(self)
def __str__(self):
return self.render()
def attach_log_to_object(self):
if self.user_id:
return UserProfile, self.user_id
return Consequence, self.id
register_model_for_logging(
Consequence,
ModelFieldsLogConfig(
unlogged_fields=["id", "slug", "user", "data"],
attach_to_func=lambda consequence: consequence.attach_log_to_object(),
),
)
class WorkingHours(Model):
user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
hours = models.DecimalField(decimal_places=2, max_digits=7)
reason = models.CharField(max_length=1024, default="")
date = models.DateField()
class Meta:
db_table = "workinghours"
class Notification(Model):
slug = models.SlugField(max_length=255)
user = models.ForeignKey(
get_user_model(),
on_delete=models.CASCADE,
verbose_name=_("affected user"),
null=True,
)
failed = models.BooleanField(default=False)
data = models.JSONField(
blank=True, default=dict, encoder=CustomJSONEncoder, decoder=CustomJSONDecoder
)
@functools.cached_property
def notification_type(self):
from ephios.core.services.notifications.types import notification_type_from_slug
return notification_type_from_slug(self.slug)
@property
def subject(self):
return self.notification_type.get_subject(self)
def as_plaintext(self):
return self.notification_type.as_plaintext(self)
def as_html(self):
return self.notification_type.as_html(self)
def get_url(self):
return self.notification_type.get_url(self)
| 1.921875 | 2 |
model/xgboost_model.py | theBraindonor/chicago-crime-arrests | 1 | 12787276 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Build an XGBoost model of arrests in the Chicago crime data.
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright 2019, <NAME>"
__license__ = "Creative Commons Attribution-ShareAlike 4.0 International License"
__version__ = "1.0"
import xgboost as xgb
from skopt.space import Integer, Real
from sklearn.externals import joblib
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn_pandas import DataFrameMapper
from utility import HyperParameters, Runner
from model import load_clean_data_frame, ordinal_data_mapper
sample = None
iterations = 24
hyper_parameters = HyperParameters(search_space={
'xgb__n_estimators': Integer(100, 500),
'xgb__learning_rate': Real(0.1, 0.3),
'xgb__gamma': Real(0.0001, 100.0, prior='log-uniform'),
'xgb__max_depth': Integer(3, 7),
'xgb__colsample_bytree': Real(0.4, 0.8),
'xgb__colsample_bylevel': Real(0.4, 0.8),
'xgb__colsample_bynode': Real(0.4, 0.8)
})
# Features were selected based on feature importance from experiments.
data_mapper = DataFrameMapper([
(['iucr'], [MinMaxScaler()]),
(['location'], [MinMaxScaler()]),
(['latitude'], [StandardScaler()]),
(['hour'], [MinMaxScaler()]),
(['longitude'], [StandardScaler()]),
(['type'], [MinMaxScaler()]),
(['month'], [MinMaxScaler()]),
(['fbi_code'], [MinMaxScaler()])
])
xgboost_pipeline = Pipeline([
('mapper', ordinal_data_mapper),
('xgb', xgb.XGBClassifier(tree_method='hist'))
])
xgboost_pipeline_fs = Pipeline([
('mapper', data_mapper),
('xgb', xgb.XGBClassifier(tree_method='hist'))
])
def build_xgboost_model():
runner = Runner(
'model/output/xgboost_basic',
load_clean_data_frame(),
'arrest',
xgboost_pipeline,
hyper_parameters
)
runner.run_classification_search_experiment(
'roc_auc',
sample=sample,
n_iter=iterations,
record_predict_proba=True
)
joblib.dump(
runner.trained_estimator,
'model/output/xgboost_basic.joblib'
)
runner = Runner(
'model/output/xgboost_basic_fs',
load_clean_data_frame(),
'arrest',
xgboost_pipeline_fs,
hyper_parameters
)
runner.run_classification_search_experiment(
'roc_auc',
sample=sample,
n_iter=iterations,
record_predict_proba=True
)
joblib.dump(
runner.trained_estimator,
'model/output/xgboost_basic_fs.joblib'
)
if __name__ == '__main__':
build_xgboost_model()
| 2.640625 | 3 |
rlintro/bandit.py | adam-page/rlintro | 0 | 12787277 | <reponame>adam-page/rlintro
from abc import ABC
import matplotlib.pyplot as plt
import numpy as np
class Bandit:
"""A simple k-armed bandit."""
def __init__(
self,
arms: int = 10,
qs: np.ndarray = None,
walk: float = None,
):
"""Create the simple k-armed bandit.
Args:
arms (int, optional): Number of arms. Defaults to 10.
qs (np.ndarray, optional): Initial q values-random if not set. Must be equal in length to arms. Defaults to None.
walk (float, optional): Standard deviation for walk. No walking behavior if None. Defaults to None.
Raises:
IndexError: Raised when qs is specified but its length != arms.
"""
if qs is None:
self._qs: np.ndarray = np.random.normal(size=(arms, 1))
elif len(qs) == arms:
self._qs = qs
else:
raise IndexError
self._walk = walk
    def step(self, arm: int) -> float:
"""Run a step by pulling one of the arms and getting a reward.
Args:
arm (int): Which arm to pull.
Raises:
IndexError: Raised when the arm value is greater than the number of arms for this bandit.
Returns:
            float: The reward for this step.
"""
        if arm >= len(self._qs):
raise IndexError
        reward = float(np.random.normal(loc=self._qs[arm, 0]))
if self._walk is not None:
self._qs += np.random.normal(scale=self._walk, size=self._qs.shape)
return reward
class BanditAgent(ABC):
"""Base class for all bandit agents."""
def action(self, prev_reward: int = None) -> int:
pass
class BanditExperiment:
"""Handles a bandit experiment with an agent and reward averaging."""
def __init__(
self, bandit: Bandit, agent: BanditAgent, steps: int, reward_over: int = None
):
"""Create the experiment.
Args:
bandit (Bandit): The bandit for this experiment.
agent (BanditAgent): The agent for this experiment.
steps (int): The total number of steps.
reward_over (int, optional): The number of steps (from the end) to average reward over. Defaults to None.
"""
self._bandit = bandit
self._agent = agent
self._steps = steps
self._r_over = self._steps - reward_over if reward_over else 0
self._avg_r = 0.0
self._nr = 0
self._current_step = 0
def run(self) -> float:
"""Run the experiment over all steps.
Returns:
float: The average reward over the last reward_over steps or all steps if reward_over is not set.
"""
reward = None
for i in range(self._steps):
            # Feed the previous reward back to the agent so it can act on it.
            reward = self._bandit.step(self._agent.action(reward))
            if i >= self._r_over:
                self._avg_r = (reward + (self._nr * self._avg_r)) / float(self._nr + 1)
self._nr += 1
return self._avg_r
class RandomLeverAgent(BanditAgent):
"""A simple BanditAgent which just randomly chooses always."""
    def action(self, prev_reward: int = None) -> int:
"""Get the next action based on previous reward.
Args:
prev_reward (int, optional): The previous reward. Defaults to None.
Returns:
int: The action selected by the agent.
"""
        return np.random.randint(10)  # always assumes the default 10-armed bandit
experiment_parameters = [
float(1 / 128),
float(1 / 64),
float(1 / 32),
float(1 / 16),
float(1 / 8),
float(1 / 4),
float(1 / 2),
1,
2,
4,
]
if __name__ == "__main__":
print(experiment_parameters)
exp = BanditExperiment(Bandit(), RandomLeverAgent(), 2000, reward_over=1000)
print(exp.run())
| 3.640625 | 4 |
Poker.py | guptaronav/python-projects | 0 | 12787278 | import random
import time
from collections import Counter
done = 'false'
#here is the animation
def animate():
Count=0
global done
print('loading… |',end="")
while done == 'false':
time.sleep(0.1)
print('/',end="")
time.sleep(0.1)
print('-',end="")
time.sleep(0.1)
print('\\',end="")
time.sleep(0.1)
Count+=1
if Count==10:
done='true'
print()
print('Done!')
animate()
done = 'false'
Card_Deck=[2,3,4,5,6,7,8,9,10,'J','Q','K','A']
Suits=['♠','♣︎','♥︎','♦']
Deck=['2 ♠','3 ♠','4 ♠','5 ♠','6 ♠','7 ♠','8 ♠','9 ♠','10 ♠','J ♠','Q ♠','K ♠','A ♠',
'2 ♣︎','3 ♣︎','4 ♣︎','5 ♣︎','6 ♣︎','7 ♣︎','8 ♣︎','9 ♣︎','10 ♣︎','J ♣︎','Q ♣︎','K ♣︎','A ♣︎',
'2 ♥︎','3 ♥︎','4 ♥︎︎','5 ♥︎','6 ♥︎','7 ♥︎︎','8 ︎♥︎','9 ♥︎︎','10 ♥︎','J ♥︎','Q ♥︎','K ♥︎','A ♥︎',
'2 ♦︎','3 ♦︎','4 ♦︎︎','5 ♦︎','6 ♦︎','7 ♦︎︎','8 ︎♦','9 ♦','10 ♦︎','J ♦︎','Q ♦','K ♦','A ♦']
Deck_Value=[1,2,3,4,5,6,7,8,9,10,11,12,13,
1,2,3,4,5,6,7,8,9,10,11,12,13,
1,2,3,4,5,6,7,8,9,10,11,12,13,
1,2,3,4,5,6,7,8,9,10,11,12,13]
Spades=[0,1,2,3,4,5,6,7,8,9,10,11,12]
Clubs=[13,14,15,16,17,18,19,20,21,22,23,24,25]
Hearts=[26,27,28,29,30,31,32,33,34,35,36,37,38]
Diamonds=[39,40,41,42,43,44,45,46,47,48,49,50,51]
Aces=[12,25,38,51]
Used_Cards=[]
Stats={}
def deal():
A=random.randint(0,51)
if A not in Used_Cards:
Used_Cards.append(A)
return A
else:
return deal()
def Draw_Five():
A=deal()
B=deal()
C=deal()
D=deal()
E=deal()
Cards_in_Hand=[A,B,C,D,E]
return Cards_in_Hand
def Compare(A,B):
if Deck_Value[A]>Deck_Value[B]:
return 1
elif Deck_Value[A]<Deck_Value[B]:
return -1
else:
return 0
def Is_Straight(Cards):
Card_Value=[]
for i in Cards:
Card_Value.append(Deck_Value[i])
Card_Value.sort()
if Card_Value[0]+1==Card_Value[1] and Card_Value[1]+1==Card_Value[2] and Card_Value[2]+1==Card_Value[3] and Card_Value[3]+1==Card_Value[4]:
return True
    elif Card_Value[4] == 13:  # highest card is an Ace (value 13): check the low A-2-3-4-5 straight
if Card_Value[4]-12==Card_Value[0] and Card_Value[0]+1==Card_Value[1] and Card_Value[1]+1==Card_Value[2] and Card_Value[2]+1==Card_Value[3]:
return True
else:
return False
else:
return False
def Print_Cards(Cards):
Card_Value=[]
for i in Cards:
Card_Value.append(Deck[i])
print(Card_Value)
def Is_Flush(Cards):
return all(item in Spades for item in Cards) or all(item in Clubs for item in Cards) or all(item in Hearts for item in Cards) or all(item in Diamonds for item in Cards)
def Is_Straight_Flush(Cards):
return Is_Straight(Cards) and Is_Flush(Cards)
def Is_Royal_Flush(Cards):
Cards.sort(reverse=1)
return Cards[0] in Aces and Is_Straight_Flush(Cards)
def OAK(Cards):
Card_Value=[]
for i in Cards:
Card_Value.append(Deck_Value[i])
return max(Counter(Card_Value).values())
def Get_MRC(Cards):
Card_Value=[]
for i in Cards:
Card_Value.append(Deck_Value[i])
Values=list(Counter(Card_Value).values())
Keys=list(Counter(Card_Value).keys())
Max_Value_Index=Values.index(max(Values))
return Keys[Max_Value_Index]
#GET Top Two Repeat Cards
def Get_TTRC(Cards):
Card_Value=[]
for i in Cards:
Card_Value.append(Deck_Value[i])
Values=list(Counter(Card_Value).values())
Keys=list(Counter(Card_Value).keys())
if 1 in Values:
Min_Value_Index=Values.index(1)
Keys.pop(Min_Value_Index)
return Keys
def Is_Four_of_a_Kind(Cards):
return OAK(Cards)==4
def Is_Three_of_a_Kind(Cards):
return OAK(Cards)==3
def Is_One_Pair(Cards):
return OAK(Cards)==2
def Is_Two_Pair(Cards):
Card_Value=[]
for i in Cards:
Card_Value.append(Deck_Value[i])
return not Is_Three_of_a_Kind(Cards) and len(Counter(Card_Value).keys())==3
def Is_Full_House(Cards):
Card_Value=[]
for i in Cards:
Card_Value.append(Deck_Value[i])
return len(Counter(Card_Value).keys())==2 and Is_Three_of_a_Kind(Cards)
def Get_High_Card(Cards):
Card_Value=[]
for i in Cards:
Card_Value.append(Deck_Value[i])
Card_Value.sort(reverse=1)
return Card_Value[0]
def Get_2nd_High_Card(Cards):
Card_Value=[]
for i in Cards:
Card_Value.append(Deck_Value[i])
Card_Value.sort(reverse=1)
return Card_Value[1]
def Get_3rd_High_Card(Cards):
Card_Value=[]
for i in Cards:
Card_Value.append(Deck_Value[i])
Card_Value.sort(reverse=1)
return Card_Value[2]
def Get_4th_High_Card(Cards):
Card_Value=[]
for i in Cards:
Card_Value.append(Deck_Value[i])
Card_Value.sort(reverse=1)
return Card_Value[3]
def Get_5th_High_Card(Cards):
Card_Value=[]
for i in Cards:
Card_Value.append(Deck_Value[i])
Card_Value.sort(reverse=1)
return Card_Value[4]
def Play(Name):
Result=10
Cards=Draw_Five()
#Cards=[0,13,2,15,25]
print("Drawing Cards for",Name+"…")
time.sleep(2.5)
Print_Cards(Cards)
if Is_Royal_Flush(Cards):
Result=1
print("You got a Royal Flush and your Highest Card is",Card_Deck[Get_High_Card(Cards)-1])
elif Is_Straight_Flush(Cards):
Result=2
print("You got a Straight Flush and your Highest Card is",Card_Deck[Get_High_Card(Cards)-1])
elif Is_Four_of_a_Kind(Cards):
Result=3
print("You got a Four of a Kind of",Card_Deck[Get_MRC(Cards)-1],"and your Highest Card is",Card_Deck[Get_High_Card(Cards)-1])
elif Is_Full_House(Cards):
Result=4
RepeatCards=[]
for dv in Get_TTRC(Cards):
RepeatCards.append(Card_Deck[dv-1])
print("You got a Full House",RepeatCards,"and your Highest Card is",Card_Deck[Get_High_Card(Cards)-1])
elif Is_Flush(Cards):
Result=5
print("You got a Flush and your Highest Card is",Card_Deck[Get_High_Card(Cards)-1])
elif Is_Straight(Cards):
Result=6
print("You got a Straight and your Highest Card is",Card_Deck[Get_High_Card(Cards)-1])
elif Is_Three_of_a_Kind(Cards):
Result=7
print("You got a Three of a Kind of",Card_Deck[Get_MRC(Cards)-1],"and your Highest Card is",Card_Deck[Get_High_Card(Cards)-1])
elif Is_Two_Pair(Cards):
Result=8
RepeatCards=[]
for dv in Get_TTRC(Cards):
RepeatCards.append(Card_Deck[dv-1])
print("You got Two Pairs",RepeatCards,"and your Highest Card is",Card_Deck[Get_High_Card(Cards)-1])
elif Is_One_Pair(Cards):
Result=9
print("You got a Pair of",Card_Deck[Get_MRC(Cards)-1],"and your Highest Card is",Card_Deck[Get_High_Card(Cards)-1])
else:
print("You got a High Card!", Card_Deck[Get_High_Card(Cards)-1])
#print("Your Highest Card is",Card_Deck[Get_High_Card(Cards)-1])
Result_Array=[Get_High_Card(Cards),Get_2nd_High_Card(Cards),Get_3rd_High_Card(Cards),Get_4th_High_Card(Cards),Get_5th_High_Card(Cards)]
return Cards,Result,Result_Array,Get_MRC(Cards)
def declare_winner(P1_Name,P1_Score,P2_Name,P2_Score):
if P1_Score>P2_Score:
Stats[P1_Name]+=1
print(P1_Name,"Wins!")
elif P1_Score<P2_Score:
Stats[P2_Name]+=1
print(P2_Name,"Wins!")
def breaktie(P1_Name,P1_Result_Array,P2_Name,P2_Result_Array,idx):
if P1_Result_Array[idx]==P2_Result_Array[idx]:
if idx==4:
            Stats[P2_Name]+=0.5
            Stats[P1_Name]+=0.5
print(P1_Name,"and",P2_Name,"have tied. It's a draw!")
else:
breaktie(P1_Name,P1_Result_Array,P2_Name,P2_Result_Array,idx+1)
else:
declare_winner(P1_Name,P1_Result_Array[idx],P2_Name,P2_Result_Array[idx])
def Check_High_Card(P1,P1_Result_Array,P2,P2_Result_Array):
if P1_Result_Array[0]==P2_Result_Array[0]:
breaktie(P1,P1_Result_Array,P2,P2_Result_Array,1)
else:
declare_winner(P1,P1_Result_Array[0],P2,P2_Result_Array[0])
def Start_Game(P1,P2,Game_Number):
print("______________________________________________")
input(P1 + ", Hit Enter when Ready ")
(P1_Cards,P1_Result,P1_Result_Array,P1_MRC)=Play(P1)
for i in range(1,3,1):
print()
input(P2 + ", Hit Enter when Ready ")
(P2_Cards,P2_Result,P2_Result_Array,P2_MRC)=Play(P2)
for i in range(1,3,1):
print()
#comparing results to find a winner
if P1_Result==P2_Result:
if P1_Result in [3,4,7,9]:
if P1_MRC>P2_MRC:
Stats[P1]+=1
print(P1,"Wins!")
elif P1_MRC<P2_MRC:
Stats[P2]+=1
print(P2,"Wins!")
else:
Check_High_Card(P1,P1_Result_Array,P2,P2_Result_Array)
elif P1_Result==8:
#both players have 2 pairs
P1_TTRC=Get_TTRC(P1_Cards)
P2_TTRC=Get_TTRC(P2_Cards)
if P1_TTRC[0]>P2_TTRC[0] and P1_TTRC[0]>P2_TTRC[1]:
Stats[P1]+=1
print(P1,"Wins!")
elif P1_TTRC[1]>P2_TTRC[0] and P1_TTRC[0]>P2_TTRC[1]:
Stats[P1]+=1
print(P1,"Wins!")
elif P2_TTRC[0]>P1_TTRC[0] and P2_TTRC[0]>P1_TTRC[1]:
Stats[P2]+=1
print(P2,"Wins!")
elif P2_TTRC[1]>P1_TTRC[0] and P2_TTRC[0]>P1_TTRC[1]:
Stats[P2]+=1
print(P2,"Wins!")
else:
Check_High_Card(P1,P1_Result_Array,P2,P2_Result_Array)
else:
Check_High_Card(P1,P1_Result_Array,P2,P2_Result_Array)
elif P1_Result>P2_Result:
Stats[P2]+=1
print(P2,"Wins!")
elif P1_Result<P2_Result:
Stats[P1]+=1
print(P1,"Wins!")
print("Current Stats:",Stats)
print("______________________________________________")
Continue=input("Would You Like to Play Again? ")
if "n" not in Continue and "N" not in Continue:
print("Ok, Starting Game",Game_Number+1)
if len(Used_Cards)>42:
print("Our Virtual Deck has ran out of cards. Shuffling…")
time.sleep(1.5)
print("Deck Incoming!")
Used_Cards.clear()
Start_Game(P1,P2,Game_Number+1)
else:
print("Thank You for Playing Poker Online: Multiplayer (Single Deck Edition)!")
print("Welcome To Poker Online: Multiplayer (Single Deck Edition)!")
print()
P1=input("Player 1, Please Enter Your Name: ")
P2=input("Player 2, Please Enter Your Name: ")
Stats[P1]=0
Stats[P2]=0
Start_Game(P1,P2,1)
| 3.609375 | 4 |
products/migrations/0009_alter_product_image.py | susovangarai/SRshoper | 0 | 12787279 | <reponame>susovangarai/SRshoper
# Generated by Django 3.2 on 2021-04-16 05:40
from django.db import migrations, models
import products.models
class Migration(migrations.Migration):
dependencies = [
('products', '0008_alter_shippingaddress_order'),
]
operations = [
migrations.AlterField(
model_name='product',
name='image',
field=models.ImageField(blank=True, default=None, null=True, upload_to=products.models.image_upload_path),
),
]
| 1.585938 | 2 |
pinball/workflow/job.py | DotModus/pinball | 1,143 | 12787280 | <reponame>DotModus/pinball
# Copyright 2015, Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of job metadata included in job tokens.
Job object describes job inputs, outputs, and all information required to
execute a job (e.g., a command line of a shell job or class name of a data
job)."""
import abc
from pinball.config.utils import get_log
from pinball.persistence.token_data import TokenData
from pinball.workflow.name import Name
__author__ = '<NAME>'
__copyright__ = 'Copyright 2015, Pinterest, Inc.'
__credits__ = [__author__]
__license__ = 'Apache'
__version__ = '2.0'
LOG = get_log('pinball.workflow.worker')
class Job(TokenData):
"""Parent class for specialized job types."""
__metaclass__ = abc.ABCMeta
IS_CONDITION = False
def __init__(self, name=None, inputs=None, outputs=None, emails=None,
max_attempts=1, retry_delay_sec=0, warn_timeout_sec=None,
abort_timeout_sec=None):
self.name = name
self.inputs = inputs if inputs is not None else []
self.outputs = outputs if outputs is not None else []
self.emails = emails if emails is not None else []
self.max_attempts = max_attempts
self.retry_delay_sec = retry_delay_sec
self.warn_timeout_sec = warn_timeout_sec
self.abort_timeout_sec = abort_timeout_sec
assert self.max_attempts > 0
self.disabled = False
self.history = []
self.events = []
@property
def _COMPATIBILITY_ATTRIBUTES(self):
return {
'emails': [],
'disabled': False,
'max_attempts': 1,
'events': [],
'warn_timeout_sec': None,
'abort_timeout_sec': None,
'retry_delay_sec': 0,
}
@abc.abstractmethod
def info(self):
return
def retry(self):
"""Decide if the job should be retried.
Returns:
True if the job should be retried, otherwise False.
"""
if not self.history:
return False
last_record = self.history[-1]
current_instance = last_record.instance
assert last_record.exit_code != 0
failed_runs = 0
for record in reversed(self.history):
if record.instance != current_instance:
break
if record.exit_code != 0:
# There may have been successful runs in the past if we are
# re-doing an execution.
failed_runs += 1
if failed_runs >= self.max_attempts:
return False
return True
def truncate_history(self):
if self.IS_CONDITION and len(self.history) > self.max_attempts:
self.history = self.history[-self.max_attempts:]
def reload(self, new_job):
"""Reload job config from a new config.
Configuration elements defining the workflow topology (inputs and
outputs), execution history, or run-time values (events) are not
modified.
Args:
new_job: The new job configuration to update from.
"""
assert self.__class__ == new_job.__class__
self.emails = new_job.emails
self.max_attempts = new_job.max_attempts
class ShellJob(Job):
"""Shell job runs a command when executed."""
def __init__(self, name=None, inputs=None, outputs=None, emails=None,
max_attempts=1, retry_delay_sec=0, warn_timeout_sec=None,
abort_timeout_sec=None, command=None, cleanup_template=None):
super(ShellJob, self).__init__(name, inputs, outputs, emails,
max_attempts, retry_delay_sec,
warn_timeout_sec, abort_timeout_sec)
self.command = command
self.cleanup_template = cleanup_template
@property
def _COMPATIBILITY_ATTRIBUTES(self):
result = super(ShellJob, self)._COMPATIBILITY_ATTRIBUTES
result['cleanup_template'] = None
return result
def __str__(self):
return ('ShellJob(name=%s, inputs=%s, outputs=%s, emails=%s, '
'max_attempts=%d, retry_delay_sec=%d, warn_timeout_sec=%s, '
'abort_timeout_sec=%s, disabled=%s, command=%s, '
'cleanup_template=%s, events=%s, history=%s)' % (
self.name,
self.inputs,
self.outputs,
self.emails,
self.max_attempts,
self.retry_delay_sec,
self.warn_timeout_sec,
self.abort_timeout_sec,
self.disabled,
self.command,
self.cleanup_template,
self.events,
self.history))
def __repr__(self):
return self.__str__()
def info(self):
return 'command=%s' % self.command
def reload(self, new_job):
super(ShellJob, self).reload(new_job)
self.command = new_job.command
self.cleanup_template = new_job.cleanup_template
@staticmethod
def _get_command_attributes(template):
"""Extract attributes from a command string template.
E.g., for template 'ls %(dir1)s %(dir2)s' the result is
['dir1', 'dir2'].
Args:
template: The template to extract attributes from.
Returns:
The list of named attributes extracted from the template.
"""
class Extractor:
"""Helper class extracting attributes from a string template.
"""
def __init__(self):
self.attributes = set()
def __getitem__(self, attribute):
self.attributes.add(attribute)
return 0
extractor = Extractor()
try:
template % extractor
except ValueError:
LOG.exception('failed to customize template %s', template)
return list(extractor.attributes)
def _consolidate_event_attributes(self):
"""Consolidate attributes in triggering events.
Iterate over events in the most recent execution record and combine
them into one dictionary mapping attribute names to their values. If
multiple events contain the same attribute, the return value will be a
comma separated string of values from all those events.
Returns:
Dictionary of consolidated event attribute key-values.
"""
assert self.history
last_execution_record = self.history[-1]
result = {}
for event in last_execution_record.events:
for key, value in event.attributes.items():
new_value = result.get(key)
if new_value:
new_value += ',%s' % value
else:
new_value = value
result[key] = new_value
return result
def customize_command(self):
"""Specialize the command with attribute values extracted from events.
Returns:
Job command with parameter values replaced by attributes extracted
from the triggering events. If a parameter is not present in the
event attribute set, it is replaced with an empty string.
"""
attributes = {}
command_attributes = ShellJob._get_command_attributes(self.command)
for attribute in command_attributes:
attributes[attribute] = ''
event_attributes = self._consolidate_event_attributes()
attributes.update(event_attributes)
try:
return self.command % attributes
except ValueError:
LOG.exception('failed to customize command %s', self.command)
return self.command
class ShellConditionJob(ShellJob):
IS_CONDITION = True
def __init__(self, name=None, outputs=None, emails=None, max_attempts=10,
retry_delay_sec=5 * 60, warn_timeout_sec=None,
abort_timeout_sec=None, command=None, cleanup_template=None):
super(ShellConditionJob, self).__init__(
name=name,
inputs=[Name.WORKFLOW_START_INPUT],
outputs=outputs,
emails=emails,
max_attempts=max_attempts,
retry_delay_sec=retry_delay_sec,
warn_timeout_sec=warn_timeout_sec,
abort_timeout_sec=abort_timeout_sec,
command=command,
cleanup_template=cleanup_template)
| 2.046875 | 2 |
python/qicycling/trend.py | zimult/web_md | 0 | 12787281 | #! /usr/bin/python
# encoding:utf-8
import requests
import json
import time
import config
import db
import os
import datetime
import sys
import traceback
from fn import log, db_app, db_wp
from urllib import quote
from urllib import unquote
reload(sys)
sys.setdefaultencoding('utf8')
'''
Google Trends keyword index
Requests for trend data are rate-limited (the exact limit is currently unknown), so updating
once per day is recommended; since the data resolution is one day, a daily update barely
affects the results.
https://bikeridejoy.herokuapp.com/api/trend/index/<symbol>/<months>
Returns JSON: each key is a timestamp in milliseconds (divide by 1000 before converting to a
datetime), each value is the trend index.
<months> is 1-3, because the Google Trends API only allows searching within 3 months.
<symbol> is any bicycle brand from 自行车品牌.docx, all lowercase.
The three brands that only have a Chinese name map to the following English symbols:
'feige': '飞鸽',
'phoenix': '凤凰',
'forever': '永久',
'''
brand_list = [
['Giant', '捷安特'],
['Merida', '美利达'],
['TREK', '崔克'],
['Specialized', '闪电'],
['BMC'],
['LOOK', '路克'],
['SRAM', '速联'],
['shimano', '禧玛诺'],
['cannondale', '佳能戴尔'],
['Pinarello', '皮纳瑞罗'],
['Kuota'],
['SCOTT', '斯科特'],
['Cervelo'],
['UCC'],
['KUNG', '攻队'],
['FOCUS'],
['missile', '米赛尔'],
['ORBEA', '欧贝亚'],
['JAVA', '佳沃'],
['De Rosa'],
['Colnago', '梅花'],
['Kona'],
['Schwinn', '施文'],
['Bianchi', '比安奇'],
['Gusto', '高士特'],
['Cinelli'],
['Fuji', '富士'],
['BH'],
['WILIER', '威廉'],
['飞鸽'],
['凤凰自行车'],
['Cube'],
['永久'],
['ZGL'],
['XDS', '喜德盛'],
['TIME', '自行车'],
['Canyon'],
['LAPIERRE', '拉皮尔'],
['凯路仕'],
['3T'],
['Factor'],
['Argon18'],
['Ridley'],
['pardus', '瑞豹'],
['Campagnolo'],
['Power2Max'],
['SRM'],
['Rotor'],
['Moots'],
['Olympia'],
['ENVE'],
['OYAMA', '欧亚马'],
['YETI'],
['LiteSpeed'],
['Lightweight'],
['FSA'],
['QUICK'],
['Taokas', '道卡斯'],
['KTM'],
['Author'],
['NeilPryde'],
['Ceepo', '袭豹'],
['CAMP', '坎普'],
['TRINX', '千里达'],
['Triace', '骓驰'],
['intense'],
['Tropix', '烈风'],
['GALAXY', '格莱仕'],
['Santa Cruz'],
['NORCO', '诺客']
]
def import_brand(cursor):
t = time.time()
ts = int(round(t * 1000))
for i in xrange(len(brand_list)):
brand_name = brand_list[i][0]
brand_name_cn = ''
if len(brand_list[i]) > 1:
brand_name_cn = brand_list[i][1]
print brand_name, brand_name_cn
cursor.execute("INSERT INTO brand (status,`name`,name_cn,`TIMESTAMP`) values (1,'%s','%s',%d)"
% (brand_name, brand_name_cn, ts))
def get_brand_list(cursor):
list = []
cursor.execute("SELECT id, `name`, name_cn FROM brand where status=1 and type=0")
result = cursor.fetchall()
for row in result:
list.append(row)
return list
def get_google_trend(cursor, list):
for row in list:
brand_id, brand_name, brand_name_cn = row
check_name = brand_name.lower()
get_google_trend_brand(cursor, check_name, brand_id)
def get_google_trend_brand(cursor, check_name, brand_id):
# print brand_id, brand_name, brand_name_cn
if check_name == '飞鸽':
check_name = 'feige'
elif check_name == '凤凰':
check_name = 'phoenix'
elif check_name == '永久':
check_name = 'forever'
# url = "https://bikeridejoy.herokuapp.com/api/trend/index/"+check_name+"/3"
url1 = "https://bikeridejoy.herokuapp.com/api/trend/index/" + check_name
url2 = "https://bikeridejoy.herokuapp.com/api/trend/qicycling/" + check_name
print url1
t = time.time()
ts = int(round(t * 1000))
try:
res1 = requests.get(url1)
res2 = requests.get(url2)
print res1.text
js1 = json.loads(res1.text)
js2 = json.loads(res2.text)
sts = "2018-06-10 08:00:00"
date_s = datetime.datetime.strptime(sts, '%Y-%m-%d %H:%M:%S')
stk = 736856
hs1 = {}
hs2 = {}
for ror in js1:
for k, v in ror.items():
# print k, v
days = int(k) - stk
delta = datetime.timedelta(days)
n_days = date_s + delta
n_str = n_days.strftime('%Y-%m-%d %H:%M:%S')
hs1[n_str] = v
for ror in js2:
for k, v in ror.items():
# print k, v
days = int(k) - stk
delta = datetime.timedelta(days)
n_days = date_s + delta
n_str = n_days.strftime('%Y-%m-%d %H:%M:%S')
hs2[n_str] = v
print hs1
print hs2
for k, v in hs1.items():
if hs2.has_key(k):
v2 = hs2[k]
else:
v2 = 1
insert_brand_opinion(cursor, brand_id, v, v2, k, ts)
except Exception, e:
print(e.message)
print(traceback.format_exc())
        # write a record for today (fallback value when the request or parsing fails)
n_str = datetime.datetime.now().strftime('%Y-%m-%d') + ' 08:00:00'
insert_brand_opinion(cursor, brand_id, 1, 1, n_str, ts)
return
def insert_brand_opinion(cursor, brand_id, value1, value2, str_time, ts):
cursor.execute("SELECT id FROM brand_opinion where `day`='%s' and brand_id=%d" % (str_time, brand_id))
result = cursor.fetchone()
if result is None:
sql = "INSERT INTO brand_opinion (`day`,full_num,num,brand_id,`timestamp`)" \
" VALUES ('%s',%d,%d,%d,%d)" % (str_time, value1, value2, brand_id, ts)
# print sql
cursor.execute(sql)
else:
id = result[0]
# print id
#cursor.execute("UPDATE brand_opinion set full_num=%d, num=%d, `timestamp`=%d where id=%d and `day`='%s'" % (
# value1, value2, ts, id, str_time))
if __name__ == '__main__':
# s = ''
# html, img_list, author, video_list = sync.get_href('http://www.qicycling.cn/2927.html')
# print html
# print time.time()
# cursor_wp = db_wp.get_cursor()
#print datetime.datetime.now().strftime('%Y-%m-%d')
cursor_app = db_app.get_cursor()
# import_brand(cursor_app)
# db_app.commit()
b_l = get_brand_list(cursor_app)
print b_l
get_google_trend(cursor_app, b_l)
#
# get_google_trend_brand(cursor_app, '3T')
db_app.commit()
| 1.773438 | 2 |
hw4/hw4_2/mitm.py | dedeswim/com-402-hw | 9 | 12787282 | from netfilterqueue import NetfilterQueue
from scapy.all import *
import socket
import re
def print_and_accept(pkt):
ip = IP(pkt.get_payload())
if ip.haslayer("Raw"):
print("IP packet received")
payload = ip["Raw"].load
        if payload[0] == 0x16 and payload[5] == 0x01:  # TLS handshake record carrying a ClientHello
            new_payload = [x for x in payload]
            # Force the cipher-suite bytes (offsets 112/113 in this capture's ClientHello)
            # to 0x002f, i.e. TLS_RSA_WITH_AES_128_CBC_SHA, to downgrade the connection.
            new_payload[112] = 0x00
            new_payload[113] = 0x2f
            print("Downgraded AES")
            pkt.set_payload(bytes(new_payload))
pkt.accept()
nfqueue = NetfilterQueue()
nfqueue.bind(1, print_and_accept)
s = socket.fromfd(nfqueue.get_fd(), socket.AF_UNIX, socket.SOCK_STREAM)
try:
nfqueue.run_socket(s)
except KeyboardInterrupt:
print('')
s.close()
    nfqueue.unbind()
| 2.40625 | 2 |
core/iotools.py | pvthinker/pyRSW | 8 | 12787283 | from netCDF4 import Dataset
from dataclasses import dataclass, field
import os
import pickle
import sys
import shutil
import numpy as np
from variables import modelvar
@dataclass
class VariableInfo():
nickname: str = ""
dimensions: tuple = field(default_factory=lambda: ())
name: str = ""
units: str = ""
dtype: str = "d"
class NetCDF_tools():
"""
Basic class to create and write NetCDF files
Parameters
----------
filename : str
The file name to be created.
attrs : dict
The global attributes.
dimensions : list[(name, size), ...]
The list of dimensions.
size==None -> unlimited
variables : list[VariableInfo, ...]
The name of variable.dimensions should match one of dimensions.
"""
def __init__(self, filename, attrs, dimensions, variables):
self.filename = filename
self.attrs = attrs
self.dimensions = {dim[0]: dim[1] for dim in dimensions}
self.variables = {var.nickname: var for var in variables}
def create(self):
"""
Create the empty NetCDF file with
- attributes
- dimensions
- variables
"""
with Dataset(self.filename, "w", format='NETCDF4') as nc:
nc.setncatts(self.attrs)
for dim, size in self.dimensions.items():
nc.createDimension(dim, size)
for infos in self.variables.values():
assert isinstance(infos.dimensions, tuple)
v = nc.createVariable(infos.nickname,
infos.dtype,
infos.dimensions)
v.standard_name = infos.name
v.units = infos.units
def write(self, variables, nc_start={}, data_start={}):
"""
Write variables
Parameters
----------
variables : list[(nickname, data), ...]
where data is an ndarray
nc_start : dict{name: (offset, size)}
name : the dimension name
offset : the offset of that dimension in the NetCDF file
size : the size of data in that dimension
If a dimension is not in nc_start it is assumed that
the data has a size that matches the size defined in
the NetCDF.
data_start : dict{name: (offset, size)}
same that nc_start but for the data in variables
"""
with Dataset(self.filename, "r+") as nc:
for nickname, data in variables.items():
ncidx = self._get_idx(nickname, nc_start)
if isinstance(data, np.ndarray):
dataidx = self._get_idx(nickname, data_start)
nc.variables[nickname][ncidx] = data[dataidx]
else:
nc.variables[nickname][ncidx] = data
def _get_idx(self, nickname, nc_start):
"""
Return the tuple of slices
to either slice through nc.variables or through data
"""
infos = self.variables[nickname]
ncidx = []
for dim in infos.dimensions:
if dim in nc_start:
istart, size = nc_start[dim]
else:
istart, size = 0, self.dimensions[dim]
if size is not None:
ncidx += [slice(istart, istart+size)]
return tuple(ncidx)
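# Minimal usage sketch for NetCDF_tools (illustrative only; the file name,
# dimensions and variable names below are made up and not part of pyRSW):
#   import numpy as np
#   nc = NetCDF_tools("out.nc", {"model": "pyrsw"},
#                     [("time", None), ("x", 16)],
#                     [VariableInfo("time", ("time",), "time", "s"),
#                      VariableInfo("h", ("time", "x"), "height", "m")])
#   nc.create()
#   nc.write({"time": 0.0, "h": np.zeros((1, 16))}, nc_start={"time": (0, 1)})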
class Ncio():
"""
Class that handles all the IO for pyRSW
which includes
- creating and writing model snapshots in the history.nc
- creating and writing model bulk diagnostics in the diags.nc
- saving the param.pkl file
- saving the Python experiment script
"""
def __init__(self, param, grid, batchindex=0):
self.param = param
self.grid = grid
self.batchindex = batchindex
self.nprocs = np.prod(grid.procs)
if self.nprocs > 1:
from mpi4py import MPI
self.MPI = MPI
self._create_output_directory()
self.backup_config()
hist_infos = get_hist_infos(param, grid)
self.hist = NetCDF_tools(self.history_file, *hist_infos)
if not self.singlefile or self.master:
self.hist.create()
self.hist_index = 0
self.write_grid()
diag_infos = get_diag_infos(param, grid)
self.diag = NetCDF_tools(self.diag_file, *diag_infos)
self.diag_index = 0
if self.master:
self.diag.create()
def _create_output_directory(self):
if self.master and not os.path.isdir(self.output_directory):
os.makedirs(self.output_directory)
@property
def myrank(self):
return self.grid.myrank
@property
def master(self):
return self.myrank == 0
@property
def expname(self):
return self.param["expname"]
@property
def singlefile(self):
return self.param["singlefile"]
@property
def output_directory(self):
datadir = os.path.expanduser(self.param["datadir"])
return os.path.join(datadir, self.expname)
@property
def history_file(self):
"""
Full path to the NetCDF history file
"""
his = self._add_batchindex("history")
basicname = f"{his}.nc"
mpiname = f"{his}_{self.myrank:02}.nc"
hisname = basicname if self.singlefile else mpiname
return os.path.join(self.output_directory, hisname)
@property
def diag_file(self):
"""
Full path to the NetCDF diagnostic file
"""
diag = self._add_batchindex("diag")
diagname = f"{diag}.nc"
return os.path.join(self.output_directory, diagname)
def _add_batchindex(self, filename):
if self.param.restart:
return filename + f"_{self.batchindex:02}"
else:
return filename
def backup_config(self):
"""
Backup the experiment configuration into the output directory
- save param in the param.pkl
- save the experiment Python script
"""
if self.master and self.batchindex == 0:
dest = f"{self.output_directory}/param.pkl"
with open(dest, "wb") as fid:
pickle.dump(self.param, fid)
python_launch_script = sys.argv[0]
dest = os.path.join(self.output_directory, f"{self.expname}.py")
shutil.copyfile(python_launch_script, dest)
def write_grid(self):
"""
Write the model grid arrays into the NetCDF file (just once)
"""
xc = self.grid.coord.x(0, self.grid.ic)[0]
yc = self.grid.coord.y(self.grid.jc, 0)[:, 0]
xe = self.grid.coord.x(0, self.grid.ie)[0]
ye = self.grid.coord.y(self.grid.je, 0)[:, 0]
layer = np.arange(self.grid.nz)
msk = self.grid.arrays.msk.view("i")
datagrid = {
"x": xc,
"y": yc,
"xe": xe,
"ye": ye,
"layer": layer,
"msk": msk
}
self._history_write_halo_mpi(datagrid)
def write_hist(self, state, time, kt):
"""
Write a model snapshot into the NetCDF file
"""
datahist = {
"time": time,
"iteration": kt,
}
for name in self.param["var_to_save"]:
vartype = modelvar[name]["type"]
if vartype == "vector":
for axis in "xy":
compname = name+axis
var = state.get(compname)
datahist[compname] = var.getproperunits(self.grid)
else:
var = state.get(name)
datahist[name] = var.getproperunits(self.grid)
nc_start = {"time": (self.hist_index, 1)}
self._history_write_halo_mpi(datahist, nc_start=nc_start)
self.hist_index += 1
def _history_write_halo_mpi(self, data, nc_start={}):
"""
Generic function to write data into the history NetCDF file
handle the following special cases
- write the arrays without the halo
- write in a single history file, even if several MPI ranks
"""
data_start = {}
if not self.param.halo_included:
j0, j1, i0, i1 = self.grid.arrays.hb.domainindices
nx = self.param.nx
ny = self.param.ny
data_start["x"] = (i0, nx)
data_start["y"] = (j0, ny)
data_start["xe"] = (i0, nx+1)
data_start["ye"] = (j0, ny+1)
if self.singlefile:
i0 = self.grid.loc[2]*self.param.nx
j0 = self.grid.loc[1]*self.param.ny
nc_start["x"] = (i0, nx)
nc_start["y"] = (j0, ny)
nc_start["xe"] = (i0, nx+1)
nc_start["ye"] = (j0, ny+1)
if self.singlefile and (self.nprocs > 1):
# all MPI ranks write in the same file
for rank in range(self.nprocs):
if rank == self.myrank:
self.hist.write(data,
nc_start=nc_start,
data_start=data_start)
self.MPI.COMM_WORLD.Barrier()
else:
# each rank writes in its own history file
self.hist.write(data, nc_start=nc_start, data_start=data_start)
def write_diags(self, diags, time, kt):
"""
Write the domain integrated diagnostics into the NetCDF file
"""
datadiag = {
"time": time,
"iteration": kt,
"ke": diags["ke"],
"pe": diags["pe"],
"me": diags["me"],
"enstrophy": diags["potenstrophy"],
}
start = {"time": (self.diag_index, 1)}
if self.master:
self.diag.write(datadiag, nc_start=start)
self.diag_index += 1
def get_hist_infos(param, grid):
attrs = {"model": "pyrsw",
"author": "someone"}
if param.halo_included:
ny, nx = grid.xc.shape
else:
ny, nx = param.ny, param.nx
if param.singlefile:
nx *= param.npx
ny *= param.npy
nz = param.nz
dims = [("time", None), ("layer", nz),
("x", nx), ("y", ny),
("xe", nx+1), ("ye", ny+1)]
infos = [
("time", ("time",), "time", "s"),
("iteration", ("time",), "model iteration", "", "i4"),
("x", ("x",), "x coord at center", "m"),
("y", ("y",), "y coord at center", "m"),
("xe", ("xe",), "x coord at edge", "m"),
("ye", ("ye",), "y coord at edge", "m"),
("layer", ("layer",), "layer index", "", "i1"),
("msk", ("y", "x"), "mask at cell centers", "", "i1"),
]
vardims = {
"scalar": ("time", "layer", "y", "x"),
"u": ("time", "layer", "y", "xe"),
"v": ("time", "layer", "ye", "x"),
"vorticity": ("time", "layer", "ye", "xe")
}
for name in param["var_to_save"]:
longname = modelvar[name]["name"]
units = modelvar[name]["unit"]
vartype = modelvar[name]["type"]
if vartype == "vector":
infos += [(name+"x", vardims["u"], longname+" x-component", units)]
infos += [(name+"y", vardims["v"], longname+" y-component", units)]
else:
infos += [(name, vardims[vartype], longname, units)]
varinfos = [VariableInfo(*info) for info in infos]
hist_infos = (attrs, dims, varinfos)
return hist_infos
def get_diag_infos(param, grid):
attrs = {"model": "pyrsw",
"author": "someone"}
dims = [("time", None)]
infos = [
("time", ("time",), "time", "s"),
("iteration", ("time",), "model iteration", "", "i4"),
("ke", ("time",), "kinetic energy", "m^2 s^-2"),
("pe", ("time",), "mean available potential energy", "m^2 s^-2"),
("me", ("time",), "kinetic + potential energy", "m^2 s^-2"),
("enstrophy", ("time",), "mean enstrophy", "s^-2 m^-2"),
]
varinfos = [VariableInfo(*info) for info in infos]
diag_infos = (attrs, dims, varinfos)
return diag_infos
| 2.65625 | 3 |
task0/test_task0.py | 18harsh/Nirikshak-Bot-NB--eyantra | 2 | 12787284 | <gh_stars>1-10
try:
import task0_cardinal
except ImportError:
print("\n\t[ERROR] It seems that task0_cardinal.pyc is not found in current directory! OR")
print("\n\tAlso, it might be that you are running test_task0.py from outside the Conda environment!\n")
exit()
# Main function
if __name__ == '__main__':
task0_cardinal.test_setup() | 1.953125 | 2 |
apigateway/apps/store_point_password_handle.py | cnds/wxdemo | 0 | 12787285 | import requests
from flask import jsonify, request
from .base import BaseHandler
from .json_validate import SCHEMA
class StorePointPassword(BaseHandler):
def get(self, store_id):
api_resp = requests.get(
'{0}/accounts/stores/{1}/point-password'.format(
self.endpoint['accounts'], store_id))
resp_status = api_resp.status_code
if resp_status != 200 and resp_status != 400:
self.logger.error('request account service failed')
return '', 500
return jsonify(api_resp.json()), resp_status
def post(self, store_id):
is_valid, data = self.get_params_from_request(
request, SCHEMA['store_point_password_post'])
if not is_valid:
return self.error_msg(self.ERR['invalid_body_content'], data)
api_resp = requests.post(
'{0}/accounts/stores/{1}/point-password'.format(
self.endpoint['accounts'], store_id),
json=data)
resp_status = api_resp.status_code
if resp_status != 201 and resp_status != 400:
self.logger.error('request accounts service failed')
return '', 500
return jsonify(api_resp.json()), resp_status
def put(self, store_id):
is_valid, data = self.get_params_from_request(
request, SCHEMA['store_point_password_put'])
if not is_valid:
return self.error_msg(self.ERR['invalid_body_content'], data)
api_resp = requests.put(
'{0}/accounts/stores/{1}/point-password'.format(
self.endpoint['accounts'], store_id),
json=data)
resp_status = api_resp.status_code
if resp_status != 200 and resp_status != 400:
self.logger.error('request accounts service failed')
return '', 500
return jsonify(api_resp.json()), resp_status
| 2.734375 | 3 |
ML_CW1/assgn_1_part_1/3_regularized_linear_regression/hypothesis_to_vector.py | ShellySrivastava/Machine-Learning | 0 | 12787286 | from calculate_hypothesis import *
def hypothesis_to_vector(X, theta):
hypothesis_vec = np.array([], dtype=np.float32)
for i in range(X.shape[0]):
hypothesis_vec = np.append(hypothesis_vec, calculate_hypothesis(X, theta, i))
return hypothesis_vec | 2.953125 | 3 |
pqb/queries.py | josegomezr/pyqb | 0 | 12787287 | <filename>pqb/queries.py
#encoding=utf-8
from . import statements
from . import grouping
from . import expressions
import json
class Select:
"""SELECT Query Builder"""
def __init__(self, *fields):
"""
Inicializa la consulta SELECT opcionalmente los argumentos pasados serna considerados campos para la proyección.
"""
super(self.__class__, self).__init__()
self.raw_fields = fields
self.raw_fields = []
self.raw_fields_group = []
self.fields = []
self.group_fields = []
self.raw_tables = []
self.raw_order_by = []
self.order_by_fields = []
self.tables = []
self.where_criteria = grouping.BaseGrouper()
def __prepareData__(self):
"""
        Helper that prepares the data used to produce the final SQL.
"""
if isinstance(self.raw_fields, str):
self.raw_fields = self.raw_fields.split(',')
for x in self.raw_fields:
self.fields.append(expressions.AliasExpression(x).result())
if len(self.fields) == 0:
self.fields.append('*')
for x in self.raw_tables:
self.tables.append(expressions.AliasExpression(x).result())
if isinstance(self.raw_fields_group, str):
self.raw_fields_group = self.raw_fields_group.split(',')
for x in self.raw_fields_group:
self.group_fields.append(expressions.AliasExpression(x).result())
if isinstance(self.raw_order_by, str):
self.raw_order_by = self.raw_order_by.split(',')
for x in self.raw_order_by:
self.order_by_fields.append(expressions.OrderByExpression(*x).result())
def from_(self, table, alias=None):
"""
        Set the data source (and optionally an alias).
"""
if isinstance(table, str):
table = [[table, alias]]
self.raw_tables = table
return self
def where(self, field, value = None, operator = '='):
"""
        Add conditions to the query, joined by AND.
"""
if field is None:
return self
conjunction = None
if value is None and isinstance(field, dict):
for field, value in field.items():
operator, value = value if isinstance(value, tuple) else ('=', value)
self.where(field, value, operator)
else:
if self.where_criteria.size() > 0:
conjunction = 'AND'
self.where_criteria.append(expressions.ConditionExpression(field, value, operator=operator, conjunction=conjunction))
return self
def where_or(self, field, value = None, operator = '='):
"""
        Add conditions to the query, joined by OR.
"""
if field is None:
return self
conjunction = None
if value is None and isinstance(field, dict):
for field, value in field.items():
operator, value = value if isinstance(value, tuple) else ('=', value)
self.where_or(field, value, operator)
else:
if self.where_criteria.size() > 0:
conjunction = 'OR'
self.where_criteria.append(expressions.ConditionExpression(field, value, operator=operator, conjunction=conjunction))
return self
def group_by(self, *args):
"""
        Specify the fields used for GROUP BY.
"""
if len(args) == 1:
self.raw_fields_group = args[0].split(',')
else:
self.raw_fields_group = list(args)
return self
def order_by(self, field, orientation='ASC'):
"""
        Specify the fields and direction used for ORDER BY.
"""
if isinstance(field, list):
self.raw_order_by.append(field)
else:
self.raw_order_by.append([field, orientation])
return self
def result(self, *args, **kwargs):
"""
        Build the final SQL query string.
"""
prettify = kwargs.get('pretty', False)
self.__prepareData__()
sql = 'SELECT '
sql += ', '.join(self.fields)
if len(self.tables) > 0:
if prettify:
sql += '\n'
else:
sql += ' '
sql += 'FROM '
sql += ', '.join(self.tables)
if self.where_criteria.size() > 0:
if prettify:
sql += '\n'
else:
sql += ' '
sql += 'WHERE '
sql += self.where_criteria.result()
if len(self.group_fields) > 0:
if prettify:
sql += '\n'
else:
sql += ' '
sql += 'GROUP BY '
sql += ', '.join(self.group_fields)
if len(self.order_by_fields) > 0:
if prettify:
sql += '\n'
else:
sql += ' '
sql += 'ORDER BY '
sql += ', '.join(self.order_by_fields)
if prettify:
sql += '\n'
else:
sql += ' '
return sql
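# Usage sketch for Select (illustrative only; table and column names are made up):
#   sql = (Select('u.id', 'u.name')
#          .from_('users', 'u')
#          .where({'u.active': 1})
#          .order_by('u.name', 'DESC')
#          .result())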
class Delete(object):
"""
DELETE Query Builder
"""
def __init__(self, type):
"""
        Initialize the query; type is the resource kind (Vertex|Edge).
"""
super(Delete, self).__init__()
self._class = None
self._cluster = None
self._type = None
self.data = {}
self.where_criteria = grouping.BaseGrouper()
self._type = type
def class_(self, _class):
"""
        Specify the class to delete from.
"""
self._class = _class
return self
def where(self, field, value = None, operator = None):
"""
        Add conditions to the query, joined by AND.
"""
if field is None:
return self
conjunction = None
if value is None and isinstance(field, dict):
for f,v in field.items():
if self.where_criteria.size() > 0:
conjunction = 'AND'
self.where_criteria.append(expressions.ConditionExpression(f, v, operator=operator, conjunction=conjunction))
else:
if self.where_criteria.size() > 0:
conjunction = 'AND'
self.where_criteria.append(expressions.ConditionExpression(field, value, operator=operator, conjunction=conjunction))
return self
def where_or(self, field, value = None, operator = None):
"""
        Add conditions to the query, joined by OR.
"""
if field is None:
return self
conjunction = None
if value is None and isinstance(field, dict):
for f,v in field.items():
if self.where_criteria.size() > 0:
conjunction = 'OR'
self.where_criteria.append(expressions.ConditionExpression(f, v, operator=operator, conjunction=conjunction))
else:
if self.where_criteria.size() > 0:
conjunction = 'OR'
self.where_criteria.append(expressions.ConditionExpression(field, value, operator=operator, conjunction=conjunction))
return self
def result(self, *args, **kwargs):
"""
        Build the final SQL query string.
"""
prettify = kwargs.get('pretty', False)
sql = 'DELETE %s %s' % (self._type, self._class)
if prettify:
sql += '\n'
else:
sql += ' '
if self.where_criteria.size() > 0:
sql += 'WHERE '
sql += self.where_criteria.result()
if prettify:
sql += '\n'
else:
sql += ' '
return sql
class Create(object):
"CREATE query builder"
def __init__(self, type):
"""
        Initialize the query; type is the resource kind (Vertex|Edge).
"""
super(Create, self).__init__()
self._class = None
self._cluster = None
self._type = None
self._from = None
self._to = None
self.data = {}
self._type = type
def class_(self, _class):
"""
        Specify the class to create.
"""
self._class = _class
return self
def cluster(self, cluster):
"""
        Specify the cluster where the new resource will be stored.
"""
self._cluster = cluster
return self
def from_(self, From):
"""
        [Edge-only] specify the source of the edge.
"""
if self._type.lower() != 'edge':
raise ValueError('Cannot set From/To to non-edge objects')
self._from = From
return self
def to(self, to):
"""
        [Edge-only] specify the destination of the edge.
"""
if self._type.lower() != 'edge':
raise ValueError('Cannot set From/To to non-edge objects')
self._to = to
return self
def set(self, field, value = None):
"""
        [Edge|Vertex] set data on the resource.
"""
if value is None and isinstance(field, dict):
self.content(field)
if field and value:
self.data[field] = value
return self
def content(self, obj):
"""
        [Edge|Vertex] set data on the resource.
"""
self.data.update(obj)
return self
def result(self, *args, **kwargs):
"""
        Build the final SQL query string.
"""
prettify = kwargs.get('pretty', False)
sql = 'CREATE %s %s' % (self._type, self._class)
if prettify:
sql += '\n'
else:
sql += ' '
if self._type.lower() == 'edge':
sql += " FROM %s TO %s " % (self._from, self._to)
if self._cluster:
sql += 'CLUSTER %s' % self._cluster
if prettify:
sql += '\n'
else:
sql += ' '
if self.data:
sql += 'CONTENT ' + json.dumps(self.data)
return sql
class Update(object):
"""
UPDATE Query Builder
"""
def __init__(self, _class):
"""
        Initialize the query; _class is the target class to update.
"""
super(Update, self).__init__()
self._class = None
self._cluster = None
self.data = {}
self.where_criteria = grouping.BaseGrouper()
self._class = _class
def set(self, field, value = None):
"""
        [Edge|Vertex] set data on the resource.
"""
if value is None and isinstance(field, dict):
self.content(field)
if field and value:
self.data[field] = value
return self
def content(self, obj):
"""
        [Edge|Vertex] set data on the resource.
"""
self.data.update(obj)
return self
def where(self, field, value = None, operator = None):
"""
        Add conditions to the query, joined by AND.
"""
if field is None:
return self
conjunction = None
if value is None and isinstance(field, dict):
for f,v in field.items():
if self.where_criteria.size() > 0:
conjunction = 'AND'
self.where_criteria.append(expressions.ConditionExpression(f, v, operator=operator, conjunction=conjunction))
else:
if self.where_criteria.size() > 0:
conjunction = 'AND'
self.where_criteria.append(expressions.ConditionExpression(field, value, operator=operator, conjunction=conjunction))
return self
def where_or(self, field, value = None, operator = None):
"""
        Add conditions to the query, joined by OR.
"""
if field is None:
return self
conjunction = None
if value is None and isinstance(field, dict):
for f,v in field.items():
if self.where_criteria.size() > 0:
conjunction = 'OR'
self.where_criteria.append(expressions.ConditionExpression(f, v, operator=operator, conjunction=conjunction))
else:
if self.where_criteria.size() > 0:
conjunction = 'OR'
self.where_criteria.append(expressions.ConditionExpression(field, value, operator=operator, conjunction=conjunction))
return self
def result(self, *args, **kwargs):
"""
        Build the final SQL query string.
"""
prettify = kwargs.get('pretty', False)
sql = 'UPDATE %s' % self._class
if prettify:
sql += '\n'
else:
sql += ' '
if self.data:
sql += 'MERGE ' + json.dumps(self.data)
if prettify:
sql += '\n'
else:
sql += ' '
if self.where_criteria.size() > 0:
sql += 'WHERE '
sql += self.where_criteria.result()
if prettify:
sql += '\n'
else:
sql += ' '
return sql
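# Usage sketches for Create and Update (illustrative only; class and field names are made up):
#   Create('VERTEX').class_('Person').content({'name': 'Ana'}).result()
#   Update('Person').set('name', 'Ana').where('id', 1, '=').result()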
| 2.515625 | 3 |
baekjoon/python/why_the_cow_landing_on_information_islands_17128.py | yskang/AlgorithmPractice | 0 | 12787288 | # Title: 소가 정보섬에 올라온 이유
# Link: https://www.acmicpc.net/problem/17128
import sys
sys.setrecursionlimit(10 ** 6)
read_list_int = lambda: list(map(int, sys.stdin.readline().strip().split(' ')))
def solution(n: int, q: int, cows: list, qs: list):
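    # Sliding-window idea: every window of 4 consecutive cows (circular, hence the
    # doubled list below) contributes the product of its values to the sum.
    # Flipping cow q only changes the sign of the 4 windows that contain it, so the
    # running sum is updated by subtracting twice those window products.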
cows = cows + cows
parts = []
for start in range(n):
parts.append(cows[start]*cows[start+1]*cows[start+2]*cows[start+3])
s = sum(parts)
for q in qs:
s -= 2*(parts[q-1]+parts[q-2]+parts[q-3]+parts[q-4])
parts[q-1] *= -1
parts[q-2] *= -1
parts[q-3] *= -1
parts[q-4] *= -1
print(s)
def main():
n, q = read_list_int()
cows = read_list_int()
qs = read_list_int()
solution(n, q, cows, qs)
if __name__ == '__main__':
main() | 3.03125 | 3 |
oletools/thirdparty/xglob/__init__.py | maniVix/oletools | 2,059 | 12787289 | <gh_stars>1000+
from .xglob import * | 1.101563 | 1 |
dlapp/dlquery.py | Geeks-Trident-LLC/dlquery | 0 | 12787290 | """Module containing the logic for querying dictionary or list object."""
import re
import operator
from dlapp import utils
from dlapp.argumenthelper import validate_argument_type
from dlapp.argumenthelper import validate_argument_is_not_empty
from dlapp.collection import Element
class DLQueryError(Exception):
"""Use to capture error for DLQuery instance"""
class DLQueryDataTypeError(DLQueryError):
"""Use to capture error of unsupported query data type."""
class DLQuery:
"""This is a class for querying dictionary or list object.
Attributes
__________
data (list, tuple, or dict): list or dictionary instance.
Properties
----------
is_dict -> bool
is_list -> bool
Methods
-------
keys() -> dict_keys or odict_keys
values() -> dict_values or odict_values
items() -> dict_items or odict_items
get(index, default=None) -> Any
find(node=None, lookup='', select='') -> List
Raise
-----
TypeError: if failed to invoke ``iter`` built-in function.
"""
def __init__(self, data):
validate_argument_type(list, tuple, dict, data=data)
self.data = data
self._is_dict = None
self._is_list = None
############################################################################
# Special methods
############################################################################
def __len__(self):
return len(self.data)
def __getitem__(self, item):
return self.data[item]
def __iter__(self):
if self.is_dict:
return iter(self.data.keys())
elif self.is_list:
return iter(self.data)
else:
fmt = '{!r} object is not iterable.'
msg = fmt.format(type(self).__name__)
raise TypeError(msg)
def __bool__(self):
return bool(self.data)
def __eq__(self, other):
if isinstance(other, self.__class__):
result = operator.eq(self.data, other.data)
else:
result = operator.eq(self.data, other)
return result
def __ne__(self, other):
if isinstance(other, self.__class__):
result = operator.ne(self.data, other.data)
else:
result = operator.ne(self.data, other)
return result
############################################################################
# properties
############################################################################
@property
def is_dict(self):
"""Check if data of DLQuery is a dictionary data."""
if self._is_dict is None:
self._is_dict = isinstance(self.data, dict)
return self._is_dict
@property
def is_list(self):
"""Check if data of DLQuery is a list or tuple data."""
if self._is_list is None:
self._is_list = isinstance(self.data, (list, tuple))
return self._is_list
############################################################################
# public methods
############################################################################
def keys(self):
"""a set-like object providing a view on D's keys"""
result = utils.foreach(self.data, choice='keys')
return result
def values(self):
"""a set-like object providing a view on D's values"""
result = utils.foreach(self.data, choice='values')
return result
def items(self):
"""a set-like object providing a view on D's items"""
result = utils.foreach(self.data, choice='items')
return result
def get(self, index, default=None, on_exception=False):
"""if DLQuery is a list, then return the value for index if
index is in the list, else default.
if DLQuery is a dictionary, then return the value for key (i.e index)
if key is in the dictionary, else default.
Parameters
----------
index (int, str): a index of list or a key of dictionary.
default (Any): a default value if no element in list or
in dictionary is found.
on_exception (bool): raise Exception if it is True. Default is False.
Returns
-------
Any: any value from DLQuery.data
"""
try:
if self.is_list:
if isinstance(index, int):
return self.data[index]
elif isinstance(index, str):
pattern = r'-?[0-9]+$'
if re.match(pattern, index.strip()):
return self.data[int(index)]
else:
count = index.count(':')
if count == 1:
i, j = [x.strip() for x in index.split(':')]
chks = [
re.match(pattern, i.strip()) or i == '',
re.match(pattern, j.strip()) or j == ''
]
if any(chks):
i = int(i) if i else None
j = int(j) if j else None
slice_obj = slice(i, j)
return self.data[slice_obj]
else:
if on_exception:
return self.data[index]
else:
return default
elif count == 2:
i, j, k = [x.strip() for x in index.split(':')]
chks = [
re.match(pattern, i.strip()) or i == '',
re.match(pattern, j.strip()) or j == '',
re.match(pattern, k.strip()) or k == ''
]
if any(chks):
i = int(i) if i else None
j = int(j) if j else None
k = int(k) if k else None
slice_obj = slice(i, j, k)
return self.data[slice_obj]
else:
if on_exception:
return self.data[index]
else:
return default
else:
if on_exception:
return self.data[index]
else:
return default
else:
return default
else:
key = index
return self.data.get(key, default)
except Exception as ex: # noqa
if on_exception:
raise ex
else:
return default
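    # Illustrative examples for get() (assuming data = [10, 20, 30, 40]):
    #   .get(1)     -> 20
    #   .get("-1")  -> 40
    #   .get("1:3") -> [20, 30]
    #   .get("::2") -> [10, 30]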
def find(self, node=None, lookup='', select='', on_exception=False):
"""recursively search a lookup.
Parameters
----------
node (dict, list): a dict, dict-like, list, or list-like instance.
lookup (str): a search pattern.
select (str): a select statement.
on_exception (bool): raise `Exception` if set True, otherwise, return False.
Returns
-------
List: list of Any.
"""
node = node or self.data
lookup = str(lookup).strip()
if lookup == '' and select == '':
return node
validate_argument_is_not_empty(lookup=lookup)
validate_argument_type(list, tuple, dict, node=node)
elm_obj = Element(node, on_exception=on_exception)
records = elm_obj.find(lookup, select=select)
return records
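# Usage sketch (illustrative only; the sample data is made up):
#   query = DLQuery([{"name": "a", "value": 1}, {"name": "b", "value": 2}])
#   query.get(0)                 # -> {"name": "a", "value": 1}
#   query.find(lookup="value")   # recursive lookup, delegated to Element.find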
| 2.921875 | 3 |
src/worker/streamable.py | christopher-dG/osr2mp4-bot | 2 | 12787291 | <gh_stars>1-10
import logging
import os
from datetime import timedelta
from pathlib import Path
import boto3
import requests
from requests import Response
from . import ReplyWith
from ..common import enqueue
def upload(video: Path, title: str) -> str:
"""Upload `video` to Streamable."""
# This technique comes from: https://github.com/adrielcafe/AndroidStreamable
# We're not actually uploading the file ourselves,
# just supplying a URL where it can find the video file.
# It's assumed that `video` is available at $SERVER_ADDR.
# Docker Compose handles this, provided that $SERVER_ADDR is publically accessible.
# If $SERVER_ADDR cannot be accessed, setting $USE_S3_URLS to `true` will upload
# the video to the S3 bucket $S3_BUCKET, and its public URL will be used.
# BEWARE: S3 IS EXPENSIVE AND SHOULD ONLY BE USED WHEN ABSOLUTELY NECESSARY.
auth = os.environ["STREAMABLE_USERNAME"], os.environ["STREAMABLE_PASSWORD"]
s3 = os.getenv("USE_S3_URLS") == "true"
if s3:
source_url = _s3_upload(video)
else:
source_url = f"{os.environ['SERVER_ADDR']}/{video.name}"
params = {"url": source_url, "title": title}
resp = requests.get("https://api.streamable.com/import", auth=auth, params=params)
_check_response(resp)
shortcode = resp.json()["shortcode"]
# Because the response comes before the upload is actually finished,
# we can't delete the video file yet, although we need to eventually.
# Create a new job that handles that at some point in the future.
enqueue(_wait, shortcode, video, s3=s3)
return f"https://streamable.com/{shortcode}"
def _s3_upload(video: Path) -> str:
"""Upload `video` to S3 and return a public URL."""
# It's assumed that credentials are set via environment variables.
s3 = boto3.client("s3")
bucket = os.environ["S3_BUCKET"]
key = video.name
with video.open("rb") as f:
s3.put_object(ACL="public-read", Body=f, Bucket=bucket, Key=key)
return f"https://{bucket}.s3.amazonaws.com/{key}"
def _check_response(resp: Response) -> None:
"""Verify that the `resp` to an upload request indicates success."""
ex = ReplyWith("Sorry, uploading to Streamable failed.")
if resp.headers["Content-Type"] != "application/json":
logging.error("Streamable did not return JSON")
raise ex
if not resp.ok:
logging.error(f"Streamable upload failed ({resp.status_code})")
logging.info(resp.text)
raise ex
if not isinstance(resp.json().get("shortcode"), str):
logging.error("Streamable did not return shortcode")
logging.info(resp.text)
raise ex
def _wait(shortcode: str, video: Path, s3: bool = False) -> None:
"""Wait for the video with `shortcode` to be uploaded, then delete `video`."""
resp = requests.get(f"https://api.streamable.com/videos/{shortcode}")
if not resp.ok:
logging.warning("Retrieving video failed")
return
status = resp.json()["status"]
if status in [0, 1]:
# Still in progress, so run this function again in a while.
# In the meantime, exit so that the worker gets freed up.
enqueue(_wait, shortcode, video, s3=s3, wait=timedelta(seconds=5))
elif status == 2:
# Upload is finished, we can delete the local file now.
video.unlink()
if s3:
_s3_delete(video.name)
else:
# If this happens too much, then we'll run out of disk space.
logging.warning(f"Status {status} from Streamable ({shortcode} {video})")
def _s3_delete(key: str) -> None:
"""Delete an object with `key` from the S3 bucket."""
s3 = boto3.client("s3")
s3.delete_object(Bucket=os.environ["S3_BUCKET"], Key=key)
| 2.59375 | 3 |
app/SellerReviews.py | leahokamura/RetailTherapy | 0 | 12787292 | # from __future__ import print_function # In python 2.7
from flask import render_template
from flask_login import current_user
import datetime
#import forms
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, IntegerField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, NumberRange
#import models
from .models.sellerreview import SellerReview
from .models.seller import Seller
from flask import current_app as app
from flask import Blueprint
bp = Blueprint('sellerreviews', __name__)
#routes to reviews page for certain seller
@bp.route('/sellerreviews/<int:seller_id>/<int:number>', methods=['GET', 'POST'])
def SellerReviews(seller_id, number):
s_reviews = SellerReview.get_all_seller_reviews_for_seller(seller_id, number)
seller_review_stats = SellerReview.get_stats(seller_id)
seller_name = Seller.get_seller_info(seller_id)
SR_check = True
if current_user.is_authenticated:
SR_check = SellerReview.review_check(seller_id, current_user.uid)
total_reviews = SellerReview.get_total_number_seller_reviews_for_seller(seller_id)
return render_template('sellerreviews.html',
sellerreviews = s_reviews,
sellerreviewstats = seller_review_stats,
SRcheck = SR_check,
sellername = seller_name,
number = number,
total = total_reviews) | 2.40625 | 2 |
robosuite/models/grippers/gripper_factory.py | StanfordVL/Lasersuite | 5 | 12787293 | """
Defines a string based method of initializing grippers
"""
from .panda_gripper import PandaGripper
from .wiping_gripper import WipingGripper
from .pr2_gripper import PR2Gripper
from .rethink_gripper import RethinkGripper
from .robotiq_gripper import RobotiqGripper
from .robotiq_three_finger_gripper import RobotiqThreeFingerGripper
def gripper_factory(name, idn=0):
"""
Generator for grippers
Creates a GripperModel instance with the provided name.
Args:
name: the name of the gripper class
        idn (int or str): number or some other unique identification string for this gripper instance
Returns:
gripper: GripperModel instance
Raises:
        ValueError: if the gripper name is unknown
"""
if name == "RethinkGripper":
return RethinkGripper(idn=idn)
if name == "PR2Gripper":
return PR2Gripper(idn=idn)
if name == "RobotiqGripper":
return RobotiqGripper(idn=idn)
if name == "RobotiqThreeFingerGripper":
return RobotiqThreeFingerGripper(idn=idn)
if name == "PandaGripper":
return PandaGripper(idn=idn)
if name == "WipingGripper":
return WipingGripper(idn=idn)
raise ValueError("Unknown gripper name: {}".format(name))
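if __name__ == "__main__":
    # Illustrative smoke test (assumption: running this module directly is acceptable);
    # gripper_factory returns an instance of the gripper class matching the given name.
    print(type(gripper_factory("PandaGripper", idn=0)).__name__)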
| 3.453125 | 3 |
crawl/views.py | mannyhappenings/WebCrawler | 0 | 12787294 | from django.shortcuts import render
from crawler import Crawler
from django.http import HttpResponse
crawlers = {}
def index(request, params=''):
post_data = dict(request.POST)
post_data['urls'] = post_data['url[]']
for link in post_data['urls']:
crawlers[link] = Crawler()
crawlers[link].setUrl(link)
crawlers[link].start()
return render(request, 'crawl/index.html', {'urls': post_data['urls']})
def status(request):
response_text = "Total documents collected: <span class='count'>" + str(Crawler.crawledUrls['size']) + "</span>"
return HttpResponse(str(response_text))
def stop(request):
for key in crawlers.keys():
crawlers[key].stop()
response_text = "Stopped"
return HttpResponse(response_text) | 2.375 | 2 |
lib_bgpstream_website_collector/old_tests/test_data_classes.py | jfuruness/lib_bgpstream_website_collector | 16 | 12787295 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""This file contains tests for the data_classes.py file.
For specifics on each test, see the docstrings under each function.
"""
__authors__ = ["<NAME>, <NAME>"]
__credits__ = ["<NAME>, <NAME>"]
__Lisence__ = "BSD"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import pytest
from unittest.mock import patch
from bs4 import BeautifulSoup as Soup
from ..data_classes import Data, Hijack, Leak, Outage
from ..tables import Hijacks_Table, Leaks_Table, Outages_Table
from ..event_types import BGPStream_Website_Event_Types
from itertools import combinations
from .create_HTML import HTML_Creator
# Importing actually runs the tests
#from .test_tables import Test_Hijacks_Table, Test_Leaks_Table, Test_Outages_Table
class Test_Data:
"""Tests all functions within the Data class.
NOTE: You may want to make this not a test class
and simply have all other classes inherit it."""
@staticmethod
def init(event):
type_ = event['event_type']
if type_ == BGPStream_Website_Event_Types.HIJACK.value:
return Hijack('/tmp/')
if type_ == BGPStream_Website_Event_Types.LEAK.value:
return Leak('/tmp/')
if type_ == BGPStream_Website_Event_Types.OUTAGE.value:
return Outage('/tmp/')
@staticmethod
def uncommon_info(event):
type_ = event['event_type']
if type_ == BGPStream_Website_Event_Types.HIJACK.value:
return ['expected_origin_name', 'expected_origin_number',
'detected_origin_name', 'detected_origin_number',
'expected_prefix', 'more_specific_prefix',
'detected_as_path', 'detected_by_bgpmon_peers']
if type_ == BGPStream_Website_Event_Types.LEAK.value:
return ['origin_as_name', 'origin_as_number',
'leaker_as_name', 'leaker_as_number',
'leaked_prefix', 'leaked_to_number', 'leaked_to_name',
'example_as_path', 'detected_by_bgpmon_peers']
if type_ == BGPStream_Website_Event_Types.OUTAGE.value:
return ['as_name', 'as_number',
'number_prefixes_affected', 'percent_prefixes_affected']
def test_append(self, setup):
"""Tests the append function
Should have input for every combo of:
-hijack, leak, outage
-country info vs non country info
And check expected output.
"""
for event in setup.events:
data = Test_Data.init(event)
with patch('lib_bgp_data.utils.utils.get_tags') as mock:
mock.side_effect = setup.open_custom_HTML
data.append(event['row'])
# Columns are retrieved from the Postgres table columns
# which has an 'id' column used as the primary key.
# Not part of row data, so must be removed
cols = data._columns
cols.remove('id')
for i, c in enumerate(cols):
assert data.data[0][i] == event[c]
def test_db_insert(self, setup):
"""Tests the db_insert function
Should have input with the powerset of all the combinations of:
-hijack, leak, outage
-country info vs non country info
And check expected output.
"""
for event in setup.events:
data = Test_Data.init(event)
# need something to insert
with patch('lib_bgp_data.utils.utils.get_tags') as mock:
mock.side_effect = setup.open_custom_HTML
data.append(event['row'])
with data.table() as t:
for IPV4, IPV6 in combinations([True, False], 2):
data.db_insert(IPV4, IPV6)
# db_insert creates indexes
sql = f"""SELECT * FROM pg_indexes
WHERE indexname = '{t.name}_index'"""
assert len(t.execute(sql)) == 1
# db_insert deletes duplicates
sql = f"SELECT DISTINCT * FROM {t.name}"
assert t.get_all() == t.execute(sql)
# check IPV filtering was successful
for IPV, num in zip([IPV4, IPV6], [4, 6]):
if not IPV and event['event_type'] != BGPStream_Website_Event_Types.OUTAGE.value:
sql = f"""SELECT COUNT({t.prefix_column})
FROM {t.name}
WHERE family({t.prefix_column}) = {num}"""
assert t.get_count(sql) == 0
def test_parse_common_elements(self, setup):
"""Tests the parse_common_elements function
Should have input for every combo of:
-hijack, leak, outage
-country info vs non country info
And check expected output.
"""
for event in setup.events:
data = Test_Data.init(event)
with patch('lib_bgp_data.utils.utils.get_tags') as mock:
mock.side_effect = setup.open_custom_HTML
as_info, extended_children = data._parse_common_elements(event['row'])
assert event['as_info'] == as_info
assert event['extended_children'] == extended_children
def test_parse_as_info(self, setup):
"""Tests the parse_as_info function
Should have input for every combo of:
-hijack, leak, outage
-country info vs non country info
-every possible combo if as info formatting
And check expected output.
"""
for event in setup.events:
d = Test_Data.init(event)
as_info = event['as_info']
# the AS info for outages will be a a single string
if isinstance(as_info, str):
assert event['parsed_as_info1'] == d._parse_as_info(as_info)
# for hijacks and leaks, there are 2 pieces of AS info in a list
elif isinstance(as_info, list):
assert event['parsed_as_info1'] == d._parse_as_info(as_info[1])
assert event['parsed_as_info2'] == d._parse_as_info(as_info[3])
def test_format_temp_row(self, setup):
"""Tests the format temp row func function
Make sure list exists with all columns but ID.
"""
# test by putting the same string for every column except ID
# what should returned is just the a list of the same string
# the string that's put for ID should not be found
for event in setup.events:
data = Test_Data.init(event)
# usually initialized in append
data._temp_row = {}
for col in data._columns:
# id columns is ignored
if col == 'id':
data._temp_row[col] = 'should not be here'
# quotes should be removed
else:
data._temp_row[col] = 'no quotes"'
expected = ['no quotes' for i in range(len(data._columns)-1)]
assert data._format_temp_row() == expected
def test_parse_uncommon_info(self, setup):
"""Tests the parse_uncommon_elements function
input all kinds of rows and check expected output.
"""
for event in setup.events:
data = Test_Data.init(event)
# initialize temp row. it's usually initialized in append()
data._temp_row = {}
data._parse_uncommon_info(event['as_info'], event['extended_children'])
for info in Test_Data.uncommon_info(event):
assert data._temp_row[info] == event[info]
| 2.375 | 2 |
Tuples and Sets/04. Parking Lot.py | milenpenev/Python_Advanced | 0 | 12787296 | n = int(input())
cars = set()
for _ in range(n):
command, number = input().split(", ")
if command == "IN":
cars.add(number)
else:
if number in cars:
cars.remove(number)
if not cars :
print("Parking Lot is Empty")
else:
[print(n) for n in cars] | 3.65625 | 4 |
wally/suits/io/fio.py | Mirantis/rally-results-processor | 41 | 12787297 | <filename>wally/suits/io/fio.py<gh_stars>10-100
import os.path
import logging
from typing import cast, Any, List, Union
import numpy
from cephlib.units import ssize2b, b2ssize
from cephlib.node import IRPCNode, get_os
import wally
from ...utils import StopTestError
from ..itest import ThreadedTest
from ...result_classes import TimeSeries, DataSource
from ..job import JobConfig
from .fio_task_parser import execution_time, fio_cfg_compile, FioJobConfig, FioParams, get_log_files
from . import rpc_plugin
from .fio_hist import get_lat_vals
logger = logging.getLogger("wally")
class FioTest(ThreadedTest):
soft_runcycle = 5 * 60
retry_time = 30
configs_dir = os.path.dirname(__file__) # type: str
name = 'fio'
job_config_cls = FioJobConfig
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
get = self.suite.params.get
self.remote_task_file = self.join_remote("task.fio")
self.remote_output_file = self.join_remote("fio_result.json")
self.use_system_fio = get('use_system_fio', False) # type: bool
self.use_sudo = get("use_sudo", True) # type: bool
self.force_prefill = get('force_prefill', False) # type: bool
self.skip_prefill = get('skip_prefill', False) # type: bool
self.load_profile_name = self.suite.params['load'] # type: str
if os.path.isfile(self.load_profile_name):
self.load_profile_path = self.load_profile_name # type: str
else:
self.load_profile_path = os.path.join(self.configs_dir, self.load_profile_name+ '.cfg')
self.load_profile = open(self.load_profile_path, 'rt').read() # type: str
if self.use_system_fio:
self.fio_path = "fio" # type: str
else:
self.fio_path = os.path.join(self.suite.remote_dir, "fio")
self.load_params = self.suite.params['params']
self.file_name = self.load_params['FILENAME']
if 'FILESIZE' not in self.load_params:
logger.debug("Getting test file sizes on all nodes")
try:
sizes = {node.conn.fs.file_stat(self.file_name)[b'size']
for node in self.suite.nodes}
except Exception:
logger.exception("FILESIZE is not set in config file and fail to detect it." +
"Set FILESIZE or fix error and rerun test")
raise StopTestError()
if len(sizes) != 1:
logger.error("IO target file %r has different sizes on test nodes - %r",
self.file_name, sizes)
raise StopTestError()
self.file_size = list(sizes)[0]
logger.info("Detected test file size is %sB", b2ssize(self.file_size))
if self.file_size % (4 * 1024 ** 2) != 0:
tail = self.file_size % (4 * 1024 ** 2)
logger.warning("File size is not proportional to 4M, %sb at the end will not be used for test",
str(tail // 1024) + "Kb" if tail > 1024 else str(tail) + "b")
self.file_size -= self.file_size % (4 * 1024 ** 2)
self.load_params['FILESIZE'] = self.file_size
else:
self.file_size = ssize2b(self.load_params['FILESIZE'])
self.job_configs = list(fio_cfg_compile(self.load_profile, self.load_profile_path,
cast(FioParams, self.load_params)))
if len(self.job_configs) == 0:
logger.error("Empty fio config provided")
raise StopTestError()
self.exec_folder = self.suite.remote_dir
def config_node(self, node: IRPCNode) -> None:
plugin_code = open(rpc_plugin.__file__.rsplit(".", 1)[0] + ".py", "rb").read() # type: bytes
node.upload_plugin("fio", plugin_code)
try:
node.conn.fs.rmtree(self.suite.remote_dir)
except Exception:
pass
try:
node.conn.fs.makedirs(self.suite.remote_dir)
except Exception:
msg = "Failed to recreate folder {} on remote {}.".format(self.suite.remote_dir, node)
logger.exception(msg)
raise StopTestError()
# TODO: check this during config validation
if self.file_size % (4 * (1024 ** 2)) != 0:
logger.error("Test file size must be proportional to 4MiB")
raise StopTestError()
self.install_utils(node)
if self.skip_prefill:
logger.info("Prefill is skipped due to 'skip_prefill' set to true")
else:
mb = int(self.file_size / 1024 ** 2)
logger.info("Filling test file %s on node %s with %sMiB of random data", self.file_name, node.info, mb)
is_prefilled, fill_bw = node.conn.fio.fill_file(self.file_name, mb,
force=self.force_prefill,
fio_path=self.fio_path)
if not is_prefilled:
logger.info("Test file on node %s is already prefilled", node.info)
elif fill_bw is not None:
logger.info("Initial fio fill bw is %s MiBps for %s", fill_bw, node.info)
def install_utils(self, node: IRPCNode) -> None:
os_info = get_os(node)
if self.use_system_fio:
if os_info.distro != 'ubuntu':
logger.error("Only ubuntu supported on test VM")
raise StopTestError()
node.conn.fio.install('fio', binary='fio')
else:
node.conn.fio.install('bzip2', binary='bzip2')
fio_dir = os.path.dirname(os.path.dirname(wally.__file__)) # type: str
fio_dir = os.path.join(os.getcwd(), fio_dir)
fio_dir = os.path.join(fio_dir, 'fio_binaries')
fname = 'fio_{0.release}_{0.arch}.bz2'.format(os_info)
fio_path = os.path.join(fio_dir, fname) # type: str
if not os.path.exists(fio_path):
logger.error("No prebuild fio binary available for {0}".format(os_info))
raise StopTestError()
bz_dest = self.join_remote('fio.bz2') # type: str
node.copy_file(fio_path, bz_dest, compress=False)
node.run("bzip2 --decompress {} ; chmod a+x {}".format(bz_dest, self.join_remote("fio")))
def get_expected_runtime(self, job_config: JobConfig) -> int:
return execution_time(cast(FioJobConfig, job_config))
def prepare_iteration(self, node: IRPCNode, job: JobConfig) -> None:
node.put_to_file(self.remote_task_file, str(job).encode("utf8"))
# TODO: get a link to substorage as a parameter
def run_iteration(self, node: IRPCNode, job: JobConfig) -> List[TimeSeries]:
exec_time = execution_time(cast(FioJobConfig, job))
fio_cmd_templ = "cd {exec_folder}; " + \
"{fio_path} --output-format=json --output={out_file} --alloc-size=262144 {job_file}"
cmd = fio_cmd_templ.format(exec_folder=self.exec_folder,
fio_path=self.fio_path,
out_file=self.remote_output_file,
job_file=self.remote_task_file)
must_be_empty = node.run(cmd, timeout=exec_time + max(300, exec_time), check_timeout=1).strip()
for line in must_be_empty.split("\n"):
if line.strip():
if 'only root may flush block devices' in line:
continue
logger.error("Unexpected fio output: %r", must_be_empty)
break
# put fio output into storage
fio_out = node.get_file_content(self.remote_output_file)
path = DataSource(suite_id=self.suite.storage_id,
job_id=job.storage_id,
node_id=node.node_id,
sensor='fio',
dev=None,
metric='stdout',
tag='json')
self.storage.put_extra(fio_out, path)
node.conn.fs.unlink(self.remote_output_file)
files = [name for name in node.conn.fs.listdir(self.exec_folder)]
result = [] # type: List[TimeSeries]
for name, file_path, units in get_log_files(cast(FioJobConfig, job)):
log_files = [fname for fname in files if fname.startswith(file_path)]
if len(log_files) != 1:
logger.error("Found %s files, match log pattern %s(%s) - %s",
len(log_files), file_path, name, ",".join(log_files[10:]))
raise StopTestError()
fname = os.path.join(self.exec_folder, log_files[0])
raw_result = node.get_file_content(fname) # type: bytes
node.conn.fs.unlink(fname)
try:
log_data = raw_result.decode("utf8").split("\n")
except UnicodeEncodeError:
logger.exception("Error during parse %s fio log file - can't decode usint UTF8", name)
raise StopTestError()
# TODO: fix units, need to get array type from stream
open("/tmp/tt", 'wb').write(raw_result)
parsed = [] # type: List[Union[List[int], int]]
times = []
for idx, line in enumerate(log_data):
line = line.strip()
if line:
try:
time_ms_s, val_s, _, *rest = line.split(",")
times.append(int(time_ms_s.strip()))
if name == 'lat':
vals = [int(i.strip()) for i in rest]
# if len(vals) != expected_lat_bins:
# msg = f"Expect {expected_lat_bins} bins in latency histogram, " + \
# f"but found {len(vals)} at time {time_ms_s}"
# logger.error(msg)
# raise StopTestError(msg)
parsed.append(vals)
else:
parsed.append(int(val_s.strip()))
except ValueError:
logger.exception("Error during parse %s fio log file in line %s: %r", name, idx, line)
raise StopTestError()
assert not self.suite.keep_raw_files, "keep_raw_files is not supported"
histo_bins = None if name != 'lat' else numpy.array(get_lat_vals(len(parsed[0])))
ts = TimeSeries(data=numpy.array(parsed, dtype='uint64'),
units=units,
times=numpy.array(times, dtype='uint64'),
time_units='ms',
source=path(metric=name, tag='csv'),
histo_bins=histo_bins)
result.append(ts)
return result
def format_for_console(self, data: Any) -> str:
raise NotImplementedError()
| 1.757813 | 2 |
slayer/io/screenshot.py | ajduberstein/slayer | 2 | 12787298 | """
The goal of this module is to ease the creation of static maps
from this package.
Ideally, this is done headlessly (i.e., no running browser)
and quickly. Given that deck.gl requires WebGL, there aren't
lot of alternatives to using a browser.
Not yet implemented.
"""
from selenium import webdriver
# from selenium.webdriver.chrome.options import Options
# TODO this should be determined programmatically
CHROME_PATH = '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'
CHROMEDRIVER_PATH = '/usr/local/bin/chromedriver'
WINDOW_SIZE = "1920,1080"
def make_screenshot(url, output):
# options = webdriver.ChromeOptions()
driver = webdriver.Chrome(
executable_path=CHROMEDRIVER_PATH
)
driver.get(url)
driver.save_screenshot(output)
driver.close()
# This may be of interest
# https://github.com/stackgl/headless-gl
raise NotImplementedError(
'This part of the library is not complete')
| 2.5 | 2 |
paprika/io.py | wwilla7/pAPRika | 3 | 12787299 | import logging as log
import os
import base64
import json
import numpy as np
from paprika.restraints import DAT_restraint
from parmed.amber import AmberParm
from parmed import Structure
# https://stackoverflow.com/questions/27909658/json-encoder-and-decoder-for-complex-numpy-arrays
# https://stackoverflow.com/a/24375113/901925
# https://stackoverflow.com/questions/3488934/simplejson-and-numpy-array/24375113#24375113
class NumpyEncoder(json.JSONEncoder):
"""Save DAT_restraints as JSON by re-encoding `numpy` arrays."""
def default(self, obj):
"""If input object is an ndarray it will be converted into a dict
holding dtype, shape and the data, base64 encoded.
"""
if isinstance(obj, AmberParm):
log.info("Encountered AmberParm, returning name.")
return obj.name
if isinstance(obj, Structure):
log.warning("Encountered Structure, which does not store filename.")
return ""
if isinstance(obj, np.ndarray):
if obj.flags["C_CONTIGUOUS"]:
obj_data = obj.data
else:
cont_obj = np.ascontiguousarray(obj)
assert cont_obj.flags["C_CONTIGUOUS"]
obj_data = cont_obj.data
data_b64 = base64.b64encode(obj_data)
# obj_data = obj.tolist()
return dict(
__ndarray__=data_b64.decode("utf-8"),
dtype=str(obj.dtype),
shape=obj.shape,
)
elif isinstance(
obj,
(
np.int_,
np.intc,
np.intp,
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
),
):
return int(obj)
elif isinstance(obj, (np.float_, np.float16, np.float32, np.float64)):
return float(obj)
elif isinstance(obj, (np.ndarray,)):
return obj.tolist()
# Let the base class default method raise the TypeError
# return json.JSONEncoder(self, obj)
return super(NumpyEncoder, self).default(obj)
def json_numpy_obj_hook(dct):
"""Decodes a previously encoded numpy ndarray with proper shape and dtype.
:param dct: (dict) json encoded ndarray
:return: (ndarray) if input was an encoded ndarray
"""
if isinstance(dct, dict) and "__ndarray__" in dct:
data = base64.b64decode(dct["__ndarray__"])
return np.frombuffer(data, dct["dtype"]).reshape(dct["shape"])
# return dct['__ndarray__']
return dct
def save_restraints(restraint_list, filepath="restraints.json"):
log.debug("Saving restraint information as JSON.")
with open(os.path.join(filepath), "w") as f:
for restraint in restraint_list:
dumped = json.dumps(restraint.__dict__, cls=NumpyEncoder)
f.write(dumped)
f.write("\n")
def load_restraints(filepath="restraints.json"):
log.debug("Loading restraint information from JSON.")
with open(os.path.join(filepath), "r") as f:
json_data = f.read()
restraint_json = json_data.split("\n")
restraints = []
for restraint in restraint_json:
if restraint == "":
continue
loaded = json.loads(restraint, object_hook=json_numpy_obj_hook)
tmp = DAT_restraint()
tmp.__dict__ = loaded
properties = ["mask1", "mask2", "mask3", "mask4", "topology", "instances", "custom_restraint_values",
"auto_apr", "continuous_apr", "attach", "pull", "release", "amber_index"]
for class_property in properties:
if f"_{class_property}" in tmp.__dict__.keys():
tmp.__dict__[class_property] = tmp.__dict__[f"_{class_property}"]
restraints.append(tmp)
return restraints
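# Usage sketch (illustrative only): round-trip a list of DAT_restraint objects
#   save_restraints(restraint_list, filepath="restraints.json")
#   restraints = load_restraints(filepath="restraints.json")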
| 2.421875 | 2 |
PINN_Survey/architecture/tf_v1/domain_transformer.py | roman-amici/PINN_Survey | 0 | 12787300 | import PINN_Base.base_v1 as base_v1
import tensorflow as tf
'''
This is an implementation of the (unnamed)
"Improved fully-connected neural architecture" from
UNDERSTANDING AND MITIGATING GRADIENT PATHOLOGIES IN
PHYSICS-INFORMED NEURAL NETWORKS (Wang, 2020).
I have taken the liberty of naming it based on the authors
likening it to the Transformer. Unlike the transformer, it doesn't
transform the outputs, Z, but rather the inputs, X.
'''
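# Update rule implemented below (notation follows the code, not the paper):
#   T1 = NN_1(X), T2 = NN_2(X)              # the two encoder networks
#   H_0 = 2*(X - lb)/(ub - lb) - 1          # input normalisation
#   Z_k = tanh(H_{k-1} @ W_k + b_k)
#   H_k = (1 - Z_k)*T1 + Z_k*T2
#   Y   = H_{L-1} @ W_L + b_L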
class Domain_Transformer(base_v1.PINN_Base):
def __init__(self,
lower_bound,
upper_bound,
input_dim,
output_dim,
width,
depth,
**kwargs):
self.input_dim = input_dim
self.output_dim = output_dim
self.width = width
self.depth = depth
layers = [self.input_dim] + [self.width] * depth + [self.output_dim]
super().__init__(lower_bound, upper_bound, layers, **kwargs)
def _init_params(self):
# Two layer neural network
transformer_shape = [self.input_dim, self.width, self.width]
# The first encoder network
self.weights_T1, self.biases_T1 = self._init_NN(transformer_shape)
# The second encoder network
self.weights_T2, self.biases_T2 = self._init_NN(transformer_shape)
# The normal "forward" network is initialized by parent
super()._init_params()
def _domain_transformer_forward(self, X, T1, T2, weights, biases):
activations = []
H = 2.0 * (X - self.lower_bound) / \
(self.upper_bound - self.lower_bound) - 1.0
activations.append(H)
for l in range(len(weights) - 1):
W = weights[l]
b = biases[l]
Z = tf.tanh(tf.add(tf.matmul(H, W), b))
H = (1 - Z) * T1 + Z * T2
activations.append(H)
W = weights[-1]
b = biases[-1]
Y = tf.add(tf.matmul(H, W), b)
return Y, activations
def _forward(self, X):
        T1, activations_T1 = self._NN(X, self.weights_T1, self.biases_T1)
T2, activations_T2 = self._NN(X, self.weights_T2, self.biases_T2)
U, activations = self._domain_transformer_forward(
X, T1, T2, self.weights, self.biases)
if X == self.X:
self.T1 = T1
self.T2 = T2
self.activations = activations
self.Activations_T1 = activations_T1
self.Activations_T2 = activations_T2
return U
def get_T1(self, X):
return self.sess.run(self.T1, {self.X: X})
def get_T2(self, X):
return self.sess.run(self.T2, {self.X: X})
def get_all_weights(self):
return self.sess.run(self.get_all_weight_variables())
def get_all_weight_variables(self):
return [
self.weights, self.biases,
self.weights_T1, self.biases_T1,
self.weights_T2, self.biases_T2
]
def _count_params(self):
params_main = super()._count_params()
params_T1_weights = self._size_of_variable_list(self.weights_T1)
params_T1_biases = self._size_of_variable_list(self.biases_T1)
params_T2_weights = self._size_of_variable_list(self.weights_T2)
params_T2_biases = self._size_of_variable_list(self.biases_T2)
return params_main + params_T1_weights + params_T1_biases + params_T2_weights + params_T2_biases
def get_architecture_description(self):
params = self._count_params()
return {
"arch_name": "domain_transformer",
"n_params": params,
"shape_main": self.layers[:],
"shape_T1": [self.input_dim, self.width, self.output_dim],
"shape_T2": [self.input_dim, self.width, self.output_dim],
"dtype": "float32" if self.dtype == tf.float32 else "float64"
}
| 3.171875 | 3 |
code/recon/recon-bias.py | modichirag/21cm_cleaning | 1 | 12787301 | import numpy
from scipy.interpolate import InterpolatedUnivariateSpline as interpolate
from scipy.interpolate import interp1d
from cosmo4d.lab import (UseComplexSpaceOptimizer,
NBodyModel, LPTModel, ZAModel,
LBFGS, ParticleMesh)
#from cosmo4d.lab import mapbias as map
from cosmo4d import lab
from cosmo4d.lab import report, dg, objectives
from abopt.algs.lbfgs import scalar as scalar_diag
from nbodykit.cosmology import Planck15, EHPower, Cosmology
from nbodykit.algorithms.fof import FOF
from nbodykit.lab import KDDensity, BigFileMesh, BigFileCatalog, ArrayCatalog
import sys, os, json, yaml
from solve import solve
from getbiasparams import getbias, eval_bfit
sys.path.append('../')
sys.path.append('../utils/')
import HImodels
#########################################
#Set parameters here
##
cfname = sys.argv[1]
with open(cfname, 'r') as ymlfile: cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)
for i in cfg['basep'].keys(): locals()[i] = cfg['basep'][i]
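# At module scope locals() is the module's global namespace, so every key in
# cfg['basep'] (aa, bs, nc, nsteps, B, numd, ...) becomes a global variable here.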
h1model = HImodels.ModelA(aa)
truth_pm = ParticleMesh(BoxSize=bs, Nmesh=(nc, nc, nc), dtype='f4')
comm = truth_pm.comm
rank = comm.rank
if numd <= 0: num = -1
else: num = int(bs**3 * numd)
if rank == 0: print('Number of objects : ', num)
objfunc = getattr(objectives, cfg['mods']['objective'])
map = getattr(lab, cfg['mods']['map'])
#
proj = '/project/projectdirs/cosmosim/lbl/chmodi/cosmo4d/'
dfolder = '/global/cscratch1/sd/chmodi/m3127/cm_lowres/%dstepT-B%d/%d-%d-9100-fixed/'%(nsteps, B, bs, nc)
#ofolder = '/global/cscratch1/sd/chmodi/m3127/21cm_cleaning/recon/bias/L%04d-N%04d-T%02d-B%01d/'%(bs, nc, nsteps, B)
ofolder = '/global/cscratch1/sd/chmodi/m3127/21cm_cleaning/recon/L%04d-N%04d/'%(bs, nc)
if pmdisp:
    ofolder += 'T%02d-B%01d/'%(nsteps, B)
else: ofolder += 'ZA/'
prefix = '_fourier'
if rsdpos: prefix += "_rsdpos"
if masswt:
if h1masswt : fname = 's999_h1massA%s'%prefix
else: fname = 's999_mass%s'%prefix
else: fname = 's999_pos%s'%prefix
optfolder = ofolder + 'opt_%s/'%fname
if truth_pm.comm.rank == 0: print('Output Folder is %s'%optfolder)
for folder in [ofolder, optfolder]:
try: os.makedirs(folder)
except:pass
#########################################
#initiate
klin, plin = numpy.loadtxt('../../data/pklin_1.0000.txt', unpack = True)
ipk = interpolate(klin, plin)
#cosmo = Planck15.clone(Omega_cdm = 0.2685, h = 0.6711, Omega_b = 0.049)
cosmodef = {'omegam':0.309167, 'h':0.677, 'omegab':0.048}
cosmo = Cosmology.from_dict(cosmodef)
data = BigFileCatalog('/global/cscratch1/sd/chmodi/m3127/H1mass/highres/2560-9100-fixed/fastpm_%0.4f/LL-0.200/'%aa)
data = data.gslice(start = 0, stop = num)
data['Mass'] = data['Length']*data.attrs['M0']*1e10
if masswt :
masswt = data['Mass'].copy()
if h1masswt : masswt = h1model.assignhalo(masswt)
else: masswt = data['Mass'].copy()*0 + 1.
hpos, hmass = data['Position'], masswt
rsdfac = 0
if rsdpos:
with open('/global/cscratch1/sd/chmodi/m3127/H1mass/highres/2560-9100-fixed/fastpm_%0.4f/Header/attr-v2'%aa) as ff:
for line in ff.readlines():
if 'RSDFactor' in line: rsdfac = float(line.split()[-2])
hpos = data['Position'] + rsdfac*data['Velocity']*numpy.array([0, 0, 1]).reshape(1, -1)
hlayout = truth_pm.decompose(hpos)
hmesh = truth_pm.paint(hpos, layout=hlayout, mass=hmass)
hmesh /= hmesh.cmean()
hmesh -= 1.
rankweight = sum(masswt.compute())
totweight = comm.allreduce(rankweight)
rankweight = sum((masswt**2).compute())
totweight2 = comm.allreduce(rankweight)
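# Weighted shot noise: V / n_eff, with the effective number of tracers
# n_eff = (sum w)^2 / sum(w^2).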
noise = bs**3 / (totweight**2/totweight2)
if rank == 0 : print('Noise : ', noise)
#########################################
#dynamics
stages = numpy.linspace(0.1, aa, nsteps, endpoint=True)
if pmdisp: dynamic_model = NBodyModel(cosmo, truth_pm, B=B, steps=stages)
else: dynamic_model = ZAModel(cosmo, truth_pm, B=B, steps=stages)
if rank == 0: print(dynamic_model)
#noise
#Artificially low noise since the data is constructed from the model
truth_noise_model = map.NoiseModel(truth_pm, None, noisevar*(truth_pm.BoxSize/truth_pm.Nmesh).prod(), 1234)
truth_noise_model = None
#Create and save data if not found
dyn = BigFileCatalog(dfolder + 'fastpm_%0.4f/1'%aa)
s_truth = BigFileMesh(dfolder + 'linear', 'LinearDensityK').paint()
mock_model_setup = map.MockModel(dynamic_model, rsdpos=rsdpos, rsdfac=rsdfac)
fpos, linear, linearsq, shear = mock_model_setup.get_code().compute(['x', 'linear', 'linearsq', 'shear'], init={'parameters': s_truth})
grid = truth_pm.generate_uniform_particle_grid(shift=0.0, dtype='f4')
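# Fit the quadratic bias model: the halo/HI field hmesh is expanded in the
# operators [linear, linear^2, shear] evaluated at the evolved particle
# positions; getbias returns the best-fit coefficients (params) and the
# resulting model field (bmod).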
params, bmod = getbias(truth_pm, hmesh, [linear, linearsq, shear], fpos, grid)
title = ['%0.3f'%i for i in params]
kerror, perror = eval_bfit(hmesh, bmod, optfolder, noise=noise, title=title, fsize=15)
ipkerror = interp1d(kerror, perror, bounds_error=False, fill_value=(perror[0], perror[-1]))
mock_model = map.MockModel(dynamic_model, params=params, rsdpos=rsdpos, rsdfac=rsdfac)
data_p = mock_model.make_observable(s_truth)
data_p.mapp = hmesh.copy()
data_p.save(optfolder+'datap/')
if rank == 0: print('datap saved')
#data_n = truth_noise_model.add_noise(data_p)
#data_n.save(optfolder+'datan/')
#if rank == 0: print('datan saved')
fit_p = mock_model.make_observable(s_truth)
fit_p.save(optfolder+'fitp/')
if rank == 0: print('fitp saved')
##
if rank == 0: print('data_p, data_n created')
################################################
#Optimizer
if cfg['init']['sinit'] is None:
s_init = truth_pm.generate_whitenoise(777, mode='complex')\
.apply(lambda k, v: v * (ipk(sum(ki **2 for ki in k) **0.5) / v.BoxSize.prod()) ** 0.5)\
.c2r()*0.001
sms = [4.0, 2.0, 1.0, 0.5, 0.0]
else:
s_init = BigFileMesh(cfg['init']['sinit'], 's').paint()
sms = cfg['init']['sms']
    if sms is None: sms = [4.0, 2.0, 1.0, 0.5, 0.0]
x0 = s_init
N0 = nc
C = x0.BoxSize[0] / x0.Nmesh[0]
for Ns in sms:
if truth_pm.comm.rank == 0: print('\nDo for cell smoothing of %0.2f\n'%(Ns))
sml = C * Ns
rtol = 0.005
run = '%d-%0.2f'%(N0, Ns)
if Ns == sms[0]:
if cfg['init']['sinit'] is not None: run += '-nit_%d-sm_%.2f'%(cfg['init']['nit'], cfg['init']['sml'])
obj = objfunc(mock_model, truth_noise_model, data_p, prior_ps=ipk, error_ps=ipkerror, sml=sml)
x0 = solve(N0, x0, rtol, run, Ns, prefix, mock_model, obj, data_p, truth_pm, optfolder, saveit=20, showit=5, title=None)
#########################################
##def gaussian_smoothing(sm):
## def kernel(k, v):
## return numpy.exp(- 0.5 * sm ** 2 * sum(ki ** 2 for ki in k)) * v
## return kernel
##
#########################################
#optimizer
##
##def solve(Nmesh, x0, rtol, run, Nsm):
##
## pm = truth_pm.resize(Nmesh=(Nmesh, Nmesh, Nmesh))
## atol = pm.Nmesh.prod() * rtol
## x0 = pm.upsample(x0, keep_mean=True)
## #data = data_n.downsample(pm)
## #IDEAL no noise limit
## data = data_p.downsample(pm)
##
## # smooth the data. This breaks the noise model but we don't need it
## # for lower resolution anyways.
## sml = pm.BoxSize[0] / Nmesh * Nsm
##
## #dynamic_model = ZAModel(cosmo, truth_pm, B=B, steps=stages)
## #mock_model = map.MockModel(dynamic_model)
##
## # an approximate noise model, due to smoothing this is correct only at large scale.
## noise_model = truth_noise_model #.downsample(pm)
##
## obj = map.SmoothedObjective(mock_model, noise_model, data, prior_ps=pk, sml=sml)#, noised=noised)
##
## prior, chi2 = obj.get_code().compute(['prior', 'chi2'], init={'parameters': data.s})
## if pm.comm.rank == 0: print('Prior, chi2 : ', prior, chi2) # for 2d chi2 is close to total pixels.
##
## fit_p = mock_model.make_observable(data.s)
## #r = obj.evaluate(fit_p, data)
## r = dg.evaluate(fit_p, data)
##
## try:
## os.makedirs(optfolder + '%s' % run)
## except:
## pass
## try:
## os.makedirs(optfolder + '%s/2pt' % run)
## except:
## pass
## dg.save_report(r, optfolder + "%s/truth.png" % run, pm)
## dg.save_2ptreport(r, optfolder + "%s/2pt/truth.png" % run, pm)
##
##
## optimizer = LBFGS(m=10, diag_update=scalar_diag)
##
## prob = obj.get_problem(atol=atol, precond=UseComplexSpaceOptimizer)
##
## def monitor(state):
## if pm.comm.rank == 0:
## print(state)
## if state.nit % 5 == 0:
## fit_p = mock_model.make_observable(state['x'])
## if state.nit % 20 == 0:
## fit_p.save(optfolder + '%s/%04d/fit_p' % (run, state['nit']))
## r = obj.evaluate(fit_p, data)
## #obj.save_report(r, optfolder + "%s/%s%02d-%04d.png"% (run, prefix, int(Nsm*10), state['nit']))
## dg.save_report(r, optfolder + "%s/%s_N%02d-%04d.png"% (run, prefix, int(Nsm*10), state['nit']), pm)
## dg.save_2ptreport(r, optfolder + "%s/2pt/%s_N%02d-%04d.png"% (run, prefix, int(Nsm*10), state['nit']), pm)
## if pm.comm.rank == 0:
## print('saved')
##
## state = optimizer.minimize(prob, x0=x0, monitor=monitor)
## fit_p = mock_model.make_observable(state['x'])
## fit_p.save(optfolder + '%s/best-fit' % run)
## r = dg.evaluate(fit_p, data)
## dg.save_report(r, optfolder + "%s/%s%02d-best-fit.png" % (run, prefix, int(Nsm*10)), pm)
## dg.save_2ptreport(r, optfolder + "%s/2pt/%s_N%02d-best-fit.png" % (run, prefix, int(Nsm*10)), pm)
## return state.x
##
| 1.6875 | 2 |
tests/integration/examples/abstract/workflows/foo.py | stucox/gadk | 1 | 12787302 | <gh_stars>1-10
from .lib import Service
class FooService(Service):
def __init__(self) -> None:
super().__init__("foo")
def service_name(self) -> str:
return "foo"
| 2.53125 | 3 |
simALModulesWrappers/ALPosture.py | Snoke13/NaoqibulletWrapper | 1 | 12787303 | #!/usr/bin/env python
__author__ = '<NAME>'
__copyright__ = """
Copyright 2019, CPE Lyon, <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
__license__ = 'Apache 2.0'
__maintainer__ = '<NAME>'
class ALPostureProxy:
def __init__(self, qiSession, sim_pepper):
print("__init__ in ALPostureProxy")
self.sim_pepper = sim_pepper
def goToPosture(self, posture_name, percentage_speed):
self.sim_pepper.goToPosture(posture_name, percentage_speed)
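# Usage sketch (names are illustrative; assumes a qi session and a simulated
# Pepper from qibullet already exist):
#   posture = ALPostureProxy(qi_session, sim_pepper)
#   posture.goToPosture("Stand", 0.6)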
| 2.109375 | 2 |
processor/rotary.py | Princeton-Penn-Vents/princeton-penn-flowmeter | 3 | 12787304 | #!/usr/bin/env python3
from __future__ import annotations
from typing import List, Dict, Any, ValuesView, TypeVar, Iterable, ItemsView
from processor.setting import Setting
import threading
T = TypeVar("T", bound="LocalRotary")
class LocalRotary:
def __init__(self, config: Dict[str, Setting]):
self.config = config
self._alarms: Dict[str, Any] = {}
# Will be set by changing a value, unset by access (to_dict)
self._changed = threading.Event()
# Cached for simplicity (dicts are ordered)
self._items: List[str] = list(self.config.keys())
def changed(self):
"""
This should always be called when an item in the rotary is changed
"""
self._changed.set()
def to_dict(self) -> Dict[str, Any]:
"Convert config to dict"
return {
k: v.to_dict()
for k, v in self.config.items()
if k not in ["C02 Setting", "Current Setting", "Reset Setting"]
}
@property
def alarms(self) -> Dict[str, Dict[str, float]]:
return self._alarms
@alarms.setter
def alarms(self, item: Dict[str, Dict[str, float]]):
self._alarms = item
def __getitem__(self, item: str):
return self.config[item]
def values(self) -> ValuesView[Setting]:
return self.config.values()
def items(self) -> ItemsView[str, Setting]:
return self.config.items()
def __repr__(self) -> str:
out = f"{self.__class__.__name__}(\n"
for key, value in self.config.items():
out += f" {key} : {value}\n"
return out + "\n)"
def __enter__(self: T) -> T:
return self
def __exit__(self, *args) -> None:
return None
def __contains__(self, key: str):
return key in self.config
def __iter__(self) -> Iterable[str]:
return iter(self.config)
def external_update(self) -> None:
"Update the display after a live setting (CurrentSetting) is changed externally"
pass
def time_left(self) -> float:
"Amount of time left on timer"
raise NotImplementedError()
def last_interaction(self) -> float:
"Timestamp of last interaction"
raise NotImplementedError()
| 2.609375 | 3 |
DataStructures/queue.py | nabiharaza/LabLearnings | 2 | 12787305 | <reponame>nabiharaza/LabLearnings
from node import LinkedNode
class Queue:
__slots__ = "front", "back"
def __init__(self):
""" Create a new empty queue.
"""
self.front = None
self.back = None
def __str__(self):
""" Return a string representation of the contents of
this queue, oldest value first.
"""
result = "Str Queue["
n = self.front
while n != None:
result += " " + str(n.value)
n = n.link
result += " ]"
return result
def is_empty(self):
return self.front == None
def enqueue(self, newValue):
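        # Append the new node at the back; the front keeps the oldest value.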
newNode = LinkedNode(newValue)
if self.front == None:
self.front = newNode
else:
self.back.link = newNode
self.back = newNode
def dequeue(self):
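        # Drop the front (oldest) node; call peek() first to read it, since
        # dequeue does not return the removed value.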
assert not self.is_empty(), "Dequeue from empty queue"
self.front = self.front.link
if self.front == None:
self.back = None
def peek(self):
        assert not self.is_empty(), "peek on empty queue"
return self.front.value
# insert = enqueue
# remove = dequeue
def test():
s = Queue()
print(s)
for value in 1, 2, 3:
s.enqueue(value)
print(s)
print("Dequeueing:", s.peek())
s.dequeue()
print(s)
for value in 15, 16:
s.enqueue(value)
print(s)
print("Removing:", s.peek())
s.dequeue()
print(s)
while not s.is_empty():
print("Dequeueing:", s.peek())
s.dequeue()
print(s)
print("Trying one too many dequeues... ")
try:
s.dequeue()
print("Problem: it succeeded!")
except Exception as e:
print("Exception was '" + str(e) + "'")
if __name__ == "__main__":
test()
| 4.09375 | 4 |
habitat_sim/utils/__init__.py | shacklettbp/habitat-sim | 1 | 12787306 | <reponame>shacklettbp/habitat-sim
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# quat_from_angle_axis and quat_rotate_vector imports
# added for backward compatibility with Habitat-API
# TODO @maksymets: remove after habitat-api/examples/new_actions.py will be
# fixed
from habitat_sim.utils import common
from habitat_sim.utils.common import quat_from_angle_axis, quat_rotate_vector
__all__ = ["quat_from_angle_axis", "quat_rotate_vector", "common"]
| 1.21875 | 1 |
src/ds_algs/sorting_algs/bubble_sort.py | E1mir/PySandbox | 0 | 12787307 | <reponame>E1mir/PySandbox<gh_stars>0
def bubble_sort(arr):
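    # In-place bubble sort, O(n^2): each outer pass bubbles the largest
    # remaining element up to position n.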
for n in range(len(arr) - 1, 0, -1):
for k in range(n):
if arr[k] > arr[k + 1]:
arr[k], arr[k + 1] = arr[k + 1], arr[k]
if __name__ == '__main__':
array = [2, 6, 7, 4, 1, 8, 5, 9, 3, 10, 15, 12, 13, 11, 14]
bubble_sort(array)
print(array)
| 3.875 | 4 |
api/models.py | DarkbordermanTemplate/fastapi-sqlalchemy | 3 | 12787308 | <gh_stars>1-10
from db import SESSION
from sqlalchemy import INT, VARCHAR, Column
from sqlalchemy.ext.declarative import declarative_base
BASE = declarative_base()
class Fruit(BASE):
__tablename__ = "fruit"
name = Column(VARCHAR, primary_key=True)
count = Column(INT, nullable=False)
def dumps(self):
return {"name": self.name, "count": self.count}
def init_db():
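    # merge() upserts on the primary key ("name"), so calling init_db() more
    # than once does not raise on an existing row.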
SESSION.merge(Fruit(**{"name": "apple", "count": 1}))
SESSION.commit()
| 2.734375 | 3 |
montagne/collector/event.py | warcy/montagne | 0 | 12787309 | from montagne.common.event import BaseEvent, BaseRequest, BaseReply
dst_to_neutron_collector = 'montagne.collector.neutron_collector'
dst_to_nova_collector = 'montagne.collector.nova_collector'
class GetOVSAgentRequest(BaseRequest):
def __init__(self, msg):
super(GetOVSAgentRequest, self).__init__()
self.dst = dst_to_neutron_collector
self.msg = msg
class GetOVSAgentReply(BaseReply):
def __init__(self, msg):
super(GetOVSAgentReply, self).__init__()
self.msg = msg
class GetHypervisorRequest(BaseRequest):
def __init__(self, msg):
super(GetHypervisorRequest, self).__init__()
self.dst = dst_to_nova_collector
self.msg = msg
class GetHypervisorReply(BaseReply):
def __init__(self, msg):
super(GetHypervisorReply, self).__init__()
self.msg = msg
class GetLBMemberRequest(BaseRequest):
def __init__(self, msg):
super(GetLBMemberRequest, self).__init__()
self.dst = dst_to_neutron_collector
self.msg = msg
class GetLBMemberReply(BaseReply):
def __init__(self, msg):
super(GetLBMemberReply, self).__init__()
self.msg = msg
| 2.21875 | 2 |
backend/chat/loggers.py | dmitriyvek/chat_app | 1 | 12787310 | <gh_stars>1-10
import logging
from django.conf import settings
def get_main_logger():
'''Return file logger for info and errors'''
log_formatter = logging.Formatter(
"%(asctime)s — %(name)s — %(levelname)s — %(message)s")
error_formatter = logging.Formatter(
"%(asctime)s — %(name)s — %(message)s")
info_handler = logging.FileHandler(settings.INFO_LOG_FILE_LOCATION)
info_handler.setLevel(logging.INFO)
info_handler.setFormatter(log_formatter)
error_handler = logging.FileHandler(settings.ERROR_LOG_FILE_LOCATION)
error_handler.setLevel(logging.ERROR)
error_handler.setFormatter(error_formatter)
    main_logger = logging.getLogger(__name__)
    # The logger itself must pass INFO records through, otherwise the handlers
    # above never receive them (the default effective level is WARNING).
    main_logger.setLevel(logging.INFO)
    main_logger.addHandler(info_handler)
    main_logger.addHandler(error_handler)
return main_logger
| 2.25 | 2 |
Desafios/Desafio 90.py | blopah/python3-curso-em-video-gustavo-guanabara-exercicios | 2 | 12787311 | print('''Faça um programa que leia nome e média de um aluno, guardando também a situação em um dicionário.
No final, mostre o conteúdo da estrutura na tela.''')
aluno = dict()
aluno['nome'] = str(input('Insira o nome: '))
aluno['nota'] = float(input('Insira a nota: '))
if aluno['nota'] < 7:
    aluno['situacao'] = 'reprovado'
else:
aluno['situacao'] = 'aprovado'
print(f'''O aluno {aluno['nome']} teve a nota {aluno['nota']} e foi {aluno['situacao']}.''') | 3.84375 | 4 |
Ex044.py | raphaeltertuliano/Python | 1 | 12787312 | # Write a program that computes the amount to pay for a product,
# given its list price and the chosen payment method:
# - Cash/check: 10% discount
# - Credit card, paid in full: 5% discount
# - Credit card, up to 2 installments: list price
# - Credit card, 3 or more installments: 20% surcharge
valor = float(input('Valor do produto: R$'))
print('''Condições de pagamento:
[ 1 ] - Á vista dinheiro/cheque 10% de desconto
[ 2 ] - À vista no cartão de crédito: 5% de desconto
[ 3 ] - 2x no cartão
[ 4 ] - 3x no cartão ou mais (juros 20%)''')
cond = int(input('Forma de pagamento: '))
if cond == 1:
novo_valor = valor-(valor*10)/100
print(f'O produto custará: R${novo_valor:.2f}')
elif cond == 2:
novo_valor = valor-(valor*5)/100
print(f'O produto custará: R${novo_valor:.2f}')
elif cond == 3:
print(f'O produto custará: R${valor:.2f}')
elif cond == 4:
novo_valor = valor+(valor*20)/100
print(f'O produto custará: R${novo_valor:.2f}')
else:
print('\033[31mOPÇÃO INVÁLIDA. TENTE NOVAMENTE')
| 3.796875 | 4 |
src/microq_admin/utils.py | Odin-SMR/qqjobs | 0 | 12787313 | from sys import stderr
CONFIG_PATH = '/odin.cfg'
CONFIG_FILE_DOCS = """The configuration file should contain these settings:
ODIN_API_ROOT=https://example.com/odin_api
ODIN_SECRET=<secret encryption key>
JOB_API_ROOT=https://example.com/job_api
JOB_API_USERNAME=<username>
JOB_API_PASSWORD=<password>
It may contain:
JOB_API_VERSION=v4
"""
def validate_config(config):
"""Return True if ok, else False"""
def error(msg):
stderr.write(msg + '\n')
error.ok = False
error.ok = True
required = ['ODIN_API_ROOT', 'ODIN_SECRET',
'JOB_API_ROOT', 'JOB_API_USERNAME',
'JOB_API_PASSWORD']
for key in required:
if key not in config or not config[key]:
error('Missing in config: %s' % key)
if not error.ok:
return False
for api_root in ('ODIN_API_ROOT', 'JOB_API_ROOT'):
url = config[api_root]
if not url.startswith('http'):
error('%s does not look like an url: %s' % (api_root, url))
if url.endswith('/'):
error('%s must not end with /' % api_root)
optional = ['JOB_API_VERSION']
if not set(config.keys()).issubset(required + optional):
        error("Config contains unknown settings: {}".format(
set(config.keys()).difference(required + optional)
))
return error.ok
def load_config(config_file=None):
if config_file is None:
config_file = CONFIG_PATH
with open(config_file) as inp:
conf = dict(row.strip().split('=', 1) for row in inp if row.strip())
for k, v in conf.items():
conf[k] = v.strip('"')
return conf
def validate_project_name(project_name):
"""Must be ascii alnum and start with letter"""
if not project_name or not isinstance(project_name, str):
return False
if not project_name[0].isalpha():
return False
if not project_name.isalnum():
return False
return True
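# Usage sketch (reads CONFIG_PATH by default; error handling kept minimal,
# project name is illustrative):
#   config = load_config()
#   if not (validate_config(config) and validate_project_name("myproject1")):
#       raise SystemExit(1)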
| 2.328125 | 2 |
egs/chime4/wer.py | wbengine/SPMILM | 25 | 12787314 | <reponame>wbengine/SPMILM
import os
import sys
import numpy as np
sys.path.insert(0, os.getcwd() + '/../../tools/')
import wb
def nbest_rmUNK(read_nbest, write_nbest):
f = open(read_nbest, 'rt')
fo = open(write_nbest, 'wt')
for line in f:
fo.write(line.replace('<UNK>', ''))
f.close()
fo.close()
# such as
# lmpaths = {'KN5': nbestdir + '<tsk>/lmwt.lmonly',
# 'RNN': nbestdir +'<tsk>/lmwt.rnn',
# 'LSTM': workdir + '<tsk>/lmwt.lstm'}
# lmtypes = ['KN5', 'RNN', 'RNN+KN5']
def wer_all(workdir, nbestdir, lmpaths, lmtypes):
wb.mkdir(workdir)
# calculate the wer for each task, each lmscale, each combination
for tsk in ['nbestlist_{}_{}'.format(a, b) for a in ['dt05', 'et05'] for b in ['real', 'simu']]:
print(tsk)
wb.mkdir(workdir + tsk)
fwer = open(workdir + tsk + '/wer.txt', 'wt')
read_nbest_txt = nbestdir + tsk + '/words_text'
read_transcript = nbestdir + tsk + '/text'
read_acscore = nbestdir + tsk + '/acwt'
read_gfscore = nbestdir + tsk + '/lmwt.nolm'
# remove the <UNK> in nbest
read_nbest_rmunk = workdir + tsk + '/words_text_rmunk'
nbest_rmUNK(read_nbest_txt, read_nbest_rmunk)
# load score
acscore = np.array(wb.LoadScore(read_acscore))
gfscore = np.array(wb.LoadScore(read_gfscore))
# load label
score_label = wb.LoadLabel(read_acscore)
# lm config
for lmtype in lmtypes:
a = lmtype.split('+')
if len(a) == 1:
lmscore = np.array(wb.LoadScore(lmpaths[a[0]].replace('<tsk>', tsk)))
elif len(a) == 2:
s1 = wb.LoadScore(lmpaths[a[0]].replace('<tsk>', tsk))
s2 = wb.LoadScore(lmpaths[a[1]].replace('<tsk>', tsk))
lmscore = 0.5 * np.array(s1) + 0.5 * np.array(s2)
# write lmscore
wb.WriteScore(workdir + tsk + '/' + lmtype + '.lmscore', lmscore, score_label)
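            # N-best rescoring: total score = acoustic + lmscale * (LM score +
            # no-LM graph score); the best-scoring hypothesis per utterance is
            # compared against the transcript to compute WER.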
for lmscale in np.linspace(9, 15, 7):
write_best = workdir + tsk + '/{}_lmscale={}.best'.format(lmtype, lmscale)
wb.GetBest(read_nbest_rmunk, (acscore + lmscale * (lmscore + gfscore)).tolist(), write_best)
[err, num, wer] = wb.CmpWER(write_best, read_transcript)
os.remove(write_best)
s = '{} wer={:.2f} err={} num={} lmscale={}'.format(lmtype, wer, err, num, lmscale)
print(' ' + s)
fwer.write(s + '\n')
fwer.flush()
fwer.close()
def wer_tune(workdir):
config = {}
nLine = 0
with open(workdir + 'nbestlist_dt05_real/wer.txt') as f1, open(workdir + 'nbestlist_dt05_simu/wer.txt') as f2, open(
workdir + 'dt05_real_simu_wer.txt', 'wt') as f3:
for linea, lineb in zip(f1, f2):
a = linea.split()
b = lineb.split()
if (a[0] != b[0] or a[4:] != b[4:]):
print('[ERROR] wer_tune : two files are not match')
print(linea)
print(lineb)
return
weight = float(a[4].split('=')[1])
totale = int(a[2].split('=')[1]) + int(b[2].split('=')[1])
totalw = int(a[3].split('=')[1]) + int(b[3].split('=')[1])
wer = 100.0 * totale / totalw
f3.write('{} wer={:.2f} err={} num={} lmscale={}\n'.format(a[0], wer, totale, totalw, weight))
x = config.setdefault(a[0], dict())
oldwer = x.setdefault('wer', 100)
if wer < oldwer:
x['wer'] = wer
x['line'] = nLine
nLine += 1
return config
def ExactVaue(s, label):
a = s.split()
l = len(label)
for w in a:
if w[0:l] == label:
return w[l + 1:]
return s
def wer_print(workdir, config):
fresult = open(workdir + 'wer_result.txt', 'wt')
keylist = []
for tsk in ['nbestlist_{}_{}'.format(a, b) for b in ['real', 'simu'] for a in ['dt05', 'et05']]:
print(tsk)
fresult.write(tsk + '\n')
keylist.append(tsk)
fwer = open(workdir + tsk + '/wer.txt')
a = fwer.readlines()
for key in config.keys():
x = config[key]
n = x['line']
line = a[n][0:-1] # remove '\n'
x[tsk] = float(ExactVaue(line, 'wer'))
x['lmscale'] = float(ExactVaue(line, 'lmscale'))
print(' ' + a[n][0:-1])
fresult.write(a[n])
# print
s = 'model\tlmscale\t' + '\t'.join([i[10:] for i in keylist])
print(s)
fresult.write(s + '\n')
for key in sorted(config.keys()):
x = config[key]
s = '{}\t{}\t{}\t{}\t{}\t{}'.format(key, x['lmscale'], x[keylist[0]], x[keylist[1]], x[keylist[2]],
x[keylist[3]])
print(s)
fresult.write(s + '\n')
fresult.close()
# main
if __name__ == '__main__':
absdir = os.getcwd() + '/'
nbestdir = absdir + 'data/NBEST_HEQ/'
lmpaths = {'KN5': nbestdir + '<tsk>/lmwt.lmonly',
'RNN': nbestdir + '<tsk>/lmwt.rnn'}
lmtypes = ['KN5', 'RNN', 'RNN+KN5']
wer_workdir = absdir + 'wer/'
# compute the wer for all the dataset
wer_all(wer_workdir, nbestdir, lmpaths, lmtypes)
# using dt_real and dt_simu to tune the lmscale
config = wer_tune(wer_workdir)
# using the tuned lmscale to get the result WER
wer_print(wer_workdir, config)
| 1.945313 | 2 |
environment/custom/resource_v3/env.py | AndreMaz/transformer-pointer-critic | 5 | 12787315 | from re import L
import sys
from typing import List
from tensorflow.python.ops.gen_array_ops import gather
sys.path.append('.')
import json
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from random import randint, randrange
from environment.base.base import BaseEnvironment
from environment.custom.resource_v3.reward import RewardFactory, ReducedNodeUsage
from environment.custom.resource_v3.misc.utils import compute_remaining_resources, round_half_up
from environment.custom.resource_v3.node import Node as History
from environment.custom.resource_v3.resource import Resource as Request
class ResourceEnvironmentV3(BaseEnvironment):
def __init__(self, name: str, opts: dict):
super(ResourceEnvironmentV3, self).__init__(name)
###########################################
##### PROBLEM CONFIGS FROM JSON FILE ######
###########################################
self.gather_stats: bool = False
self.generate_request_on_the_fly: bool = opts['generate_request_on_the_fly']
self.mask_nodes_in_mha: bool = opts['mask_nodes_in_mha']
self.seed_value: int = opts['seed_value']
self.normalization_factor: int = opts['normalization_factor']
self.decimal_precision: int = opts['decimal_precision']
self.batch_size: int = opts['batch_size']
self.num_features: int = opts['num_features']
self.num_profiles: int = opts['num_profiles']
self.profiles_sample_size: int = opts['profiles_sample_size']
        assert self.num_profiles >= self.profiles_sample_size, 'Resource sample size should not exceed the total number of resources'
self.EOS_CODE: int = opts['EOS_CODE']
self.EOS_BIN = np.full((1, self.num_features), self.EOS_CODE, dtype='float32')
self.node_sample_size: int = opts['node_sample_size'] + 1 # + 1 because of the EOS bin
self.req_min_val: int = opts['req_min_val']
self.req_max_val: int = opts['req_max_val']
self.node_min_val: int = opts['node_min_val']
self.node_max_val: int = opts['node_max_val']
################################################
##### MATERIALIZED VARIABLES FROM CONFIGS ######
################################################
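        # Decoding starts at the first request row; requests are stored right
        # after the node rows in the batch tensor.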
self.decoding_step = self.node_sample_size
self.rewarder = RewardFactory(
opts['reward'],
self.EOS_BIN
)
if isinstance(self.rewarder, ReducedNodeUsage):
self.is_empty = np.zeros((self.batch_size, self.node_sample_size + self.profiles_sample_size, 1), dtype='float32')
# First position is EOS
self.is_empty[:, 0, 0] = self.EOS_BIN[0][0]
else:
self.is_empty = None
# Generate req profiles
self.total_profiles = self.generate_dataset()
# Problem batch
self.batch, self.history = self.generate_batch()
# Default masks
# Will be updated during at each step() call
self.bin_net_mask,\
self.resource_net_mask,\
self.mha_used_mask = self.generate_masks()
def reset(self):
# Reset decoding step
self.decoding_step = self.node_sample_size
if isinstance(self.rewarder, ReducedNodeUsage):
self.is_empty = np.zeros(
(self.batch_size, self.node_sample_size + self.profiles_sample_size, 1), dtype='float32')
# First position is EOS
self.is_empty[:, 0, 0] = self.EOS_BIN[0][0]
self.batch, self.history = self.generate_batch()
self.bin_net_mask,\
self.resource_net_mask,\
self.mha_used_mask = self.generate_masks()
return self.state()
def state(self):
decoder_input = self.batch[:, self.decoding_step]
decoder_input = np.expand_dims(decoder_input, axis=1)
batch = self.batch.copy()
if isinstance(self.rewarder, ReducedNodeUsage):
batch = self.add_is_empty_dim(batch, self.is_empty)
return batch,\
decoder_input,\
self.bin_net_mask.copy(),\
self.mha_used_mask.copy()
def step(self, bin_ids: List[int], feasible_bin_mask):
# Default is not done
isDone = False
req_ids = tf.fill(self.batch_size, self.decoding_step)
batch_size = self.batch.shape[0]
num_elems = self.batch.shape[1]
batch_indices = tf.range(batch_size, dtype='int32')
# Copy the state before updating the values
copy_batch = self.batch.copy()
# Grab the selected nodes and resources
nodes: np.ndarray = self.batch[batch_indices, bin_ids]
reqs: np.ndarray = self.batch[batch_indices, req_ids]
# Compute remaining resources after placing reqs at nodes
remaining_resources = compute_remaining_resources(
nodes, reqs, self.decimal_precision)
# Update the batch state
self.batch[batch_indices, bin_ids] = remaining_resources
# Keep EOS node intact
self.batch[batch_indices, 0] = self.EOS_BIN
# Item taken mask it
self.resource_net_mask[batch_indices, req_ids] = 1
# Update node masks
dominant_resource = tf.reduce_min(remaining_resources, axis=-1)
is_full = tf.cast(tf.equal(dominant_resource, 0), dtype='float32')
# Mask full nodes/bins
self.bin_net_mask[batch_indices, bin_ids] = is_full
self.bin_net_mask[:, 0] = 0 # EOS is always available
# Update the MHA masks
self.mha_used_mask[batch_indices, :, :, req_ids] = 1
if self.mask_nodes_in_mha:
self.mha_used_mask[batch_indices, :, :, bin_ids] = tf.reshape(
is_full, (self.batch_size, 1, 1)
)
# EOS is always available
self.mha_used_mask[batch_indices, :, :, 0] = 0
if np.all(self.resource_net_mask == 1):
isDone = True
# Compute rewards
rewards = self.rewarder.compute_reward(
self.batch, # Already updated values of nodes, i.e., after insertion
copy_batch, # Original values of nodes, i.e., before insertion
self.node_sample_size,
nodes,
reqs,
feasible_bin_mask,
bin_ids,
self.is_empty
)
rewards = tf.reshape(rewards, (batch_size, 1))
#else:
# rewards = tf.zeros((batch_size, 1), dtype='float32')
info = {
'bin_net_mask': self.bin_net_mask.copy(),
'resource_net_mask': self.resource_net_mask.copy(),
'mha_used_mask': self.mha_used_mask.copy(),
# 'num_resource_to_place': self.num_profiles
}
if self.gather_stats:
self.place_reqs(bin_ids, req_ids, reqs)
# Pick next decoder_input
self.decoding_step += 1
if self.decoding_step < self.node_sample_size + self.profiles_sample_size:
decoder_input = self.batch[:, self.decoding_step]
decoder_input = np.expand_dims(decoder_input, axis=1)
else:
# We are done. No need to generate decoder input
decoder_input = np.array([None])
batch = self.batch.copy()
if isinstance(self.rewarder, ReducedNodeUsage):
batch = self.add_is_empty_dim(batch, self.is_empty)
return batch, decoder_input, rewards, isDone, info
def generate_dataset(self):
profiles = tf.random.uniform(
(self.num_profiles, self.num_features),
minval=self.req_min_val,
maxval=self.req_max_val,
dtype='int32',
seed=self.seed_value
) / self.normalization_factor
return tf.cast(profiles, dtype="float32")
def generate_batch(self):
history = []
elem_size = self.node_sample_size + self.profiles_sample_size
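        # Batch layout: rows [0, node_sample_size) hold node states (row 0 is
        # the EOS bin); rows [node_sample_size, elem_size) hold the requests.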
batch: np.ndarray = np.zeros(
(self.batch_size, elem_size, self.num_features),
dtype="float32"
)
# Generate nodes states
nodes = tf.random.uniform(
(self.batch_size, self.node_sample_size, self.num_features),
minval=self.node_min_val,
maxval=self.node_max_val,
dtype="int32",
seed=self.seed_value
) / self.normalization_factor
batch[:, :self.node_sample_size, :] = tf.cast(nodes, dtype="float32")
# Replace first position with EOS node
batch[:, 0, :] = self.EOS_BIN
if self.generate_request_on_the_fly:
# Generate reqs
reqs = tf.random.uniform(
(self.batch_size, self.profiles_sample_size, self.num_features),
minval=self.req_min_val,
maxval=self.req_max_val,
dtype="int32",
seed=self.seed_value
) / self.normalization_factor
batch[:, self.node_sample_size:, :] = tf.cast(reqs, dtype="float32")
else:
# Sample profiles and add them to batch instances
for index in range(self.batch_size):
shuffled_profiles = tf.random.shuffle(self.total_profiles)
batch[index, self.node_sample_size:, :] = shuffled_profiles[:self.profiles_sample_size]
# Create node instances that will gather stats
if self.gather_stats:
history = self.build_history(batch)
return batch, history
def generate_masks(self):
elem_size = self.node_sample_size + self.profiles_sample_size
# Represents positions marked as "0" where resource Ptr Net can point
profiles_net_mask = np.zeros((self.batch_size, elem_size), dtype='float32')
# Represents positions marked as "0" where bin Ptr Net can point
nodes_net_mask = np.ones(
(self.batch_size, elem_size), dtype='float32')
# Default mask for resources
#for batch_id in range(self.batch_size):
# for i in range(self.node_sample_size):
# profiles_net_mask[batch_id, i] = 1
profiles_net_mask[:, :self.node_sample_size] = 1
# Default mask for bin
nodes_net_mask = nodes_net_mask - profiles_net_mask
# For Transformer's multi head attention
mha_used_mask = np.zeros_like(profiles_net_mask)
mha_used_mask = mha_used_mask[:, np.newaxis, np.newaxis, :]
return nodes_net_mask, profiles_net_mask, mha_used_mask
def sample_action(self):
batch_indices = tf.range(self.batch.shape[0], dtype='int32')
resource_ids = tf.fill(self.batch_size, self.decoding_step)
# Decode the resources
decoded_resources = self.batch[batch_indices, resource_ids]
decoded_resources = np.expand_dims(decoded_resources, axis=1)
bins_mask = self.build_feasible_mask(self.batch,
decoded_resources,
self.bin_net_mask
)
bins_probs = np.random.uniform(size=self.bin_net_mask.shape)
bins_probs = tf.nn.softmax(bins_probs - (bins_mask*10e6), axis=-1)
dist_bin = tfp.distributions.Categorical(probs = bins_probs)
bin_ids = dist_bin.sample()
return bin_ids, bins_mask
def add_stats_to_agent_config(self, agent_config: dict):
agent_config['num_resources'] = self.profiles_sample_size
agent_config['num_bins'] = self.node_sample_size
agent_config['tensor_size'] = self.node_sample_size + self.profiles_sample_size
agent_config['batch_size'] = self.batch_size
# Init the object
agent_config["encoder_embedding"] = {}
if isinstance(self.rewarder, ReducedNodeUsage):
agent_config["encoder_embedding"]["common"] = False
agent_config["encoder_embedding"]["num_bin_features"] = 4
agent_config["encoder_embedding"]["num_resource_features"] = 3
else:
agent_config["encoder_embedding"]["common"] = True
# If using the same embedding layer these vars are unused
agent_config["encoder_embedding"]["num_bin_features"] = None
agent_config["encoder_embedding"]["num_resource_features"] = None
return agent_config
def set_testing_mode(self,
batch_size,
node_sample_size,
profiles_sample_size,
node_min_val,
node_max_val
) -> None:
self.gather_stats = True
self.batch_size = batch_size
self.node_min_val = node_min_val
self.node_max_val = node_max_val
self.node_sample_size = node_sample_size + 1 # +1 For EOS node
self.profiles_sample_size = profiles_sample_size
def build_history(self, batch):
history = []
for batch_id, instance in enumerate(batch):
nodes = []
for id, bin in enumerate(instance[:self.node_sample_size]):
nodes.append(
History(
batch_id,
id,
bin
)
)
history.append(nodes)
return history
def place_reqs(self, bin_ids: List[int], req_ids: List[int], reqs: np.ndarray):
for batch_index, bin_id in enumerate(bin_ids):
node: History = self.history[batch_index][bin_id]
req_id = req_ids[batch_index]
req = Request(
batch_index,
req_id,
reqs[batch_index]
)
node.insert_req(req)
def build_feasible_mask(self, state, resources, bin_net_mask):
if isinstance(self.rewarder, ReducedNodeUsage):
state = self.remove_is_empty_dim(state)
batch = state.shape[0]
num_elems = state.shape[1]
# Add batch dim to resources
# resource_demands = np.reshape(resources, (batch, 1, self.num_features))
# Tile to match the num elems
resource_demands = tf.tile(resources, [1, num_elems, 1])
# Compute remaining resources after placement
# remaining_resources = state - resource_demands
remaining_resources = compute_remaining_resources(
state, resource_demands, self.decimal_precision
)
dominant_resource = tf.reduce_min(remaining_resources, axis=-1)
# Ensure that it's greater that 0
# i.e., that node is not overloaded
after_place = tf.less(dominant_resource, 0)
after_place = tf.cast(after_place, dtype='float32')
# Can't point to resources positions
feasible_mask = tf.maximum(after_place, bin_net_mask)
feasible_mask = feasible_mask.numpy()
assert np.all(dominant_resource*(1-feasible_mask) >= 0), 'Masking Scheme Is Wrong!'
# EOS is always available for pointing
feasible_mask[:, 0] = 0
# Return as is. At this moment node can be overloaded
return feasible_mask
def add_is_empty_dim(self, batch, is_empty):
batch = np.concatenate([batch, is_empty], axis=-1)
return round_half_up(batch, 2)
def remove_is_empty_dim(self, batch):
batch = batch[:, :, :self.num_features]
return round_half_up(batch, 2)
def print_history(self, print_details = False) -> None: # pragma: no cover
for batch_id in range(self.batch_size):
print('_________________________________')
node: History
for node in self.history[batch_id]:
node.print(print_details)
print('_________________________________')
return
def store_dataset(self, location) -> None:
np.savetxt(location, self.total_profiles)
def load_dataset(self, location):
self.total_profiles = np.loadtxt(location)
if __name__ == "__main__": # pragma: no cover
env_name = 'ResourceEnvironmentV3'
with open(f"configs/ResourceV3.json") as json_file:
params = json.load(json_file)
env_configs = params['env_config']
env_configs['batch_size'] = 2
env = ResourceEnvironmentV3(env_name, env_configs)
state, dec, bin_net_mask, mha_mask = env.state()
# env.print_history()
feasible_net_mask = env.build_feasible_mask(state, dec, bin_net_mask)
bin_ids = [0,1]
resource_ids = None
next, decoder_input, rewards, isDone, info = env.step(bin_ids, feasible_net_mask)
next, decoder_input, rewards, isDone, info = env.step(bin_ids, feasible_net_mask)
env.reset()
next, decoder_input, rewards, isDone, info = env.step(bin_ids, feasible_net_mask)
next, decoder_input, rewards, isDone, info = env.step(bin_ids, feasible_net_mask)
a = 1 | 1.789063 | 2 |
Tools/C2EA/c2eaPfinder.py | sme23/OneHourBlitz | 6 | 12787316 | <reponame>sme23/OneHourBlitz<filename>Tools/C2EA/c2eaPfinder.py
import struct
caches = {}
def getOrSetNew(dicToCheck, key, newFunc):
if key not in dicToCheck:
dicToCheck[key] = newFunc()
return dicToCheck[key]
def memoize(name = None):
def decorator(f):
global caches
# If we are given a valid name for the function, associate it with that entry in the cache.
if name is not None:
cache = getOrSetNew(caches, name, lambda: {})
else:
cache = {}
def g(*args):
return getOrSetNew(cache, args, lambda: f(*args))
# Set the cache as a function attribute so we can access it later (say for serialization)
g.cache = cache
return g
return decorator
def hash(obj):
if type(obj) is dict:
sortedDict = sorted(obj)
return tuple(map(
lambda elem: (elem, hash(obj[elem])),
sortedDict)).__hash__()
# Use sorted so we get the same order for dicts whose
# keys may have been added in other orders
if type(obj) is list:
return tuple(map(hash, obj)).__hash__()
else:
return obj.__hash__()
cachesLoaded = False
initialHash = None
def loadCache():
global cachesLoaded
global initialHash
global caches
if not cachesLoaded:
import os, pickle
if os.path.exists("./.cache"):
try:
with open("./.cache", 'rb') as f:
caches = pickle.load(f)
if type(caches) != dict: raise Exception
except Exception:
caches = {}
initialHash = hash(caches)
cachesLoaded = True
loadCache()
def writeCache():
if initialHash != hash(caches):
import pickle
with open("./.cache", 'wb') as f:
pickle.dump(caches, f, pickle.HIGHEST_PROTOCOL)
def deleteCache():
global caches
for name in caches:
caches[name] = {}
writeCache()
@memoize()
def readRom(romFileName):
words = []
with open(romFileName, 'rb') as rom:
while True:
word = rom.read(4)
if word == b'':
break
words.append(struct.unpack('<I', word)[0]) #Use the raw data;
# <I is little-endian 32 bit unsigned integer
return words
@memoize(name = 'pointerOffsets')
def pointerOffsets(romFileName, value):
return tuple(pointerIter(romFileName, value))
def pointerIter(romFileName, value):
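    # Scan the ROM as little-endian 32-bit words and yield the byte offset
    # (word index * 4) of every word equal to `value`.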
words = readRom(romFileName)
return (i<<2 for i,x in enumerate(words) if x==value)
| 2.390625 | 2 |
src/Ot2Rec/motioncorr.py | MichaelStubbings/Ot2Rec | 0 | 12787317 | # Copyright 2021 Rosalind Franklin Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import subprocess
import itertools
import pandas as pd
import yaml
from tqdm import tqdm
from icecream import ic # for debugging
from . import metadata as mdMod
class Motioncorr:
"""
Class encapsulating a Motioncorr object
"""
def __init__(self, project_name, mc2_params, md_in, logger):
"""
Initialise Motioncorr object
ARGS:
project_name (str) :: Name of current project
mc2_params (Params) :: Parameters read in from yaml file
md_in (Metadata) :: Metadata containing information of images
logger (Logger) :: Logger for recording events
"""
self.proj_name = project_name
self.logObj = logger
self.log = []
self.prmObj = mc2_params
self.params = self.prmObj.params
self._process_list = self.params['System']['process_list']
self.meta = pd.DataFrame(md_in.metadata)
self.meta = self.meta[self.meta['ts'].isin(self._process_list)]
self._set_output_path()
# Get index of available GPU
self.use_gpu = self._get_gpu_nvidia_smi()
# Set GPU index as new column in metadata
self.meta = self.meta.assign(gpu=self.use_gpu[0])
self.no_processes = False
self._check_processed_images()
# Check if output folder exists, create if not
if not os.path.isdir(self.params['System']['output_path']):
subprocess.run(['mkdir', self.params['System']['output_path']],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding='ascii')
def _check_processed_images(self):
"""
Method to check images which have already been processed before
"""
# Create new empty internal output metadata if no record exists
if not os.path.isfile(self.proj_name + '_mc2_mdout.yaml'):
self.meta_out = pd.DataFrame(columns=self.meta.columns)
# Read in serialised metadata and turn into DataFrame if record exists
else:
_meta_record = mdMod.read_md_yaml(project_name=self.proj_name,
job_type='motioncorr',
filename=self.proj_name + '_mc2_mdout.yaml')
self.meta_out = pd.DataFrame(_meta_record.metadata)
# Compare output metadata and output folder
# If a file (in specified TS) is in record but missing, remove from record
if len(self.meta_out) > 0:
self._missing = self.meta_out.loc[~self.meta_out['output'].apply(lambda x: os.path.isfile(x))]
self._missing_specified = pd.DataFrame(columns=self.meta.columns)
for curr_ts in self.params['System']['process_list']:
self._missing_specified = self._missing_specified.append(self._missing[self._missing['ts']==curr_ts],
ignore_index=True,
)
self._merged = self.meta_out.merge(self._missing_specified, how='left', indicator=True)
self.meta_out = self.meta_out[self._merged['_merge']=='left_only']
if len(self._missing_specified) > 0:
self.logObj(f"Info: {len(self._missing_specified)} images in record missing in folder. Will be added back for processing.")
# Drop the items in input metadata if they are in the output record
_ignored = self.meta[self.meta.output.isin(self.meta_out.output)]
if len(_ignored) > 0 and len(_ignored) < len(self.meta):
self.logObj(f"Info: {len(_ignored)} images had been processed and will be omitted.")
elif len(_ignored) == len(self.meta):
self.logObj(f"Info: All specified images had been processed. Nothing will be done.")
self.no_processes = True
self.meta = self.meta[~self.meta.output.isin(self.meta_out.output)]
@staticmethod
def _get_gpu_nvidia_smi():
"""
Subroutine to get visible GPU ID(s) from nvidia-smi
"""
nv_uuid = subprocess.run(['nvidia-smi', '--list-gpus'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding='ascii')
nv_processes = subprocess.run(['nvidia-smi', '--query-compute-apps=gpu_uuid', '--format=csv'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding='ascii')
# catch the visible GPUs
if nv_uuid.returncode != 0 or nv_processes.returncode != 0:
raise AssertionError(f"Error in Ot2Rec.Motioncorr._get_gpu_from_nvidia_smi: "
f"nvidia-smi returned an error: {nv_uuid.stderr}")
else:
nv_uuid = nv_uuid.stdout.strip('\n').split('\n')
nv_processes = subprocess.run(['nvidia-smi', '--query-compute-apps=gpu_uuid', '--format=csv'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding='ascii')
visible_gpu = []
for gpu in nv_uuid:
id_idx = gpu.find('GPU ')
uuid_idx = gpu.find('UUID')
gpu_id = gpu[id_idx + 4:id_idx + 6].strip(' ').strip(':')
gpu_uuid = gpu[uuid_idx + 5:-1].strip(' ')
# discard the GPU hosting a process
if gpu_uuid not in nv_processes.stdout.split('\n'):
visible_gpu.append(gpu_id)
if visible_gpu:
return visible_gpu
else:
raise ValueError(f'Error in metadata._get_gpu_from_nvidia_smi: {len(nv_uuid)} GPU detected, but none of them is free.')
def _set_output_path(self):
"""
Subroutine to set output path for motioncorr'd images
"""
self.meta['output'] = self.meta.apply(
lambda row: f"{self.params['System']['output_path']}"
f"{self.params['System']['output_prefix']}_{row['ts']:03}_{row['angles']}.mrc", axis=1)
def _get_command(self, image):
"""
Subroutine to get commands for running MotionCor2
ARGS:
image (tuple): metadata for current image (in_path, out_path, #GPU)
RETURNS:
list
"""
in_path, out_path, gpu_number = image
if self.params['System']['source_TIFF']:
image_type = 'InTiff'
else:
image_type = 'InMrc'
# Set FtBin parameter for MC2
ftbin = self.params['MC2']['desired_pixel_size'] / self.params['MC2']['pixel_size']
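        # FtBin is MotionCor2's Fourier-space binning factor, i.e. the ratio of
        # the desired (output) pixel size to the raw pixel size.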
return [self.params['MC2']['MC2_path'],
f'-{image_type}', in_path,
'-OutMrc', out_path,
'-Gpu', gpu_number,
'-GpuMemUsage', str(self.params['System']['gpu_memory_usage']),
'-Gain', self.params['MC2']['gain_reference'],
'-Tol', str(self.params['MC2']['tolerance']),
'-Patch', ','.join(str(i) for i in self.params['MC2']['patch_size']),
'-Iter', str(self.params['MC2']['max_iterations']),
'-Group', '1' if self.params['MC2']['use_subgroups'] else '0',
'-FtBin', str(ftbin),
'-PixSize', str(self.params['MC2']['pixel_size']),
'-Throw', str(self.params['MC2']['discard_frames_top']),
'-Trunc', str(self.params['MC2']['discard_frames_bottom']),
]
@staticmethod
def _yield_chunks(iterable, size):
"""
Subroutine to get chunks for GPU processing
"""
iterator = iter(iterable)
for first in iterator:
yield itertools.chain([first], itertools.islice(iterator, size - 1))
def run_mc2(self):
"""
Subroutine to run MotionCor2
"""
# Process tilt-series one at a time
ts_list = self.params['System']['process_list']
tqdm_iter = tqdm(ts_list, ncols=100)
for curr_ts in tqdm_iter:
tqdm_iter.set_description(f"Processing TS {curr_ts}...")
self._curr_meta = self.meta.loc[self.meta.ts==curr_ts]
while len(self._curr_meta) > 0:
# Get commands to run MC2
mc_commands = [self._get_command((_in, _out, _gpu))
for _in, _out, _gpu in zip(self._curr_meta.file_paths, self._curr_meta.output, self._curr_meta.gpu)]
jobs = (subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) for cmd in mc_commands)
# run subprocess by chunks of GPU
chunks = self._yield_chunks(jobs, len(self.use_gpu) * self.params['System']['jobs_per_gpu'])
for job in chunks:
# from the moment the next line is read, every process in job are spawned
for process in [i for i in job]:
self.log.append(process.communicate()[0].decode('UTF-8'))
self.update_mc2_metadata()
self.export_metadata()
def update_mc2_metadata(self):
"""
Subroutine to update metadata after one set of runs
"""
# Search for files with output paths specified in the metadata
# If the files don't exist, keep the line in the input metadata
# If they do, move them to the output metadata
self.meta_out = self.meta_out.append(self.meta.loc[self.meta['output'].apply(lambda x: os.path.isfile(x))],
ignore_index=True)
self.meta = self.meta.loc[~self.meta['output'].apply(lambda x: os.path.isfile(x))]
self._curr_meta = self._curr_meta.loc[~self._curr_meta['output'].apply(lambda x: os.path.isfile(x))]
def export_metadata(self):
"""
Method to serialise output metadata, export as yaml
"""
yaml_file = self.proj_name + '_mc2_mdout.yaml'
with open(yaml_file, 'w') as f:
yaml.dump(self.meta_out.to_dict(), f, indent=4, sort_keys=False)
| 2.03125 | 2 |
api/tests/integration/tests/substructure/tau_enumeration.py | f1nzer/Indigo | 0 | 12787318 | <filename>api/tests/integration/tests/substructure/tau_enumeration.py<gh_stars>0
import os
import sys
sys.path.append(
os.path.normpath(
os.path.join(os.path.abspath(__file__), "..", "..", "..", "common")
)
)
from env_indigo import *
indigo = Indigo()
# indigo_inchi = IndigoInchi(indigo);
def testEnumTautomersForMolecule(molecule):
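    # Enumerate tautomers with the "INCHI" algorithm and print their canonical
    # SMILES as a sorted, numbered list.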
iter = indigo.iterateTautomers(molecule, "INCHI")
lst = list()
for mol in iter:
prod = mol.clone()
lst.append(prod.canonicalSmiles())
lst.sort()
print(
" "
+ "\n ".join(
map(lambda x, y: str(x) + ") " + y, range(1, len(lst) + 1), lst)
)
+ "\n"
)
def testEnumTautomersForSDF(sdf_file):
for idx, molecule in enumerate(indigo.iterateSDFile(sdf_file)):
try:
print("%d. %s" % (idx + 1, molecule.smiles()))
molecule.dearomatize()
testEnumTautomersForMolecule(molecule)
molecule.aromatize()
testEnumTautomersForMolecule(molecule)
except IndigoException as e:
print(getIndigoExceptionText(e))
print(
"This is the case when not all tautomers are found for the first time and the algorithm requires the second attempt:"
)
testEnumTautomersForMolecule(
indigo.loadMolecule("OC1N=C2C(=NC(N)=NC(=O)2)NC(O)=1")
)
print("Test tautomers1-small.sdf")
testEnumTautomersForSDF(joinPathPy("molecules/tautomers1-small.sdf", __file__))
print("Test tautomers2-small.sdf")
testEnumTautomersForSDF(joinPathPy("molecules/tautomers2-small.sdf", __file__))
print("Test tautomers1-large.sdf")
testEnumTautomersForSDF(
joinPathPy("molecules/tautomers1-large.sdf.gz", __file__)
)
print("Test tautomers2-large.sdf")
testEnumTautomersForSDF(
joinPathPy("molecules/tautomers2-large.sdf.gz", __file__)
)
| 2.296875 | 2 |
MNIST/ram2/GlimpseNet.py | mimikaan/Attention-Model | 48 | 12787319 | import tensorflow as tf
from Globals import *
from BaseNet import *
class GlimpseNet(BaseNet):
def __init__(self):
self.imageSize = constants['imageSize']
self.imageChannel = constants['imageChannel']
self.numGlimpseResolution = constants['numGlimpseResolution']
self.glimpseOutputSize = constants['glimpseOutputSize']
self.glimpseDim = self.imageChannel * self.glimpseOutputSize * \
self.glimpseOutputSize * self.numGlimpseResolution
# linear layer processing retina encoding
with tf.variable_scope('g0') as scope:
self.wg0 = self.variableWithWeightDecay(
'weights', [self.glimpseDim, 128], 1e-4, 0.0)
self.bg0 = self.variableOnGpu(
'biases', [128], tf.constant_initializer(0.0))
# linear layer processing location
with tf.variable_scope('g1') as scope:
self.wg1 = self.variableWithWeightDecay(
'weights', [2, 128], 1e-4, 0.0)
self.bg1 = self.variableOnGpu(
'biases', [128], tf.constant_initializer(0.0))
# linear layer processing previouse two linear layers
with tf.variable_scope('g2') as scope:
self.wg2 = self.variableWithWeightDecay(
'weights', [256, 256], 1e-4, 0.0)
self.bg2 = self.variableOnGpu(
'biases', [256], tf.constant_initializer(0.0))
def forward(self, glimpses, locations):
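        # g0 encodes the multi-resolution glimpse ("what"), g1 encodes the
        # 2-D location ("where"); their concatenation is passed through a
        # final ReLU layer to give the 256-d glimpse feature g2.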
glimpses = tf.reshape(glimpses, [-1, self.glimpseDim])
out = tf.matmul(glimpses, self.wg0)
bias = tf.nn.bias_add(out, self.bg0)
self.g0 = tf.nn.relu(bias)
# self.activationSummary(self.g0)
out = tf.matmul(locations, self.wg1)
bias = tf.nn.bias_add(out, self.bg1)
self.g1 = tf.nn.relu(bias)
# self.activationSummary(self.g1)
combined = tf.concat([self.g0, self.g1], axis=1)
out = tf.matmul(combined, self.wg2)
bias = tf.nn.bias_add(out, self.bg2)
self.g2 = tf.nn.relu(bias)
# self.activationSummary(self.g2)
return self.g2
| 2.390625 | 2 |
test/analysis/classification/test_classifier_summary_builder.py | marta18a/sleep_classifiers | 97 | 12787320 | <gh_stars>10-100
from unittest import TestCase, mock
from unittest.mock import call
from sklearn.linear_model import LogisticRegression
from source.analysis.classification.classifier_summary_builder import SleepWakeClassifierSummaryBuilder
from source.analysis.performance.raw_performance import RawPerformance
from source.analysis.setup.attributed_classifier import AttributedClassifier
from source.analysis.setup.data_split import DataSplit
from source.analysis.setup.feature_type import FeatureType
import numpy as np
class TestClassifierSummaryBuilder(TestCase):
@mock.patch('source.analysis.classification.classifier_summary_builder.SubjectBuilder')
@mock.patch('source.analysis.classification.classifier_summary_builder.ClassifierService')
@mock.patch('source.analysis.classification.classifier_summary_builder.TrainTestSplitter')
def test_build_summary_by_fraction(self, mock_train_test_splitter, mock_classifier_service, mock_subject_builder):
attributed_classifier = AttributedClassifier(name="Logistic Regression", classifier=LogisticRegression())
feature_sets = [[FeatureType.cosine, FeatureType.circadian_model], [FeatureType.count]]
number_of_splits = 5
test_fraction = 0.3
mock_subject_builder.get_all_subject_ids.return_value = subject_ids = ["subjectA", "subjectB"]
mock_subject_builder.get_subject_dictionary.return_value = subject_dictionary = {"subjectA": [], "subjectB": []}
mock_train_test_splitter.by_fraction.return_value = expected_data_splits = [
DataSplit(training_set="subjectA", testing_set="subjectB")]
mock_classifier_service.run_sw.side_effect = raw_performance_arrays = [
[RawPerformance(true_labels=np.array([1, 2]),
class_probabilities=np.array([3, 4])),
RawPerformance(true_labels=np.array([0, 1]),
class_probabilities=np.array([2, 3]))
],
[RawPerformance(true_labels=np.array([1, 1]),
class_probabilities=np.array([4, 4])),
RawPerformance(true_labels=np.array([0, 0]),
class_probabilities=np.array([2, 2]))
]
]
returned_summary = SleepWakeClassifierSummaryBuilder.build_monte_carlo(attributed_classifier, feature_sets,
number_of_splits)
mock_subject_builder.get_all_subject_ids.assert_called_once_with()
mock_subject_builder.get_subject_dictionary.assert_called_once_with()
mock_train_test_splitter.by_fraction.assert_called_once_with(subject_ids, test_fraction=test_fraction,
number_of_splits=number_of_splits)
mock_classifier_service.run_sw.assert_has_calls([call(expected_data_splits,
attributed_classifier,
subject_dictionary,
feature_sets[0]
),
call(expected_data_splits,
attributed_classifier,
subject_dictionary,
feature_sets[1]
)])
self.assertEqual(returned_summary.attributed_classifier, attributed_classifier)
self.assertEqual(returned_summary.performance_dictionary[tuple(feature_sets[0])], raw_performance_arrays[0])
self.assertEqual(returned_summary.performance_dictionary[tuple(feature_sets[1])], raw_performance_arrays[1])
@mock.patch('source.analysis.classification.classifier_summary_builder.SubjectBuilder')
@mock.patch('source.analysis.classification.classifier_summary_builder.ClassifierService')
@mock.patch('source.analysis.classification.classifier_summary_builder.TrainTestSplitter')
def test_leave_one_out(self, mock_train_test_splitter, mock_classifier_service, mock_subject_builder):
attributed_classifier = AttributedClassifier(name="Logistic Regression", classifier=LogisticRegression())
feature_sets = [[FeatureType.cosine, FeatureType.circadian_model], [FeatureType.count]]
mock_subject_builder.get_all_subject_ids.return_value = subject_ids = ["subjectA", "subjectB"]
mock_subject_builder.get_subject_dictionary.return_value = subject_dictionary = {"subjectA": [], "subjectB": []}
mock_train_test_splitter.leave_one_out.return_value = expected_data_splits = [
DataSplit(training_set="subjectA", testing_set="subjectB")]
mock_classifier_service.run_sw.side_effect = raw_performance_arrays = [
[RawPerformance(true_labels=np.array([1, 2]),
class_probabilities=np.array([3, 4])),
RawPerformance(true_labels=np.array([0, 1]),
class_probabilities=np.array([2, 3]))
],
[RawPerformance(true_labels=np.array([1, 1]),
class_probabilities=np.array([4, 4])),
RawPerformance(true_labels=np.array([0, 0]),
class_probabilities=np.array([2, 2]))
]
]
returned_summary = SleepWakeClassifierSummaryBuilder.build_leave_one_out(attributed_classifier, feature_sets)
mock_subject_builder.get_all_subject_ids.assert_called_once_with()
mock_subject_builder.get_subject_dictionary.assert_called_once_with()
mock_train_test_splitter.leave_one_out.assert_called_once_with(subject_ids)
mock_classifier_service.run_sw.assert_has_calls([call(expected_data_splits,
attributed_classifier,
subject_dictionary,
feature_sets[0]
),
call(expected_data_splits,
attributed_classifier,
subject_dictionary,
feature_sets[1]
)])
self.assertEqual(returned_summary.attributed_classifier, attributed_classifier)
self.assertEqual(returned_summary.performance_dictionary[tuple(feature_sets[0])], raw_performance_arrays[0])
self.assertEqual(returned_summary.performance_dictionary[tuple(feature_sets[1])], raw_performance_arrays[1])
| 2.46875 | 2 |
app/migrations/0005_alter_business_options_alter_neighbourhood_options_and_more.py | james-muriithi/neighbourhood | 0 | 12787321 | # Generated by Django 4.0.3 on 2022-03-20 10:33
import cloudinary.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0004_post_slug'),
]
operations = [
migrations.AlterModelOptions(
name='business',
options={'ordering': ['-created_at']},
),
migrations.AlterModelOptions(
name='neighbourhood',
options={'ordering': ['-created_at']},
),
migrations.AlterModelOptions(
name='post',
options={'ordering': ['-created_at']},
),
migrations.AddField(
model_name='business',
name='image',
field=cloudinary.models.CloudinaryField(blank=True, max_length=255, null=True, verbose_name='image'),
),
migrations.AlterField(
model_name='business',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='businesses', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='post',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to=settings.AUTH_USER_MODEL),
),
]
| 1.828125 | 2 |
riverrunner/static/arima_exploration.py | lukeWaninger/RiverRunner | 1 | 12787322 | """
Module for data exploration for ARIMA modeling.
This module contains the back-end exploration of river run flow rate
data and exogenous predictors to determine the best way to create a
time-series model of the data. Note that since this module was only used
once (i.e. is not called in order to create ongoing predictions),
it is not accompanied by any unit testing.
Functions:
daily_avg: takes time series with measurements on different
timeframes and creates a dataframe with daily averages for flow
rate and exogenous predictors
test_stationarity: implements Dickey-Fuller test and rolling average
plots to check for stationarity of the time series
plot_autocorrs: creates plots of autocorrelation function and partial
autocorrelation function to help determine p and q parameters for ARIMA
model
test_model: runs stationarity tests and acf/pcf tests and then
creates ARIMA model for one run and plots results
"""
import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.stattools import acf, pacf
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.stattools import arma_order_select_ic
from riverrunner.repository import Repository
REPO = Repository()
def daily_avg(time_series):
"""Creates dataframe needed for modelling
Takes time series with measurements on different timeframes and creates a
dataframe with daily averages for flow rate and exogenous predictors.
Args:
time_series: dataframe with metrics for one run_id, assumes output
from get_measurements function
Returns:
DataFrame: containing daily measurements
"""
precip = time_series[time_series.metric_id == '00003']
precip['date_time'] = pd.to_datetime(precip['date_time'], utc=True)
precip.index = precip['date_time']
precip_daily = precip.resample('D').sum()
flow = time_series[time_series.metric_id == '00060']
flow['date_time'] = pd.to_datetime(flow['date_time'], utc=True)
flow.index = flow['date_time']
flow_daily = flow.resample('D').mean()
temp = time_series[time_series.metric_id == '00001']
temp['date_time'] = pd.to_datetime(temp['date_time'], utc=True)
temp.index = temp['date_time']
temp_daily = temp.resample('D').mean()
time_series_daily = temp_daily.merge(flow_daily, how='inner',
left_index=True, right_index=True)\
.merge(precip_daily, how='inner', left_index=True, right_index=True)
time_series_daily.columns = ['temp', 'flow', 'precip']
return time_series_daily
def test_stationarity(time_series):
"""Visual and statistical tests to test for stationarity of flow rate.
Performs Dickey-Fuller statistical test for stationarity at 0.05 level of
significance and plots 12-month rolling mean and standard deviation
against raw data for visual review of stationarity.
Args:
        time_series: dataframe containing flow rate and exogenous predictor data
for one river run (assumes output of daily_avg function).
Returns:
bool: True if data is stationary according to Dickey-Fuller test at
0.05 level of significance, False otherwise.
plot: containing rolling mean and standard deviation against raw data
time series.
"""
# Determine rolling statistics
rollmean = time_series.rolling(window=365, center=False).mean()
rollstd = time_series.rolling(window=365, center=False).std()
# Plot rolling statistics
plt.plot(time_series, color='blue', label='Raw Data')
plt.plot(rollmean, color='red', label='Rolling Mean')
plt.plot(rollstd, color='orange', label='Rolling Standard Deviation')
plt.title('Rolling Statistics')
plt.legend()
plt.show()
# Dickey-Fuller test
dftest = adfuller(time_series, autolag='BIC')
return bool(dftest[0] < dftest[4]['1%'])
def plot_autocorrs(time_series):
"""
Creates plots of auto-correlation function (acf) and partial
auto-correlation function(pacf) to help determine p and q parameters
for ARIMA model.
Args:
        time_series: dataframe containing flow rate and exogenous predictor
data for one river run (assumes output of daily_avg function).
Returns:
plots: containing acf and pacf of flow rate against number of lags.
"""
lag_acf = acf(time_series['flow'], nlags=400)
lag_pacf = pacf(time_series['flow'], method='ols')
plt.subplot(121)
plt.plot(lag_acf)
plt.axhline(y=0, linestyle='--', color='gray')
plt.axhline(y=-1.96 / np.sqrt(len(time_series)),
linestyle='--', color='gray')
plt.axhline(y=1.96 / np.sqrt(len(time_series)),
linestyle='--', color='gray')
plt.title('ACF')
plt.subplot(122)
plt.plot(lag_pacf)
plt.axhline(y=0, linestyle='--', color='gray')
plt.axhline(y=-1.96 / np.sqrt(len(time_series)),
linestyle='--', color='gray')
plt.axhline(y=1.96 / np.sqrt(len(time_series)),
linestyle='--', color='gray')
plt.title('PACF')
plt.tight_layout()
def test_model(run_id):
"""Function to test model for one run
Args:
run_id: run for which to test model
Returns: plots showing model results
"""
# Retrieve data for one run to model
start = datetime.datetime(2014, 5, 18)
end = datetime.datetime(2018, 5, 17)
test_measures = REPO.get_measurements(run_id=run_id,
start_date=start,
end_date=end)
# Average data and create train/test split
measures_daily = daily_avg(test_measures)
train_measures_daily = measures_daily[:-6]
test_measures_daily = measures_daily[-7:]
train_measures_daily = train_measures_daily.dropna()
# Check if data is stationary
test_stationarity(train_measures_daily['flow'])
# Determine p and q parameters for ARIMA model
params = arma_order_select_ic(train_measures_daily['flow'], ic='aic')
# Build and fit model
mod = ARIMA(train_measures_daily['flow'],
order=(params.aic_min_order[0], 0, params.aic_min_order[1]),
exog=train_measures_daily[['temp', 'precip']]).fit()
test_measures_daily.loc[:, 'prediction'] = \
mod.forecast(steps=7, exog=test_measures_daily[['temp', 'precip']])[0]
train_measures_daily.loc[:, 'model'] = mod.predict()
# Plot results
plt.plot(test_measures_daily[['flow', 'prediction']])
plt.plot(train_measures_daily[['flow', 'model']]['2015-07':])
plt.legend(['Test values', 'Prediction', 'Train values', 'Model'])
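# Illustrative entry point (the run id below is a placeholder, not taken from the original project):
#   test_model(run_id=599)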
| 3.046875 | 3 |
tests/http/test_todo.py | fumiya-kubota/sanic-gino-boilerplate | 2 | 12787323 | import pytest
import json
@pytest.mark.usefixtures('cleanup_db')
async def test_todo_api(app, test_cli):
"""
testing todo api
"""
# GET
resp = await test_cli.get('/api/todo')
assert resp.status == 200
resp_json = await resp.json()
assert len(resp_json['todo_list']) == 0
# POST
resp = await test_cli.post(
'/api/todo',
data=json.dumps({
'name': 'new_todo',
}),
headers={'Content-Type': 'application/json'}
)
assert resp.status == 201
# GET
resp = await test_cli.get('/api/todo')
assert resp.status == 200
resp_json = await resp.json()
assert len(resp_json['todo_list']) == 1
assert resp_json['todo_list'][0]['name'] == 'new_todo'
# DELETE
resp = await test_cli.delete(
'/api/todo/1',
)
assert resp.status == 200
# GET
resp = await test_cli.get('/api/todo')
assert resp.status == 200
resp_json = await resp.json()
assert len(resp_json['todo_list']) == 0
| 2.1875 | 2 |
python_scripts/nalu/nrel_5mw_scaling.py | lawsonro3/python_scripts | 0 | 12787324 | import os
import numpy as np
import matplotlib.pyplot as plt
try:
import python_scripts.nalu.io as nalu
except ImportError:
raise ImportError('Download https://github.com/lawsonro3/python_scripts/blob/master/python_scripts/nalu/nalu_functions.py')
if __name__ == '__main__':
root_dir = '/Users/mlawson/GoogleDrive/Work/NREL/Projects/HFM-ECP/nrel_5mw/results/cori_data/'
if os.path.isdir(root_dir) is False:
raise Exception('root_dir does not exist')
####################################
# Load gC data
####################################
file_gC_13 = root_dir+'gCoarse.13/nrel_5mw_gCoarse.log'
th_gC_13,t_gC_13 = nalu.read_log(file_gC_13)
t_gC_13_avg = np.mean(t_gC_13[375:425,:],axis=0)
file_gC_26 = root_dir+'gCoarse.26/nrel_5mw_gCoarse.log'
th_gC_26,t_gC_26 = nalu.read_log(file_gC_26)
t_gC_26_avg = np.mean(t_gC_26[300:350,:],axis=0)
file_gC_52 = root_dir+'gCoarse.52/nrel_5mw_gCoarse.log'
th_gC_52,t_gC_52 = nalu.read_log(file_gC_52)
t_gC_52_avg = np.mean(t_gC_52[500:550,:],axis=0)
file_gC_104 = root_dir+'gCoarse.104/nrel_5mw_gCoarse.log'
th_gC_104,t_gC_104 = nalu.read_log(file_gC_104)
t_gC_104_avg = np.mean(t_gC_104[200:250,:],axis=0)
dofs_gC = 24846302 # num_nodes_gC
nodes_gC = np.array([[13],[26],[52],[104]])
cores_gC = nodes_gC*32
dof_per_core_gC = dofs_gC/cores_gC
t_avg_gC = np.array([t_gC_13_avg,t_gC_26_avg,t_gC_52_avg,t_gC_104_avg])
t_avg_gC = np.append(nodes_gC,t_avg_gC,axis=1)
t_avg_gC = np.append(cores_gC,t_avg_gC,axis=1)
t_avg_gC = np.append(dof_per_core_gC,t_avg_gC,axis=1)
t_avg_headers_gC = ['dof_per_core_gC','cores_gC','nodes_gC']
t_avg_headers_gC = t_avg_headers_gC + th_gC_13
linear_time_gC = t_avg_gC[0,-1]*(cores_gC[0]/cores_gC) # linear scaling
    ####################################
# Load g1 data
####################################
file_g1_512 = root_dir+'g1.512/nrel_5mw_g1.log'
th_g1_512,t_g1_512 = nalu.read_log(file_g1_512)
t_g1_512_avg = np.mean(t_g1_512[-50:,:],axis=0)
file_g1_1024 = root_dir+'g1.1024/nrel_5mw_g1.log'
th_g1_1024,t_g1_1024 = nalu.read_log(file_g1_1024)
t_g1_1024_avg = np.mean(t_g1_1024[-50:,:],axis=0)
# file_g1_1536 = root_dir+'g1oarse.52/nrel_5mw_g1oarse.log'
# th_g1_1536,t_g1_1536 = nalu.read_log(file_g1_1536)
# t_g1_1536_avg = np.mean(t_g1_1536[500:550,:],axis=0)
dofs_g1 = 761112205 # num_nodes_g1
nodes_g1 = np.array([[512],[1024]])#,[1536]])
cores_g1 = nodes_g1*32
dof_per_core_g1 = dofs_g1/cores_g1
t_avg_g1 = np.array([t_g1_512_avg,t_g1_1024_avg])#,t_g1_1536_avg])
t_avg_g1 = np.append(nodes_g1,t_avg_g1,axis=1)
t_avg_g1 = np.append(cores_g1,t_avg_g1,axis=1)
t_avg_g1 = np.append(dof_per_core_g1,t_avg_g1,axis=1)
t_avg_headers_g1 = ['dof_per_core_g1','cores_g1','nodes_g1']
t_avg_headers_g1 = t_avg_headers_g1 + th_g1_512
linear_time_g1 = t_avg_g1[0,-1]*(cores_g1[0]/cores_g1) # linear scaling
####################################
## Plots
####################################
fig1 = '24.8 M Nodes (gCoarse) Timing'
fig2 = '761.1 M Nodes (g1) Timing'
fig3 = 'Nalu Scaling on Cori - Cores'
fig4 = 'Nalu Scaling on Cori - DOFs per Core'
####################################
# gC plotting
####################################
    caption_text_gC = '* NREL 5 MW on Cori Haswell nodes\n* 32 MPI ranks/node 1 OMP thread\n* Muelu solver stack with the v27.xml settings\n * 24.8 M DOF'
plt.figure(fig1,figsize=[10,10])
plt.title(fig1)
for i in np.arange(1,5,1):
plt.plot(t_gC_13[:,i],label=th_gC_13[i]+' 416 cores_gC')
plt.plot(t_gC_26[:,i],label=th_gC_26[i]+' 832 cores_gC')
plt.plot(t_gC_52[:,i],label=th_gC_52[i]+' 1664 cores_gC')
        plt.plot(t_gC_104[:,i],label=th_gC_104[i]+' 3328 cores_gC')
plt.legend()
plt.xlabel('Timestep')
plt.ylabel('Time (s)')
plt.text(0, 100,caption_text_gC, fontsize=12)
label = '24.8 M DOF, 32 MPI/node, 1 OMP thread, muelu v27.xml'
plt.figure(fig3,figsize=[10,10])
plt.title(fig3)
plt.loglog(t_avg_gC[:,1],t_avg_gC[:,-1],'ks-',label=label)
plt.loglog(cores_gC,linear_time_gC,'k--',label='Linear')
plt.xlabel('Cores')
plt.ylabel('Mean Time per Timestep (s)')
plt.legend()
plt.figure(fig4,figsize=[10,10])
plt.title(fig4)
plt.loglog(t_avg_gC[:,0],t_avg_gC[:,-1],'ks-',label=label)
plt.loglog(dof_per_core_gC,linear_time_gC,'k--',label='linear')
plt.xlabel('DOFs per Core')
plt.ylabel('Mean Time per Timestep (s)')
plt.legend()
####################################
# g1 plotting
####################################
    caption_text_g1 = '* NREL 5 MW on Cori Haswell nodes\n* 32 MPI ranks/node 1 OMP thread\n* Muelu solver stack with the v27.xml settings\n 761.1 M DOF'
color = 'tab:red'
plt.figure(fig2,figsize=[10,10])
plt.title(fig2)
for i in np.arange(1,5,1):
plt.plot(t_g1_512[:,i],label=th_g1_512[i]+' 16,384 cores_g1')
plt.plot(t_g1_1024[:,i],label=th_g1_1024[i]+' 32,768 cores_g1')
#plt.plot(t_g1_1536[:,i],label=th_g1_1536[i]+'49,152 cores_g1')
plt.legend()
plt.xlabel('Timestep')
plt.ylabel('Time (s)')
plt.text(0, 100,caption_text_g1, fontsize=12)
label = '761.1 M DOFs, 32 MPI/node, 1 OMP thread, muelu v27.xml'
plt.figure(fig3,figsize=[10,10])
plt.loglog(t_avg_g1[:,1],t_avg_g1[:,-1],'s-',label=label,color=color)
plt.loglog(cores_g1,linear_time_g1,'--',label='Linear',color=color)
plt.xlabel('Cores')
plt.ylabel('Mean Time per Timestep (s)')
plt.legend()
plt.figure(fig4,figsize=[10,10])
plt.loglog(t_avg_g1[:,0],t_avg_g1[:,-1],'s-',label=label,color=color)
plt.loglog(dof_per_core_g1,linear_time_g1,'--',label='linear',color=color)
plt.xlabel('DOFs per Core')
plt.ylabel('Mean Time per Timestep (s)')
plt.legend()
####################################
# Save plots
####################################
plt.figure(fig1); plt.savefig(root_dir+fig1+'.png',dpi=400)
plt.figure(fig2); plt.savefig(root_dir+fig2+'.png',dpi=400)
plt.figure(fig3); plt.savefig(root_dir+fig3+'.png',dpi=400)
plt.figure(fig4); plt.savefig(root_dir+fig4+'.png',dpi=400)
| 2.421875 | 2 |
code/src/plan2scene/crop_select/util.py | madhawav/plan2scene | 305 | 12787325 | <gh_stars>100-1000
from plan2scene.common.image_description import ImageDescription, ImageSource
from plan2scene.common.residence import Room, House
from plan2scene.config_manager import ConfigManager
from plan2scene.texture_gen.predictor import TextureGenPredictor
from plan2scene.texture_gen.utils.io import load_conf_eval
import logging
import os.path as osp
from plan2scene.utils.io import load_image
def fill_texture_embeddings(conf: ConfigManager, house: House, predictor: TextureGenPredictor) -> None:
"""
Compute surface texture embeddings of a house
:param conf: Config Manager
:param house: House processed
:param predictor: Predictor with loaded checkpoint
"""
for room_index, room in house.rooms.items():
assert isinstance(room, Room)
for photo in room.photos:
for surface in conf.surfaces:
surface_instances = [i for i in range(conf.texture_gen.masks_per_surface[surface])]
for surface_instance in surface_instances:
for crop_instance in range(conf.texture_gen.crops_per_mask):
                        # NOTE: the original key expression was lost to an anonymisation placeholder
                        # (<KEY>); the format below is an assumption and only needs to match the
                        # rectified-crop file names on disk.
                        candidate_key = "%s_%d_%d" % (photo, surface_instance, crop_instance)
if osp.exists(osp.join(conf.data_paths.rectified_crops_path, surface, candidate_key + ".png")):
image = load_image(
osp.join(conf.data_paths.rectified_crops_path, surface, candidate_key + ".png"))
emb, loss = predictor.predict_embs([image])
room.surface_textures[surface][candidate_key] = ImageDescription(image, ImageSource.NEURAL_SYNTH)
room.surface_embeddings[surface][candidate_key] = emb
room.surface_losses[surface][candidate_key] = loss
def fill_house_textures(conf: ConfigManager, house: House, image_source: ImageSource, skip_existing_textures: bool, key="prop",
predictor: TextureGenPredictor = None) -> None:
"""
Synthesize textures for a house using the assigned texture embeddings.
:param conf: Config Manager
:param house: House to populate textures
:param key: Key of candidate texture embeddings.
:param image_source: Generator of the images
:param predictor: Predictor used to synthesize textures
:param skip_existing_textures: Do no synthesize if a texture already exist
"""
if predictor is None:
predictor = TextureGenPredictor(
conf=load_conf_eval(config_path=conf.texture_gen.texture_synth_conf),
rgb_median_emb=conf.texture_gen.rgb_median_emb)
predictor.load_checkpoint(checkpoint_path=conf.texture_gen.checkpoint_path)
for room_index, room in house.rooms.items():
assert isinstance(room, Room)
for surface in room.surface_embeddings:
if key in room.surface_embeddings[surface]:
if skip_existing_textures and key in room.surface_textures[surface]:
continue
generated_crops, substance_names, extra = predictor.predict_textures(
combined_embs=[room.surface_embeddings[surface][key]],
multiplier=conf.texture_gen.output_multiplier)
room.surface_textures[surface][key] = ImageDescription(generated_crops[0], image_source)
def fill_textures(conf: ConfigManager, houses: dict, image_source: ImageSource, skip_existing_textures: bool, key: str = "prop", log: bool = True,
predictor: TextureGenPredictor = None) -> None:
"""
Synthesize textures for houses using the assigned texture embeddings.
:param conf: Config manager
:param houses: Dictionary of houses.
:param image_source: Image source specified to the synthesized textures
:param skip_existing_textures: Specify true to keep existing textures. Specify false to replace existing textures with new textures.
:param key: Key of embeddings used to synthesize textures.
:param log: Set true to enable logging.
:param predictor: Predictor used to synthesize textures.
"""
if predictor is None:
predictor = TextureGenPredictor(
conf=load_conf_eval(config_path=conf.texture_gen.texture_synth_conf),
rgb_median_emb=conf.texture_gen.rgb_median_emb)
predictor.load_checkpoint(checkpoint_path=conf.texture_gen.checkpoint_path)
for i, (house_key, house) in enumerate(houses.items()):
if log:
logging.info("[%d/%d] Generating Textures %s" % (i, len(houses), house_key))
fill_house_textures(conf, house, skip_existing_textures=skip_existing_textures, key=key, predictor=predictor, image_source=image_source)
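# Typical call (illustrative sketch; conf and houses come from the surrounding Plan2Scene pipeline):
#   fill_textures(conf, houses, image_source=ImageSource.VGG_CROP_SELECT,
#                 skip_existing_textures=False, key="prop")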
def get_least_key(kv):
"""
Given a dictionary, returns the key with minimum value.
:param kv: Dictionary considered.
:return: Key with the minimum value.
"""
min_k = None
min_v = None
for k, v in kv.items():
if min_v is None or v.item() < min_v:
min_k = k
min_v = v.item()
return min_k
def vgg_crop_select(conf: ConfigManager, house: House, predictor: TextureGenPredictor) -> None:
"""
Assigns the least VGG loss crop for each surface of the house.
:param conf: ConfigManager
:param house: House to update
:param predictor: Predictor used to synthesize textures
"""
for room_index, room in house.rooms.items():
assert isinstance(room, Room)
# Calculate the least VGG loss embeddings
for surface in room.surface_embeddings:
least_key = get_least_key(room.surface_losses[surface])
if least_key is not None:
room.surface_embeddings[surface] = {"prop": room.surface_embeddings[surface][least_key]}
room.surface_losses[surface] = {"prop": room.surface_losses[surface][least_key]}
else:
room.surface_embeddings[surface] = {}
room.surface_losses[surface] = {}
fill_textures(conf, {house.house_key: house}, predictor=predictor, log=False, image_source=ImageSource.VGG_CROP_SELECT, skip_existing_textures=False)
| 2.109375 | 2 |
barbante/recommendation/tests/TestRecommenderHRChunks.py | hypermindr/barbante | 10 | 12787326 | """ Test module for barbante.recommendation.RecommenderHRChunks class.
"""
import nose.tools
import barbante.tests as tests
from barbante.recommendation.tests.fixtures.HybridRecommenderFixture import HybridRecommenderFixture
class TestRecommenderHRChunks(HybridRecommenderFixture):
""" Class for testing barbante.recommendation.RecommenderHRChunks.
"""
def __init__(self):
super().__init__()
self.set_algorithm('HRChunks')
def test_merge_algorithm_contributions(self):
""" Tests the merge based on fixed slices.
"""
recommendations = {"UBCF": [[[50], "UBCF_1"],
[[30], "UBCF_2"],
[[10], "UBCF_3"],
[[5], "UBCF_4"],
[[2], "UBCF_5"]],
"PBCF": [[[50], "PBCF_1"],
[[30], "PBCF_2"],
[[10], "PBCF_3"],
[[5], "PBCF_4"]],
"CB": [[[50], "CB_1"],
[[40], "CB_2"],
[[30], "CB_3"],
[[20], "CB_4"],
[[10], "CB_5"],
[[9], "CB_6"],
[[8], "CB_7"],
[[7], "CB_8"],
[[4], "CB_9"]],
"POP": [[[50], "POP_1"],
[[30], "POP_2"],
[[10], "POP_3"],
[[5], "POP_4"],
[[4], "POP_5"],
[[3], "POP_6"],
[[4], "POP_7"]]}
session = tests.init_session(user_id="u_eco_1", algorithm=self.algorithm)
recommender = session.get_recommender()
merged_recommendations = recommender.merge_algorithm_contributions(recommendations, 20)
products_rank = [rec[1] for rec in merged_recommendations]
nose.tools.eq_(products_rank,
['UBCF_1', 'PBCF_1', 'CB_1', 'CB_2',
'UBCF_2', 'PBCF_2', 'CB_3', 'CB_4',
'UBCF_3', 'PBCF_3', 'CB_5', 'CB_6',
'UBCF_4', 'PBCF_4', 'CB_7', 'CB_8',
'UBCF_5', 'CB_9'],
"Wrong rank after merge")
| 2.015625 | 2 |
TicTak.py | serglit72/Python_exercises | 0 | 12787327 | from random import randint
from IPython.display import clear_output
def display(grid,player_a,player_b):
print("_1_"+'|'+"_2_"+'|'+"_3_")
print("_4_"+'|'+"_5_"+'|'+"_6_"+" Player A ("+player_a+") uses key "+marker+" !! ")
print(" 7 "+'|'+" 8 "+'|'+" 9 "+" Player B ("+player_b+") uses key "+ marker_b )
print("________________________________")
print("LETS PLAY on {}x{} !!!".format(grid,grid))
print("Use digit keys for input your step!")
def marker():
    marker = ""
    marker_b = ""
    # Keep asking until a valid marker ('X' or '0') is chosen.
    while True:
        marker = input("Player A should choose a marker X or 0 : ")
        if marker == "X":
            marker_b = "0"
            break
        elif marker == "0":
            marker_b = "X"
            break
    return marker, marker_b
# def who_start(player_a ,player_b):
# dice = {"n1":0,
# "n2":0}
# for i in range(1,3):
# if dice["n1"] == dice["n2"]:
# if i == 1:
# print("{} should click 'Whitespace' button".format(player_a))
# if input(" "):
# dice["n1"] = randint(1,6)
# continue
# elif i == 2:
# print("{} should click 'Whitespace' button".format(player_b))
# if input(" "):
# dice["n2"] = randint(1,6)
# if dice["n1"] > dice["n2"]:
# result = 1
# print("{} is going to start".format(player_a))
# return result
# elif int(dice[0]) < int(dice[1]):
# result = 2
# print("{} is going to start".format(player_b))
# return result
# else:
# print("Start again")
# else:
# return result
def check_winner(steps):
    # All eight winning lines of a 3x3 board: rows, columns and diagonals.
    lines = [(0, 1, 2), (3, 4, 5), (6, 7, 8),
             (0, 3, 6), (1, 4, 7), (2, 5, 8),
             (0, 4, 8), (2, 4, 6)]
    for a, b, c in lines:
        if steps[a] == steps[b] == steps[c]:
            return steps[a]
    return None
def game(player_a, player_b, marker, marker_b):
    i = 1
    j = 1
    positions = []
    steps = [str(n) for n in range(1, 10)]
    while j < 10:
        if i % 2 == 0:
            current_player, current_marker = player_b, marker_b
        else:
            current_player, current_marker = player_a, marker
        s = input("Player {} with {} turn : ".format(current_player, current_marker))
        if s not in [str(n) for n in range(1, 10)]:
            print("Warning!!!! Choose a cell number from 1 to 9.")
            continue
        if s in positions:
            print("Warning!!!! That cell is already taken.")
            continue
        steps[int(s) - 1] = current_marker
        positions.append(s)
        i += 1
        j += 1
        if check_winner(steps) is not None:
            net(steps)
            print(current_player + " won")
            return steps
    net(steps)
    print("nobody won - try again")
    return steps
def net(steps):
print("_"+str(steps[0])+"_|_"+str(steps[1])+"_|_"+str(steps[2])+"_")
print("_"+str(steps[3])+"_|_"+str(steps[4])+"_|_"+str(steps[5])+"_")
print(" "+str(steps[6])+" | "+str(steps[7])+" | "+str(steps[8])+" ")
#Here are instruction for the DEFs
player_a = input("Give me a Player A name? : ")
player_b = input("Give me a Player B name? : ")
marker,marker_b = marker()
# who_start(player_a,player_b)
# print(display(3,player_a,player_b))
game(player_a,player_b, marker, marker_b) | 3.859375 | 4 |
common/OpTestQemu.py | vaibhav92/op-test-framework | 0 | 12787328 | <reponame>vaibhav92/op-test-framework
#!/usr/bin/env python2
#
# OpenPOWER Automated Test Project
#
# Contributors Listed Below - COPYRIGHT 2015,2017
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Support testing against Qemu simulator
"""
import atexit
import sys
import time
import pexpect
import subprocess
import tempfile
from common.Exceptions import CommandFailed
from common import OPexpect
class ConsoleState():
DISCONNECTED = 0
CONNECTED = 1
class QemuConsole():
"""
A 'connection' to the Qemu Console involves *launching* qemu.
Terminating a connection will *terminate* the qemu process.
"""
def __init__(self, qemu_binary=None, skiboot=None, kernel=None, initramfs=None, logfile=sys.stdout, hda=None, cdrom=None):
self.qemu_binary = qemu_binary
self.skiboot = skiboot
self.kernel = kernel
self.initramfs = initramfs
self.hda = hda
self.state = ConsoleState.DISCONNECTED
self.logfile = logfile
self.cdrom = cdrom
def terminate(self):
if self.state == ConsoleState.CONNECTED:
print "#Qemu TERMINATE"
self.sol.terminate()
self.state = ConsoleState.DISCONNECTED
def close(self):
if self.state == ConsoleState.DISCONNECTED:
return
print "Qemu close -> TERMINATE"
self.sol.terminate()
self.state = ConsoleState.DISCONNECTED
def connect(self):
if self.state == ConsoleState.CONNECTED:
self.sol.terminate()
self.state = ConsoleState.DISCONNECTED
print "#Qemu Console CONNECT"
cmd = ("%s" % (self.qemu_binary)
+ " -M powernv -m 4G"
+ " -nographic"
+ " -bios %s" % (self.skiboot)
+ " -kernel %s" % (self.kernel)
)
if self.initramfs is not None:
cmd = cmd + " -initrd %s" % (self.initramfs)
if self.hda is not None:
cmd = cmd + " -hda %s" % (self.hda)
if self.cdrom is not None:
cmd = cmd + " -cdrom %s" % (self.cdrom)
cmd = cmd + " -netdev user,id=u1 -device e1000,netdev=u1"
cmd = cmd + " -device ipmi-bmc-sim,id=bmc0 -device isa-ipmi-bt,bmc=bmc0,irq=10"
print cmd
solChild = OPexpect.spawn(cmd,logfile=self.logfile)
self.state = ConsoleState.CONNECTED
self.sol = solChild
return solChild
def get_console(self):
if self.state == ConsoleState.DISCONNECTED:
self.connect()
count = 0
while (not self.sol.isalive()):
print '# Reconnecting'
if (count > 0):
time.sleep(1)
self.connect()
count += 1
if count > 120:
                raise Exception("IPMI: not able to get sol console")
return self.sol
def run_command(self, command, timeout=60):
console = self.get_console()
console.sendline(command)
console.expect("\n") # from us
rc = console.expect(["\[console-pexpect\]#$",pexpect.TIMEOUT], timeout)
output = console.before
console.sendline("echo $?")
console.expect("\n") # from us
rc = console.expect(["\[console-pexpect\]#$",pexpect.TIMEOUT], timeout)
exitcode = int(console.before)
if rc == 0:
res = output.replace("\r\r\n", "\n")
res = res.splitlines()
if exitcode != 0:
raise CommandFailed(command, res, exitcode)
return res
else:
res = console.before
res = res.split(command)
return res[-1].splitlines()
# This command just runs and returns the ouput & ignores the failure
def run_command_ignore_fail(self, command, timeout=60):
try:
output = self.run_command(command, timeout)
except CommandFailed as cf:
output = cf.output
return output
class QemuIPMI():
"""
Qemu has fairly limited IPMI capability, and we probably need to
extend the capability checks so that more of the IPMI test suite
gets skipped.
"""
def __init__(self, console):
self.console = console
def ipmi_power_off(self):
"""For Qemu, this just kills the simulator"""
self.console.terminate()
def ipmi_wait_for_standby_state(self, i_timeout=10):
"""For Qemu, we just kill the simulator"""
self.console.terminate()
def ipmi_set_boot_to_petitboot(self):
return 0
def ipmi_sel_check(self, i_string="Transition to Non-recoverable"):
pass
def sys_set_bootdev_no_override(self):
pass
class OpTestQemu():
def __init__(self, qemu_binary=None, skiboot=None,
kernel=None, initramfs=None, cdrom=None,
logfile=sys.stdout, hda=None):
if hda is not None:
self.qemu_hda_file = tempfile.NamedTemporaryFile(delete=True)
atexit.register(self.__del__)
else:
self.qemu_hda_file = hda
create_hda = subprocess.check_call(["qemu-img", "create",
"-fqcow2",
self.qemu_hda_file.name,
"10G"])
self.console = QemuConsole(qemu_binary, skiboot, kernel, initramfs, logfile=logfile, hda=self.qemu_hda_file.name, cdrom=cdrom)
self.ipmi = QemuIPMI(self.console)
def __del__(self):
self.qemu_hda_file.close()
def get_host_console(self):
return self.console
def get_ipmi(self):
return self.ipmi
def power_off(self):
self.console.terminate()
def power_on(self):
self.console.connect()
def get_rest_api(self):
return None
def has_os_boot_sensor(self):
return False
def has_occ_active_sensor(self):
return False
def has_host_status_sensor(self):
return False
| 2.5 | 2 |
day10/script.py | SemicolonAndV/AoC2020 | 0 | 12787329 | from collections import defaultdict
with open('day10/input.txt', 'r') as file:
data = sorted([int(x.strip()) for x in file.readlines()])
data = [0] + data
data.append(data[-1] + 3)
jolt_1, jolt_3 = 0, 0
for i in range(1, len(data)):
    diff = data[i] - data[i - 1]
    if diff == 1:
        jolt_1 += 1
    elif diff == 3:
        jolt_3 += 1
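# Part 2: count the distinct adapter arrangements with dynamic programming --
# each adapter is reachable from the adapters 1, 2 or 3 jolts below it.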
jumps = [1, 2, 3]
routes = defaultdict(int) # default value is 0
routes[0] = 1
for i in data[1:]:
routes[i] = sum([routes[i - j] for j in jumps])
print(f"Result 1: {jolt_1 * jolt_3}\nResult 2: {routes[data[-1]]}") | 3.40625 | 3 |
LMM/association/networkGWAS_snpSet.py | BorgwardtLab/networkGWAS | 1 | 12787330 | <reponame>BorgwardtLab/networkGWAS
'''
Compared to the original implementation at
https://github.com/fastlmm/FaST-LMM/
this file has been modified by <NAME>
'''
from pysnptools.util.mapreduce1.runner import *
import time
import argparse
import pandas as pd
from IPython import embed
from association import FastLmmSet
from association.FastLmmSet import FastLmmSet
def networkGWAS_snpSet(test_snps, set_list, phenotype, covariate = None,
output_file = None, test_type = "lrt", kernel = None,
standardize_data = None):
"""
Function performing GWAS on sets of snps
Input
----------------------------------------------------------------------------------------------
test_snps: The base name of the file containing the SNPs for alternative kernel.
The file must be in PLINK Bed format (string)
set_list: The name of a tab-delimited file defining the sets. The file should
contain two-columns 'snp' and 'set' (string)
pheno: The name of a file containing the phenotype. The file must be in PLINK
phenotype format (string)
covariate: covariate information, optional: The name of a file in PLINK phenotype
format (string)
output_file: Name of file to write results to, optional. If not given, no output file
will be created (string)
test_type: 'lrt' (default) (string)
Output
--------------------------------------------------------------------------------------------------
results: Pandas dataframe with one row per set.
"""
if(kernel == "lin"):
print('LINEAR KERNEL')
KERNEL = {'type':'linear'}
elif(kernel == 'poly'):
print('POLYNOMIAL KERNEL')
KERNEL = {'type':'polynomial'}
nullModel = {'effect':'fixed', 'link':'linear'}
altModel = {'effect':'mixed', 'link':'linear'}
fastlmm_set = FastLmmSet(outfile = output_file, phenofile = phenotype, alt_snpreader = test_snps,
altset_list = set_list, covarfile = covariate, test = test_type, autoselect = False,
nullModel = nullModel, altModel = altModel, kernel = KERNEL, standardize_data = standardize_data )
# Running through all the sets and permutations
sequence = fastlmm_set.work_sequence()
# the following few lines of code permit to run
# the actual analysis. In fact, sequence is a generator
# which return a function. With next(generator), the
# function is run.
flag = True
result_list = []
while(flag):
try:
result = next(sequence)
result_list.append(result[0])
except StopIteration:
flag = False
observed_statistics, null_distribution = fastlmm_set.reduce(result_list)
return observed_statistics, null_distribution
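# Illustrative call (all file names below are placeholders, not from the original repository):
#   obs_stats, null_dist = networkGWAS_snpSet(test_snps="genotypes", set_list="neighborhoods.txt",
#                                             phenotype="phenotypes.txt", test_type="lrt", kernel="lin")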
| 2.328125 | 2 |
examples/horse2zebra.py | tmabraham/unpaired-img2img-translation | 0 | 12787331 | from fastai.vision.all import *
from fastai.basics import *
from upit.models.cyclegan import *
from upit.train.cyclegan import *
from upit.data.unpaired import *
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--experiment_name', type=str, default='horse2zebra')
parser.add_argument('--model_name', type=str, default='cyclegan', choices=['cyclegan', 'dualgan','ganilla'])
parser.add_argument('--batch_size', type=int, default=1, help='Batch size')
parser.add_argument('--epochs_flat', type=int, default=100, help='Number of epochs with flat LR')
parser.add_argument('--epochs_decay', type=int, default=100, help='Number of epochs with linear decay of LR')
parser.add_argument('--lr', type=float, default=0.0002, help='Learning rate')
parser.add_argument('--gpu', type=int, default=0, help='GPU ID')
args = parser.parse_args()
torch.cuda.set_device(args.gpu)
dls = get_dls_from_hf("huggan/horse2zebra", load_size=286, bs=args.batch_size)
if args.model_name == 'cyclegan':
cycle_gan = CycleGAN()
learn = cycle_learner(dls, cycle_gan, opt_func=partial(Adam,mom=0.5,sqr_mom=0.999))
elif args.model_name == 'dualgan':
dual_gan = DualGAN()
learn = dual_learner(dls, dual_gan, opt_func=partial(Adam,mom=0.5,sqr_mom=0.999))
elif args.model_name == 'ganilla':
ganilla = GANILLA()
learn = cycle_learner(dls, ganilla, opt_func=partial(Adam,mom=0.5,sqr_mom=0.999))
learn.fit_flat_lin(args.epochs_flat, args.epochs_decay, args.lr)
learn.save(args.experiment_name+'_'+args.model_name+'_'+str(args.batch_size)+'_'+str(args.epochs_flat)+'_'+str(args.epochs_decay)+'_'+str(args.lr))
learn.model.push_to_hub(args.experiment_name+'_'+args.model_name)
| 2.25 | 2 |
String Functions/palindrome1.py | adrikagupta/Must-Know-Programming-Codes | 13 | 12787332 | <filename>String Functions/palindrome1.py
# to check whether a string is a palindrome or not
# written for easy understanding of the palindrome logic
def palindrome(word):
#reverse of the string
reverse = rev_string(word)
if (word == reverse):
print('Palindrome')
else:
print('Not a palindrome')
def rev_string(word):
new_word = ''
for index in range(len(word),0,-1):
new_word += word[index-1]
return new_word
palindrome('madam') | 4.34375 | 4 |
src/cefpython3.wx/__init__.py | donalm/cefpython | 1 | 12787333 | <filename>src/cefpython3.wx/__init__.py<gh_stars>1-10
# This dummy file is overwritten by "__init__.py.template", see:
# cefpython/windows/installer/
# cefpython/linux/installer/
| 1.351563 | 1 |
lib/python/modules/dxxtools/setup.py | tetsuzawa/spatial-research | 0 | 12787334 | <filename>lib/python/modules/dxxtools/setup.py
from setuptools import setup
setup(
name="dxxtools",
version="0.1.4",
description="dxxtools is a package of useful tools for .DXX",
packages=["dxxtools"],
install_requires=["dxx", "numpy", "matplotlib"],
entry_points={
"console_scripts": [
"vs_plot_dxx = dxxtools.vs_plot:main",
"upsampling_dxx = dxxtools.upsampling:main"
]
},
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/tetsuzawa/spatial-research/tree/master/modules/dxxtools"
)
| 1.414063 | 1 |
estore_project/users/migrations/0003_auto_20210504_1555.py | Jawayria/estore_project | 0 | 12787335 | # Generated by Django 3.1.8 on 2021-05-04 15:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20210504_1433'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='first_name',
),
migrations.RemoveField(
model_name='user',
name='last_name',
),
migrations.AddField(
model_name='user',
name='name',
field=models.CharField(blank=True, max_length=255, verbose_name='Name of User'),
),
]
| 1.664063 | 2 |
practice_problems/trees_graphs/bst_sequence.py | YazzyYaz/codinginterviews | 0 | 12787336 | <filename>practice_problems/trees_graphs/bst_sequence.py
import pprint
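# Enumerate every insertion order that would reproduce the given BST: at each step any root
# in the current "frontier" of subtrees may be inserted next, so the recursion branches over
# that frontier of available subtrees.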
def bst_sequences(bst):
return bst_sequences_partial([], [bst])
def bst_sequences_partial(partial, subtrees):
if not len(subtrees):
return [partial]
sequences = []
for index, subtree in enumerate(subtrees):
next_partial = partial + [subtree.value]
next_subtrees = subtrees[:index] + subtrees[index+1:]
if subtree.left:
next_subtrees.append(subtree.left)
if subtree.right:
next_subtrees.append(subtree.right)
sequences += bst_sequences_partial(next_partial, next_subtrees)
return sequences
class Node():
def __init__(self, value=None, left=None, right=None):
self.value, self.left, self.right = value, left, right
tree = Node(7,Node(4,Node(5)),Node(9))
pprint.pprint(bst_sequences(tree))
| 3.625 | 4 |
PythonTest/clock.py | linux-flower/Python-Clock | 0 | 12787337 | from tkinter import *
from datetime import datetime
# Colors
black: str = "#3d3d3d"  # Black
white: str = "#fafcff"  # White
green: str = "#21c25c"  # Green
red: str = "#eb463b"  # Red
grey: str = "#dedcdc"  # Grey
blue: str = "#3080f0"  # Blue
wallpaper: str = white
color = black
window = Tk()
window.title("Digital Clock")
window.geometry("380x150")
window.resizable(width=FALSE, height=FALSE)
window.configure(bg=white)
bt = Button(window, width=20, text='OK')
bt.place(x=100, y=150)
def clock():
time = datetime.now()
hour = time.strftime("%H:%M:%S")
week_day = time.strftime("%A")
day = time.day
mont = time.strftime("%B")
year = time.strftime("%Y")
text.config(text=hour)
text.after(1000, clock)
text1.config(text=week_day + " " + str(day) + "/" + str(mont) + "/" + str(year))
text = Label(window, text="", font='Arial 70 ', bg=wallpaper, fg=color)
text.grid(row=0, column=0, sticky=NW, padx=5)
text1 = Label(window, text="", font="Arial 20 italic", bg=wallpaper, fg=color)
text1.grid(row=1, column=0, sticky=NW, padx=5)
clock()
window.mainloop()
| 3.59375 | 4 |
mi-oj/NC30.py | wisesky/LeetCode-Practice | 0 | 12787338 | #
# return the smallest missing number
# @param arr int[] (one-dimensional int array) the array
# @return int
#
class Solution:
def minNumberdisappered(self , arr ):
# write code here
# idx = float('inf')
for i, num in enumerate(arr):
if num == 1:
break
else:
return 1
st = 2
pos = i+1
while pos in range(len(arr)) and arr[pos] == st:
st += 1
pos += 1
return st
l = [-1,2,3,4,]
l = [1,2,3,5]
so = Solution()
res = so.minNumberdisappered(l)
print(res)
| 3.1875 | 3 |
ninjia/preprocess/align_garbbed_csv.py | taohu88/ninjia | 0 | 12787339 | <gh_stars>0
import sys
import re
from dateutil.parser import parse
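# Realigns rows of a garbled CSV in which some rows are shifted by a few columns:
# an anchor column (TaxFlags) determines each row's offset, and known-format columns
# (email, dates) are validated at their shifted positions before the row is written out.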
EMAIL_REGEX = re.compile(r"[^@]+@[^@]+\.[^@]+")
def is_email_address(txt):
return EMAIL_REGEX.match(txt)
def is_tax_flags(txt):
return txt.endswith("NNNNNNN")
def is_date(txt):
try:
parse(txt)
return True
except ValueError:
return False
def is_large_number(txt):
try:
f = float(txt)
if f > 1.0e7:
return True
except ValueError:
pass
return False
COLUMNS_FOR_OFFSET = {
'TaxFlags': is_tax_flags,
}
COL_IDX = {}
COLUMNS_TO_CHECK = {
'Email': is_email_address,
'DateOpened': is_date,
'LastUpdate' : is_date,
'ValidFrom': is_date,
'ValidUntil': is_date,
}
def find_offset(items, cols_for_offset, col_idx, max_offset=5):
for c, fun in cols_for_offset.items():
idea_idx = col_idx[c]
start = max(idea_idx - max_offset, 0)
end = min(idea_idx+max_offset, len(items))
for i in range(start, end):
if fun(items[i]):
return i - idea_idx
raise Exception(f"Can't find valid item in {items}")
def is_valid_row(items, offset, cols_to_check, col_idx):
for c, fun in cols_to_check.items():
idea_idx = col_idx[c]
true_idx = idea_idx + offset
if not fun(items[true_idx]):
            raise Exception(f"Invalid item {items[true_idx]} at {true_idx} = {idea_idx} + {offset}")
return True
EXCLUDE_COLS = range(2, 13)
def process_header(line, sep=","):
cols = line.split(sep=sep)
new_cols = []
for i, col in enumerate(cols):
if i in EXCLUDE_COLS:
continue
COL_IDX[col] = i
new_cols.append(col)
    print('Column index map:', COL_IDX)
return new_cols
def process_line(line, sep=",", safe_cols=2):
items = line.split(sep=sep)
offset = find_offset(items, COLUMNS_FOR_OFFSET, COL_IDX)
new_items = []
for col, pos in COL_IDX.items():
if pos < safe_cols:
true_idx = pos
else:
true_idx = pos + offset
item = items[true_idx]
new_items.append(item)
if col in COLUMNS_TO_CHECK:
fun = COLUMNS_TO_CHECK[col]
if not fun(item):
raise Exception(f"Failed to pass validation at {true_idx} {offset} for {line}")
return new_items
def process_file(ifile, ofile, num_line_print, out_sep='\t'):
print('loading input data from file %s' % ifile)
print('writing output to file %s' % ofile)
with open(ofile, 'w', encoding='utf-8') as out, open(ifile, 'r', ) as f:
for i, line in enumerate(f):
try:
line = line.rstrip()
if i == 0:
items = process_header(line)
else:
items = process_line(line)
if i % num_line_print == 0:
print(f'Processing line {i}')
out.write("%s\n" % out_sep.join(items))
except Exception as e:
print(f"Caught error at {i} {e}")
if __name__ == "__main__":
ifile = sys.argv[1]
ofile = sys.argv[2]
num_line_print = 1000
process_file(ifile, ofile, num_line_print)
| 2.8125 | 3 |
panaxea/core/Model.py | DarioPanada/panaxea | 1 | 12787340 | <reponame>DarioPanada/panaxea
import os
import sys
import time
from collections import defaultdict
from panaxea.core.Schedule import Schedule
class Model(object):
"""
Initializes a model object. The model object is the primary
component of each simulation, holding the schedule,
the environments, model properties, and the current progress in the
simulation.
Essentially, the model holds a snapshot of the simulation world at
any point in progress.
Attributes
----------
epochs : int
The number of epochs the simulation should run for.
verbose : bool, optional
If set to true, output is sent to standard output. If set to
false, output (Ie: print statements) is
disabled. Defaults to true.
properties: dict, optional
Specifies a dictionary of property values. This can follow any
format he developers need and should be
adapted to the simulation's needs. Defaults to an empty dictionary.
"""
def __init__(self, epochs, verbose=True, properties=dict()):
self.epochs = epochs
self.schedule = Schedule()
self.environments = dict()
self.verbose = verbose
self.current_epoch = 0
self.properties = properties
self.exit = False
self.output = defaultdict(dict)
# Changing sys.stdout messes with unittests, so we will not do it
# running from a test
if not self.verbose and "unittest" not in sys.modules:
sys.stdout = os.devnull
def run(self):
"""
        Runs the simulation for the configured number of epochs or until the
        exit flag is set to true.
        Note that the state of the schedule, environments etc. will be
        altered after the model runs. If you wish to run the same model
        multiple times, you should first copy the original instance to a
        backup variable.
"""
epochs_time = []
for i in range(0, self.epochs):
if self.exit:
print("Exit flag set to true, finishing at epoch %s" % str(
self.current_epoch))
break
self.current_epoch = i
start_time = time.time()
print("Epoch %s" % i)
self.schedule.step_schedule(self)
time_taken = time.time() - start_time
print("Epoch took %s seconds" % time_taken)
epochs_time.append(time_taken)
print("Total time %s" % str(sum(epochs_time)))
if "unittest" not in sys.modules:
sys.stdout = sys.__stdout__
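# Minimal usage sketch (illustrative; how agents/steppables are registered on the schedule
# is defined elsewhere in PanaXea and omitted here):
#   model = Model(epochs=10, properties={"seed": 42})
#   # ... add steppables to model.schedule and set up model.environments ...
#   model.run()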
| 3.453125 | 3 |
services/real-time-voice-cloning/test_service.py | arturgontijo/dnn-model-services | 26 | 12787341 | <gh_stars>10-100
import sys
import grpc
# import the generated classes
import service.service_spec.voice_cloning_pb2_grpc as grpc_bt_grpc
import service.service_spec.voice_cloning_pb2 as grpc_bt_pb2
from service import registry
TEST_URL = "https://raw.githubusercontent.com/singnet/dnn-model-services/master/" \
"docs/assets/users_guide/ben_websumit19.mp3"
TEST_SNT = "Given that most of the innovation in the AI algorithm and product worlds come from students, " \
"startups or independent developers."
if __name__ == "__main__":
try:
test_flag = False
if len(sys.argv) == 2:
if sys.argv[1] == "auto":
test_flag = True
endpoint = input("Endpoint (localhost:{}): ".format(
registry["voice_cloning_service"]["grpc"])) if not test_flag else ""
if endpoint == "":
endpoint = "localhost:{}".format(registry["voice_cloning_service"]["grpc"])
# open a gRPC channel
channel = grpc.insecure_channel("{}".format(endpoint))
audio_url = input("Audio (link): ") if not test_flag else TEST_URL
sentence = input("Sentence (~20 words): ") if not test_flag else TEST_SNT
stub = grpc_bt_grpc.RealTimeVoiceCloningStub(channel)
grpc_input = grpc_bt_pb2.Input(audio_url=audio_url, sentence=sentence)
response = stub.clone(grpc_input)
if response.audio == b"Fail":
print("Fail!")
exit(1)
print("Audio file length:", len(response.audio))
except Exception as e:
print(e)
exit(1)
| 2.515625 | 3 |
screenshot.py | lizadaly/a-physical-book | 78 | 12787342 | <filename>screenshot.py
import selenium.webdriver as webdriver
from selenium.webdriver.support.ui import WebDriverWait
import contextlib
import time
CHAPTERS = 590
@contextlib.contextmanager
def quitting(thing):
yield thing
thing.quit()
with quitting(webdriver.Firefox()) as driver:
driver.set_window_size(500, 600)
for i in range(0, CHAPTERS):
driver.get('http://localhost/github/the-physical-book/?chapter={}'.format(i))
time.sleep(2)
print("Snapping chapter {}".format(i))
driver.get_screenshot_as_file('images/{}.png'.format(i))
| 3.1875 | 3 |
notebooks/classic/solution/matplotlib-subplots.py | kmunve/ml-workshop | 3 | 12787343 | fig, ax = plt.subplots(2, sharex='all', figsize=(10, 5))
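# Notebook solution snippet: assumes matplotlib.pyplot is already imported as plt and that
# the arrays t (time) and s (signal) are defined earlier in the notebook.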
ax[0].plot(t, s)
ax[0].set_ylabel('voltage (mV)')
ax[0].set_title('Linear')
ax[0].grid(True)
ax[0].set_yscale('linear')
ax[1].plot(t, s)
ax[1].set_xlabel('time (s)')
ax[1].set_ylabel('voltage (mV)')
ax[1].set_title('Log')
ax[1].grid(True)
ax[1].set_yscale('log')
fig.savefig("two-subplots.png") | 2.421875 | 2 |
ton_metrics_push.py | certusone/ton_exporter | 0 | 12787344 | <gh_stars>0
#!/usr/bin/env python3.6
# Quick and dirty metrics exporter for the TON C++ node.
import subprocess
import argparse
import tempfile
import os
import sys
import json
import base64
import binascii
import re
import pathlib
parser = argparse.ArgumentParser()
parser.add_argument('--output', help="node_exporter collector directory", required=True)
parser.add_argument('--engine-console-binary', help="Binary path for validator-engine-console", required=True)
parser.add_argument('--lite-client-binary', help="Binary path for lite-client", required=True)
parser.add_argument('--validator-client-key', help="Validator client key", required=True)
parser.add_argument('--validator-server-pub', help="Validator server pubkey", required=True)
parser.add_argument('--wallet-address', help="Validator wallet address (0x...)", required=True)
parser.add_argument('--liteserver-pub', help="Lite server pubkey", required=True)
parser.add_argument('--election-dir', help="Election data directory generated by validator_msig.sh", required=True)
parser.add_argument('--validator-config', help="Path to validator config.json", required=True)
args = parser.parse_args()
LITE_CLIENT = [
args.lite_client_binary,
'-a', '127.0.0.1:3031',
'-p', args.liteserver_pub
]
# Request validator stats (local and chain time and height)
output = subprocess.check_output([
args.engine_console_binary,
'-a', '127.0.0.1:3030',
'-k', args.validator_client_key,
'-p', args.validator_server_pub,
'-c', 'getstats',
'-c', 'quit'],
# https://github.com/ton-blockchain/ton/issues/292
stdin=subprocess.PIPE
)
want = [
# Timestamp of local node.
b'unixtime',
# Timestamp of latest masterchain block seen.
b'masterchainblocktime',
# Block heights of masterchain.
b'stateserializermasterchainseqno',
b'shardclientmasterchainseqno',
]
values = {}
for line in output.split(b'\n'):
parts = line.split(b'\t')
if parts[0] in want:
values[parts[0]] = parts[-1]
elif parts[0] == b'masterchainblock':
# TODO: Parse and collect *masterchainblock metrics
masterchainblock = parts[-1]
for k in want:
# Assert that all metrics are present. The script needs to either return *all* metrics,
# or fail completely. Otherwise, we could silently miss error states.
values[k]
# Get election ID (i.e. timestamp).
RE_INT_RESULT = re.compile(rb'result: \[ (.*) \]')
output = subprocess.check_output(LITE_CLIENT + ['-rc', 'runmethod -1:3333333333333333333333333333333333333333333333333333333333333333 active_election_id'], stdin=subprocess.PIPE)
active_election_id = int(RE_INT_RESULT.findall(output)[0])
# Fetch amount of returned stake during election
output = subprocess.check_output(LITE_CLIENT + [
'-rc', 'runmethod -1:3333333333333333333333333333333333333333333333333333333333333333 compute_returned_stake ' + args.wallet_address], stdin=subprocess.PIPE)
returned_stake_amount = int(RE_INT_RESULT.findall(output)[0])
# Get block heights of individual shards.
# Returns a list of [(shard_id, height)] tuples.
output = subprocess.check_output(LITE_CLIENT + ['-rc', 'allshards'], stdin = subprocess.PIPE)
allshards = re.findall(rb'^shard #(\d+) : \(\d,\d+,(\d+)\)', output, re.M)
# Get list of active validators.
# Returns a list of [(hex pubkey, stake weight, hex adnl_addr)] tuples.
RE_VAL_CONFIG = re.compile(rb'value:\(validator_addr\n\s+public_key:\(ed25519_pubkey pubkey:x([A-Z0-9]+)\) weight:(\d+) adnl_addr:x([A-Z0-9]+)\)\)')
output = subprocess.check_output(LITE_CLIENT + ['-rc', 'getconfig 34'], stdin = subprocess.PIPE)
active_validators = RE_VAL_CONFIG.findall(output)
# Get list of next validators (elected and soon-to-be active).
output = subprocess.check_output(LITE_CLIENT + ['-rc', 'getconfig 36'], stdin = subprocess.PIPE)
next_validators = RE_VAL_CONFIG.findall(output)
# Get list of validators participating in the current election.
# Returns a list of [(pubkey as big endian int, stake weight)] tuples.
output = subprocess.check_output(LITE_CLIENT + ['-rc', 'runmethod -1:3333333333333333333333333333333333333333333333333333333333333333 participant_list'], stdin = subprocess.PIPE)
for line in output.split(b'\n'):
if b'result:' in line:
election_participants = re.findall(rb'\[(\d+) (\d+)\]', line)
break
else:
election_participants = []
# Get block creation stats
# For performance reason, the regex will be evaluated later once we know which validator is active on the chain.
RE_BLOCK_CREATION_STATS = re.compile(
rb'(?:[A-Z0-9]+) mc_cnt:\(counter '
rb'last_updated:(?P<mc_last_updated>\d+) '
rb'total:(?P<mc_total>\d+) '
rb'cnt2048: (?P<mc_cnt2048>[0-9\.]+) '
rb'cnt65536: (?P<mc_cnt65536>[0-9\.]+)\) '
rb'shard_cnt:\(counter '
rb'last_updated:(?P<shard_last_updated>\d+) '
rb'total:(?P<shard_total>\d+) '
rb'cnt2048: (?P<shard_cnt2048>[0-9\.]+) '
rb'cnt65536: (?P<shard_cnt65536>[0-9\.]+)\)'
)
creatorstats = subprocess.check_output(
LITE_CLIENT + ['-rc', 'creatorstats ' + masterchainblock.decode()], stdin = subprocess.PIPE)
# Read local validator config (same as running "getconfig" in the validator console).
with open(args.validator_config, 'r') as f:
cfg = json.load(f)
# Each validator has three individual keys:
#
# - Validator public key.
# - Node ID / temporary key (SHA256([0xc6, 0xb4, 0x13, 0x48] + pubkey))
# - ADNL address.
#
# Node ID and ADNL address are set in the validator config file, however,
# the public key generated during an election isn't and we have to pull
# it out of a log message. Once the validator elected, it's easy because
# we can just look at the validator set, but we haven't found an easier way
# to acquire the key during an election (I suppose we could look up the
# election transaction, but that would be a lot more effort).
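#
# For reference, the node ID mentioned above could be derived roughly like this
# (illustrative sketch; this script reads the ID from the config instead of computing it):
#   import hashlib
#   node_id = hashlib.sha256(bytes([0xc6, 0xb4, 0x13, 0x48]) + pubkey_bytes).digest()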
#
# Figure out last/current election's validator pubkey:
#
election_dir = pathlib.Path(args.election_dir)
files = list(election_dir.glob("*-request-dump2"))
if len(files) == 1:
election_pubkey = re.findall(
rb'Provided a valid Ed25519 signature .+? with validator public key ([A-Z0-9]+)',
files[0].read_bytes())[0]
elif len(files) > 1:
print("Error: found multiple election state files: %r", files)
sys.exit(1)
else:
election_pubkey = None
# Write metrics and atomically replace metrics file.
with tempfile.NamedTemporaryFile(delete=False, dir=args.output) as fp:
# Validator stats (metric names are used verbatim).
for k, v in values.items():
fp.write(b'ton_%s %s\n' % (k, v))
# If the config contains no validators, we won't return any validator metrics!
for n, validator in enumerate(cfg['validators']):
adnl_b64 = next(filter(
lambda x: x['@type'] == 'engine.validatorAdnlAddress',
validator['adnl_addrs']))['id']
adnl_addr = binascii.hexlify(base64.b64decode(adnl_b64))
node_id = binascii.hexlify(base64.b64decode(validator['id']))
fp.write(b'ton_validator_election_date{index="%d", adnl_addr="%s"} %d\n' % (n, adnl_addr, validator['election_date']))
fp.write(b'ton_validator_expire_at{index="%d", adnl_addr="%s"} %d\n' % (n, adnl_addr, validator['expire_at']))
for pubkey, weight, adnl_addr_ in next_validators:
if adnl_addr_.lower() == adnl_addr:
print("Next validator:", pubkey.lower(), weight, adnl_addr)
fp.write(b'ton_validator_next_weight{adnl_addr="%s"} %s\n' % (adnl_addr, weight))
is_next = 1
break
else:
is_next = 0
for pubkey, weight, adnl_addr_ in active_validators:
if adnl_addr_.lower() == adnl_addr:
print("Active validator:", pubkey.lower(), weight, adnl_addr)
fp.write(b'ton_validator_active_weight{adnl_addr="%s"} %s\n' % (adnl_addr, weight))
is_active = 1
# Parse creator stats for active validator
for line in creatorstats.split(b'\n'):
if line.startswith(pubkey.upper()):
for k, v in RE_BLOCK_CREATION_STATS.match(line).groupdict().items():
fp.write(b'ton_validator_stats_%s{adnl_addr="%s"} %s\n' % (k.encode(), adnl_addr, v))
break
else:
is_active = 0
fp.write(b'ton_validator_is_active{adnl_addr="%s"} %d\n' % (adnl_addr, is_active))
fp.write(b'ton_validator_is_next{adnl_addr="%s"} %d\n' % (adnl_addr, is_next))
# Check whether the current election's newly generated validator pubkey shows up in participant_list
    for num_pubkey, stake in election_participants:
        # Guard against election_pubkey being None (no election request dump was found above).
        if election_pubkey is not None and \
                binascii.hexlify(int(num_pubkey).to_bytes(32, 'big')) == election_pubkey.lower():
election_participated = 1
break
else:
election_participated = 0
fp.write(b'ton_election_participated %d\n' % election_participated)
fp.write(b'ton_election_active_id %d\n' % active_election_id)
fp.write(b'ton_election_returned_stake_amount %d\n' % returned_stake_amount)
for id_, height in allshards:
fp.write(b'ton_shard_height{shard_id="%s"} %s\n' % (id_, height))
fp.write(b'\n')
fp.flush()
os.fchmod(fp.fileno(), 0o644)
fp.close()
os.rename(fp.name, os.path.join(args.output, 'ton.prom'))
| 1.992188 | 2 |
recipe_scrapers/cookingcircle.py | gloriousDan/recipe-scrapers | 0 | 12787345 | import re
from ._abstract import AbstractScraper
from ._utils import get_minutes
class CookingCircle(AbstractScraper):
@classmethod
def host(cls):
return "cookingcircle.com"
def author(self):
return (
self.soup.find("div", {"class": "recipe-author"})
.findChild("span", {"class": "text-uppercase"})
.get_text()
)
def title(self):
return self.schema.title()
def category(self):
return self.schema.category()
def total_time(self):
ul = self.soup.find("ul", {"class": "single-method-overview__times"})
totalTime = None
for li in ul.find_all("li"):
if li.span.get_text().lower() == "total time:":
totalTime = li.span.find_next().get_text()
if totalTime is not None:
totalTime = re.findall("[0-9]+", totalTime)[0]
return get_minutes(totalTime)
def yields(self):
return self.schema.yields()
def image(self):
return self.schema.image()
def ingredients(self):
ulList = (
self.soup.find(
"div", {"class": "single-ingredients__group", "data-unit": "metric"}
)
.findChild("ul", {"class": "single-ingredients__list"})
.findChildren("li")
)
ingredients = []
for li in ulList:
ingredients.append(
li.get_text().replace("\t", "").replace("\n\n", " ").replace("\n", "")
)
return ingredients
def instructions(self):
ulList = self.soup.find("ul", {"class": "single-method__method"}).findChildren(
"li"
)
instructions = []
for li in ulList:
instructions.append(li.get_text().strip().replace("\n", " "))
return "\n".join(instructions)
| 2.421875 | 2 |
coro/dns/stub_resolver.py | mkushnir/shrapnel | 1 | 12787346 | <filename>coro/dns/stub_resolver.py
# -*- Mode: Python -*-
import coro
import coro.dns
import coro.dns.packet as packet
import random
class QueryFailed (Exception):
pass
class stub_resolver:
def __init__ (self, nameservers, inflight=200):
self.nameservers = nameservers
self.inflight = coro.semaphore (inflight)
self.inflight_ids = set()
def lookup (self, qname, qtype, timeout=10, retries=3):
m = packet.Packer()
h = packet.Header()
while 1:
qid = random.randrange (65536)
# avoid collisions
if qid not in self.inflight_ids:
break
h.id = qid
h.opcode = packet.OPCODE.QUERY
h.rd = 1
h.qdcount = 1
m.addHeader (h)
m.addQuestion (qname, qtype, packet.CLASS.IN)
p = m.getbuf()
for addr in self.nameservers:
for i in range (retries):
self.inflight.acquire (1)
self.inflight_ids.add (qid)
try:
s = coro.udp_sock()
s.connect ((addr, 53))
s.send (p)
try:
reply = coro.with_timeout (timeout, s.recv, 1000)
u = packet.Unpacker (reply)
result = u.unpack()
rh = result[0]
if rh.id != qid:
raise QueryFailed ("bad id in reply")
else:
return result
except coro.TimeoutError:
pass
finally:
self.inflight.release (1)
self.inflight_ids.remove (qid)
raise QueryFailed ("no reply from nameservers")
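    # Return the address from the first answer record whose type matches qtype.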
def gethostbyname (self, name, qtype):
header, qdl, anl, nsl, arl = self.lookup (name, qtype)
for answer in anl:
name, rtype, _, ttl, addr = answer
if getattr (packet.TYPE, rtype) == qtype:
return addr
else:
raise QueryFailed ("no answer in nameserver reply")
def resolve_ipv4 (self, name):
return self.gethostbyname (name, packet.TYPE.A)
def resolve_ipv6 (self, name):
return self.gethostbyname (name, packet.TYPE.AAAA)
def install (nameserver_ips):
"install a stub resolver into the coro socket layer"
coro.set_resolver (
stub_resolver (nameserver_ips)
)
| 2.28125 | 2 |
validator/utils.py | digital-land/validator | 0 | 12787347 | import codecs
import collections
import sys
import csv
import os
from os.path import basename, dirname
import pandas as pd
import magic
import mimetypes
from cchardet import UniversalDetector
from validator.logger import get_logger
tmp_dir = None
logger = get_logger(__name__)
def extract_data(path, standard):
if looks_like_csv(path):
media_type = 'text/csv'
else:
path, media_type = convert_to_csv(path)
return csv_to_dict(path, media_type, standard)
def convert_to_csv(path):
media_type = magic.from_file(path, mime=True)
tmp_path = csv_path(tmp_dir, path)
try:
excel = pd.read_excel(path)
    except Exception:  # pandas could not parse the file as a spreadsheet
excel = None
if excel is not None:
excel.to_csv(tmp_path, index=None, header=True)
return tmp_path, media_type
logger.info(f"Unable to convert {path} from {media_type} to CSV")
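    # Conversion failed: write an empty placeholder CSV so callers still get a file path back.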
with open(tmp_path, 'w') as out:
pass
return tmp_path, media_type
def csv_to_dict(csv_file, media_type, standard):
result = {
'meta_data': {
'headers_found': [],
'additional_headers': [],
'missing_headers': [],
'media_type': media_type,
'suffix': suffix_for_media_type(media_type),
},
'rows': [],
'data': [],
}
encoding = detect_encoding(csv_file)
with codecs.open(csv_file, encoding=encoding['encoding']) as f:
reader = csv.DictReader(f)
if reader.fieldnames:
result['meta_data']['headers_found'] = reader.fieldnames
result['meta_data']['additional_headers'] = list(set(result['meta_data']['headers_found']) - set(standard.current_standard_headers()))
result['meta_data']['missing_headers'] = list(set(standard.current_standard_headers()) - set(result['meta_data']['headers_found']))
for row in reader:
to_check = collections.OrderedDict()
for column in standard.current_standard_headers():
value = row.get(column, None)
if value is not None:
to_check[column] = row.get(column)
result['rows'].append(to_check)
result['data'].append(row)
return result
def detect_encoding(file):
detector = UniversalDetector()
detector.reset()
with open(file, 'rb') as f:
for row in f:
detector.feed(row)
if detector.done:
break
detector.close()
return detector.result
def suffix_for_media_type(media_type):
suffix = {
'application/vnd.ms-excel': '.xls',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet': '.xlsx',
}
return suffix.get(media_type, mimetypes.guess_extension(media_type))
def get_markdown_for_field(field_name):
from pathlib import Path
current_directory = Path(__file__).parent.resolve()
markdown_file = Path(current_directory, 'markdown', f'{field_name}.md')
with open(markdown_file) as f:
content = f.read()
return content
def looks_like_csv(file):
try:
encoding = detect_encoding(file)
with open(file, encoding=encoding['encoding']) as f:
content = f.read()
if content.lower().startswith('<!doctype html'):
return False
csv.Sniffer().sniff(content)
return True
except Exception as e: # noqa
return False
def csv_path(_dir, path):
path = os.path.join(_dir, basename(path)) if _dir else path
return path + ".csv"
def save_csv(data, file):
if data:
fieldnames = data[0].keys()
if fieldnames:
writer = csv.DictWriter(file, fieldnames=fieldnames)
writer.writeheader()
for row in data:
writer.writerow(row)
| 2.859375 | 3 |
fix-links.py | jounile/jounileino.com | 0 | 12787348 | <filename>fix-links.py
#!/usr/bin/python
from bs4 import BeautifulSoup
"""
Before using this script run the following command:
$ npm run build
Hosting a static website in AWS S3 requires that names of the accessed object is exactly the one that a link points to.
Astro creates a /dist directory with all the HTML pages. Astro creates also routes that have no .html extension.
For example a link in Astro site points to /contact and the /dist directory contains a file named contact.html.
In order to access that HTML page in a S3 bucket the link needs to point to a path with the .html extension.
This script loops through the specified HTML files and adds the .html extension to links.
for example this
<a href="/contact">Link</a>
becomes this
<a href="/contact.html">Link</a>
This script will update the links and write the prettified html content into each HTML file.
After this script has executed you can sync the /dist directory content to S3 bucket.
See the project README
"""
files = ["dist/index.html", "dist/contact.html"]
for file in files:
    # Read the generated HTML file
    with open(file, 'r') as f:
        html = f.read()
soup = BeautifulSoup(html, "html.parser")
for a_tag in soup.find_all('a'):
old_href = a_tag['href']
if not str(old_href).startswith("https://"):
new_href = old_href + ".html"
a_tag['href'] = a_tag['href'].replace(old_href, new_href)
soup = soup.prettify()
print(soup)
    # Write the modified HTML back to the same file
    with open(file, 'w') as f:
        f.write(soup)
| 3.3125 | 3 |
tf_quat2rot/tests/test_quat2rot_graph_mode.py | risteon/tf_quat2rot | 1 | 12787349 | # -*- coding: utf-8 -*-
__author__ = """<NAME>"""
__email__ = "<EMAIL>"
import tensorflow as tf
import tf_quat2rot
class TestGraphMode(tf.test.TestCase):
@tf.function
def _run_in_graph(self, batch_shape=(2, 1, 3)):
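        # tf.function traces this method, so the body runs in graph mode rather than eagerly.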
self.assertTrue(not tf.executing_eagerly())
random_quats = tf_quat2rot.random_uniform_quaternion(batch_dim=batch_shape)
random_rotations = tf_quat2rot.quaternion_to_rotation_matrix(random_quats)
random_quats_restored = tf_quat2rot.rotation_matrix_to_quaternion(
random_rotations
)
return random_quats, random_quats_restored
def test_graph_mode(self):
with self.session(use_gpu=False):
# single random quaternion
batch_shape = (2, 1, 3)
random_quats, random_quats_restored = self._run_in_graph(batch_shape)
self.assertEqual(batch_shape + (4,), random_quats_restored.shape)
self.assertAllClose(random_quats, random_quats_restored)
| 2.296875 | 2 |
00_Original/42_Insiderwissen/Generatoren_als_Konsumenten/konsumierender_generator_pipeline.py | felixdittrich92/Python3_book | 0 | 12787350 | <filename>00_Original/42_Insiderwissen/Generatoren_als_Konsumenten/konsumierender_generator_pipeline.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
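# A pipeline of consuming ("coroutine-style") generators.
# konsument is a decorator that creates the generator and advances it to its
# first yield, so it is immediately ready to receive values via send().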
def konsument(f):
def h_f(*args, **kwargs):
gen = f(*args, **kwargs)
next(gen)
return gen
return h_f
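# Filter stage: lifts every incoming value by a fixed offset ("stufe") and
# forwards the result to the next stage ("ziel").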
@konsument
def filter_hebe(stufe, ziel):
while True:
ziel.send(stufe + (yield))
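# Moving-average filter: once "fenster" values have arrived, it forwards
# their mean and slides the window forward by one value.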
@konsument
def filter_mittelwert(fenster, ziel):
werte = []
while True:
werte.append((yield))
if len(werte) >= fenster:
ziel.send(sum(werte)/fenster)
werte.pop(0)
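# Sink stage: simply prints every value it receives.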
@konsument
def ausgabe():
while True:
print((yield))
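# Wire up the pipeline: values sent to f flow through the moving average
# (window 2), are lifted by 10, and are printed.
# For the input below this prints 12.0, 12.5, 13.0, 13.0 and 11.5.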
p = ausgabe()
f = filter_hebe(10, p)
f = filter_mittelwert(2, f)
for d in [1, 3, 2, 4, 2, 1]:
f.send(d)
| 2.578125 | 3 |