filename | text
---|---|
the-stack_106_26734 | # Based loosely on the code written by folks at Wheaton College, including:
# https://github.com/goodmanj/domecontrol
import time
from panoptes.pocs.dome import abstract_serial_dome
class Protocol:
# Status codes, produced when not responding to an input. They are oriented towards
# reporting whether the two shutters are fully closed.
BOTH_CLOSED = '0' # Both A and B shutters are fully closed.
A_IS_CLOSED = '1' # Only shutter A is fully closed.
B_IS_CLOSED = '2' # Only shutter B is fully closed.
BOTH_OPEN = '3' # Really means both NOT fully closed.
# Status codes produced by the dome when not responding to a movement command.
STABLE_STATES = (BOTH_CLOSED, BOTH_OPEN, B_IS_CLOSED, A_IS_CLOSED)
# Limit responses, when the limit has been reached on a direction of movement.
A_OPEN_LIMIT = 'x' # Response to asking for A to open, and being at open limit
A_CLOSE_LIMIT = 'X' # Response to asking for A to close, and being at close limit
B_OPEN_LIMIT = 'y' # Response to asking for B to open, and being at open limit
B_CLOSE_LIMIT = 'Y' # Response to asking for B to close, and being at close limit
# Command codes, echoed while happening
CLOSE_A = 'A'
OPEN_A = 'a'
CLOSE_B = 'B'
OPEN_B = 'b'
# These codes are documented for an 18' dome, but appear not to work with the 7' domes
# we have access to.
OPEN_BOTH = 'O'
CLOSE_BOTH = 'C'
RESET = 'R'
class AstrohavenDome(abstract_serial_dome.AbstractSerialDome):
"""Interface to an Astrohaven clamshell dome with a Vision 130 PLC and RS-232 interface.
Experience shows that it emits a status byte about once a second, with the codes
as described in the Protocol class.
"""
# TODO(jamessynge): Get these from the config file (i.e. per instance), with these values
# as defaults, though LISTEN_TIMEOUT can just be the timeout config for SerialData.
LISTEN_TIMEOUT = 3 # Max number of seconds to wait for a response.
MOVE_TIMEOUT = 10 # Max number of seconds to run the door motors.
MOVE_LISTEN_TIMEOUT = 0.1 # When moving, how long to wait for feedback.
NUM_CLOSE_FEEDBACKS = 2 # Number of target_feedback bytes needed.
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# TODO(jamessynge): Consider whether to expose settings of the pyserial object thru
# rs232.SerialData. Probably should. Could use newer dictionary get/set mechanism so
# that change to SerialData is minimal. Alternately, provide a means of reading
# that info from the config file in AbstractSerialDome.__init__ and using it to
# initialize the SerialData instance.
# Let's use a timeout that is long enough so that we are "guaranteed" a byte of output
        # from the device. 1 second seems too short, since that appears to be the pace of
# output from the PLC.
# TODO(jamessynge): Remove this, replace with a value in the config file.
self.serial.ser.timeout = AstrohavenDome.LISTEN_TIMEOUT
@property
def is_open(self):
v = self._read_latest_state()
return v == Protocol.BOTH_OPEN
def open(self):
self._full_move(Protocol.OPEN_A, Protocol.A_OPEN_LIMIT)
self._full_move(Protocol.OPEN_B, Protocol.B_OPEN_LIMIT)
v = self._read_state_until_stable()
if v == Protocol.BOTH_OPEN:
return True
self.logger.warning(f'AstrohavenDome.open wrong final state: {v!r}')
return False
@property
def is_closed(self):
v = self._read_latest_state()
return v == Protocol.BOTH_CLOSED
def close(self):
self._full_move(Protocol.CLOSE_A, Protocol.A_CLOSE_LIMIT,
feedback_countdown=AstrohavenDome.NUM_CLOSE_FEEDBACKS)
self._full_move(Protocol.CLOSE_B, Protocol.B_CLOSE_LIMIT,
feedback_countdown=AstrohavenDome.NUM_CLOSE_FEEDBACKS)
v = self._read_state_until_stable()
if v == Protocol.BOTH_CLOSED:
return True
self.logger.warning(f'AstrohavenDome.close wrong final state: {v!r}')
return False
@property
def status(self):
"""Return a dict with dome's current status."""
status_lookup = {
Protocol.BOTH_CLOSED: 'closed_both',
Protocol.A_IS_CLOSED: 'closed_a',
Protocol.B_IS_CLOSED: 'closed_b',
Protocol.BOTH_OPEN: 'open_both',
}
state = self._read_latest_state()
return_status = dict(
connected=self.is_connected,
)
try:
return_status['open'] = status_lookup[state]
except KeyError as e:
return_status['open'] = f'Unexpected response from Astrohaven Dome Controller: {state!r}'
return return_status
def __str__(self):
if self.is_connected:
            return str(self.status)
return 'Disconnected'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def _read_latest_state(self):
"""Read and return the latest output from the Astrohaven dome controller."""
# TODO(jamessynge): Add the ability to do a non-blocking read of the available input
# from self.serial. If there is some input, return it, but don't wait for more. The last
# received byte is good enough for our purposes... as long as we drained the input buffer
# before sending a command to the dome.
self.serial.reset_input_buffer()
data = self.serial.read_bytes(size=1)
if len(data):
return chr(data[-1])
return None
def _read_state_until_stable(self):
"""Read the status until it reaches one of the stable values."""
end_by = time.time() + AstrohavenDome.LISTEN_TIMEOUT
c = ''
while True:
data = self.serial.read_bytes(size=1)
if data:
c = chr(data[-1])
if c in Protocol.STABLE_STATES:
return c
self.logger.debug(f'_read_state_until_stable not yet stable: data={data!r}')
if time.time() < end_by:
continue
return c
def _full_move(self, send, target_feedback, feedback_countdown=1):
"""Send a command code until the target_feedback is recieved, or a timeout is reached.
Args:
send: The command code to send; this is a string of one ASCII character. See
Protocol above for the command codes.
target_feedback: The response code to compare to the response from the dome;
this is a string of one ASCII character. See Protocol above for the codes;
while the dome is moving, it echoes the command code sent.
Returns:
True if the target_feedback is received from the dome before the MOVE_TIMEOUT;
False otherwise.
"""
# Set a short timeout on reading, so that we don't open or close slowly.
# In other words, we'll try to read status, but if it isn't available,
# we'll just send another command.
saved_timeout = self.serial.ser.timeout
self.serial.ser.timeout = AstrohavenDome.MOVE_LISTEN_TIMEOUT
try:
have_seen_send = False
end_by = time.time() + AstrohavenDome.MOVE_TIMEOUT
self.serial.reset_input_buffer()
# Note that there is no wait in this loop because we have a timeout on reading from
            # the dome controller, and we know that the dome doesn't echo every character that
# we send to it.
while True:
self.serial.write(send)
data = self.serial.read_bytes(size=1)
if data:
c = chr(data[-1])
if c == target_feedback:
feedback_countdown -= 1
self.logger.debug(f'Got target_feedback, feedback_countdown={feedback_countdown!r}')
if feedback_countdown <= 0:
# Woot! Moved the dome and got the desired response.
return True
elif c == send:
have_seen_send = True
elif not have_seen_send and c in Protocol.STABLE_STATES: # pragma: no cover
# At the start of looping, we may see the previous stable state until
# we start seeing the echo of `send`.
pass
else: # pragma: no cover
self.logger.warning(f'Unexpected value from dome! send={send!r} target_feedback={target_feedback!r} data={data!r}')
if time.time() < end_by:
continue
self.logger.error(
f'Timed out moving the dome. Check for hardware or communications problem. '
f'send={send!r} target_feedback={target_feedback!r} data={data!r}')
return False
finally:
self.serial.ser.timeout = saved_timeout
# Expose as Dome so that we can generically load by module name, without knowing the specific type
# of dome. But for testing, it makes sense to *know* that we're dealing with the correct class.
Dome = AstrohavenDome
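# A minimal usage sketch (illustrative only): how an instance is constructed and how its
# serial connection is established depends on POCS configuration plumbing outside this
# file, so the construction line below is an assumption rather than documented API.
#
#     dome = AstrohavenDome(...)   # constructor arguments come from the POCS config
#     if dome.open():              # drives both shutters to their open limits
#         print(dome.status)       # e.g. {'connected': True, 'open': 'open_both'}
#     dome.close()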
|
the-stack_106_26737 | import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wno-long-long',
'-Wno-variadic-macros',
'-DNDEBUG',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-Isrc',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
  # The compile_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
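# Illustrative self-check of the path-rewriting helper above (not part of the YCM contract;
# running it requires ycm_core to be importable, since this module imports it at the top):
if __name__ == '__main__':
  demo_flags = MakeRelativePathsInFlagsAbsolute( [ '-Isrc', '-I', 'include' ], '/tmp/project' )
  # Expected: [ '-I/tmp/project/src', '-I', '/tmp/project/include' ]
  print( demo_flags )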
|
the-stack_106_26738 | #
# @lc app=leetcode id=79 lang=python3
#
# [79] Word Search
#
from typing import List
class Solution:
def exist(self, board: List[List[str]], word: str) -> bool:
self.board = board
        self.visited = set()  # records the cells already used on the current path
        # try every cell of the board as a starting point
for i in range(len(board)):
for j in range(len(board[i])):
if self.__search(word, i, j):
return True
return False
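    # Worst case is O(m*n*3^L) time for an m*n board and a word of length L (three new
    # directions per step once a path has started), plus O(L) extra space for the path set.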
def __search(self, word, i, j):
"""从四个方向搜索word中的字母"""
# word中没有剩余的字符,返回True,匹配成功
if len(word) == 0:
return True
        # check whether i, j is out of bounds, or whether (i, j) has already been visited
        if i < 0 or i >= len(self.board) \
                or j < 0 or j >= len(self.board[i]) \
                or (i, j) in self.visited:
return False
        # if the element at (i, j) is not the first letter of word, there is no point in continuing
if self.board[i][j] != word[0]:
return False
        # add the current (i, j) to the visited set
        self.visited.add((i, j))
        # keep searching for the remaining characters of word recursively in all four directions
found = self.__search(word[1:], i - 1, j) \
or self.__search(word[1:], i, j - 1) \
or self.__search(word[1:], i + 1, j) \
or self.__search(word[1:], i, j + 1)
        # if not found, remove (i, j) from the visited set (backtrack)
        if not found:
            self.visited.remove((i, j))
return found
if __name__ == '__main__':
word = "ABC"
print(Solution().exist(board=[
['A', 'B', 'C', 'E'],
['S', 'F', 'C', 'S'],
['A', 'D', 'E', 'E']
], word=word))
|
the-stack_106_26739 | #! /usr/bin/env python2
import numpy as np
from matplotlib import pyplot as plt
import lmeds
a1 = -5
a2 = 0.2
x_ = np.random.randn(100,1)*10
y_ = a1 * x_ + a2
x = x_ + np.random.rand(100,1) * 5 - 1
y = y_ + np.random.rand(100,1) * 5 - 1
A = np.c_[x, np.ones_like(x)]
b = y.copy()
b[-48:] *= -1
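# Flipping the sign of almost half of the responses turns them into gross outliers;
# plain least squares gets pulled toward them, which the LMedS fits below are meant to resist.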
model_lstsq = np.linalg.lstsq(A,b)[0]
model_lmeds = lmeds.solve(A,b)[0]
model_lmeds_norecomp = lmeds.solve(A,b,recompute_model=False)[0]
xin = np.linspace(-10.0,10.0,1000)
l1 = xin * model_lstsq[0] + model_lstsq[1]
l2 = xin * model_lmeds[0] + model_lmeds[1]
l3 = xin * model_lmeds_norecomp[0] + model_lmeds_norecomp[1]
l4 = xin * a1 + a2
print(xin.shape)
print(l1.shape)
print(l2.shape)
print(l3.shape)
print(l4.shape)
plt.figure()
plt.scatter(x,b)
plt.plot(xin,l1,label='LS')
plt.plot(xin,l2,label='LM, recomputed')
plt.plot(xin,l3,label='LM, no recomputed')
plt.plot(xin, l4, label='True line')
plt.legend()
plt.show()
|
the-stack_106_26740 | # Source: https://github.com/python/pyperformance
# License: MIT
# create chaosgame-like fractals
# Copyright (C) 2005 Carl Friedrich Bolz
import math
import random
class GVector(object):
def __init__(self, x=0, y=0, z=0):
self.x = x
self.y = y
self.z = z
def Mag(self):
return math.sqrt(self.x ** 2 + self.y ** 2 + self.z ** 2)
def dist(self, other):
return math.sqrt((self.x - other.x) ** 2
+ (self.y - other.y) ** 2
+ (self.z - other.z) ** 2)
def __add__(self, other):
if not isinstance(other, GVector):
raise ValueError("Can't add GVector to " + str(type(other)))
v = GVector(self.x + other.x, self.y + other.y, self.z + other.z)
return v
def __sub__(self, other):
return self + other * -1
def __mul__(self, other):
v = GVector(self.x * other, self.y * other, self.z * other)
return v
__rmul__ = __mul__
def linear_combination(self, other, l1, l2=None):
if l2 is None:
l2 = 1 - l1
v = GVector(self.x * l1 + other.x * l2,
self.y * l1 + other.y * l2,
self.z * l1 + other.z * l2)
return v
def __str__(self):
return "<%f, %f, %f>" % (self.x, self.y, self.z)
def __repr__(self):
return "GVector(%f, %f, %f)" % (self.x, self.y, self.z)
class Spline(object):
"""Class for representing B-Splines and NURBS of arbitrary degree"""
def __init__(self, points, degree, knots):
"""Creates a Spline.
points is a list of GVector, degree is the degree of the Spline.
"""
if len(points) > len(knots) - degree + 1:
raise ValueError("too many control points")
elif len(points) < len(knots) - degree + 1:
raise ValueError("not enough control points")
last = knots[0]
for cur in knots[1:]:
if cur < last:
raise ValueError("knots not strictly increasing")
last = cur
self.knots = knots
self.points = points
self.degree = degree
def GetDomain(self):
"""Returns the domain of the B-Spline"""
return (self.knots[self.degree - 1],
self.knots[len(self.knots) - self.degree])
def __call__(self, u):
"""Calculates a point of the B-Spline using de Boors Algorithm"""
dom = self.GetDomain()
if u < dom[0] or u > dom[1]:
raise ValueError("Function value not in domain")
if u == dom[0]:
return self.points[0]
if u == dom[1]:
return self.points[-1]
I = self.GetIndex(u)
d = [self.points[I - self.degree + 1 + ii]
for ii in range(self.degree + 1)]
U = self.knots
for ik in range(1, self.degree + 1):
for ii in range(I - self.degree + ik + 1, I + 2):
ua = U[ii + self.degree - ik]
ub = U[ii - 1]
co1 = (ua - u) / (ua - ub)
co2 = (u - ub) / (ua - ub)
index = ii - I + self.degree - ik - 1
d[index] = d[index].linear_combination(d[index + 1], co1, co2)
return d[0]
def GetIndex(self, u):
dom = self.GetDomain()
for ii in range(self.degree - 1, len(self.knots) - self.degree):
if u >= self.knots[ii] and u < self.knots[ii + 1]:
I = ii
break
else:
I = dom[1] - 1
return I
def __len__(self):
return len(self.points)
def __repr__(self):
return "Spline(%r, %r, %r)" % (self.points, self.degree, self.knots)
def write_ppm(im, w, h, filename):
with open(filename, "wb") as f:
f.write(b'P6\n%i %i\n255\n' % (w, h))
for j in range(h):
for i in range(w):
val = im[j * w + i]
c = val * 255
f.write(b'%c%c%c' % (c, c, c))
class Chaosgame(object):
def __init__(self, splines, thickness, subdivs):
self.splines = splines
self.thickness = thickness
self.minx = min([p.x for spl in splines for p in spl.points])
self.miny = min([p.y for spl in splines for p in spl.points])
self.maxx = max([p.x for spl in splines for p in spl.points])
self.maxy = max([p.y for spl in splines for p in spl.points])
self.height = self.maxy - self.miny
self.width = self.maxx - self.minx
self.num_trafos = []
maxlength = thickness * self.width / self.height
for spl in splines:
length = 0
curr = spl(0)
for i in range(1, subdivs + 1):
last = curr
t = 1 / subdivs * i
curr = spl(t)
length += curr.dist(last)
self.num_trafos.append(max(1, int(length / maxlength * 1.5)))
self.num_total = sum(self.num_trafos)
def get_random_trafo(self):
r = random.randrange(int(self.num_total) + 1)
l = 0
for i in range(len(self.num_trafos)):
if r >= l and r < l + self.num_trafos[i]:
return i, random.randrange(self.num_trafos[i])
l += self.num_trafos[i]
return len(self.num_trafos) - 1, random.randrange(self.num_trafos[-1])
def transform_point(self, point, trafo=None):
x = (point.x - self.minx) / self.width
y = (point.y - self.miny) / self.height
if trafo is None:
trafo = self.get_random_trafo()
start, end = self.splines[trafo[0]].GetDomain()
length = end - start
seg_length = length / self.num_trafos[trafo[0]]
t = start + seg_length * trafo[1] + seg_length * x
basepoint = self.splines[trafo[0]](t)
if t + 1 / 50000 > end:
neighbour = self.splines[trafo[0]](t - 1 / 50000)
derivative = neighbour - basepoint
else:
neighbour = self.splines[trafo[0]](t + 1 / 50000)
derivative = basepoint - neighbour
if derivative.Mag() != 0:
basepoint.x += derivative.y / derivative.Mag() * (y - 0.5) * \
self.thickness
basepoint.y += -derivative.x / derivative.Mag() * (y - 0.5) * \
self.thickness
else:
# can happen, especially with single precision float
pass
self.truncate(basepoint)
return basepoint
def truncate(self, point):
if point.x >= self.maxx:
point.x = self.maxx
if point.y >= self.maxy:
point.y = self.maxy
if point.x < self.minx:
point.x = self.minx
if point.y < self.miny:
point.y = self.miny
def create_image_chaos(self, w, h, iterations, rng_seed):
# Always use the same sequence of random numbers
        # to get a reproducible benchmark
random.seed(rng_seed)
im = bytearray(w * h)
point = GVector((self.maxx + self.minx) / 2,
(self.maxy + self.miny) / 2, 0)
for _ in range(iterations):
point = self.transform_point(point)
x = (point.x - self.minx) / self.width * w
y = (point.y - self.miny) / self.height * h
x = int(x)
y = int(y)
if x == w:
x -= 1
if y == h:
y -= 1
im[(h - y - 1) * w + x] = 1
return im
###########################################################################
# Benchmark interface
bm_params = {
(100, 50): (0.25, 100, 50, 50, 50, 1234),
(1000, 1000): (0.25, 200, 400, 400, 1000, 1234),
(5000, 1000): (0.25, 400, 500, 500, 7000, 1234),
}
def bm_setup(params):
splines = [
Spline([
GVector(1.597, 3.304, 0.0),
GVector(1.576, 4.123, 0.0),
GVector(1.313, 5.288, 0.0),
GVector(1.619, 5.330, 0.0),
GVector(2.890, 5.503, 0.0),
GVector(2.373, 4.382, 0.0),
GVector(1.662, 4.360, 0.0)],
3, [0, 0, 0, 1, 1, 1, 2, 2, 2]),
Spline([
GVector(2.805, 4.017, 0.0),
GVector(2.551, 3.525, 0.0),
GVector(1.979, 2.620, 0.0),
GVector(1.979, 2.620, 0.0)],
3, [0, 0, 0, 1, 1, 1]),
Spline([
GVector(2.002, 4.011, 0.0),
GVector(2.335, 3.313, 0.0),
GVector(2.367, 3.233, 0.0),
GVector(2.367, 3.233, 0.0)],
3, [0, 0, 0, 1, 1, 1])
]
chaos = Chaosgame(splines, params[0], params[1])
image = None
def run():
nonlocal image
_, _, width, height, iter, rng_seed = params
image = chaos.create_image_chaos(width, height, iter, rng_seed)
def result():
norm = params[4]
# Images are not the same when floating point behaviour is different,
# so return percentage of pixels that are set (rounded to int).
#write_ppm(image, params[2], params[3], 'out-.ppm')
pix = int(100 * sum(image) / len(image))
return norm, pix
return run, result
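# Minimal standalone driver (illustrative; a benchmark harness would normally pick a
# bm_params entry and call bm_setup itself):
if __name__ == "__main__":
    run, result = bm_setup(bm_params[(100, 50)])
    run()
    print(result())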
|
the-stack_106_26743 | import asyncio
import logging
import io
import json
import pathlib
import shutil
import time
import unittest.mock as mock
import zipfile
import pytest
from asynctest import CoroutineMock
import api as service
import wqdss.processing
import wqdss.model_registry
import model_registry_api
logging.basicConfig(level=logging.DEBUG)
INPUT_EXAMPLE = """
{
"model_run": {
"type": "flow",
"input_files": [
{ "name":"a.csv", "min_qwd":"1000", "max_qwd":"2000", "steps":["500"] },
{ "name":"b.csv", "min_qwd":"0", "max_qwd":"100", "steps":["50"] }
]
},
"model_analysis": {
"type": "quality",
"output_file": "out.csv",
"parameters": [
{ "name":"NO3", "target":"3.7", "weight":"4", "score_step":"0.1" },
{ "name":"NH4", "target":"2.4", "weight":"2", "score_step":"0.2" },
{ "name":"DO", "target":"8", "weight":"2", "score_step":"0.5" }
]
}
}
"""
@pytest.fixture
def api():
return service.api
def test_execution_not_found(api):
assert api.requests.get(f"/status/DOES_NOT_EXIST").json()['status'] == 'NOT_FOUND'
def test_dss_execution(api, tmp_path):
'''
    Test the execution of a dss, including setting the parameters, and polling for a result
'''
RESPONSE = [{"score": 4.2, "params": mock.Mock(values=[])}]
file_obj = tmp_path / "data.t"
file_obj.write_bytes(INPUT_EXAMPLE.encode())
files = {'input': (file_obj.name, file_obj.read_bytes(), 'application/json')}
data = {'model_name': 'some_model'}
start_event = asyncio.Event()
wqdss.processing.EXECUTIONS = {}
async def my_execute(params):
await start_event.wait()
expected_params = json.loads(INPUT_EXAMPLE)
expected_params['model_run']['model_name'] = 'some_model'
assert params == expected_params
wqdss.processing.EXECUTIONS[next(iter(wqdss.processing.EXECUTIONS.keys()))].result = RESPONSE
return RESPONSE
with api.requests:
with mock.patch.object(wqdss.processing.Execution, 'execute', new=CoroutineMock(side_effect=my_execute)):
resp = api.requests.post("/dss", data=data, files=files)
model_response = resp.json()
assert 'id' in model_response
exec_id = model_response['id']
# we check for the running state before allowing the model to complete
while api.requests.get(f"/status/{exec_id}").json()['status'] == 'NOT_FOUND':
time.sleep(0.1)
assert api.requests.get(f"/status/{exec_id}").json()['status'] == wqdss.processing.ExectuionState.RUNNING.value
# the model can now execute
start_event.set()
resp = api.requests.get(f"/status/{exec_id}").json()
assert resp['status'] == wqdss.processing.ExectuionState.COMPLETED.value
assert resp['result'][0]['score'] == RESPONSE[0]['score']
# check that the best run output is reachable
s = io.BytesIO()
with zipfile.ZipFile(s, 'w'):
pass
empty_zip_contents = s.getvalue()
with mock.patch('wqdss.processing.get_best_run', return_value=empty_zip_contents):
resp = api.requests.get(f"/best_run/{exec_id}")
assert resp.status_code == 200
assert resp.content == empty_zip_contents
assert resp.headers['content-type'] == 'application/zip'
def test_get_best_run_not_found(api):
s = io.BytesIO()
with zipfile.ZipFile(s, 'w'):
pass
empty_zip_contents = s.getvalue()
with mock.patch('wqdss.processing.get_best_run', return_value=empty_zip_contents):
resp = api.requests.get(f"/best_run/foobar")
assert resp.status_code == 400
assert resp.json() == {"exec_id": "foobar"}
def test_get_best_run_in_progress(api):
s = io.BytesIO()
with zipfile.ZipFile(s, 'w'):
pass
empty_zip_contents = s.getvalue()
with mock.patch('wqdss.processing.get_best_run', return_value=empty_zip_contents):
with mock.patch('wqdss.processing.get_status', return_value='IN_PROGRESS'):
resp = api.requests.get(f"/best_run/foobar")
assert resp.status_code == 400
assert resp.json() == {"exec_id": "foobar"}
def test_add_model(tmp_path):
file_a = tmp_path / "file.a"
file_a.write_bytes("this is a file".encode())
file_b = tmp_path / "file.b"
file_b.write_bytes("this is b file".encode())
model_zip = tmp_path / "model.zip"
with zipfile.ZipFile(model_zip, 'w') as z:
z.write(file_a)
z.write(file_b)
files = {'model': ('test_model', model_zip.read_bytes(), 'application/zip')}
resp = model_registry_api.api.requests.post("/models", files=files)
model_added_resp = resp.json()
assert 'model_name' in model_added_resp
assert model_added_resp['model_name'] == 'test_model'
list_models_resp = model_registry_api.api.requests.get('/models')
models_list = list_models_resp.json()
assert 'test_model' in models_list['models']
def test_model_registry_client(tmp_path_factory):
tmp_path = tmp_path_factory.mktemp("new_model_dir")
tmp_model_base = tmp_path_factory.mktemp("model_base")
with mock.patch.object(wqdss.model_registry, 'BASE_MODEL_DIR', tmp_model_base):
# add a test model
file_a = tmp_path / "file.a"
file_a.write_bytes("this is a file".encode())
model_zip = tmp_path / "model.zip"
with zipfile.ZipFile(model_zip, 'w') as z:
z.write(file_a)
files = {'model': ('test_model-new', model_zip.read_bytes(), 'application/zip')}
model_registry_api.api.requests.post("/models", files=files)
# fetch the test model
model_registry_client = wqdss.model_registry.ModelRegistryClient("/models", model_registry_api.api.requests)
model_contents = model_registry_client.get_model_by_name("test_model-new")
returned_model = zipfile.ZipFile(io.BytesIO(model_contents))
assert returned_model.namelist() == ["file.a"]
def test_model_registry_client_model_in_dir(tmp_path):
# add a test model
model_dir = tmp_path / "test_model-dir" / "subdir"
model_dir.mkdir(parents=True)
file_a = model_dir / "file.a"
file_a.write_bytes("this is a file".encode())
file_b = model_dir / "file.b"
file_b.write_bytes("this is another file".encode())
model_zip = tmp_path / "model.zip"
with zipfile.ZipFile(model_zip, 'w') as z:
z.write(file_a, arcname=pathlib.PurePath(*file_a.parts[-2:]))
z.write(file_b, arcname=pathlib.PurePath(*file_b.parts[-2:]))
try:
shutil.rmtree(wqdss.model_registry.BASE_MODEL_DIR)
except FileNotFoundError:
pass
files = {'model': ('test_model-new-in-dir', model_zip.read_bytes(), 'application/zip')}
model_registry_api.api.requests.post("/models", files=files)
# fetch the test model
model_registry_client = wqdss.model_registry.ModelRegistryClient("/models", model_registry_api.api.requests)
model_contents = model_registry_client.get_model_by_name("test_model-new-in-dir")
returned_model = zipfile.ZipFile(io.BytesIO(model_contents))
assert sorted(returned_model.namelist()) == ['file.a', 'file.b']
|
the-stack_106_26744 | # set in mathematics is a collection of unique elements.
# set arrays should only be 1-d arrays.
import numpy as np
x1 = np.array([1, 1, 1, 3, 4, 4, 6, 6, 8, 4, 3, 5, 7, 3, 2, 5, 6])
x2 = np.unique(x1)  # unique elements of the array, returned sorted
print(x2)
x3 = np.array([1, 1, 1, 3, 4, 4, 6, 6])
x4 = np.array([8, 4, 3, 5, 7, 3, 2, 5, 6])
x5 = np.union1d(x3, x4)  # union: unique elements that appear in either array
print(x5)
x6 = np.array([1, 1, 1, 3, 4, 4, 6, 6])
x7 = np.array([8, 4, 3, 5, 7, 3, 2, 5, 6])
x8 = np.intersect1d(x6, x7, assume_unique=True)  # assume_unique speeds up the computation, but is only reliable when the inputs really contain no duplicates
print(x8)  # intersection: values present in both arrays
y1 = np.array([1, 1, 1, 3, 4, 4, 6, 6])
y2 = np.array([8, 4, 3, 5, 7, 3, 2, 5, 6])
y3 = np.setdiff1d(y1, y2, assume_unique=True)  # difference: values of the first array that are not present in the second
print(y3)
y4 = np.array([1, 1, 1, 3, 4, 4, 6, 6])
y5 = np.array([8, 4, 3, 5, 7, 3, 2, 5, 6])
y6 = np.setxor1d(y4, y5, assume_unique=True)  # symmetric difference: values present in exactly one of the arrays, not in both
print(y6)
|
the-stack_106_26745 | #!/usr/bin/env python
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "awsheet",
version = "0.0.9",
author = "Mike Adler",
author_email = "[email protected]",
description = ("build repeatable stacks of AWS resources across prod and dev"),
license = "Apache 2.0",
#keywords = ""
url = "http://github.com/adler/awsheet",
packages=['awsheet', 'awsheet/helpers'],
long_description=read('README.md'),
classifiers=[
"Development Status :: 3 - Alpha",
],
# install_requires is good for install via pip
install_requires = ['boto', 'awscli'],
)
|
the-stack_106_26746 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Selected(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scattermapbox"
_path_str = "scattermapbox.selected"
_valid_props = {"marker"}
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattermapbox.selected.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
color
Sets the marker color of selected points.
opacity
Sets the marker opacity of selected points.
size
Sets the marker size of selected points.
Returns
-------
plotly.graph_objs.scattermapbox.selected.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
marker
:class:`plotly.graph_objects.scattermapbox.selected.Mar
ker` instance or dict with compatible properties
"""
def __init__(self, arg=None, marker=None, **kwargs):
"""
Construct a new Selected object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattermapbox.Selected`
marker
:class:`plotly.graph_objects.scattermapbox.selected.Mar
ker` instance or dict with compatible properties
Returns
-------
Selected
"""
super(Selected, self).__init__("selected")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattermapbox.Selected
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattermapbox.Selected`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("marker", None)
_v = marker if marker is not None else _v
if _v is not None:
self["marker"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
the-stack_106_26748 | import numpy as np
import gd
X = np.array([[2, 0], [0, 1], [0, 0]])
y = np.array([[3], [2], [2]])
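# fp is the gradient of the least-squares objective f(w) = ||Xw - y||^2,
# i.e. grad f(w) = 2 X^T (Xw - y); gd_const_ss follows it with a constant step size.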
def fp(w):
return 2 * X.T @ (X @ w - y)
stepsize = 0.1
maxiter = 1000000
w0 = np.array([[0.0], [0.0]])
w_traces = gd.gd_const_ss(fp, w0, stepsize=stepsize, maxiter=maxiter)
print(
f'stepsize={stepsize}, number of iterations={len(w_traces)-1}')
print(f'optimal solution:\n{w_traces[-1]}')
sol = np.linalg.solve(X.T @ X, X.T @ y)
print(f'solution from np.linalg.solve:\n{sol}')
|
the-stack_106_26752 | import numpy as np
import os
from mpi4py import MPI
from mpi4py.MPI import COMM_WORLD as comm
import h5py
import glob
import dolfin as df
from surfaise.common.io import makedirs_safe, remove_safe, load_parameters
from surfaise.common.cmd import (
info_warning, info_split, info_on_red, info,
mpi_is_root, mpi_barrier)
from surfaise.utilities.xdmf_utils import (
parse_xyz_xdmf, parse_timeseries_xdmf, list_xdmf_files)
def get_middle(string, prefix, suffix):
return string.split(prefix)[1].split(suffix)[0]
def prep(x_list):
""" Prepare a list representing coordinates to be used as key in a
dict. """
return tuple(x_list)
def numpy_to_dolfin(nodes, elements):
""" Convert nodes and elements to a dolfin mesh object. """
tmpfile = "tmp.h5"
dim = nodes.shape[1]
is_simplex = elements.shape[1] == dim + 1
if is_simplex:
cell_type = "triangle"
if dim == 3:
cell_type = "tetrahedron"
else:
cell_type = "quadrilateral"
if dim == 3:
cell_type = "hexahedron"
if mpi_is_root():
with h5py.File(tmpfile, "w") as h5f:
cell_indices = h5f.create_dataset(
"mesh/cell_indices", data=np.arange(len(elements)),
dtype='int64')
topology = h5f.create_dataset(
"mesh/topology", data=elements, dtype='int64')
coordinates = h5f.create_dataset(
"mesh/coordinates", data=nodes, dtype='float64')
topology.attrs["celltype"] = np.string_(cell_type)
topology.attrs["partition"] = np.array([0], dtype='uint64')
mpi_barrier()
mesh = df.Mesh()
h5f = df.HDF5File(mesh.mpi_comm(), tmpfile, "r")
h5f.read(mesh, "mesh", False)
h5f.close()
mpi_barrier()
remove_safe(tmpfile)
return mesh
def unpack_fields(folder, xdmffile):
if os.path.exists(os.path.join(folder, xdmffile)):
(_, _, A_address, _, _, A_shape, _, _) = parse_xyz_xdmf(
os.path.join(folder, xdmffile))
with h5py.File(os.path.join(folder, A_address[0]), "r") as h5f:
A = np.array(h5f[A_address[1]])
return A
else:
return None
class InterpolatedTimeSeries:
""" Class for loading timeseries """
def __init__(self, folder, sought_fields=None, memory_modest=True):
self.folder = folder
self.settings_folder = os.path.join(folder, "Settings")
self.timeseries_folder = os.path.join(folder, "Timeseries")
self.geometry_folder = os.path.join(folder, "Geometry")
self.statistics_folder = os.path.join(folder, "Statistics")
self.analysis_folder = os.path.join(folder, "Analysis")
self.plots_folder = os.path.join(folder, "Plots")
self.tmp_folder = os.path.join(folder, ".tmp")
self.memory_modest = memory_modest
self.params_prefix = os.path.join(self.settings_folder,
"parameters_from_tstep_")
self.params_suffix = ".dat"
self.parameters = dict()
self.nodes = None
self.elems = None
self.times = dict()
self.datasets = dict()
self._load_timeseries(sought_fields)
if len(self.fields) > 0:
self._load_mesh()
self.dummy_function = df.Function(self.function_space)
makedirs_safe(self.analysis_folder)
makedirs_safe(self.plots_folder)
makedirs_safe(self.tmp_folder)
def _load_mesh(self):
self.mesh = numpy_to_dolfin(self.nodes, self.elems)
self.function_space = df.FunctionSpace(self.mesh, "CG", 1)
self.vector_function_space = df.VectorFunctionSpace(
self.mesh, "CG", 1)
self.dim = self.function_space.mesh().topology().dim()
self.x = self._make_dof_coords()
self.xdict = self._make_xdict()
indices_function = df.Function(self.function_space)
self.set_val(indices_function, np.arange(len(self.nodes)))
self.indices = np.asarray(indices_function.vector().get_local(),
dtype=int)
def _load_timeseries(self, sought_fields=None):
if bool(os.path.exists(self.settings_folder) and
os.path.exists(self.timeseries_folder) and
os.path.exists(self.geometry_folder)):
info_split("Opening folder:", self.folder)
else:
info_on_red("Folder does not contain "
"Settings, Timeseries or Geometry folders.")
exit()
(geometry_address, topology_address, xyz_address,
geometry_shape, topology_shape, xyz_shape,
topology_type, nodes_per_element) = parse_xyz_xdmf(
os.path.join(self.geometry_folder, "xyz.xdmf"))
with h5py.File(os.path.join(self.geometry_folder,
topology_address[0]), "r") as h5f:
self.elems = np.array(h5f[topology_address[1]])
with h5py.File(os.path.join(self.geometry_folder,
geometry_address[0]), "r") as h5f:
self.nodes = np.array(h5f[geometry_address[1]])
self.g_ab = unpack_fields(self.geometry_folder, "g_ab.xdmf")
self.gab = unpack_fields(self.geometry_folder, "gab.xdmf")
self.K_ab = unpack_fields(self.geometry_folder, "K_ab.xdmf")
self.xyz_a = []
for i in range(3):
filename_loc = "dxyz{}.xdmf".format(i)
if os.path.exists(os.path.join(
self.geometry_folder, filename_loc)):
self.xyz_a.append(
unpack_fields(self.geometry_folder,
filename_loc))
data = dict()
for params_file in glob.glob(
self.params_prefix + "*" + self.params_suffix):
parameters = dict()
from_tstep = int(get_middle(params_file,
self.params_prefix,
self.params_suffix))
load_parameters(parameters, params_file)
t_0 = float(parameters["t_0"])
self.parameters[t_0] = parameters
from_tstep_suffix = "_from_tstep_" + str(from_tstep) + ".h5"
from_tstep_xml_suffix = "_from_tstep_" + str(from_tstep) + ".xdmf"
for xml_file in glob.glob(os.path.join(
self.timeseries_folder,
"*" + from_tstep_xml_suffix)):
data_file = xml_file[:-4] + "h5"
field = get_middle(data_file,
self.timeseries_folder + "/",
from_tstep_suffix)
if bool(sought_fields is None or
field in sought_fields):
if bool(field not in data):
data[field] = dict()
dsets = parse_timeseries_xdmf(xml_file)
for time, (dset_address, field_type) in dsets[field].items():
# If in memory saving mode, only store
# address for later use.
if self.memory_modest:
data[field][time] = (data_file, dset_address[-1])
else:
with h5py.File(data_file, "r") as h5f:
data[field][time] = np.array(h5f[dset_address[-1]])
for i, field in enumerate(data.keys()):
tmps = sorted(data[field].items())
if i == 0:
self.times = [tmp[0] for tmp in tmps]
self[field] = [tmp[1] for tmp in tmps]
self.parameters = sorted(self.parameters.items())
self.fields = self.datasets.keys()
def _make_dof_coords(self):
dofmap = self.function_space.dofmap()
my_first, my_last = dofmap.ownership_range()
x = self.function_space.tabulate_dof_coordinates().reshape(
(-1, self.dim))
unowned = dofmap.local_to_global_unowned()
dofs = list(filter(lambda dof: dofmap.local_to_global_index(dof)
not in unowned,
range(my_last-my_first)))
x = x[dofs]
return x
def _make_xdict(self):
if mpi_is_root():
xdict = dict([(prep(list(x_list)), i) for i, x_list in
enumerate(self.nodes.tolist())])
else:
xdict = None
xdict = comm.bcast(xdict, root=0)
return xdict
def set_val(self, f, f_data):
vec = f.vector()
values = vec.get_local()
values[:] = [f_data[self.xdict[prep(x_val)]]
for x_val in self.x.tolist()]
vec.set_local(values)
vec.apply('insert')
def update(self, f, field, step):
""" Set dolfin vector f with values from field. """
if field == "u":
u_data = self["u", step][:, :self.dim]
for i in range(self.dim):
self.set_val(self.dummy_function, u_data[:, i])
df.assign(f.sub(i), self.dummy_function)
else:
f_data = self[field, step][:]
self.set_val(f, f_data)
def update_all(self, f, step):
""" Set dict f of dolfin functions with values from all fields. """
for field in self.fields:
self.update(f[field], field, step)
def get_parameter(self, key, time=0., default=False):
""" Get a certain parameter at certain time. """
return self.get_parameters(time).get(key, default)
def get_parameters(self, time=0.):
""" Get parameter set at a certain time. """
if len(self.parameters) == 1:
return self.parameters[0][1]
if time <= self.parameters[0][0]:
return self.parameters[0][1]
for i in range(len(self.parameters)-1):
time_a = self.parameters[i][0]
time_b = self.parameters[i+1][0]
if time_a < time <= time_b:
return self.parameters[i][1]
return self.parameters[-1][1]
def __getitem__(self, key):
if len(key) == 1:
if self.memory_modest:
info_warning("InterpolatedTimeSeries[key]: len(key)==1 doesn't work "
"in memory_modest mode!")
return self.datasets[key]
if len(key) == 2:
field, step = key
if self.memory_modest:
data_file, dset_address = self.datasets[field][step]
with h5py.File(data_file, "r") as h5f:
return np.array(h5f[dset_address])
else:
return self.datasets[field][step]
def __setitem__(self, key, val):
self.datasets[key] = val
def __contains__(self, key):
""" Overload 'in' operator """
return key in self.datasets
def function(self, field):
# if field == "u":
# return df.Function(self.vector_function_space, name="u")
# else:
return df.Function(self.function_space, name=field)
def functions(self, fields=None):
""" Returns dolfin functions for all fields. """
f = dict()
if fields is None:
fields = self.fields
for field in fields:
f[field] = self.function(field)
return f
def __len__(self):
return len(self.times)
def get_time(self, step):
return self.times[step]
def get_nearest_step(self, time):
if time < self.times[0]:
return 0
for step in range(len(self)-1):
if time < self.times[step+1]:
if self.times[step+1]-time > time-self.times[step]:
return step
else:
return step+1
return len(self)-1
def get_nearest_step_and_time(self, time, dataset_str="dataset"):
step = self.get_nearest_step(time)
time_0 = self.get_time(step)
if abs(time-time_0) > 1e-10:
info_warning("Could not find {} "
"at time={}. Using time={} instead.".format(
dataset_str, time, time_0))
return step, time_0
def _operate(self, function, field):
return function([function(self[field, step], 0)
for step in range(len(self))])
def max(self, field):
return self._operate(np.max, field)
def min(self, field):
return self._operate(np.min, field)
def mean(self, field):
return self._operate(np.mean, field)
def add_field(self, field, datasets):
if self.memory_modest:
data_file = os.path.join(self.tmp_folder,
field + ".h5")
self[field] = [(data_file, field + "/" + str(step))
for step in range(len(datasets))]
if mpi_is_root():
with h5py.File(data_file, "w") as h5f:
for step, dataset in enumerate(datasets):
dset_address = field + "/" + str(step)
h5f.create_dataset(dset_address, data=dataset)
mpi_barrier()
else:
self[field] = datasets
self.fields = self.datasets.keys()
def nodal_values(self, f):
""" Convert dolfin function to nodal values. """
farray = f.vector().get_local()
        fdim = len(farray) // len(self.indices)  # integer division: reshape below needs an int
farray = farray.reshape((len(self.indices), fdim))
arr = np.zeros((len(self.nodes), fdim))
arr_loc = np.zeros_like(arr)
for i, fval in zip(self.indices, farray):
arr_loc[i, :] = fval
comm.Allreduce(arr_loc, arr, op=MPI.SUM)
return arr
if __name__ == "__main__":
info("Not intended for standalone use.")
|
the-stack_106_26753 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# erode.py -- a script to simulate erosion of height fields
# (c) 2014 Michel J. Anders (varkenvarken)
# with some modifications by Ian Huish (nerk)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
from time import time
import unittest
import sys
import os
from random import random as rand, shuffle
import numpy as np
numexpr_available = False
def getmemsize():
return 0.0
def getptime():
return time()
class Grid:
def __init__(self, size=10, dtype=np.single):
self.center = np.zeros([size, size], dtype)
self.water = None
self.sediment = None
self.scour = None
self.flowrate = None
self.sedimentpct = None
self.sedimentpct = None
self.capacity = None
self.avalanced = None
self.minx = None
self.miny = None
self.maxx = None
self.maxy = None
self.zscale = 1
self.maxrss = 0.0
self.sequence = [0, 1, 2, 3]
self.watermax = 1.0
self.flowratemax = 1.0
self.scourmax = 1.0
self.sedmax = 1.0
self.scourmin = 1.0
def init_water_and_sediment(self):
if self.water is None:
self.water = np.zeros(self.center.shape, dtype=np.single)
if self.sediment is None:
self.sediment = np.zeros(self.center.shape, dtype=np.single)
if self.scour is None:
self.scour = np.zeros(self.center.shape, dtype=np.single)
if self.flowrate is None:
self.flowrate = np.zeros(self.center.shape, dtype=np.single)
if self.sedimentpct is None:
self.sedimentpct = np.zeros(self.center.shape, dtype=np.single)
if self.capacity is None:
self.capacity = np.zeros(self.center.shape, dtype=np.single)
if self.avalanced is None:
self.avalanced = np.zeros(self.center.shape, dtype=np.single)
def __str__(self):
return ''.join(self.__str_iter__(fmt="%.3f"))
def __str_iter__(self, fmt):
for row in self.center[::]:
values=[]
for v in row:
values.append(fmt%v)
yield ' '.join(values) + '\n'
@staticmethod
def fromFile(filename):
if filename == '-':
filename = sys.stdin
g=Grid()
g.center=np.loadtxt(filename,np.single)
return g
def toFile(self, filename, fmt="%.3f"):
if filename == '-' :
filename = sys.stdout.fileno()
with open(filename,"w") as f:
for line in self.__str_iter__(fmt):
f.write(line)
def raw(self,format="%.3f"):
fstr=format+" "+ format+" "+ format+" "
a=self.center / self.zscale
minx = 0.0 if self.minx is None else self.minx
miny = 0.0 if self.miny is None else self.miny
maxx = 1.0 if self.maxx is None else self.maxx
maxy = 1.0 if self.maxy is None else self.maxy
dx = (maxx - minx) / (a.shape[0] - 1)
dy = (maxy - miny) / (a.shape[1] - 1)
for row in range(a.shape[0] - 1):
row0 = miny + row * dy
row1 = row0 + dy
for col in range(a.shape[1] - 1):
col0 = minx + col * dx
col1 = col0 + dx
yield (fstr%(row0 ,col0 ,a[row ][col ])+
fstr%(row0 ,col1 ,a[row ][col+1])+
fstr%(row1 ,col0 ,a[row+1][col ])+"\n")
yield (fstr%(row0 ,col1 ,a[row ][col+1])+
fstr%(row1 ,col0 ,a[row+1][col ])+
fstr%(row1 ,col1 ,a[row+1][col+1])+"\n")
def toRaw(self, filename, infomap=None):
with open(filename if type(filename) == str else sys.stdout.fileno() , "w") as f:
f.writelines(self.raw())
if infomap:
with open(os.path.splitext(filename)[0]+".inf" if type(filename) == str else sys.stdout.fileno() , "w") as f:
f.writelines("\n".join("%-15s: %s"%t for t in sorted(infomap.items())))
@staticmethod
def fromRaw(filename):
"""initialize a grid from a Blender .raw file.
        currently supports just rectangular grids of all triangles
"""
g = Grid.fromFile(filename)
# we assume tris and an axis aligned grid
g.center = np.reshape(g.center,(-1,3))
        g.rainmap = None
        g._sort(0.0)  # no rainmap for .raw input, so the expfact argument is effectively unused
return g
def _sort(self, expfact):
# keep unique vertices only by creating a set and sort first on x then on y coordinate
        # using rather slow python sort but couldn't wrap my head around np.lexsort
verts = sorted(list({ tuple(t) for t in self.center[::] }))
x = set(c[0] for c in verts)
y = set(c[1] for c in verts)
nx = len(x)
ny = len(y)
self.minx = min(x)
self.maxx = max(x)
self.miny = min(y)
self.maxy = max(y)
xscale = (self.maxx-self.minx)/(nx-1)
yscale = (self.maxy-self.miny)/(ny-1)
# note: a purely flat plane cannot be scaled
if (yscale != 0.0) and (abs(xscale/yscale) - 1.0 > 1e-3):
raise ValueError("Mesh spacing not square %d x %d %.4f x %4.f"%(nx,ny,xscale,yscale))
self.zscale = 1.0
if abs(yscale) > 1e-6 :
self.zscale = 1.0/yscale
        # keep just the z-values and null any offset
        # we might catch a reshape error that will occur if nx*ny != # of vertices (if we are not dealing with a heightfield but with a mesh with duplicate x,y coords, like an axis aligned cube)
self.center = np.array([c[2] for c in verts],dtype=np.single).reshape(nx,ny)
self.center = (self.center-np.amin(self.center))*self.zscale
if self.rainmap is not None:
rmscale = np.max(self.center)
self.rainmap = expfact + (1-expfact)*(self.center/rmscale)
@staticmethod
def fromBlenderMesh(me, vg, expfact):
g = Grid()
g.center = np.asarray(list(tuple(v.co) for v in me.vertices), dtype=np.single )
g.rainmap = None
if vg is not None:
for v in me.vertices:
vg.add([v.index],0.0,'ADD')
g.rainmap=np.asarray(list( (v.co[0], v.co[1], vg.weight(v.index)) for v in me.vertices), dtype=np.single )
g._sort(expfact)
return g
def setrainmap(self, rainmap):
self.rainmap = rainmap
def _verts(self, surface):
a = surface / self.zscale
minx = 0.0 if self.minx is None else self.minx
miny = 0.0 if self.miny is None else self.miny
maxx = 1.0 if self.maxx is None else self.maxx
maxy = 1.0 if self.maxy is None else self.maxy
dx = (maxx - minx) / (a.shape[0] - 1)
dy = (maxy - miny) / (a.shape[1] - 1)
for row in range(a.shape[0]):
row0 = miny + row * dy
for col in range(a.shape[1]):
col0 = minx + col * dx
yield (row0 ,col0 ,a[row ][col ])
def _faces(self):
nrow, ncol = self.center.shape
for row in range(nrow-1):
for col in range(ncol-1):
vi = row * ncol + col
yield (vi, vi+ncol, vi+1)
yield (vi+1, vi+ncol, vi+ncol+1)
def toBlenderMesh(self, me):
# pass me as argument so that we don't need to import bpy and create a dependency
# the docs state that from_pydata takes iterators as arguments but it will fail with generators because it does len(arg)
me.from_pydata(list(self._verts(self.center)),[],list(self._faces()))
def toWaterMesh(self, me):
# pass me as argument so that we don't need to import bpy and create a dependency
# the docs state that from_pydata takes iterators as arguments but it will fail with generators because it does len(arg)
me.from_pydata(list(self._verts(self.water)),[],list(self._faces()))
def peak(self, value=1):
nx,ny = self.center.shape
self.center[int(nx/2),int(ny/2)] += value
def shelf(self, value=1):
nx,ny = self.center.shape
        self.center[:nx//2] += value
def mesa(self, value=1):
nx,ny = self.center.shape
        self.center[nx//4:3*nx//4, ny//4:3*ny//4] += value
def random(self, value=1):
self.center += np.random.random_sample(self.center.shape)*value
def neighborgrid(self):
self.up = np.roll(self.center,-1,0)
self.down = np.roll(self.center,1,0)
self.left = np.roll(self.center,-1,1)
self.right = np.roll(self.center,1,1)
def zeroedge(self, quantity=None):
c = self.center if quantity is None else quantity
c[0,:] = 0
c[-1,:] = 0
c[:,0] = 0
c[:,-1] = 0
def diffuse(self, Kd, IterDiffuse, numexpr):
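        # Five-point Laplacian smoothing: each interior cell is moved toward the mean of its
        # four neighbours at rate Kd, spread over IterDiffuse passes.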
self.zeroedge()
c = self.center[1:-1,1:-1]
up = self.center[ :-2,1:-1]
down = self.center[2: ,1:-1]
left = self.center[1:-1, :-2]
right = self.center[1:-1,2: ]
if(numexpr and numexpr_available):
self.center[1:-1,1:-1] = ne.evaluate('c + Kd * (up + down + left + right - 4.0 * c)')
else:
self.center[1:-1,1:-1] = c + (Kd/IterDiffuse) * (up + down + left + right - 4.0 * c)
self.maxrss = max(getmemsize(), self.maxrss)
return self.center
def avalanche(self, delta, iterava, prob, numexpr):
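        # Talus slippage: wherever a neighbour differs in height by more than delta, half of
        # the excess beyond delta is transferred downhill; prob randomly masks slips (in the
        # non-numexpr path) and iterava spreads the transfer over several passes.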
self.zeroedge()
c = self.center[1:-1,1:-1]
up = self.center[ :-2,1:-1]
down = self.center[2: ,1:-1]
left = self.center[1:-1, :-2]
right = self.center[1:-1,2: ]
where = np.where
if(numexpr and numexpr_available):
self.center[1:-1,1:-1] = ne.evaluate('c + where((up -c) > delta ,(up -c -delta)/2, 0) \
+ where((down -c) > delta ,(down -c -delta)/2, 0) \
+ where((left -c) > delta ,(left -c -delta)/2, 0) \
+ where((right-c) > delta ,(right-c -delta)/2, 0) \
+ where((up -c) < -delta,(up -c +delta)/2, 0) \
+ where((down -c) < -delta,(down -c +delta)/2, 0) \
+ where((left -c) < -delta,(left -c +delta)/2, 0) \
+ where((right-c) < -delta,(right-c +delta)/2, 0)')
else:
sa = (
# incoming
where((up -c) > delta ,(up -c -delta)/2, 0)
+ where((down -c) > delta ,(down -c -delta)/2, 0)
+ where((left -c) > delta ,(left -c -delta)/2, 0)
+ where((right-c) > delta ,(right-c -delta)/2, 0)
# outgoing
+ where((up -c) < -delta,(up -c +delta)/2, 0)
+ where((down -c) < -delta,(down -c +delta)/2, 0)
+ where((left -c) < -delta,(left -c +delta)/2, 0)
+ where((right-c) < -delta,(right-c +delta)/2, 0)
)
randarray = np.random.randint(0,100,sa.shape) *0.01
sa = where(randarray < prob, sa, 0)
self.avalanced[1:-1,1:-1] = self.avalanced[1:-1,1:-1] + sa/iterava
self.center[1:-1,1:-1] = c + sa/iterava
self.maxrss = max(getmemsize(), self.maxrss)
return self.center
def rain(self, amount=1, variance=0, userainmap=False):
self.water += (1.0 - np.random.random(self.water.shape) * variance) * (amount if ((self.rainmap is None) or (not userainmap)) else self.rainmap * amount)
def spring(self, amount, px, py, radius):
# px, py and radius are all fractions
nx, ny = self.center.shape
rx = max(int(nx*radius),1)
ry = max(int(ny*radius),1)
px = int(nx*px)
py = int(ny*py)
self.water[px-rx:px+rx+1,py-ry:py+ry+1] += amount
def river(self, Kc, Ks, Kdep, Ka, Kev, numexpr):
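        # Hydraulic erosion step: water flows toward lower total height (rock + water) and
        # advects sediment with it; the carrying capacity grows with the flow rate (and with
        # slope via Ka in the numexpr branch), and the gap between capacity and current load
        # drives scour (Ks) or deposition (Kdep), while Kev evaporates standing water.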
zeros = np.zeros
where = np.where
min = np.minimum
max = np.maximum
abs = np.absolute
arctan = np.arctan
sin = np.sin
center = (slice( 1, -1,None),slice( 1, -1,None))
up = (slice(None, -2,None),slice( 1, -1,None))
down = (slice( 2, None,None),slice( 1, -1,None))
left = (slice( 1, -1,None),slice(None, -2,None))
right = (slice( 1, -1,None),slice( 2,None,None))
water = self.water
rock = self.center
sediment = self.sediment
height = rock + water
# !! this gives a runtime warning for division by zero
verysmallnumber = 0.0000000001
water += verysmallnumber
sc = where(water > verysmallnumber, sediment / water, 0)
sdw = zeros(water[center].shape)
svdw = zeros(water[center].shape)
sds = zeros(water[center].shape)
angle = zeros(water[center].shape)
for d in (up,down,left,right):
if(numexpr and numexpr_available):
hdd = height[d]
hcc = height[center]
dw = ne.evaluate('hdd-hcc')
inflow = ne.evaluate('dw > 0')
wdd = water[d]
wcc = water[center]
dw = ne.evaluate('where(inflow, where(wdd<dw, wdd, dw), where(-wcc>dw, -wcc, dw))/4.0') # nested where() represent min() and max()
sdw = ne.evaluate('sdw + dw')
scd = sc[d]
scc = sc[center]
rockd= rock[d]
rockc= rock[center]
sds = ne.evaluate('sds + dw * where(inflow, scd, scc)')
svdw = ne.evaluate('svdw + abs(dw)')
angle= ne.evaluate('angle + arctan(abs(rockd-rockc))')
else:
dw = (height[d]-height[center])
inflow = dw > 0
dw = where(inflow, min(water[d], dw), max(-water[center], dw))/4.0
sdw = sdw + dw
sds = sds + dw * where(inflow, sc[d], sc[center])
svdw = svdw + abs(dw)
angle= angle + np.arctan(abs(rock[d]-rock[center]))
if(numexpr and numexpr_available):
wcc = water[center]
scc = sediment[center]
rcc = rock[center]
water[center] = ne.evaluate('wcc + sdw')
sediment[center] = ne.evaluate('scc + sds')
sc = ne.evaluate('where(wcc>0, scc/wcc, 2000*Kc)')
fKc = ne.evaluate('Kc*sin(Ka*angle)*svdw')
ds = ne.evaluate('where(sc > fKc, -Kd * scc, Ks * svdw)')
rock[center] = ne.evaluate('rcc - ds')
# there isn't really a bottom to the rock but negative values look ugly
rock[center] = ne.evaluate('where(rcc<0,0,rcc)')
sediment[center] = ne.evaluate('scc + ds')
else:
wcc = water[center]
scc = sediment[center]
rcc = rock[center]
water[center] = wcc * (1-Kev) + sdw
sediment[center] = scc + sds
sc = where(wcc > 0, scc / wcc, 2 * Kc)
fKc = Kc*svdw
ds = where(fKc > sc, (fKc - sc) * Ks, (fKc - sc) * Kdep) * wcc
self.flowrate[center] = svdw
self.scour[center] = ds
self.sedimentpct[center] = sc
self.capacity[center] = fKc
sediment[center] = scc + ds + sds
def flow(self, Kc, Ks, Kz, Ka, numexpr):
zeros = np.zeros
where = np.where
min = np.minimum
max = np.maximum
abs = np.absolute
arctan = np.arctan
sin = np.sin
center = (slice( 1, -1,None),slice( 1, -1,None))
rock = self.center
ds = self.scour[center]
rcc = rock[center]
rock[center] = rcc - ds * Kz
# there isn't really a bottom to the rock but negative values look ugly
rock[center] = where(rcc<0,0,rcc)
def rivergeneration(self, rainamount, rainvariance, userainmap, Kc, Ks, Kdep, Ka, Kev, Kspring, Kspringx, Kspringy, Kspringr, numexpr):
self.init_water_and_sediment()
self.rain(rainamount, rainvariance, userainmap)
self.zeroedge(self.water)
self.zeroedge(self.sediment)
self.river(Kc, Ks, Kdep, Ka, Kev, numexpr)
self.watermax = np.max(self.water)
def fluvial_erosion(self, rainamount, rainvariance, userainmap, Kc, Ks, Kdep, Ka, Kspring, Kspringx, Kspringy, Kspringr, numexpr):
self.flow(Kc, Ks, Kdep, Ka, numexpr)
self.flowratemax = np.max(self.flowrate)
self.scourmax = np.max(self.scour)
self.scourmin = np.min(self.scour)
self.sedmax = np.max(self.sediment)
def analyze(self):
self.neighborgrid()
        # just looking at up and left to avoid needless double calculations
slopes=np.concatenate((np.abs(self.left - self.center),np.abs(self.up - self.center)))
return '\n'.join(["%-15s: %.3f"%t for t in [
('height average', np.average(self.center)),
('height median', np.median(self.center)),
('height max', np.max(self.center)),
('height min', np.min(self.center)),
('height std', np.std(self.center)),
('slope average', np.average(slopes)),
('slope median', np.median(slopes)),
('slope max', np.max(slopes)),
('slope min', np.min(slopes)),
('slope std', np.std(slopes))
]]
)
class TestGrid(unittest.TestCase):
def test_diffuse(self):
g = Grid(5)
g.peak(1)
self.assertEqual(g.center[2,2],1.0)
        g.diffuse(0.1, 1, numexpr=False)  # Kd=0.1, IterDiffuse=1 keeps the asserted values below
for n in [(2,1),(2,3),(1,2),(3,2)]:
self.assertAlmostEqual(g.center[n],0.1)
self.assertAlmostEqual(g.center[2,2],0.6)
def test_diffuse_numexpr(self):
g = Grid(5)
g.peak(1)
        g.diffuse(0.1, 1, numexpr=False)
h = Grid(5)
h.peak(1)
h.diffuse(0.1, numexpr=True)
self.assertEqual(list(g.center.flat),list(h.center.flat))
def test_avalanche_numexpr(self):
g = Grid(5)
g.peak(1)
g.avalanche(0.1, numexpr=False)
h = Grid(5)
h.peak(1)
h.avalanche(0.1, numexpr=True)
print(g)
print(h)
np.testing.assert_almost_equal(g.center,h.center)
if __name__ == "__main__":
import argparse
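    # Example invocation (illustrative only, script name assumed):
    #   python erode.py -I 50 -Kd 0.005 -Kr 0.1 -n -Gn 128 -Gp 1.0 -ro -outfile out.raw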
parser = argparse.ArgumentParser(description='Erode a terrain while assuming zero boundary conditions.')
parser.add_argument('-I', dest='iterations', type=int, default=1, help='the number of iterations')
parser.add_argument('-Kd', dest='Kd', type=float, default=0.01, help='Diffusion constant')
parser.add_argument('-Kh', dest='Kh', type=float, default=6, help='Maximum stable cliff height')
parser.add_argument('-Kp', dest='Kp', type=float, default=0.1, help='Avalanche probability for unstable cliffs')
parser.add_argument('-Kr', dest='Kr', type=float, default=0.1, help='Average amount of rain per iteration')
parser.add_argument('-Kspring', dest='Kspring', type=float, default=0.0, help='Average amount of wellwater per iteration')
parser.add_argument('-Kspringx', dest='Kspringx', type=float, default=0.5, help='relative x position of spring')
parser.add_argument('-Kspringy', dest='Kspringy', type=float, default=0.5, help='relative y position of spring')
parser.add_argument('-Kspringr', dest='Kspringr', type=float, default=0.02, help='radius of spring')
parser.add_argument('-Kdep', dest='Kdep', type=float, default=0.1, help='Sediment deposition constant')
parser.add_argument('-Ks', dest='Ks', type=float, default=0.1, help='Soil softness constant')
parser.add_argument('-Kc', dest='Kc', type=float, default=1.0, help='Sediment capacity')
parser.add_argument('-Ka', dest='Ka', type=float, default=2.0, help='Slope dependency of erosion')
parser.add_argument('-ri', action='store_true', dest='rawin', default=False, help='use Blender raw format for input')
parser.add_argument('-ro', action='store_true', dest='rawout', default=False, help='use Blender raw format for output')
parser.add_argument('-i', action='store_true', dest='useinputfile', default=False, help='use an inputfile (instead of just a synthesized grid)')
parser.add_argument('-t', action='store_true', dest='timingonly', default=False, help='do not write anything to an output file')
parser.add_argument('-infile', type=str, default="-", help='input filename')
parser.add_argument('-outfile', type=str, default="-", help='output filename')
parser.add_argument('-Gn', dest='gridsize', type=int, default=20, help='Gridsize (always square)')
parser.add_argument('-Gp', dest='gridpeak', type=float, default=0, help='Add peak with given height')
    parser.add_argument('-Gs', dest='gridshelf', type=float, default=0, help='Add shelf with given height')
parser.add_argument('-Gm', dest='gridmesa', type=float, default=0, help='Add mesa with given height')
parser.add_argument('-Gr', dest='gridrandom', type=float, default=0, help='Add random values between 0 and given value')
parser.add_argument('-m', dest='threads', type=int, default=1, help='number of threads to use')
    parser.add_argument('-u', action='store_true', dest='unittest', default=False, help='perform unit tests')
parser.add_argument('-a', action='store_true', dest='analyze', default=False, help='show some statistics of input and output meshes')
parser.add_argument('-d', action='store_true', dest='dump', default=False, help='show sediment and water meshes at end of run')
parser.add_argument('-n', action='store_true', dest='usenumexpr', default=False, help='use numexpr optimizations')
args = parser.parse_args()
    print("\nInput arguments:", file=sys.stderr)
print("\n".join("%-15s: %s"%t for t in sorted(vars(args).items())), file=sys.stderr)
if args.unittest:
unittest.main(argv=[sys.argv[0]])
sys.exit(0)
if args.useinputfile:
if args.rawin:
grid = Grid.fromRaw(args.infile)
else:
grid = Grid.fromFile(args.infile)
else:
grid = Grid(args.gridsize)
if args.gridpeak > 0 : grid.peak(args.gridpeak)
if args.gridmesa > 0 : grid.mesa(args.gridmesa)
if args.gridshelf > 0 : grid.shelf(args.gridshelf)
if args.gridrandom > 0 : grid.random(args.gridrandom)
if args.analyze:
print('\nstatistics of the input grid:\n\n', grid.analyze(), file=sys.stderr, sep='' )
t = getptime()
for g in range(args.iterations):
if args.Kd > 0:
grid.diffuse(args.Kd, args.usenumexpr)
if args.Kh > 0 and args.Kp > rand():
grid.avalanche(args.Kh, args.usenumexpr)
if args.Kr > 0 or args.Kspring > 0:
            # rainvariance and userainmap have no command line flags here, so pass
            # neutral values (assumed defaults) to match fluvial_erosion's signature
            grid.fluvial_erosion(args.Kr, 0, False, args.Kc, args.Ks, args.Kdep, args.Ka, args.Kspring, args.Kspringx, args.Kspringy, args.Kspringr, args.usenumexpr)
t = getptime() - t
print("\nElapsed time: %.1f seconds, max memory %.1f Mb.\n"%(t,grid.maxrss), file=sys.stderr)
if args.analyze:
print('\nstatistics of the output grid:\n\n', grid.analyze(), file=sys.stderr, sep='')
if not args.timingonly:
if args.rawout:
grid.toRaw(args.outfile, vars(args))
else:
grid.toFile(args.outfile)
if args.dump:
print("sediment\n", np.array_str(grid.sediment,precision=3), file=sys.stderr)
print("water\n", np.array_str(grid.water,precision=3), file=sys.stderr)
print("sediment concentration\n", np.array_str(grid.sediment/grid.water,precision=3), file=sys.stderr)
|
the-stack_106_26755 | # Exercise 1 (week1) :: Somu :: 22-01-2018
# Adapted from: Ian McLoughlin 's Source code
# A program that displays Fibonacci numbers.
def fib(n):
"""This function returns the nth Fibonacci number."""
i = 0
j = 1
n = n - 1
while n >= 0:
i, j = j, i + j
n = n - 1
return i
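# Quick sanity check of this implementation: fib(0) == 0, fib(1) == 1, fib(2) == 1,
# fib(3) == 2, fib(4) == 3, fib(5) == 5.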
# Test the function with the following value.
# My name is Somanathan, so the first and last letter of my name (S + N = 19 + 14) give the number 33
x = 33
ans = fib(x)
print("Fibonacci number", x, "is", ans)
# Exercise 2 (week2) :: Somu :: 29-01-2018
# My Surname is Subramaniyan.
# The ASCII values of the first and last characters are summed up; the Fibonacci number for that total is displayed.
name = "Subramaniyan"
first = name[0]
last = name[-1]
firstno = ord(first)
lastno = ord(last)
x = firstno + lastno
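# For "Subramaniyan" this gives ord('S') == 83 plus ord('n') == 110, so x == 193.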
ans = fib(x)
print("My surname is", name)
print("The first letter", first, "is number", firstno)
print("The last letter", last, "is number", lastno)
print("Fibonacci number", x, "is", ans) |
the-stack_106_26756 | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Environment definitions and helpers for SageMaker training and serving containers."""
from __future__ import absolute_import
from distutils import util
import json
import logging
import multiprocessing
import os
import shlex
import socket
import subprocess
import sys
import time
import boto3
from sagemaker_containers import _content_types, _logging, _mapping, _params
logger = _logging.get_logger()
SAGEMAKER_BASE_PATH = os.path.join("/opt", "ml") # type: str
BASE_PATH_ENV = "SAGEMAKER_BASE_DIR" # type: str
def _write_json(obj, path): # type: (object, str) -> None
    """Writes a serializable object as a JSON file"""
with open(path, "w") as f:
json.dump(obj, f)
def _is_training_path_configured(): # type: () -> bool
"""Check if the tree structure with data and configuration files used for training
exists.
When a SageMaker Training Job is created, the Docker container that will be used for
training is executed with the folder /opt/ml attached. The /opt/ml folder contains
data and configurations files necessary for training.
Outside SageMaker, the environment variable SAGEMAKER_BASE_DIR defines the location
of the base folder.
    This function checks whether /opt/ml exists or if the base folder variable is set.
Returns:
(bool): indicating whether the training path is configured or not.
"""
return os.path.exists(SAGEMAKER_BASE_PATH) or BASE_PATH_ENV in os.environ
def _set_base_path_env(): # type: () -> None
"""Sets the environment variable SAGEMAKER_BASE_DIR as
~/sagemaker_local/{timestamp}/opt/ml
"""
local_config_dir = os.path.join(
os.path.expanduser("~"), "sagemaker_local", "jobs", str(time.time()), "opt", "ml"
)
logger.info("Setting environment variable SAGEMAKER_BASE_DIR as %s ." % local_config_dir)
os.environ[BASE_PATH_ENV] = local_config_dir
_is_path_configured = _is_training_path_configured()
if not _is_path_configured:
logger.info("Directory /opt/ml does not exist.")
_set_base_path_env()
base_dir = os.environ.get(BASE_PATH_ENV, SAGEMAKER_BASE_PATH) # type: str
code_dir = os.path.join(base_dir, "code")
"""str: the path of the user's code directory, e.g., /opt/ml/code/"""
model_dir = os.path.join(base_dir, "model") # type: str
"""str: the directory where models should be saved, e.g., /opt/ml/model/"""
input_dir = os.path.join(base_dir, "input") # type: str
"""str: the path of the input directory, e.g. /opt/ml/input/
The input_dir, e.g. /opt/ml/input/, is the directory where SageMaker saves input data
and configuration files before and during training.
The input data directory has the following subdirectories:
config (`input_config_dir`) and data (`_input_data_dir`)
Returns:
str: the path of the input directory, e.g. /opt/ml/input/
"""
_input_data_dir = os.path.join(input_dir, "data") # type: str
input_config_dir = os.path.join(input_dir, "config") # type: str
"""str: the path of the input directory, e.g. /opt/ml/input/config/
The directory where standard SageMaker configuration files are located, e.g. /opt/ml/input/config/.
SageMaker training creates the following files in this folder when training starts:
- `hyperparameters.json`: Amazon SageMaker makes the hyperparameters in a CreateTrainingJob
request available in this file.
- `inputdataconfig.json`: You specify data channel information in the InputDataConfig parameter
in a CreateTrainingJob request. Amazon SageMaker makes this information available
in this file.
- `resourceconfig.json`: name of the current host and all host containers in the training
More information about these files can be found here:
https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html
Returns:
str: the path of the input directory, e.g. /opt/ml/input/config/
"""
output_dir = os.path.join(base_dir, "output") # type: str
"""str: the path to the output directory, e.g. /opt/ml/output/.
The directory where training success/failure indications will be written, e.g. /opt/ml/output.
To save non-model artifacts check `output_data_dir`.
Returns:
str: the path to the output directory, e.g. /opt/ml/output/.
"""
output_data_dir = os.path.join(output_dir, "data") # type: str
output_intermediate_dir = os.path.join(output_dir, "intermediate") # type: str
"""str: the path to the intermediate output directory, e.g. /opt/ml/output/intermediate.
The directory special behavior is to move artifacts from the training instance to
s3 directory during training.
Returns:
str: the path to the intermediate output directory, e.g. /opt/ml/output/intermediate.
"""
HYPERPARAMETERS_FILE = "hyperparameters.json" # type: str
RESOURCE_CONFIG_FILE = "resourceconfig.json" # type: str
INPUT_DATA_CONFIG_FILE = "inputdataconfig.json" # type: str
hyperparameters_file_dir = os.path.join(input_config_dir, HYPERPARAMETERS_FILE) # type: str
input_data_config_file_dir = os.path.join(input_config_dir, INPUT_DATA_CONFIG_FILE) # type: str
resource_config_file_dir = os.path.join(input_config_dir, RESOURCE_CONFIG_FILE) # type: str
def _create_training_directories():
"""Creates the directory structure and files necessary for training under the base path
"""
logger.info("Creating a new training folder under %s ." % base_dir)
os.makedirs(model_dir)
os.makedirs(input_config_dir)
os.makedirs(output_data_dir)
_write_json({}, hyperparameters_file_dir)
_write_json({}, input_data_config_file_dir)
host_name = socket.gethostname()
resources_dict = {"current_host": host_name, "hosts": [host_name]}
_write_json(resources_dict, resource_config_file_dir)
if not _is_path_configured:
_create_training_directories()
def _create_code_dir(): # type: () -> None
"""Creates /opt/ml/code when the module is imported."""
if not os.path.exists(code_dir):
os.makedirs(code_dir)
_create_code_dir()
def _read_json(path): # type: (str) -> dict
"""Read a JSON file.
Args:
path (str): Path to the file.
Returns:
(dict[object, object]): A dictionary representation of the JSON file.
"""
with open(path, "r") as f:
return json.load(f)
def read_hyperparameters(): # type: () -> dict
"""Read the hyperparameters from /opt/ml/input/config/hyperparameters.json.
For more information about hyperparameters.json:
https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html#your-algorithms-training-algo-running-container-hyperparameters
Returns:
(dict[string, object]): a dictionary containing the hyperparameters.
"""
hyperparameters = _read_json(hyperparameters_file_dir)
deserialized_hps = {}
for k, v in hyperparameters.items():
try:
v = json.loads(v)
except (ValueError, TypeError):
logger.info(
"Failed to parse hyperparameter %s value %s to Json.\n"
"Returning the value itself",
k,
v,
)
deserialized_hps[k] = v
return deserialized_hps
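# For example, a raw value of '0.01' parses to the float 0.01, while a value that is
# not valid JSON (e.g. a bare string) is returned unchanged by the fallback above.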
def read_resource_config(): # type: () -> dict
"""Read the resource configuration from /opt/ml/input/config/resourceconfig.json.
For more information about resourceconfig.json:
https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html#your-algorithms-training-algo-running-container-dist-training
Returns:
resource_config (dict[string, object]): the contents from /opt/ml/input/config/resourceconfig.json.
It has the following keys:
- current_host: The name of the current container on the container
network. For example, 'algo-1'.
- hosts: The list of names of all containers on the container
network, sorted lexicographically. For example,
`['algo-1', 'algo-2', 'algo-3']` for a three-node cluster.
"""
return _read_json(resource_config_file_dir)
def read_input_data_config(): # type: () -> dict
"""Read the input data configuration from /opt/ml/input/config/inputdataconfig.json.
For example, suppose that you specify three data channels (train, evaluation, and
validation) in your request. This dictionary will contain:
{'train': {
'ContentType': 'trainingContentType',
'TrainingInputMode': 'File',
'S3DistributionType': 'FullyReplicated',
'RecordWrapperType': 'None'
},
'evaluation' : {
'ContentType': 'evalContentType',
'TrainingInputMode': 'File',
'S3DistributionType': 'FullyReplicated',
'RecordWrapperType': 'None'
},
'validation': {
'TrainingInputMode': 'File',
'S3DistributionType': 'FullyReplicated',
'RecordWrapperType': 'None'
}}
For more information about inpudataconfig.json:
https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html#your-algorithms-training-algo-running-container-dist-training
Returns:
input_data_config (dict[string, object]): contents from /opt/ml/input/config/inputdataconfig.json.
"""
return _read_json(input_data_config_file_dir)
def channel_path(channel): # type: (str) -> str
""" Returns the directory containing the channel data file(s) which is:
- <self.base_dir>/input/data/<channel>
For more information about channels: https://docs.aws.amazon.com/sagemaker/latest/dg/API_Channel.html
Returns:
str: The input data directory for the specified channel.
"""
return os.path.join(_input_data_dir, channel)
def num_gpus(): # type: () -> int
"""The number of gpus available in the current container.
Returns:
int: number of gpus available in the current container.
"""
try:
cmd = shlex.split("nvidia-smi --list-gpus")
output = subprocess.check_output(cmd).decode("utf-8")
return sum([1 for x in output.split("\n") if x.startswith("GPU ")])
except (OSError, subprocess.CalledProcessError):
logger.info("No GPUs detected (normal if no gpus installed)")
return 0
def num_cpus(): # type: () -> int
"""The number of cpus available in the current container.
Returns:
int: number of cpus available in the current container.
"""
return multiprocessing.cpu_count()
class _Env(_mapping.MappingMixin):
"""Base Class which provides access to aspects of the environment including
system characteristics, filesystem locations, environment variables and configuration settings.
The Env is a read-only snapshot of the container environment. It does not contain any form of
state. It is a dictionary like object, allowing any builtin function that works with dictionary.
Attributes:
current_host (str): The name of the current container on the container network. For
example, 'algo-1'.
module_name (str): The name of the user provided module.
module_dir (str): The full path location of the user provided module.
"""
    def __init__(self):
        """Read the common container configuration from environment variables and
        probe the number of available CPUs and GPUs."""
current_host = os.environ.get(_params.CURRENT_HOST_ENV)
module_name = os.environ.get(_params.USER_PROGRAM_ENV, None)
module_dir = os.environ.get(_params.SUBMIT_DIR_ENV, code_dir)
log_level = int(os.environ.get(_params.LOG_LEVEL_ENV, logging.INFO))
self._current_host = current_host
self._num_gpus = num_gpus()
self._num_cpus = num_cpus()
self._module_name = module_name
self._user_entry_point = module_name
self._module_dir = module_dir
self._log_level = log_level
self._model_dir = model_dir
@property
def model_dir(self): # type: () -> str
"""Returns:
str: the directory where models should be saved, e.g., /opt/ml/model/"""
return self._model_dir
@property
def current_host(self): # type: () -> str
"""The name of the current container on the container network. For example, 'algo-1'.
Returns:
str: current host.
"""
return self._current_host
@property
def num_gpus(self): # type: () -> int
"""The number of gpus available in the current container.
Returns:
int: number of gpus available in the current container.
"""
return self._num_gpus
@property
def num_cpus(self): # type: () -> int
"""The number of cpus available in the current container.
Returns:
int: number of cpus available in the current container.
"""
return self._num_cpus
@property
def module_name(self): # type: () -> str
"""The name of the user provided module.
Returns:
str: name of the user provided module
"""
return self._parse_module_name(self._module_name)
@property
def module_dir(self): # type: () -> str
"""The full path location of the user provided module.
Returns:
str: full path location of the user provided module.
"""
return self._module_dir
@property
def log_level(self): # type: () -> int
"""Environment logging level.
Returns:
int: environment logging level.
"""
return self._log_level
@property
def user_entry_point(self): # type: () -> str
"""The name of provided user entry point.
Returns:
str: The name of provided user entry point
"""
return self._user_entry_point
@staticmethod
def _parse_module_name(program_param):
"""Given a module name or a script name, Returns the module name.
This function is used for backwards compatibility.
Args:
program_param (str): Module or script name.
Returns:
str: Module name
"""
if program_param and program_param.endswith(".py"):
return program_param[:-3]
return program_param
class TrainingEnv(_Env):
"""Provides access to aspects of the training environment relevant to training jobs, including
hyperparameters, system characteristics, filesystem locations, environment variables and
configuration settings.
The TrainingEnv is a read-only snapshot of the container environment during training. It does
not contain any form of state.
It is a dictionary like object, allowing any builtin function that works with dictionary.
Example on how a script can use training environment:
>>>import sagemaker_containers
>>>env = sagemaker_containers.training_env()
get the path of the channel 'training' from the inputdataconfig.json file
>>>training_dir = env.channel_input_dirs['training']
        get the hyperparameter 'training_data_file' from hyperparameters.json file
>>>file_name = env.hyperparameters['training_data_file']
get the folder where the model should be saved
>>>model_dir = env.model_dir
>>>data = np.load(os.path.join(training_dir, file_name))
>>>x_train, y_train = data['features'], keras.utils.to_categorical(data['labels'])
>>>model = ResNet50(weights='imagenet')
...
>>>model.fit(x_train, y_train)
save the model in the end of training
>>>model.save(os.path.join(model_dir, 'saved_model'))
Attributes:
input_dir (str): The input_dir, e.g. /opt/ml/input/, is the directory where SageMaker saves
input data and configuration files before and during training. The input
data directory has the following subdirectories:
config (`input_config_dir`) and data (`_input_data_dir`)
input_config_dir (str): The directory where standard SageMaker configuration files are
located, e.g. /opt/ml/input/config/.
SageMaker training creates the following files in this folder when training starts:
- `hyperparameters.json`: Amazon SageMaker makes the hyperparameters in a
CreateTrainingJob request available in this file.
- `inputdataconfig.json`: You specify data channel information in the
InputDataConfig parameter in a CreateTrainingJob request.
Amazon SageMaker makes this information available in this
file.
- `resourceconfig.json`: name of the current host and all host containers in the
training
            More information about these files can be found here:
https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html
model_dir (str): the directory where models should be saved, e.g., /opt/ml/model/
output_dir (str): The directory where training success/failure indications will be written,
e.g. /opt/ml/output. To save non-model artifacts check `output_data_dir`.
hyperparameters (dict[string, object]): An instance of `HyperParameters` containing the
training job hyperparameters.
resource_config (dict[string, object]): the contents from
/opt/ml/input/config/resourceconfig.json.
It has the following keys:
- current_host: The name of the current container on the container network.
For example, 'algo-1'.
- hosts: The list of names of all containers on the container network,
sorted lexicographically. For example, `['algo-1', 'algo-2', 'algo-3']`
for a three-node cluster.
input_data_config (dict[string, object]): the contents from /opt/ml/input/config/inputdataconfig.json.
For example, suppose that you specify three data channels (train, evaluation, and
validation) in your request. This dictionary will contain:
{'train': {
'ContentType': 'trainingContentType',
'TrainingInputMode': 'File',
'S3DistributionType': 'FullyReplicated',
'RecordWrapperType': 'None'
},
'evaluation' : {
'ContentType': 'evalContentType',
'TrainingInputMode': 'File',
'S3DistributionType': 'FullyReplicated',
'RecordWrapperType': 'None'
},
'validation': {
'TrainingInputMode': 'File',
'S3DistributionType': 'FullyReplicated',
'RecordWrapperType': 'None'
}}
You can find more information about /opt/ml/input/config/inputdataconfig.json here:
https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html#your-algorithms-training-algo-running-container-inputdataconfig
output_data_dir (str): The dir to write non-model training artifacts (e.g. evaluation
results) which will be retained by SageMaker,
e.g. /opt/ml/output/data. As your algorithm runs in a container,
it generates output including the status of the training job and
model and output artifacts. Your algorithm should write this
                               information to this directory.
hosts (list[str]): The list of names of all containers on the container network, sorted
lexicographically. For example, `['algo-1', 'algo-2', 'algo-3']` for
a three-node cluster.
channel_input_dirs (dict[string, string]): containing the data channels and the directories
where the training data was saved. When you run
training, you can partition your training data
into different logical 'channels'. Depending on
your problem, some common channel ideas are:
'train', 'test', 'evaluation' or 'images',
'labels'.
The format of channel_input_dir is as follows:
- `channel`(str) - the name of the channel defined in the input_data_config.
- `training data path`(str) - the path to the directory where the training data is
saved.
framework_module (str): Name of the framework module and entry point. For example:
my_module:main
network_interface_name (str): Name of the network interface used for distributed training
job_name (str): The name of the current training job
"""
def __init__(self, resource_config=None, input_data_config=None, hyperparameters=None):
super(TrainingEnv, self).__init__()
resource_config = resource_config or read_resource_config()
current_host = resource_config["current_host"]
hosts = resource_config["hosts"]
input_data_config = input_data_config or read_input_data_config()
all_hyperparameters = hyperparameters or read_hyperparameters()
split_result = _mapping.split_by_criteria(
all_hyperparameters,
keys=_params.SAGEMAKER_HYPERPARAMETERS,
prefix=_params.SAGEMAKER_PREFIX,
)
sagemaker_hyperparameters = split_result.included
additional_framework_parameters = {
k: sagemaker_hyperparameters[k]
for k in sagemaker_hyperparameters.keys()
if k not in _params.SAGEMAKER_HYPERPARAMETERS
}
sagemaker_region = sagemaker_hyperparameters.get(
_params.REGION_NAME_PARAM, boto3.session.Session().region_name
)
os.environ[_params.JOB_NAME_ENV] = sagemaker_hyperparameters.get(_params.JOB_NAME_PARAM, "")
os.environ[_params.CURRENT_HOST_ENV] = current_host
os.environ[_params.REGION_NAME_ENV] = sagemaker_region or ""
self._hosts = hosts
# eth0 is the default network interface defined by SageMaker with VPC support and
# local mode.
# ethwe is the current network interface defined by SageMaker training, it will be
        # changed to eth0 in the near future.
self._network_interface_name = resource_config.get("network_interface_name", "eth0")
self._hyperparameters = split_result.excluded
self._additional_framework_parameters = additional_framework_parameters
self._resource_config = resource_config
self._input_data_config = input_data_config
self._output_data_dir = output_data_dir
self._output_intermediate_dir = output_intermediate_dir
self._channel_input_dirs = {channel: channel_path(channel) for channel in input_data_config}
self._current_host = current_host
# override base class attributes
if self._module_name is None:
self._module_name = str(sagemaker_hyperparameters.get(_params.USER_PROGRAM_PARAM, None))
self._user_entry_point = self._user_entry_point or sagemaker_hyperparameters.get(
_params.USER_PROGRAM_PARAM
)
self._module_dir = str(sagemaker_hyperparameters.get(_params.SUBMIT_DIR_PARAM, code_dir))
self._log_level = sagemaker_hyperparameters.get(_params.LOG_LEVEL_PARAM, logging.INFO)
self._sagemaker_s3_output = sagemaker_hyperparameters.get(
_params.S3_OUTPUT_LOCATION_PARAM, None
)
self._framework_module = os.environ.get(_params.FRAMEWORK_TRAINING_MODULE_ENV, None)
self._input_dir = input_dir
self._input_config_dir = input_config_dir
self._output_dir = output_dir
self._job_name = os.environ.get(_params.TRAINING_JOB_ENV.upper(), None)
self._master_hostname = list(hosts)[0]
self._is_master = current_host == self._master_hostname
@property
def is_master(self): # type: () -> bool
"""Returns True if host is master
"""
return self._is_master
@property
def master_hostname(self): # type: () -> str
"""Returns the hostname of the master node
"""
return self._master_hostname
@property
def job_name(self): # type: () -> str
"""The name of the current training job.
Returns:
str: the training job name.
"""
return self._job_name
@property
def additional_framework_parameters(self): # type: () -> dict
"""The dict of additional framework hyperparameters. All the hyperparameters prefixed with
'sagemaker_' but not in SAGEMAKER_HYPERPARAMETERS will be included here.
Returns:
dict: additional framework hyperparameters, SageMaker Python SDK adds hyperparameters
with a prefix **sagemaker_** during training. These hyperparameters are
framework independent settings and are not defined by the user.
"""
return self._additional_framework_parameters
def sagemaker_s3_output(self): # type: () -> str
"""S3 output directory location provided by the user.
Returns:
str: S3 location.
"""
return self._sagemaker_s3_output
def to_cmd_args(self):
"""Command line arguments representation of the training environment.
Returns:
(list): List of cmd arguments
"""
return _mapping.to_cmd_args(self.hyperparameters)
def to_env_vars(self):
"""Environment variable representation of the training environment
Returns:
dict: an instance of dictionary
"""
env = {
"hosts": self.hosts,
"network_interface_name": self.network_interface_name,
"hps": self.hyperparameters,
"user_entry_point": self.user_entry_point,
"framework_params": self.additional_framework_parameters,
"resource_config": self.resource_config,
"input_data_config": self.input_data_config,
"output_data_dir": self.output_data_dir,
"channels": sorted(self.channel_input_dirs.keys()),
"current_host": self.current_host,
"module_name": self.module_name,
"log_level": self.log_level,
"framework_module": self.framework_module,
"input_dir": self.input_dir,
"input_config_dir": self.input_config_dir,
"output_dir": self.output_dir,
"num_cpus": self.num_cpus,
"num_gpus": self.num_gpus,
"model_dir": self.model_dir,
"module_dir": self.module_dir,
"training_env": dict(self),
"user_args": self.to_cmd_args(),
"output_intermediate_dir": self.output_intermediate_dir,
}
for name, path in self.channel_input_dirs.items():
env["channel_%s" % name] = path
for key, value in self.hyperparameters.items():
env["hp_%s" % key] = value
return _mapping.to_env_vars(env)
@property
def hosts(self): # type: () -> list
"""The list of names of all containers on the container network, sorted lexicographically.
For example, `["algo-1", "algo-2", "algo-3"]` for a three-node cluster.
Returns:
list[str]: all the hosts in the training network.
"""
return self._hosts
@property
def channel_input_dirs(self): # type: () -> dict
"""A dict[str, str] containing the data channels and the directories where the training
data was saved.
When you run training, you can partition your training data into different logical
"channels".
Depending on your problem, some common channel ideas are: "train", "test", "evaluation"
        or "images", "labels".
The format of channel_input_dir is as follows:
- `channel`[key](str) - the name of the channel defined in the input_data_config.
- `training data path`[value](str) - the path to the directory where the training
data is saved.
Returns:
dict[str, str] with the information about the channels.
"""
return self._channel_input_dirs
@property
def network_interface_name(self): # type: () -> str
"""Name of the network interface used for distributed training
Returns:
str: name of the network interface, for example, 'ethwe'
"""
return self._network_interface_name
@property
def input_dir(self): # type: () -> str
"""The input_dir, e.g. /opt/ml/input/, is the directory where SageMaker saves input data
and configuration files before and during training.
The input data directory has the following subdirectories:
config (`input_config_dir`) and data (`input_data_dir`)
Returns:
str: the path of the input directory, e.g. /opt/ml/input/
"""
return self._input_dir
@property
def input_config_dir(self): # type: () -> str
"""The directory where standard SageMaker configuration files are located, e.g.
/opt/ml/input/config/.
SageMaker training creates the following files in this folder when training starts:
- `hyperparameters.json`: Amazon SageMaker makes the hyperparameters in a
CreateTrainingJob request available in this file.
- `inputdataconfig.json`: You specify data channel information in the
InputDataConfig parameter in a CreateTrainingJob request.
Amazon SageMaker makes this information available in this
file.
- `resourceconfig.json`: name of the current host and all host containers in the
              training. More information about these files can be found here:
https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html
Returns:
str: the path of the input directory, e.g. /opt/ml/input/config/
"""
return self._input_config_dir
@property
def output_dir(self): # type: () -> str
"""The directory where training success/failure indications will be written,
e.g. /opt/ml/output.
To save non-model artifacts check `output_data_dir`.
Returns:
str: the path to the output directory, e.g. /opt/ml/output/.
"""
return self._output_dir
@property
def hyperparameters(self): # type: () -> dict
"""The dict of hyperparameters that were passed to the training job.
Returns:
dict[str, object]: An instance of `HyperParameters` containing the training job
hyperparameters.
"""
return self._hyperparameters
@property
def resource_config(self): # type: () -> dict
"""A dictionary with the contents from /opt/ml/input/config/resourceconfig.json.
It has the following keys:
- current_host: The name of the current container on the container
network. For example, 'algo-1'.
- hosts: The list of names of all containers on the container network,
sorted lexicographically. For example,
`["algo-1", "algo-2", "algo-3"]` for a three-node cluster.
Returns:
dict[str, str or list(str)]
"""
return self._resource_config
@property
def input_data_config(self): # type: () -> dict
"""A dictionary with the contents from /opt/ml/input/config/inputdataconfig.json.
For example, suppose that you specify three data channels (train,
evaluation, and validation) in your request. This dictionary will contain:
```{"train": {
"ContentType": "trainingContentType",
"TrainingInputMode": "File",
"S3DistributionType": "FullyReplicated",
"RecordWrapperType": "None"
},
"evaluation" : {
"ContentType": "evalContentType",
"TrainingInputMode": "File",
"S3DistributionType": "FullyReplicated",
"RecordWrapperType": "None"
},
"validation": {
"TrainingInputMode": "File",
"S3DistributionType": "FullyReplicated",
"RecordWrapperType": "None"
}
} ```
You can find more information about /opt/ml/input/config/inputdataconfig.json here:
https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html#your-algorithms-training-algo-running-container-inputdataconfig
Returns:
dict[str, dict[str, str]]
"""
return self._input_data_config
@property
def output_data_dir(self): # type: () -> str
"""The dir to write non-model training artifacts (e.g. evaluation results) which will be
retained by SageMaker, e.g. /opt/ml/output/data/{current_host}.
As your algorithm runs in a container, it generates output including the status of the
training job and model and output artifacts. Your algorithm should write this information
        to this directory.
Returns:
str: the path to output data directory, e.g. /opt/ml/output/data/algo-1.
"""
return self._output_data_dir
@property
def output_intermediate_dir(self): # type: () -> str
"""The directory for intermediate output artifacts that should be synced to S3.
Any files written to this directory will be uploaded to S3 by a background process
while training is in progress, but only if sagemaker_s3_output was specified.
Returns:
str: the path to the intermediate output directory, e.g. /opt/ml/output/intermediate.
"""
return self._output_intermediate_dir
@property
def framework_module(self): # type: () -> str
"""Returns:
str: Name of the framework module and entry point. For example:
my_module:main"""
return self._framework_module
class ServingEnv(_Env):
"""Provides access to aspects of the serving environment relevant to serving containers,
including system characteristics, environment variables and configuration settings.
The ServingEnv is a read-only snapshot of the container environment. It does not contain any
form of state.
It is a dictionary like object, allowing any builtin function that works with dictionary.
Example on how to print the state of the container:
>>> from sagemaker_containers import _env
>>> print(str(_env.ServingEnv()))
Example on how a script can use training environment:
>>>ServingEnv = _env.ServingEnv()
Attributes:
use_nginx (bool): Whether to use nginx as a reverse proxy.
model_server_timeout (int): Timeout in seconds for the model server.
model_server_workers (int): Number of worker processes the model server will use.
framework_module (str): Name of the framework module and entry point. For example:
my_module:main
default_accept (str): The desired default MIME type of the inference in the response
as specified in the user-supplied SAGEMAKER_DEFAULT_INVOCATIONS_ACCEPT environment
variable. Otherwise, returns 'application/json' by default.
For example: application/json
http_port (str): Port that SageMaker will use to handle invocations and pings against
the running Docker container. Default is 8080. For example: 8080
safe_port_range (str): HTTP port range that can be used by customers to avoid collisions
with the HTTP port specified by SageMaker for handling pings and invocations.
For example: 1111-2222
"""
def __init__(self):
super(ServingEnv, self).__init__()
use_nginx = util.strtobool(os.environ.get(_params.USE_NGINX_ENV, "true")) == 1
model_server_timeout = int(os.environ.get(_params.MODEL_SERVER_TIMEOUT_ENV, "60"))
model_server_workers = int(os.environ.get(_params.MODEL_SERVER_WORKERS_ENV, num_cpus()))
framework_module = os.environ.get(_params.FRAMEWORK_SERVING_MODULE_ENV, None)
default_accept = os.environ.get(_params.DEFAULT_INVOCATIONS_ACCEPT_ENV, _content_types.JSON)
http_port = os.environ.get(_params.SAGEMAKER_BIND_TO_PORT_ENV, "8080")
safe_port_range = os.environ.get(_params.SAGEMAKER_SAFE_PORT_RANGE_ENV)
model_server_worker_class_type = os.environ.get(_params.MODEL_SERVER_WORKER_CLASS_TYPE_ENV, "gevent")
model_server_worker_connection = os.environ.get(_params.MODEL_SERVER_WORKER_CONNECTION_ENV, "1200")
model_server_worker_threads = os.environ.get(_params.MODEL_SERVER_THREAD_ENV, "800")
model_server_log_level = os.environ.get(_params.MODEL_SERVER_LOG_LEVEL, "info")
model_server_keep_alive_sec = os.environ.get(_params.MODEL_SERVER_KEEP_ALIVE_SEC, "5")
self._use_nginx = use_nginx
self._model_server_timeout = model_server_timeout
self._model_server_workers = model_server_workers
self._framework_module = framework_module
self._default_accept = default_accept
self._http_port = http_port
self._safe_port_range = safe_port_range
self._model_server_worker_class_type = model_server_worker_class_type
self._model_server_worker_connection = model_server_worker_connection
self._model_server_worker_threads = model_server_worker_threads
self._model_server_log_level = model_server_log_level
self._model_server_keep_alive_sec = model_server_keep_alive_sec
@property
def model_server_keep_alive_sec(self):
return self._model_server_keep_alive_sec
@property
def model_server_log_level(self):
return self._model_server_log_level
@property
def model_server_worker_threads(self):
return self._model_server_worker_threads
@property
def model_server_worker_connection(self):
return self._model_server_worker_connection
@property
def model_server_worker_class_type(self):
return self._model_server_worker_class_type
@property
def use_nginx(self): # type: () -> bool
"""Returns:
bool: whether to use nginx as a reverse proxy. Default: True"""
return self._use_nginx
@property
def model_server_timeout(self): # type: () -> int
"""Returns:
int: Timeout in seconds for the model server. This is passed over to gunicorn,
from the docs:
Workers silent for more than this many seconds are killed and restarted.
Our default value is 60. If ``use_nginx`` is True, then this same value
will be used for nginx's proxy_read_timeout."""
return self._model_server_timeout
@property
def model_server_workers(self): # type: () -> int
"""Returns:
int: Number of worker processes the model server will use"""
return self._model_server_workers
@property
def framework_module(self): # type: () -> str
"""Returns:
str: Name of the framework module and entry point. For example:
my_module:main"""
return self._framework_module
@property
def default_accept(self): # type: () -> str
"""Returns:
str: The desired MIME type of the inference in the response. For example:
application/json.
Default: application/json"""
return self._default_accept
@property
def http_port(self): # type: () -> str
"""Returns:
str: HTTP port that SageMaker will use to handle invocations and pings against
the running Docker container. Default is 8080. For example: 8080"""
return self._http_port
@property
def safe_port_range(self): # type: () -> str
"""Returns:
str: HTTP port range that can be used by customers to avoid collisions with the
HTTP port specified by SageMaker for handling pings and invocations.
For example: 1111-2222"""
return self._safe_port_range
def write_env_vars(env_vars=None): # type: (dict) -> None
"""Write the dictionary env_vars in the system, as environment variables.
    Args:
        env_vars (dict): environment variable names mapped to values; PYTHONPATH is
            always added/overwritten with the current sys.path before writing.
"""
env_vars = env_vars or {}
env_vars["PYTHONPATH"] = ":".join(sys.path)
for name, value in env_vars.items():
os.environ[name] = value
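# Minimal usage sketch (assumed, not part of the original module):
#   env = TrainingEnv()
#   write_env_vars(env.to_env_vars())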
|
the-stack_106_26757 | # -*- coding: utf-8 -*-
# TensorFlow Production Example (Training)
#----------------------------------
#
# We pull together everything and create an example
# of best tensorflow production tips
#
# The example we will productionalize is the spam/ham RNN
# from
import os
import re
import io
import requests
import numpy as np
import tensorflow as tf
from zipfile import ZipFile
from tensorflow.python.framework import ops
ops.reset_default_graph()
# Define App Flags
tf.app.flags.DEFINE_string("storage_folder", "temp", "Where to store Model and data.")
tf.app.flags.DEFINE_float('learning_rate', 0.0005, 'Initial learning rate.')
tf.app.flags.DEFINE_float('dropout_prob', 0.5, 'Keep probability for dropout.')
tf.app.flags.DEFINE_integer('epochs', 20, 'Number of epochs for training.')
tf.app.flags.DEFINE_integer('batch_size', 250, 'Batch Size for training.')
tf.app.flags.DEFINE_integer('max_sequence_length', 20, 'Max sentence length in words.')
tf.app.flags.DEFINE_integer('rnn_size', 15, 'RNN feature size.')
tf.app.flags.DEFINE_integer('embedding_size', 25, 'Word embedding size.')
tf.app.flags.DEFINE_integer('min_word_frequency', 20, 'Word frequency cutoff.')
tf.app.flags.DEFINE_boolean('run_unit_tests', False, 'If true, run tests.')
FLAGS = tf.app.flags.FLAGS
# Define how to get data
def get_data(storage_folder=FLAGS.storage_folder, data_file="text_data.txt"):
"""
This function gets the spam/ham data. It will download it if it doesn't
already exist on disk (at specified folder/file location).
"""
# Make a storage folder for models and data
if not os.path.exists(storage_folder):
os.makedirs(storage_folder)
if not os.path.isfile(os.path.join(storage_folder, data_file)):
zip_url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip'
r = requests.get(zip_url)
z = ZipFile(io.BytesIO(r.content))
file = z.read('SMSSpamCollection')
# Format Data
text_data = file.decode()
text_data = text_data.encode('ascii',errors='ignore')
text_data = text_data.decode().split('\n')
# Save data to text file
with open(os.path.join(storage_folder, data_file), 'w') as file_conn:
for text in text_data:
file_conn.write("{}\n".format(text))
else:
# Open data from text file
text_data = []
with open(os.path.join(storage_folder, data_file), 'r') as file_conn:
for row in file_conn:
text_data.append(row)
text_data = text_data[:-1]
text_data = [x.split('\t') for x in text_data if len(x)>=1]
[y_data, x_data] = [list(x) for x in zip(*text_data)]
return(x_data, y_data)
# Create a text cleaning function
def clean_text(text_string):
text_string = re.sub(r'([^\s\w]|_|[0-9])+', '', text_string)
text_string = " ".join(text_string.split())
text_string = text_string.lower()
return(text_string)
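# e.g. clean_text("--TensorFlow's so Great! ") returns 'tensorflows so great'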
# Test clean_text function
class clean_test(tf.test.TestCase):
# Make sure cleaning function behaves correctly
def clean_string_test(self):
with self.test_session():
test_input = '--TensorFlow\'s so Great! Don\t you think so? '
test_expected = 'tensorflows so great don you think so'
test_out = clean_text(test_input)
self.assertEqual(test_expected, test_out)
# Define RNN Model
def rnn_model(x_data_ph, max_sequence_length, vocab_size, embedding_size,
rnn_size, dropout_keep_prob):
# Create embedding
embedding_mat = tf.Variable(tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0))
embedding_output = tf.nn.embedding_lookup(embedding_mat, x_data_ph)
# Define the RNN cell
cell = tf.contrib.rnn.BasicRNNCell(num_units = rnn_size)
output, state = tf.nn.dynamic_rnn(cell, embedding_output, dtype=tf.float32)
output = tf.nn.dropout(output, dropout_keep_prob)
# Get output of RNN sequence
output = tf.transpose(output, [1, 0, 2])
last = tf.gather(output, int(output.get_shape()[0]) - 1)
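    # After the transpose, 'output' is [max_sequence_length, batch_size, rnn_size],
    # so 'last' holds the RNN output at the final time step: [batch_size, rnn_size].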
weight = tf.Variable(tf.truncated_normal([rnn_size, 2], stddev=0.1))
bias = tf.Variable(tf.constant(0.1, shape=[2]))
logits_out = tf.nn.softmax(tf.matmul(last, weight) + bias)
return(logits_out)
# Define accuracy function
def get_accuracy(logits, actuals):
    # Calculate if each output is correct
batch_acc = tf.equal(tf.argmax(logits, 1), tf.cast(actuals, tf.int64))
# Convert logical to float
batch_acc = tf.cast(batch_acc, tf.float32)
return(batch_acc)
# Define main program
def main(args):
# Set verbosity to get more information from TensorFlow
tf.logging.set_verbosity(tf.logging.INFO)
# Create a visualizer object for Tensorboard viewing
summary_writer = tf.summary.FileWriter('tensorboard', tf.get_default_graph())
# Create tensorboard folder if not exists
if not os.path.exists('tensorboard'):
os.makedirs('tensorboard')
# Set Model parameters
storage_folder = FLAGS.storage_folder
learning_rate = FLAGS.learning_rate
run_unit_tests = FLAGS.run_unit_tests
epochs = FLAGS.epochs
batch_size = FLAGS.batch_size
max_sequence_length = FLAGS.max_sequence_length
rnn_size = FLAGS.rnn_size
embedding_size = FLAGS.embedding_size
min_word_frequency = FLAGS.min_word_frequency
# Get text->spam/ham data
x_data, y_data = get_data()
# Clean texts
x_data = [clean_text(x) for x in x_data]
# Change texts into numeric vectors
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(max_sequence_length,
min_frequency=min_word_frequency)
text_processed = np.array(list(vocab_processor.fit_transform(x_data)))
# Save vocab processor (for loading and future evaluation)
vocab_processor.save(os.path.join(storage_folder, "vocab"))
# Shuffle and split data
text_processed = np.array(text_processed)
y_data = np.array([1 if x=='ham' else 0 for x in y_data])
shuffled_ix = np.random.permutation(np.arange(len(y_data)))
x_shuffled = text_processed[shuffled_ix]
y_shuffled = y_data[shuffled_ix]
# Split train/test set
ix_cutoff = int(len(y_shuffled)*0.80)
x_train, x_test = x_shuffled[:ix_cutoff], x_shuffled[ix_cutoff:]
y_train, y_test = y_shuffled[:ix_cutoff], y_shuffled[ix_cutoff:]
vocab_size = len(vocab_processor.vocabulary_)
with tf.Graph().as_default():
sess = tf.Session()
# Define placeholders
x_data_ph = tf.placeholder(tf.int32, [None, max_sequence_length], name='x_data_ph')
y_output_ph = tf.placeholder(tf.int32, [None], name='y_output_ph')
dropout_keep_prob = tf.placeholder(tf.float32, name='dropout_keep_prob')
# Define Model
rnn_model_outputs = rnn_model(x_data_ph, max_sequence_length, vocab_size,
embedding_size, rnn_size, dropout_keep_prob)
# Prediction
# Although we won't use the following operation, we declare and name
# the probability outputs so that we can recall them later for evaluation
rnn_prediction = tf.nn.softmax(rnn_model_outputs, name="probability_outputs")
# Loss function
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=rnn_model_outputs, labels=y_output_ph)
# Remember that for this loss function, logits=float32, labels=int32
loss = tf.reduce_mean(losses, name="loss")
# Model Accuracy Operation
accuracy = tf.reduce_mean(get_accuracy(rnn_model_outputs, y_output_ph), name="accuracy")
# Add scalar summaries for Tensorboard
with tf.name_scope('Scalar_Summaries'):
tf.summary.scalar('Loss', loss)
tf.summary.scalar('Accuracy', accuracy)
# Declare Optimizer/train step
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train_step = optimizer.minimize(loss)
# Declare summary merging operation
summary_op = tf.summary.merge_all()
# Create a graph/Variable saving/loading operations
saver = tf.train.Saver()
init = tf.global_variables_initializer()
sess.run(init)
# Start training
for epoch in range(epochs):
# Shuffle training data
shuffled_ix = np.random.permutation(np.arange(len(x_train)))
x_train = x_train[shuffled_ix]
y_train = y_train[shuffled_ix]
num_batches = int(len(x_train)/batch_size) + 1
#
for i in range(num_batches):
# Select train data
min_ix = i * batch_size
max_ix = np.min([len(x_train), ((i+1) * batch_size)])
x_train_batch = x_train[min_ix:max_ix]
y_train_batch = y_train[min_ix:max_ix]
# Run train step
train_dict = {x_data_ph: x_train_batch,
y_output_ph: y_train_batch,
dropout_keep_prob:0.5}
_, summary = sess.run([train_step, summary_op], feed_dict=train_dict)
summary_writer = tf.summary.FileWriter('tensorboard')
summary_writer.add_summary(summary, i)
# Run loss and accuracy for training
temp_train_loss, temp_train_acc = sess.run([loss, accuracy], feed_dict=train_dict)
test_dict = {x_data_ph: x_test, y_output_ph: y_test, dropout_keep_prob:1.0}
temp_test_loss, temp_test_acc = sess.run([loss, accuracy], feed_dict=test_dict)
# Print Epoch Summary
print('Epoch: {}, Train Loss:{:.2}, Train Acc: {:.2}'.format(epoch+1, temp_train_loss, temp_train_acc))
print('Epoch: {}, Test Loss: {:.2}, Test Acc: {:.2}'.format(epoch+1, temp_test_loss, temp_test_acc))
# Save Model every epoch
saver.save(sess, os.path.join(storage_folder, "Model.ckpt"))
# Run main module/tf App
if __name__ == "__main__":
if FLAGS.run_unit_tests:
# Perform unit tests
tf.test.main()
else:
# Run evaluation
tf.app.run() |
the-stack_106_26758 | #!/usr/bin/python3
import argparse
import os
import json
import shutil
def loadIOC(filename):
conf = {}
with open(filename) as f:
while True:
line = f.readline().strip()
if not line:
break
if line[0] == '#':
continue
vals = line.split('=', 2)
if len(vals) < 2:
continue
conf[vals[0]] = vals[1]
return conf
def getCore(mcuName):
coreTable = {
"STM32F0": "cortex-m0",
"STM32F1": "cortex-m3",
"STM32F2": "cortex-m3",
"STM32F3": "cortex-m4",
"STM32F4": "cortex-m4",
"STM32F7": "cortex-m7",
"STM32H7": "cortex-m7",
"STM32L0": "cortex-m0",
"STM32L1": "cortex-m3",
"STM32L4": "cortex-m4",
}
for key, value in coreTable.items():
if mcuName.startswith(key):
return value
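# e.g. getCore("STM32F4") -> "cortex-m4"; returns None for unknown families.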
def getFpu(mcuName):
# TODO in case of m7 core, check if it has single or double precision fpu
fpuTable = {
"cortex-m0": None,
"cortex-m3": None,
"cortex-m4": "fpv4-sp-d16",
"cortex-m7": "fpv5-d16"
}
for key, value in fpuTable.items():
if getCore(mcuName) == key:
return value
def joinFwdSlash(*args):
# CMake doesn't like paths with backslashes on Windows
return os.path.join(*args).replace('\\', '/')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Create CMake and vscode config files from CubeMX .ioc project file")
parser.add_argument("srcPath", help="Source path")
parser.add_argument("iocFile", help="CubeMX .ioc project file")
parser.add_argument("-s", help="additional source folder", action="append")
parser.add_argument("-i", help="additional include folder", action="append")
parser.add_argument("-v", help="enable vscode properties setup", action="store_true")
parser.add_argument("-t", help="toolchain location")
args = parser.parse_args()
iocConf = loadIOC(args.iocFile)
cmakeConf = {
"CUBEMX_PROJNAME": iocConf["ProjectManager.ProjectName"],
"CUBEMX_MCUFAMILY": iocConf["Mcu.Family"] + "xx",
"CUBEMX_MCUNAME": iocConf["Mcu.UserName"],
"CUBEMX_MCULINE": iocConf["Mcu.UserName"][0:9] + "xx",
"CUBEMX_LDFILE": joinFwdSlash(args.srcPath,
iocConf["Mcu.UserName"] + "_FLASH.ld"),
"CUBEMX_CPUTYPE": getCore(iocConf["Mcu.Family"]),
"CUBEMX_TOOLCHAIN": joinFwdSlash(args.t, "bin/") if args.t else ""
}
cmakeConf["CUBEMX_STARTUPFILE"] = \
joinFwdSlash(args.srcPath,
"startup_" + cmakeConf["CUBEMX_MCULINE"].lower() + ".s")
core = getCore(iocConf["Mcu.Family"])
mcuFlags = f"-mcpu={core} -mthumb"
fpu = getFpu(iocConf["Mcu.Family"])
mcuFlags += f" -mfpu={fpu} -mfloat-abi=hard" \
if fpu is not None else " -mfloat-abi=soft"
cmakeConf["CUBEMX_MCUFLAGS"] = mcuFlags
cdefs = [
"USE_FULL_LL_DRIVER",
f"HSE_VALUE={iocConf['RCC.HSE_VALUE']}",
f"HSI_VALUE={iocConf['RCC.HSI_VALUE']}",
f"LSI_VALUE={iocConf['RCC.LSI_VALUE']}",
cmakeConf["CUBEMX_MCULINE"]
]
cmakeConf["CUBEMX_CDEFS"] = "\n".join([f"-D{cdef}" for cdef in cdefs])
cmsisDir = joinFwdSlash(args.srcPath, "Drivers", "CMSIS")
deviceDir = joinFwdSlash(cmsisDir,
"Device", "ST", cmakeConf["CUBEMX_MCUFAMILY"])
halDir = joinFwdSlash(args.srcPath,
"Drivers", cmakeConf["CUBEMX_MCUFAMILY"] + "_HAL_Driver")
sourceDirs = [
joinFwdSlash(args.srcPath, "Src"),
joinFwdSlash(halDir, "Src"),
]
if args.s:
sourceDirs += args.s
cmakeConf["CUBEMX_SOURCEDIRS"] = "\n".join(sourceDirs)
includeDirs = [
joinFwdSlash(args.srcPath, "Inc"),
joinFwdSlash(cmsisDir, "Include"),
joinFwdSlash(deviceDir, "Include"),
joinFwdSlash(halDir, "Inc"),
]
if args.i:
includeDirs += args.i
cmakeConf["CUBEMX_INCLUDEDIRS"] = "\n".join(includeDirs)
for key, value in cmakeConf.items():
print(f"{key}={value};", end="")
if args.v:
compilerName = "arm-none-eabi-gcc"
defaultPath = os.path.join(args.t, "bin") if args.t else None
# Create vscode properties
vscodeProps = {
"c_cpp_properties.json": {
"configurations": [
{
"name": "Linux",
"includePath": includeDirs,
"defines": cdefs,
"compilerPath": shutil.which(compilerName, path=defaultPath).replace('\\', '/'),
"cStandard": "c11",
"intelliSenseMode": "gcc-x64"
}
],
"version": 4
},
"launch.json": {
"configurations": [
{
"name": "Cortex Debug",
"cwd": "${workspaceRoot}",
"executable": f"${{workspaceRoot}}/build/{iocConf['ProjectManager.ProjectName']}.elf",
"request": "attach",
"type": "cortex-debug",
"servertype": "openocd",
"configFiles": [
"${workspaceRoot}/openocd.cfg"
]
}
]
},
"settings.json": {
"cortex-debug.armToolchainPath": joinFwdSlash(args.t, "bin") if args.t else ""
}
}
os.makedirs(os.path.join(args.srcPath, ".vscode"), exist_ok=True)
for k, v in vscodeProps.items():
with open(os.path.join(args.srcPath, ".vscode", k), 'w') as outfile:
json.dump(v, outfile, sort_keys=True, indent=4)
|
the-stack_106_26760 | #!/usr/bin/env python
import matplotlib
import numpy as np
import wx
import copy
import os
import pmagpy.pmag as pmag
import pmagpy.ipmag as ipmag
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigCanvas
from matplotlib.backends.backend_wxagg import NavigationToolbar2WxAgg as NavigationToolbar
from pmagpy.demag_gui_utilities import *
from pmag_env import set_env
from numpy import vstack,sqrt
from functools import reduce
has_cartopy, Cartopy = pmag.import_cartopy()
if has_cartopy:
import cartopy.feature as cfeature
import cartopy.crs as ccrs
#============================================================================================
# LOG HEADER:
#
# Dialogs boxes for demag_gui.py
#
#============================================================================================
# 9/22/2016 Version 0.2 (beta) by Kevin Gaastra
#
# 3/10/2014 Version 0.1 (beta) by Ron Shaar
#
#
#============================================================================================
#--------------------------------------------------------------
# VGP viewer
#--------------------------------------------------------------
class VGP_Dialog(wx.Frame):
    """Frame that plots the virtual geomagnetic poles (VGPs) computed by Demag GUI
    on a world map, at the averaging level selected in the drop-down menu."""
def __init__(self,parent,VGP_Data):
if set_env.IS_FROZEN and not set_env.IS_WIN:
parent.user_warning("This feature is not available in the standalone executable. If you need to look at VGPs, consider installing Python and PmagPy: https://earthref.org/PmagPy/cookbook/#getting_python");
self.failed_init=True
return
self.failed_init = False
if not has_cartopy:
parent.user_warning("This feature requires the Cartopy library to function.")
self.failed_init=True
return
super(VGP_Dialog, self).__init__(parent, title="VGP Viewer")
if not isinstance(VGP_Data,dict):
VGP_Data={}
if VGP_Data!={} and not all([len(VGP_Data[k]) for k in list(VGP_Data.keys())]):
parent.user_warning("No VGP Data for VGP viewer to display")
self.Destroy(); self.failed_init=True; return
self.parent=parent
self.WD=parent.WD
self.test_mode=parent.test_mode
self.selected_pole = None
self.selected_pole_index = 0
self.dp_list = []
self.GUI_RESOLUTION=parent.GUI_RESOLUTION
self.VGP_Data = VGP_Data
self.init_UI()
self.fill_logger() #initialize logger
self.plot() #initialize plot
def init_UI(self):
self.panel = wx.Panel(self,-1)
#build Plot
self.fig = Figure((3*self.GUI_RESOLUTION, 3*self.GUI_RESOLUTION),
dpi=100) # , tight_layout=True, frameon=False)
self.canvas = FigCanvas(self.panel, -1, self.fig)
self.toolbar = NavigationToolbar(self.canvas)
self.toolbar.Hide()
self.toolbar.zoom()
self.plot_setting = "Zoom"
# self.canvas.Bind(wx.EVT_LEFT_DCLICK,self.on_pan_zoom_plot)
# self.canvas.Bind(wx.EVT_LEFT_DCLICK,self.on_plot_select)
# self.canvas.Bind(wx.EVT_MOTION,self.on_change_plot_cursor)
# self.canvas.Bind(wx.EVT_MIDDLE_DOWN,self.on_home_plot)
self.canvas.Bind(wx.EVT_MIDDLE_DOWN,self.on_pan_zoom_plot)
#set map parameters
vgp_lons = [dp['vgp_lon'] for dp in self.VGP_Data['sites'] if 'vgp_lon' in dp]
self.mean_lon = sum(vgp_lons)/len(vgp_lons)
#build combobox with VGP level options
self.VGP_level = "sites"
self.combo_box = wx.ComboBox(self.panel, -1, size=(340*self.GUI_RESOLUTION, 25), value=self.VGP_level,
choices=sorted(self.VGP_Data.keys()), style=wx.CB_DROPDOWN | wx.TE_READONLY, name="vgp_level")
self.Bind(wx.EVT_COMBOBOX, self.on_level_box, self.combo_box)
projs = ["Orthographic","Mollweide", "Mercator", "North Polar Stereographic","South Polar Stereographic"]
self.proj_box = wx.ComboBox(self.panel, -1,
size=(340*self.GUI_RESOLUTION,25),
value=projs[0], choices=projs, style=wx.CB_DROPDOWN|wx.TE_READONLY, name="proj")
self.Bind(wx.EVT_COMBOBOX, self.on_proj_box, self.proj_box)
#build logger
self.logger = wx.ListCtrl(self.panel, -1, size=(340*self.GUI_RESOLUTION, 240*self.GUI_RESOLUTION), style=wx.LC_REPORT)
self.logger.InsertColumn(0, 'element', width=50*self.GUI_RESOLUTION)
self.logger.InsertColumn(1, 'fit name', width=50*self.GUI_RESOLUTION)
self.logger.InsertColumn(2, 'lat', width=50*self.GUI_RESOLUTION)
self.logger.InsertColumn(3, 'lon', width=50*self.GUI_RESOLUTION)
self.logger.InsertColumn(4, 'dp', width=50*self.GUI_RESOLUTION)
self.logger.InsertColumn(5, 'dm', width=50*self.GUI_RESOLUTION)
self.logger.InsertColumn(6, 'n', width=50*self.GUI_RESOLUTION)
self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.on_click_listctrl, self.logger)
hbox0 = wx.BoxSizer(wx.HORIZONTAL)
vbox0 = wx.BoxSizer(wx.VERTICAL)
vbox0.Add(self.combo_box,proportion=0,flag=wx.ALIGN_TOP|wx.ALL,border=2)
vbox0.Add(self.proj_box,proportion=0,flag=wx.ALIGN_TOP|wx.ALL,border=2)
vbox0.Add(self.logger,proportion=8,flag=wx.ALIGN_TOP|wx.ALL|wx.EXPAND,border=2)
hbox0.Add(vbox0,proportion=4,flag=wx.ALIGN_TOP|wx.ALL|wx.EXPAND, border=4)
hbox0.Add(self.canvas,proportion=7,flag=wx.ALIGN_TOP|wx.ALL|wx.EXPAND,border=4)
self.panel.SetSizer(hbox0)
# self.panel.SetAutoLayout(True)
hbox0.Fit(self)
self.Layout()
#set hotkeys
# the ids used are arbitrary but needed to bind the accel_table
cid = self.canvas.GetId()
pid = self.panel.GetId()
self.Bind(wx.EVT_MENU, self.on_exit_hk, id=cid)
self.Bind(wx.EVT_MENU, self.save_plot, id=pid)
accel_tbl = wx.AcceleratorTable([(wx.ACCEL_CTRL, ord('Q'), cid ),(wx.ACCEL_CTRL, ord('S'), pid )])
self.SetAcceleratorTable(accel_tbl)
self.Bind(wx.EVT_CLOSE, self.OnClose)
def on_exit_hk(self,event):
self.parent.vgp_open=False
self.Close()
def OnClose(self, event):
self.Destroy()
self.parent.vgp_open=False
def save_plot(self,event):
SaveMyPlot(self.fig,self.VGP_level,"VGPPlot",self.WD,test_mode=self.test_mode)
def on_plot_select(self,event):
"""
Select data point if cursor is in range of a data point
@param: event -> the wx Mouseevent for that click
"""
if not self.xdata or not self.ydata: return
pos=event.GetPosition()
width, height = self.canvas.get_width_height()
pos[1] = height - pos[1]
xpick_data,ypick_data = pos
xdata_org = self.xdata
ydata_org = self.ydata
data_corrected = self.map.transData.transform(vstack([xdata_org,ydata_org]).T)
xdata,ydata = data_corrected.T
xdata = list(map(float,xdata))
ydata = list(map(float,ydata))
e = 4e0
index = None
for i,(x,y) in enumerate(zip(xdata,ydata)):
if 0 < sqrt((x-xpick_data)**2. + (y-ypick_data)**2.) < e:
index = i
break
        if index is None:
            print("Couldn't find point %.1f,%.1f"%(xpick_data,ypick_data))
            return
        self.change_selected(index)
def on_change_plot_cursor(self,event):
"""
If mouse is over data point making it selectable change the shape of the cursor
@param: event -> the wx Mouseevent for that click
"""
if not self.xdata or not self.ydata: return
pos=event.GetPosition()
width, height = self.canvas.get_width_height()
pos[1] = height - pos[1]
xpick_data,ypick_data = pos
xdata_org = self.xdata
ydata_org = self.ydata
data_corrected = self.map.transData.transform(vstack([xdata_org,ydata_org]).T)
xdata,ydata = data_corrected.T
xdata = list(map(float,xdata))
ydata = list(map(float,ydata))
e = 4e0
if self.plot_setting == "Zoom":
self.canvas.SetCursor(wx.Cursor(wx.CURSOR_CROSS))
else:
self.canvas.SetCursor(wx.Cursor(wx.CURSOR_ARROW))
for i,(x,y) in enumerate(zip(xdata,ydata)):
if 0 < sqrt((x-xpick_data)**2. + (y-ypick_data)**2.) < e:
self.canvas.SetCursor(wx.Cursor(wx.CURSOR_HAND))
break
event.Skip()
def on_proj_box(self,event):
self.plot()
def on_home_plot(self,event):
self.toolbar.home()
def on_pan_zoom_plot(self,event):
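        # Bound to middle-click in init_UI: toggles the matplotlib toolbar between pan
        # and zoom modes, ignoring the event while the left mouse button is held down.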
if event.LeftIsDown():
return
elif self.plot_setting == "Zoom":
self.plot_setting = "Pan"
try: self.toolbar.pan('off')
except TypeError: print('error in changing plot function to pan')
elif self.plot_setting == "Pan":
self.plot_setting = "Zoom"
try: self.toolbar.zoom()
except TypeError: print('error in changing plot function to zoom')
def on_level_box(self,event):
self.VGP_level=self.combo_box.GetValue()
plons = [dp["vgp_lon"] for dp in self.VGP_Data[self.VGP_level]]
self.mean_lon = sum(plons)/len(plons)
self.fill_logger(); self.plot()
def draw_map(self):
#set basemap
try:
self.fig.delaxes(self.map)
except (AttributeError, KeyError):
pass
self.fig.clf(keep_observers=True)
if self.proj_box.GetValue() == 'Orthographic':
self.proj = ccrs.Orthographic(central_longitude=self.mean_lon, globe=None)
elif self.proj_box.GetValue() == 'Mollweide':
# __import__('pdb').set_trace()
self.proj = ccrs.Mollweide()
elif self.proj_box.GetValue() == 'Mercator':
# __import__('pdb').set_trace()
self.proj = ccrs.Mercator()#central_longitude=self.mean_lon)
elif self.proj_box.GetValue() == 'North Polar Stereographic':
self.proj = ccrs.NorthPolarStereo(central_longitude=0,true_scale_latitude=None,globe=None)
# self.map = self.fig.add_subplot(111,projection=self.proj)
elif self.proj_box.GetValue() == 'South Polar Stereographic':
self.proj = ccrs.SouthPolarStereo(central_longitude=0,true_scale_latitude=None,globe=None)
else:
self.parent.user_warning(
"Projection %s not supported" % str(self.proj_box.GetValue()))
self.map = self.fig.add_subplot(1,1,1,projection=self.proj)
self.map.set_global()
land = cfeature.NaturalEarthFeature('physical', 'land',
'110m',edgecolor="black",facecolor="bisque")
self.map.add_feature(land)
self.map.gridlines()
self.canvas.figure = self.fig
def plot(self):
self.xdata,self.ydata = [],[]
data = self.VGP_Data[self.VGP_level]
self.draw_map()
for dp in data:
lat, lon, slat, slon = float(dp['vgp_lat']), float(dp['vgp_lon']), float(dp['lat']), float(dp['lon'])
azi = self.orient_dp_dm(lat, lon, slat, slon)
if self.selected_pole == dp['name']+dp['comp_name']:
marker = 'D'
else:
marker = 'o'
FC = dp['color']
EC = 'black'
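            # In the polar stereographic views, poles on the far hemisphere are plotted
            # at their antipode with an open symbol (white face, colored edge).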
if self.proj_box.GetValue() == "North Polar Stereographic" and lat < 0:
FC, EC, lat, lon = 'white', dp['color'], -lat, (lon+180) % 360
elif self.proj_box.GetValue() == "South Polar Stereographic" and lat > 0:
FC, EC, lat, lon = 'white', dp['color'], -lat, (lon+180) % 360
self.map.scatter([lon], [lat], marker=marker, edgecolors=EC, facecolor=FC,
s=30, lw=1, clip_on=False, zorder=2, transform=ccrs.Geodetic())
if self.combo_box.GetValue() != "samples":
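                # vgp_dp / vgp_dm are ellipse semi-axes in degrees of arc; multiplying by
                # ~111.32 km per degree converts them to the kilometre distances that
                # ipmag.ellipse appears to expect here.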
ellipse_ran = ipmag.ellipse(self.map, lon, lat, float(dp["vgp_dp"])*111.32,
float(dp["vgp_dm"])*111.32, azi, color=dp['color'])
if not ellipse_ran:
print("-E- An error occurred while plotting VGP data for",
"{} in the {} projection.".format(dp['name'], self.proj_box.GetValue()))
self.xdata.append(lon)
self.ydata.append(lat)
self.canvas.draw()
def orient_dp_dm(self,plat,plon,slat,slon):
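        # Solve the spherical triangle formed by the geographic north pole, the site,
        # and the VGP (law of cosines, then law of sines): C_rad is the angle at the
        # VGP between its meridian and the great circle to the site, and the sign
        # logic below turns it into the azimuth used to orient the dp/dm ellipse.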
site_lon_rad = np.deg2rad(slon)
site_lat_rad = np.deg2rad(slat)
c_rad = np.deg2rad(90-slat)
pole_lon_rad = np.deg2rad(plon)
pole_lat_rad = np.deg2rad(plat)
a_rad = np.deg2rad(90-plat)
B_rad = np.abs(pole_lon_rad-site_lon_rad)
cos_b = np.cos(c_rad)*np.cos(a_rad) + np.sin(c_rad)*np.sin(a_rad)*np.cos(B_rad)
b_rad = np.arccos(cos_b)
sin_C = (np.sin(B_rad)/np.sin(b_rad))*np.sin(c_rad)
C_rad = np.arcsin(sin_C)
#need to make the rotation of the ellipse go the right way
if slon-plon > 180:
if plon>=slon and plat>=slat:
C_deg = -np.abs(np.rad2deg(C_rad))
elif plon<=slon and plat>=slat:
C_deg = np.abs(np.rad2deg(C_rad))
elif plon>=slon and plat<=slat:
C_deg = np.abs(np.rad2deg(C_rad))
elif plon<=slon and plat<=slat:
C_deg = -np.abs(np.rad2deg(C_rad))
elif slon-plon <= 180:
if plon>=slon and plat>=slat:
C_deg = np.abs(np.rad2deg(C_rad))
elif plon<=slon and plat>=slat:
C_deg = -np.abs(np.rad2deg(C_rad))
elif plon>=slon and plat<=slat:
C_deg = -np.abs(np.rad2deg(C_rad))
elif plon<=slon and plat<=slat:
C_deg = np.abs(np.rad2deg(C_rad))
return C_deg
def fill_logger(self):
self.logger.DeleteAllItems(); self.dp_list = []
data = self.VGP_Data[self.VGP_level]
for i,dp in enumerate(data): self.update_logger_entry(i,dp)
def update_logger_entry(self,i,pars):
if len(self.dp_list)>i:
self.dp_list.pop(i)
self.dp_list.insert(i,pars['name']+pars['comp_name'])
if i < self.logger.GetItemCount():
self.logger.DeleteItem(i)
self.logger.InsertItem(i, str(pars['name']))
self.logger.SetItem(i, 1, str(pars['comp_name']))
self.logger.SetItem(i, 2, str(pars['vgp_lat']))
self.logger.SetItem(i, 3, str(pars['vgp_lon']))
self.logger.SetItem(i, 4, str(pars['vgp_dp']))
self.logger.SetItem(i, 5, str(pars['vgp_dm']))
self.logger.SetItem(i, 6, str(pars['n']))
self.logger.SetItemBackgroundColour(i,"WHITE")
if self.selected_pole_index==i:
self.selected_pole=pars['name']+pars['comp_name']
self.logger.SetItemBackgroundColour(i,"LIGHT BLUE")
def change_selected(self,i):
old_pole_index = self.selected_pole_index
self.selected_pole_index = i
self.logger.SetItemBackgroundColour(old_pole_index,"WHITE")
self.logger.SetItemBackgroundColour(self.selected_pole_index,"LIGHT BLUE")
self.selected_pole = self.dp_list[self.selected_pole_index]
self.plot()
def on_click_listctrl(self,event):
self.change_selected(event.GetIndex())
#--------------------------------------------------------------
# Save plots
#--------------------------------------------------------------
class SaveMyPlot(wx.Frame):
    """Prompt for a file name and save the given matplotlib figure to it."""
def __init__(self,fig,name,plot_type,dir_path,test_mode=False):
"""Constructor"""
wx.Frame.__init__(self, parent=None, title="")
file_choices="(*.pdf)|*.pdf|(*.svg)|*.svg| (*.png)|*.png"
default_fig_name="%s_%s.pdf"%(name,plot_type)
dlg = wx.FileDialog(
self,
message="Save plot as...",
defaultDir=dir_path,
defaultFile=default_fig_name,
wildcard=file_choices,
style=wx.FD_SAVE)
dlg.Center()
if test_mode: result=dlg.GetAffirmativeId()
else: result=dlg.ShowModal()
if result == wx.ID_OK:
path = dlg.GetPath()
else:
return
title=name
self.panel = wx.Panel(self)
self.dpi=300
canvas_tmp_1 = FigCanvas(self.panel, -1, fig)
canvas_tmp_1.print_figure(path, dpi=self.dpi)
#--------------------------------------------------------------
# MagIC specimens table dialog
#--------------------------------------------------------------
class magic_pmag_specimens_table_dialog(wx.Dialog):
def __init__(self,parent):
super(magic_pmag_specimens_table_dialog, self).__init__(parent, title="MagIC specimens table dialog")
self.InitUI()
def InitUI(self):
pnl1 = wx.Panel(self)
vbox = wx.StaticBoxSizer(wx.StaticBox( pnl1, wx.ID_ANY, "MagIC result tables options" ), wx.VERTICAL)
#---------------------
# Acceptance criteria
#---------------------
#self.acceptance_criteria_text=wx.StaticText(pnl1,label="apply acceptance criteria from criteria.txt:",style=wx.TE_CENTER)
#self.cb_acceptance_criteria= wx.CheckBox(pnl1, -1, 'apply acceptance criteria from criteria.txt', (10, 30))
#---------------------
# choose coordinate system
#---------------------
self.coor_text=wx.StaticText(pnl1,label="choose which coordinate systems to save in specimens table:",style=wx.TE_CENTER)
#self.rb_spec_coor = wx.RadioButton(pnl1, -1, 'specimen', (10, 10), style=wx.RB_GROUP)
#self.rb_geo_coor = wx.RadioButton(pnl1, -1, 'geographic', (10, 30))
#self.rb_tilt_coor = wx.RadioButton(pnl1, -1, 'tilt-corrected', (10, 30))
self.cb_spec_coor = wx.CheckBox(pnl1, -1, label='specimen')
self.cb_geo_coor = wx.CheckBox(pnl1, -1, label='geographic')
self.cb_tilt_coor = wx.CheckBox(pnl1, -1, label='tilt-corrected')
#self.rb_geo_tilt_coor = wx.RadioButton(pnl1, -1, 'geographic and tilt-corrected', (10, 30))
self.cb_spec_coor.SetValue(True)
self.cb_geo_coor.SetValue(False)
self.cb_tilt_coor.SetValue(False)
#self.rb_geo_coor.SetValue(True)
#self.rb_tilt_coor.SetValue(True)
#self.rb_geo_tilt_coor.SetValue(True)
coordinates_window = wx.GridSizer(1, 3, 6, 6)
coordinates_window.AddMany( [(self.cb_spec_coor),
(self.cb_geo_coor),
(self.cb_tilt_coor)])
#(self.rb_geo_tilt_coor)])
#---------------------
# OK/Cancel buttons
#---------------------
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.okButton = wx.Button(pnl1, wx.ID_OK, "&OK")
self.cancelButton = wx.Button(pnl1, wx.ID_CANCEL, '&Cancel')
hboxok.Add(self.cancelButton)
hboxok.AddSpacer(20)
hboxok.Add(self.okButton)
#---------------------
vbox.Add(wx.StaticLine(pnl1), 0, wx.ALL|wx.EXPAND, 5)
vbox.AddSpacer(10)
vbox.Add(self.coor_text,flag=wx.ALIGN_CENTER_HORIZONTAL, border=100)
vbox.AddSpacer(10)
vbox.Add(coordinates_window,flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(10)
#-------------
vbox1=wx.BoxSizer(wx.VERTICAL)
vbox1.AddSpacer(10)
vbox1.Add(vbox)
vbox1.AddSpacer(10)
vbox1.Add(hboxok,flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox1.AddSpacer(10)
pnl1.SetSizer(vbox1)
vbox1.Fit(self)
self.okButton.SetDefault()
#--------------------------------------------------------------
# No Lat, Lon for VGP dialog
#--------------------------------------------------------------
class user_input(wx.Dialog):
"""
Generic user input dialog that asks for input any set of inputs into a series of TextCtrls
"""
def __init__(self,parent,inputs,parse_funcs=[],heading=None,title="User Input Required",values=[]):
"""
@param: parent - the wx.Frame calling the dialog
@param: inputs - a list of strings giving the names of the inputs wanted
@param: parse_funcs - a list of the functions to apply to inputs, None for any entry will result in return of raw input.
@param: heading - string giving the heading for the dialog if None a default heading will be constructed
"""
super(user_input, self).__init__(parent, title=title)
self.inputs = inputs
self.parse_funcs = parse_funcs
self.InitUI(heading,values=values)
def InitUI(self,heading,values=[]):
#make header and panel
pnl1 = wx.Panel(self)
        if heading is None:
heading = "User Input required for values: " + reduce(lambda x,y: x+','+y, self.inputs)
vbox = wx.StaticBoxSizer(wx.StaticBox(pnl1, wx.ID_ANY,heading), wx.VERTICAL)
#make inputs
list_ctrls_for_window=[]
self.list_ctrls=[]
if len(values) != len(self.inputs): values = ['' for _ in range(len(self.inputs))]
for inp,val in zip(self.inputs,values):
list_ctrls_for_window.append((wx.StaticText(pnl1,label=inp,style=wx.TE_CENTER), wx.EXPAND))
self.list_ctrls.append(wx.TextCtrl(pnl1,value=str(val),style=wx.TE_CENTER,size=(200,20)))
list_ctrls_for_window.append(self.list_ctrls[-1])
ctrl_window = wx.GridSizer(2, len(self.list_ctrls), 6, 6)
ctrl_window.AddMany(list_ctrls_for_window)
#make okay and cancel buttons
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.okButton = wx.Button(pnl1, wx.ID_OK, "&OK")
self.cancelButton = wx.Button(pnl1, wx.ID_CANCEL, '&Cancel')
hboxok.Add(self.okButton)
hboxok.AddSpacer(20)
hboxok.Add(self.cancelButton)
vbox.Add(wx.StaticLine(pnl1), 0, wx.ALL|wx.EXPAND, 5)
vbox.Add(ctrl_window, 0, wx.ALL|wx.EXPAND, 5)
vbox.AddSpacer(10)
vbox.Add(hboxok, 0, wx.ALL|wx.EXPAND, 5)
pnl1.SetSizer(vbox)
vbox.Fit(self)
def get_values(self):
"""
        Applies the parsing functions given at construction to each input. Returns a
        tuple whose first entry is a boolean indicating whether the user entered every
        value, and whose second entry is a dictionary mapping input names to parsed values.
"""
return_dict = {}
for i,ctrl in enumerate(self.list_ctrls):
if hasattr(self.parse_funcs,'__getitem__') and len(self.parse_funcs)>i and hasattr(self.parse_funcs[i],'__call__'):
try: return_dict[self.inputs[i]] = self.parse_funcs[i](ctrl.GetValue())
except: return_dict[self.inputs[i]] = ctrl.GetValue()
else:
return_dict[self.inputs[i]] = ctrl.GetValue()
return ('' not in list(return_dict.values()), return_dict)
#--------------------------------------------------------------
# MagIC results tables dialog
#--------------------------------------------------------------
class magic_pmag_tables_dialog(wx.Dialog):
def __init__(self,parent,WD,Data,Data_info):
super(magic_pmag_tables_dialog, self).__init__(parent, title="MagIC results table Dialog")
self.InitUI()
def InitUI(self):
pnl1 = wx.Panel(self)
vbox = wx.StaticBoxSizer(wx.StaticBox( pnl1, wx.ID_ANY, "MagIC result tables options" ), wx.VERTICAL)
#---------------------
# Acceptance criteria
#---------------------
#self.acceptance_criteria_text=wx.StaticText(pnl1,label="apply acceptance criteria from criteria.txt:",style=wx.TE_CENTER)
self.cb_acceptance_criteria= wx.CheckBox(pnl1, -1, 'apply acceptance criteria from criteria.txt', (10, 30))
#---------------------
# choose coordinate system
#---------------------
self.coor_text=wx.StaticText(pnl1,label="coordinate system:",style=wx.TE_CENTER)
self.rb_spec_coor = wx.RadioButton(pnl1, -1, 'specimen', (10, 10), style=wx.RB_GROUP)
self.rb_geo_coor = wx.RadioButton(pnl1, -1, 'geographic', (10, 30))
self.rb_tilt_coor = wx.RadioButton(pnl1, -1, 'tilt-corrected', (10, 30))
self.rb_geo_tilt_coor = wx.RadioButton(pnl1, -1, 'geographic and tilt-corrected', (10, 30))
self.rb_geo_coor.SetValue(True)
coordinates_window = wx.GridSizer(1, 4, 5, 5)
coordinates_window.AddMany( [(self.rb_spec_coor),
(self.rb_geo_coor),
(self.rb_tilt_coor),
(self.rb_geo_tilt_coor)])
#---------------------
# default age
#---------------------
self.default_age_text = wx.StaticText(pnl1, label="If a site's age is not defined in the sites or ages table, you can provide it here:", style=wx.TE_CENTER)
self.add_ages = wx.CheckBox(pnl1, -1, 'add ages')
self.Bind(wx.EVT_CHECKBOX, self.toggle_ages, self.add_ages)
# all hideable age stuff
self.ages_optional = wx.StaticBoxSizer(wx.StaticBox(pnl1, wx.ID_ANY, "" ), wx.VERTICAL)
# age & age sigma
self.default_age = wx.TextCtrl(pnl1,style=wx.TE_CENTER,size=(50,20))
self.default_age_sigma = wx.TextCtrl(pnl1,style=wx.TE_CENTER,size=(50,20))
age_unit_choices = ['Years BP', 'Years AD (+/-)', 'Years Cal BP', 'Years Cal AD (+/-)', 'ka', 'Ma', 'Ga']
#or age_high and age_low
self.default_age_min=wx.TextCtrl(pnl1,style=wx.TE_CENTER,size=(50,20))
self.default_age_max=wx.TextCtrl(pnl1,style=wx.TE_CENTER,size=(50,20))
self.default_age_unit=wx.ComboBox(pnl1, -1,size=(150, -1), value = '', choices=age_unit_choices, style=wx.CB_READONLY)
self.ages_note = wx.StaticText(pnl1, label="All sites must have an age associated with them. Either the age or both the younger and older bounds must be given.\nIf the age is given, younger and/or older bounds can be added for special cases.\nIf the uncertainty of an age is known, enter it in the 'age one sigma' box.\nRemember that many age uncertainties are published as two sigma. In that case, just divide the two sigma value by 2.\nNote: values provided here will NOT overwrite values already in your sites file, they will just fill in the blanks.", style=wx.TE_CENTER)
default_age_window = wx.GridSizer(2, 5, 5, 5)
default_age_window.AddMany( [(wx.StaticText(pnl1,label="age",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="age one sigma",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="younger bound",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="older bound",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="units",style=wx.TE_CENTER), wx.EXPAND),
#(wx.StaticText(pnl1,label="",style=wx.TE_CENTER), wx.EXPAND),
# row 2
(self.default_age, wx.EXPAND),
(self.default_age_sigma, wx.EXPAND),
(self.default_age_min,wx.EXPAND),
(self.default_age_max,wx.EXPAND),
(self.default_age_unit,wx.EXPAND)
#(wx.StaticText(pnl1,label="",style=wx.TE_CENTER), wx.EXPAND)
])
self.ages_optional.AddMany([default_age_window, self.ages_note])
#---------------------
# sample
#---------------------
self.cb_sample_mean=wx.CheckBox(pnl1, -1, 'calculate sample mean ', (10, 30))
self.Bind(wx.EVT_CHECKBOX,self.on_change_cb_sample_mean,self.cb_sample_mean)
self.cb_sample_mean.SetValue(False)
sample_mean_choices=['specimens']
self.combo_sample_mean=wx.ComboBox(pnl1, -1,size=(150, -1), value = 'specimens', choices=sample_mean_choices, style=wx.CB_READONLY)
sample_mean_types=['Fisher']
self.combo_sample_type=wx.ComboBox(pnl1, -1,size=(150, -1), value = 'Fisher', choices=sample_mean_types, style=wx.CB_READONLY)
self.cb_sample_mean_VGP=wx.CheckBox(pnl1, -1, 'calculate sample VGP', (10, 30))
self.cb_sample_mean_VGP.SetValue(False)
self.Bind(wx.EVT_CHECKBOX,self.on_change_cb_sample_mean_VGP,self.cb_sample_mean_VGP)
sample_mean_window = wx.GridSizer(2, 4, 6, 6)
sample_mean_window.AddMany( [(wx.StaticText(pnl1,label="",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="average sample by:",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="calculation type",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="",style=wx.TE_CENTER), wx.EXPAND),
(self.cb_sample_mean,wx.EXPAND),
(self.combo_sample_mean,wx.EXPAND),
(self.combo_sample_type,wx.EXPAND),
(self.cb_sample_mean_VGP,wx.EXPAND)])
#---------------------
# site
#---------------------
self.cb_site_mean=wx.CheckBox(pnl1, -1, 'calculate site mean ', (10, 30))
self.cb_site_mean.SetValue(True)
site_mean_choices=['specimens','samples']
self.combo_site_mean=wx.ComboBox(pnl1, -1,size=(150, -1), value = 'specimens', choices=site_mean_choices, style=wx.CB_READONLY)
self.Bind(wx.EVT_COMBOBOX,self.on_change_site_mean,self.combo_site_mean)
site_mean_types=['Fisher']
self.combo_site_type=wx.ComboBox(pnl1, -1,size=(150, -1), value = 'Fisher', choices=site_mean_types, style=wx.CB_READONLY)
self.cb_site_mean_VGP=wx.CheckBox(pnl1, -1, 'calculate site VGP', (10, 30))
self.cb_site_mean_VGP.SetValue(True)
self.Bind(wx.EVT_CHECKBOX,self.on_change_cb_site_mean_VGP,self.cb_site_mean_VGP)
site_mean_window = wx.GridSizer(2, 4, 6, 6)
site_mean_window.AddMany( [(wx.StaticText(pnl1,label="",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="average site by:",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="calculation type",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="",style=wx.TE_CENTER), wx.EXPAND),
(self.cb_site_mean,wx.EXPAND),
(self.combo_site_mean,wx.EXPAND),
(self.combo_site_type,wx.EXPAND),
(self.cb_site_mean_VGP,wx.EXPAND)])
#---------------------
# location
#---------------------
self.cb_location_mean=wx.CheckBox(pnl1, -1, 'calculate location mean', (10, 30))
self.cb_location_mean.SetValue(False)
location_mean_choices=['sites']
self.combo_location_mean=wx.ComboBox(pnl1, -1,size=(150, -1), value = 'sites', choices=location_mean_choices, style=wx.CB_READONLY)
location_mean_types=['Fisher-separate polarities']
self.combo_loction_type=wx.ComboBox(pnl1, -1,size=(150, -1), value = 'Fisher-separate polarities', choices=location_mean_types, style=wx.CB_READONLY)
self.cb_location_mean_VGP=wx.CheckBox(pnl1, -1, 'calculate location VGP', (10, 30))
self.cb_location_mean_VGP.SetValue(True)
#self.Bind(wx.EVT_CHECKBOX,self.on_change_cb_location_mean_VGP,self.cb_location_mean_VGP)
loaction_mean_window = wx.GridSizer(2, 4, 6, 6)
loaction_mean_window.AddMany( [(wx.StaticText(pnl1,label="",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="average location by:",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="calculation type",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="",style=wx.TE_CENTER), wx.EXPAND),
(self.cb_location_mean,wx.EXPAND),
(self.combo_location_mean,wx.EXPAND),
(self.combo_loction_type,wx.EXPAND),
(self.cb_location_mean_VGP,wx.EXPAND)])
#---------------------
# OK/Cancel buttons
#---------------------
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.okButton = wx.Button(pnl1, wx.ID_OK, "&OK")
self.cancelButton = wx.Button(pnl1, wx.ID_CANCEL, '&Cancel')
hboxok.Add(self.okButton)
hboxok.AddSpacer(20)
hboxok.Add(self.cancelButton)
#---------------------
vbox.Add(wx.StaticLine(pnl1), 0, wx.ALL|wx.EXPAND, 5)
vbox.AddSpacer(10)
vbox.Add(self.cb_acceptance_criteria,flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(10)
vbox.Add(wx.StaticLine(pnl1), 0, wx.ALL|wx.EXPAND, 5)
vbox.AddSpacer(10)
vbox.Add(self.coor_text,flag=wx.ALIGN_CENTER_HORIZONTAL, border=100)
vbox.AddSpacer(10)
vbox.Add(coordinates_window,flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(10)
vbox.Add(wx.StaticLine(pnl1), 0, wx.ALL|wx.EXPAND, 5)
vbox.AddSpacer(10)
vbox.Add(self.default_age_text,flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(10)
vbox.Add(self.add_ages, flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(10)
vbox.Add(self.ages_optional)
self.ages_optional.ShowItems(False)
#vbox.Add(default_age_window,flag=wx.ALIGN_CENTER_HORIZONTAL)
#vbox.AddSpacer(10)
#vbox.Add(self.ages_note, flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(10)
vbox.Add(wx.StaticLine(pnl1), 0, wx.ALL|wx.EXPAND, 5)
vbox.AddSpacer(10)
vbox.Add(sample_mean_window,flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(10)
vbox.Add(wx.StaticLine(pnl1), 0, wx.ALL|wx.EXPAND, 5)
vbox.AddSpacer(10)
vbox.Add(site_mean_window,flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(10)
vbox.Add(wx.StaticLine(pnl1), 0, wx.ALL|wx.EXPAND, 5)
vbox.AddSpacer(10)
vbox.Add(loaction_mean_window,flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(10)
vbox.Add(wx.StaticLine(pnl1), 0, wx.ALL|wx.EXPAND, 5)
vbox.AddSpacer(10)
#-------------
self.vbox1=wx.BoxSizer(wx.VERTICAL)
self.vbox1.AddSpacer(10)
self.vbox1.Add(vbox)
self.vbox1.AddSpacer(10)
self.vbox1.Add(hboxok,flag=wx.ALIGN_CENTER_HORIZONTAL)
self.vbox1.AddSpacer(10)
pnl1.SetSizer(self.vbox1)
self.vbox1.Fit(self)
def toggle_ages(self, event):
if self.add_ages.GetValue():
self.ages_optional.ShowItems(True)
else:
self.ages_optional.ShowItems(False)
self.vbox1.Fit(self)
self.Centre()
def on_change_cb_sample_mean_VGP(self,event):
if self.cb_sample_mean_VGP.GetValue()==True:
self.cb_site_mean_VGP.SetValue(False)
def on_change_cb_site_mean_VGP(self,event):
if self.cb_site_mean_VGP.GetValue()==True:
self.cb_sample_mean_VGP.SetValue(False)
def on_change_cb_location_mean_VGP(self,event):
if self.cb_location_mean_VGP.GetValue()==True:
self.cb_location_mean_VGP.SetValue(False)
def on_change_cb_sample_mean(self,event):
if self.combo_site_mean.GetValue()=='samples' and not self.cb_sample_mean.GetValue():
self.combo_site_mean.SetValue('specimens')
def on_change_site_mean(self,event):
if self.combo_site_mean.GetValue()=='samples' and not self.cb_sample_mean.GetValue():
self.cb_sample_mean.SetValue(True)
#--------------------------------------------------------------
# MagIC results tables dialog
#--------------------------------------------------------------
#--------------------------------------------------------------
# MagIC generic files conversion
#--------------------------------------------------------------
"""
class convert_generic_files_to_MagIC(wx.Frame):
title = "PmagPy Thellier GUI generic file conversion"
def __init__(self,WD):
wx.Frame.__init__(self, None, wx.ID_ANY, self.title)
self.panel = wx.Panel(self)
#self.MakeModal(True)
self.max_files=10
self.WD=WD
self.InitUI()
self.END=False
def InitUI(self):
pnl = self.panel
#---sizer infor ----
TEXT=[]
TEXT.append("A generic file is a tab-delimited file with the following headers:\n")
TEXT.append("specimen treatment step moment dec_s inc_s dec_g inc_g dec_t inc_t \n")
TEXT.append("treatment: N [NRM], A[AF] T[Thermal].\n")
TEXT.append("step: if treatment=N: should be 0.\n")
TEXT.append("step: if treatment=A: peak field in mT.\n")
TEXT.append("step: if treatment=T: Temperature in C.\n")
TEXT.append("moment: magnetic moment in units of emu.\n")
TEXT.append("dec_s inc_s: declination/inclination in specimen coordinates\n" )
TEXT.append("dec_g inc_g: declination/inclination in geographic coordinates\n")
TEXT.append("dec_t inc_t: declination/inclination in tilt corrected coordinates\n")
TEXT.append("\n At least one set of dec/inc is required.\n")
TEXT.append("\n The order of the columns is not important.\n")
STRING="".join(TEXT)
bSizer_info = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.HORIZONTAL )
bSizer_info.Add(wx.StaticText(pnl,label=STRING),wx.ALIGN_LEFT)
#---sizer 0 ----
TEXT="file:\n choose measurement file\n no spaces are allowed in path"
bSizer0 = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.VERTICAL )
bSizer0.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_TOP)
bSizer0.AddSpacer(5)
for i in range(self.max_files):
command= "self.file_path_%i = wx.TextCtrl(self.panel, id=-1, size=(200,25), style=wx.TE_READONLY)"%i
exec(command)
command= "self.add_file_button_%i = wx.Button(self.panel, id=-1, label='add',name='add_%i')"%(i,i)
exec(command)
command= "self.Bind(wx.EVT_BUTTON, self.on_add_file_button_i, self.add_file_button_%i)"%i
#print command
exec(command)
command="bSizer0_%i = wx.BoxSizer(wx.HORIZONTAL)"%i
exec(command)
command="bSizer0_%i.Add(wx.StaticText(pnl,label=('%i '[:2])),wx.ALIGN_LEFT)"%(i,i+1)
exec(command)
command="bSizer0_%i.Add(self.file_path_%i,wx.ALIGN_LEFT)" %(i,i)
exec(command)
command="bSizer0_%i.Add(self.add_file_button_%i,wx.ALIGN_LEFT)" %(i,i)
exec(command)
command="bSizer0.Add(bSizer0_%i,wx.ALIGN_TOP)" %i
exec(command)
bSizer0.AddSpacer(5)
# #---sizer 1 ----
#
# TEXT="\n\nExperiment:"
# bSizer1 = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.VERTICAL )
# bSizer1.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_TOP)
# self.experiments_names=['IZZI','IZ','ZI','ATRM 6 positions','cooling rate','NLT']
# bSizer1.AddSpacer(5)
# for i in range(self.max_files):
# command="self.protocol_info_%i = wx.ComboBox(self.panel, -1, self.experiments_names[0], size=(100,25), choices=self.experiments_names, style=wx.CB_DROPDOWN)"%i
# #command="self.protocol_info_%i = wx.TextCtrl(self.panel, id=-1, size=(100,20), style=wx.TE_MULTILINE | wx.HSCROLL)"%i
# #print command
# exec command
# command="bSizer1.Add(self.protocol_info_%i,wx.ALIGN_TOP)"%i
# exec command
# bSizer1.AddSpacer(5)
#---sizer 2 ----
#TEXT="Blab:\n(microT dec inc)\nexample: 40 0 -90 "
#bSizer2 = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.VERTICAL )
#bSizer2.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_TOP)
#bSizer2.AddSpacer(5)
#for i in range(self.max_files):
# command= "self.file_info_Blab_%i = wx.TextCtrl(self.panel, id=-1, size=(40,25))"%i
# exec command
# command= "self.file_info_Blab_dec_%i = wx.TextCtrl(self.panel, id=-1, size=(40,25))"%i
# exec command
# command= "self.file_info_Blab_inc_%i = wx.TextCtrl(self.panel, id=-1, size=(40,25))"%i
# exec command
# command="bSizer2_%i = wx.BoxSizer(wx.HORIZONTAL)"%i
# exec command
# command="bSizer2_%i.Add(self.file_info_Blab_%i ,wx.ALIGN_LEFT)" %(i,i)
# exec command
# command="bSizer2_%i.Add(self.file_info_Blab_dec_%i,wx.ALIGN_LEFT)" %(i,i)
# exec command
# command="bSizer2_%i.Add(self.file_info_Blab_inc_%i,wx.ALIGN_LEFT)" %(i,i)
# exec command
# command="bSizer2.Add(bSizer2_%i,wx.ALIGN_TOP)" %i
# exec command
# bSizer2.AddSpacer(5)
#self.blab_info = wx.TextCtrl(self.panel, id=-1, size=(80,250), style=wx.TE_MULTILINE | wx.HSCROLL)
#bSizer2.Add(self.blab_info,wx.ALIGN_TOP)
#---sizer 3 ----
TEXT="\nUser\nname:"
bSizer3 = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.VERTICAL )
bSizer3.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_TOP)
bSizer3.AddSpacer(5)
for i in range(self.max_files):
command= "self.file_info_user_%i = wx.TextCtrl(self.panel, id=-1, size=(60,25))"%i
exec(command)
command="bSizer3.Add(self.file_info_user_%i,wx.ALIGN_TOP)" %i
exec(command)
bSizer3.AddSpacer(5)
#---sizer 4 ----
TEXT="\nsample-specimen\nnaming convention:"
bSizer4 = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.VERTICAL )
bSizer4.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_TOP)
        self.sample_naming_conventions=['sample=specimen','no. of terminate characters','character delimited']
bSizer4.AddSpacer(5)
for i in range(self.max_files):
command="self.sample_naming_convention_%i = wx.ComboBox(self.panel, -1, self.sample_naming_conventions[0], size=(180,25), choices=self.sample_naming_conventions, style=wx.CB_DROPDOWN)"%i
exec(command)
command="self.sample_naming_convention_char_%i = wx.TextCtrl(self.panel, id=-1, size=(40,25))"%i
exec(command)
command="bSizer4_%i = wx.BoxSizer(wx.HORIZONTAL)"%i
exec(command)
command="bSizer4_%i.Add(self.sample_naming_convention_%i,wx.ALIGN_LEFT)" %(i,i)
exec(command)
command="bSizer4_%i.Add(self.sample_naming_convention_char_%i,wx.ALIGN_LEFT)" %(i,i)
exec(command)
command="bSizer4.Add(bSizer4_%i,wx.ALIGN_TOP)"%i
exec(command)
bSizer4.AddSpacer(5)
#---sizer 5 ----
TEXT="\nsite-sample\nnaming convention:"
bSizer5 = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.VERTICAL )
bSizer5.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_TOP)
        self.site_naming_conventions=['site=sample','no. of terminate characters','character delimited']
bSizer5.AddSpacer(5)
for i in range(self.max_files):
command="self.site_naming_convention_char_%i = wx.TextCtrl(self.panel, id=-1, size=(40,25))"%i
exec(command)
command="self.site_naming_convention_%i = wx.ComboBox(self.panel, -1, self.site_naming_conventions[0], size=(180,25), choices=self.site_naming_conventions, style=wx.CB_DROPDOWN)"%i
exec(command)
command="bSizer5_%i = wx.BoxSizer(wx.HORIZONTAL)"%i
exec(command)
command="bSizer5_%i.Add(self.site_naming_convention_%i,wx.ALIGN_LEFT)" %(i,i)
exec(command)
command="bSizer5_%i.Add(self.site_naming_convention_char_%i,wx.ALIGN_LEFT)" %(i,i)
exec(command)
command="bSizer5.Add(bSizer5_%i,wx.ALIGN_TOP)"%i
exec(command)
bSizer5.AddSpacer(5)
#---sizer 6 ----
TEXT="\n\nlocation:"
bSizer6 = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.VERTICAL )
bSizer6.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_TOP)
bSizer6.AddSpacer(5)
for i in range(self.max_files):
command= "self.file_info_location_%i = wx.TextCtrl(self.panel, id=-1, size=(60,25))"%i
exec(command)
command="bSizer6.Add(self.file_info_location_%i,wx.ALIGN_TOP)" %i
exec(command)
bSizer6.AddSpacer(5)
#------------------
#self.add_file_button = wx.Button(self.panel, id=-1, label='add file')
#self.Bind(wx.EVT_BUTTON, self.on_add_file_button, self.add_file_button)
#self.remove_file_button = wx.Button(self.panel, id=-1, label='remove file')
self.okButton = wx.Button(self.panel, wx.ID_OK, "&OK")
self.Bind(wx.EVT_BUTTON, self.on_okButton, self.okButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
#hbox1.Add(self.add_file_button)
#hbox1.Add(self.remove_file_button )
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
hbox2.Add(self.okButton)
hbox2.Add(self.cancelButton )
#------
vbox=wx.BoxSizer(wx.VERTICAL)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.AddSpacer(5)
hbox.Add(bSizer0, flag=wx.ALIGN_LEFT)
hbox.AddSpacer(5)
#hbox.Add(bSizer1, flag=wx.ALIGN_LEFT)
#hbox.AddSpacer(5)
#hbox.Add(bSizer2, flag=wx.ALIGN_LEFT)
#hbox.AddSpacer(5)
hbox.Add(bSizer3, flag=wx.ALIGN_LEFT)
hbox.AddSpacer(5)
hbox.Add(bSizer4, flag=wx.ALIGN_LEFT)
hbox.AddSpacer(5)
hbox.Add(bSizer5, flag=wx.ALIGN_LEFT)
hbox.AddSpacer(5)
hbox.Add(bSizer6, flag=wx.ALIGN_LEFT)
hbox.AddSpacer(5)
#-----
vbox.AddSpacer(20)
vbox.Add(bSizer_info,flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(20)
vbox.Add(hbox)
vbox.AddSpacer(20)
vbox.Add(hbox1,flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(20)
vbox.Add(hbox2,flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(20)
self.panel.SetSizer(vbox)
vbox.Fit(self)
self.Show()
self.Centre()
def on_add_file_button(self,event):
dlg = wx.FileDialog(
None,message="choose file to convert to MagIC",
defaultDir=self.WD,
defaultFile="",
style=wx.FD_OPEN | wx.FD_CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
FILE = dlg.GetPath()
# fin=open(FILE,'r')
self.file_path.AppendText(FILE+"\n")
self.protocol_info.AppendText("IZZI"+"\n")
def on_add_file_button_i(self,event):
dlg = wx.FileDialog(
None,message="choose file to convert to MagIC",
defaultDir="./",
defaultFile="",
style=wx.FD_OPEN | wx.FD_CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
FILE = dlg.GetPath()
# fin=open(FILE,'r')
button = event.GetEventObject()
name=button.GetName()
i=int((name).split("_")[-1])
#print "The button's name is " + button.GetName()
command="self.file_path_%i.SetValue(FILE)"%i
exec(command)
#self.file_path.AppendText(FILE)
#self.protocol_info.AppendText("IZZI"+"\n")
def read_generic_file(self,path):
Data={}
if str(path)=="":
return ({})
Fin=open(str(path),'r')
header=Fin.readline().strip('\n').split('\t')
for line in Fin.readlines():
tmp_data={}
l=line.strip('\n').split('\t')
if len(l)<len(header):
continue
else:
for i in range(len(header)):
tmp_data[header[i]]=l[i]
specimen=tmp_data['specimen']
if specimen not in list(Data.keys()):
Data[specimen]=[]
                # check duplicates
if len(Data[specimen]) >0:
if tmp_data['treatment']==Data[specimen][-1]['treatment']:
if tmp_data['step']==Data[specimen][-1]['step']:
                            print("-W- WARNING: duplicate measurements specimen %s, Treatment %s:%s. keeping only the last one"%(tmp_data['specimen'],tmp_data['treatment'],tmp_data['step']))
Data[specimen].pop()
Data[specimen].append(tmp_data)
return(Data)
def on_okButton(self,event):
#-----------------------------------
# Prepare MagIC measurement file
#-----------------------------------
# prepare output file
#magic_headers=['er_citation_names','er_specimen_name',"er_sample_name","er_site_name",'er_location_name','er_analyst_mail_names',\
# "magic_instrument_codes","measurement_flag","measurement_standard","magic_experiment_name","magic_method_codes","measurement_number",'treatment_temp',"measurement_dec","measurement_inc",\
# "measurement_magn_moment","measurement_temp","treatment_dc_field","treatment_dc_field_phi","treatment_dc_field_theta"]
#fout=open("magic_measurements.txt",'w')
#fout.write("tab\tmagic_measurements\n")
#header_string=""
#for i in range(len(magic_headers)):
# header_string=header_string+magic_headers[i]+"\t"
#fout.write(header_string[:-1]+"\n")
#-----------------------------------
os.chdir(self.WD)
Data={}
header_codes=[]
ERROR=""
datafiles=[]
MagRecs=[]
self.er_sample_data={}
try:
self.er_sample_data=self.read_magic_file(os.path.join(self.WD, "er_samples.txt"), 'er_sample_name')
except:
print("-W- WARNING: Cant find er_samples.txt table")
for i in range(self.max_files):
# read data from generic file
datafile=""
command="datafile=self.file_path_%i.GetValue()"%i
exec(command)
#if datafile!="":
# try:
# this_file_data= self.read_generic_file(datafile)
# except:
# print "-E- Cant read file %s" %datafile
#else:
# continue
this_file_data= self.read_generic_file(datafile)
#print "datafile",datafile
#print "this_file_data",this_file_data
# get experiment
#command="experiment=self.protocol_info_%i.GetValue()"%i
#exec command
# get Blab
#labfield=["0","-1","-1"]
#command="labfield[0]=self.file_info_Blab_%i.GetValue()"%i
#exec command
#command="labfield[1]=self.file_info_Blab_dec_%i.GetValue()"%i
#exec command
#command="labfield[2]=self.file_info_Blab_inc_%i.GetValue()"%i
#exec command
# get User_name
user_name=""
command="user_name=self.file_info_user_%i.GetValue()"%i
exec(command)
# get sample-specimen naming convention
sample_naming_convenstion=["",""]
command="sample_naming_convenstion[0]=self.sample_naming_convention_%i.GetValue()"%i
exec(command)
command="sample_naming_convenstion[1]=self.sample_naming_convention_char_%i.GetValue()"%i
exec(command)
# get site-sample naming convention
site_naming_convenstion=["",""]
command="site_naming_convenstion[0]=self.site_naming_convention_%i.GetValue()"%i
exec(command)
command="site_naming_convenstion[1]=self.site_naming_convention_char_%i.GetValue()"%i
exec(command)
# get location
location_name=""
command="location_name=self.file_info_location_%i.GetValue()"%i
exec(command)
# read er_samples.txt
# to check for sample orientation data and tilt-corrected data
ErSamplesRecs=[]
for specimen in list(this_file_data.keys()):
measurement_running_number=0
this_specimen_LT=[]
this_specimen_LP=[]
MagRecs_this_specimen=[]
for meas_line in this_file_data[specimen]:
MagRec={}
#
MagRec["er_specimen_name"]=meas_line['specimen']
MagRec['er_citation_names']="This study"
MagRec["er_sample_name"]=self.get_sample_name(MagRec["er_specimen_name"],sample_naming_convenstion)
MagRec["er_site_name"]=self.get_site_name(MagRec["er_sample_name"],site_naming_convenstion)
MagRec["er_location_name"]=location_name
MagRec['er_analyst_mail_names']=user_name
MagRec["magic_instrument_codes"]=""
MagRec["measurement_flag"]='g'
MagRec["measurement_number"]="%i"%measurement_running_number
MagRec["measurement_temp"]='273.' # room temp in kelvin
MagRec["measurement_standard"]="u"
#-----
MagRec["measurement_magn_moment"]='%10.3e'%(float(meas_line["moment"])*1e-3) # convert to Am^2
# see if core azimuth and tilt-corrected data are in er_samples.txt
sample=MagRec["er_sample_name"]
found_sample_azimuth,found_sample_dip,found_sample_bed_dip_direction,found_sample_bed_dip=False,False,False,False
if sample in list(self.er_sample_data.keys()):
if "sample_azimuth" in list(self.er_sample_data[sample].keys()) and self.er_sample_data[sample]['sample_azimuth'] !="":
sample_azimuth=float(self.er_sample_data[sample]['sample_azimuth'])
found_sample_azimuth=True
if "sample_dip" in list(self.er_sample_data[sample].keys()) and self.er_sample_data[sample]['sample_dip']!="":
sample_dip=float(self.er_sample_data[sample]['sample_dip'])
found_sample_dip=True
if "sample_bed_dip_direction" in list(self.er_sample_data[sample].keys()) and self.er_sample_data[sample]['sample_bed_dip_direction']!="":
sample_bed_dip_direction=float(self.er_sample_data[sample]['sample_bed_dip_direction'])
found_sample_bed_dip_direction=True
if "sample_bed_dip" in list(self.er_sample_data[sample].keys()) and self.er_sample_data[sample]['sample_bed_dip']!="":
sample_bed_dip=float(self.er_sample_data[sample]['sample_bed_dip'])
found_sample_bed_dip=True
else:
self.er_sample_data[sample]={}
#--------------------
# deal with sample orientation
#--------------------
found_s,found_geo,found_tilt=False,False,False
if "dec_s" in list(meas_line.keys()) and "inc_s" in list(meas_line.keys()):
found_s=True
MagRec["measurement_dec"]=meas_line["dec_s"]
MagRec["measurement_inc"]=meas_line["inc_s"]
if "dec_g" in list(meas_line.keys()) and "inc_g" in list(meas_line.keys()):
found_geo=True
if "dec_t" in list(meas_line.keys()) and "inc_t" in list(meas_line.keys()):
found_tilt=True
#-----------------------------
# specimen coordinates: no
# geographic coordinates: yes
#-----------------------------
if found_geo and not found_s:
MagRec["measurement_dec"]=meas_line["dec_g"]
MagRec["measurement_inc"]=meas_line["inc_g"]
# core azimuth/plunge is not in er_samples.txt
if not found_sample_dip or not found_sample_azimuth:
self.er_sample_data[sample]['sample_azimuth']="0"
self.er_sample_data[sample]['sample_dip']="0"
# core azimuth/plunge is in er_samples.txt
else:
sample_azimuth=float(self.er_sample_data[sample]['sample_azimuth'])
sample_dip=float(self.er_sample_data[sample]['sample_dip'])
if sample_azimuth!=0 and sample_dip!=0:
print("-W- WARNING: delete core azimuth/plunge in er_samples.txt\n\
                                because dec_s and inc_s are not available")
#-----------------------------
# specimen coordinates: no
# geographic coordinates: no
#-----------------------------
if not found_geo and not found_s:
print("-E- ERROR: sample %s does not have dec_s/inc_s or dec_g/inc_g. Ignore specimen %s "%(sample,specimen))
break
#-----------------------------
# specimen coordinates: yes
# geographic coordinates: yes
#
                    # comment: Ron, this needs to be tested !!
#-----------------------------
if found_geo and found_s:
cdec,cinc=float(meas_line["dec_s"]),float(meas_line["inc_s"])
gdec,ginc=float(meas_line["dec_g"]),float(meas_line["inc_g"])
az,pl=pmag.get_azpl(cdec,cinc,gdec,ginc)
# core azimuth/plunge is not in er_samples.txt:
# calculate core az/pl and add it to er_samples.txt
if not found_sample_dip or not found_sample_azimuth:
self.er_sample_data[sample]['sample_azimuth']="%.1f"%az
self.er_sample_data[sample]['sample_dip']="%.1f"%pl
# core azimuth/plunge is in er_samples.txt
else:
if float(self.er_sample_data[sample]['sample_azimuth'])!= az:
print("-E- ERROR in sample_azimuth sample %s. Check it! using the value in er_samples.txt"%sample)
if float(self.er_sample_data[sample]['sample_dip'])!= pl:
print("-E- ERROR in sample_dip sample %s. Check it! using the value in er_samples.txt"%sample)
#-----------------------------
# specimen coordinates: yes
# geographic coordinates: no
#-----------------------------
                    if not found_geo and found_s:
if found_sample_dip and found_sample_azimuth:
pass
# (nothing to do)
else:
print("-E- ERROR: missing sample_dip or sample_azimuth for sample %s.ignoring specimens "%sample)
break
#-----------------------------
# tilt-corrected coordinates: yes
# geographic coordinates: no
#-----------------------------
if found_tilt and not found_geo:
print("-E- ERROR: missing geographic data for sample %s. Ignoring tilt-corrected data "%sample)
if found_tilt and found_geo:
dec_geo,inc_geo=float(meas_line["dec_g"]),float(meas_line["inc_g"])
dec_tilt,inc_tilt=float(meas_line["dec_t"]),float(meas_line["inc_t"])
if dec_geo==dec_tilt and inc_geo==inc_tilt:
DipDir,Dip=0.,0.
else:
DipDir,Dip=pmag.get_tilt(dec_geo,inc_geo,dec_tilt,inc_tilt)
if not found_sample_bed_dip_direction or not found_sample_bed_dip:
print("-I- calculating dip and dip direction used for tilt correction sample %s. results are put in er_samples.txt"%sample)
self.er_sample_data[sample]['sample_bed_dip_direction']="%.1f"%DipDir
self.er_sample_data[sample]['sample_bed_dip']="%.1f"%Dip
#-----------------------------
# er_samples method codes
# geographic coordinates: no
#-----------------------------
if found_tilt or found_geo:
self.er_sample_data[sample]['magic_method_codes']="SO-NO"
#-----
# Lab treatments and MagIC methods
#-----
if meas_line['treatment']=="N":
LT="LT-NO"
LP=""
MagRec["treatment_temp"]="273."
#MagRec["treatment_temp"]
elif meas_line['treatment']=="A":
LT="LT-AF-Z"
LP="LP-DIR-AF"
MagRec["treatment_ac_field"]="%.4f"%(float(meas_line['step'])*1e-3)
MagRec["treatment_temp"]="273."
#print MagRec["treatment_ac_field"],"treatment_ac_field"
elif meas_line['treatment']=="T":
LT="LT-T-Z"
LP="LP-DIR-T"
MagRec["treatment_temp"]="%.1f"%(float(meas_line['step'])+273.)
#print MagRec["treatment_temp"],"treatment_temp"
#if LT not in this_specimen_LT:
# this_specimen_LT.append(LT)
if LP!="" and LP not in this_specimen_LP:
this_specimen_LP.append(LP)
#MagRec["magic_experiment_name"]=MagRec["er_specimen_name"]+":"+":".join(this_specimen_LP)
MagRec["magic_method_codes"]=LT#+":"+":".join(this_specimen_LP)
MagRecs_this_specimen.append(MagRec)
#-----------------
# er_samples_data
#
if sample in list(self.er_sample_data.keys()):
self.er_sample_data[sample]['er_sample_name']=sample
self.er_sample_data[sample]['er_site_name']=MagRec["er_site_name"]
self.er_sample_data[sample]['er_location_name']=MagRec["er_location_name"]
measurement_running_number+=1
# add magic_experiment_name and magic_method_codes to magic_measurements.txt
for MagRec in MagRecs_this_specimen:
MagRec["magic_experiment_name"]=MagRec["er_specimen_name"]+":"+":".join(this_specimen_LP)
MagRec["magic_method_codes"]=MagRec["magic_method_codes"]+":"+":".join(this_specimen_LP)
MagRecs.append(MagRec)
#--
# write magic_measurements.txt
#--
MagRecs_fixed=self.merge_pmag_recs(MagRecs)
pmag.magic_write(os.path.join(self.WD, "magic_measurements.txt"), MagRecs_fixed, 'magic_measurements')
#--
# write er_samples.txt
#--
ErSamplesRecs=[]
samples=list(self.er_sample_data.keys())
for sample in samples:
ErSamplesRecs.append(self.er_sample_data[sample])
ErSamplesRecs_fixed=self.merge_pmag_recs(ErSamplesRecs)
pmag.magic_write(os.path.join(self.WD, "er_samples.txt"), ErSamplesRecs_fixed, 'er_samples')
MSG=" Files converted to MagIC format and merged into two files:\n\
magic_measurements.txt and er_samples.txt.\n\
Files saved in the current MagIC directory.\n\
Quit the GUI and restart it to view the data."
dlg1 = wx.MessageDialog(None,caption="Message:", message=MSG ,style=wx.OK|wx.ICON_INFORMATION)
dlg1.ShowModal()
dlg1.Destroy()
self.END=True
self.Destroy()
def merge_pmag_recs(self,old_recs):
# fix the headers of pmag recs
recs={}
recs=copy.deepcopy(old_recs)
headers=[]
for rec in recs:
for key in list(rec.keys()):
if key not in headers:
headers.append(key)
for rec in recs:
for header in headers:
if header not in list(rec.keys()):
rec[header]=""
return recs
def on_cancelButton(self,event):
self.Destroy()
def get_sample_name(self,specimen,sample_naming_convenstion):
if sample_naming_convenstion[0]=="sample=specimen":
sample=specimen
elif sample_naming_convenstion[0]=="no. of terminate characters":
n=int(sample_naming_convenstion[1])*-1
sample=specimen[:n]
        elif sample_naming_convenstion[0]=="character delimited":
d=sample_naming_convenstion[1]
sample_splitted=specimen.split(d)
if len(sample_splitted)==1:
sample=sample_splitted[0]
else:
sample=d.join(sample_splitted[:-1])
return sample
def get_site_name(self,sample,site_naming_convenstion):
if site_naming_convenstion[0]=="site=sample":
site=sample
elif site_naming_convenstion[0]=="no. of terminate characters":
n=int(site_naming_convenstion[1])*-1
site=sample[:n]
        elif site_naming_convenstion[0]=="character delimited":
d=site_naming_convenstion[1]
site_splitted=sample.split(d)
if len(site_splitted)==1:
site=site_splitted[0]
else:
site=d.join(site_splitted[:-1])
#print "d",d
#print "sample",sample
#print "site_splitted",site_splitted
#print "site",site
return site
def read_magic_file(self,path,sort_by_this_name):
DATA={}
fin=open(path,'r')
fin.readline()
line=fin.readline()
header=line.strip('\n').split('\t')
for line in fin.readlines():
tmp_data={}
tmp_line=line.strip('\n').split('\t')
for i in range(len(tmp_line)):
tmp_data[header[i]]=tmp_line[i]
if tmp_data[sort_by_this_name] in list(DATA.keys()):
print("-E- ERROR: magic file %s has more than one line for %s %s\n"%(path,sort_by_this_name,tmp_data[sort_by_this_name]))
DATA[tmp_data[sort_by_this_name]]=tmp_data
fin.close()
return(DATA)
"""
#--------------------------------------------------------------
# Popupmenu
#--------------------------------------------------------------
"""
class GBPopupMenu(wx.Menu):
def __init__(self,Data,magic_file,mag_meas_data,s,g_index,position):
self.g_index=g_index
self.s=s
self.Data=Data
self.mag_meas_data=mag_meas_data
self.magic_file=magic_file
#self.measurement_flag=measurement_flag
wx.Menu.__init__(self)
item = wx.MenuItem(self, wx.ID_ANY, "'good measurement'")
self.AppendItem(item)
self.Bind(wx.EVT_MENU, self.OnItemGood, item)
item = wx.MenuItem(self, wx.ID_ANY,"'bad measurement'")
self.AppendItem(item)
self.Bind(wx.EVT_MENU, self.OnItemBad, item)
def OnItemGood(self, event):
#print "good"
index=self.Data[self.s]['mag_meas_data_index'][self.g_index]
#print self.mag_meas_data[index]
self.mag_meas_data[index]['measurement_flag']='g'
self.write_good_bad_magic_measurements()
def OnItemBad(self, event):
#print "bad"
index=self.Data[self.s]['mag_meas_data_index'][self.g_index]
#print self.mag_meas_data[index]
self.mag_meas_data[index]['measurement_flag']='b'
self.write_good_bad_magic_measurements()
def write_good_bad_magic_measurements(self):
#print "write_good_bad_magic_measurements"
print("self.magic_file",self.magic_file)
pmag.magic_write(self.magic_file,self.mag_meas_data,"magic_measurements")
"""
#--------------------------------------------------------------
# Change Acceptance criteria dialog
#--------------------------------------------------------------
class demag_criteria_dialog(wx.Dialog):
def __init__(self, parent, acceptance_criteria,title):
style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER
super(demag_criteria_dialog, self).__init__(parent, title=title,style=style)
self.acceptance_criteria=acceptance_criteria
self.InitUI(acceptance_criteria)
#self.SetSize((250, 200))
def InitUI(self,acceptance_criteria):
pnl1 = wx.Panel(self)
#-----------
# specimen criteria
#-----------
vbox = wx.BoxSizer(wx.VERTICAL)
bSizer1 = wx.StaticBoxSizer( wx.StaticBox( pnl1, wx.ID_ANY, "specimen acceptance criteria" ), wx.HORIZONTAL )
# Specimen criteria
window_list_specimens=['specimen_n','specimen_mad','specimen_dang','specimen_alpha95']
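        # Build one TextCtrl per criterion, stored as self.set_<criterion>, using the
        # exec pattern employed throughout this module.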
for key in window_list_specimens:
command="self.set_%s=wx.TextCtrl(pnl1,style=wx.TE_CENTER,size=(50,20))"%key
exec(command)
criteria_specimen_window = wx.GridSizer(2, len(window_list_specimens), 10, 10)
criteria_specimen_window.AddMany( [(wx.StaticText(pnl1,label="n",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="MAD",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="DANG",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="alpha95",style=wx.TE_CENTER), wx.EXPAND),
(self.set_specimen_n),
(self.set_specimen_mad),
(self.set_specimen_dang),
(self.set_specimen_alpha95)])
bSizer1.Add( criteria_specimen_window, 0, wx.ALIGN_LEFT|wx.ALL, 5 )
#-----------
# sample criteria
#-----------
bSizer2 = wx.StaticBoxSizer( wx.StaticBox( pnl1, wx.ID_ANY, "sample acceptance criteria" ), wx.HORIZONTAL )
#self.set_average_by_sample_or_site=wx.ComboBox(pnl1, -1,size=(150, -1), value = 'sample', choices=['sample','site'], style=wx.CB_READONLY)
# Sample criteria
window_list_samples=['sample_n','sample_n_lines','sample_n_planes','sample_k','sample_r','sample_alpha95']
for key in window_list_samples:
command="self.set_%s=wx.TextCtrl(pnl1,style=wx.TE_CENTER,size=(50,20))"%key
exec(command)
criteria_sample_window = wx.GridSizer(2, len(window_list_samples), 10, 10)
criteria_sample_window.AddMany( [(wx.StaticText(pnl1,label="n",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="n lines",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="n planes",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="k",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="r",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="alpha95",style=wx.TE_CENTER), wx.EXPAND),
(self.set_sample_n),
(self.set_sample_n_lines),
(self.set_sample_n_planes),
(self.set_sample_k),
(self.set_sample_r),
(self.set_sample_alpha95)])
bSizer2.Add( criteria_sample_window, 0, wx.ALIGN_LEFT|wx.ALL, 5 )
#-----------
# site criteria
#-----------
bSizer3 = wx.StaticBoxSizer( wx.StaticBox( pnl1, wx.ID_ANY, "site acceptance criteria" ), wx.HORIZONTAL )
# Site criteria
window_list_sites=['site_n','site_n_lines','site_n_planes','site_k','site_r','site_alpha95']
for key in window_list_sites:
command="self.set_%s=wx.TextCtrl(pnl1,style=wx.TE_CENTER,size=(50,20))"%key
exec(command)
criteria_site_window = wx.GridSizer(2, len(window_list_sites), 10, 10)
criteria_site_window.AddMany( [(wx.StaticText(pnl1,label="n",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="n lines",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="n planes",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="k",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="r",style=wx.TE_CENTER), wx.EXPAND),
(wx.StaticText(pnl1,label="alpha95",style=wx.TE_CENTER), wx.EXPAND),
(self.set_site_n),
(self.set_site_n_lines),
(self.set_site_n_planes),
(self.set_site_k),
(self.set_site_r),
(self.set_site_alpha95)])
bSizer3.Add( criteria_site_window, 0, wx.ALIGN_LEFT|wx.ALL, 5 )
#-----------
#ok_sizer=self.CreateButtonSizer(wx.OK|wx.CANCEL)
hbox3 = wx.BoxSizer(wx.HORIZONTAL)
self.okButton = wx.Button(pnl1, wx.ID_OK, "&OK")
self.cancelButton = wx.Button(pnl1, wx.ID_CANCEL, '&Cancel')
hbox3.Add(self.okButton)
hbox3.AddSpacer(10)
hbox3.Add(self.cancelButton )
#self.okButton.Bind(wx.EVT_BUTTON, self.OnOK)
#-----------
supported_crit=window_list_specimens+window_list_samples+window_list_sites
# initialize value:
for crit in supported_crit:
if crit not in list(acceptance_criteria.keys()):
continue
if acceptance_criteria[crit]['value']!="":
value=float(acceptance_criteria[crit]['value'])
if value!=-999:
decimal_points=acceptance_criteria[crit]['decimal_points']
command="self.set_%s.SetValue('%%.%if'%%(value))"%(crit,int(decimal_points))
exec(command)
#----------------------
vbox.AddSpacer(10)
vbox.Add(bSizer1, flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(10)
vbox.Add(bSizer2, flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(10)
vbox.Add(bSizer3, flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(10)
vbox.Add(hbox3, flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(10)
hbox_top=wx.BoxSizer(wx.HORIZONTAL)
hbox_top.AddSpacer(50)
hbox_top.Add(vbox)
hbox_top.AddSpacer(50)
pnl1.SetSizer(hbox_top)
hbox_top.Fit(self)
#class MyFrame(wx.Frame):
# def __init__(self, parent, id, title):
# wx.Frame.__init__(self, parent, id, title, size=(500,500))
#
# panel = wx.Panel(self, -1)
# wx.Button(panel, 1, 'Show Custom Dialog', (100,100))
# self.Bind (wx.EVT_BUTTON, self.OnShowCustomDialog, id=1)
#
# def OnShowCustomDialog(self, event):
# #dia = MyDialog(self, -1, 'buttons')
#
# dia=demag_criteria_dialog(None, {},title='Set Acceptance Criteria')
# dia.Center()
# dia.ShowModal()
# dia.Destroy()
#
#class MyApp(wx.App):
# def OnInit(self):
# frame = MyFrame(None, -1, 'customdialog1.py')
# frame.Show(True)
# frame.Centre()
# return True
##
#app = MyApp(0)
#app.MainLoop()
#if __name__ == '__main__':
# app = wx.App()
# app.frame = demag_criteria_dialog(None, {},title='Set Acceptance Criteria')
# app.frame.Show()
# app.frame.Center()
# app.MainLoop()
#if __name__ == '__main__':
# app = wx.App()
# app.frame = magic_pmag_tables_dialog(None,"./",{},{})
# app.frame.Center()
# #alignToTop(app.frame)
# #dw, dh = wx.DisplaySize()
# #w, h = app.frame.GetSize()
# #print 'display 2', dw, dh
# #print "gui 2", w, h
# app.frame.Show()
# app.MainLoop()
|
the-stack_106_26762 | import os
import pytest
import sys
import ray
import pathlib
import json
import time
import subprocess
from dataclasses import asdict
from pathlib import Path
from jsonschema import validate
import ray._private.usage.usage_lib as ray_usage_lib
import ray._private.usage.usage_constants as usage_constants
from ray._private.usage.usage_lib import ClusterConfigToReport
from ray._private.test_utils import wait_for_condition
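# JSON schema used to validate the usage-stats payloads produced by the tests below.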
schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"schema_version": {"type": "string"},
"source": {"type": "string"},
"session_id": {"type": "string"},
"ray_version": {"type": "string"},
"git_commit": {"type": "string"},
"os": {"type": "string"},
"python_version": {"type": "string"},
"collect_timestamp_ms": {"type": "integer"},
"session_start_timestamp_ms": {"type": "integer"},
"cloud_provider": {"type": ["null", "string"]},
"min_workers": {"type": ["null", "integer"]},
"max_workers": {"type": ["null", "integer"]},
"head_node_instance_type": {"type": ["null", "string"]},
"worker_node_instance_types": {
"type": ["null", "array"],
"items": {"type": "string"},
},
"total_num_cpus": {"type": ["null", "integer"]},
"total_num_gpus": {"type": ["null", "integer"]},
"total_memory_gb": {"type": ["null", "number"]},
"total_object_store_memory_gb": {"type": ["null", "number"]},
"total_success": {"type": "integer"},
"total_failed": {"type": "integer"},
"seq_number": {"type": "integer"},
},
}
def file_exists(temp_dir: Path):
for path in temp_dir.iterdir():
if usage_constants.USAGE_STATS_FILE in str(path):
return True
return False
def read_file(temp_dir: Path, column: str):
usage_stats_file = temp_dir / usage_constants.USAGE_STATS_FILE
with usage_stats_file.open() as f:
result = json.load(f)
return result[column]
def print_dashboard_log():
session_dir = ray.worker.global_worker.node.address_info["session_dir"]
session_path = Path(session_dir)
log_dir_path = session_path / "logs"
paths = list(log_dir_path.iterdir())
contents = None
for path in paths:
if "dashboard.log" in str(path):
with open(str(path), "r") as f:
contents = f.readlines()
from pprint import pprint
pprint(contents)
def test_usage_stats_heads_up_message():
"""
Test usage stats heads-up message is shown in the proper cases.
"""
env = os.environ.copy()
env["RAY_USAGE_STATS_PROMPT_ENABLED"] = "0"
result = subprocess.run(
"ray start --head",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
)
assert result.returncode == 0
assert usage_constants.USAGE_STATS_HEADS_UP_MESSAGE not in result.stdout.decode(
"utf-8"
)
assert usage_constants.USAGE_STATS_HEADS_UP_MESSAGE not in result.stderr.decode(
"utf-8"
)
subprocess.run("ray stop --force", shell=True)
result = subprocess.run(
"ray start --head",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
assert result.returncode == 0
assert usage_constants.USAGE_STATS_HEADS_UP_MESSAGE not in result.stdout.decode(
"utf-8"
)
assert usage_constants.USAGE_STATS_HEADS_UP_MESSAGE in result.stderr.decode("utf-8")
result = subprocess.run(
'ray start --address="127.0.0.1:6379"',
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
assert result.returncode == 0
assert usage_constants.USAGE_STATS_HEADS_UP_MESSAGE not in result.stdout.decode(
"utf-8"
)
assert usage_constants.USAGE_STATS_HEADS_UP_MESSAGE not in result.stderr.decode(
"utf-8"
)
subprocess.run("ray stop --force", shell=True)
result = subprocess.run(
"ray up xxx.yml", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
assert usage_constants.USAGE_STATS_HEADS_UP_MESSAGE not in result.stdout.decode(
"utf-8"
)
assert usage_constants.USAGE_STATS_HEADS_UP_MESSAGE in result.stderr.decode("utf-8")
result = subprocess.run(
"ray exec xxx.yml ls --start",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
assert usage_constants.USAGE_STATS_HEADS_UP_MESSAGE not in result.stdout.decode(
"utf-8"
)
assert usage_constants.USAGE_STATS_HEADS_UP_MESSAGE in result.stderr.decode("utf-8")
result = subprocess.run(
"ray exec xxx.yml ls",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
assert usage_constants.USAGE_STATS_HEADS_UP_MESSAGE not in result.stdout.decode(
"utf-8"
)
assert usage_constants.USAGE_STATS_HEADS_UP_MESSAGE not in result.stderr.decode(
"utf-8"
)
result = subprocess.run(
"ray submit xxx.yml yyy.py --start",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
assert usage_constants.USAGE_STATS_HEADS_UP_MESSAGE not in result.stdout.decode(
"utf-8"
)
assert usage_constants.USAGE_STATS_HEADS_UP_MESSAGE in result.stderr.decode("utf-8")
result = subprocess.run(
"ray submit xxx.yml yyy.py",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
assert usage_constants.USAGE_STATS_HEADS_UP_MESSAGE not in result.stdout.decode(
"utf-8"
)
assert usage_constants.USAGE_STATS_HEADS_UP_MESSAGE not in result.stderr.decode(
"utf-8"
)
result = subprocess.run(
'python -c "import ray; ray.init()"',
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
assert usage_constants.USAGE_STATS_HEADS_UP_MESSAGE not in result.stdout.decode(
"utf-8"
)
assert usage_constants.USAGE_STATS_HEADS_UP_MESSAGE not in result.stderr.decode(
"utf-8"
)
def test_usage_lib_cluster_metadata_generation(monkeypatch, shutdown_only):
with monkeypatch.context() as m:
m.setenv("RAY_USAGE_STATS_ENABLED", "1")
m.setenv("RAY_USAGE_STATS_REPORT_URL", "http://127.0.0.1:8000")
ray.init(num_cpus=0)
"""
Test metadata stored is equivalent to `_generate_cluster_metadata`.
"""
meta = ray_usage_lib._generate_cluster_metadata()
cluster_metadata = ray_usage_lib.get_cluster_metadata(
ray.experimental.internal_kv.internal_kv_get_gcs_client(), num_retries=20
)
# Remove fields that are dynamically changed.
assert meta.pop("session_id")
assert meta.pop("session_start_timestamp_ms")
assert cluster_metadata.pop("session_id")
assert cluster_metadata.pop("session_start_timestamp_ms")
assert meta == cluster_metadata
"""
Make sure put & get works properly.
"""
cluster_metadata = ray_usage_lib.put_cluster_metadata(
ray.experimental.internal_kv.internal_kv_get_gcs_client(), num_retries=20
)
assert cluster_metadata == ray_usage_lib.get_cluster_metadata(
ray.experimental.internal_kv.internal_kv_get_gcs_client(), num_retries=20
)
def test_usage_lib_cluster_metadata_generation_usage_disabled(shutdown_only):
"""
Make sure only version information is generated when usage stats are not enabled.
"""
meta = ray_usage_lib._generate_cluster_metadata()
assert "ray_version" in meta
assert "python_version" in meta
assert len(meta) == 2
def test_usage_lib_get_cluster_status_to_report(shutdown_only):
ray.init(num_cpus=3, num_gpus=1, object_store_memory=2 ** 30)
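    # 2 ** 30 bytes == 1 GiB, so total_object_store_memory_gb should come back as exactly 1.0.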
# Wait for monitor.py to update cluster status
wait_for_condition(
lambda: ray_usage_lib.get_cluster_status_to_report(
ray.experimental.internal_kv.internal_kv_get_gcs_client(),
num_retries=20,
).total_num_cpus
== 3,
timeout=10,
)
cluster_status_to_report = ray_usage_lib.get_cluster_status_to_report(
ray.experimental.internal_kv.internal_kv_get_gcs_client(),
num_retries=20,
)
assert cluster_status_to_report.total_num_cpus == 3
assert cluster_status_to_report.total_num_gpus == 1
assert cluster_status_to_report.total_memory_gb > 0
assert cluster_status_to_report.total_object_store_memory_gb == 1.0
def test_usage_lib_get_cluster_config_to_report(monkeypatch, tmp_path):
cluster_config_file_path = tmp_path / "ray_bootstrap_config.yaml"
""" Test minimal cluster config"""
cluster_config_file_path.write_text(
"""
cluster_name: minimal
max_workers: 1
provider:
type: aws
region: us-west-2
availability_zone: us-west-2a
"""
)
cluster_config_to_report = ray_usage_lib.get_cluster_config_to_report(
cluster_config_file_path
)
assert cluster_config_to_report.cloud_provider == "aws"
assert cluster_config_to_report.min_workers is None
assert cluster_config_to_report.max_workers == 1
assert cluster_config_to_report.head_node_instance_type is None
assert cluster_config_to_report.worker_node_instance_types is None
cluster_config_file_path.write_text(
"""
cluster_name: full
min_workers: 1
provider:
type: gcp
head_node_type: head_node
available_node_types:
head_node:
node_config:
InstanceType: m5.large
min_workers: 0
max_workers: 0
aws_worker_node:
node_config:
InstanceType: m3.large
min_workers: 0
max_workers: 0
azure_worker_node:
node_config:
azure_arm_parameters:
vmSize: Standard_D2s_v3
gcp_worker_node:
node_config:
machineType: n1-standard-2
"""
)
cluster_config_to_report = ray_usage_lib.get_cluster_config_to_report(
cluster_config_file_path
)
assert cluster_config_to_report.cloud_provider == "gcp"
assert cluster_config_to_report.min_workers == 1
assert cluster_config_to_report.max_workers is None
assert cluster_config_to_report.head_node_instance_type == "m5.large"
assert cluster_config_to_report.worker_node_instance_types == list(
{"m3.large", "Standard_D2s_v3", "n1-standard-2"}
)
cluster_config_file_path.write_text(
"""
cluster_name: full
head_node_type: head_node
available_node_types:
worker_node_1:
node_config:
ImageId: xyz
worker_node_2:
resources: {}
worker_node_3:
node_config:
InstanceType: m5.large
"""
)
cluster_config_to_report = ray_usage_lib.get_cluster_config_to_report(
cluster_config_file_path
)
assert cluster_config_to_report.cloud_provider is None
assert cluster_config_to_report.min_workers is None
assert cluster_config_to_report.max_workers is None
assert cluster_config_to_report.head_node_instance_type is None
assert cluster_config_to_report.worker_node_instance_types == ["m5.large"]
cluster_config_file_path.write_text("[invalid")
cluster_config_to_report = ray_usage_lib.get_cluster_config_to_report(
cluster_config_file_path
)
assert cluster_config_to_report == ClusterConfigToReport()
cluster_config_to_report = ray_usage_lib.get_cluster_config_to_report(
tmp_path / "does_not_exist.yaml"
)
assert cluster_config_to_report == ClusterConfigToReport()
monkeypatch.setenv("KUBERNETES_SERVICE_HOST", "localhost")
cluster_config_to_report = ray_usage_lib.get_cluster_config_to_report(
tmp_path / "does_not_exist.yaml"
)
assert cluster_config_to_report.cloud_provider == "kubernetes"
assert cluster_config_to_report.min_workers is None
assert cluster_config_to_report.max_workers is None
assert cluster_config_to_report.head_node_instance_type is None
assert cluster_config_to_report.worker_node_instance_types is None
@pytest.mark.skipif(
sys.platform == "win32",
reason="Test depends on runtime env feature not supported on Windows.",
)
def test_usage_lib_report_data(monkeypatch, shutdown_only, tmp_path):
with monkeypatch.context() as m:
m.setenv("RAY_USAGE_STATS_ENABLED", "1")
m.setenv("RAY_USAGE_STATS_REPORT_URL", "http://127.0.0.1:8000")
# Runtime env is required to run this test in minimal installation test.
ray.init(num_cpus=0, runtime_env={"pip": ["ray[serve]"]})
"""
Make sure the generated data is following the schema.
"""
cluster_metadata = ray_usage_lib.get_cluster_metadata(
ray.experimental.internal_kv.internal_kv_get_gcs_client(), num_retries=20
)
cluster_config_file_path = tmp_path / "ray_bootstrap_config.yaml"
cluster_config_file_path.write_text(
"""
cluster_name: minimal
max_workers: 1
provider:
type: aws
region: us-west-2
availability_zone: us-west-2a
"""
)
cluster_config_to_report = ray_usage_lib.get_cluster_config_to_report(
cluster_config_file_path
)
d = ray_usage_lib.generate_report_data(
cluster_metadata, cluster_config_to_report, 2, 2, 2
)
validate(instance=asdict(d), schema=schema)
"""
Make sure writing to a file works as expected
"""
client = ray_usage_lib.UsageReportClient()
temp_dir = Path(tmp_path)
client.write_usage_data(d, temp_dir)
wait_for_condition(lambda: file_exists(temp_dir))
"""
Make sure report usage data works as expected
"""
@ray.remote(num_cpus=0, runtime_env={"pip": ["ray[serve]"]})
class ServeInitator:
def __init__(self):
# Start the ray serve server to verify requests are sent
# to the right place.
from ray import serve
serve.start()
@serve.deployment(ray_actor_options={"num_cpus": 0})
async def usage(request):
body = await request.json()
if body == asdict(d):
return True
else:
return False
usage.deploy()
def ready(self):
pass
# We need to start a serve with runtime env to make this test
# work with minimal installation.
s = ServeInitator.remote()
ray.get(s.ready.remote())
# Query our endpoint over HTTP.
r = client.report_usage_data("http://127.0.0.1:8000/usage", d)
r.raise_for_status()
assert json.loads(r.text) is True
@pytest.mark.skipif(
sys.platform == "win32",
reason="Test depends on runtime env feature not supported on Windows.",
)
def test_usage_report_e2e(monkeypatch, shutdown_only, tmp_path):
"""
Test usage report works e2e with env vars.
"""
cluster_config_file_path = tmp_path / "ray_bootstrap_config.yaml"
cluster_config_file_path.write_text(
"""
cluster_name: minimal
max_workers: 1
provider:
type: aws
region: us-west-2
availability_zone: us-west-2a
"""
)
with monkeypatch.context() as m:
m.setenv("HOME", str(tmp_path))
m.setenv("RAY_USAGE_STATS_ENABLED", "1")
m.setenv("RAY_USAGE_STATS_REPORT_URL", "http://127.0.0.1:8000/usage")
m.setenv("RAY_USAGE_STATS_REPORT_INTERVAL_S", "1")
ray.init(num_cpus=3)
@ray.remote(num_cpus=0)
class StatusReporter:
def __init__(self):
self.reported = 0
self.payload = None
def report_payload(self, payload):
self.payload = payload
def reported(self):
self.reported += 1
def get(self):
return self.reported
def get_payload(self):
return self.payload
reporter = StatusReporter.remote()
@ray.remote(num_cpus=0, runtime_env={"pip": ["ray[serve]"]})
class ServeInitator:
def __init__(self):
from ray import serve
serve.start()
# Usage report should be sent to the URL every 1 second.
@serve.deployment(ray_actor_options={"num_cpus": 0})
async def usage(request):
body = await request.json()
reporter.reported.remote()
reporter.report_payload.remote(body)
return True
usage.deploy()
def ready(self):
pass
# We need to start a serve with runtime env to make this test
# work with minimal installation.
s = ServeInitator.remote()
ray.get(s.ready.remote())
"""
Verify the usage stats are reported to the server.
"""
print("Verifying usage stats report.")
# Since the interval is 1 second, there must have been
# more than 5 requests sent within 30 seconds.
try:
wait_for_condition(lambda: ray.get(reporter.get.remote()) > 5, timeout=30)
except Exception:
print_dashboard_log()
raise
payload = ray.get(reporter.get_payload.remote())
ray_version, python_version = ray._private.utils.compute_version_info()
assert payload["ray_version"] == ray_version
assert payload["python_version"] == python_version
assert payload["schema_version"] == "0.1"
assert payload["os"] == sys.platform
assert payload["source"] == "OSS"
assert payload["cloud_provider"] == "aws"
assert payload["min_workers"] is None
assert payload["max_workers"] == 1
assert payload["head_node_instance_type"] is None
assert payload["worker_node_instance_types"] is None
assert payload["total_num_cpus"] == 3
assert payload["total_num_gpus"] is None
assert payload["total_memory_gb"] > 0
assert payload["total_object_store_memory_gb"] > 0
validate(instance=payload, schema=schema)
"""
Verify the usage_stats.json is updated.
"""
print("Verifying usage stats write.")
global_node = ray.worker._global_node
temp_dir = pathlib.Path(global_node.get_session_dir_path())
wait_for_condition(lambda: file_exists(temp_dir), timeout=30)
timestamp_old = read_file(temp_dir, "usage_stats")["collect_timestamp_ms"]
success_old = read_file(temp_dir, "usage_stats")["total_success"]
        # Check that both the collection timestamp and the success count have been updated.
wait_for_condition(
lambda: timestamp_old
< read_file(temp_dir, "usage_stats")["collect_timestamp_ms"]
)
wait_for_condition(
lambda: success_old < read_file(temp_dir, "usage_stats")["total_success"]
)
assert read_file(temp_dir, "success")
def test_usage_report_disabled(monkeypatch, shutdown_only):
"""
Make sure usage report module is disabled when the env var is not set.
It also verifies that the failure message is not printed (note that
the invalid report url is given as an env var).
"""
with monkeypatch.context() as m:
# It is disabled by default.
# m.setenv("RAY_USAGE_STATS_ENABLED", "0")
m.setenv("RAY_USAGE_STATS_REPORT_URL", "http://127.0.0.1:8000")
m.setenv("RAY_USAGE_STATS_REPORT_INTERVAL_S", "1")
ray.init(num_cpus=0)
# Wait enough so that usage report should happen.
time.sleep(5)
session_dir = ray.worker.global_worker.node.address_info["session_dir"]
session_path = Path(session_dir)
log_dir_path = session_path / "logs"
paths = list(log_dir_path.iterdir())
contents = None
for path in paths:
if "dashboard.log" in str(path):
with open(str(path), "r") as f:
contents = f.readlines()
assert contents is not None
keyword_found = False
for c in contents:
if "Usage reporting is disabled" in c:
keyword_found = True
# Make sure the module was disabled.
assert keyword_found
for c in contents:
assert "Failed to report usage stats" not in c
def test_usage_file_error_message(monkeypatch, shutdown_only):
"""
Make sure the usage report file is generated with a proper
error message when the report is failed.
"""
with monkeypatch.context() as m:
m.setenv("RAY_USAGE_STATS_ENABLED", "1")
m.setenv("RAY_USAGE_STATS_REPORT_URL", "http://127.0.0.1:8000")
m.setenv("RAY_USAGE_STATS_REPORT_INTERVAL_S", "1")
ray.init(num_cpus=0)
global_node = ray.worker._global_node
temp_dir = pathlib.Path(global_node.get_session_dir_path())
try:
wait_for_condition(lambda: file_exists(temp_dir), timeout=30)
except Exception:
print_dashboard_log()
raise
error_message = read_file(temp_dir, "error")
failure_old = read_file(temp_dir, "usage_stats")["total_failed"]
report_success = read_file(temp_dir, "success")
        # The recorded error message should reflect the failed connection to the unreachable report URL.
assert (
"HTTPConnectionPool(host='127.0.0.1', port=8000): "
"Max retries exceeded with url:"
) in error_message
assert not report_success
try:
wait_for_condition(
lambda: failure_old < read_file(temp_dir, "usage_stats")["total_failed"]
)
except Exception:
print_dashboard_log()
read_file(temp_dir, "usage_stats")["total_failed"]
raise
assert read_file(temp_dir, "usage_stats")["total_success"] == 0
if __name__ == "__main__":
os.environ["RAY_USAGE_STATS_REPORT_URL"] = "http://127.0.0.1:8000"
sys.exit(pytest.main(["-v", __file__]))
|
the-stack_106_26764 | from multiprocessing import cpu_count
SEED = 777
TEMP_DIRECTORY = "temp/data"
RESULT_FILE_DEV = "result_dev.tsv"
RESULT_FILE_TEST = "result_test.tsv"
SUBMISSION_FILE = "predictions.txt"
RESULT_IMAGE = "result.jpg"
GOOGLE_DRIVE = False
DRIVE_FILE_ID = None
MODEL_TYPE = "mt5"
MODEL_NAME = "google/mt5-base"
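# Model/training arguments; the keys follow the simpletransformers-style model-args convention (assumed consumer of this dict).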
quest5_config = {
'output_dir': 'temp/outputs/',
"best_model_dir": "temp/outputs/best_model",
'cache_dir': 'temp/cache_dir/',
'fp16': False,
'fp16_opt_level': 'O1',
'max_seq_length': 256,
'train_batch_size': 4,
'gradient_accumulation_steps': 1,
'eval_batch_size': 4,
'num_train_epochs': 5,
'weight_decay': 0,
'learning_rate': 2e-5,
'adam_epsilon': 1e-8,
'warmup_ratio': 0.1,
'warmup_steps': 0,
'max_grad_norm': 1.0,
'do_lower_case': False,
'logging_steps': 300,
'save_steps': 300,
"no_cache": False,
"no_save": False,
"save_recent_only": True,
'save_model_every_epoch': False,
'n_fold': 3,
'evaluate_during_training': True,
"evaluate_during_training_silent": False,
'evaluate_during_training_steps': 300,
"evaluate_during_training_verbose": True,
'use_cached_eval_features': False,
"save_best_model": True,
'save_eval_checkpoints': False,
'tensorboard_dir': None,
"save_optimizer_and_scheduler": True,
'regression': True,
'overwrite_output_dir': True,
'reprocess_input_data': True,
'process_count': 1,
'n_gpu': 1,
'use_multiprocessing': False,
"multiprocessing_chunksize": 500,
'silent': False,
'wandb_project': "quest5-et-en",
'wandb_kwargs': {},
"use_early_stopping": True,
"early_stopping_patience": 10,
"early_stopping_delta": 0,
"early_stopping_metric": "eval_loss",
"early_stopping_metric_minimize": True,
"early_stopping_consider_epochs": False,
"manual_seed": SEED,
"config": {},
"local_rank": -1,
"encoding": None,
}
|
the-stack_106_26766 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.jvm.tasks.jvm_compile.scala.zinc_utils import ZincUtils
from pants_test.base_test import BaseTest
class TestZincUtils(BaseTest):
def test_get_compile_args(self):
jar_outside_build_root = os.path.join(os.path.sep, 'outside-build-root', 'bar.jar')
classpath = [os.path.join(self.build_root, 'foo.jar'), jar_outside_build_root]
sources = ['X.scala']
args = ZincUtils._get_compile_args([], classpath, sources, 'bogus output dir',
'bogus analysis file', [])
classpath_found = False
classpath_correct = False
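    # Scan the compiler args for '-classpath'; the argument immediately after it holds the colon-separated classpath.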
for arg in args:
if classpath_found:
# Classpath elements are always relative to the build root.
jar_relpath = os.path.relpath(jar_outside_build_root, self.build_root)
self.assertEquals('foo.jar:{0}'.format(jar_relpath), arg)
classpath_correct = True
break
if arg == '-classpath':
classpath_found = True
self.assertTrue(classpath_correct)
|
the-stack_106_26768 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Transformer decoder implementations.
"""
from __future__ import annotations
from typing import Dict, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
from parlai.agents.transformer.modules import (
create_position_codes,
get_n_positions_from_options,
LAYER_NORM_EPS,
MultiHeadAttention,
TransformerFFN,
)
from parlai.agents.transformer.modules.modular import swappable
from parlai.core.opt import Opt
from parlai.utils.misc import warn_once
from parlai.utils.torch import PipelineHelper
from parlai.utils.fsdp import fsdp_wrap
from parlai.nn.checkpoint import checkpoint_wrapper
@swappable(
self_attention=MultiHeadAttention,
encoder_attention=MultiHeadAttention,
feedforward=TransformerFFN,
)
class TransformerDecoderLayer(nn.Module):
"""
Implements a single Transformer decoder layer.
Decoder layers are similar to encoder layers but:
1. Self-attention is limited in a causal (auto-regressive) manner.
2. Attend over all of the encoder states.
"""
def __init__(
self,
opt: Opt,
n_heads: int = None,
embedding_size: int = None,
ffn_size: int = None,
attention_dropout: float = 0.0,
relu_dropout: float = 0.0,
dropout: float = 0.0,
activation: str = 'relu',
variant: str = 'aiayn',
**kwargs,
):
super().__init__(**kwargs)
def _default(val, default):
"""
shorthand for explicit None check for optional arguments.
"""
return val if val is not None else default
n_heads = _default(n_heads, opt['n_heads'])
embedding_size = _default(embedding_size, opt['embedding_size'])
ffn_size = _default(ffn_size, opt['ffn_size'])
self.opt = opt
self.dim = embedding_size
self.ffn_dim = ffn_size
self.variant = variant
self.activation = activation
self.dropout = nn.Dropout(p=dropout)
self.self_attention = self.swappables.self_attention(
opt=self.opt, n_heads=n_heads, dim=embedding_size, dropout=attention_dropout
) # type: ignore
self.norm1 = torch.nn.LayerNorm(embedding_size, eps=LAYER_NORM_EPS)
self.encoder_attention = self.swappables.encoder_attention(
opt=self.opt, n_heads=n_heads, dim=embedding_size, dropout=attention_dropout
) # type: ignore
self.norm2 = torch.nn.LayerNorm(embedding_size, eps=LAYER_NORM_EPS)
self.ffn = self.swappables.feedforward(
opt=self.opt,
dim=embedding_size,
dim_hidden=ffn_size,
relu_dropout=relu_dropout,
activation=activation,
) # type: ignore
self.norm3 = torch.nn.LayerNorm(embedding_size, eps=LAYER_NORM_EPS)
def forward(
self,
x: torch.Tensor,
encoder_output: torch.Tensor,
encoder_mask: torch.Tensor,
incr_state: Optional[Dict[str, torch.Tensor]] = None,
**kwargs,
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
"""
Forward pass.
The incremental state is a dict with values for self- and encoder-attention
states.
"""
if incr_state is None:
incr_state = {}
decoder_mask = self._create_selfattn_mask(x)
# first self attn
residual = x
if self.variant == 'prelayernorm':
x = self.norm1(x)
        # don't peek into the future!
x, final_self_attn_incr_state = self.self_attention(
query=x,
mask=decoder_mask,
incr_state=incr_state.get('self_attn'),
static_kv=False,
**kwargs,
)[:2]
x = self.dropout(x) # --dropout
x = x + residual
if self.variant == 'aiayn' or self.variant == 'xlm' or self.variant == 'bart':
x = self.norm1(x)
residual = x
# encoder_attn_layer_norm norm 2
if self.variant == 'prelayernorm':
x = self.norm2(x)
x, final_encoder_attn_incr_state = self.encoder_attention(
query=x,
key=encoder_output,
value=encoder_output,
mask=encoder_mask,
incr_state=incr_state.get('encoder_attn'),
static_kv=True,
**kwargs,
)[:2]
x = self.dropout(x) # --dropout
x = residual + x
if self.variant == 'aiayn' or self.variant == 'xlm' or self.variant == 'bart':
x = self.norm2(x)
# finally the ffn
residual = x
if self.variant == 'prelayernorm':
x = self.norm3(x)
x = self.ffn(x, **kwargs)
x = self.dropout(x) # --dropout
x = residual + x
if self.variant == 'aiayn' or self.variant == 'xlm' or self.variant == 'bart':
x = self.norm3(x)
new_incr_state = {
'self_attn': final_self_attn_incr_state,
'encoder_attn': final_encoder_attn_incr_state,
}
return x, new_incr_state
def _create_selfattn_mask(self, x):
# figure out how many timestamps we need
bsz = x.size(0)
time = x.size(1)
# make sure that we don't look into the future
mask = torch.tril(x.new(time, time).fill_(1))
# broadcast across batch
mask = mask.unsqueeze(0).expand(bsz, -1, -1)
return mask
def reorder_incremental_state(
self, incremental_state: Dict[str, dict], inds: torch.Tensor
) -> Dict[str, dict]:
"""
Reorder all incremental-state tensors for this layer.
"""
attn_types = {
'self_attn': self.self_attention,
'encoder_attn': self.encoder_attention,
}
return {
attn_type: attn.reorder_incremental_state(
incremental_state[attn_type], inds
)
for attn_type, attn in attn_types.items()
}
@swappable(layer=TransformerDecoderLayer)
class TransformerDecoder(nn.Module):
"""
Transformer Decoder module.
For documentation on parameters that are take directly from opt,
see parlai/agents/transformer/transformer.py
:param opt: ParlAI-parsed options.
:param embedding: an embedding matrix for the bottom layer of the transformer.
If none, one is created for this encoder.
:param int n_positions: Size of the position embeddings matrix.
"""
def __init__(
self,
opt: Opt,
embedding: Optional[nn.Embedding] = None,
n_positions: Optional[int] = None,
**kwargs,
):
super().__init__(**kwargs)
self.opt = opt
def _default(val, default):
return val if val is not None else default
self.embedding_size = opt['embedding_size']
self.ffn_size = opt['ffn_size']
self.n_layers = (
opt['n_decoder_layers']
if opt.get('n_decoder_layers', -1) > 0
else opt['n_layers']
)
self.n_heads = opt['n_heads']
self.dim = self.embedding_size
self.activation = opt.get('activation', 'relu')
self.variant = opt.get('variant', 'aiayn')
self.embeddings_scale = opt.get('embeddings_scale', True)
self.dropout = nn.Dropout(p=opt.get('dropout', 0.0)) # --dropout
self.n_positions = _default(n_positions, get_n_positions_from_options(opt))
self.out_dim = self.embedding_size
assert (
self.embedding_size % self.n_heads == 0
), 'Transformer embedding size must be a multiple of n_heads'
self.embeddings = embedding
if (
self.variant == 'xlm'
or self.variant == 'prelayernorm'
or self.variant == 'bart'
):
self.norm_embeddings = torch.nn.LayerNorm(self.dim, eps=LAYER_NORM_EPS)
if self.variant == 'xlm':
warn_once(
'DEPRECATED: XLM should only be used for backwards compatibility, '
'as it involves a less-stable layernorm operation.'
)
elif self.variant == 'aiayn':
pass
else:
raise ValueError("Can't handle --variant {}".format(self.variant))
# create the positional embeddings
self.position_embeddings = nn.Embedding(self.n_positions, self.embedding_size)
if not opt.get('learn_positional_embeddings', False):
create_position_codes(
self.n_positions,
self.embedding_size,
out=self.position_embeddings.weight,
)
else:
nn.init.normal_(
self.position_embeddings.weight, 0, self.embedding_size ** -0.5
)
# build the model
self.layers = self.build_layers()
def build_layers(self) -> nn.ModuleList:
layers = nn.ModuleList()
for _ in range(self.n_layers):
layer = self.swappables.layer(
self.opt,
attention_dropout=self.opt.get('attention_dropout', 0.0),
relu_dropout=self.opt.get('relu_dropout', 0.0),
dropout=self.opt.get('dropout', 0.0),
activation=self.activation,
variant=self.variant,
)
if self.opt.get('checkpoint_activations'):
layer = checkpoint_wrapper(layer)
layers.append(fsdp_wrap(layer)) # type: ignore
return layers
def forward_embedding(
self,
input: torch.LongTensor,
positions: Optional[torch.LongTensor] = None,
segments: Optional[torch.LongTensor] = None,
**kwargs,
):
"""
Embed tokens prior to feeding into transformer.
:param LongTensor[batch, seqlen] input:
The target input IDs
:param LongTensor[batch, seqlen] positions:
Positions for input IDs. If None, computes defaults.
        :param LongTensor[batch, seqlen] segments:
            Segment IDs for extra embedding features. If None, not used.
        :return (tensor, mask):
            embedded input and mask
"""
tensor = self.embeddings(input)
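        # Optionally scale the token embeddings by sqrt(embedding dim), as in the original Transformer.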
if self.embeddings_scale:
tensor = tensor * np.sqrt(self.dim)
if self.variant == 'xlm':
tensor = self.norm_embeddings(tensor)
if positions.max().item() > self.n_positions:
warn_once(
'You are inputting a sequence of {x} length, but only have '
'--n-positions {y}. Set --truncate or increase --n-positions'.format(
x=positions.max().item(), y=self.n_positions
)
)
tensor = tensor + self.position_embeddings(positions).expand_as(tensor)
if self.variant == 'bart':
tensor = self.norm_embeddings(tensor)
return tensor
def forward_layers(
self,
tensor: torch.Tensor,
encoder_output: torch.Tensor,
encoder_mask: torch.Tensor,
incr_state: Dict[int, Dict[str, Dict[str, torch.Tensor]]],
**kwargs,
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
"""
Forward pass of decoder layers.
:param tensor:
embedded input tensor for the decoder
:param enc_out:
encoder outputs
:param enc_mask:
encoder output mask
:param incr_state:
Dict mapping layer_idx to incremental state
:return (tensor, new_incr_state):
return encoding after applying decoder layers, as well
as new incremental decoding state.
"""
new_incr_state = {}
if getattr(self.layers, 'is_model_parallel', False):
tensor, new_incr_state = self._apply_model_parallel(
tensor, encoder_output, encoder_mask, incr_state
)
else:
for idx, layer in enumerate(self.layers):
tensor, new_incr_state[idx] = layer(
x=tensor,
encoder_output=encoder_output,
encoder_mask=encoder_mask,
incr_state=incr_state.get(idx),
**kwargs,
)
return tensor, new_incr_state
def forward(
self,
input: torch.Tensor,
encoder_state,
incr_state: Optional[Dict[str, torch.Tensor]] = None,
**kwargs,
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
"""
Forward pass.
:param LongTensor[batch,seqlen] input:
The decoder inputs (partial or full decoded token IDs).
:param encoder_state:
Output from the encoder module forward pass.
:param incr_state:
The incremental state: a dictionary whose keys index the layers and whose
values contain the incremental state for each layer.
"""
encoder_output, encoder_mask = encoder_state
seq_len = input.size(1)
positions = torch.arange(
seq_len, dtype=torch.long, device=input.device
).unsqueeze(0)
if incr_state is not None:
# We're doing incremental decoding, so select only the most recent position
input = input[:, -1:]
if positions is not None:
positions = positions[:, -1:]
else:
incr_state = {}
tensor = self.forward_embedding(input, positions, **kwargs)
tensor = self.dropout(tensor) # --dropout
tensor, new_incr_state = self.forward_layers(
tensor, encoder_output, encoder_mask, incr_state, **kwargs
)
if self.variant == 'prelayernorm':
tensor = self.norm_embeddings(tensor)
return tensor, new_incr_state
def _apply_model_parallel(self, tensor, encoder_output, encoder_mask, incr_state):
"""
Pipeline application of model parallelism.
"""
chunks = PipelineHelper.split(
(tensor, encoder_output, encoder_mask, incr_state)
)
work_items = PipelineHelper.schedule_work_items(self.layers, chunks)
new_incr_state = {i: [] for i, _ in enumerate(self.layers)}
for chunk_idx, layer_nos, next_device in work_items:
s_tensor, s_enc_out, s_enc_mask, s_incr_state = chunks[chunk_idx]
for layer_no in layer_nos:
s_tensor, nis = self.layers[layer_no](
x=s_tensor,
encoder_output=s_enc_out,
encoder_mask=s_enc_mask,
incr_state=s_incr_state.get(layer_no),
)
new_incr_state[layer_no].append(nis)
# don't move incr state, it's always on the correct device
s_tensor, s_enc_out, s_enc_mask = PipelineHelper.chunk_to(
(s_tensor, s_enc_out, s_enc_mask), next_device
)
chunks[chunk_idx] = (s_tensor, s_enc_out, s_enc_mask, s_incr_state)
tensor_out = PipelineHelper.join([c[0] for c in chunks])
new_incr_state = {
layer_no: PipelineHelper.join(pieces)
for layer_no, pieces in new_incr_state.items()
}
return tensor_out, new_incr_state
|
the-stack_106_26769 | #!/usr/bin/env python
from __future__ import print_function
import sys
import os
import struct
try:
import usocket as socket
except ImportError:
import socket
import websocket_helper
# Define to 1 to use builtin "websocket" module of MicroPython
USE_BUILTIN_WEBSOCKET = 0
# Treat this remote directory as a root for file transfers
SANDBOX = ""
#SANDBOX = "/tmp/webrepl/"
DEBUG = 0
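# Request record layout for WEBREPL_REQ_S ("<2sBBQLH64s"): b"WA" signature, opcode, a byte
# and a 64-bit field that this client always sends as zero, 32-bit transfer size, 16-bit
# filename length, and a fixed 64-byte filename field.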
WEBREPL_REQ_S = "<2sBBQLH64s"
WEBREPL_PUT_FILE = 1
WEBREPL_GET_FILE = 2
WEBREPL_GET_VER = 3
def debugmsg(msg):
if DEBUG:
print(msg)
if USE_BUILTIN_WEBSOCKET:
from websocket import websocket
else:
class websocket:
def __init__(self, s):
self.s = s
self.buf = b""
def write(self, data):
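            # Send the payload as a single unmasked binary frame (0x82 = FIN | binary opcode);
            # payloads of 126 bytes or more need the 16-bit extended length field.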
l = len(data)
if l < 126:
# TODO: hardcoded "binary" type
hdr = struct.pack(">BB", 0x82, l)
else:
hdr = struct.pack(">BBH", 0x82, 126, l)
self.s.send(hdr)
self.s.send(data)
def recvexactly(self, sz):
res = b""
while sz:
data = self.s.recv(sz)
if not data:
break
res += data
sz -= len(data)
return res
def read(self, size, text_ok=False):
if not self.buf:
while True:
hdr = self.recvexactly(2)
assert len(hdr) == 2
fl, sz = struct.unpack(">BB", hdr)
if sz == 126:
hdr = self.recvexactly(2)
assert len(hdr) == 2
(sz,) = struct.unpack(">H", hdr)
if fl == 0x82:
break
if text_ok and fl == 0x81:
break
debugmsg("Got unexpected websocket record of type %x, skipping it" % fl)
while sz:
skip = self.s.recv(sz)
debugmsg("Skip data: %s" % skip)
sz -= len(skip)
data = self.recvexactly(sz)
assert len(data) == sz
self.buf = data
d = self.buf[:size]
self.buf = self.buf[size:]
assert len(d) == size, len(d)
return d
def ioctl(self, req, val):
assert req == 9 and val == 2
def login(ws, passwd):
while True:
c = ws.read(1, text_ok=True)
if c == b":":
assert ws.read(1, text_ok=True) == b" "
break
ws.write(passwd.encode("utf-8") + b"\r")
def read_resp(ws):
data = ws.read(4)
sig, code = struct.unpack("<2sH", data)
assert sig == b"WB"
return code
def send_req(ws, op, sz=0, fname=b""):
rec = struct.pack(WEBREPL_REQ_S, b"WA", op, 0, 0, sz, len(fname), fname)
debugmsg("%r %d" % (rec, len(rec)))
ws.write(rec)
def get_ver(ws):
send_req(ws, WEBREPL_GET_VER)
d = ws.read(3)
d = struct.unpack("<BBB", d)
return d
def put_file(ws, local_file, remote_file):
sz = os.stat(local_file)[6]
dest_fname = (SANDBOX + remote_file).encode("utf-8")
rec = struct.pack(WEBREPL_REQ_S, b"WA", WEBREPL_PUT_FILE, 0, 0, sz, len(dest_fname), dest_fname)
debugmsg("%r %d" % (rec, len(rec)))
ws.write(rec[:10])
ws.write(rec[10:])
assert read_resp(ws) == 0
cnt = 0
with open(local_file, "rb") as f:
while True:
sys.stdout.write("Sent %d of %d bytes\r" % (cnt, sz))
sys.stdout.flush()
buf = f.read(1024)
if not buf:
break
ws.write(buf)
cnt += len(buf)
print()
assert read_resp(ws) == 0
def get_file(ws, local_file, remote_file):
src_fname = (SANDBOX + remote_file).encode("utf-8")
rec = struct.pack(WEBREPL_REQ_S, b"WA", WEBREPL_GET_FILE, 0, 0, 0, len(src_fname), src_fname)
debugmsg("%r %d" % (rec, len(rec)))
ws.write(rec)
assert read_resp(ws) == 0
with open(local_file, "wb") as f:
cnt = 0
while True:
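            # Each zero byte requests the next chunk; a reported chunk size of 0 marks end of file.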
ws.write(b"\0")
(sz,) = struct.unpack("<H", ws.read(2))
if sz == 0:
break
while sz:
buf = ws.read(sz)
if not buf:
raise OSError()
cnt += len(buf)
f.write(buf)
sz -= len(buf)
sys.stdout.write("Received %d bytes\r" % cnt)
sys.stdout.flush()
print()
assert read_resp(ws) == 0
def help(rc=0):
exename = sys.argv[0].rsplit("/", 1)[-1]
print("%s - Perform remote file operations using MicroPython WebREPL protocol" % exename)
print("Arguments:")
print(" <host>:<remote_file> <local_file> - Copy remote file to local file")
print(" <local_file> <host>:<remote_file> - Copy local file to remote file")
print("Examples:")
print(" %s script.py 192.168.4.1:/another_name.py" % exename)
print(" %s script.py 192.168.4.1:/app/" % exename)
print(" %s 192.168.4.1:/app/script.py ." % exename)
sys.exit(rc)
def error(msg):
print(msg)
sys.exit(1)
def parse_remote(remote):
host, fname = remote.rsplit(":", 1)
if fname == "":
fname = "/"
port = 8266
if ":" in host:
        host, port = host.split(":")
port = int(port)
return (host, port, fname)
def main():
if len(sys.argv) != 3:
help(1)
if ":" in sys.argv[1] and ":" in sys.argv[2]:
error("Operations on 2 remote files are not supported")
if ":" not in sys.argv[1] and ":" not in sys.argv[2]:
error("One remote file is required")
if ":" in sys.argv[1]:
op = "get"
host, port, src_file = parse_remote(sys.argv[1])
dst_file = sys.argv[2]
if os.path.isdir(dst_file):
basename = src_file.rsplit("/", 1)[-1]
dst_file += "/" + basename
else:
op = "put"
host, port, dst_file = parse_remote(sys.argv[2])
src_file = sys.argv[1]
if dst_file[-1] == "/":
basename = src_file.rsplit("/", 1)[-1]
dst_file += basename
if 1:
print(op, host, port)
print(src_file, "->", dst_file)
s = socket.socket()
ai = socket.getaddrinfo(host, port)
addr = ai[0][4]
s.connect(addr)
#s = s.makefile("rwb")
websocket_helper.client_handshake(s)
ws = websocket(s)
import getpass
passwd = getpass.getpass()
login(ws, passwd)
print("Remote WebREPL version:", get_ver(ws))
# Set websocket to send data marked as "binary"
ws.ioctl(9, 2)
if op == "get":
get_file(ws, dst_file, src_file)
elif op == "put":
put_file(ws, src_file, dst_file)
s.close()
if __name__ == "__main__":
main()
|
the-stack_106_26770 | # -*- coding: utf-8 -*-
#
# Copyright 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib.parse
import json
import time
from push_admin import _http
from push_admin import _message_serializer
class App(object):
"""application for HW Cloud Message(HCM)"""
JSON_ENCODER = _message_serializer.MessageSerializer()
@classmethod
def _send_to_server(cls, headers, body, url):
try:
msg_body = json.dumps(body)
response = _http.post(url, msg_body, headers)
            if response.status_code != 200:
raise ApiCallError('http status code is {0} in send.'.format(response.status_code))
# json text to dict
resp_dict = json.loads(response.text)
return resp_dict
except Exception as e:
raise ApiCallError('caught exception when send. {0}'.format(e))
def __init__(self, app_id, app_secret, token_server='https://oauth-login.cloud.huawei.com/oauth2/v2/token',
push_open_url='https://push-api.cloud.huawei.com'):
"""class init"""
self.app_id = app_id
self.app_secret = app_secret
self.token_expired_time = 0
self.access_token = None
self.token_server = token_server
self.push_open_url = push_open_url
self.hw_push_server = self.push_open_url + "/v1/{0}/messages:send"
self.hw_push_topic_sub_server = self.push_open_url + "/v1/{0}/topic:subscribe"
self.hw_push_topic_unsub_server = self.push_open_url + "/v1/{0}/topic:unsubscribe"
self.hw_push_topic_query_server = self.push_open_url + "/v1/{0}/topic:list"
def _refresh_token(self):
"""refresh access token"""
headers = dict()
headers['Content-Type'] = 'application/x-www-form-urlencoded;charset=utf-8'
params = dict()
params['grant_type'] = 'client_credentials'
params['client_secret'] = self.app_secret
params['client_id'] = self.app_id
msg_body = urllib.parse.urlencode(params)
try:
response = _http.post(self.token_server, msg_body, headers)
            if response.status_code != 200:
return False, 'http status code is {0} in get access token'.format(response.status_code)
""" json string to directory """
response_body = json.loads(response.text)
self.access_token = response_body.get('access_token')
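            # Expire the cached token 5 minutes before the server-reported expiry so a nearly-expired token is never reused.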
self.token_expired_time = int(round(time.time() * 1000)) + (int(response_body.get('expires_in')) - 5 * 60) * 1000
return True, None
except Exception as e:
raise ApiCallError(format(repr(e)))
def _is_token_expired(self):
"""is access token expired"""
if self.access_token is None:
""" need refresh token """
return True
return int(round(time.time() * 1000)) >= self.token_expired_time
def _update_token(self):
if self._is_token_expired() is True:
result, reason = self._refresh_token()
if result is False:
raise ApiCallError(reason)
def _create_header(self):
headers = dict()
headers['Content-Type'] = 'application/json;charset=utf-8'
headers['Authorization'] = 'Bearer {0}'.format(self.access_token)
return headers
def send(self, message, validate_only):
"""
Sends the given message Huawei Cloud Messaging (HCM)
:param message:
:param validate_only:
:return:
response dict: response body dict
:raise:
ApiCallError: failure reason
"""
self._update_token()
headers = self._create_header()
url = self.hw_push_server.format(self.app_id)
msg_body_dict = dict()
msg_body_dict['validate_only'] = validate_only
msg_body_dict['message'] = App.JSON_ENCODER.default(message)
return App._send_to_server(headers, msg_body_dict, url)
def subscribe_topic(self, topic, token_list):
"""
:param topic: The specific topic
:param token_list: The token list to be added
:return:
"""
self._update_token()
headers = self._create_header()
url = self.hw_push_topic_sub_server.format(self.app_id)
msg_body_dict = {'topic': topic, 'tokenArray': token_list}
return App._send_to_server(headers, msg_body_dict, url)
def unsubscribe_topic(self, topic, token_list):
"""
:param topic: The specific topic
:param token_list: The token list to be deleted
:return:
"""
self._update_token()
headers = self._create_header()
url = self.hw_push_topic_unsub_server.format(self.app_id)
msg_body_dict = {'topic': topic, 'tokenArray': token_list}
return App._send_to_server(headers, msg_body_dict, url)
def query_subscribe_list(self, token):
"""
:param token: The specific token
:return:
"""
self._update_token()
headers = self._create_header()
url = self.hw_push_topic_query_server.format(self.app_id)
msg_body_dict = {'token': token}
return App._send_to_server(headers, msg_body_dict, url)
class ApiCallError(Exception):
"""Represents an Exception encountered while invoking the HCM API.
Attributes:
message: A error message string.
detail: Original low-level exception.
"""
def __init__(self, message, detail=None):
Exception.__init__(self, message)
self.detail = detail
|
the-stack_106_26771 | import cv2
import numpy as np
import time, imutils
import math
import pydirectinput
time_for_camera_warmup = 3
min_contour_area = 2000
cap = cv2.VideoCapture(0)
time.sleep(time_for_camera_warmup)
try:
while True:
counter = 0
try:
_, frame = cap.read()
# ROI where hand needs to be placed and image processing will be performed
roi = frame[50:300, 100:350]
cv2.rectangle(frame, (100,50), (350,300), (128,128,128), 1)
hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
'''lower and upper bound HSV to mask the hand only. The values
might change depending on your background or lighting conditions.
Feel free to experiment with these values'''
lower_bound = np.array([0, 80, 60])
upper_bound = np.array([25, 255, 255])
mask = cv2.inRange(hsv, lower_bound, upper_bound)
kernel = np.ones((3,3), dtype=np.uint8)
# Image processing to clean noisy pixels
dil = cv2.dilate(mask, kernel, iterations=4)
mask = cv2.GaussianBlur(dil, (3,3), 100)
opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
cv2.imshow('closing', closing)
cnts = cv2.findContours(closing, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
if len(cnts) > 0:
max_area_cnt = max(cnts, key = cv2.contourArea)
# Min contour area set so that contour is not formed on any background objects
if cv2.contourArea(max_area_cnt) > min_contour_area:
x, y, w, h = cv2.boundingRect(max_area_cnt)
cv2.drawContours(roi, [max_area_cnt], -1, (0,0,255), 3)
# Convex hull is the smallest convex polygon that connects and encloses all points of a contour
hull = cv2.convexHull(max_area_cnt)
cv2.drawContours(roi, [hull], -1, (0,255,0), 2)
# To find the convexity defects in the hull. Defect is an area that do not belong to the object but located inside of its outer boundary
hull2 = cv2.convexHull(max_area_cnt, returnPoints=False)
defects = cv2.convexityDefects(max_area_cnt, hull2)
# Inside the loop coordinates of start, end and far points are computed which will be the three points of a triangle
for i in range(defects.shape[0]):
s, e, f, d = defects[i, 0]
start = tuple(max_area_cnt[s][0])
end = tuple(max_area_cnt[e][0])
far = tuple(max_area_cnt[f][0])
a = math.sqrt((end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2)
b = math.sqrt((start[0] - far[0]) ** 2 + (start[1] - far[1]) ** 2)
c = math.sqrt((end[0] - far[0]) ** 2 + (end[1] - far[1]) ** 2)
                        s = (a + b + c) / 2
                        # Heron's formula: s is the semi-perimeter, ar the area of the triangle
                        ar = math.sqrt(s * (s - a) * (s - b) * (s - c))
                        # Height of the triangle from the far point to the start-end edge
                        d = ar * 2 / a
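                        # Angle at the far (defect) point via the law of cosines; * 57 ~= 180/pi converts radians to degrees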
angle = math.acos((b ** 2 + c ** 2 - a ** 2)/(2 * b * c)) * 57
# Condition to eliminate defects which are not forming between fingers
if angle <= 90 and d > 30:
counter += 1
cv2.circle(roi, far, 5, (255,0,0), -1)
                    '''Add one because the number of defects is one less than the number of
                    extended fingers, e.g. five extended fingers ideally produce four defects.'''
counter += 1
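                    # Map the finger count to game input: an open palm (5) holds the left arrow key, a single finger holds the right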
if counter == 5:
cv2.putText(frame, str(counter), (10,30), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255,0,0), 2)
pydirectinput.keyDown('left')
elif counter == 4:
cv2.putText(frame, str(counter), (10,30), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255,0,0), 2)
elif counter == 3:
cv2.putText(frame, str(counter), (10,30), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255,0,0), 2)
elif counter == 2:
cv2.putText(frame, str(counter), (10,30), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255,0,0), 2)
elif counter == 1:
cv2.putText(frame, str(counter), (10,30), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255,0,0), 2)
pydirectinput.keyDown('right')
cv2.imshow('frame', frame)
else:
pydirectinput.keyUp('left')
pydirectinput.keyUp('right')
if cv2.waitKey(1) & 0xff == 27:
break
except Exception as e:
print(str(e))
continue
except Exception as e:
print(str(e))
cap.release()
cv2.destroyAllWindows() |
the-stack_106_26772 | """Core visualization operations."""
# Authors: Alexandre Gramfort <[email protected]>
# Eric Larson <[email protected]>
# Joan Massich <[email protected]>
# Guillaume Favelier <[email protected]>
#
# License: Simplified BSD
import sys
import os
from contextlib import contextmanager
import importlib
from ._utils import VALID_3D_BACKENDS
from ...utils import (logger, verbose, get_config, _check_option, fill_doc,
_validate_type)
MNE_3D_BACKEND = None
MNE_3D_BACKEND_TESTING = False
MNE_3D_BACKEND_INTERACTIVE = False
_backend_name_map = dict(
pyvistaqt='._qt',
notebook='._notebook',
)
backend = None
def _reload_backend(backend_name):
global backend
backend = importlib.import_module(name=_backend_name_map[backend_name],
package='mne.viz.backends')
logger.info('Using %s 3d backend.\n' % backend_name)
def _get_renderer(*args, **kwargs):
_get_3d_backend()
return backend._Renderer(*args, **kwargs)
def _check_3d_backend_name(backend_name):
_validate_type(backend_name, str, 'backend_name')
backend_name = 'pyvistaqt' if backend_name == 'pyvista' else backend_name
_check_option('backend_name', backend_name, VALID_3D_BACKENDS)
return backend_name
@verbose
def set_3d_backend(backend_name, verbose=None):
"""Set the 3D backend for MNE.
The backend will be set as specified and operations will use
that backend.
Parameters
----------
backend_name : str
The 3d backend to select. See Notes for the capabilities of each
backend (``'pyvistaqt'`` and ``'notebook'``).
.. versionchanged:: 0.24
The ``'pyvista'`` backend was renamed ``'pyvistaqt'``.
%(verbose)s
Returns
-------
old_backend_name : str | None
The old backend that was in use.
Notes
-----
To use PyVista, set ``backend_name`` to ``pyvistaqt`` but the value
``pyvista`` is still supported for backward compatibility.
This table shows the capabilities of each backend ("✓" for full support,
and "-" for partial support):
.. table::
:widths: auto
+--------------------------------------+-----------+----------+
| **3D function:** | pyvistaqt | notebook |
+======================================+===========+==========+
| :func:`plot_vector_source_estimates` | ✓ | ✓ |
+--------------------------------------+-----------+----------+
| :func:`plot_source_estimates` | ✓ | ✓ |
+--------------------------------------+-----------+----------+
| :func:`plot_alignment` | ✓ | ✓ |
+--------------------------------------+-----------+----------+
| :func:`plot_sparse_source_estimates` | ✓ | ✓ |
+--------------------------------------+-----------+----------+
| :func:`plot_evoked_field` | ✓ | ✓ |
+--------------------------------------+-----------+----------+
| :func:`plot_sensors_connectivity` | ✓ | ✓ |
+--------------------------------------+-----------+----------+
| :func:`snapshot_brain_montage` | ✓ | ✓ |
+--------------------------------------+-----------+----------+
| :func:`link_brains` | ✓ | |
+--------------------------------------+-----------+----------+
+--------------------------------------+-----------+----------+
| **Feature:** |
+--------------------------------------+-----------+----------+
| Large data | ✓ | ✓ |
+--------------------------------------+-----------+----------+
| Opacity/transparency | ✓ | ✓ |
+--------------------------------------+-----------+----------+
| Support geometric glyph | ✓ | ✓ |
+--------------------------------------+-----------+----------+
| Smooth shading | ✓ | ✓ |
+--------------------------------------+-----------+----------+
| Subplotting | ✓ | ✓ |
+--------------------------------------+-----------+----------+
| Inline plot in Jupyter Notebook | | ✓ |
+--------------------------------------+-----------+----------+
| Inline plot in JupyterLab | | ✓ |
+--------------------------------------+-----------+----------+
| Inline plot in Google Colab | | |
+--------------------------------------+-----------+----------+
| Toolbar | ✓ | ✓ |
+--------------------------------------+-----------+----------+
"""
global MNE_3D_BACKEND
old_backend_name = MNE_3D_BACKEND
backend_name = _check_3d_backend_name(backend_name)
if MNE_3D_BACKEND != backend_name:
_reload_backend(backend_name)
MNE_3D_BACKEND = backend_name
# Qt5 macOS 11 compatibility
if sys.platform == 'darwin' and 'QT_MAC_WANTS_LAYER' not in os.environ:
os.environ['QT_MAC_WANTS_LAYER'] = '1'
return old_backend_name
def get_3d_backend():
"""Return the 3D backend currently used.
Returns
-------
backend_used : str | None
The 3d backend currently in use. If no backend is found,
returns ``None``.
.. versionchanged:: 0.24
The ``'pyvista'`` backend has been renamed ``'pyvistaqt'``, so
``'pyvista'`` is no longer returned by this function.
"""
try:
backend = _get_3d_backend()
except RuntimeError as exc:
backend = None
logger.info(str(exc))
return backend
def _get_3d_backend():
"""Load and return the current 3d backend."""
global MNE_3D_BACKEND
if MNE_3D_BACKEND is None:
MNE_3D_BACKEND = get_config(key='MNE_3D_BACKEND', default=None)
if MNE_3D_BACKEND is None: # try them in order
errors = dict()
for name in VALID_3D_BACKENDS:
try:
_reload_backend(name)
except ImportError as exc:
errors[name] = str(exc)
else:
MNE_3D_BACKEND = name
break
else:
raise RuntimeError(
'Could not load any valid 3D backend:\n' +
"\n".join(f'{key}: {val}' for key, val in errors.items()))
else:
MNE_3D_BACKEND = _check_3d_backend_name(MNE_3D_BACKEND)
_reload_backend(MNE_3D_BACKEND)
MNE_3D_BACKEND = _check_3d_backend_name(MNE_3D_BACKEND)
return MNE_3D_BACKEND
@contextmanager
def use_3d_backend(backend_name):
"""Create a 3d visualization context using the designated backend.
See :func:`mne.viz.set_3d_backend` for more details on the available
3d backends and their capabilities.
Parameters
----------
backend_name : {'pyvistaqt', 'notebook'}
The 3d backend to use in the context.
"""
old_backend = set_3d_backend(backend_name)
try:
yield
finally:
if old_backend is not None:
try:
set_3d_backend(old_backend)
except Exception:
pass
@contextmanager
def _use_test_3d_backend(backend_name, interactive=False):
"""Create a testing viz context.
Parameters
----------
backend_name : str
The 3d backend to use in the context.
interactive : bool
If True, ensure interactive elements are accessible.
"""
with _actors_invisible():
with use_3d_backend(backend_name):
with backend._testing_context(interactive):
yield
@contextmanager
def _actors_invisible():
global MNE_3D_BACKEND_TESTING
orig_testing = MNE_3D_BACKEND_TESTING
MNE_3D_BACKEND_TESTING = True
try:
yield
finally:
MNE_3D_BACKEND_TESTING = orig_testing
@fill_doc
def set_3d_view(figure, azimuth=None, elevation=None,
focalpoint=None, distance=None, roll=None,
reset_camera=True):
"""Configure the view of the given scene.
Parameters
----------
figure : object
The scene which is modified.
%(azimuth)s
%(elevation)s
%(focalpoint)s
%(distance)s
%(roll)s
reset_camera : bool
If True, reset the camera properties beforehand.
"""
backend._set_3d_view(figure=figure, azimuth=azimuth,
elevation=elevation, focalpoint=focalpoint,
distance=distance, roll=roll,
reset_camera=reset_camera)
def set_3d_title(figure, title, size=40):
"""Configure the title of the given scene.
Parameters
----------
figure : object
The scene which is modified.
title : str
The title of the scene.
size : int
The size of the title.
"""
backend._set_3d_title(figure=figure, title=title, size=size)
def create_3d_figure(size, bgcolor=(0, 0, 0), smooth_shading=True,
handle=None, scene=True):
"""Return an empty figure based on the current 3d backend.
.. warning:: Proceed with caution when the renderer object is
returned (with ``scene=False``) because the _Renderer
API is not necessarily stable enough for production,
it's still actively in development.
Parameters
----------
size : tuple
The dimensions of the 3d figure (width, height).
bgcolor : tuple
The color of the background.
smooth_shading : bool
If True, smooth shading is enabled. Defaults to True.
handle : int | None
The figure identifier.
scene : bool
Specify if the returned object is the scene. If False,
the renderer object is returned. Defaults to True.
Returns
-------
figure : object
The requested empty scene or the renderer object if
``scene=False``.
"""
renderer = _get_renderer(
fig=handle,
size=size,
bgcolor=bgcolor,
smooth_shading=smooth_shading,
)
if scene:
return renderer.scene()
else:
return renderer
def close_3d_figure(figure):
"""Close the given scene.
Parameters
----------
figure : object
The scene which needs to be closed.
"""
backend._close_3d_figure(figure)
def close_all_3d_figures():
"""Close all the scenes of the current 3d backend."""
backend._close_all()
def get_brain_class():
"""Return the proper Brain class based on the current 3d backend.
Returns
-------
brain : object
The Brain class corresponding to the current 3d backend.
"""
from ...viz._brain import Brain
return Brain
|
the-stack_106_26773 | import requests
from pkg_resources import parse_version
def update_pypi_source(server: str) -> bool:
# Gets the latest version on PyPi accompanied by a source distribution
url = server + '/cvxpy/json'
r = requests.get(url)
if r.ok:
data = r.json()
releases = data["releases"]
versions = [
v for v in releases.keys() if 'sdist' in [rel['packagetype'] for rel in
releases[v]]]
versions.sort(key=parse_version)
remote_version = versions[-1]
import cvxpy
local_version = cvxpy.__version__
return parse_version(local_version) > parse_version(remote_version)
else:
        msg = 'The request to pypi returned status code ' + str(r.status_code)
raise RuntimeError(msg)
def map_runner_os_name_to_os(runner_os_name: str) -> str:
if runner_os_name.lower() == 'linux':
operating_system = 'linux'
elif runner_os_name.lower() in {'osx', 'macos'}:
operating_system = 'osx'
elif runner_os_name.lower() == 'win':
operating_system = 'win'
else:
raise Exception('Unknown runner_os: ' + runner_os_name)
return operating_system
def update_pypi_wheel(python_version: str, runner_os_name: str, server: str) -> bool:
# python_version is expected to be
#
# '2.7', '3.5', '3.6', '3.7', ... etc..
#
# runner_os_name is expected to be
#
# 'win' or 'osx' or 'macOS' or 'linux' or 'Linux'
#
# server is expected to be
#
# 'https://pypi.org/pypi' or 'https://test.pypi.org/pypi'
#
# our wheels are associated with an operating system and a python version.
# We need to check the file names of existing wheels on pypi to see how the
# current version of cvxpy compares to versions with the desired OS and
# desired python version.
#
# Here is an example filename in the official pypi format:
#
# cvxpy-1.0.24-cp36-cp36m-win_amd64.whl
#
# So we can check that the wheel contains the string given by "operating_system".
    # Checking that the file has a desired python version can be done with a string
# 'cp[MAJOR_VERSION][minor_version]' -- without the brackets.
url = server + '/cvxpy/json'
r = requests.get(url)
major_minor = python_version.split('.')
py_ver = 'cp' + major_minor[0] + major_minor[1]
operating_system = map_runner_os_name_to_os(runner_os_name)
if 'linux' in operating_system:
return False
if r.ok:
data = r.json()
relevant_versions = ['0.0.0']
for version, version_data in data['releases'].items():
# version is something like '1.0.24'
#
# version_data is a list of dicts, with one dict
# for each file (of this cvxpy version) hosted on pypi.
#
# pypi hosts source distributions, wheels, and eggs.
filenames = [file_data['filename'] for file_data in version_data]
for fn in filenames:
if py_ver in fn and operating_system in fn:
relevant_versions.append(version)
relevant_versions.sort(key=parse_version)
most_recent_remote = relevant_versions[-1]
import cvxpy
local_version = cvxpy.__version__
return parse_version(local_version) > parse_version(most_recent_remote)
else:
        msg = 'The request to pypi returned status code ' + str(r.status_code)
raise RuntimeError(msg)
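# Hedged usage sketch of the two checks above, e.g. from a CI release step.
# The Python version, runner OS and server URL are illustrative values, and
# both helpers import cvxpy, so cvxpy must be installed for this to run.
def _example_release_check():  # pragma: no cover
    need_sdist = update_pypi_source('https://pypi.org/pypi')
    need_wheel = update_pypi_wheel('3.9', 'macOS', 'https://pypi.org/pypi')
    return need_sdist or need_wheel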
|
the-stack_106_26774 | #!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EmailSettingsClient simplifies Email Settings API calls.
EmailSettingsClient extends gdata.client.GDClient to ease interaction with
the Google Apps Email Settings API. These interactions include the ability
to create labels, filters, aliases, and update web-clip, forwarding, POP,
IMAP, vacation-responder, signature, language, and general settings, and
retrieve labels, send-as, forwarding, pop, imap, vacation and signature
settings.
"""
__author__ = 'Claudio Cherubino <[email protected]>'
import urllib.request, urllib.parse, urllib.error
import gdata.apps.emailsettings.data
import gdata.client
# Email Settings URI template
# The strings in this template are eventually replaced with the API version,
# Google Apps domain name, username, and settingID, respectively.
EMAIL_SETTINGS_URI_TEMPLATE = '/a/feeds/emailsettings/%s/%s/%s/%s'
# The settingID value for the label requests
SETTING_ID_LABEL = 'label'
# The settingID value for the filter requests
SETTING_ID_FILTER = 'filter'
# The settingID value for the send-as requests
SETTING_ID_SENDAS = 'sendas'
# The settingID value for the webclip requests
SETTING_ID_WEBCLIP = 'webclip'
# The settingID value for the forwarding requests
SETTING_ID_FORWARDING = 'forwarding'
# The settingID value for the POP requests
SETTING_ID_POP = 'pop'
# The settingID value for the IMAP requests
SETTING_ID_IMAP = 'imap'
# The settingID value for the vacation responder requests
SETTING_ID_VACATION_RESPONDER = 'vacation'
# The settingID value for the signature requests
SETTING_ID_SIGNATURE = 'signature'
# The settingID value for the language requests
SETTING_ID_LANGUAGE = 'language'
# The settingID value for the general requests
SETTING_ID_GENERAL = 'general'
# The settingID value for the delegation requests
SETTING_ID_DELEGATION = 'delegation'
# The KEEP action for the email settings
ACTION_KEEP = 'KEEP'
# The ARCHIVE action for the email settings
ACTION_ARCHIVE = 'ARCHIVE'
# The DELETE action for the email settings
ACTION_DELETE = 'DELETE'
# The ALL_MAIL setting for POP enable_for property
POP_ENABLE_FOR_ALL_MAIL = 'ALL_MAIL'
# The MAIL_FROM_NOW_ON setting for POP enable_for property
POP_ENABLE_FOR_MAIL_FROM_NOW_ON = 'MAIL_FROM_NOW_ON'
class EmailSettingsClient(gdata.client.GDClient):
"""Client extension for the Google Email Settings API service.
Attributes:
host: string The hostname for the Email Settings API service.
api_version: string The version of the Email Settings API.
"""
host = 'apps-apis.google.com'
api_version = '2.0'
auth_service = 'apps'
auth_scopes = gdata.gauth.AUTH_SCOPES['apps']
ssl = True
def __init__(self, domain, auth_token=None, **kwargs):
"""Constructs a new client for the Email Settings API.
Args:
domain: string The Google Apps domain with Email Settings.
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
OAuthToken which authorizes this client to edit the email settings.
kwargs: The other parameters to pass to the gdata.client.GDClient
constructor.
"""
gdata.client.GDClient.__init__(self, auth_token=auth_token, **kwargs)
self.domain = domain
def make_email_settings_uri(self, username, setting_id):
"""Creates the URI for the Email Settings API call.
Using this client's Google Apps domain, create the URI to setup
email settings for the given user in that domain. If params are provided,
append them as GET params.
Args:
username: string The name of the user affected by this setting.
setting_id: string The key of the setting to be configured.
Returns:
A string giving the URI for Email Settings API calls for this client's
Google Apps domain.
"""
if '@' in username:
username, domain = username.split('@', 1)
else:
domain = self.domain
uri = EMAIL_SETTINGS_URI_TEMPLATE % (self.api_version, domain,
username, setting_id)
return uri
MakeEmailSettingsUri = make_email_settings_uri
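  # Illustrative example (values are assumptions, not from this module): with
  # a client constructed as EmailSettingsClient(domain='example.com', ...),
  #   self.make_email_settings_uri('jdoe', SETTING_ID_POP)
  # returns '/a/feeds/emailsettings/2.0/example.com/jdoe/pop', while
  #   self.make_email_settings_uri('jdoe@other.com', SETTING_ID_POP)
  # uses 'other.com' as the domain component instead.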
def create_label(self, username, name, **kwargs):
"""Creates a label with the given properties.
Args:
username: string The name of the user.
name: string The name of the label.
kwargs: The other parameters to pass to gdata.client.GDClient.post().
Returns:
gdata.apps.emailsettings.data.EmailSettingsLabel of the new resource.
"""
uri = self.MakeEmailSettingsUri(username=username,
setting_id=SETTING_ID_LABEL)
new_label = gdata.apps.emailsettings.data.EmailSettingsLabel(
uri=uri, name=name)
return self.post(new_label, uri, **kwargs)
CreateLabel = create_label
def retrieve_labels(self, username, **kwargs):
"""Retrieves email labels for the specified username
Args:
username: string The name of the user to get the labels for
Returns:
A gdata.data.GDFeed of the user's email labels
"""
uri = self.MakeEmailSettingsUri(username=username,
setting_id=SETTING_ID_LABEL)
return self.GetFeed(
uri,
auth_token=None,
query=None,
desired_class=gdata.apps.emailsettings.data.EmailSettingsLabelFeed,
**kwargs)
RetrieveLabels = retrieve_labels
def delete_label(self, username, label, **kwargs):
"""Delete a label from the specified account.
Args:
username: string Name of the user
label: string Name of the label to be deleted
Returns:
An atom.http_core.HttpResponse() with the result of the request
"""
uri = self.MakeEmailSettingsUri(username=username,
setting_id=SETTING_ID_LABEL)
uri = '/'.join([uri, urllib.parse.quote_plus(label)])
return self.delete(uri, **kwargs)
DeleteLabel = delete_label
def create_filter(self, username, from_address=None,
to_address=None, subject=None, has_the_word=None,
does_not_have_the_word=None, has_attachments=None,
label=None, mark_as_read=None, archive=None, **kwargs):
"""Creates a filter with the given properties.
Args:
username: string The name of the user.
from_address: string The source email address for the filter.
to_address: string (optional) The destination email address for
the filter.
subject: string (optional) The value the email must have in its
subject to be filtered.
has_the_word: string (optional) The value the email must have
in its subject or body to be filtered.
does_not_have_the_word: string (optional) The value the email
cannot have in its subject or body to be filtered.
has_attachments: string (optional) A boolean string representing
whether the email must have an attachment to be filtered.
label: string (optional) The name of the label to apply to
messages matching the filter criteria.
mark_as_read: Boolean (optional) Whether or not to mark
messages matching the filter criteria as read.
archive: Boolean (optional) Whether or not to move messages
matching to Archived state.
kwargs: The other parameters to pass to gdata.client.GDClient.post().
Returns:
gdata.apps.emailsettings.data.EmailSettingsFilter of the new resource.
"""
uri = self.MakeEmailSettingsUri(username=username,
setting_id=SETTING_ID_FILTER)
new_filter = gdata.apps.emailsettings.data.EmailSettingsFilter(
uri=uri, from_address=from_address,
to_address=to_address, subject=subject,
has_the_word=has_the_word,
does_not_have_the_word=does_not_have_the_word,
has_attachments=has_attachments, label=label,
mark_as_read=mark_as_read, archive=archive)
return self.post(new_filter, uri, **kwargs)
CreateFilter = create_filter
def create_send_as(self, username, name, address, reply_to=None,
make_default=None, **kwargs):
"""Creates a send-as alias with the given properties.
Args:
username: string The name of the user.
name: string The name that will appear in the "From" field.
address: string The email address that appears as the
origination address for emails sent by this user.
reply_to: string (optional) The address to be used as the reply-to
address in email sent using the alias.
make_default: Boolean (optional) Whether or not this alias should
become the default alias for this user.
kwargs: The other parameters to pass to gdata.client.GDClient.post().
Returns:
gdata.apps.emailsettings.data.EmailSettingsSendAsAlias of the
new resource.
"""
uri = self.MakeEmailSettingsUri(username=username,
setting_id=SETTING_ID_SENDAS)
new_alias = gdata.apps.emailsettings.data.EmailSettingsSendAsAlias(
uri=uri, name=name, address=address,
reply_to=reply_to, make_default=make_default)
return self.post(new_alias, uri, **kwargs)
CreateSendAs = create_send_as
def retrieve_send_as(self, username, **kwargs):
"""Retrieves send-as aliases for the specified username
Args:
username: string The name of the user to get the send-as for
Returns:
A gdata.data.GDFeed of the user's send-as alias settings
"""
uri = self.MakeEmailSettingsUri(username=username,
setting_id=SETTING_ID_SENDAS)
return self.GetFeed(
uri,
auth_token=None,
query=None,
desired_class=gdata.apps.emailsettings.data.EmailSettingsSendAsAliasFeed,
**kwargs)
RetrieveSendAs = retrieve_send_as
def update_webclip(self, username, enable, **kwargs):
"""Enable/Disable Google Mail web clip.
Args:
username: string The name of the user.
enable: Boolean Whether to enable showing Web clips.
kwargs: The other parameters to pass to the update method.
Returns:
gdata.apps.emailsettings.data.EmailSettingsWebClip of the
updated resource.
"""
uri = self.MakeEmailSettingsUri(username=username,
setting_id=SETTING_ID_WEBCLIP)
new_webclip = gdata.apps.emailsettings.data.EmailSettingsWebClip(
uri=uri, enable=enable)
return self.update(new_webclip, **kwargs)
UpdateWebclip = update_webclip
def update_forwarding(self, username, enable, forward_to=None,
action=None, **kwargs):
"""Update Google Mail Forwarding settings.
Args:
username: string The name of the user.
enable: Boolean Whether to enable incoming email forwarding.
      forward_to: string (optional) The address email will be forwarded to.
action: string (optional) The action to perform after forwarding
an email (ACTION_KEEP, ACTION_ARCHIVE, ACTION_DELETE).
kwargs: The other parameters to pass to the update method.
Returns:
gdata.apps.emailsettings.data.EmailSettingsForwarding of the
updated resource
"""
uri = self.MakeEmailSettingsUri(username=username,
setting_id=SETTING_ID_FORWARDING)
new_forwarding = gdata.apps.emailsettings.data.EmailSettingsForwarding(
uri=uri, enable=enable, forward_to=forward_to, action=action)
return self.update(new_forwarding, **kwargs)
UpdateForwarding = update_forwarding
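  # Hedged example (illustrative values only): enabling forwarding while
  # archiving Gmail's own copy of each forwarded message would look like
  #   client.UpdateForwarding(username='jdoe', enable=True,
  #                           forward_to='backup@example.com',
  #                           action=ACTION_ARCHIVE)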
def retrieve_forwarding(self, username, **kwargs):
"""Retrieves forwarding settings for the specified username
Args:
username: string The name of the user to get the forwarding settings for
Returns:
A gdata.data.GDEntry of the user's email forwarding settings
"""
uri = self.MakeEmailSettingsUri(username=username,
setting_id=SETTING_ID_FORWARDING)
return self.GetEntry(
uri,
auth_token=None,
query=None,
desired_class=gdata.apps.emailsettings.data.EmailSettingsForwarding,
**kwargs)
RetrieveForwarding = retrieve_forwarding
def update_pop(self, username, enable, enable_for=None, action=None,
**kwargs):
"""Update Google Mail POP settings.
Args:
username: string The name of the user.
enable: Boolean Whether to enable incoming POP3 access.
enable_for: string (optional) Whether to enable POP3 for all mail
(POP_ENABLE_FOR_ALL_MAIL), or mail from now on
(POP_ENABLE_FOR_MAIL_FROM_NOW_ON).
action: string (optional) What Google Mail should do with its copy
of the email after it is retrieved using POP (ACTION_KEEP,
ACTION_ARCHIVE, ACTION_DELETE).
kwargs: The other parameters to pass to the update method.
Returns:
gdata.apps.emailsettings.data.EmailSettingsPop of the updated resource.
"""
uri = self.MakeEmailSettingsUri(username=username,
setting_id=SETTING_ID_POP)
new_pop = gdata.apps.emailsettings.data.EmailSettingsPop(
uri=uri, enable=enable,
enable_for=enable_for, action=action)
return self.update(new_pop, **kwargs)
UpdatePop = update_pop
def retrieve_pop(self, username, **kwargs):
"""Retrieves POP settings for the specified username
Args:
username: string The name of the user to get the POP settings for
Returns:
A gdata.data.GDEntry of the user's POP settings
"""
uri = self.MakeEmailSettingsUri(username=username,
setting_id=SETTING_ID_POP)
return self.GetEntry(
uri,
auth_token=None,
query=None,
desired_class=gdata.apps.emailsettings.data.EmailSettingsPop,
**kwargs)
RetrievePop = retrieve_pop
def update_imap(self, username, enable, **kwargs):
"""Update Google Mail IMAP settings.
Args:
username: string The name of the user.
      enable: Boolean Whether to enable IMAP access.
kwargs: The other parameters to pass to the update method.
Returns:
gdata.apps.emailsettings.data.EmailSettingsImap of the updated resource.
"""
uri = self.MakeEmailSettingsUri(username=username,
setting_id=SETTING_ID_IMAP)
new_imap = gdata.apps.emailsettings.data.EmailSettingsImap(
uri=uri, enable=enable)
return self.update(new_imap, **kwargs)
UpdateImap = update_imap
def retrieve_imap(self, username, **kwargs):
"""Retrieves imap settings for the specified username
Args:
username: string The name of the user to get the imap settings for
Returns:
A gdata.data.GDEntry of the user's IMAP settings
"""
uri = self.MakeEmailSettingsUri(username=username,
setting_id=SETTING_ID_IMAP)
return self.GetEntry(
uri,
auth_token=None,
query=None,
desired_class=gdata.apps.emailsettings.data.EmailSettingsImap,
**kwargs)
RetrieveImap = retrieve_imap
def update_vacation(self, username, enable, subject=None, message=None,
start_date=None, end_date=None, contacts_only=None,
domain_only=None, **kwargs):
"""Update Google Mail vacation-responder settings.
Args:
username: string The name of the user.
enable: Boolean Whether to enable the vacation responder.
subject: string (optional) The subject line of the vacation responder
autoresponse.
message: string (optional) The message body of the vacation responder
autoresponse.
      start_date: string (optional) The start date of the vacation responder
          autoresponse.
      end_date: string (optional) The end date of the vacation responder
          autoresponse.
contacts_only: Boolean (optional) Whether to only send autoresponses
to known contacts.
domain_only: Boolean (optional) Whether to only send autoresponses
to users in the primary domain.
kwargs: The other parameters to pass to the update method.
Returns:
gdata.apps.emailsettings.data.EmailSettingsVacationResponder of the
updated resource.
"""
uri = self.MakeEmailSettingsUri(username=username,
setting_id=SETTING_ID_VACATION_RESPONDER)
new_vacation = gdata.apps.emailsettings.data.EmailSettingsVacationResponder(
uri=uri, enable=enable, subject=subject,
message=message, start_date=start_date, end_date=end_date,
contacts_only=contacts_only, domain_only=domain_only)
return self.update(new_vacation, **kwargs)
UpdateVacation = update_vacation
def retrieve_vacation(self, username, **kwargs):
"""Retrieves vacation settings for the specified username
Args:
username: string The name of the user to get the vacation settings for
Returns:
A gdata.data.GDEntry of the user's vacation auto-responder settings
"""
uri = self.MakeEmailSettingsUri(username=username,
setting_id=SETTING_ID_VACATION_RESPONDER)
return self.GetEntry(
uri,
auth_token=None,
query=None,
desired_class=
gdata.apps.emailsettings.data.EmailSettingsVacationResponder,
**kwargs)
RetrieveVacation = retrieve_vacation
def update_signature(self, username, signature, **kwargs):
"""Update Google Mail signature.
Args:
username: string The name of the user.
signature: string The signature to be appended to outgoing messages.
kwargs: The other parameters to pass to the update method.
Returns:
gdata.apps.emailsettings.data.EmailSettingsSignature of the
updated resource.
"""
uri = self.MakeEmailSettingsUri(username=username,
setting_id=SETTING_ID_SIGNATURE)
new_signature = gdata.apps.emailsettings.data.EmailSettingsSignature(
uri=uri, signature=signature)
return self.update(new_signature, **kwargs)
UpdateSignature = update_signature
def retrieve_signature(self, username, **kwargs):
"""Retrieves signature settings for the specified username
Args:
username: string The name of the user to get the signature settings for
Returns:
A gdata.data.GDEntry of the user's signature settings
"""
uri = self.MakeEmailSettingsUri(username=username,
setting_id=SETTING_ID_SIGNATURE)
return self.GetEntry(
uri,
auth_token=None,
query=None,
desired_class=gdata.apps.emailsettings.data.EmailSettingsSignature,
**kwargs)
RetrieveSignature = retrieve_signature
def update_language(self, username, language, **kwargs):
"""Update Google Mail language settings.
Args:
username: string The name of the user.
language: string The language tag for Google Mail's display language.
kwargs: The other parameters to pass to the update method.
Returns:
gdata.apps.emailsettings.data.EmailSettingsLanguage of the
updated resource.
"""
uri = self.MakeEmailSettingsUri(username=username,
setting_id=SETTING_ID_LANGUAGE)
new_language = gdata.apps.emailsettings.data.EmailSettingsLanguage(
uri=uri, language=language)
return self.update(new_language, **kwargs)
UpdateLanguage = update_language
def update_general_settings(self, username, page_size=None, shortcuts=None,
arrows=None, snippets=None, use_unicode=None,
**kwargs):
"""Update Google Mail general settings.
Args:
username: string The name of the user.
page_size: int (optional) The number of conversations to be shown per
page.
shortcuts: Boolean (optional) Whether to enable keyboard shortcuts.
arrows: Boolean (optional) Whether to display arrow-shaped personal
indicators next to email sent specifically to the user.
snippets: Boolean (optional) Whether to display snippets of the messages
in the inbox and when searching.
use_unicode: Boolean (optional) Whether to use UTF-8 (unicode) encoding
for all outgoing messages.
kwargs: The other parameters to pass to the update method.
Returns:
gdata.apps.emailsettings.data.EmailSettingsGeneral of the
updated resource.
"""
uri = self.MakeEmailSettingsUri(username=username,
setting_id=SETTING_ID_GENERAL)
new_general = gdata.apps.emailsettings.data.EmailSettingsGeneral(
uri=uri, page_size=page_size, shortcuts=shortcuts,
arrows=arrows, snippets=snippets, use_unicode=use_unicode)
return self.update(new_general, **kwargs)
UpdateGeneralSettings = update_general_settings
def add_email_delegate(self, username, address, **kwargs):
"""Add an email delegate to the mail account
Args:
username: string The name of the user
address: string The email address of the delegated account
"""
uri = self.MakeEmailSettingsUri(username=username,
setting_id=SETTING_ID_DELEGATION)
new_delegation = gdata.apps.emailsettings.data.EmailSettingsDelegation(
uri=uri, address=address)
return self.post(new_delegation, uri, **kwargs)
AddEmailDelegate = add_email_delegate
def retrieve_email_delegates(self, username, **kwargs):
"""Retrieve a feed of the email delegates for the specified username
Args:
username: string The name of the user to get the email delegates for
Returns:
A gdata.data.GDFeed of the user's email delegates
"""
uri = self.MakeEmailSettingsUri(username=username,
setting_id=SETTING_ID_DELEGATION)
return self.GetFeed(
uri,
auth_token=None,
query=None,
desired_class=gdata.apps.emailsettings.data.EmailSettingsDelegationFeed,
**kwargs)
RetrieveEmailDelegates = retrieve_email_delegates
def delete_email_delegate(self, username, address, **kwargs):
"""Delete an email delegate from the specified account
Args:
username: string The name of the user
address: string The email address of the delegated account
"""
uri = self.MakeEmailSettingsUri(username=username,
setting_id=SETTING_ID_DELEGATION)
uri = uri + '/' + address
return self.delete(uri, **kwargs)
DeleteEmailDelegate = delete_email_delegate
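# A minimal, hedged usage sketch of the client above. The domain, username
# and addresses are placeholders, and `auth_token` is assumed to be a valid
# gdata auth token obtained elsewhere; this helper is illustrative only.
def _example_email_settings_usage(auth_token):  # pragma: no cover
  client = EmailSettingsClient(domain='example.com', auth_token=auth_token)
  client.CreateLabel(username='jdoe', name='Archive-2014')
  client.UpdateForwarding(username='jdoe', enable=True,
                          forward_to='backup@example.com',
                          action=ACTION_ARCHIVE)
  return client.RetrieveLabels(username='jdoe')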
|
the-stack_106_26775 | # %%
# In VS Code, read the input from a text file and feed it to standard input
import sys
import os
f=open(r'.\chapter_2\C_input.txt', 'r', encoding="utf-8")
# Specify the input file by its path
# Files created on Windows 10 are saved as Shift-JIS, so the read has to specify utf-8 encoding
# (VS Code shows the input file as UTF-8, but the contents apparently end up as Shift-JIS anyway)
sys.stdin=f
#
# Input snippets
# num = int(input())
# num_list = [int(item) for item in input().split()]
# num_list = [input() for _ in range(3)]
##################################
# %%
# Everything below can be pasted (to the judge) as-is
import copy
N = int(input())
A = [[i for i in item] for item in input().split()]
B = copy.deepcopy(A)
def bubbleSort(A, N):
for i in range(N):
        for j in reversed(range(i, N-1)):
            if A[j][1] > A[j+1][1]:
                A[j], A[j+1] = A[j+1], A[j]
res = []
for item in A:
temp_res = ''
for i in item:
temp_res += i
res.append(temp_res)
return res
def selectionSort(A, N):
cnt = 0
for i in range(N):
minj = i
for j in range(i, N):
if A[j][1] < A[minj][1]:
minj = j
if i != minj:
A[i], A[minj] = A[minj], A[i]
res = []
for item in A:
temp_res = ''
for i in item:
temp_res += i
res.append(temp_res)
return res
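# Illustrative walk-through (a typical sample for this exercise, not taken
# from the actual judge input): for N = 5 and cards 'H4 C9 S4 D2 C3',
# bubble sort prints 'D2 C3 H4 S4 C9' and never swaps equal keys, so it is
# always stable, while selection sort prints 'D2 C3 S4 H4 C9'; since the two
# outputs differ, the verdict for the selection-sort result is 'Not stable'.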
A_res = bubbleSort(A, N)
B_res = selectionSort(B, N)
print(*A_res)
print('Stable')
print(*B_res)
if A_res == B_res:
print('Stable')
else:
print('Not stable') |
the-stack_106_26777 | # petname: library for generating human-readable, random names
# for objects (e.g. hostnames, containers, blobs)
#
# Copyright 2014 Dustin Kirkland <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
adjectives = [
"able",
"above",
"absolute",
"accepted",
"accurate",
"ace",
"active",
"actual",
"adapted",
"adapting",
"adequate",
"adjusted",
"advanced",
"alert",
"alive",
"allowed",
"allowing",
"amazed",
"amazing",
"ample",
"amused",
"amusing",
"apparent",
"apt",
"arriving",
"artistic",
"assured",
"assuring",
"awaited",
"awake",
"aware",
"balanced",
"becoming",
"beloved",
"better",
"big",
"blessed",
"bold",
"boss",
"brave",
"brief",
"bright",
"bursting",
"busy",
"calm",
"capable",
"capital",
"careful",
"caring",
"casual",
"causal",
"central",
"certain",
"champion",
"charmed",
"charming",
"cheerful",
"chief",
"choice",
"civil",
"classic",
"clean",
"clear",
"clever",
"climbing",
"close",
"closing",
"coherent",
"comic",
"communal",
"complete",
"composed",
"concise",
"concrete",
"content",
"cool",
"correct",
"cosmic",
"crack",
"creative",
"credible",
"crisp",
"crucial",
"cuddly",
"cunning",
"curious",
"current",
"cute",
"daring",
"darling",
"dashing",
"dear",
"decent",
"deciding",
"deep",
"definite",
"delicate",
"desired",
"destined",
"devoted",
"direct",
"discrete",
"distinct",
"diverse",
"divine",
"dominant",
"driven",
"driving",
"dynamic",
"eager",
"easy",
"electric",
"elegant",
"emerging",
"eminent",
"enabled",
"enabling",
"endless",
"engaged",
"engaging",
"enhanced",
"enjoyed",
"enormous",
"enough",
"epic",
"equal",
"equipped",
"eternal",
"ethical",
"evident",
"evolved",
"evolving",
"exact",
"excited",
"exciting",
"exotic",
"expert",
"factual",
"fair",
"faithful",
"famous",
"fancy",
"fast",
"feasible",
"fine",
"finer",
"firm",
"first",
"fit",
"fitting",
"fleet",
"flexible",
"flowing",
"fluent",
"flying",
"fond",
"frank",
"free",
"fresh",
"full",
"fun",
"funny",
"game",
"generous",
"gentle",
"genuine",
"giving",
"glad",
"glorious",
"glowing",
"golden",
"good",
"gorgeous",
"grand",
"grateful",
"great",
"growing",
"grown",
"guided",
"guiding",
"handy",
"happy",
"hardy",
"harmless",
"healthy",
"helped",
"helpful",
"helping",
"heroic",
"hip",
"holy",
"honest",
"hopeful",
"hot",
"huge",
"humane",
"humble",
"humorous",
"ideal",
"immense",
"immortal",
"immune",
"improved",
"in",
"included",
"infinite",
"informed",
"innocent",
"inspired",
"integral",
"intense",
"intent",
"internal",
"intimate",
"inviting",
"joint",
"just",
"keen",
"key",
"kind",
"knowing",
"known",
"large",
"lasting",
"leading",
"learning",
"legal",
"legible",
"lenient",
"liberal",
"light",
"liked",
"literate",
"live",
"living",
"logical",
"loved",
"loving",
"loyal",
"lucky",
"magical",
"magnetic",
"main",
"major",
"many",
"massive",
"master",
"mature",
"maximum",
"measured",
"meet",
"merry",
"mighty",
"mint",
"model",
"modern",
"modest",
"moral",
"more",
"moved",
"moving",
"musical",
"mutual",
"national",
"native",
"natural",
"nearby",
"neat",
"needed",
"neutral",
"new",
"next",
"nice",
"noble",
"normal",
"notable",
"noted",
"novel",
"obliging",
"on",
"one",
"open",
"optimal",
"optimum",
"organic",
"oriented",
"outgoing",
"patient",
"peaceful",
"perfect",
"pet",
"picked",
"pleasant",
"pleased",
"pleasing",
"poetic",
"polished",
"polite",
"popular",
"positive",
"possible",
"powerful",
"precious",
"precise",
"premium",
"prepared",
"present",
"pretty",
"primary",
"prime",
"pro",
"probable",
"profound",
"promoted",
"prompt",
"proper",
"proud",
"proven",
"pumped",
"pure",
"quality",
"quick",
"quiet",
"rapid",
"rare",
"rational",
"ready",
"real",
"refined",
"regular",
"related",
"relative",
"relaxed",
"relaxing",
"relevant",
"relieved",
"renewed",
"renewing",
"resolved",
"rested",
"rich",
"right",
"robust",
"romantic",
"ruling",
"sacred",
"safe",
"saved",
"saving",
"secure",
"select",
"selected",
"sensible",
"set",
"settled",
"settling",
"sharing",
"sharp",
"shining",
"simple",
"sincere",
"singular",
"skilled",
"smart",
"smashing",
"smiling",
"smooth",
"social",
"solid",
"sought",
"sound",
"special",
"splendid",
"square",
"stable",
"star",
"steady",
"sterling",
"still",
"stirred",
"stirring",
"striking",
"strong",
"stunning",
"subtle",
"suitable",
"suited",
"summary",
"sunny",
"super",
"superb",
"supreme",
"sure",
"sweeping",
"sweet",
"talented",
"teaching",
"tender",
"thankful",
"thorough",
"tidy",
"tight",
"together",
"tolerant",
"top",
"topical",
"tops",
"touched",
"touching",
"tough",
"true",
"trusted",
"trusting",
"trusty",
"ultimate",
"unbiased",
"uncommon",
"unified",
"unique",
"united",
"up",
"upright",
"upward",
"usable",
"useful",
"valid",
"valued",
"vast",
"verified",
"viable",
"vital",
"vocal",
"wanted",
"warm",
"wealthy",
"welcome",
"welcomed",
"well",
"whole",
"willing",
"winning",
"wired",
"wise",
"witty",
"wondrous",
"workable",
"working",
"worthy",
]
animals = [
"ox",
"ant",
"ape",
"asp",
"bat",
"bee",
"boa",
"bug",
"cat",
"cod",
"cow",
"cub",
"doe",
"dog",
"eel",
"eft",
"elf",
"elk",
"emu",
"ewe",
"fly",
"fox",
"gar",
"gnu",
"hen",
"hog",
"imp",
"jay",
"kid",
"kit",
"koi",
"lab",
"man",
"owl",
"pig",
"pug",
"pup",
"ram",
"rat",
"ray",
"yak",
"bass",
"bear",
"bird",
"boar",
"buck",
"bull",
"calf",
"chow",
"clam",
"colt",
"crab",
"crow",
"dane",
"deer",
"dodo",
"dory",
"dove",
"drum",
"duck",
"fawn",
"fish",
"flea",
"foal",
"fowl",
"frog",
"gnat",
"goat",
"grub",
"gull",
"hare",
"hawk",
"ibex",
"joey",
"kite",
"kiwi",
"lamb",
"lark",
"lion",
"loon",
"lynx",
"mako",
"mink",
"mite",
"mole",
"moth",
"mule",
"mutt",
"newt",
"orca",
"oryx",
"pika",
"pony",
"puma",
"seal",
"shad",
"slug",
"sole",
"stag",
"stud",
"swan",
"tahr",
"teal",
"tick",
"toad",
"tuna",
"wasp",
"wolf",
"worm",
"wren",
"yeti",
"adder",
"akita",
"alien",
"aphid",
"bison",
"boxer",
"bream",
"bunny",
"burro",
"camel",
"chimp",
"civet",
"cobra",
"coral",
"corgi",
"crane",
"dingo",
"drake",
"eagle",
"egret",
"filly",
"finch",
"gator",
"gecko",
"ghost",
"ghoul",
"goose",
"guppy",
"heron",
"hippo",
"horse",
"hound",
"husky",
"hyena",
"koala",
"krill",
"leech",
"lemur",
"liger",
"llama",
"louse",
"macaw",
"midge",
"molly",
"moose",
"moray",
"mouse",
"panda",
"perch",
"prawn",
"quail",
"racer",
"raven",
"rhino",
"robin",
"satyr",
"shark",
"sheep",
"shrew",
"skink",
"skunk",
"sloth",
"snail",
"snake",
"snipe",
"squid",
"stork",
"swift",
"swine",
"tapir",
"tetra",
"tiger",
"troll",
"trout",
"viper",
"wahoo",
"whale",
"zebra",
"alpaca",
"amoeba",
"baboon",
"badger",
"beagle",
"bedbug",
"beetle",
"bengal",
"bobcat",
"caiman",
"cattle",
"cicada",
"collie",
"condor",
"cougar",
"coyote",
"dassie",
"donkey",
"dragon",
"earwig",
"falcon",
"feline",
"ferret",
"gannet",
"gibbon",
"glider",
"goblin",
"gopher",
"grouse",
"guinea",
"hermit",
"hornet",
"iguana",
"impala",
"insect",
"jackal",
"jaguar",
"jennet",
"kitten",
"kodiak",
"lizard",
"locust",
"maggot",
"magpie",
"mammal",
"mantis",
"marlin",
"marmot",
"marten",
"martin",
"mayfly",
"minnow",
"monkey",
"mullet",
"muskox",
"ocelot",
"oriole",
"osprey",
"oyster",
"parrot",
"pigeon",
"piglet",
"poodle",
"possum",
"python",
"quagga",
"rabbit",
"raptor",
"rodent",
"roughy",
"salmon",
"sawfly",
"serval",
"shiner",
"shrimp",
"spider",
"sponge",
"tarpon",
"thrush",
"tomcat",
"toucan",
"turkey",
"turtle",
"urchin",
"vervet",
"walrus",
"weasel",
"weevil",
"wombat",
"anchovy",
"anemone",
"bluejay",
"buffalo",
"bulldog",
"buzzard",
"caribou",
"catfish",
"chamois",
"cheetah",
"chicken",
"chigger",
"cowbird",
"crappie",
"crawdad",
"cricket",
"dogfish",
"dolphin",
"firefly",
"garfish",
"gazelle",
"gelding",
"giraffe",
"gobbler",
"gorilla",
"goshawk",
"grackle",
"griffon",
"grizzly",
"grouper",
"gryphon",
"haddock",
"hagfish",
"halibut",
"hamster",
"herring",
"jackass",
"javelin",
"jawfish",
"jaybird",
"katydid",
"ladybug",
"lamprey",
"lemming",
"leopard",
"lioness",
"lobster",
"macaque",
"mallard",
"mammoth",
"manatee",
"mastiff",
"meerkat",
"mollusk",
"monarch",
"mongrel",
"monitor",
"monster",
"mudfish",
"muskrat",
"mustang",
"narwhal",
"oarfish",
"octopus",
"opossum",
"ostrich",
"panther",
"peacock",
"pegasus",
"pelican",
"penguin",
"phoenix",
"piranha",
"polecat",
"primate",
"quetzal",
"raccoon",
"rattler",
"redbird",
"redfish",
"reptile",
"rooster",
"sawfish",
"sculpin",
"seagull",
"skylark",
"snapper",
"spaniel",
"sparrow",
"sunbeam",
"sunbird",
"sunfish",
"tadpole",
"termite",
"terrier",
"unicorn",
"vulture",
"wallaby",
"walleye",
"warthog",
"whippet",
"wildcat",
"aardvark",
"airedale",
"albacore",
"anteater",
"antelope",
"arachnid",
"barnacle",
"basilisk",
"blowfish",
"bluebird",
"bluegill",
"bonefish",
"bullfrog",
"cardinal",
"chipmunk",
"cockatoo",
"crawfish",
"crayfish",
"dinosaur",
"doberman",
"duckling",
"elephant",
"escargot",
"flamingo",
"flounder",
"foxhound",
"glowworm",
"goldfish",
"grubworm",
"hedgehog",
"honeybee",
"hookworm",
"humpback",
"kangaroo",
"killdeer",
"kingfish",
"labrador",
"lacewing",
"ladybird",
"lionfish",
"longhorn",
"mackerel",
"malamute",
"marmoset",
"mastodon",
"moccasin",
"mongoose",
"monkfish",
"mosquito",
"pangolin",
"parakeet",
"pheasant",
"pipefish",
"platypus",
"polliwog",
"porpoise",
"reindeer",
"ringtail",
"sailfish",
"scorpion",
"seahorse",
"seasnail",
"sheepdog",
"shepherd",
"silkworm",
"squirrel",
"stallion",
"starfish",
"starling",
"stingray",
"stinkbug",
"sturgeon",
"terrapin",
"titmouse",
"tortoise",
"treefrog",
"werewolf",
"woodcock",
]
|
the-stack_106_26778 | #!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""This modules contains tests for VFS API handlers."""
import StringIO
import zipfile
from grr.gui import api_test_lib
from grr.gui.api_plugins import vfs as vfs_plugin
from grr.lib import access_control
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import flags
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib.aff4_objects import aff4_grr
from grr.lib.aff4_objects import users as aff4_users
from grr.lib.flows.general import filesystem
from grr.lib.flows.general import transfer
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import paths as rdf_paths
class VfsTestMixin(object):
"""A helper mixin providing methods to prepare files and flows for testing.
"""
time_0 = rdfvalue.RDFDatetime(42)
time_1 = time_0 + rdfvalue.Duration("1d")
time_2 = time_1 + rdfvalue.Duration("1d")
def CreateFileVersions(self, client_id, file_path):
"""Add a new version for a file."""
with test_lib.FakeTime(self.time_1):
token = access_control.ACLToken(username="test")
fd = aff4.FACTORY.Create(
client_id.Add(file_path),
aff4.AFF4MemoryStream,
mode="w",
token=token)
fd.Write("Hello World")
fd.Close()
with test_lib.FakeTime(self.time_2):
fd = aff4.FACTORY.Create(
client_id.Add(file_path),
aff4.AFF4MemoryStream,
mode="w",
token=token)
fd.Write("Goodbye World")
fd.Close()
def CreateRecursiveListFlow(self, client_id, token):
flow_args = filesystem.RecursiveListDirectoryArgs()
return flow.GRRFlow.StartFlow(
client_id=client_id,
flow_name="RecursiveListDirectory",
args=flow_args,
token=token)
def CreateMultiGetFileFlow(self, client_id, file_path, token):
pathspec = rdf_paths.PathSpec(
path=file_path, pathtype=rdf_paths.PathSpec.PathType.OS)
flow_args = transfer.MultiGetFileArgs(pathspecs=[pathspec])
return flow.GRRFlow.StartFlow(
client_id=client_id,
flow_name="MultiGetFile",
args=flow_args,
token=token)
class ApiGetFileDetailsHandlerTest(api_test_lib.ApiCallHandlerTest,
VfsTestMixin):
"""Test for ApiGetFileDetailsHandler."""
def setUp(self):
super(ApiGetFileDetailsHandlerTest, self).setUp()
self.handler = vfs_plugin.ApiGetFileDetailsHandler()
self.client_id = self.SetupClients(1)[0]
self.file_path = "fs/os/c/Downloads/a.txt"
self.CreateFileVersions(self.client_id, self.file_path)
def testRaisesOnEmptyPath(self):
args = vfs_plugin.ApiGetFileDetailsArgs(
client_id=self.client_id, file_path="")
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
def testRaisesOnRootPath(self):
args = vfs_plugin.ApiGetFileDetailsArgs(
client_id=self.client_id, file_path="/")
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
def testRaisesIfFirstComponentNotInWhitelist(self):
args = vfs_plugin.ApiGetFileDetailsArgs(
client_id=self.client_id, file_path="/analysis")
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
def testHandlerReturnsNewestVersionByDefault(self):
# Get file version without specifying a timestamp.
args = vfs_plugin.ApiGetFileDetailsArgs(
client_id=self.client_id, file_path=self.file_path)
result = self.handler.Handle(args, token=self.token)
# Should return the newest version.
self.assertEqual(result.file.path, self.file_path)
self.assertAlmostEqual(
result.file.age, self.time_2, delta=rdfvalue.Duration("1s"))
def testHandlerReturnsClosestSpecificVersion(self):
# Get specific version.
args = vfs_plugin.ApiGetFileDetailsArgs(
client_id=self.client_id,
file_path=self.file_path,
timestamp=self.time_1)
result = self.handler.Handle(args, token=self.token)
# The age of the returned version might have a slight deviation.
self.assertEqual(result.file.path, self.file_path)
self.assertAlmostEqual(
result.file.age, self.time_1, delta=rdfvalue.Duration("1s"))
def testResultIncludesDetails(self):
"""Checks if the details include certain attributes.
Instead of using a (fragile) regression test, we enumerate important
attributes here and make sure they are returned.
"""
args = vfs_plugin.ApiGetFileDetailsArgs(
client_id=self.client_id, file_path=self.file_path)
result = self.handler.Handle(args, token=self.token)
attributes_by_type = {}
attributes_by_type["AFF4MemoryStream"] = ["CONTENT"]
attributes_by_type["AFF4MemoryStreamBase"] = ["SIZE"]
attributes_by_type["AFF4Object"] = ["LAST", "SUBJECT", "TYPE"]
details = result.file.details
for type_name, attrs in attributes_by_type.iteritems():
type_obj = next(t for t in details.types if t.name == type_name)
all_attrs = set([a.name for a in type_obj.attributes])
self.assertTrue(set(attrs).issubset(all_attrs))
class ApiListFilesHandlerTest(api_test_lib.ApiCallHandlerTest, VfsTestMixin):
"""Test for ApiListFilesHandler."""
def setUp(self):
super(ApiListFilesHandlerTest, self).setUp()
self.handler = vfs_plugin.ApiListFilesHandler()
self.client_id = self.SetupClients(1)[0]
self.file_path = "fs/os/etc"
def testDoesNotRaiseIfFirstCompomentIsEmpty(self):
args = vfs_plugin.ApiListFilesArgs(client_id=self.client_id, file_path="")
self.handler.Handle(args, token=self.token)
def testDoesNotRaiseIfPathIsRoot(self):
args = vfs_plugin.ApiListFilesArgs(client_id=self.client_id, file_path="/")
self.handler.Handle(args, token=self.token)
def testRaisesIfFirstComponentIsNotWhitelisted(self):
args = vfs_plugin.ApiListFilesArgs(
client_id=self.client_id, file_path="/analysis")
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
def testHandlerListsFilesAndDirectories(self):
test_lib.ClientFixture(self.client_id, token=self.token)
# Fetch all children of a directory.
args = vfs_plugin.ApiListFilesArgs(
client_id=self.client_id, file_path=self.file_path)
result = self.handler.Handle(args, token=self.token)
self.assertEqual(len(result.items), 4)
for item in result.items:
# Check that all files are really in the right directory.
self.assertIn(self.file_path, item.path)
def testHandlerFiltersDirectoriesIfFlagIsSet(self):
test_lib.ClientFixture(self.client_id, token=self.token)
# Only fetch sub-directories.
args = vfs_plugin.ApiListFilesArgs(
client_id=self.client_id,
file_path=self.file_path,
directories_only=True)
result = self.handler.Handle(args, token=self.token)
self.assertEqual(len(result.items), 1)
self.assertEqual(result.items[0].is_directory, True)
self.assertIn(self.file_path, result.items[0].path)
class ApiGetFileTextHandlerTest(api_test_lib.ApiCallHandlerTest, VfsTestMixin):
"""Test for ApiGetFileTextHandler."""
def setUp(self):
super(ApiGetFileTextHandlerTest, self).setUp()
self.handler = vfs_plugin.ApiGetFileTextHandler()
self.client_id = self.SetupClients(1)[0]
self.file_path = "fs/os/c/Downloads/a.txt"
self.CreateFileVersions(self.client_id, self.file_path)
def testRaisesOnEmptyPath(self):
args = vfs_plugin.ApiGetFileTextArgs(client_id=self.client_id, file_path="")
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
def testRaisesOnRootPath(self):
args = vfs_plugin.ApiGetFileTextArgs(
client_id=self.client_id, file_path="/")
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
def testRaisesIfFirstComponentNotInWhitelist(self):
args = vfs_plugin.ApiGetFileTextArgs(
client_id=self.client_id, file_path="/analysis")
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
def testDifferentTimestampsYieldDifferentFileContents(self):
args = vfs_plugin.ApiGetFileTextArgs(
client_id=self.client_id,
file_path=self.file_path,
encoding=vfs_plugin.ApiGetFileTextArgs.Encoding.UTF_8)
# Retrieving latest version by not setting a timestamp.
result = self.handler.Handle(args, token=self.token)
self.assertEqual(result.content, "Goodbye World")
self.assertEqual(result.total_size, 13)
# Change timestamp to get a different file version.
args.timestamp = self.time_1
result = self.handler.Handle(args, token=self.token)
self.assertEqual(result.content, "Hello World")
self.assertEqual(result.total_size, 11)
def testEncodingChangesResult(self):
args = vfs_plugin.ApiGetFileTextArgs(
client_id=self.client_id,
file_path=self.file_path,
encoding=vfs_plugin.ApiGetFileTextArgs.Encoding.UTF_16)
# Retrieving latest version by not setting a timestamp.
result = self.handler.Handle(args, token=self.token)
self.assertNotEqual(result.content, "Goodbye World")
self.assertEqual(result.total_size, 13)
class ApiGetFileBlobHandlerTest(api_test_lib.ApiCallHandlerTest, VfsTestMixin):
def setUp(self):
super(ApiGetFileBlobHandlerTest, self).setUp()
self.handler = vfs_plugin.ApiGetFileBlobHandler()
self.client_id = self.SetupClients(1)[0]
self.file_path = "fs/os/c/Downloads/a.txt"
self.CreateFileVersions(self.client_id, self.file_path)
def testRaisesOnEmptyPath(self):
args = vfs_plugin.ApiGetFileBlobArgs(client_id=self.client_id, file_path="")
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
def testRaisesOnRootPath(self):
args = vfs_plugin.ApiGetFileBlobArgs(
client_id=self.client_id, file_path="/")
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
def testRaisesIfFirstComponentNotInWhitelist(self):
args = vfs_plugin.ApiGetFileBlobArgs(
client_id=self.client_id, file_path="/analysis")
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
def testNewestFileContentIsReturnedByDefault(self):
args = vfs_plugin.ApiGetFileBlobArgs(
client_id=self.client_id, file_path=self.file_path)
result = self.handler.Handle(args, token=self.token)
self.assertTrue(hasattr(result, "GenerateContent"))
self.assertEqual(next(result.GenerateContent()), "Goodbye World")
def testOffsetAndLengthRestrictResult(self):
args = vfs_plugin.ApiGetFileBlobArgs(
client_id=self.client_id, file_path=self.file_path, offset=2, length=3)
result = self.handler.Handle(args, token=self.token)
self.assertTrue(hasattr(result, "GenerateContent"))
self.assertEqual(next(result.GenerateContent()), "odb")
def testReturnsOlderVersionIfTimestampIsSupplied(self):
args = vfs_plugin.ApiGetFileBlobArgs(
client_id=self.client_id,
file_path=self.file_path,
timestamp=self.time_1)
result = self.handler.Handle(args, token=self.token)
self.assertTrue(hasattr(result, "GenerateContent"))
self.assertEqual(next(result.GenerateContent()), "Hello World")
def testLargeFileIsReturnedInMultipleChunks(self):
chars = ["a", "b", "x"]
huge_file_path = "fs/os/c/Downloads/huge.txt"
# Overwrite CHUNK_SIZE in handler for smaller test streams.
self.handler.CHUNK_SIZE = 5
# Create a file that requires several chunks to load.
with aff4.FACTORY.Create(
self.client_id.Add(huge_file_path),
aff4.AFF4MemoryStream,
mode="w",
token=self.token) as fd:
for char in chars:
fd.Write(char * self.handler.CHUNK_SIZE)
args = vfs_plugin.ApiGetFileBlobArgs(
client_id=self.client_id, file_path=huge_file_path)
result = self.handler.Handle(args, token=self.token)
self.assertTrue(hasattr(result, "GenerateContent"))
for chunk, char in zip(result.GenerateContent(), chars):
self.assertEqual(chunk, char * self.handler.CHUNK_SIZE)
class ApiGetFileVersionTimesHandlerTest(api_test_lib.ApiCallHandlerTest,
VfsTestMixin):
def setUp(self):
super(ApiGetFileVersionTimesHandlerTest, self).setUp()
self.client_id = self.SetupClients(1)[0]
self.handler = vfs_plugin.ApiGetFileVersionTimesHandler()
def testRaisesOnEmptyPath(self):
args = vfs_plugin.ApiGetFileVersionTimesArgs(
client_id=self.client_id, file_path="")
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
def testRaisesOnRootPath(self):
args = vfs_plugin.ApiGetFileVersionTimesArgs(
client_id=self.client_id, file_path="/")
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
def testRaisesIfFirstComponentNotInWhitelist(self):
args = vfs_plugin.ApiGetFileVersionTimesArgs(
client_id=self.client_id, file_path="/analysis")
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
class ApiGetFileDownloadCommandHandlerTest(api_test_lib.ApiCallHandlerTest,
VfsTestMixin):
def setUp(self):
super(ApiGetFileDownloadCommandHandlerTest, self).setUp()
self.client_id = self.SetupClients(1)[0]
self.handler = vfs_plugin.ApiGetFileDownloadCommandHandler()
def testRaisesOnEmptyPath(self):
args = vfs_plugin.ApiGetFileDownloadCommandArgs(
client_id=self.client_id, file_path="")
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
def testRaisesOnRootPath(self):
args = vfs_plugin.ApiGetFileDownloadCommandArgs(
client_id=self.client_id, file_path="/")
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
def testRaisesIfFirstComponentNotInWhitelist(self):
args = vfs_plugin.ApiGetFileDownloadCommandArgs(
client_id=self.client_id, file_path="/analysis")
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
class ApiCreateVfsRefreshOperationHandlerTest(api_test_lib.ApiCallHandlerTest):
"""Test for ApiCreateVfsRefreshOperationHandler."""
def setUp(self):
super(ApiCreateVfsRefreshOperationHandlerTest, self).setUp()
self.handler = vfs_plugin.ApiCreateVfsRefreshOperationHandler()
self.client_id = self.SetupClients(1)[0]
# Choose some directory with pathspec in the ClientFixture.
self.file_path = "fs/os/Users/Shared"
def testRaisesOnEmptyPath(self):
args = vfs_plugin.ApiCreateVfsRefreshOperationArgs(
client_id=self.client_id, file_path="")
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
def testRaisesOnRootPath(self):
args = vfs_plugin.ApiCreateVfsRefreshOperationArgs(
client_id=self.client_id, file_path="/")
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
def testRaisesIfFirstComponentNotInWhitelist(self):
args = vfs_plugin.ApiCreateVfsRefreshOperationArgs(
client_id=self.client_id, file_path="/analysis")
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
def testHandlerRefreshStartsListDirectoryFlow(self):
test_lib.ClientFixture(self.client_id, token=self.token)
args = vfs_plugin.ApiCreateVfsRefreshOperationArgs(
client_id=self.client_id, file_path=self.file_path, max_depth=1)
result = self.handler.Handle(args, token=self.token)
# Check returned operation_id to references a ListDirectory flow.
flow_obj = aff4.FACTORY.Open(result.operation_id, token=self.token)
self.assertEqual(flow_obj.Get(flow_obj.Schema.TYPE), "ListDirectory")
def testHandlerRefreshStartsRecursiveListDirectoryFlow(self):
test_lib.ClientFixture(self.client_id, token=self.token)
args = vfs_plugin.ApiCreateVfsRefreshOperationArgs(
client_id=self.client_id, file_path=self.file_path, max_depth=5)
result = self.handler.Handle(args, token=self.token)
# Check returned operation_id to references a RecursiveListDirectory flow.
flow_obj = aff4.FACTORY.Open(result.operation_id, token=self.token)
self.assertEqual(
flow_obj.Get(flow_obj.Schema.TYPE), "RecursiveListDirectory")
def testNotificationIsSent(self):
test_lib.ClientFixture(self.client_id, token=self.token)
args = vfs_plugin.ApiCreateVfsRefreshOperationArgs(
client_id=self.client_id,
file_path=self.file_path,
max_depth=0,
notify_user=True)
result = self.handler.Handle(args, token=self.token)
# Finish flow and check if there are any new notifications.
flow_urn = rdfvalue.RDFURN(result.operation_id)
client_mock = action_mocks.ActionMock()
for _ in test_lib.TestFlowHelper(
flow_urn,
client_mock,
client_id=self.client_id,
token=self.token,
check_flow_errors=False):
pass
# Get pending notifications and check the newest one.
user_record = aff4.FACTORY.Open(
aff4.ROOT_URN.Add("users").Add(self.token.username),
aff4_type=aff4_users.GRRUser,
mode="r",
token=self.token)
pending_notifications = user_record.Get(
user_record.Schema.PENDING_NOTIFICATIONS)
self.assertIn("Recursive Directory Listing complete",
pending_notifications[0].message)
self.assertEqual(pending_notifications[0].source, str(flow_urn))
class ApiGetVfsRefreshOperationStateHandlerTest(api_test_lib.ApiCallHandlerTest,
VfsTestMixin):
"""Test for GetVfsRefreshOperationStateHandler."""
def setUp(self):
super(ApiGetVfsRefreshOperationStateHandlerTest, self).setUp()
self.handler = vfs_plugin.ApiGetVfsRefreshOperationStateHandler()
self.client_id = self.SetupClients(1)[0]
def testHandlerReturnsCorrectStateForFlow(self):
# Create a mock refresh operation.
self.flow_urn = self.CreateRecursiveListFlow(self.client_id, self.token)
args = vfs_plugin.ApiGetVfsRefreshOperationStateArgs(
operation_id=str(self.flow_urn))
# Flow was started and should be running.
result = self.handler.Handle(args, token=self.token)
self.assertEqual(result.state, "RUNNING")
# Terminate flow.
with aff4.FACTORY.Open(
self.flow_urn, aff4_type=flow.GRRFlow, mode="rw",
token=self.token) as flow_obj:
flow_obj.GetRunner().Error("Fake error")
# Recheck status and see if it changed.
result = self.handler.Handle(args, token=self.token)
self.assertEqual(result.state, "FINISHED")
def testHandlerThrowsExceptionOnArbitraryFlowId(self):
# Create a mock flow.
self.flow_urn = flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="Interrogate", token=self.token)
args = vfs_plugin.ApiGetVfsRefreshOperationStateArgs(
operation_id=str(self.flow_urn))
# Our mock flow is not a RecursiveListFlow, so an error should be raised.
with self.assertRaises(vfs_plugin.VfsRefreshOperationNotFoundError):
self.handler.Handle(args, token=self.token)
def testHandlerThrowsExceptionOnUnknownFlowId(self):
# Create args with an operation id not referencing any flow.
args = vfs_plugin.ApiGetVfsRefreshOperationStateArgs(
operation_id="F:12345678")
# Our mock flow can't be read, so an error should be raised.
with self.assertRaises(vfs_plugin.VfsRefreshOperationNotFoundError):
self.handler.Handle(args, token=self.token)
class ApiUpdateVfsFileContentHandlerTest(api_test_lib.ApiCallHandlerTest):
"""Test for ApiUpdateVfsFileContentHandler."""
def setUp(self):
super(ApiUpdateVfsFileContentHandlerTest, self).setUp()
self.handler = vfs_plugin.ApiUpdateVfsFileContentHandler()
self.client_id = self.SetupClients(1)[0]
self.file_path = "fs/os/c/bin/bash"
def testRaisesOnEmptyPath(self):
args = vfs_plugin.ApiUpdateVfsFileContentArgs(
client_id=self.client_id, file_path="")
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
def testRaisesOnRootPath(self):
args = vfs_plugin.ApiUpdateVfsFileContentArgs(
client_id=self.client_id, file_path="/")
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
def testRaisesIfFirstComponentNotInWhitelist(self):
args = vfs_plugin.ApiUpdateVfsFileContentArgs(
client_id=self.client_id, file_path="/analysis")
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
def testHandlerStartsFlow(self):
test_lib.ClientFixture(self.client_id, token=self.token)
args = vfs_plugin.ApiUpdateVfsFileContentArgs(
client_id=self.client_id, file_path=self.file_path)
result = self.handler.Handle(args, token=self.token)
# Check returned operation_id to references a MultiGetFile flow.
flow_obj = aff4.FACTORY.Open(result.operation_id, token=self.token)
self.assertEqual(flow_obj.Get(flow_obj.Schema.TYPE), "MultiGetFile")
class ApiGetVfsFileContentUpdateStateHandlerTest(
api_test_lib.ApiCallHandlerTest, VfsTestMixin):
"""Test for ApiGetVfsFileContentUpdateStateHandler."""
def setUp(self):
super(ApiGetVfsFileContentUpdateStateHandlerTest, self).setUp()
self.handler = vfs_plugin.ApiGetVfsFileContentUpdateStateHandler()
self.client_id = self.SetupClients(1)[0]
def testHandlerReturnsCorrectStateForFlow(self):
# Create a mock refresh operation.
self.flow_urn = self.CreateMultiGetFileFlow(
self.client_id, file_path="fs/os/c/bin/bash", token=self.token)
args = vfs_plugin.ApiGetVfsFileContentUpdateStateArgs(
operation_id=str(self.flow_urn))
# Flow was started and should be running.
result = self.handler.Handle(args, token=self.token)
self.assertEqual(result.state, "RUNNING")
# Terminate flow.
with aff4.FACTORY.Open(
self.flow_urn, aff4_type=flow.GRRFlow, mode="rw",
token=self.token) as flow_obj:
flow_obj.GetRunner().Error("Fake error")
# Recheck status and see if it changed.
result = self.handler.Handle(args, token=self.token)
self.assertEqual(result.state, "FINISHED")
def testHandlerRaisesOnArbitraryFlowId(self):
# Create a mock flow.
self.flow_urn = flow.GRRFlow.StartFlow(
client_id=self.client_id, flow_name="Interrogate", token=self.token)
args = vfs_plugin.ApiGetVfsFileContentUpdateStateArgs(
operation_id=str(self.flow_urn))
# Our mock flow is not a MultiGetFile flow, so an error should be raised.
with self.assertRaises(vfs_plugin.VfsFileContentUpdateNotFoundError):
self.handler.Handle(args, token=self.token)
def testHandlerThrowsExceptionOnUnknownFlowId(self):
# Create args with an operation id not referencing any flow.
args = vfs_plugin.ApiGetVfsRefreshOperationStateArgs(
operation_id="F:12345678")
# Our mock flow can't be read, so an error should be raised.
with self.assertRaises(vfs_plugin.VfsFileContentUpdateNotFoundError):
self.handler.Handle(args, token=self.token)
class VfsTimelineTestMixin(object):
"""A helper mixin providing methods to prepare timelines for testing.
"""
def SetupTestTimeline(self):
self.client_id = self.SetupClients(1)[0]
test_lib.ClientFixture(self.client_id, token=self.token)
# Choose some directory with pathspec in the ClientFixture.
self.folder_path = "fs/os/Users/中国新闻网新闻中/Shared"
self.file_path = self.folder_path + "/a.txt"
file_urn = self.client_id.Add(self.file_path)
for i in range(0, 5):
with test_lib.FakeTime(i):
with aff4.FACTORY.Create(
file_urn, aff4_grr.VFSAnalysisFile, mode="w",
token=self.token) as fd:
stats = rdf_client.StatEntry(
st_mtime=rdfvalue.RDFDatetimeSeconds().Now())
fd.Set(fd.Schema.STAT, stats)
class ApiGetVfsTimelineAsCsvHandlerTest(api_test_lib.ApiCallHandlerTest,
VfsTimelineTestMixin):
def setUp(self):
super(ApiGetVfsTimelineAsCsvHandlerTest, self).setUp()
self.handler = vfs_plugin.ApiGetVfsTimelineAsCsvHandler()
self.SetupTestTimeline()
def testRaisesOnEmptyPath(self):
args = vfs_plugin.ApiGetVfsTimelineAsCsvArgs(
client_id=self.client_id, file_path="")
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
def testRaisesOnRootPath(self):
args = vfs_plugin.ApiGetVfsTimelineAsCsvArgs(
client_id=self.client_id, file_path="/")
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
def testRaisesIfFirstComponentNotInWhitelist(self):
args = vfs_plugin.ApiGetVfsTimelineAsCsvArgs(
client_id=self.client_id, file_path="/analysis")
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
def testTimelineIsReturnedInChunks(self):
# Change chunk size to see if the handler behaves correctly.
self.handler.CHUNK_SIZE = 1
args = vfs_plugin.ApiGetVfsTimelineAsCsvArgs(
client_id=self.client_id, file_path=self.folder_path)
result = self.handler.Handle(args, token=self.token)
# Check rows returned correctly.
self.assertTrue(hasattr(result, "GenerateContent"))
for i in reversed(range(0, 5)):
with test_lib.FakeTime(i):
next_chunk = next(result.GenerateContent()).strip()
timestamp = rdfvalue.RDFDatetime.Now()
if i == 4: # The first row includes the column headings.
self.assertEqual(next_chunk,
"Timestamp,Datetime,Message,Timestamp_desc\r\n"
"%d,%s,%s,MODIFICATION" %
(timestamp.AsMicroSecondsFromEpoch(), str(timestamp),
self.file_path))
else:
self.assertEqual(next_chunk, "%d,%s,%s,MODIFICATION" %
(timestamp.AsMicroSecondsFromEpoch(), str(timestamp),
self.file_path))
  def testEmptyTimelineIsReturnedOnNonexistentPath(self):
    args = vfs_plugin.ApiGetVfsTimelineAsCsvArgs(
        client_id=self.client_id, file_path="fs/non-existent/file/path")
result = self.handler.Handle(args, token=self.token)
self.assertTrue(hasattr(result, "GenerateContent"))
with self.assertRaises(StopIteration):
next(result.GenerateContent())
class ApiGetVfsTimelineHandlerTest(api_test_lib.ApiCallHandlerTest,
VfsTimelineTestMixin):
def setUp(self):
super(ApiGetVfsTimelineHandlerTest, self).setUp()
self.handler = vfs_plugin.ApiGetVfsTimelineHandler()
self.SetupTestTimeline()
def testRaisesOnEmptyPath(self):
args = vfs_plugin.ApiGetVfsTimelineArgs(
client_id=self.client_id, file_path="")
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
def testRaisesOnRootPath(self):
args = vfs_plugin.ApiGetVfsTimelineArgs(
client_id=self.client_id, file_path="/")
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
def testRaisesIfFirstComponentNotInWhitelist(self):
args = vfs_plugin.ApiGetVfsTimelineArgs(
client_id=self.client_id, file_path="/analysis")
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
class ApiGetVfsFilesArchiveHandlerTest(api_test_lib.ApiCallHandlerTest,
VfsTestMixin):
"""Tests for ApiGetVfsFileArchiveHandler."""
def setUp(self):
super(ApiGetVfsFilesArchiveHandlerTest, self).setUp()
self.handler = vfs_plugin.ApiGetVfsFilesArchiveHandler()
self.client_id = self.SetupClients(1)[0]
self.CreateFileVersions(self.client_id, "fs/os/c/Downloads/a.txt")
self.CreateFileVersions(self.client_id, "fs/os/c/b.txt")
def testGeneratesZipArchiveWhenPathIsNotPassed(self):
archive_path1 = "vfs_C_1000000000000000/fs/os/c/Downloads/a.txt"
archive_path2 = "vfs_C_1000000000000000/fs/os/c/b.txt"
result = self.handler.Handle(
vfs_plugin.ApiGetVfsFilesArchiveArgs(client_id=self.client_id),
token=self.token)
out_fd = StringIO.StringIO()
for chunk in result.GenerateContent():
out_fd.write(chunk)
zip_fd = zipfile.ZipFile(out_fd, "r")
self.assertEqual(
set(zip_fd.namelist()), set([archive_path1, archive_path2]))
for path in [archive_path1, archive_path2]:
contents = zip_fd.read(path)
self.assertEqual(contents, "Goodbye World")
def testFiltersArchivedFilesByPath(self):
archive_path = ("vfs_C_1000000000000000_fs_os_c_Downloads/"
"fs/os/c/Downloads/a.txt")
result = self.handler.Handle(
vfs_plugin.ApiGetVfsFilesArchiveArgs(
client_id=self.client_id, file_path="fs/os/c/Downloads"),
token=self.token)
out_fd = StringIO.StringIO()
for chunk in result.GenerateContent():
out_fd.write(chunk)
zip_fd = zipfile.ZipFile(out_fd, "r")
self.assertEqual(zip_fd.namelist(), [archive_path])
contents = zip_fd.read(archive_path)
self.assertEqual(contents, "Goodbye World")
def testNonExistentPathGeneratesEmptyArchive(self):
result = self.handler.Handle(
vfs_plugin.ApiGetVfsFilesArchiveArgs(
client_id=self.client_id, file_path="fs/os/blah/blah"),
token=self.token)
out_fd = StringIO.StringIO()
for chunk in result.GenerateContent():
out_fd.write(chunk)
zip_fd = zipfile.ZipFile(out_fd, "r")
self.assertEqual(zip_fd.namelist(), [])
def testInvalidPathTriggersException(self):
with self.assertRaises(ValueError):
self.handler.Handle(
vfs_plugin.ApiGetVfsFilesArchiveArgs(
client_id=self.client_id, file_path="invalid-prefix/path"),
token=self.token)
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
|
the-stack_106_26779 | import torch
import torch.nn as nn
import torch.optim as optim
import random
import numpy as np
import os, time, copy, sys
def pick_best_model_acc(model, best_model, epoch, v_acc, best_acc, checkpoint_folder, model_name="a"):
    """Keep and checkpoint the model with the highest validation accuracy seen so far."""
    if v_acc > best_acc:
        best_acc = v_acc
        best_model = copy.deepcopy(model)
        print(f"best model of {model_name} in epoch", epoch + 1)
        save_checkpoint({'epoch': epoch + 1,
                         'state_dict': best_model.state_dict(),
                         },
                        filename=os.path.join(checkpoint_folder, f'model_{model_name}.pth'))
    return best_acc, best_model
def save_checkpoint(state, is_best=0, filename='models/checkpoint.pth.tar'):
    """Serialize a checkpoint dict to disk (is_best is accepted for API compatibility but unused)."""
    torch.save(state, filename)
def pick_best_model(model, best_model, epoch, v_loss, best_loss, checkpoint_folder, model_name="a"):
    """Keep and checkpoint the model with the lowest validation loss seen so far."""
    if v_loss < best_loss:
        best_loss = v_loss
        best_model = copy.deepcopy(model)
        print(f"best model of {model_name} in epoch", epoch + 1)
        save_checkpoint({'epoch': epoch + 1,
                         'state_dict': best_model.state_dict(),
                         },
                        filename=os.path.join(checkpoint_folder, f'model_{model_name}.pth'))
    return best_loss, best_model
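# Illustrative usage sketch (not part of the original utilities): how these
# helpers are typically wired into a validation loop. `my_model`, `val_acc`
# and `checkpoint_dir` are hypothetical placeholders, not names defined above.
def _example_usage(my_model, num_epochs, checkpoint_dir):
    best_acc = 0.0
    best_model = copy.deepcopy(my_model)
    for epoch in range(num_epochs):
        val_acc = 0.0  # placeholder: compute validation accuracy for this epoch
        best_acc, best_model = pick_best_model_acc(
            my_model, best_model, epoch, val_acc, best_acc,
            checkpoint_dir, model_name="demo")
    return best_acc, best_model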
|
the-stack_106_26781 | # encoding: utf-8
"""
@author: xingyu liao
@contact: [email protected]
"""
import argparse
import glob
import os
import sys
import cv2
import numpy as np
# import tqdm
sys.path.append("/home/zsy/runtimelib-tensorrt-tiny/build")
import pytrt
def get_parser():
parser = argparse.ArgumentParser(description="trt model inference")
parser.add_argument(
"--model-path",
default="outputs/trt_model/baseline.engine",
help="trt model path"
)
parser.add_argument(
"--input",
nargs="+",
help="A list of space separated input images; "
"or a single glob pattern such as 'directory/*.jpg'",
)
parser.add_argument(
"--output",
default="trt_output",
help="path to save trt model inference results"
)
parser.add_argument(
"--output-name",
help="tensorRT model output name"
)
parser.add_argument(
"--height",
type=int,
default=256,
help="height of image"
)
parser.add_argument(
"--width",
type=int,
default=128,
help="width of image"
)
return parser
def preprocess(image_path, image_height, image_width):
original_image = cv2.imread(image_path)
# the model expects RGB inputs
original_image = original_image[:, :, ::-1]
# Apply pre-processing to image.
img = cv2.resize(original_image, (image_width, image_height), interpolation=cv2.INTER_CUBIC)
img = img.astype("float32").transpose(2, 0, 1)[np.newaxis] # (1, 3, h, w)
return img
def normalize(nparray, order=2, axis=-1):
"""Normalize a N-D numpy array along the specified axis."""
norm = np.linalg.norm(nparray, ord=order, axis=axis, keepdims=True)
return nparray / (norm + np.finfo(np.float32).eps)
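# For example (illustrative): normalize(np.array([[3., 4.]])) returns roughly
# [[0.6, 0.8]], since the L2 norm of [3, 4] along the last axis is 5 (the small
# float32 epsilon in the denominator only guards against division by zero).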
if __name__ == "__main__":
args = get_parser().parse_args()
trt = pytrt.Trt()
onnxModel = ""
engineFile = args.model_path
customOutput = []
maxBatchSize = 1
calibratorData = []
mode = 2
trt.CreateEngine(onnxModel, engineFile, customOutput, maxBatchSize, mode, calibratorData)
if not os.path.exists(args.output): os.makedirs(args.output)
if args.input:
if os.path.isdir(args.input[0]):
args.input = glob.glob(os.path.expanduser(args.input[0]))
assert args.input, "The input path(s) was not found"
for path in args.input:
input_numpy_array = preprocess(path, args.height, args.width)
trt.DoInference(input_numpy_array)
feat = trt.GetOutput(args.output_name)
feat = normalize(feat, axis=1)
np.save(os.path.join(args.output, path.replace('.jpg', '.npy').split('/')[-1]), feat)
|
the-stack_106_26782 | import argparse
import torch
import torch.optim as optim
from painter import *
# settings
parser = argparse.ArgumentParser(description="Neural Painter")
parser.add_argument(
"--renderer",
type=str,
default="oilpaintbrush",
metavar="str",
    help="renderer: [watercolor, markerpen, oilpaintbrush, rectangle] (default: oilpaintbrush)",
)
parser.add_argument(
"--vector_file",
type=str,
default="./output/sunflowers_strokes.npz",
metavar="str",
help="path to pre-generated stroke vector file (default: ...)",
)
parser.add_argument(
"--style_img_path",
type=str,
default="./style_images/fire.jpg",
metavar="str",
help="path to style image (default: ...)",
)
parser.add_argument(
"--content_img_path",
type=str,
default="./test_images/sunflowers.jpg",
metavar="str",
help="path to content image (default: ...)",
)
parser.add_argument(
"--transfer_mode",
type=int,
default=1,
metavar="N",
    help="style transfer mode, 0: transfer color only, 1: transfer both color and texture, "
    "default: 1",
)
parser.add_argument(
"--canvas_color",
type=str,
default="black",
metavar="str",
help="canvas_color: [black, white] (default black)",
)
parser.add_argument(
"--canvas_size",
type=int,
default=512,
metavar="str",
help="size of the canvas for stroke rendering",
)
parser.add_argument(
"--keep_aspect_ratio",
action="store_true",
default=False,
help="keep input aspect ratio when saving outputs",
)
parser.add_argument(
"--beta_L1", type=float, default=1.0, help="weight for L1 loss (default: 1.0)"
)
parser.add_argument(
"--beta_sty",
type=float,
default=0.5,
help="weight for vgg style loss (default: 0.5)",
)
parser.add_argument(
"--net_G",
type=str,
default="zou-fusion-net-light",
metavar="str",
help="net_G: plain-dcgan, plain-unet, huang-net, zou-fusion-net, "
"or zou-fusion-net-light (default: zou-fusion-net-light)",
)
parser.add_argument(
"--renderer_checkpoint_dir",
type=str,
default=r"./checkpoints_G_oilpaintbrush_light",
metavar="str",
help="dir to load neu-renderer (default: ./checkpoints_G_oilpaintbrush_light)",
)
parser.add_argument(
"--lr",
type=float,
default=0.005,
help="learning rate for stroke searching (default: 0.005)",
)
parser.add_argument(
"--output_dir",
type=str,
default=r"./output",
metavar="str",
help="dir to save style transfer results (default: ./output)",
)
parser.add_argument(
"--disable_preview",
action="store_true",
default=False,
help="disable cv2.imshow, for running remotely without x-display",
)
args = parser.parse_args()
# Decide which device we want to run on
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def optimize_x(pt):
pt._load_checkpoint()
pt.net_G.eval()
if args.transfer_mode == 0: # transfer color only
pt.x_ctt.requires_grad = False
pt.x_color.requires_grad = True
pt.x_alpha.requires_grad = False
else: # transfer both color and texture
pt.x_ctt.requires_grad = True
pt.x_color.requires_grad = True
pt.x_alpha.requires_grad = True
pt.optimizer_x_sty = optim.RMSprop([pt.x_ctt, pt.x_color, pt.x_alpha], lr=pt.lr)
iters_per_stroke = 100
for i in range(iters_per_stroke):
pt.optimizer_x_sty.zero_grad()
pt.x_ctt.data = torch.clamp(pt.x_ctt.data, 0.1, 1 - 0.1)
pt.x_color.data = torch.clamp(pt.x_color.data, 0, 1)
pt.x_alpha.data = torch.clamp(pt.x_alpha.data, 0, 1)
if args.canvas_color == "white":
pt.G_pred_canvas = torch.ones(
[pt.m_grid * pt.m_grid, 3, pt.net_G.out_size, pt.net_G.out_size]
).to(device)
else:
pt.G_pred_canvas = torch.zeros(
pt.m_grid * pt.m_grid, 3, pt.net_G.out_size, pt.net_G.out_size
).to(device)
pt._forward_pass()
pt._style_transfer_step_states()
pt._backward_x_sty()
pt.optimizer_x_sty.step()
pt.x_ctt.data = torch.clamp(pt.x_ctt.data, 0.1, 1 - 0.1)
pt.x_color.data = torch.clamp(pt.x_color.data, 0, 1)
pt.x_alpha.data = torch.clamp(pt.x_alpha.data, 0, 1)
pt.step_id += 1
print("saving style transfer result...")
v_n = pt._normalize_strokes(pt.x)
v_n = pt._shuffle_strokes_and_reshape(v_n)
final_rendered_image = pt._render(v_n, save_jpgs=False, save_video=False)
pt._save_style_transfer_images(final_rendered_image)
if __name__ == "__main__":
pt = NeuralStyleTransfer(args=args)
optimize_x(pt)
|
the-stack_106_26784 | import socket
from typing import Any, Dict, List
from .abc import AbstractResolver
from .helpers import get_running_loop
__all__ = ('ThreadedResolver', 'AsyncResolver', 'DefaultResolver')
try:
import aiodns
    aiodns_default = hasattr(aiodns.DNSResolver, 'gethostbyname')
except ImportError: # pragma: no cover
aiodns = None
aiodns_default = False
class ThreadedResolver(AbstractResolver):
"""Use Executor for synchronous getaddrinfo() calls, which defaults to
concurrent.futures.ThreadPoolExecutor.
"""
def __init__(self) -> None:
self._loop = get_running_loop()
async def resolve(self, host: str, port: int=0,
family: int=socket.AF_INET) -> List[Dict[str, Any]]:
infos = await self._loop.getaddrinfo(
host, port, type=socket.SOCK_STREAM, family=family)
hosts = []
for family, _, proto, _, address in infos:
hosts.append(
{'hostname': host,
'host': address[0], 'port': address[1],
'family': family, 'proto': proto,
'flags': socket.AI_NUMERICHOST})
return hosts
async def close(self) -> None:
pass
class AsyncResolver(AbstractResolver):
"""Use the `aiodns` package to make asynchronous DNS lookups"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
if aiodns is None:
raise RuntimeError("Resolver requires aiodns library")
self._loop = get_running_loop()
self._resolver = aiodns.DNSResolver(*args, loop=self._loop, **kwargs)
if not hasattr(self._resolver, 'gethostbyname'):
# aiodns 1.1 is not available, fallback to DNSResolver.query
self.resolve = self._resolve_with_query # type: ignore
async def resolve(self, host: str, port: int=0,
family: int=socket.AF_INET) -> List[Dict[str, Any]]:
try:
resp = await self._resolver.gethostbyname(host, family)
except aiodns.error.DNSError as exc:
msg = exc.args[1] if len(exc.args) >= 1 else "DNS lookup failed"
raise OSError(msg) from exc
hosts = []
for address in resp.addresses:
hosts.append(
{'hostname': host,
'host': address, 'port': port,
'family': family, 'proto': 0,
'flags': socket.AI_NUMERICHOST})
if not hosts:
raise OSError("DNS lookup failed")
return hosts
async def _resolve_with_query(
self, host: str, port: int=0,
family: int=socket.AF_INET) -> List[Dict[str, Any]]:
if family == socket.AF_INET6:
qtype = 'AAAA'
else:
qtype = 'A'
try:
resp = await self._resolver.query(host, qtype)
except aiodns.error.DNSError as exc:
msg = exc.args[1] if len(exc.args) >= 1 else "DNS lookup failed"
raise OSError(msg) from exc
hosts = []
for rr in resp:
hosts.append(
{'hostname': host,
'host': rr.host, 'port': port,
'family': family, 'proto': 0,
'flags': socket.AI_NUMERICHOST})
if not hosts:
raise OSError("DNS lookup failed")
return hosts
async def close(self) -> None:
return self._resolver.cancel()
DefaultResolver = AsyncResolver if aiodns_default else ThreadedResolver
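# Illustrative usage sketch (not part of the original module): resolving a host
# with whichever resolver DefaultResolver points to. The host name is a
# placeholder; this must be awaited from within a running event loop.
async def _example_lookup(host: str = 'example.com') -> List[Dict[str, Any]]:
    resolver = DefaultResolver()
    try:
        return await resolver.resolve(host, port=80)
    finally:
        await resolver.close()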
|
the-stack_106_26786 | """
This module lets you practice correcting SYNTAX (notation) errors.
Authors: David Mutchler, Dave Fisher, Vibha Alangar, Amanda Stouder,
their colleagues and Myon McGee.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
###############################################################################
#
# DONE: 2.
# Locate the syntax (notation) errors in this file
# by looking for red underlines.
#
# For each error, try to make sense of its message.
# -- Hover and/or expand as needed -- make sure you see the message!
#
# Then fix the errors, one by one. IMPORTANT:
# -- Fixing one error may bring up additional errors
# (after a few seconds or when you run or save the module).
# -- Each time, fix the error that is nearest the TOP of the module.
# -- Often the SOURCE of the error may be on the line
# just BEFORE the line with a red underline.
# -- New errors may appear during the RUN of the module.
#
# Finish by RUNNING the corrected program
# and making sure that it RUNS CORRECTLY.
# That is, make sure that (per the doc-strings) the program
# prints two calculated values and makes a SimpleTurtle do some things.
#
# When finished, COMMIT-and-PUSH your work, as always.
#
###############################################################################
import rosegraphics as rg
import math
def main():
""" Calls the other functions in this module to demo them. """
print_math()
turtle_fun()
def print_math():
""" Prints some calculated values. """
x = math.cos(math.pi)
print(x)
y = math.sin(math.pi)
print('The sine of PI is', y)
def turtle_fun():
"""
Constructs a TurtleWindow,
constructs a classic SimpleTurtle and asks it to do some things,
and waits for the user to click anywhere in the window to close it.
"""
window = rg.TurtleWindow()
alan = rg.SimpleTurtle()
alan.pen = rg.Pen('blue', 30)
alan.paint_bucket = rg.PaintBucket('yellow')
alan.backward(3 * (47 + 16))
alan.begin_fill()
alan.draw_circle(25)
alan.end_fill()
alan.forward(200)
window.close_on_mouse_click()
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main()
|
the-stack_106_26788 | ##############################################################################
##
# This file is part of Sardana
##
# http://www.sardana-controls.org/
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Sardana is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Sardana is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Sardana. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
"""This module contains the definition of a slit pseudo motor controller
for the Sardana Device Pool"""
__all__ = ["Slit"]
__docformat__ = 'restructuredtext'
from sardana import DataAccess
from sardana.pool.controller import PseudoMotorController
from sardana.pool.controller import DefaultValue, Description, Access, Type
class Slit(PseudoMotorController):
"""A Slit pseudo motor controller for handling gap and offset pseudo
    motors. The system uses two real motors: sl2t (top slit) and sl2b (bottom
    slit)."""
gender = "Slit"
model = "Default Slit"
organization = "Sardana team"
pseudo_motor_roles = "Gap", "Offset"
motor_roles = "sl2t", "sl2b"
ctrl_properties = {'sign': {Type: float,
                                Description: 'Gap = sign * calculated gap\nOffset = sign * calculated offset',
DefaultValue: 1}, }
axis_attributes = {'example': {Type: int,
Access: DataAccess.ReadWrite,
Description: 'test purposes'}, }
def __init__(self, inst, props, *args, **kwargs):
PseudoMotorController.__init__(self, inst, props, *args, **kwargs)
self._log.debug("Created SLIT %s", inst)
self._example = {}
def CalcPhysical(self, index, pseudo_pos, curr_physical_pos):
half_gap = pseudo_pos[0] / 2.0
if index == 1:
ret = self.sign * (pseudo_pos[1] + half_gap)
else:
ret = self.sign * (half_gap - pseudo_pos[1])
self._log.debug("Slit.CalcPhysical(%d, %s) -> %f",
index, pseudo_pos, ret)
return ret
def CalcPseudo(self, index, physical_pos, curr_pseudo_pos):
gap = physical_pos[1] + physical_pos[0]
if index == 1:
ret = self.sign * gap
else:
ret = self.sign * (physical_pos[0] - gap / 2.0)
return ret
def CalcAllPseudo(self, physical_pos, curr_pseudo_pos):
"""Calculates the positions of all pseudo motors that belong to the
pseudo motor system from the positions of the physical motors."""
gap = physical_pos[1] + physical_pos[0]
return (self.sign * gap,
self.sign * (physical_pos[0] - gap / 2.0))
# def CalcAllPhysical(self, pseudo_pos, curr_physical_pos):
# """Calculates the positions of all motors that belong to the pseudo
# motor system from the positions of the pseudo motors."""
# half_gap = pseudo_pos[0]/2.0
# return (self.sign * (pseudo_pos[1] + half_gap),
# self.sign * (half_gap - pseudo_pos[1]))
def SetAxisExtraPar(self, axis, parameter, value):
self._example[axis] = value
def GetAxisExtraPar(self, axis, parameter):
return self._example.get(axis, -1)
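# Worked example (illustrative, assuming sign=1): with the top blade at
# sl2t=2.0 and the bottom blade at sl2b=1.0, CalcAllPseudo gives
# gap = sl2t + sl2b = 3.0 and offset = sl2t - gap/2 = 0.5, i.e. the slit is
# 3 units wide with its centre 0.5 units above the nominal axis.
# CalcPhysical inverts this: sl2t = offset + gap/2 = 2.0 and
# sl2b = gap/2 - offset = 1.0, recovering the blade positions.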
|
the-stack_106_26790 | import torch
import transforms as T
class DetectionPresetTrain:
def __init__(self, data_augmentation, hflip_prob=0.5, mean=(123., 117., 104.)):
if data_augmentation == 'hflip':
self.transforms = T.Compose([
T.RandomHorizontalFlip(p=hflip_prob),
T.PILToTensor(),
T.ConvertImageDtype(torch.float),
])
elif data_augmentation == 'ssd':
self.transforms = T.Compose([
T.RandomPhotometricDistort(),
T.RandomZoomOut(fill=list(mean)),
T.RandomIoUCrop(),
T.RandomHorizontalFlip(p=hflip_prob),
T.PILToTensor(),
T.ConvertImageDtype(torch.float),
])
elif data_augmentation == 'ssdlite':
self.transforms = T.Compose([
T.RandomIoUCrop(),
T.RandomHorizontalFlip(p=hflip_prob),
T.PILToTensor(),
T.ConvertImageDtype(torch.float),
])
else:
raise ValueError(f'Unknown data augmentation policy "{data_augmentation}"')
def __call__(self, img, target):
return self.transforms(img, target)
class DetectionPresetEval:
def __init__(self):
self.transforms = T.ToTensor()
def __call__(self, img, target):
return self.transforms(img, target)
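# Illustrative usage sketch (not part of the original module): building the
# train/eval presets. The policy string must be one of the branches above
# ('hflip', 'ssd' or 'ssdlite').
def _build_presets(data_augmentation="hflip"):
    train_transform = DetectionPresetTrain(data_augmentation)
    eval_transform = DetectionPresetEval()
    return train_transform, eval_transform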
|
the-stack_106_26791 | """Grid example."""
from flow.controllers import GridRouter, IDMController, RLController
from flow.controllers.routing_controllers import MinicityRouter
from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams
from flow.core.params import VehicleParams, PersonParams
from flow.core.params import TrafficLightParams
from flow.core.params import SumoCarFollowingParams, SumoLaneChangeParams
from flow.core.params import InFlows
# from flow.envs.ring.accel import AccelEnv, ADDITIONAL_ENV_PARAMS
from flow.envs.dispatch_and_reposition import DispatchAndRepositionEnv, ADDITIONAL_ENV_PARAMS
from flow.networks import GridnxmNetwork
v_enter = 10
inner_length = 50
n_rows = 4
n_columns = 4
grid_array = {
"inner_length": inner_length,
"row_num": n_rows,
"col_num": n_columns,
"sub_edge_num": 1
}
def get_non_flow_params(enter_speed, add_net_params):
"""Define the network and initial params in the absence of inflows.
Note that when a vehicle leaves a network in this case, it is immediately
returns to the start of the row/column it was traversing, and in the same
direction as it was before.
Parameters
----------
enter_speed : float
initial speed of vehicles as they enter the network.
add_net_params: dict
additional network-specific parameters (unique to the grid)
Returns
-------
flow.core.params.InitialConfig
parameters specifying the initial configuration of vehicles in the
network
flow.core.params.NetParams
network-specific parameters used to generate the network
"""
additional_init_params = {'enter_speed': enter_speed}
initial = InitialConfig(
x0=2.5, spacing='uniform', min_gap=10, additional_params=additional_init_params) # gap needs to be large enough
net = NetParams(additional_params=add_net_params)
return initial, net
persons = PersonParams()
vehicles = VehicleParams()
vehicles.add(
veh_id="idm",
acceleration_controller=(IDMController, {}),
routing_controller=(MinicityRouter, {}),
car_following_params=SumoCarFollowingParams(
speed_mode='all_checks',
min_gap=5,
decel=10.0, # avoid collisions at emergency stops
max_speed=10,
),
lane_change_params=SumoLaneChangeParams(
lane_change_mode="no_lc_safe",
),
initial_speed=0,
num_vehicles=25)
vehicles.add(
veh_id="taxi",
initial_speed=0,
acceleration_controller=(RLController, {}),
# routing_controller=(MinicityRouter, {}),
car_following_params=SumoCarFollowingParams(
speed_mode='all_checks',
min_gap=5,
decel=10.0, # avoid collisions at emergency stops
max_speed=10,
),
lane_change_params=SumoLaneChangeParams(
lane_change_mode="sumo_default",
),
num_vehicles=20,
is_taxi=False)
tl_logic = TrafficLightParams(baseline=False)
phases = [{
"duration": "40",
"minDur": "40",
"maxDur": "40",
"state": "GGggrrrrGGggrrrr"
}, {
"duration": "1",
"minDur": "1",
"maxDur": "1",
"state": "yyyyrrrryyyyrrrr"
}, {
"duration": "25",
"minDur": "25",
"maxDur": "25",
"state": "rrrrGGggrrrrGGgg"
}, {
"duration": "1",
"minDur": "1",
"maxDur": "1",
"state": "rrrryyyyrrrryyyy"
}]
tl_logic.add("center9", phases=phases)
tl_logic.add("center10", phases=phases)
tl_logic.add("center5", phases=phases)
tl_logic.add("center6", phases=phases)
additional_net_params = {
"grid_array": grid_array,
"speed_limit": 35,
"horizontal_lanes": 1,
"vertical_lanes": 1,
"print_warnings": False, # warnings in building net
}
initial_config, net_params = get_non_flow_params(
enter_speed=v_enter,
add_net_params=additional_net_params)
additional_params = ADDITIONAL_ENV_PARAMS.copy()
additional_params["time_price"] = 0.02
additional_params["distance_price"] = 0.005
additional_params["pickup_price"] = 1
additional_params["wait_penalty"] = 0.000
additional_params["tle_penalty"] = 0.02
additional_params["person_prob"] = 0.06
additional_params["max_waiting_time"] = 20
additional_params["free_pickup_time"] = 0.0
additional_params["distribution"] = 'mode-15'
additional_params["n_mid_edge"] = 1
additional_params["use_tl"] = True
flow_params = dict(
# name of the experiment
exp_tag='grid-intersection',
# name of the flow environment the experiment is running on
env_name=DispatchAndRepositionEnv,
# name of the network class the experiment is running on
network=GridnxmNetwork,
# simulator that is used by the experiment
simulator='traci',
# sumo-related parameters (see flow.core.params.SumoParams)
sim=SumoParams(
sim_step=1,
render=False,
print_warnings=False,
restart_instance=True
# taxi_dispatch_alg="greedy"
),
# environment related parameters (see flow.core.params.EnvParams)
env=EnvParams(
horizon=500,
additional_params=additional_params,
),
# network-related parameters (see flow.core.params.NetParams and the
# network's documentation or ADDITIONAL_NET_PARAMS component)
net=net_params,
# vehicles to be placed in the network at the start of a rollout (see
# flow.core.params.VehicleParams)
veh=vehicles,
per=persons,
# parameters specifying the positioning of vehicles upon initialization/
# reset (see flow.core.params.InitialConfig)
initial=initial_config,
# traffic lights to be introduced to specific nodes (see
# flow.core.params.TrafficLightParams)
tls=tl_logic,
)
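# Illustrative note (not part of the original script): flow_params is the
# standard dictionary consumed by Flow's experiment/training entry points,
# e.g. something along the lines of
#     from flow.core.experiment import Experiment
#     Experiment(flow_params).run(num_runs=1)
# (the exact entry point and signature depend on the Flow version in use).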
|
the-stack_106_26792 | """
Post-processes the obiwan/, tractor/, data products. Joins the psql db,
input properties, and tractor catalogue measurements for easy anaylsis
later. Uses mpi4py to parallelize to a full production runs' outputs.
"""
import numpy as np
import os
from glob import glob
import pandas as pd
from collections import Counter,defaultdict
from obiwan.db_tools import all_psqlcols_for_ids
# try:
from astrometry.util.fits import fits_table, merge_tables
from astrometry.util.util import Tan
from astrometry.libkd.spherematch import match_radec
# except ImportError:
# pass
def derived_field_dir(brick,data_dir,date):
"""directory to save post-processed tables to"""
return os.path.join(data_dir,'derived_%s' % date,
brick[:3],brick)
def datarelease_dir(drNumber):
"""the tractor catalogues for this DR are the real galaxy catalogues"""
assert(drNumber in ['dr3','dr5'])
proj='/global/project/projectdirs/cosmo/data/legacysurvey'
return os.path.join(proj,drNumber)
def is_bool(obj):
return obj.dtype == bool
def is_numeric(obj):
try:
tmp=obj+5
except TypeError:
return False
return True
def flux2mag(nmgy):
"""converts nanomaggies to AB mag"""
return -2.5 * (np.log10(nmgy) - 9)
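# For example (illustrative): flux2mag(1.0) == 22.5 and flux2mag(10.0) == 20.0,
# i.e. 22.5 is the AB zero point of the nanomaggy flux unit.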
class Bit(object):
"""bitmask arithmetic"""
def set(self,value, bit):
"""change bit to 1, bit is 0-indexed"""
return value | (1<<bit)
def clear(self,value, bit):
"""change bit to 0, bit is 0-indexed"""
return value & ~(1<<bit)
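# Illustrative example: Bit().set(0, 0) == 1, Bit().set(0, 1) == 2 and
# Bit().clear(3, 0) == 2, since bit indices are 0-based.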
class TargetSelection(object):
"""Applies ELG target selection using either DESI or eBOSS criteria"""
def elg_by_measurement(self,tractor,name,
prefix='',anymask=True):
"""Returns bool array elgs as measured by tractor
Args:
prefix: 'tractor_' for randoms table, '' for tractor table
anymask: True to apply anymask cut, False for allmask
"""
self.prefix= prefix
self.anymask= anymask
assert(name in ['desi','eboss_ngc','eboss_sgc'])
if name == 'desi':
return self.desi_elg_by_measurement(tractor)
elif name == 'eboss_ngc':
return self.eboss_elg_by_measurement(tractor,'ngc')
elif name == 'eboss_sgc':
return self.eboss_elg_by_measurement(tractor,'sgc')
def desi_elg_by_measurement(self,tractor):
kw={}
if self.prefix+'brick_primary' in tractor.get_columns():
kw.update(primary=tractor.get(self.prefix+'brick_primary'))
for band,iband in [('g',1),('r',2),('z',4)]:
kw[band+'flux']= tractor.get(self.prefix+'flux_'+band) / \
tractor.get(self.prefix+'mw_transmission_'+band)
return self._desi_elg(**kw)
def eboss_elg_by_measurement(self,tractor,ngc_or_sgc):
kw={}
if self.prefix+'brick_primary' in tractor.get_columns():
kw.update(primary=tractor.get(self.prefix+'brick_primary'))
for key in ['ra','dec']:
kw[key]= tractor.get(key) #self.prefix+key)
for band in 'grz':
kw['psfdepth_'+band]= tractor.get(self.prefix+'psfdepth_'+band)
if self.anymask:
kw['anymask_'+band]= tractor.get(self.prefix+'anymask_'+band)
else:
kw['allmask_'+band]= tractor.get(self.prefix+'allmask_'+band)
kw.update( self.get_grz_mag_dict(tractor) )
return self._eboss_elg(ngc_or_sgc,**kw)
def _desi_elg(self,gflux=None, rflux=None, zflux=None,
primary=None):
"""VERBATIM from
https://github.com/desihub/desitarget/blob/master/py/desitarget/cuts.py
Args:
gflux, rflux, zflux, w1flux, w2flux: array_like
The flux in nano-maggies of g, r, z, w1, and w2 bands.
primary: array_like or None
If given, the BRICK_PRIMARY column of the catalogue.
Returns:
mask : array_like. True if and only the object is an ELG
target.
"""
#----- Emission Line Galaxies
if primary is None:
primary = np.ones_like(gflux, dtype='?')
elg = primary.copy()
elg &= rflux > 10**((22.5-23.4)/2.5) # r<23.4
elg &= zflux > rflux * 10**(0.3/2.5) # (r-z)>0.3
elg &= zflux < rflux * 10**(1.6/2.5) # (r-z)<1.6
# Clip to avoid warnings from negative numbers raised to fractional powers.
rflux = rflux.clip(0)
zflux = zflux.clip(0)
elg &= rflux**2.15 < gflux * zflux**1.15 * 10**(-0.15/2.5) # (g-r)<1.15(r-z)-0.15
elg &= zflux**1.2 < gflux * rflux**0.2 * 10**(1.6/2.5) # (g-r)<1.6-1.2(r-z)
return elg
def _eboss_elg(self,ngc_or_sgc,
primary=None,ra=None,dec=None,
gmag=None,rmag=None,zmag=None,
anymask_g=None,anymask_r=None,anymask_z=None,
allmask_g=None,allmask_r=None,allmask_z=None,
psfdepth_g=None,psfdepth_r=None,psfdepth_z=None):
"""
Johan's target selection
Does NOT do:
tycho2inblob == False
SDSS bright object mask & 0 < V < 11.5 mag Tycho2 stars mask
custom mask for eboss23
"""
assert(ngc_or_sgc in ['ngc','sgc'])
if primary is None:
primary = np.ones(len(ra), bool)
if psfdepth_g is None:
depth_selection = np.ones(len(ra), bool)
else:
# Johan's cut
# https://github.com/DriftingPig/ipynb/blob/master/obiwan_match.py#L96
gL = 62.79716079
rL = 30.05661087
zL_ngc = 11.0
zL_sgc = 12.75
depth_selection= (psfdepth_g > gL) & (psfdepth_r > rL)
if ngc_or_sgc == 'ngc':
depth_selection= (depth_selection) & (psfdepth_z > zL_ngc)
else:
depth_selection= (depth_selection) & (psfdepth_z > zL_sgc)
if not (anymask_g is None):
mask= ((anymask_g == 0) &
(anymask_r == 0) &
(anymask_z == 0))
elif not (allmask_g is None):
mask= ((allmask_g == 0) &
(allmask_r == 0) &
(allmask_z == 0))
else:
mask = np.ones(len(ra), bool)
gr= gmag - rmag
rz= rmag - zmag
if ngc_or_sgc == 'ngc':
colorCut= ((gmag > 21.825) &
(gmag < 22.9) &
(-0.068 * rz + 0.457 < gr) &
(gr < 0.112 * rz + 0.773) &
(0.637 * gr + 0.399 < rz) &
(rz < -0.555 * gr + 1.901))
elif ngc_or_sgc == 'sgc':
colorCut= ((gmag > 21.825) &
(gmag < 22.825) &
(-0.068 * rz + 0.457 < gr) &
(gr < 0.112 * rz + 0.773) &
(0.218 * gr + 0.571 < rz) &
(rz < -0.555 * gr + 1.901))
return ((primary) &
(depth_selection) &
(colorCut) &
(mask))
def get_grz_mag_dict(self,tractor):
d={}
for band,iband in [('g',1),('r',2),('z',4)]:
flux_ext= tractor.get(self.prefix+'flux_'+band) / \
tractor.get(self.prefix+'mw_transmission_'+band)
d[band+'mag']= flux2mag(flux_ext)
return d
class RandomsTable(object):
"""Creates the uniform,obiwan_a,obiwan_b randoms tables for a single brick
Final table has same number rows as uniform table, and the obiwan
rows are filled in wherever there is a matching unique_id
between uniform and obiwan. A bitmask column 'obiwan_mask' which says
whether the random was recovered by Tractor or not, and whether
the random is near a previously existing real source in a DR
catalogue, like DR3 or DR5
"""
def __init__(self, data_dir,dr3_or_dr5,db_randoms_table,
date='mm-dd-yyyy'):
self.data_dir= data_dir
self.dr3_or_dr5= dr3_or_dr5
self.db_randoms_table= db_randoms_table
self.date= date
self.number_rsdirs= self.num_rsdirs_for_completion()
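    # Bit convention for 'obiwan_mask' as set below (illustrative summary):
    #   bit 0 (value 1): the injected random was recovered by Tractor
    #   bit 1 (value 2): the recovered random also matches a pre-existing real
    #                    source in the DR3/DR5 tractor catalogue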
def run(self,brick):
derived_dir= derived_field_dir(brick,self.data_dir,self.date)
final_table_fn= os.path.join(derived_dir,'randoms.fits')
# Already exist and readable table?
notExist=True
if os.path.exists(final_table_fn):
try:
tmp= fits_table(final_table_fn)
notExist=False
print('skipping brick %s, already exists' % brick)
except OSError:
print('Trouble reading %s, deleting and redoing it' % final_table_fn)
os.remove(final_table_fn)
notExist=True
rsdirs,brickDone= self.get_rsdirs(brick)
if notExist and brickDone:
tab= self.merge_randoms_tables(brick,rsdirs)
self.add_flag_for_realsources(tab,brick)
self.add_targets_mask(tab)
# Write
self.write_table(tab,final_table_fn)
def get_rsdirs(self,brick):
"""get list of rsdirs for a given brick
Returns:
tuple of rsdirs list and whether the brick is finished or not
"""
search= os.path.join(self.data_dir,'tractor',
brick[:3],brick,
'rs*','tractor-%s.fits' % brick)
rsdirs= glob(search)
rsdirs= [os.path.dirname(dr)
for dr in rsdirs]
if self.number_rsdirs is None:
# overide for using all rsdirs to be used
brickDone=True
elif len(rsdirs) == self.number_rsdirs:
brickDone=True
elif len(rsdirs) < self.number_rsdirs:
print('brick %s not complete, %d/%d rsdirs exists' % \
(brick,len(rsdirs),self.number_rsdirs))
brickDone=False
else:
raise ValueError('brick %s more rsdirs than should be possible %d/%d' % \
(brick,len(rsdirs),self.number_rsdirs))
return rsdirs,brickDone
def merge_randoms_tables(self,brick,rsdirs):
"""Computes final joined randoms tables
Includes uniform randoms, info from psql db, which of these were recovered
by Tractor, and the associated tractor info for those
Args:
brick: brickname
Returns:
joined randoms table
"""
uniform=[]
for dr in rsdirs:
simcat= fits_table((os.path.join(dr,'simcat-elg-%s.fits' % brick)
.replace('/tractor/','/obiwan/')))
idsadded= fits_table((os.path.join(dr,'sim_ids_added.fits')
.replace('/tractor/','/obiwan/')))
# Uniform randoms (injected at touching at least 1 ccd)
assert(len(idsadded) == len(set(idsadded.id)))
simcat.cut( pd.Series(simcat.id).isin(idsadded.id) )
simcat.set('unique_id',self.unique_id(simcat.id.astype(str),
brick,os.path.basename(dr)))
# PSQL
simcat= self.add_psql_to_uniform_table(simcat,self.db_randoms_table)
# Recovered by Tractor
tractor= fits_table(os.path.join(dr,'tractor-%s.fits' % brick))
tractor.cut(tractor.brick_primary)
cols= np.array(tractor.get_columns())
del_cols= cols[(pd.Series(cols)
.str.startswith('apflux_'))]
for col in del_cols:
if col in ['apflux_resid_g','apflux_resid_r','apflux_resid_z',
'apflux_g','apflux_r','apflux_z']:
continue
tractor.delete_column(col)
# nearest match in (ra2,dec2) for each point in (ra1,dec1)
I,J,d = match_radec(simcat.ra,simcat.dec,
tractor.ra,tractor.dec, 1./3600,
nearest=True)
assert(np.all(d <= 1./3600))
tractor.cut(J)
add_vals={}
for trac_key in tractor.get_columns():
key= 'tractor_'+trac_key
if is_bool(tractor.get(trac_key)):
add_vals[key]= np.zeros(len(simcat),bool)
elif is_numeric(tractor.get(trac_key)):
shp= (len(simcat),) + tractor.get(trac_key).shape[1:]
add_vals[key]= np.zeros(shp) +np.nan
else:
add_vals[key]= np.array([4*' ']*len(simcat))
add_vals[key][I]= tractor.get(trac_key)
simcat.set(key,add_vals[key])
# Mask
mask= np.zeros(len(simcat),dtype=np.int8)
mask[I]= Bit().set(mask[I],0)
simcat.set('obiwan_mask',mask)
# add to list uniform tables
uniform.append(simcat)
return merge_tables(uniform, columns='fillzero')
def unique_id(self,id_array,brick,rs_dir):
"""For a given random injected into a brick during a given iteration
Args:
id_array: randoms ids
brick: brick
rs_dir: like rs0 or rs300
"""
ids= np.array(id_array,dtype=object) + "_%s_%s" % (brick,rs_dir)
# FITS can't handle numpy type 'object'
return ids.astype(str)
def add_psql_to_uniform_table(self,uniform,db_randoms_table):
"""Add randoms db columns from psql to the uniform randoms table
Args:
uniform: fits table
db_randoms_table: name of the psql db table
"""
db_dict= all_psqlcols_for_ids(uniform.id, db_randoms_table=db_randoms_table)
if any(db_dict['id'] - uniform.id != 0):
sort_db= np.argsort(db_dict['id'])
sort_uniform= np.argsort(uniform.id)
for key in db_dict.keys():
db_dict[key]= db_dict[key][sort_db]
uniform= uniform[sort_uniform]
assert(all(db_dict['id'] - uniform.id == 0))
for key,val in db_dict.items():
            if key in ['id']:
                continue  # 'id' is already a column of the uniform table; skip duplicating it
uniform.set('psql_%s' % key,val)
return uniform
def add_flag_for_realsources(self,tab,brick):
"""Flag sources also in DR3, DR5
Args:
tab: table returned by merged_randoms_table()
"""
real= fits_table(os.path.join(datarelease_dir(self.dr3_or_dr5),
'tractor',brick[:3],
'tractor-%s.fits' % brick))
# nearest match in (ra2,dec2) for each point in (ra1,dec1)
I,J,d = match_radec(tab.ra,tab.dec,
real.ra,real.dec, 1./3600,
nearest=True)
assert(np.all(d <= 1./3600))
bool_matched= np.zeros(len(tab),bool)
bool_matched[I]= True
recovered_and_matched= ((tab.obiwan_mask == 1) &
(bool_matched))
if len(tab[recovered_and_matched]) > 0:
mask= tab.obiwan_mask
mask[recovered_and_matched]= Bit().set(mask[recovered_and_matched],1)
tab.set('obiwan_mask',mask)
def add_targets_mask(self,table):
TS= TargetSelection(prefix='tractor_')
mask= np.zeros(len(table),dtype=np.int8)
for survey_ts,bit in [('eboss_ngc',0),
('eboss_sgc',1),
('desi',2)]:
keep= TS.run(table,survey_ts)
if len(table[keep]) > 0:
mask[keep]= Bit().set(mask[keep],bit)
table.set('targets_mask',mask)
def write_table(self,tab,fn):
"""Write the merged randoms table is doesn't already exist"""
if not os.path.exists(fn):
tab.writeto(fn)
print('Wrote %s' % fn)
def num_rsdirs_for_completion(self):
outdir_name= os.path.basename(self.data_dir)
if outdir_name == 'eboss_elg':
n=1
elif outdir_name == 'elg_dr5_500per':
n=7
elif outdir_name == 'elg_dr5_1000per':
n=4
elif 'subset' in outdir_name:
n=None
else:
raise ValueError('%s not one of the above options' % outdir_name)
return n
def fraction_recovered(randoms):
"""Return fraction of randoms detected and measured by Legacypipe
Args:
randoms: ra,dec, and properties of all source added to a bricks
"""
return len(randoms[randoms.obiwan_mask == 1]) / float(len(randoms))
def bin_by_mag(randoms, func_to_apply, band=None,bin_minmax=(18.,26.),nbins=20):
'''bins data and result of func_to_apply(randoms) into the bucket
Args:
randoms: randoms.fits table
func_to_apply: operates on a randoms table, return val to store in bucket
band: bin by mag in this band
'''
assert(band in 'grz')
mag= flux2mag(randoms.get(band+'flux') / randoms.get('mw_transmission_'+band))
bin_edges= np.linspace(bin_minmax[0],bin_minmax[1],num= nbins+1)
vals={}
vals['binc']= (bin_edges[1:]+bin_edges[:-1])/2.
vals['val']=np.zeros(nbins)+np.nan
for i,low,hi in zip(range(nbins), bin_edges[:-1],bin_edges[1:]):
keep= ((mag > low) &
(mag <= hi))
if np.where(keep)[0].size > 0:
vals['val'][i]= func_to_apply(randoms[keep])
else:
vals['val'][i]=np.nan
return vals
def depth_at_half_recovered(randoms,band):
"""bin by mag in given mag, return bin center where frac recovered first drops below 50%"""
binc_and_vals= bin_by_mag(randoms, func_to_apply= fraction_recovered,
band=band,bin_minmax=(18.,26.),nbins=20)
for i,val in enumerate(binc_and_vals['val']):
if not np.isfinite(val):
continue
elif val <= 0.5:
break
#if i == len(binc_and_vals['val'])-1 and val > 0.5:
if val > 0.5:
        raise ValueError('fraction recovered never dropped below 0.5; last val=%s' % val)
else:
return binc_and_vals['binc'][i]
class SummaryTable(object):
"""Writes one table per brick, with brick averaged quantities
derived table "randoms.fits" must exist. Joins the brick summary
quantities from a data release with a similar set from the
randoms.fits table. Each brick's table has one
row and all tables get merged to make the eatmap plots
"""
def __init__(self, data_dir,dr3_or_dr5,date='mm-dd-yyyy'):
self.data_dir= data_dir
self.dr3_or_dr5= dr3_or_dr5
self.date= date
self.surveyBricks = fits_table(os.path.join(os.environ['LEGACY_SURVEY_DIR'],
'survey-bricks.fits.gz'))
def run(self,brick):
#summary_DR= self.brick_summary([brick])
#summary_obi= self.brick_summary_obiwan(brick,prefix='tractor_')
summary_obi= self.brick_summary_obiwan_brief(brick,prefix='tractor_')
#self.add_obiwan_to_DR_table(summary_DR,summary_obi)
# Write
derived_dir= derived_field_dir(brick,self.data_dir,self.date)
fn= os.path.join(derived_dir,'summary.fits')
#self.write_table(summary_DR,fn)
self.write_table(summary_obi,fn)
def write_table(self,tab,fn):
if not os.path.exists(fn):
tab.writeto(fn)
print('Wrote %s' % fn)
def add_obiwan_to_DR_table(self,summary_DR,summary_obi):
"""adds the summary_obi columsn to the summary_DR table
Args:
summary_DR: brick summary for the data release bricks
summary_obiwan: brick summary for the obiwan bricks
"""
prefix='obiwan_'
for col in summary_obi.get_columns():
summary_DR.set(prefix+col, summary_obi.get(col))
del summary_obi
def brick_summary_obiwan_brief(self,brick,prefix=''):
"""brick_summary_obiwan but only 3-4 quantities"""
randoms_fn= os.path.join(derived_field_dir(brick,
self.data_dir,self.date),
'randoms.fits')
T = fits_table(randoms_fn)
isRec= T.obiwan_mask == 1
summary= defaultdict(list)
summary['frac_recovered'].append( len(T[isRec])/ float(len(T)))
for band in 'grz':
            summary['galdepth_'+band].append(np.median(T.get(prefix+'galdepth_'+band)))
# Type
T=fits_table()
T.set('frac_recovered', np.array(summary['frac_recovered']).astype(np.float32))
for b in 'grz':
            T.set('galdepth_'+b, np.array(summary['galdepth_'+b]).astype(np.float32))
return T
def brick_summary_obiwan(self,brick,prefix=''):
"""brick summary for obiwan
Args:
prefix: prefix for obiwan tractor columns, e.g. tractor_
"""
randoms_fn= os.path.join(derived_field_dir(brick,
self.data_dir,self.date),
'randoms.fits')
T = fits_table(randoms_fn)
brickset = set()
summary= defaultdict(list)
nnhist = 6
# Obiwan stuff
was_recovered= T.obiwan_mask == 1
summary['frac_recovered'].append( len(T[was_recovered])/ float(len(T)))
for b in 'grz':
summary['depth_at_half_recovered_'+b].append( depth_at_half_recovered(T,band=b))
W = H = 3600
# H=3600
# xx,yy = np.meshgrid(np.arange(W), np.arange(H))
unique = np.ones((H,W), bool)
tlast = 0
brickset.add(brick)
for key in ['gn','rn','zn']:
summary[key].append(0)
for key in ['gnhist','rnhist','znhist']:
summary[key].append([0 for i in range(nnhist)])
ibrick = np.nonzero(self.surveyBricks.brickname == brick)[0][0]
summary['ibricks'].append(ibrick)
#T.cut(T.brick_primary)
summary['nsrcs'].append(len(T))
types = Counter([t.strip() for t in T.get(prefix+'type')])
for typ in 'psf simp rex exp dev comp'.split(' '):
summary['n'+typ].append(types[typ.upper()])
print('N sources', summary['nsrcs'][-1])
for b in 'grz':
summary[b+'psfsize'].append(np.median(T.get(prefix+'psfsize_'+b)))
summary[b+'psfdepth'].append(np.median(T.get(prefix+'psfdepth_'+b)))
summary[b+'galdepth'].append(np.median(T.get(prefix+'galdepth_'+b)))
summary[b+'trans'].append(np.median(T.get(prefix+'mw_transmission_'+b)))
summary['ebv'].append(np.median(T.get(prefix+'ebv')))
br = self.surveyBricks[ibrick]
#print('Computing unique brick pixels...')
pixscale = 0.262/3600.
wcs = Tan(br.ra, br.dec, W/2.+0.5, H/2.+0.5,
-pixscale, 0., 0., pixscale,
float(W), float(H))
unique[:,:] = True
self.find_unique_pixels(wcs, W, H, unique,
br.ra1, br.ra2, br.dec1, br.dec2)
U = np.flatnonzero(unique)
#print(len(U), 'of', W*H, 'pixels are unique to this brick')
ibricks = np.array(summary['ibricks'])
#print('Maximum number of sources:', max(nsrcs))
T = fits_table()
#T.brickname = np.array([brick])
#T.ra = self.surveyBricks.ra [ibricks]
#T.dec = self.surveyBricks.dec[ibricks]
T.set('frac_recovered', np.array(summary['frac_recovered']).astype(np.float32))
for b in 'grz':
T.set('depth_at_half_recovered_'+b, np.array(summary['depth_at_half_recovered_'+b]).astype(np.float32))
T.set('nexp_'+b, np.array(summary[b+'n']).astype(np.int16))
T.set('nexphist_'+b, np.array(summary[b+'nhist']).astype(np.int32))
T.set('nobjs', np.array(summary['nsrcs']).astype(np.int16))
T.set('psfsize_'+b, np.array(summary[b+'psfsize']).astype(np.float32))
T.set('trans_'+b, np.array(summary[b+'trans']).astype(np.float32))
T.set('ext_'+b, -2.5 * np.log10(T.get('trans_'+b)))
for typ in 'psf simp rex exp dev comp'.split(' '):
T.set('n'+typ, np.array(summary['n'+typ]).astype(np.int16))
with np.errstate(divide='ignore'):
for b in 'grz':
T.set('psfdepth_'+b, (-2.5*(-9.+np.log10(5.*np.sqrt(1. / np.array(summary[b+'psfdepth']))))).astype(np.float32))
T.set('galdepth_'+b, (-2.5*(-9.+np.log10(5.*np.sqrt(1. / np.array(summary[b+'galdepth']))))).astype(np.float32))
for k in ['psfdepth_g', 'psfdepth_r', 'psfdepth_z',
'galdepth_g', 'galdepth_r', 'galdepth_z']:
v = T.get(k)
v[np.logical_not(np.isfinite(v))] = 0.
T.ebv = np.array(summary['ebv']).astype(np.float32)
return T
def brick_summary(self,bricklist=[]):
"""
Args:
bricklist: Give a single brick as a list of length 1, e.g. [brick]
"""
assert(len(bricklist) == 1)
brickset = set()
gn = []
rn = []
zn = []
gnhist = []
rnhist = []
znhist = []
nnhist = 6
gdepth = []
rdepth = []
zdepth = []
ibricks = []
nsrcs = []
npsf = []
nsimp = []
nrex = []
nexp = []
ndev = []
ncomp = []
gpsfsize = []
rpsfsize = []
zpsfsize = []
gpsfdepth = []
rpsfdepth = []
zpsfdepth = []
ggaldepth = []
rgaldepth = []
zgaldepth = []
wise_nobs = []
wise_trans = []
ebv = []
gtrans = []
rtrans = []
ztrans = []
#sfd = SFDMap()
W = H = 3600
# H=3600
# xx,yy = np.meshgrid(np.arange(W), np.arange(H))
unique = np.ones((H,W), bool)
tlast = 0
dirprefix= datarelease_dir(self.dr3_or_dr5)
for ibrick,brick in enumerate(bricklist):
#words = fn.split('/')
#dirprefix = '/'.join(words[:-4])
#print('Directory prefix:', dirprefix)
#words = words[-4:]
#brick = words[2]
#print('Brick', brick)
tfn = os.path.join(dirprefix, 'tractor', brick[:3], 'tractor-%s.fits'%brick)
if self.dr3_or_dr5 == 'dr5':
columns=['brick_primary', 'type',
'psfsize_g', 'psfsize_r', 'psfsize_z',
'psfdepth_g', 'psfdepth_r', 'psfdepth_z',
'galdepth_g', 'galdepth_r', 'galdepth_z',
'ebv',
'mw_transmission_g', 'mw_transmission_r', 'mw_transmission_z',
'nobs_w1', 'nobs_w2', 'nobs_w3', 'nobs_w4',
'nobs_g', 'nobs_r', 'nobs_z',
'mw_transmission_w1', 'mw_transmission_w2', 'mw_transmission_w3', 'mw_transmission_w4']
elif self.dr3_or_dr5 == 'dr3':
columns=['brick_primary', 'type', 'decam_psfsize',
'decam_depth', 'decam_galdepth',
'ebv', 'decam_mw_transmission',
'decam_nobs',
'wise_nobs', 'wise_mw_transmission']
try:
T = fits_table(tfn, columns=columns)
#print('Read %s' % tfn)
except:
print('Failed to read %s' % tfn)
return None
#print('Failed to read FITS table', tfn)
#import traceback
#traceback.print_exc()
#print('Carrying on.')
#continue
if self.dr3_or_dr5 == 'dr5':
hasBands= [band for band in 'grz' if any(T.get('nobs_'+band) > 0)]
elif self.dr3_or_dr5 == 'dr3':
hasBands= [band
for band,iband in [('g',1),('r',2),('z',4)]
if any(T.decam_nobs[:,iband] > 0)]
brickset.add(brick)
gn.append(0)
rn.append(0)
zn.append(0)
gnhist.append([0 for i in range(nnhist)])
rnhist.append([0 for i in range(nnhist)])
znhist.append([0 for i in range(nnhist)])
index = -1
ibrick = np.nonzero(self.surveyBricks.brickname == brick)[0][0]
ibricks.append(ibrick)
T.cut(T.brick_primary)
nsrcs.append(len(T))
types = Counter([t.strip() for t in T.type])
npsf.append(types['PSF'])
nsimp.append(types['SIMP'])
nrex.append(types['REX'])
nexp.append(types['EXP'])
ndev.append(types['DEV'])
ncomp.append(types['COMP'])
print('N sources', nsrcs[-1])
if self.dr3_or_dr5 == 'dr5':
gpsfsize.append(np.median(T.psfsize_g))
rpsfsize.append(np.median(T.psfsize_r))
zpsfsize.append(np.median(T.psfsize_z))
gpsfdepth.append(np.median(T.psfdepth_g))
rpsfdepth.append(np.median(T.psfdepth_r))
zpsfdepth.append(np.median(T.psfdepth_z))
ggaldepth.append(np.median(T.galdepth_g))
rgaldepth.append(np.median(T.galdepth_r))
zgaldepth.append(np.median(T.galdepth_z))
wise_nobs.append(np.median(
np.vstack((T.nobs_w1, T.nobs_w2, T.nobs_w3, T.nobs_w4)).T,
axis=0))
wise_trans.append(np.median(
np.vstack((T.mw_transmission_w1,
T.mw_transmission_w2,
T.mw_transmission_w3,
T.mw_transmission_w4)).T,
axis=0))
gtrans.append(np.median(T.mw_transmission_g))
rtrans.append(np.median(T.mw_transmission_r))
ztrans.append(np.median(T.mw_transmission_z))
elif self.dr3_or_dr5 == 'dr3':
gpsfsize.append(np.median(T.decam_psfsize[:,1]))
rpsfsize.append(np.median(T.decam_psfsize[:,2]))
zpsfsize.append(np.median(T.decam_psfsize[:,4]))
gpsfdepth.append(np.median(T.decam_depth[:,1]))
rpsfdepth.append(np.median(T.decam_depth[:,2]))
zpsfdepth.append(np.median(T.decam_depth[:,4]))
ggaldepth.append(np.median(T.decam_galdepth[:,1]))
rgaldepth.append(np.median(T.decam_galdepth[:,2]))
zgaldepth.append(np.median(T.decam_galdepth[:,4]))
wise_nobs.append(np.median(T.wise_nobs, axis=0))
wise_trans.append(np.median(T.wise_mw_transmission, axis=0))
gtrans.append(np.median(T.decam_mw_transmission[:,1]))
rtrans.append(np.median(T.decam_mw_transmission[:,2]))
ztrans.append(np.median(T.decam_mw_transmission[:,4]))
ebv.append(np.median(T.ebv))
br = self.surveyBricks[ibrick]
#print('Computing unique brick pixels...')
pixscale = 0.262/3600.
wcs = Tan(br.ra, br.dec, W/2.+0.5, H/2.+0.5,
-pixscale, 0., 0., pixscale,
float(W), float(H))
unique[:,:] = True
self.find_unique_pixels(wcs, W, H, unique,
br.ra1, br.ra2, br.dec1, br.dec2)
U = np.flatnonzero(unique)
#print(len(U), 'of', W*H, 'pixels are unique to this brick')
index = bricklist.index(brick)
assert(index == len(bricklist)-1)
# Does a check on the legacysurvey-{brick}-nexp*.fits files
if False:
#filepart = words[-1]
#filepart = filepart.replace('.fits.gz', '')
#filepart = filepart.replace('.fits.fz', '')
#print('File:', filepart)
#band = filepart[-1]
#assert(band in 'grz')
nlist,nhist = dict(g=(gn,gnhist), r=(rn,rnhist), z=(zn,znhist))[band]
for band in hasBands:
fn= os.path.join(dirprefix, 'coadd',
brick[:3],brick,
'legacysurvey-%s-nexp-%s.fits.gz' % (brick,band))
upix = fitsio.read(fn).flat[U]
med = np.median(upix)
print('Band', band, ': Median', med)
nlist[index] = med
hist = nhist[index]
for i in range(nnhist):
if i < nnhist-1:
hist[i] = np.sum(upix == i)
else:
hist[i] = np.sum(upix >= i)
assert(sum(hist) == len(upix))
print('Number of exposures histogram:', hist)
ibricks = np.array(ibricks)
#print('Maximum number of sources:', max(nsrcs))
T = fits_table()
T.brickname = np.array(bricklist)
T.ra = self.surveyBricks.ra [ibricks]
T.dec = self.surveyBricks.dec[ibricks]
T.nexp_g = np.array(gn).astype(np.int16)
T.nexp_r = np.array(rn).astype(np.int16)
T.nexp_z = np.array(zn).astype(np.int16)
T.nexphist_g = np.array(gnhist).astype(np.int32)
T.nexphist_r = np.array(rnhist).astype(np.int32)
T.nexphist_z = np.array(znhist).astype(np.int32)
T.nobjs = np.array(nsrcs).astype(np.int16)
T.npsf = np.array(npsf ).astype(np.int16)
T.nsimp = np.array(nsimp).astype(np.int16)
T.nrex = np.array(nrex ).astype(np.int16)
T.nexp = np.array(nexp ).astype(np.int16)
T.ndev = np.array(ndev ).astype(np.int16)
T.ncomp = np.array(ncomp).astype(np.int16)
T.psfsize_g = np.array(gpsfsize).astype(np.float32)
T.psfsize_r = np.array(rpsfsize).astype(np.float32)
T.psfsize_z = np.array(zpsfsize).astype(np.float32)
with np.errstate(divide='ignore'):
T.psfdepth_g = (-2.5*(-9.+np.log10(5.*np.sqrt(1. / np.array(gpsfdepth))))).astype(np.float32)
T.psfdepth_r = (-2.5*(-9.+np.log10(5.*np.sqrt(1. / np.array(rpsfdepth))))).astype(np.float32)
T.psfdepth_z = (-2.5*(-9.+np.log10(5.*np.sqrt(1. / np.array(zpsfdepth))))).astype(np.float32)
T.galdepth_g = (-2.5*(-9.+np.log10(5.*np.sqrt(1. / np.array(ggaldepth))))).astype(np.float32)
T.galdepth_r = (-2.5*(-9.+np.log10(5.*np.sqrt(1. / np.array(rgaldepth))))).astype(np.float32)
T.galdepth_z = (-2.5*(-9.+np.log10(5.*np.sqrt(1. / np.array(zgaldepth))))).astype(np.float32)
for k in ['psfdepth_g', 'psfdepth_r', 'psfdepth_z', 'galdepth_g', 'galdepth_r', 'galdepth_z']:
v = T.get(k)
v[np.logical_not(np.isfinite(v))] = 0.
T.ebv = np.array(ebv).astype(np.float32)
T.trans_g = np.array(gtrans).astype(np.float32)
T.trans_r = np.array(rtrans).astype(np.float32)
T.trans_z = np.array(ztrans).astype(np.float32)
T.ext_g = -2.5 * np.log10(T.trans_g)
T.ext_r = -2.5 * np.log10(T.trans_r)
T.ext_z = -2.5 * np.log10(T.trans_z)
T.wise_nobs = np.array(wise_nobs).astype(np.int16)
T.trans_wise = np.array(wise_trans).astype(np.float32)
T.ext_w1 = -2.5 * np.log10(T.trans_wise[:,0])
T.ext_w2 = -2.5 * np.log10(T.trans_wise[:,1])
T.ext_w3 = -2.5 * np.log10(T.trans_wise[:,2])
T.ext_w4 = -2.5 * np.log10(T.trans_wise[:,3])
return T
def find_unique_pixels(self,wcs, W, H, unique, ra1,ra2,dec1,dec2):
if unique is None:
unique = np.ones((H,W), bool)
# scan the outer annulus of pixels, and shrink in until all pixels
# are unique.
step = 10
for i in range(0, W//2, step):
nu,ntot = self._ring_unique(wcs, W, H, i, unique, ra1,ra2,dec1,dec2)
#print('Pixel', i, ': nu/ntot', nu, ntot)
if nu > 0:
i -= step
break
unique[:i,:] = False
unique[H-1-i:,:] = False
unique[:,:i] = False
unique[:,W-1-i:] = False
for j in range(max(i+1, 0), W//2):
nu,ntot = self._ring_unique(wcs, W, H, j, unique, ra1,ra2,dec1,dec2)
#print('Pixel', j, ': nu/ntot', nu, ntot)
if nu == ntot:
break
return unique
def _ring_unique(self,wcs, W, H, i, unique, ra1,ra2,dec1,dec2):
lo, hix, hiy = i, W-i-1, H-i-1
# one slice per side; we double-count the last pix of each side.
sidex = slice(lo,hix+1)
sidey = slice(lo,hiy+1)
top = (lo, sidex)
bot = (hiy, sidex)
left = (sidey, lo)
right = (sidey, hix)
xx = np.arange(W)
yy = np.arange(H)
nu,ntot = 0,0
for slc in [top, bot, left, right]:
#print('xx,yy', xx[slc], yy[slc])
(yslc,xslc) = slc
rr,dd = wcs.pixelxy2radec(xx[xslc]+1, yy[yslc]+1)
U = (rr >= ra1 ) * (rr < ra2 ) * (dd >= dec1) * (dd < dec2)
#print('Pixel', i, ':', np.sum(U), 'of', len(U), 'pixels are unique')
unique[slc] = U
nu += np.sum(U)
ntot += len(U)
#if allin:
# print('Scanned to pixel', i)
# break
return nu,ntot
def main_mpi(bricks=[],doWhat=None,dr3_or_dr5=None,
db_randoms_table=None,
nproc=1,data_dir='./',date='mm-dd-yyyy'):
"""
Args:
nproc: > 1 for mpi4py
bricks: list of bricks
"""
if nproc > 1:
from mpi4py.MPI import COMM_WORLD as comm
bricks= np.array_split(bricks, comm.size)[comm.rank]
else:
class MyComm(object):
def __init__(self):
self.rank=0
comm= MyComm()
if doWhat == 'randoms':
tabMaker= RandomsTable(data_dir,dr3_or_dr5,db_randoms_table,
date=date)
elif doWhat == 'summary':
tabMaker= SummaryTable(data_dir,dr3_or_dr5,date=date)
for cnt,brick in enumerate(bricks):
if (cnt+1) % 10 == 0:
print('rank %d: %d/%d' % (comm.rank,cnt+1,len(bricks)))
dr= derived_field_dir(brick,data_dir,date)
try:
os.makedirs(dr)
except OSError:
pass
tabMaker.run(brick)
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('--doWhat', type=str, choices=['randoms','summary'],required=True)
parser.add_argument('--data_dir', type=str, required=True,
help='path to obiwan/, tractor/ dirs')
parser.add_argument('--db_randoms_table', type=str, choices=['obiwan_eboss_elg',
'obiwan_elg_dr5','obiwan_cosmos'],required=False)
parser.add_argument('--nproc', type=int, default=1, help='set to > 1 to run mpi4py')
parser.add_argument('--bricks_fn', type=str, default=None,
help='specify a fn listing bricks to run, or a single default brick will be ran')
parser.add_argument('--dr3_or_dr5', type=str, choices=['dr5','dr3'],
help='for obiwan_randoms_b',required=True)
parser.add_argument('--date', type=str,help='mm-dd-yyyy, to label derived directory by',required=True)
args = parser.parse_args()
# Bricks to run
if args.bricks_fn is None:
bricks= ['1266p292']
else:
bricks= np.loadtxt(args.bricks_fn,dtype=str)
kwargs= vars(args)
for dropCol in ['bricks_fn']:
del kwargs[dropCol]
kwargs.update(bricks=bricks)
main_mpi(**kwargs)
|
the-stack_106_26793 | """ Calculates quantities required in semi-visible jet models """
import math
def calc_alpha_d(n_c, n_f, Lambda_d):
b_param = calc_b_param(n_c, n_f)
alpha_d = -2.0*math.pi / (b_param * math.log(Lambda_d/1000.0))
return alpha_d
def calc_lambda_d(n_c, n_f, alpha_d):
b_param = calc_b_param(n_c, n_f)
Lambda_d = 1000.0 * math.exp(-2.0*math.pi / (alpha_d*b_param))
return Lambda_d
def calc_b_param(n_c, n_f):
b_param = (11.0/3.0)*float(n_c) - (2.0/3.0)*float(n_f)
return b_param
def calc_lambda_d_from_str(n_c, n_f, alpha_d, m_dh):
if not isinstance(alpha_d, str):
raise TypeError("alpha_d must be a string")
elif not any(alpha_d == x for x in ['peak', 'low', 'high']):
raise ValueError("alpha_d must equal 'peak', 'low', or 'high'")
else:
Lambda_d_peak = 3.2 * math.pow(m_dh, 0.8)
if alpha_d == "peak":
Lambda_d = Lambda_d_peak
else:
alpha_d_peak = calc_alpha_d(n_c, n_f, Lambda_d_peak)
if alpha_d == "low":
a_d = 0.5 * alpha_d_peak
elif alpha_d == "high":
a_d = 1.5 * alpha_d_peak
Lambda_d = calc_lambda_d(n_c, n_f, a_d)
return Lambda_d
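# Illustrative sketch (not part of the original module): round-tripping the
# coupling/scale relation for an SU(3) dark sector with two dark flavours.
# The numeric scale below is a hypothetical value, chosen only so that the
# logarithm in calc_alpha_d is negative and the coupling comes out positive.
if __name__ == "__main__":
    n_c, n_f = 3, 2
    Lambda_d = 10.0
    alpha_d = calc_alpha_d(n_c, n_f, Lambda_d)
    # calc_lambda_d inverts calc_alpha_d, so this recovers Lambda_d up to
    # floating-point rounding.
    print(alpha_d, calc_lambda_d(n_c, n_f, alpha_d))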
|
the-stack_106_26794 | """Support for the Automatic platform."""
import asyncio
from datetime import timedelta
import json
import logging
import os
from aiohttp import web
import voluptuous as vol
from homeassistant.components.device_tracker import (
ATTR_ATTRIBUTES, ATTR_DEV_ID, ATTR_GPS, ATTR_GPS_ACCURACY, ATTR_HOST_NAME,
ATTR_MAC, PLATFORM_SCHEMA)
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_time_interval
_LOGGER = logging.getLogger(__name__)
ATTR_FUEL_LEVEL = 'fuel_level'
AUTOMATIC_CONFIG_FILE = '.automatic/session-{}.json'
CONF_CLIENT_ID = 'client_id'
CONF_CURRENT_LOCATION = 'current_location'
CONF_DEVICES = 'devices'
CONF_SECRET = 'secret'
DATA_CONFIGURING = 'automatic_configurator_clients'
DATA_REFRESH_TOKEN = 'refresh_token'
DEFAULT_SCOPE = ['location', 'trip', 'vehicle:events', 'vehicle:profile']
DEFAULT_TIMEOUT = 5
EVENT_AUTOMATIC_UPDATE = 'automatic_update'
FULL_SCOPE = DEFAULT_SCOPE + ['current_location']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_CLIENT_ID): cv.string,
vol.Required(CONF_SECRET): cv.string,
vol.Optional(CONF_CURRENT_LOCATION, default=False): cv.boolean,
vol.Optional(CONF_DEVICES): vol.All(cv.ensure_list, [cv.string]),
})
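# A hedged configuration sketch (credentials and device names are placeholders):
# the schema above corresponds to a device_tracker platform entry such as
#
#   device_tracker:
#     - platform: automatic
#       client_id: YOUR_CLIENT_ID
#       secret: YOUR_SECRET
#       current_location: true
#       devices:
#         - My Car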
def _get_refresh_token_from_file(hass, filename):
"""Attempt to load session data from file."""
path = hass.config.path(filename)
if not os.path.isfile(path):
return None
try:
with open(path) as data_file:
data = json.load(data_file)
if data is None:
return None
return data.get(DATA_REFRESH_TOKEN)
except ValueError:
return None
def _write_refresh_token_to_file(hass, filename, refresh_token):
"""Attempt to store session data to file."""
path = hass.config.path(filename)
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, 'w+') as data_file:
json.dump({
DATA_REFRESH_TOKEN: refresh_token
}, data_file)
@asyncio.coroutine
def async_setup_scanner(hass, config, async_see, discovery_info=None):
"""Validate the configuration and return an Automatic scanner."""
import aioautomatic
hass.http.register_view(AutomaticAuthCallbackView())
scope = FULL_SCOPE if config.get(CONF_CURRENT_LOCATION) else DEFAULT_SCOPE
client = aioautomatic.Client(
client_id=config[CONF_CLIENT_ID],
client_secret=config[CONF_SECRET],
client_session=async_get_clientsession(hass),
request_kwargs={'timeout': DEFAULT_TIMEOUT})
filename = AUTOMATIC_CONFIG_FILE.format(config[CONF_CLIENT_ID])
refresh_token = yield from hass.async_add_job(
_get_refresh_token_from_file, hass, filename)
@asyncio.coroutine
def initialize_data(session):
"""Initialize the AutomaticData object from the created session."""
hass.async_add_job(
_write_refresh_token_to_file, hass, filename,
session.refresh_token)
data = AutomaticData(
hass, client, session, config.get(CONF_DEVICES), async_see)
# Load the initial vehicle data
vehicles = yield from session.get_vehicles()
for vehicle in vehicles:
hass.async_create_task(data.load_vehicle(vehicle))
# Create a task instead of adding a tracking job, since this task will
# run until the websocket connection is closed.
hass.loop.create_task(data.ws_connect())
if refresh_token is not None:
try:
session = yield from client.create_session_from_refresh_token(
refresh_token)
yield from initialize_data(session)
return True
except aioautomatic.exceptions.AutomaticError as err:
_LOGGER.error(str(err))
configurator = hass.components.configurator
request_id = configurator.async_request_config(
"Automatic", description=(
"Authorization required for Automatic device tracker."),
link_name="Click here to authorize Home Assistant.",
link_url=client.generate_oauth_url(scope),
entity_picture="/static/images/logo_automatic.png",
)
@asyncio.coroutine
def initialize_callback(code, state):
"""Call after OAuth2 response is returned."""
try:
session = yield from client.create_session_from_oauth_code(
code, state)
yield from initialize_data(session)
configurator.async_request_done(request_id)
except aioautomatic.exceptions.AutomaticError as err:
_LOGGER.error(str(err))
configurator.async_notify_errors(request_id, str(err))
return False
if DATA_CONFIGURING not in hass.data:
hass.data[DATA_CONFIGURING] = {}
hass.data[DATA_CONFIGURING][client.state] = initialize_callback
return True
class AutomaticAuthCallbackView(HomeAssistantView):
"""Handle OAuth finish callback requests."""
requires_auth = False
url = '/api/automatic/callback'
name = 'api:automatic:callback'
@callback
def get(self, request): # pylint: disable=no-self-use
"""Finish OAuth callback request."""
hass = request.app['hass']
params = request.query
response = web.HTTPFound('/states')
if 'state' not in params or 'code' not in params:
if 'error' in params:
_LOGGER.error(
"Error authorizing Automatic: %s", params['error'])
return response
_LOGGER.error(
"Error authorizing Automatic. Invalid response returned")
return response
if DATA_CONFIGURING not in hass.data or \
params['state'] not in hass.data[DATA_CONFIGURING]:
_LOGGER.error("Automatic configuration request not found")
return response
code = params['code']
state = params['state']
initialize_callback = hass.data[DATA_CONFIGURING][state]
hass.async_create_task(initialize_callback(code, state))
return response
class AutomaticData:
"""A class representing an Automatic cloud service connection."""
def __init__(self, hass, client, session, devices, async_see):
"""Initialize the automatic device scanner."""
self.hass = hass
self.devices = devices
self.vehicle_info = {}
self.vehicle_seen = {}
self.client = client
self.session = session
self.async_see = async_see
self.ws_reconnect_handle = None
self.ws_close_requested = False
self.client.on_app_event(
lambda name, event: self.hass.async_create_task(
self.handle_event(name, event)))
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, self.ws_close())
@asyncio.coroutine
def handle_event(self, name, event):
"""Coroutine to update state for a real time event."""
import aioautomatic
self.hass.bus.async_fire(EVENT_AUTOMATIC_UPDATE, event.data)
if event.vehicle.id not in self.vehicle_info:
# If vehicle hasn't been seen yet, request the detailed
# info for this vehicle.
_LOGGER.info("New vehicle found")
try:
vehicle = yield from event.get_vehicle()
except aioautomatic.exceptions.AutomaticError as err:
_LOGGER.error(str(err))
return
yield from self.get_vehicle_info(vehicle)
if event.created_at < self.vehicle_seen[event.vehicle.id]:
# Skip events received out of order
_LOGGER.debug("Skipping out of order event. Event Created %s. "
"Last seen event: %s", event.created_at,
self.vehicle_seen[event.vehicle.id])
return
self.vehicle_seen[event.vehicle.id] = event.created_at
kwargs = self.vehicle_info[event.vehicle.id]
if kwargs is None:
# Ignored device
return
# If this is a vehicle status report, update the fuel level
if name == "vehicle:status_report":
fuel_level = event.vehicle.fuel_level_percent
if fuel_level is not None:
kwargs[ATTR_ATTRIBUTES][ATTR_FUEL_LEVEL] = fuel_level
# Send the device seen notification
if event.location is not None:
kwargs[ATTR_GPS] = (event.location.lat, event.location.lon)
kwargs[ATTR_GPS_ACCURACY] = event.location.accuracy_m
yield from self.async_see(**kwargs)
@asyncio.coroutine
def ws_connect(self, now=None):
"""Open the websocket connection."""
import aioautomatic
self.ws_close_requested = False
if self.ws_reconnect_handle is not None:
_LOGGER.debug("Retrying websocket connection")
try:
ws_loop_future = yield from self.client.ws_connect()
except aioautomatic.exceptions.UnauthorizedClientError:
_LOGGER.error("Client unauthorized for websocket connection. "
"Ensure Websocket is selected in the Automatic "
"developer application event delivery preferences")
return
except aioautomatic.exceptions.AutomaticError as err:
if self.ws_reconnect_handle is None:
# Show log error and retry connection every 5 minutes
_LOGGER.error("Error opening websocket connection: %s", err)
self.ws_reconnect_handle = async_track_time_interval(
self.hass, self.ws_connect, timedelta(minutes=5))
return
if self.ws_reconnect_handle is not None:
self.ws_reconnect_handle()
self.ws_reconnect_handle = None
_LOGGER.info("Websocket connected")
try:
yield from ws_loop_future
except aioautomatic.exceptions.AutomaticError as err:
_LOGGER.error(str(err))
_LOGGER.info("Websocket closed")
# If websocket was close was not requested, attempt to reconnect
if not self.ws_close_requested:
self.hass.loop.create_task(self.ws_connect())
@asyncio.coroutine
def ws_close(self):
"""Close the websocket connection."""
self.ws_close_requested = True
if self.ws_reconnect_handle is not None:
self.ws_reconnect_handle()
self.ws_reconnect_handle = None
yield from self.client.ws_close()
@asyncio.coroutine
def load_vehicle(self, vehicle):
"""Load the vehicle's initial state and update hass."""
kwargs = yield from self.get_vehicle_info(vehicle)
yield from self.async_see(**kwargs)
@asyncio.coroutine
def get_vehicle_info(self, vehicle):
"""Fetch the latest vehicle info from automatic."""
import aioautomatic
name = vehicle.display_name
if name is None:
name = ' '.join(filter(None, (
str(vehicle.year), vehicle.make, vehicle.model)))
if self.devices is not None and name not in self.devices:
self.vehicle_info[vehicle.id] = None
return
self.vehicle_info[vehicle.id] = kwargs = {
ATTR_DEV_ID: vehicle.id,
ATTR_HOST_NAME: name,
ATTR_MAC: vehicle.id,
ATTR_ATTRIBUTES: {
ATTR_FUEL_LEVEL: vehicle.fuel_level_percent,
}
}
self.vehicle_seen[vehicle.id] = \
vehicle.updated_at or vehicle.created_at
if vehicle.latest_location is not None:
location = vehicle.latest_location
kwargs[ATTR_GPS] = (location.lat, location.lon)
kwargs[ATTR_GPS_ACCURACY] = location.accuracy_m
return kwargs
trips = []
try:
# Get the most recent trip for this vehicle
trips = yield from self.session.get_trips(
vehicle=vehicle.id, limit=1)
except aioautomatic.exceptions.AutomaticError as err:
_LOGGER.error(str(err))
if trips:
location = trips[0].end_location
kwargs[ATTR_GPS] = (location.lat, location.lon)
kwargs[ATTR_GPS_ACCURACY] = location.accuracy_m
if trips[0].ended_at >= self.vehicle_seen[vehicle.id]:
self.vehicle_seen[vehicle.id] = trips[0].ended_at
return kwargs
|
the-stack_106_26796 | # -*- coding: utf-8 -*-
import nltk
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.corpus import stopwords
from nltk.stem.arlstem import ARLSTem
from sklearn.metrics.pairwise import cosine_similarity, linear_kernel
from nltk.tokenize import WordPunctTokenizer
import numpy as np
import pickle
from numpy import dot, array
from scipy import sparse
import argparse
nltk.download('stopwords')
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--ngrams", type=int, default=1, help="n-gram order")
parser.add_argument("-k", "--topk", type=int, default=10, help="number of documents retriever should return")
parser.add_argument('-w', '--wiki-path', help='Path of arwiki.p', required=True)
parser.add_argument('-o', '--output-dir', help='Where to place the retrivers', required=True)
class TfidfRetriever:
SYMBOLS = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~\"'
def __init__(self, docs, k, ngrams, vectorizer=None, tfidf_matrix=None):
self.k = k # number of documents to return
self.tokenizer = WordPunctTokenizer()
self.stemmer = ARLSTem()
self.docs = docs
self.stopwords = stopwords.words('arabic')
self.vectorizer = TfidfVectorizer(ngram_range=(1, ngrams), norm=None, stop_words=self.stopwords)
if tfidf_matrix is None or vectorizer is None:
docs_stemmed = self.docs_stem()
self.tfidf_matrix = self.vectorizer.fit_transform(docs_stemmed)
else:
self.vectorizer = vectorizer
self.tfidf_matrix = tfidf_matrix
def docs_stem(self):
docs_stemmed = []
for d in self.docs:
docs_stemmed.append(self.stem_string(d))
return docs_stemmed
def stem_string(self, str):
str_tokens = self.tokenizer.tokenize(str)
str_processed = ""
for token in str_tokens:
has_symbol = False
for s in self.SYMBOLS:
if s in token:
has_symbol = True
break
if not has_symbol:
str_processed += token + " "
return str_processed
def get_topk_docs_scores(self, query):
"""
:param query: question as string
        :return: the top k articles with each of their paragraphs separated by '###' as a Python list of strings
"""
        query = self.stem_string(query)  # stem the query to match the stemmed documents
query_tfidf = self.vectorizer.transform([query])
similarities_raw = linear_kernel(self.tfidf_matrix, query_tfidf)
similarities = []
for s in similarities_raw:
similarities.append(s[0])
indices_sorted = np.argsort(similarities)[::-1] # reverse order
top_docs = []
docs_scores = []
i = 0
while i < min(self.k, len(self.docs)):
doc = self.docs[indices_sorted[i]]
top_docs.append(doc)
docs_scores.append(similarities[indices_sorted[i]])
i += 1
norm_cst = np.sum(np.asarray(docs_scores))
docs_scores = np.asarray(docs_scores)
docs_scores = docs_scores / norm_cst
return top_docs, docs_scores
def get_topk_docs(self, query):
"""
:param query: question as string
        :return: the top k articles with each of their paragraphs separated by '###' as a Python list of strings
"""
        query = self.stem_string(query)  # stem the query to match the stemmed documents
query_tfidf = self.vectorizer.transform([query])
similarities_raw = linear_kernel(self.tfidf_matrix, query_tfidf)
similarities = []
for s in similarities_raw:
similarities.append(s[0])
indices_sorted = np.argsort(similarities)[::-1] # reverse order
top_docs = []
i = 0
while i < min(self.k, len(self.docs)):
doc = self.docs[indices_sorted[i]]
top_docs.append(doc)
i += 1
return top_docs
class TfidfRetriever_sys:
SYMBOLS = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~\"'
def __init__(self, docs, k, ngrams, vectorizer=None, tfidf_matrix=None):
self.k = k # number of documents to return
self.tokenizer = WordPunctTokenizer()
self.stemmer = ARLSTem()
self.docs = docs
self.vectorizer = TfidfVectorizer(ngram_range=(1, ngrams), norm=None)
if tfidf_matrix is None or vectorizer is None:
self.tfidf_matrix = self.vectorizer.fit_transform(docs)
else:
self.vectorizer = vectorizer
self.tfidf_matrix = tfidf_matrix
def get_topk_docs(self, query):
"""
:param query: question as string
        :return: the top k articles with each of their paragraphs separated by '###' as a Python list of strings
"""
query_tfidf = self.vectorizer.transform([query])
similarities_raw = linear_kernel(self.tfidf_matrix, query_tfidf)
similarities = []
for s in similarities_raw:
similarities.append(s[0])
indices_sorted = np.argsort(similarities)[::-1] # reverse order
top_docs = []
i = 0
while i < min(self.k, len(self.docs)):
doc = self.docs[indices_sorted[i]]
top_docs.append(doc)
i += 1
return top_docs
class HierarchicalTfidf:
def __init__(self, base_retriever, k1 , k2):
self.r = base_retriever
self.r.k = k1
self.k = k2
def get_topk_docs_scores(self, query):
docs = self.r.get_topk_docs(query)
pars = []
for doc in docs:
ps = doc.split("###")
for p in ps:
pars.append(p)
r2 = TfidfRetriever(pars, self.k, 4)
top_docs, docs_scores = r2.get_topk_docs_scores(query)
return top_docs, docs_scores
def get_topk_docs(self, query):
docs = self.r.get_topk_docs(query)
r2 = TfidfRetriever_sys(docs, self.k, 4)
top_docs = r2.get_topk_docs(query)
return top_docs
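# A minimal usage sketch: the documents, k values, and query below are
# illustrative placeholders (not part of the arwiki pipeline). It builds a base
# retriever over whole articles (paragraphs joined by '###'), then wraps it in
# HierarchicalTfidf to re-rank individual paragraphs.
def _example_usage():
    docs = ["paragraph one ### paragraph two",
            "paragraph three ### paragraph four"]
    base = TfidfRetriever(docs, k=2, ngrams=2)
    retriever = HierarchicalTfidf(base, k1=2, k2=2)
    # returns the best paragraphs and their normalized tf-idf similarity scores
    top_pars, scores = retriever.get_topk_docs_scores("paragraph two")
    return top_pars, scores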
def build_tfidfretriever(wiki_path, output_path, ngrams, k):
wiki_data = pickle.load(open(wiki_path, "rb"))
docs = []
i = 0
for art, pars in wiki_data.items():
article_text = ""
for p in pars:
article_text += p +"### "
docs.append(article_text)
i += 1
print("finished building documents")
r = TfidfRetriever(docs, k, ngrams)
pickle.dump(r, open(output_path+"/tfidfretriever.p", "wb"))
def main():
args = parser.parse_args()
build_tfidfretriever(args.wiki_path, args.output_dir, args.ngrams, args.topk)
if __name__ == "__main__":
main()
|
the-stack_106_26798 | # Copyright (c) 2013, VHRS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
import math
from datetime import datetime,timedelta
from frappe.utils import getdate, cint, add_months, date_diff, add_days, nowdate, \
get_datetime_str, cstr, get_datetime, time_diff, time_diff_in_seconds
def execute(filters=None):
if not filters:
filters = {}
columns = get_columns()
data = []
row = []
conditions, filters = get_conditions(filters)
total = from_time = late_in = shift_in_time = 0
attendance = get_attendance(conditions,filters)
from_date = filters.get("from_date")
to_date = filters.get("to_date")
for att in attendance:
if att.name:row = [att.name]
else:row = ["-"]
if att.attendance_date:row += [att.attendance_date]
        else:row += ["-"]
if att.employee:row += [att.employee]
else:row += ["-"]
if att.employee_name:row += [att.employee_name]
else:row += ["-"]
if att.department:row += [att.department]
else:row += ["-"]
if att.business_unit:row += [att.business_unit]
else:row += ["-"]
if att.location:row += [att.location]
else:row += ["-"]
working_shift = frappe.db.get_value("Employee", {'employee':att.employee},['working_shift'])
if att.in_time:
dt = datetime.strptime(att.in_time, "%d/%m/%Y %H:%M:%S")
from_time = dt.time()
shift_in_time = frappe.db.get_value("Working Shift",working_shift,"in_time")
emp_in_time = timedelta(hours=from_time.hour,minutes=from_time.minute,seconds=from_time.second)
if emp_in_time > shift_in_time:
late_in = emp_in_time - shift_in_time
else:
late_in = ''
row += [from_time.isoformat()]
else:row += ["-"]
if att.out_time:
dt = datetime.strptime(att.out_time, "%d/%m/%Y %H:%M:%S")
end_time = dt.time()
shift_out_time = frappe.db.get_value("Working Shift",working_shift,"out_time")
emp_out_time = timedelta(hours=end_time.hour,minutes=end_time.minute,seconds=end_time.second)
if emp_out_time < shift_out_time:
early_out = shift_out_time - emp_out_time
else:
early_out = ''
row += [end_time.isoformat()]
else:row += ["-"]
if att.overtime :row += [att.overtime]
else:row += ["-"]
data.append(row)
return columns, data
def get_columns():
columns = [
_("Name") + ":Link/Attendance:100",
_("Attendance Date") + ":Date:100",
_("Employee") + ":Link/Employee:100",
_("Employee Name") + ":Data:180",
_("Department") + ":Data:90",
_("Business Unit") + ":Data:90",
_("Location") + ":Data:90",
_("In Time") + ":Data:90",
_("Out Time") + ":Data:90",
_("Overtime") + ":Data:90",
]
return columns
def get_attendance(conditions,filters):
attendance = frappe.db.sql("""select att.overtime as overtime,att.status as status,att.location as location, att.name as name,att.department as department,att.business_unit as business_unit,att.attendance_date as attendance_date,att.work_time as work_time,att.employee as employee, att.employee_name as employee_name,att.status as status,att.in_time as in_time,att.out_time as out_time from `tabAttendance` att
where att.status = "Present" %s order by att.attendance_date """ % conditions, filters, as_dict=1)
return attendance
def get_conditions(filters):
conditions = ""
if filters.get("from_date"): conditions += "and att.attendance_date >= %(from_date)s"
if filters.get("to_date"): conditions += " and att.attendance_date <= %(to_date)s"
if filters.get("location"): conditions += " and att.location = %(location)s"
if filters.get("business_unit"): conditions += " and att.business_unit = %(business_unit)s"
return conditions, filters |
the-stack_106_26799 | """
### BEGIN NODE INFO
[info]
name = Serial Server
version = 1.5.1
description = Gives access to serial devices via pyserial.
instancename = %LABRADNODE% Serial Server
[startup]
cmdline = %PYTHON% %FILE%
timeout = 20
[shutdown]
message = 987654321
timeout = 20
### END NODE INFO
"""
import os
import time
import collections
from labrad import util  # needed for util.maybeTimeout in deferredRead
from labrad.types import Value
from labrad.errors import Error
from labrad.server import setting, Signal
from twisted.internet import reactor, threads
from twisted.internet.task import deferLater
from twisted.internet.defer import inlineCallbacks, returnValue
from serial import Serial
from serial.tools import list_ports
from serial.serialutil import SerialException
from EGGS_labrad.lib.servers.polling_server import PollingServer
# ERRORS
class NoPortSelectedError(Error):
"""Please open a port first."""
code = 1
class NoPortsAvailableError(Error):
"""No serial ports are available."""
code = 3
SerialDevice = collections.namedtuple('SerialDevice', ['name', 'devicepath'])
PORTSIGNAL = 539410
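# A hedged client-side sketch (the connection attribute and server name are
# assumptions that depend on the local LabRAD setup and %LABRADNODE%): the
# settings defined below map onto pylabrad server methods, e.g.
#
#   import labrad
#   cxn = labrad.connect()
#   ser = cxn.servers['node_pc Serial Server']
#   ser.open('COM4')
#   ser.baudrate(9600)
#   ser.timeout(Value(1.0, 's'))
#   ser.write_line('*IDN?')
#   print(ser.read_line())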
class SerialServer(PollingServer):
"""
Provides access to a computer's serial (COM) ports.
"""
name = '%LABRADNODE% Serial Server'
POLL_ON_STARTUP = True
port_update = Signal(PORTSIGNAL, 'signal: port update', '(s,*s)')
def initServer(self):
super().initServer()
self.enumerate_serial_pyserial()
def _poll(self):
self.enumerate_serial_pyserial()
def enumerate_serial_windows(self):
"""
Manually Enumerate the first 40 COM ports.
pyserial includes a function to enumerate device names, but it
possibly doesn't work right on windows for COM ports above 4.
http://stackoverflow.com/questions/12090503/listing-available-com-ports-with-python
"""
self.SerialPorts = []
print('Searching for COM ports:')
for a in range(1, 40):
COMexists = True
dev_name = 'COM{}'.format(a)
dev_path = r'\\.\{}'.format(dev_name)
try:
ser = Serial(dev_name)
ser.close()
except SerialException as e:
                if str(e).find('cannot find') >= 0:  # SerialException has no .message in Python 3
COMexists = False
if COMexists:
self.SerialPorts.append(SerialDevice(dev_name, dev_path))
print(" ", dev_name)
if not len(self.SerialPorts):
print(' none')
def enumerate_serial_pyserial(self):
"""
This uses the pyserial built-in device enumeration.
We ignore the pyserial "human readable" device name
because that appears to make no sense. For instance, a
particular FTDI USB-Serial adapter shows up as 'Microsoft
Corp. Optical Mouse 200'.
Following the example from the above windows version, we try to open
each port and ignore it if we can't.
"""
self.SerialPorts = []
dev_list = list_ports.comports()
for d in dev_list:
dev_path = d[0]
try:
ser = Serial(dev_path)
ser.close()
except SerialException as e:
pass
else:
_, _, dev_name = dev_path.rpartition(os.sep)
self.SerialPorts.append(SerialDevice(dev_name, dev_path))
# send out signal
port_list_tmp = [x.name for x in self.SerialPorts]
self.port_update(self.name, port_list_tmp)
def expireContext(self, c):
if 'PortObject' in c:
c['PortObject'].close()
def getPort(self, c):
try:
return c['PortObject']
except Exception as e:
raise NoPortSelectedError()
@setting(1, 'List Serial Ports', returns=['*s: List of serial ports'])
def list_serial_ports(self, c):
"""
Retrieves a list of all serial ports.
NOTES:
This list contains all ports installed on the computer,
including ones that are already in use by other programs.
"""
port_list = [x.name for x in self.SerialPorts]
return port_list
@setting(10, 'Open', port=[': Open the first available port', 's: Port to open, e.g. COM4'],
returns=['s: Opened port'])
def open(self, c, port=''):
"""
Opens a serial port in the current context.
args:
port device name as returned by list_serial_ports.
On windows, the device name will generally be of the form
COM1 or COM42 (i.e., without the device prefix \\\\.\\). On
linux, it will be the device node name (ttyUSB0) without the
/dev/ prefix. This is case insensitive on windows, case sensitive
on Linux. For compatibility, always use the same case.
"""
c['Timeout'] = 0
c['Debug'] = False
if 'PortObject' in c:
c['PortObject'].close()
del c['PortObject']
if not port:
for i in range(len(self.SerialPorts)):
try:
c['PortObject'] = Serial(self.SerialPorts[i].devicepath,
timeout=0)
break
except SerialException:
pass
if 'PortObject' not in c:
raise NoPortsAvailableError()
else:
for x in self.SerialPorts:
if os.path.normcase(x.name) == os.path.normcase(port):
try:
c['PortObject'] = Serial(x.devicepath, timeout=0)
return x.name
except SerialException as e:
                        if str(e).find('cannot find') >= 0:  # .message does not exist in Python 3
                            raise Error(code=1, msg=str(e))
                        else:
                            raise Error(code=2, msg=str(e))
raise Error(code=1, msg='Unknown port %s' % (port,))
@setting(11, 'Close', returns=[''])
def close(self, c):
"""Closes the current serial port."""
if 'PortObject' in c:
c['PortObject'].close()
del c['PortObject']
@setting(20, 'Baudrate', data=[': List baudrates', 'w: Set baudrate (0: query current)'],
returns=['w: Selected baudrate', '*w: Available baudrates'])
def baudrate(self, c, data=None):
"""Sets the baudrate."""
ser = self.getPort(c)
baudrates = list(ser.BAUDRATES)
#allow non-standard baud rates
baudrates.extend([28800])
if data is None:
return baudrates
else:
if data in baudrates:
ser.baudrate = data
return int(ser.baudrate)
@setting(21, 'Bytesize', data=[': List bytesizes', 'w: Set bytesize (0: query current)'], returns=['*w: Available bytesizes', 'w: Selected bytesize'])
def bytesize(self, c, data=None):
"""Sets the bytesize."""
ser = self.getPort(c)
bytesizes = ser.BYTESIZES
if data is None:
return bytesizes
else:
if data in bytesizes:
ser.bytesize = data
return int(ser.bytesize)
@setting(22, 'Parity', data=[': List parities', 's: Set parity (empty: query current)'], returns=['*s: Available parities', 's: Selected parity'])
def parity(self, c, data=None):
"""Sets the parity."""
ser = self.getPort(c)
parities = ser.PARITIES
if data is None:
return parities
else:
data = data.upper()
if data in parities:
ser.parity = data
return ser.parity
@setting(23, 'Stopbits', data=[': List stopbits', 'w: Set stopbits (0: query current)'],
returns=['*w: Available stopbits', 'w: Selected stopbits'])
def stopbits(self, c, data=None):
"""Sets the number of stop bits."""
ser = self.getPort(c)
stopbits = ser.STOPBITS
if data is None:
return stopbits
else:
if data in stopbits:
ser.stopbits = data
return int(ser.stopbits)
@setting(25, 'Timeout', data=[': Return immediately', 'v[s]: Timeout to use (max: 5min)'],
returns=['v[s]: Timeout being used (0 for immediate return)'])
def timeout(self, c, data=Value(0, 's')):
"""Sets a timeout for read operations."""
c['Timeout'] = min(data['s'], 300)
return Value(c['Timeout'], 's')
@setting(26, 'Serial Debug', status='b', returns='b')
def serialDebug(self, c, status=None):
"""Sets/gets the debug setting."""
if status is not None:
c['Debug'] = status
return c['Debug']
@setting(30, 'RTS', data=['b'], returns=['b'])
def RTS(self, c, data):
"""Sets the state of the RTS line."""
ser = self.getPort(c)
ser.rts = int(data)
return data
@setting(31, 'DTR', data=['b'], returns=['b'])
def DTR(self, c, data):
"""Sets the state of the DTR line."""
ser = self.getPort(c)
ser.dtr = int(data)
return data
@setting(40, 'Write', data=['s: Data to send', '*w: Byte-data to send'],
returns=['w: Bytes sent'])
def write(self, c, data):
"""Sends data over the port."""
ser = self.getPort(c)
# encode as needed
if type(data) == str:
data = data.encode()
ser.write(data)
# debug output
if c['Debug']:
print(ser.name, ' WRITE:\t', data)
return int(len(data))
@setting(41, 'Write Line', data=['s: Data to send'], returns=['w: Bytes sent'])
def write_line(self, c, data):
"""Sends data over the port appending CR LF."""
ser = self.getPort(c)
# encode as needed
if type(data) == str:
data = data.encode()
data += b'\r\n'
ser.write(data)
# debug output
if c['Debug']:
print(ser.name, ' WRITE:\t', data)
return int(len(data))
@setting(42, 'Pause', duration='v[s]: Time to pause', returns=[])
def pause(self, c, duration):
_ = yield deferLater(reactor, duration['s'], lambda: None)
return
@inlineCallbacks
def deferredRead(self, ser, timeout, count=1):
"""
"""
# killit stops the read
killit = False
def doRead(count):
"""Waits until it reads <count> characters or is told to stop."""
d = b''
while not killit:
d = ser.read(count)
if d:
break
time.sleep(0.001)
return d
# read until the timeout
data = threads.deferToThread(doRead, count)
timeout_object = []
start_time = time.time()
r = yield util.maybeTimeout(data, min(timeout, 300), timeout_object)
killit = True
# check if we have timed out
if r == timeout_object:
elapsed = time.time() - start_time
print("deferredRead timed out after {} seconds".format(elapsed))
r = b''
if r == b'':
r = ser.read(count)
returnValue(r)
@inlineCallbacks
def readSome(self, c, count=0):
ser = self.getPort(c)
if count == 0:
returnValue(ser.read(10000))
timeout = c['Timeout']
if timeout == 0:
returnValue(ser.read(count))
# read until we either hit timeout or meet character count
recd = b''
while len(recd) < count:
# try to read remaining characters
r = ser.read(count - len(recd))
# if nothing, keep reading until timeout
if r == b'':
r = yield self.deferredRead(ser, timeout, count - len(recd))
if r == b'':
ser.close()
ser.open()
break
recd += r
returnValue(recd)
@setting(50, 'Read', count=[': Read all bytes in buffer', 'w: Read this many bytes'],
returns=['s: Received data'])
def read(self, c, count=0):
"""
Read data from the port.
Args:
count: bytes to read.
If count=0, reads the contents of the buffer (non-blocking). Otherwise,
reads for up to <count> characters or the timeout, whichever is first
"""
ans = yield self.readSome(c, count)
# debug output
ser_name = self.getPort(c).name
if c['Debug']:
print(ser_name, ' READ:\t', ans)
returnValue(ans)
@setting(51, 'Read as Words', data=[': Read all bytes in buffer', 'w: Read this many bytes'],
returns=['*w: Received data'])
def read_as_words(self, c, data=0):
"""Read data from the port."""
ans = yield self.readSome(c, data)
        ans = [int(x) for x in ans]  # iterating bytes yields ints in Python 3
# debug output
ser_name = self.getPort(c).name
if c['Debug']:
print(ser_name, ' READ:\t', ans)
returnValue(ans)
@setting(52, 'Read Line', data=[': Read until LF, ignoring CRs', 's: Other delimiter to use'],
returns=['s: Received data'])
def read_line(self, c, data=''):
"""Read data from the port, up to but not including the specified delimiter."""
ser = self.getPort(c)
timeout = c['Timeout']
# set default end character if not specified
if data:
# ensure end character is of type byte
if type(data) != bytes:
data = bytes(data, encoding='utf-8')
delim, skip = data, b''
else:
delim, skip = b'\n', b'\r'
recd = b''
while True:
r = ser.read(1)
# only try a deferred read if there is a timeout
if r == b'' and timeout > 0:
r = yield self.deferredRead(ser, timeout)
# stop if r is empty or the delimiter
if r in (b'', delim):
break
elif r != skip:
recd += r
if c['Debug']:
print(ser.name, ' READ:\t', recd)
returnValue(recd)
@setting(61, 'Flush Input', returns='')
def flush_input(self, c):
"""Flush the input buffer."""
ser = self.getPort(c)
yield ser.reset_input_buffer()
@setting(62, 'Flush Output', returns='')
def flush_output(self, c):
"""Flush the output buffer."""
ser = self.getPort(c)
yield ser.reset_output_buffer()
__server__ = SerialServer()
if __name__ == '__main__':
from labrad import util
util.runServer(__server__)
|
the-stack_106_26800 | """GUI frontend for atamaTracker
"""
import cv2
from . import graphics
from .geometry import Point
# constants
ESC = 27
LEFT_ARROW = 63234
class EventListener(object):
"""Listener for mouse events
Public properties:
clicked_points -- [list] List of Point instances
is_pressed -- [bool] Boolean whether the left button is pressed
"""
is_pressed = False
def __init__(self, window):
self.clicked_points = []
self.window = window
cv2.setMouseCallback(self.window.name, self.__on_mouse_click)
def get_xy(self):
"""Listen mouse event and return clicked coordinates.
"""
# reset stored coordinates
self.clicked_points = []
key = cv2.waitKey(0)
if key == ESC:
raise UserCancelException
return self.clicked_points
def __on_mouse_click(self, event, x, y, flags, param):
"""Mouse event callback.
"""
if event == cv2.EVENT_LBUTTONDOWN:
point = Point(x, y)
self.is_pressed = True
if not self.__is_clicked(point):
self.clicked_points.append(point)
self.window.draw_marker(point)
self.window.display()
elif event == cv2.EVENT_LBUTTONUP:
self.is_pressed = False
elif event == cv2.EVENT_MOUSEMOVE and self.is_pressed:
pass
def __is_clicked(self, point):
"""Check whether the given point has already been clicked.
"""
for p in self.clicked_points:
if p.distance(point) <= graphics.Marker.RADIUS + 2: # +2 for buffer
return point
return None
class Window(object):
"""Window object.
Public properties:
name -- [str] Window name
image -- [str] Current image that shown in the window
"""
def __init__(self, name):
self.name = name
cv2.namedWindow(self.name)
def close(self):
"""Close window.
"""
cv2.destroyWindow(self.name)
def display(self):
"""Update window contents.
"""
cv2.imshow(self.name, self.image)
def draw_marker(self, point, frame_size=0):
"""Draw a circle at the desired coordinate on the image.
"""
graphics.draw_marker(self.image, point, frame_size)
class UserCancelException(Exception):
"""User performed cancel.
"""
pass
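# A minimal usage sketch (window name and image path are placeholders): create a
# window, load a frame, and collect clicked coordinates until a key is pressed;
# pressing ESC raises UserCancelException.
#
#   window = Window('atamaTracker')
#   window.image = cv2.imread('frame0001.png')
#   window.display()
#   listener = EventListener(window)
#   points = listener.get_xy()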
|
the-stack_106_26805 | #!/usr/bin/env python3
import os
import re
import sys
from setuptools import find_packages, setup
REQUIRED_MAJOR = 3
REQUIRED_MINOR = 6
# Check for python version
if sys.version_info < (REQUIRED_MAJOR, REQUIRED_MINOR):
error = (
"Your version of python ({major}.{minor}) is too old. You need "
"python >= {required_major}.{required_minor}."
).format(
major=sys.version_info.major,
minor=sys.version_info.minor,
required_minor=REQUIRED_MINOR,
required_major=REQUIRED_MAJOR,
)
sys.exit(error)
TEST_REQUIRES = ["pytest", "pytest-cov"]
DEV_REQUIRES = TEST_REQUIRES + ["black", "flake8", "sphinx", "sphinx-autodoc-typehints"]
TUTORIALS_REQUIRES = ["jupyter", "matplotlib", "cma", "torchvision"]
# get version string from module
with open(os.path.join(os.path.dirname(__file__), "botorch/__init__.py"), "r") as f:
version = re.search(r"__version__ = ['\"]([^'\"]*)['\"]", f.read(), re.M).group(1)
setup(
name="botorch",
version=version,
description="Bayesian Optimization in PyTorch",
author="Facebook, Inc.",
license="MIT",
url="https://botorch.org",
project_urls={
"Documentation": "https://botorch.org",
"Source": "https://github.com/pytorch/botorch",
"conda": "https://anaconda.org/pytorch/botorch",
},
keywords=["Bayesian optimization", "PyTorch"],
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3 :: Only",
"License :: OSI Approved :: MIT License",
"Topic :: Scientific/Engineering",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
],
python_requires=">=3.6",
install_requires=["torch>=1.1", "gpytorch>=0.3.2", "scipy"],
packages=find_packages(),
extras_require={
"dev": DEV_REQUIRES,
"test": TEST_REQUIRES,
"tutorials": TUTORIALS_REQUIRES,
},
)
|
the-stack_106_26806 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPUEstimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import threading
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_config
from tensorflow.contrib.tpu.python.tpu import tpu_feed
from tensorflow.contrib.tpu.python.tpu import tpu_function
from tensorflow.contrib.tpu.python.tpu import training_loop
from tensorflow.contrib.tpu.python.tpu import util as util_lib
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import evaluation
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training
from tensorflow.python.training import training_util
_INITIAL_LOSS = 1e7
_ZERO_LOSS = 0.
_DEFAULT_NAME_SCOPE = 'tpu_estimator'
_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop'
_BATCH_SIZE_KEY = 'batch_size'
_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum'
_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY]
def _create_global_step(graph):
graph = graph or ops.get_default_graph()
if training.get_global_step(graph) is not None:
raise ValueError('"global_step" already exists.')
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
return variable_scope.get_variable(
ops.GraphKeys.GLOBAL_STEP,
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
use_resource=True,
collections=[ops.GraphKeys.GLOBAL_VARIABLES,
ops.GraphKeys.GLOBAL_STEP])
def _create_iterations_per_loop():
with variable_scope.variable_scope(_DEFAULT_NAME_SCOPE,
reuse=variable_scope.AUTO_REUSE):
return variable_scope.get_variable(
_ITERATIONS_PER_LOOP_VAR,
initializer=init_ops.zeros_initializer(),
shape=[],
dtype=dtypes.int32,
trainable=False,
collections=[],
use_resource=True)
def _sync_variables_ops():
# Gets the variables back from TPU nodes. This means the variables updated
# by TPU will now be *synced* to host memory.
return [
array_ops.check_numerics(v.read_value(),
'Gradient for %s is NaN' % v.name).op
for v in variables.trainable_variables()
]
def _increase_eval_step_op(iterations_per_loop):
"""Returns an op to increase the eval step for TPU evaluation.
Args:
    iterations_per_loop: Tensor. The number of eval steps running in TPU
system before returning to CPU host for each `Session.run`.
Returns:
An operation
"""
eval_step = evaluation._get_or_create_eval_step() # pylint: disable=protected-access
# Estimator evaluate increases 1 by default. So, we increase the difference.
return state_ops.assign_add(
eval_step,
math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
use_locking=True)
_DEFAULT_JOB_NAME = 'tpu_worker'
_DEFAULT_COORDINATOR_JOB_NAME = 'coordinator'
_LOCAL_MASTERS = ('', 'local')
def _tpu_job(run_config, mode):
"""Returns the job name to use to place TPU computations on.
Args:
run_config: The tpu_config.RunConfig used for this custom estimator.
mode: A model_fn_lib.ModeKeys value.
Returns:
A string containing the job name, or None if no job should be specified.
Raises:
ValueError: If the user needs to specify a tpu_job_name, because we are
unable to infer the job name automatically, or if the user-specified job
names are inappropriate.
"""
# If the user specifies the tpu_job_name, use that.
if run_config.tpu_config.tpu_job_name:
return run_config.tpu_config.tpu_job_name
# The tpu job is determined by the run_config. Right now, this method is
# required as tpu_config is not part of the RunConfig.
master = (run_config.evaluation_master if mode == model_fn_lib.ModeKeys.EVAL
else run_config.master)
if master in _LOCAL_MASTERS:
return None
if (not run_config.session_config or
not run_config.session_config.cluster_def.job):
return _DEFAULT_JOB_NAME
cluster_def = run_config.session_config.cluster_def
job_names = set([job.name for job in cluster_def.job])
if _DEFAULT_JOB_NAME in job_names:
# b/37868888 tracks allowing ClusterSpec propagation to reuse job names.
raise ValueError('Currently, tpu_worker is not an allowed job name.')
if len(job_names) == 1:
return cluster_def.job[0].name
if len(job_names) == 2:
if _DEFAULT_COORDINATOR_JOB_NAME in job_names:
job_names.remove(_DEFAULT_COORDINATOR_JOB_NAME)
return job_names.pop()
# TODO(b/67716447): Include more sophisticated heuristics.
raise ValueError(
'Could not infer TPU job name. Please specify a tpu_job_name as part of '
'your TPUConfig.')
def _is_running_on_cpu(use_tpu, mode, eval_batch_size):
"""Determines whether the input_fn and model_fn should be invoked on CPU."""
return ((not use_tpu) or mode == model_fn_lib.ModeKeys.PREDICT or
(mode == model_fn_lib.ModeKeys.EVAL and eval_batch_size is None))
def _per_shard_batch_size(global_batch_size, run_config, use_tpu):
"""Returns the batch size for each shard."""
if use_tpu:
return global_batch_size // run_config.tpu_config.num_shards
else:
return global_batch_size
class _SIGNAL(object):
"""Signal used to control the thread of infeed/outfeed.
All preserved signals must be negative numbers. Positive numbers are used to
indicate the number of iterations for next training/evaluation loop.
"""
NEXT_BATCH = -1
STOP = -2
class TPUEstimatorSpec(collections.namedtuple('TPUEstimatorSpec', [
'mode',
'predictions',
'loss',
'train_op',
'eval_metrics',
'export_outputs'])):
"""Ops and objects returned from a `model_fn` and passed to `TPUEstimator`.
See `EstimatorSpec` for `mode`, 'predictions, 'loss', 'train_op', and
'export_outputs`.
TPU evaluation expects a slightly different signature from the
${tf.estimator.Estimator}. While `EstimatorSpec.eval_metric_ops` expects a
dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`.
The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The
`tensors` usually specify the model logits, which are transferred back from
  TPU system to CPU host. All tensors must be batch-major, i.e., the batch
size is the first dimension. Once all tensors are available at CPU host from
all shards, they are concatenated (on CPU) and passed as positional arguments
  to the `metric_fn` if `tensors` is a list, or as keyword arguments if `tensors`
  is a dict. `metric_fn` takes the `tensors` and returns a dict from metric string
name to the result of calling a metric function, namely a `(metric_tensor,
update_op)` tuple.
See `TPUEstimator` for MNIST example how to specify the `eval_metrics`.
"""
def __new__(cls,
mode,
predictions=None,
loss=None,
train_op=None,
eval_metrics=None,
export_outputs=None):
"""Creates a validated `TPUEstimatorSpec` instance."""
if eval_metrics is not None:
_EvalMetrics.validate(eval_metrics)
return super(TPUEstimatorSpec, cls).__new__(cls,
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metrics=eval_metrics,
export_outputs=export_outputs)
def as_estimator_spec(self):
"""Creates an equivalent `EstimatorSpec` used by CPU train/eval."""
eval_metric_ops = _EvalMetrics.to_metric_metric_ops_for_cpu(
self.eval_metrics)
return model_fn_lib.EstimatorSpec(mode=self.mode,
predictions=self.predictions,
loss=self.loss,
train_op=self.train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=self.export_outputs)
class _InfeedOutfeedThreadBaseController(object):
"""This wraps the infeed/outfeed thread and stops when Estimator finishes."""
def __init__(self, thd):
self._signal_queue = Queue.Queue()
thd.daemon = True
thd.start()
self._thd = thd
def block_and_get_signal(self):
return self._signal_queue.get()
def send_next_batch_signal(self, signal=_SIGNAL.NEXT_BATCH):
self._signal_queue.put(signal)
def join(self):
self._signal_queue.put(_SIGNAL.STOP)
self._thd.join()
class _OutfeedThreadController(_InfeedOutfeedThreadBaseController):
"""This wraps the outfeed thread and stops when Estimator finishes."""
def __init__(self, session, dequeue_ops):
super(_OutfeedThreadController, self).__init__(
threading.Thread(target=self._execute_dequeue_ops,
args=(session, dequeue_ops)))
def _execute_dequeue_ops(self, session, dequeue_ops):
count = 0
while True:
signal = self.block_and_get_signal()
if signal == _SIGNAL.STOP:
logging.info('Stop outfeed thread.')
return
iterations = signal
for i in range(iterations):
logging.debug('Outfeed dequeue for iteration (%d, %d)', count, i)
session.run(dequeue_ops)
count += 1
def join(self):
logging.info('Waiting for Outfeed Thread to exit.')
super(_OutfeedThreadController, self).join()
class _InfeedThreadController(_InfeedOutfeedThreadBaseController):
"""This wraps the infeed thread and stops when Estimator finishes."""
def __init__(self, session, enqueue_ops):
super(_InfeedThreadController, self).__init__(
threading.Thread(target=self._input_thread_fn_for_loading,
args=(session, enqueue_ops)))
def _input_thread_fn_for_loading(self, session, enqueue_ops):
count = 0
try:
while True:
signal = self._signal_queue.get()
if signal == _SIGNAL.STOP:
logging.info('Stop Infeed input thread.')
return
iterations = signal
for i in range(iterations):
logging.debug('Infeed enqueue for iteration (%d, %d)', count, i)
session.run(enqueue_ops)
count += 1
except Exception: # pylint: disable=broad-except
logging.error(
'Failed running infeed, closing session.\n'
'You may see an exception from your main session after this.',
exc_info=1
)
session.close()
def join(self):
logging.info('Waiting for Infeed Thread to exit.')
super(_InfeedThreadController, self).join()
class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
"""A Session hook setting up the TPU initialization, infeed, and outfeed.
This hook does two major things:
1. initialize and shutdown TPU system.
2. launch and join the threads for infeed enqueue and (optional) outfeed
dequeue.
"""
def __init__(self, run_config, mode, enqueue_fn, dequeue_ops=None):
self._tpu_job = _tpu_job(run_config, mode)
self._enqueue_fn = enqueue_fn
self._dequeue_ops = dequeue_ops
def begin(self):
self._enqueue_ops = self._enqueue_fn()
self._iterations_per_loop_var = _create_iterations_per_loop()
logging.info('TPU job name %s', self._tpu_job)
self._init_op = [tpu.initialize_system(job=self._tpu_job)]
self._finalize_op = [tpu.shutdown_system(job=self._tpu_job)]
def after_create_session(self, session, coord):
logging.info('Init TPU system')
session.run(self._init_op,
options=config_pb2.RunOptions(timeout_in_ms=5*60*1000))
logging.info('Start infeed thread controller')
self._infeed_thd_controller = _InfeedThreadController(
session, self._enqueue_ops)
if self._dequeue_ops is not None:
logging.info('Start outfeed thread controller')
self._outfeed_thd_controller = _OutfeedThreadController(
session, self._dequeue_ops)
def before_run(self, run_context):
logging.info('Enqueue next batch of data to infeed.')
iterations = run_context.session.run(self._iterations_per_loop_var)
self._infeed_thd_controller.send_next_batch_signal(iterations)
if self._dequeue_ops is not None:
logging.info('Dequeue next batch of data from outfeed.')
self._outfeed_thd_controller.send_next_batch_signal(iterations)
def end(self, session):
logging.info('Stop infeed thread controller')
self._infeed_thd_controller.join()
if self._dequeue_ops is not None:
logging.info('Stop output thread controller')
self._outfeed_thd_controller.join()
logging.info('Shutdown TPU system.')
session.run(self._finalize_op)
class _TPUStopAtStepHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step.
This hook is similar to the `session_run_hook._StopAfterNEvalsHook` with
following differences for TPU training:
1. This hook sets the variable for iterations_per_loop, which is used by
`TPUInfeedOutfeedSessionHook` to control the iterations for infeed/outfeed.
As the hook execution order is not guaranteed, the variable update is
handled in `after_create_session` and `after_run` as
`TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`.
2. For each training loop (session.run), the global step could be increased
multiple times on TPU. The global step tensor value will be explicitly read
again in `after_run` to ensure the latest value is retrieved to avoid race
condition.
"""
def __init__(self, iterations, num_steps=None, last_step=None):
"""Initializes a `StopAtStepHook`.
Args:
iterations: The number of iterations to run optimizer per training loop.
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and last_step is None:
raise ValueError('One of num_steps or last_step must be specified.')
if num_steps is not None and last_step is not None:
raise ValueError('Only one of num_steps or last_step can be specified.')
self._num_steps = num_steps
self._last_step = last_step
self._iterations = iterations
def _next_iterations(self, global_step, last_step):
gap = last_step - global_step
return min(gap, self._iterations)
def begin(self):
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError('Global step should be created.')
self._iterations_per_loop_var = _create_iterations_per_loop()
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
if self._last_step is None:
self._last_step = global_step + self._num_steps
iterations = self._next_iterations(global_step, self._last_step)
self._iterations_per_loop_var.load(iterations, session=session)
def after_run(self, run_context, run_values):
# Global step cannot be retrieved via SessionRunArgs and before_run due to
# race condition.
global_step = run_context.session.run(self._global_step_tensor)
if global_step >= self._last_step:
run_context.request_stop()
else:
iterations = self._next_iterations(global_step, self._last_step)
self._iterations_per_loop_var.load(iterations,
session=run_context.session)
class _SetEvalIterationsHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step."""
def __init__(self, num_steps):
"""Initializes a `_SetEvalIterationsHook`.
Args:
num_steps: Number of steps to execute.
"""
self._num_steps = num_steps
def begin(self):
self._iterations_per_loop_var = _create_iterations_per_loop()
def after_create_session(self, session, coord):
self._iterations_per_loop_var.load(self._num_steps, session=session)
class _PerShardOutput(object):
"""Wraps input_fn's outputs into per-shard outputs.
Used so that the model_fn can distinguish between sharded input and unsharded
inputs (e.g., for export_savedmodel()).
"""
def __init__(self, output):
self.output = output
def as_list(self):
return self.output
class _InputsHolder(object):
"""A inputs holder holds the `features` and `labels' for TPU system.
Model inputs returned by the `input_fn` can have one of the following forms:
1. features
2. (features, labels)
Internally, form 1 is reformed to `(features, None)` as features and labels
  are passed separately to underlying methods. For TPU training, TPUEstimator
expects multiple `features` and `labels` tuples one for each shard.
In addition, TPUEstimator allows various different structures for inputs
(namely `features` and `labels`). `features` can be `Tensor` or dict of
string name to `Tensor`, and `labels` could be `None`, `Tensor`, or dict of
  string name to `Tensor`. The TPU infeed/outfeed library expects a flattened
  tensor list. So `features` and `labels` need to be flattened before infeed
  enqueue, and their structure needs to be recorded in order to restore them after
infeed dequeue.
`_InputsHolder` could hold the `features` and `labels` tuple for all shards
(usually multi-host TPU training) or for one host (usually for single-host TPU
evaluation), records the structure details (including presence, dict or single
  tensor, dict names), validates the structure consistency across all shards, and
encapsulates the flatten/unflatten logic.
"""
def __init__(self, features=None, labels=None, num_shards=None):
"""Constructor.
Args:
features: features for one host or a list of features one for each shard
(must be type `_PerShardOutput`). Once provided, the corresponding
`labels` should be set also and this `_InputsHolder` is frozen to
prevent from future modification. If `None`, it is expected to add
features and labels for each shard by calling `append_tuple` later.
labels: labels for one host or a list of labels one for each shard
(must be type `_PerShardOutput`).
num_shards: Number of shards in the TPU system. Must be provided unless it
can be deduced from `features`.
Raises:
ValueError: If both `sharded_features` and `num_shards` are `None`.
"""
# Holds the features and labels for all shards.
self._feature_list = []
self._label_list = []
# Holds the structure of inputs
self._feature_names = []
self._label_names = []
self._has_labels = False
# Internal state.
self._initialized = False
self._frozen = False
self._sharded = False
if features is None:
if num_shards is None:
raise ValueError(
'`features` and `num_shards` cannot be both None')
self._num_shards = num_shards
elif isinstance(features, _PerShardOutput):
self._from_sharded_inputs(features, labels, num_shards)
else:
if num_shards is None:
raise ValueError(
'`num_shards` cannot be None for unsharded features.')
self._from_unsharded_inputs(features, labels, num_shards)
def _from_unsharded_inputs(self, features, labels, num_shards):
"""Initializes the inputs with unsharded features and labels."""
self._num_shards = num_shards
if labels is not None:
self._has_labels = True
self.append_tuple((features, labels))
else:
self.append_tuple(features)
self._sharded = False
self._frozen = True
def _from_sharded_inputs(self, sharded_features, sharded_labels, num_shards):
"""Initializes the inputs with sharded features and labels."""
if not isinstance(sharded_features, _PerShardOutput):
raise ValueError('`sharded_features` must have type `_PerShardOutput`.')
features = sharded_features.as_list()
if num_shards is not None and num_shards != len(features):
raise ValueError(
'`num_shards` should be same as the length of sharded_features.')
self._num_shards = len(features)
if not self._num_shards:
raise ValueError('`sharded_features` should not be empty.')
if sharded_labels is not None:
if not isinstance(sharded_labels, _PerShardOutput):
raise ValueError('sharded_labels` must have type `_PerShardOutput`.')
self._has_labels = True
labels = sharded_labels.as_list()
if self._num_shards != len(labels):
raise ValueError(
'Length of `sharded_features` and `sharded_labels` mismatch.')
if self._has_labels:
for (f, l) in zip(features, labels):
self.append_tuple((f, l))
else:
for f in features:
self.append_tuple(f)
self._sharded = True
self._frozen = True
def _extract_key_names(self, tensor_or_dict):
if tensor_or_dict is None:
return []
return tensor_or_dict.keys() if isinstance(tensor_or_dict, dict) else []
def _validate(self, features, labels):
has_labels = labels is not None
feature_names = self._extract_key_names(features)
label_names = self._extract_key_names(labels)
if self._initialized:
self._sharded = True
# The following should never happen.
assert feature_names == self._feature_names, 'feature keys mismatched'
assert label_names == self._label_names, 'label keys mismatched'
assert has_labels == self._has_labels, 'label presence mismatched'
else:
self._initialized = True
self._feature_names = feature_names
self._label_names = label_names
self._has_labels = has_labels
@property
def sharded(self):
if not self._frozen:
raise RuntimeError('_InputsHolder has not been frozen yet.')
return self._sharded
@property
def num_shards(self):
if not self._frozen:
raise RuntimeError('_InputsHolder has not been frozen yet.')
return self._num_shards
def append_tuple(self, inputs):
"""Appends `inputs` for one shard into holder.
Args:
inputs: The return from `input_fn`, which could be features or tuple of
(features, labels). After the first `inputs` appended into
        `_InputsHolder`, the structure of `features` and `labels` is recorded.
        Any future invocation should provide the `inputs` with the same structure.
Raises:
RuntimeError: If the internal data has been frozen already.
"""
if self._frozen:
raise RuntimeError('InputsHolder has frozen, which cannot be mutated.')
# input_fn may return either features or (features, labels)
if isinstance(inputs, tuple):
features, labels = inputs
else:
features, labels = inputs, None
self._validate(features, labels)
self._feature_list.append(features)
if labels is not None:
self._label_list.append(labels)
def as_features_and_labels_tuple(self):
"""Returns features and labels as grouped tuple.
This is intended to be used to pass features and labels for all shards from
input_fn to model_fn as the parent class `Estimator` does not have the
concept of shards. So, grouped tuple is required.
Once called, the internal data is frozen and `append_tuple` cannot be
invoked anymore.
Returns:
A tuple of features and labels. Both have type `_PerShardOutput`, holding
the inputs for all shards. `labels` could be `None`.
Raises:
RuntimeError: If the internal data has not been initialized.
"""
self._frozen = True
if not self._initialized:
raise RuntimeError('InputsHolder has not been initialized.')
assert len(self._feature_list) == self._num_shards
if not self._label_list or all(l is None for l in self._label_list):
return _PerShardOutput(self._feature_list), None
assert len(self._label_list) == self._num_shards
return (_PerShardOutput(self._feature_list),
_PerShardOutput(self._label_list))
def as_sharded_flattened_inputs(self):
"""Flatten the features and label as tensor lists for all shards.
Flattened tensor list contains all tensors in `features` (dict) and `labels`
    (dict). Conceptually, it has the following structure:
```python
flatten_list = []
for name in features:
flatten_list.append(features[name])
for name in labels:
flatten_list.append(labels[name])
```
    This method handles the case where labels is None and the single-tensor case.
    Once called, the internal data is frozen and `append_tuple` cannot be
    invoked anymore.
Returns:
A list of flattened inputs one for each shard.
Raises:
RuntimeError: If the internal data has not been initialized.
      ValueError: If the inputs are not sharded.
"""
self._frozen = True
if not self._initialized:
raise RuntimeError('InputsHolder has not been initialized.')
if not self._sharded:
raise ValueError('Inputs are not sharded.')
sharded_inputs = []
for shard in range(self._num_shards):
flattened_inputs = self._as_flattened_inputs(
self._feature_list[shard],
self._label_list[shard] if self._has_labels else None)
sharded_inputs.append(flattened_inputs)
return sharded_inputs
def as_flattened_inputs(self):
"""Flatten the features and label as a single tensor list for one host."""
self._frozen = True
if not self._initialized:
raise RuntimeError('InputsHolder has not been initialized.')
if self._sharded:
raise ValueError('Inputs are sharded.')
return self._as_flattened_inputs(
self._feature_list[0],
self._label_list[0] if self._has_labels else None)
def _as_flattened_inputs(self, features, labels):
"""Flattens the `features` and `labels` to a single tensor list."""
flattened_inputs = []
if self._feature_names:
# We need a fixed ordering for enqueueing and dequeueing.
flattened_inputs.extend([features[name] for name in self._feature_names])
else:
flattened_inputs.append(features)
if labels is not None:
if self._label_names:
# We need a fixed ordering for enqueueing and dequeueing.
flattened_inputs.extend([labels[name] for name in self._label_names])
else:
flattened_inputs.append(labels)
return flattened_inputs
def unflatten_features_and_labels(self, flattened_inputs):
"""Restores the flattened inputs to original features and labels form.
Once called, the internal data is frozen and `append_tuple` cannot be
    invoked anymore.
Args:
      flattened_inputs: Flattened inputs for one shard, which should be created
by the `as_sharded_flattened_inputs` API.
Returns:
A tuple of (`features`, `labels`), where `labels` could be None.
Each one, if present, should have identical structure (single tensor vs
dict) as the one returned by input_fn.
Raises:
RuntimeError: If the internal data has not been initialized.
ValueError: If the number of expected tensors from `flattened_inputs`
mismatches the recorded structure.
"""
self._frozen = True
if not self._initialized:
raise RuntimeError('InputsHolder has not been initialized.')
expected_num_features = (len(self._feature_names) if self._feature_names
else 1)
if self._has_labels:
expected_num_labels = (len(self._label_names) if self._label_names
else 1)
else:
expected_num_labels = 0
expected_num_tensors = expected_num_features + expected_num_labels
if expected_num_tensors != len(flattened_inputs):
raise ValueError(
'The number of flattened tensors mismatches expected num. '
'Expected {}, got {}'.format(expected_num_tensors,
len(flattened_inputs)))
if self._feature_names:
unflattened_features = dict(zip(self._feature_names,
flattened_inputs[:expected_num_features]))
else:
# Single tensor case
unflattened_features = flattened_inputs[0]
if expected_num_labels == 0:
unflattened_label = None
elif self._label_names:
unflattened_label = dict(zip(self._label_names,
flattened_inputs[expected_num_features:]))
else:
# Single tensor case.
unflattened_label = flattened_inputs[expected_num_features]
return unflattened_features, unflattened_label
class _ModelFnWrapper(object):
"""A `model_fn` wrapper.
This makes calling model_fn on CPU and TPU easier and more consistent and
  performs the necessary checks and mutations required by TPU training and evaluation.
In addition, this wrapper manages converting the `model_fn` to a single TPU
train and eval step.
"""
def __init__(self, model_fn, config, params, mode, train_batch_size,
eval_batch_size):
self._model_fn = model_fn
self._config = config
self._params = params
self._mode = mode
self._train_batch_size = train_batch_size
self._eval_batch_size = eval_batch_size
def call_without_tpu(self, features, labels):
# Let CrossShardOptimizer be called without TPU in model_fn, since it's
# common to set the train_op even when running evaluate() or predict().
with tpu_function.tpu_shard_context(1):
return self._call_model_fn(features, labels, use_tpu=False)
def convert_to_single_tpu_train_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single train step on TPU.
The user provided `model_fn` takes input tuple
(features, labels) and produces the EstimatorSpec with train_op and loss for
train `mode`. This usually represents a single train computation on CPU.
For TPU training, a train (computation) step is first wrapped in a
tf.while_loop control flow to repeat for many times and then replicated to
    all TPU shards. In addition, the input should be taken from the TPU infeed
    rather than directly from the input pipeline (input_fn). To fit the TPU
    loop and replicate pattern, the original train computation is reformed
    into the returned `train_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A Fn representing the train step for TPU.
"""
def train_step(loss):
"""Training step function for use inside a while loop."""
del loss # unused; required in function signature.
features, labels = dequeue_fn()
estimator_spec = self._verify_estimator_spec(
self._call_model_fn(features, labels, use_tpu=True))
loss, train_op = estimator_spec.loss, estimator_spec.train_op
with ops.control_dependencies([train_op]):
return array_ops.identity(loss)
return train_step
def convert_to_single_tpu_eval_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single eval step on TPU.
Similar to training, the user provided `model_fn` takes input tuple
(features, labels) and produces the TPUEstimatorSpec with eval_metrics for
eval `mode`. This usually represents a single evaluation computation on CPU.
    For TPU evaluation, an eval (computation) step is first wrapped in a
    tf.while_loop control flow to repeat many times and then replicated to
    all TPU shards. In addition, the input and output are slightly different.
    The input, features and labels, should be taken from the TPU infeed rather
    than directly from the input pipeline (input_fn). The output is managed in
    two stages. First, the model outputs, as the result of the evaluation
    computation (usually the model logits), are transferred from the TPU
    system to the CPU. Then, all model outputs are concatenated on the CPU and
    sent to the metric_fn for metrics computation. To fit the TPU evaluation
    pattern, the original eval computation is reformed into the returned
    `eval_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
      A tuple of (eval_fn, eval_metrics), where eval_fn represents the eval
      step for TPU and eval_metrics is an `_EvalMetrics` instance.
"""
eval_metrics = _EvalMetrics()
def eval_step(total_loss):
"""Evaluation step function for use inside a while loop."""
features, labels = dequeue_fn()
tpu_estimator_spec = self._call_model_fn(features, labels, use_tpu=True)
if not isinstance(tpu_estimator_spec, TPUEstimatorSpec):
raise RuntimeError(
          'estimator_spec used by TPU evaluation must have type '
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
loss = tpu_estimator_spec.loss
eval_metrics.record(tpu_estimator_spec)
outfeed_ops = tpu_ops.outfeed_enqueue_tuple(eval_metrics.outfeed_tensors)
with ops.control_dependencies([outfeed_ops]):
return math_ops.add(total_loss, loss)
return eval_step, eval_metrics
@property
def config(self):
return self._config
def _call_model_fn(self, features, labels, use_tpu):
"""Calls the model_fn with required parameters."""
model_fn_args = util.fn_args(self._model_fn)
kwargs = {}
    # Makes deep copies of `config` and `params` in case the user mutates them.
config = copy.deepcopy(self._config)
params = copy.deepcopy(self._params)
if 'labels' in model_fn_args:
kwargs['labels'] = labels
else:
if labels is not None:
raise ValueError(
'model_fn does not take labels, but input_fn returns labels.')
if 'mode' in model_fn_args:
kwargs['mode'] = self._mode
if 'config' in model_fn_args:
kwargs['config'] = config
if 'params' in model_fn_args:
kwargs['params'] = params
if 'params' not in model_fn_args:
raise ValueError(
'model_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params[\'batch_size\']'.format(self._model_fn))
if self._mode == model_fn_lib.ModeKeys.TRAIN:
params[_BATCH_SIZE_KEY] = _per_shard_batch_size(
self._train_batch_size, config, use_tpu)
elif (self._mode == model_fn_lib.ModeKeys.EVAL and
self._eval_batch_size is not None):
params[_BATCH_SIZE_KEY] = _per_shard_batch_size(
self._eval_batch_size, config, use_tpu)
estimator_spec = self._model_fn(features=features, **kwargs)
if (not use_tpu) and isinstance(estimator_spec, TPUEstimatorSpec):
return estimator_spec.as_estimator_spec()
else:
return estimator_spec
def _verify_estimator_spec(self, estimator_spec):
"""Validates the estimator_spec."""
if isinstance(estimator_spec, TPUEstimatorSpec):
return estimator_spec
err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.'
if estimator_spec.training_chief_hooks:
raise ValueError(err_msg.format('training_chief_hooks'))
if estimator_spec.training_hooks:
raise ValueError(err_msg.format('training_hooks'))
if estimator_spec.evaluation_hooks:
raise ValueError(err_msg.format('evaluation_hooks'))
return estimator_spec
class _EvalMetrics(object):
"""Class wraps TPUEstimator.eval_metrics."""
def __init__(self):
self._metric_fn = None
self._is_dict = False
self._tensor_keys = []
self._tensors = []
self._tensor_dtypes = []
self._tensor_shapes = []
self._recorded = False
@staticmethod
def validate(eval_metrics):
"""Validates the `eval_metrics` in `TPUEstimatorSpec`."""
if not isinstance(eval_metrics, (tuple, list)):
raise ValueError('eval_metrics should be tuple or list')
if len(eval_metrics) != 2:
raise ValueError('eval_metrics should have two elements.')
if not callable(eval_metrics[0]):
raise TypeError('eval_metrics[0] should be callable.')
if not isinstance(eval_metrics[1], (tuple, list, dict)):
raise ValueError('eval_metrics[1] should be tuple or list, or dict.')
if isinstance(eval_metrics[1], (tuple, list)):
fn_args = util.fn_args(eval_metrics[0])
if 'self' in fn_args:
fn_args = tuple([arg for arg in fn_args if arg != 'self'])
if len(eval_metrics[1]) != len(fn_args):
raise RuntimeError(
'In TPUEstimatorSpec.eval_metrics, length of tensors does not '
'match method args of metric_fn.')
@staticmethod
def to_metric_metric_ops_for_cpu(eval_metrics):
"""Converts `TPUEstimatorSpec.eval_metrics` to `eval_metric_ops` for CPU."""
if not eval_metrics:
return None
_EvalMetrics.validate(eval_metrics)
metric_fn, tensors = eval_metrics
if isinstance(tensors, (tuple, list)):
return metric_fn(*tensors)
else:
# Must be dict.
try:
return metric_fn(**tensors)
except TypeError as e:
logging.warning(
            'Exception while calling metric_fn for evaluation: %s. '
'It is likely the tensors (eval_metrics[1]) do not match the '
'metric_fn arguments', e)
raise e
def record(self, spec):
"""Records the eval_metrics structure in `spec`."""
if self._recorded:
raise RuntimeError('Eval metrics have been recorded already.')
self._metric_fn, tensor_list_or_dict = spec.eval_metrics
if isinstance(tensor_list_or_dict, dict):
self._is_dict = True
for (key, tensor) in six.iteritems(tensor_list_or_dict):
self._tensor_keys.append(key)
self._tensors.append(tensor)
self._tensor_dtypes.append(tensor.dtype)
self._tensor_shapes.append(tensor.shape)
else:
# List or tuple.
self._is_dict = False
self._tensors = tensor_list_or_dict
for tensor in tensor_list_or_dict:
self._tensor_dtypes.append(tensor.dtype)
self._tensor_shapes.append(tensor.shape)
self._recorded = True
@property
def outfeed_tensors(self):
if not self._recorded:
raise RuntimeError('Eval metrics have not been recorded yet')
return self._tensors
def to_metric_metric_ops_for_tpu(self, run_config, dummy_update_op):
"""Creates the eval_metric_ops now based on the TPU outfeed.
`eval_metric_ops` is defined in `EstimatorSpec`. From all shards, tensors
are dequeued from outfeed and then concatenated (along batch size dimension)
to form global-like tensors. All global-like tensors are passed to the
metric fn.
Args:
run_config: A `RunConfig` instance.
dummy_update_op: A dummy update op.
Returns:
      A tuple of (`eval_metric_ops`, `update_ops`), where `update_ops` should
      be invoked in the outfeed dequeue thread; they drive the outfeed dequeue
      and update the state of the metrics.
Raises:
RuntimeError: If outfeed tensor is scalar.
"""
num_shards = run_config.tpu_config.num_shards
job = _tpu_job(run_config, model_fn_lib.ModeKeys.EVAL)
job_device = '' if job is None else ('/job:%s' % job)
# For each i, dequeue_ops[i] is a list containing the tensors from all
# shards. This list is concatenated later.
dequeue_ops = []
for i in xrange(len(self._tensors)):
dequeue_ops.append([])
# Outfeed ops execute on each JF node.
for i in xrange(num_shards):
with ops.device('%s/task:%d/device:TPU:%d' % (job_device, i / 8, i % 8)):
outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
dtypes=self._tensor_dtypes, shapes=self._tensor_shapes)
for j, item in enumerate(outfeed_tensors):
dequeue_ops[j].append(item)
    # It is assumed evaluation always happens on a single-host TPU system, so
    # place all ops on the TPU host if possible.
with ops.device('{}/device:CPU:0'.format(job_device)):
for i, item in enumerate(dequeue_ops):
if dequeue_ops[i][0].shape.ndims == 0:
raise RuntimeError(
            'All tensors outfed from TPU should preserve batch size '
'dimension, but got scalar {}'.format(dequeue_ops[i][0]))
# TODO(xiejw): Allow users to specify the axis for batch size dimension.
dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0)
if self._is_dict:
dequeue_ops = dict(zip(self._tensor_keys, dequeue_ops))
try:
eval_metric_ops = self._metric_fn(**dequeue_ops)
except TypeError as e:
logging.warning(
            'Exception while calling metric_fn for evaluation: %s. '
'It is likely the tensors (eval_metrics[1]) do not match the '
'metric_fn arguments', e)
raise e
else:
eval_metric_ops = self._metric_fn(*dequeue_ops)
eval_update_ops = []
for k, v in eval_metric_ops.items():
eval_metric_ops[k] = (v[0], dummy_update_op)
eval_update_ops.append(v[1])
return eval_metric_ops, eval_update_ops
class TPUEstimator(estimator_lib.Estimator):
"""Estimator with TPU support.
TPUEstimator handles many of the details of running on TPU devices, such as
replicating inputs and models for each core, and returning to host
periodically to run hooks.
If `use_tpu` is false, all training, evaluation, and predict are executed on
CPU.
For training, TPUEstimator transforms a global batch size in params to a
per-shard batch size when calling the `input_fn` and `model_fn`. Users should
specify `train_batch_size` in constructor, and then get the batch size for
each shard in `input_fn` and `model_fn` by `params['batch_size']`. If
`TPUConfig.per_host_input_for_training` is `True`, `input_fn` is invoked per
  host rather than per shard. In this case, the global batch size is
  transformed to a per-host batch size in params for `input_fn`, but
  `model_fn` still gets a per-shard batch size.
For evaluation, if `eval_batch_size` is None, it is executed on CPU, even if
`use_tpu` is `True`. If `eval_batch_size` is not `None`, it is executed on
TPU, which is an experimental feature. In this case, `model_fn` should return
`TPUEstimatorSpec` instead of `EstimatorSpec`, which expects the
`eval_metrics` for TPU evaluation.
`TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. (See
`TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and returns
a dict from metric string name to the result of calling a metric function,
namely a `(metric_tensor, update_op)` tuple.
Current limitations:
1. TPU evaluation only works on single host.
2. `input_fn` for evaluation should not throw OutOfRange error for all
evaluation steps and all batches should have the same size.
Example (MNIST):
```
# The metric Fn which runs on CPU.
def metric_fn(labels, logits):
predictions = tf.argmax(logits, 1)
return {
'accuracy': tf.metrics.precision(
labels=labels, predictions=predictions),
}
# Your model Fn which runs on TPU (eval_metrics is list in this example)
def model_fn(features, labels, mode, config, params):
...
logits = ...
    if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, [labels, logits]))
# or specify the eval_metrics tensors as dict.
def model_fn(features, labels, mode, config, params):
...
final_layer_output = ...
    if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, {
'labels': labels,
'logits': final_layer_output,
}))
```
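  A minimal construction/training sketch (illustrative only; `my_model_fn`,
  `my_run_config` and the concrete numbers below are placeholders, not part of
  this module):
  ```
  def my_input_fn(params):
    batch_size = params['batch_size']   # per-shard (or per-host) batch size
    ...                                 # build features/labels of that size
    return features, labels
  estimator = tpu_estimator.TPUEstimator(
      model_fn=my_model_fn,
      config=my_run_config,             # a tpu_config.RunConfig
      use_tpu=True,
      train_batch_size=1024)            # global; divisible by num_shards
  estimator.train(input_fn=my_input_fn, max_steps=10000)
  ```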
Predict support on TPU is not yet implemented. So, `predict` and
`export_savedmodel` are executed on CPU, even if `use_tpu` is true.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
use_tpu=True,
train_batch_size=None,
eval_batch_size=None,
batch_axis=None):
"""Constructs an `TPUEstimator` instance.
Args:
model_fn: Model function as required by `Estimator`. For training, the
returned `EstimatorSpec` cannot have hooks as it is not supported in
`TPUEstimator`.
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model. If `None`, the
        model_dir in `config` will be used if set. If both are set, they must
        be the same. If both are `None`, a temporary directory will be used.
      config: A `tpu_config.RunConfig` configuration object. Cannot be `None`.
params: An optional `dict` of hyper parameters that will be passed into
`input_fn` and `model_fn`. Keys are names of parameters, values are
basic python types. There are reserved keys for `TPUEstimator`,
including 'batch_size'.
use_tpu: A bool indicating whether TPU support is enabled. Currently,
- TPU training respects this bit.
- If true, see `eval_batch_size` for evaluate support.
- Predict still happens on CPU.
train_batch_size: An int representing the global training batch size.
TPUEstimator transforms this global batch size to a per-shard batch
size, as params['batch_size'], when calling `input_fn` and `model_fn`.
Cannot be `None` if `use_tpu` is `True`. Must be divisible by
`config.tpu_config.num_shards`.
      eval_batch_size: An int representing the global evaluation batch size.
        Currently, if `None`, evaluation is still executed on CPU (even when
        `use_tpu` is True). In the near future, `use_tpu` will be the only
        option to switch between TPU/CPU evaluation.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards. For example, if your input_fn produced (images, labels)
where the images tensor is in `HWCN` format, your shard dimensions would
be [3, 0], where 3 corresponds to the `N` dimension of your images
Tensor, and 0 corresponds to the dimension along which to split the
labels to match up with the corresponding images. If None is supplied,
and per_host_input_for_training is True, batches will be sharded based
on the major dimension. If tpu_config.per_host_input_for_training is
False, batch_axis is ignored.
Raises:
ValueError: `params` has reserved keys already.
"""
if config is None or not isinstance(config, tpu_config.RunConfig):
raise ValueError(
'`config` must be provided with type `tpu_config.RunConfig`')
if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):
raise ValueError(
'{} are reserved keys but existed in params {}.'.format(
_RESERVED_PARAMS_KEYS, params))
if use_tpu:
if train_batch_size is None:
raise ValueError('`train_batch_size` cannot be `None`')
if not isinstance(train_batch_size, int):
raise ValueError('`train_batch_size` must be an int')
if train_batch_size < 1:
raise ValueError('`train_batch_size` must be positive')
# The specified batch size is the batch size for the entire computation.
# The input_fn and model_fn are called per-shard, so we want to calculate
# the per-shard batch size and pass that.
if train_batch_size % config.tpu_config.num_shards != 0:
raise ValueError(
'train batch size {} must be divisible by number of shards {}'
.format(train_batch_size, config.tpu_config.num_shards))
if eval_batch_size is not None:
if config.tpu_config.num_shards > 8:
raise NotImplementedError(
'TPU evaluation is only supported with one host.')
if eval_batch_size % config.tpu_config.num_shards != 0:
raise ValueError(
'eval batch size {} must be divisible by number of shards {}'
.format(eval_batch_size, config.tpu_config.num_shards))
if (config.tpu_config.num_shards > 8 and
config.tpu_config.per_host_input_for_training):
# TODO(b/67051042): Support per_host input pipelines when num_shards > 8
raise NotImplementedError(
'Per-host input pipelines only available for num_shards <= 8')
# Verifies the model_fn signature according to Estimator framework.
estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access
# We cannot store config and params in this constructor as parent
# constructor might change them, such as assigning a temp dir for
# config.model_dir.
model_function = _augment_model_fn(model_fn, train_batch_size,
eval_batch_size, use_tpu,
batch_axis)
    # Pass non-None params since the wrapped model_fn expects it.
params = params or {}
super(TPUEstimator, self).__init__(
model_fn=model_function,
model_dir=model_dir,
config=config,
params=params)
self._use_tpu = use_tpu
self._train_batch_size = train_batch_size
self._eval_batch_size = eval_batch_size
self._iterations_per_training_loop = (
self._config.tpu_config.iterations_per_loop)
def _create_global_step(self, graph):
"""Creates a global step suitable for TPUs.
Args:
graph: The graph in which to create the global step.
Returns:
A global step `Tensor`.
Raises:
ValueError: if the global step tensor is already defined.
"""
return _create_global_step(graph)
def _convert_train_steps_to_hooks(self, steps, max_steps):
if _is_running_on_cpu(self._use_tpu, model_fn_lib.ModeKeys.TRAIN,
self._eval_batch_size):
return super(TPUEstimator, self)._convert_train_steps_to_hooks(
steps, max_steps)
# On TPU.
if steps is None and max_steps is None:
raise ValueError(
'For TPU training, one of `steps` or `max_steps` must be set. '
'Cannot be both `None`.')
    # Estimator.train has an explicit positivity check.
if steps is not None:
util_lib.check_positive_integer(steps, 'Train steps')
if max_steps is not None:
util_lib.check_positive_integer(max_steps, 'Train max_steps')
return [_TPUStopAtStepHook(self._iterations_per_training_loop,
steps, max_steps)]
def _convert_eval_steps_to_hooks(self, steps):
if _is_running_on_cpu(self._use_tpu, model_fn_lib.ModeKeys.EVAL,
self._eval_batch_size):
return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps)
if steps is None:
raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.')
util_lib.check_positive_integer(steps, 'Eval steps')
hooks = []
hooks.append(evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access
num_evals=steps))
hooks.append(_SetEvalIterationsHook(steps))
return hooks
def _call_input_fn(self, input_fn, mode):
"""Calls the input function.
Args:
input_fn: The input function.
mode: ModeKeys
Returns:
Either features or (features, labels) where features and labels are:
features - `Tensor` or dictionary of string feature name to `Tensor`.
labels - `Tensor` or dictionary of `Tensor` with labels.
Raises:
ValueError: if input_fn takes invalid arguments or does not have `params`.
"""
input_fn_args = util.fn_args(input_fn)
config = self.config # a deep copy.
kwargs = {}
if 'params' in input_fn_args:
kwargs['params'] = self.params # a deep copy.
else:
raise ValueError('input_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params["batch_size"]'.format(input_fn))
if 'config' in input_fn_args:
kwargs['config'] = config
    # Set the batch size in params first. This lets the user keep the same
    # input_fn for use_tpu=True/False.
if mode == model_fn_lib.ModeKeys.TRAIN:
kwargs['params'][_BATCH_SIZE_KEY] = (
_per_shard_batch_size(self._train_batch_size, config, self._use_tpu)
if not config.tpu_config.per_host_input_for_training else
self._train_batch_size)
elif (mode == model_fn_lib.ModeKeys.EVAL and
self._eval_batch_size is not None):
# For TPU evaluation, input_fn is invoked for one host (instead of shard).
kwargs['params'][_BATCH_SIZE_KEY] = self._eval_batch_size
if _is_running_on_cpu(self._use_tpu, mode, self._eval_batch_size):
with ops.device('/device:CPU:0'):
return input_fn(**kwargs)
job = _tpu_job(config, mode)
def placement_function(index):
if job is None:
return '/replica:0/task:0/device:CPU:0'
else:
return '/job:%s/task:%d/device:CPU:0' % (job, index / 8)
if mode == model_fn_lib.ModeKeys.TRAIN:
if not config.tpu_config.per_host_input_for_training:
# Now for TPU training.
num_shards = config.tpu_config.num_shards
inputs = _InputsHolder(num_shards=num_shards)
for i in range(config.tpu_config.num_shards):
with ops.device(placement_function(i)):
inputs.append_tuple(input_fn(**kwargs))
return inputs.as_features_and_labels_tuple()
else:
# TODO(xiejw): Extend this to multi-host support.
with ops.device(placement_function(0)):
return input_fn(**kwargs)
# Now for TPU evaluation.
with ops.device(placement_function(0)):
return input_fn(**kwargs)
# TODO(b/64607814): Ensure batch_axis works with nested structures.
def _create_infeed_enqueue_ops_and_dequeue_fn(inputs_holder, run_config,
batch_axis, mode):
"""Utility to convert input_fn to enqueue and dequeue fns for TPU.
Args:
inputs_holder: An `_InputsHolder` holding features and labels.
run_config: A `RunConfig` instance.
batch_axis: A python list of batch dimensions.
mode: ModeKeys
Returns:
A tuple of (dequeue_fn, enqueue_fn)
"""
if inputs_holder.sharded:
sharded_inputs = inputs_holder.as_sharded_flattened_inputs()
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(sharded_inputs[0]))
infeed_queue.set_configuration_from_sharded_input_tensors(sharded_inputs)
else:
unsharded_inputs = inputs_holder.as_flattened_inputs()
infeed_queue = tpu_feed.InfeedQueue(
tuple_types=[t.dtype for t in unsharded_inputs],
tuple_shapes=[t.shape for t in unsharded_inputs],
shard_dimensions=batch_axis)
infeed_queue.set_number_of_shards(inputs_holder.num_shards)
def dequeue_fn():
"""dequeue_fn is used by the train_step in TPU to retrieve the tensors."""
values = infeed_queue.generate_dequeue_op()
return inputs_holder.unflatten_features_and_labels(values)
def tpu_ordinal_function(index):
"""Return the TPU ordinal associated with a shard.
Required because the enqueue ops are placed on CPU.
Args:
index: the shard index
Returns:
The ordinal of the TPU device the shard's infeed should be placed on.
"""
return index % 8
def enqueue_fn():
"""enqueue_fn is used to add ops to the graph to send tensors."""
if inputs_holder.sharded:
return infeed_queue.generate_enqueue_ops(
sharded_inputs, tpu_ordinal_function=tpu_ordinal_function)
else:
job = _tpu_job(run_config, mode)
def placement_function(index):
if job is None:
return '/replica:0/task:0/device:CPU:0'
else:
# This assumes that if using more than 8 shards,
# the job configuration varies 'task'.
return '/job:%s/task:%d/device:CPU:0' % (job, index / 8)
return infeed_queue.split_inputs_and_generate_enqueue_ops(
unsharded_inputs, placement_function=placement_function)
return (dequeue_fn, enqueue_fn)
def _augment_model_fn(model_fn, train_batch_size, eval_batch_size, use_tpu,
batch_axis):
"""Returns a new model_fn, which wraps the TPU support."""
def _model_fn(features, labels, mode, config, params):
"""A Estimator `model_fn` for TPUEstimator."""
model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, mode,
train_batch_size, eval_batch_size)
    # TODO(jhseu): Move PREDICT to TPU.
if _is_running_on_cpu(use_tpu, mode, eval_batch_size):
logging.info('Running %s on CPU', mode)
return model_fn_wrapper.call_without_tpu(features, labels)
inputs = _InputsHolder(features=features, labels=labels,
num_shards=config.tpu_config.num_shards)
dequeue_fn, enqueue_fn = _create_infeed_enqueue_ops_and_dequeue_fn(
inputs, config, batch_axis, mode)
if mode == model_fn_lib.ModeKeys.TRAIN:
loss = _train_on_tpu_system(model_fn_wrapper, dequeue_fn)
hooks = [
TPUInfeedOutfeedSessionHook(config, mode, enqueue_fn),
training.LoggingTensorHook(
{'loss': array_ops.identity(loss),
'step': training.get_global_step()},
every_n_secs=30)
]
summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)
with ops.control_dependencies([loss]):
update_ops = _sync_variables_ops()
# Validate the TPU training graph to catch basic errors
_validate_tpu_training_graph()
return model_fn_lib.EstimatorSpec(
mode,
loss=loss,
training_hooks=hooks,
train_op=control_flow_ops.group(*update_ops))
# Now eval.
total_loss, eval_metric_ops = _eval_on_tpu_system(
model_fn_wrapper, dequeue_fn)
iterations_per_loop_var = _create_iterations_per_loop()
mean_loss = math_ops.div(
total_loss,
math_ops.cast(iterations_per_loop_var, dtype=total_loss.dtype))
    # Creates a dummy metric update_op for all metrics. Estimator expects all
    # metrics in eval_metric_ops to have an update_op and calls them one by
    # one. The real metric update_ops are invoked in a separate thread. So,
    # here we give Estimator the dummy op for all metrics.
with ops.control_dependencies([mean_loss]):
      # After the TPU evaluation computation is done (the mean_loss tensor),
      # read all variables back from the TPU and update the eval step counter
      # properly.
internal_ops_to_run = _sync_variables_ops()
internal_ops_to_run.append(
_increase_eval_step_op(iterations_per_loop_var))
with ops.control_dependencies(internal_ops_to_run):
dummy_update_op = control_flow_ops.no_op()
eval_metric_ops, eval_update_ops = (
eval_metric_ops.to_metric_metric_ops_for_tpu(
config, dummy_update_op))
hooks = [
TPUInfeedOutfeedSessionHook(config, mode, enqueue_fn, eval_update_ops),
]
return model_fn_lib.EstimatorSpec(
mode,
loss=mean_loss,
evaluation_hooks=hooks,
eval_metric_ops=eval_metric_ops)
return _model_fn
def _eval_on_tpu_system(model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
config = model_fn_wrapper.config.tpu_config
num_shards = config.num_shards
iterations_per_loop_var = _create_iterations_per_loop()
single_tpu_eval_step, eval_metric_ops = (
model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn))
def multi_tpu_eval_steps_on_single_shard():
return training_loop.repeat(iterations_per_loop_var,
single_tpu_eval_step,
[_ZERO_LOSS],
name='loop')
(loss,) = tpu.shard(multi_tpu_eval_steps_on_single_shard,
inputs=[],
num_shards=num_shards,
outputs_from_all_shards=False)
return loss, eval_metric_ops
def _train_on_tpu_system(model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
num_shards = model_fn_wrapper.config.tpu_config.num_shards
iterations_per_loop_var = _create_iterations_per_loop()
single_tpu_train_step = model_fn_wrapper.convert_to_single_tpu_train_step(
dequeue_fn)
def multi_tpu_train_steps_on_single_shard():
return training_loop.repeat(
iterations_per_loop_var,
single_tpu_train_step,
[_INITIAL_LOSS],
name=b'loop')
(loss,) = tpu.shard(multi_tpu_train_steps_on_single_shard,
inputs=[],
num_shards=num_shards,
outputs_from_all_shards=False)
return loss
def _validate_tpu_training_graph():
"""Validate graph before running distributed training.
Raises:
ValueError: If the graph seems invalid for running on device
"""
operations = ops.get_default_graph().get_operations()
  # Check that there is at least one CrossReplicaSum operation in the graph.
  # This should be introduced by using the CrossShardOptimizer wrapper.
cross_replica_sum_ops = [o for o in operations
if o.type == _CROSS_REPLICA_SUM_OP]
if not cross_replica_sum_ops:
raise ValueError(
'CrossShardOptimizer must be used for model training on TPUs.')
|
the-stack_106_26808 | """
A jones calculus example.
You can use the package for simple normal incidence Jones calculus. In the
:mod:`dtmm.jones` you will find all the functionality to work with jones calculus.
For example, we can compute the transmittance properties of a simple Twisted
Nematic director profile. We compute wavelength-dependent transmittance of
normally white and normally black TN modes. We build a left-handed twisted
nematic in a 4 micron cell in the first-minimum condition (maximum transmission
at 550 nm).
The matrix creation functions in the jones module obey numpy broadcasting rules,
so we can build and multiply matrices at different wavelengths simultaneously.
See the source code of the example below.
"""
import numpy as np
from dtmm import jones
import matplotlib.pyplot as plt
#---------------------------
# user options
#---------------------------
#:thickness of LC cell in microns
thickness = 4
#: number of layers (should be high enough...)
nlayers = 100
#: which wavelengths to compute (in nanometers)
k = np.linspace(2*np.pi/700 ,2*np.pi/400, 200)
wavelengths = 2*np.pi/k
#:ordinary refractive index of LC
no = 1.5
#: extraordinary refractive index of LC
ne = 1.62
#---------------
# implementation
#---------------
step = thickness*1000/nlayers #in nanometers
x_jvec = jones.jonesvec((1,0))
y_jvec = jones.jonesvec((0,1))
phis = np.linspace(0, np.pi/2, nlayers) #twist rotation angle
phase = (ne - no) * k * step #phase retardation in each of the layers
matrices = [jones.polarizer(x_jvec)] #x polarizer
#add retarders... left handed TN
for phi in phis:
matrices.append(jones.retarder(phase, phi))
#next, we multiply matrices together in reverse order ...tn2.tn1.tn0.x
jmat = jones.multi_dot(matrices, reverse = True)
normally_white_jmat = jones.dotmm(jones.polarizer(y_jvec), jmat) #crossed polarizers
normally_black_jmat = jones.dotmm(jones.polarizer(x_jvec), jmat) #parallel polarizers
nw_jvec = jones.dotmv(normally_white_jmat, x_jvec)
nb_jvec = jones.dotmv(normally_black_jmat, x_jvec)
plt.title("First minimum TN transmittance")
plt.plot(wavelengths, jones.jones_intensity(nw_jvec), label = "Normally white mode")
plt.plot(wavelengths, jones.jones_intensity(nb_jvec), label = "Normally black mode")
plt.legend()
plt.xlabel("wavelength")
plt.ylabel("transmittance")
|
the-stack_106_26809 | '''
Created on May 30, 2019
@author: mohammedmostafa
'''
import numpy as np
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
import matplotlib.pyplot as plt
xs = np.random.choice(np.arange(-3,3,.01),500)
ys = xs**2
x_test=np.linspace(-3,3,1000)
y_test=x_test**2
model = Sequential();
model.add(Dense(units=10,input_dim=1, activation='relu'))
model.add(Dense(units=1))
model.compile(optimizer="sgd", loss = "mean_squared_error")
model.summary()
history = model.fit(xs, ys, epochs=400, verbose=1)
results = model.predict(x_test )
plt.plot(x_test,results,c='r')
plt.plot(x_test,y_test,c='b')
plt.show()
|
the-stack_106_26810 | # XXX TO DO:
# - popup menu
# - support partial or total redisplay
# - key bindings (instead of quick-n-dirty bindings on Canvas):
# - up/down arrow keys to move focus around
# - ditto for page up/down, home/end
# - left/right arrows to expand/collapse & move out/in
# - more doc strings
# - add icons for "file", "module", "class", "method"; better "python" icon
# - callback for selection???
# - multiple-item selection
# - tooltips
# - redo geometry without magic numbers
# - keep track of object ids to allow more careful cleaning
# - optimize tree redraw after expand of subnode
import os
from tkinter import *
from tkinter.ttk import Scrollbar
from idlelib import zoomheight
from idlelib.config import idleConf
ICONDIR = "Icons"
# Look for Icons subdirectory in the same directory as this module
try:
_icondir = os.path.join(os.path.dirname(__file__), ICONDIR)
except NameError:
_icondir = ICONDIR
if os.path.isdir(_icondir):
ICONDIR = _icondir
elif not os.path.isdir(ICONDIR):
raise RuntimeError("can't find icon directory (%r)" % (ICONDIR,))
def listicons(icondir=ICONDIR):
"""Utility to display the available icons."""
root = Tk()
import glob
list = glob.glob(os.path.join(icondir, "*.gif"))
list.sort()
images = []
row = column = 0
for file in list:
name = os.path.splitext(os.path.basename(file))[0]
image = PhotoImage(file=file, master=root)
images.append(image)
label = Label(root, image=image, bd=1, relief="raised")
label.grid(row=row, column=column)
label = Label(root, text=name)
label.grid(row=row+1, column=column)
column = column + 1
if column >= 10:
row = row+2
column = 0
root.images = images
class TreeNode:
def __init__(self, canvas, parent, item):
self.canvas = canvas
self.parent = parent
self.item = item
self.state = 'collapsed'
self.selected = False
self.children = []
self.x = self.y = None
self.iconimages = {} # cache of PhotoImage instances for icons
def destroy(self):
for c in self.children[:]:
self.children.remove(c)
c.destroy()
self.parent = None
def geticonimage(self, name):
try:
return self.iconimages[name]
except KeyError:
pass
file, ext = os.path.splitext(name)
ext = ext or ".gif"
fullname = os.path.join(ICONDIR, file + ext)
image = PhotoImage(master=self.canvas, file=fullname)
self.iconimages[name] = image
return image
def select(self, event=None):
if self.selected:
return
self.deselectall()
self.selected = True
self.canvas.delete(self.image_id)
self.drawicon()
self.drawtext()
def deselect(self, event=None):
if not self.selected:
return
self.selected = False
self.canvas.delete(self.image_id)
self.drawicon()
self.drawtext()
def deselectall(self):
if self.parent:
self.parent.deselectall()
else:
self.deselecttree()
def deselecttree(self):
if self.selected:
self.deselect()
for child in self.children:
child.deselecttree()
def flip(self, event=None):
if self.state == 'expanded':
self.collapse()
else:
self.expand()
self.item.OnDoubleClick()
return "break"
def expand(self, event=None):
if not self.item._IsExpandable():
return
if self.state != 'expanded':
self.state = 'expanded'
self.update()
self.view()
def collapse(self, event=None):
if self.state != 'collapsed':
self.state = 'collapsed'
self.update()
def view(self):
top = self.y - 2
bottom = self.lastvisiblechild().y + 17
height = bottom - top
visible_top = self.canvas.canvasy(0)
visible_height = self.canvas.winfo_height()
visible_bottom = self.canvas.canvasy(visible_height)
if visible_top <= top and bottom <= visible_bottom:
return
x0, y0, x1, y1 = self.canvas._getints(self.canvas['scrollregion'])
if top >= visible_top and height <= visible_height:
fraction = top + height - visible_height
else:
fraction = top
fraction = float(fraction) / y1
self.canvas.yview_moveto(fraction)
def lastvisiblechild(self):
if self.children and self.state == 'expanded':
return self.children[-1].lastvisiblechild()
else:
return self
def update(self):
if self.parent:
self.parent.update()
else:
oldcursor = self.canvas['cursor']
self.canvas['cursor'] = "watch"
self.canvas.update()
self.canvas.delete(ALL) # XXX could be more subtle
self.draw(7, 2)
x0, y0, x1, y1 = self.canvas.bbox(ALL)
self.canvas.configure(scrollregion=(0, 0, x1, y1))
self.canvas['cursor'] = oldcursor
def draw(self, x, y):
# XXX This hard-codes too many geometry constants!
dy = 20
self.x, self.y = x, y
self.drawicon()
self.drawtext()
if self.state != 'expanded':
return y + dy
# draw children
if not self.children:
sublist = self.item._GetSubList()
if not sublist:
# _IsExpandable() was mistaken; that's allowed
return y+17
for item in sublist:
child = self.__class__(self.canvas, self, item)
self.children.append(child)
cx = x+20
cy = y + dy
cylast = 0
for child in self.children:
cylast = cy
self.canvas.create_line(x+9, cy+7, cx, cy+7, fill="gray50")
cy = child.draw(cx, cy)
if child.item._IsExpandable():
if child.state == 'expanded':
iconname = "minusnode"
callback = child.collapse
else:
iconname = "plusnode"
callback = child.expand
image = self.geticonimage(iconname)
id = self.canvas.create_image(x+9, cylast+7, image=image)
# XXX This leaks bindings until canvas is deleted:
self.canvas.tag_bind(id, "<1>", callback)
self.canvas.tag_bind(id, "<Double-1>", lambda x: None)
id = self.canvas.create_line(x+9, y+10, x+9, cylast+7,
##stipple="gray50", # XXX Seems broken in Tk 8.0.x
fill="gray50")
self.canvas.tag_lower(id) # XXX .lower(id) before Python 1.5.2
return cy
def drawicon(self):
if self.selected:
imagename = (self.item.GetSelectedIconName() or
self.item.GetIconName() or
"openfolder")
else:
imagename = self.item.GetIconName() or "folder"
image = self.geticonimage(imagename)
id = self.canvas.create_image(self.x, self.y, anchor="nw", image=image)
self.image_id = id
self.canvas.tag_bind(id, "<1>", self.select)
self.canvas.tag_bind(id, "<Double-1>", self.flip)
def drawtext(self):
textx = self.x+20-1
texty = self.y-4
labeltext = self.item.GetLabelText()
if labeltext:
id = self.canvas.create_text(textx, texty, anchor="nw",
text=labeltext)
self.canvas.tag_bind(id, "<1>", self.select)
self.canvas.tag_bind(id, "<Double-1>", self.flip)
x0, y0, x1, y1 = self.canvas.bbox(id)
textx = max(x1, 200) + 10
text = self.item.GetText() or "<no text>"
try:
self.entry
except AttributeError:
pass
else:
self.edit_finish()
try:
self.label
except AttributeError:
# padding carefully selected (on Windows) to match Entry widget:
self.label = Label(self.canvas, text=text, bd=0, padx=2, pady=2)
theme = idleConf.CurrentTheme()
if self.selected:
self.label.configure(idleConf.GetHighlight(theme, 'hilite'))
else:
self.label.configure(idleConf.GetHighlight(theme, 'normal'))
id = self.canvas.create_window(textx, texty,
anchor="nw", window=self.label)
self.label.bind("<1>", self.select_or_edit)
self.label.bind("<Double-1>", self.flip)
self.text_id = id
def select_or_edit(self, event=None):
if self.selected and self.item.IsEditable():
self.edit(event)
else:
self.select(event)
def edit(self, event=None):
self.entry = Entry(self.label, bd=0, highlightthickness=1, width=0)
self.entry.insert(0, self.label['text'])
self.entry.selection_range(0, END)
self.entry.pack(ipadx=5)
self.entry.focus_set()
self.entry.bind("<Return>", self.edit_finish)
self.entry.bind("<Escape>", self.edit_cancel)
def edit_finish(self, event=None):
try:
entry = self.entry
del self.entry
except AttributeError:
return
text = entry.get()
entry.destroy()
if text and text != self.item.GetText():
self.item.SetText(text)
text = self.item.GetText()
self.label['text'] = text
self.drawtext()
self.canvas.focus_set()
def edit_cancel(self, event=None):
try:
entry = self.entry
del self.entry
except AttributeError:
return
entry.destroy()
self.drawtext()
self.canvas.focus_set()
class TreeItem:
"""Abstract class representing tree items.
Methods should typically be overridden, otherwise a default action
is used.
"""
def __init__(self):
"""Constructor. Do whatever you need to do."""
def GetText(self):
"""Return text string to display."""
def GetLabelText(self):
"""Return label text string to display in front of text (if any)."""
expandable = None
def _IsExpandable(self):
"""Do not override! Called by TreeNode."""
if self.expandable is None:
self.expandable = self.IsExpandable()
return self.expandable
def IsExpandable(self):
"""Return whether there are subitems."""
return 1
def _GetSubList(self):
"""Do not override! Called by TreeNode."""
if not self.IsExpandable():
return []
sublist = self.GetSubList()
if not sublist:
self.expandable = 0
return sublist
def IsEditable(self):
"""Return whether the item's text may be edited."""
def SetText(self, text):
"""Change the item's text (if it is editable)."""
def GetIconName(self):
"""Return name of icon to be displayed normally."""
def GetSelectedIconName(self):
"""Return name of icon to be displayed when selected."""
def GetSubList(self):
"""Return list of items forming sublist."""
def OnDoubleClick(self):
"""Called on a double-click on the item."""
# Example application
class FileTreeItem(TreeItem):
"""Example TreeItem subclass -- browse the file system."""
def __init__(self, path):
self.path = path
def GetText(self):
return os.path.basename(self.path) or self.path
def IsEditable(self):
return os.path.basename(self.path) != ""
def SetText(self, text):
newpath = os.path.dirname(self.path)
newpath = os.path.join(newpath, text)
if os.path.dirname(newpath) != os.path.dirname(self.path):
return
try:
os.rename(self.path, newpath)
self.path = newpath
except OSError:
pass
def GetIconName(self):
if not self.IsExpandable():
return "python" # XXX wish there was a "file" icon
def IsExpandable(self):
return os.path.isdir(self.path)
def GetSubList(self):
try:
names = os.listdir(self.path)
except OSError:
return []
names.sort(key = os.path.normcase)
sublist = []
for name in names:
item = FileTreeItem(os.path.join(self.path, name))
sublist.append(item)
return sublist
# A canvas widget with scroll bars and some useful bindings
class ScrolledCanvas:
def __init__(self, master, **opts):
if 'yscrollincrement' not in opts:
opts['yscrollincrement'] = 17
self.master = master
self.frame = Frame(master)
self.frame.rowconfigure(0, weight=1)
self.frame.columnconfigure(0, weight=1)
self.canvas = Canvas(self.frame, **opts)
self.canvas.grid(row=0, column=0, sticky="nsew")
self.vbar = Scrollbar(self.frame, name="vbar")
self.vbar.grid(row=0, column=1, sticky="nse")
self.hbar = Scrollbar(self.frame, name="hbar", orient="horizontal")
self.hbar.grid(row=1, column=0, sticky="ews")
self.canvas['yscrollcommand'] = self.vbar.set
self.vbar['command'] = self.canvas.yview
self.canvas['xscrollcommand'] = self.hbar.set
self.hbar['command'] = self.canvas.xview
self.canvas.bind("<Key-Prior>", self.page_up)
self.canvas.bind("<Key-Next>", self.page_down)
self.canvas.bind("<Key-Up>", self.unit_up)
self.canvas.bind("<Key-Down>", self.unit_down)
#if isinstance(master, Toplevel) or isinstance(master, Tk):
self.canvas.bind("<Alt-Key-2>", self.zoom_height)
self.canvas.focus_set()
def page_up(self, event):
self.canvas.yview_scroll(-1, "page")
return "break"
def page_down(self, event):
self.canvas.yview_scroll(1, "page")
return "break"
def unit_up(self, event):
self.canvas.yview_scroll(-1, "unit")
return "break"
def unit_down(self, event):
self.canvas.yview_scroll(1, "unit")
return "break"
def zoom_height(self, event):
zoomheight.zoom_height(self.master)
return "break"
def _tree_widget(parent): # htest #
top = Toplevel(parent)
x, y = map(int, parent.geometry().split('+')[1:])
top.geometry("+%d+%d" % (x+50, y+175))
sc = ScrolledCanvas(top, bg="white", highlightthickness=0, takefocus=1)
sc.frame.pack(expand=1, fill="both", side=LEFT)
item = FileTreeItem(ICONDIR)
node = TreeNode(sc.canvas, None, item)
node.expand()
if __name__ == '__main__':
# test_tree is currently a copy of this
from idlelib.idle_test.htest import run
run(_tree_widget)
|
the-stack_106_26811 | #!/usr/bin/env python
from gppylib.gplog import *
from gppylib.gpcatalog import *
import re
class ForeignKeyCheck:
"""
PURPOSE: detect differences between foreign key and reference key values among catalogs
"""
def __init__(self, db_connection, logger, shared_option, autoCast):
self.db_connection = db_connection
self.logger = logger
self.shared_option = shared_option
self.autoCast = autoCast
self.query_filters = dict()
self.query_filters['pg_appendonly.relid'] = "(relstorage='a' or relstorage='c')"
self.query_filters['pg_attribute.attrelid'] = "true"
self.query_filters["pg_index.indexrelid"] = "(relkind='i')"
def runCheck(self, tables):
foreign_key_issues = dict()
for cat in sorted(tables):
issues = self.checkTableForeignKey(cat)
if issues:
foreign_key_issues[cat.getTableName()] = issues
return foreign_key_issues
def checkTableForeignKey(self, cat):
"""
return: list of issues in tuple (pkcatname, fields, results) format for the given catalog
"""
catname = cat.getTableName()
fkeylist = cat.getForeignKeys()
isShared = cat.isShared()
pkeylist = cat.getPrimaryKey()
coltypes = cat.getTableColtypes()
# skip tables without fkey
if len(fkeylist) <= 0:
return
if len(cat.getPrimaryKey()) <= 0:
return
# skip these master-only tables
skipped_masteronly = ['gp_relation_node', 'pg_description',
'pg_shdescription', 'pg_stat_last_operation',
'pg_stat_last_shoperation', 'pg_statistic']
if catname in skipped_masteronly:
return
# skip shared/non-shared tables
if self.shared_option:
if re.match("none", self.shared_option, re.I) and isShared:
return
if re.match("only", self.shared_option, re.I) and not isShared:
return
# primary key lists
# cat1.objid as gp_fastsequence_objid
cat1_pkeys_column_rename = []
pkey_aliases = []
# build array of catalog primary keys (with aliases) and
# primary key alias list
for pk in pkeylist:
cat1_pkeys_column_rename.append('cat1.' + pk + ' as %s_%s' % (catname, pk))
pkey_aliases.append('%s_%s' % (catname, pk))
self.logger.info('Building %d queries to check FK constraint on table %s' % (len(fkeylist), catname))
issue_list = list()
for fkeydef in fkeylist:
castedFkey = [c + self.autoCast.get(coltypes[c], '') for c in fkeydef.getColumns()]
fkeystr = ', '.join(castedFkey)
pkeystr = ', '.join(fkeydef.getPKey())
pkcatname = fkeydef.getPkeyTableName()
catname_filter = '%s.%s' % (catname, fkeydef.getColumns()[0])
#
# The goal of this check is to validate foreign keys, which are associations between two tables.
# We want to find a missing foreign key entry or a missing reference key entry when comparing
# two tables that are supposed to have references to one another, either bidirectionally, where
# both tables know about the other, or unidirectionally where one side of the comparison expects
# the other to know about it.
#
# When both sides of a comparison demand a reference on the other side,
# we can do a full join to look for missing entries. In cases where the association (foreign key) is
# unidirectional, we validate only one side of the comparison,
# using a left outer join to look for missing entries on only one side of the comparison.
#
# In the full-join case, we are explicitly talking about pg_class vs. catalog, and
# we use a filter to select only the entries of interest in pg_class--the entries that
# are foreign keys--using a very specific filtering condition, since the full join would otherwise contain
# unwanted entries from pg_class.
#
can_use_full_join = self.query_filters.has_key(catname_filter) and pkcatname == 'pg_class'
if can_use_full_join:
qry = self.get_fk_query_full_join(catname, pkcatname, fkeystr, pkeystr,
pkey_aliases, cat1pkeys=cat1_pkeys_column_rename, filter=self.query_filters[catname_filter])
else:
qry = self.get_fk_query_left_join(catname, pkcatname, fkeystr, pkeystr, pkey_aliases, cat1_pkeys_column_rename)
issue_list += self._validate_relation(catname, fkeystr, pkcatname, pkeystr, qry)
return issue_list
def _validate_relation(self, catname, fkeystr, pkcatname, pkeystr, qry):
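        """Runs `qry` and logs the outcome; returns a list containing a
        (pkcatname, fields, results) tuple if missing references were found,
        otherwise an empty list.
        """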
issue_list = []
try:
curs = self.db_connection.query(qry)
nrows = curs.ntuples()
if nrows == 0:
self.logger.info('[OK] Foreign key check for %s(%s) referencing %s(%s)' %
(catname, fkeystr, pkcatname, pkeystr))
else:
self.logger.info('[FAIL] Foreign key check for %s(%s) referencing %s(%s)' %
(catname, fkeystr, pkcatname, pkeystr))
self.logger.error(' %s has %d issue(s): entry has NULL reference of %s(%s)' %
(catname, nrows, pkcatname, pkeystr))
fields = curs.listfields()
log_literal(self.logger, logging.ERROR, " " + " | ".join(fields))
for row in curs.getresult():
log_literal(self.logger, logging.ERROR, " " + " | ".join(map(str, row)))
results = curs.getresult()
issue_list.append((pkcatname, fields, results))
except Exception as e:
err_msg = '[ERROR] executing: Foreign key check for catalog table {0}. Query : \n {1}\n'.format(catname, qry)
err_msg += str(e)
raise Exception(err_msg)
return issue_list
# -------------------------------------------------------------------------------
def get_fk_query_left_join(self, catname, pkcatname, fkeystr, pkeystr, pkeys, cat1pkeys):
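        """Builds the one-sided (LEFT OUTER JOIN) check query.
        The query returns rows of `catname` whose foreign key has no matching
        primary key in `pkcatname`, checked on every segment and on the master
        (reported as gp_segment_id -1).
        """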
qry = """
SELECT {primary_key_alias}, missing_catalog, present_key, {cat2_dot_pk},
array_agg(gp_segment_id order by gp_segment_id) as segids
FROM (
SELECT (case when cat1.{FK1} is not NULL then '{CATALOG2}' when cat2.{PK2} is not NULL then '{CATALOG1}' end) as missing_catalog,
(case when cat1.{FK1} is not NULL then '{FK1}' when cat2.{PK2} is not NULL then '{PK2}' end) as present_key,
cat1.gp_segment_id, {cat1_dot_pk}, cat1.{FK1} as {cat2_dot_pk}
FROM
gp_dist_random('{CATALOG1}') cat1 LEFT OUTER JOIN
gp_dist_random('{CATALOG2}') cat2
ON (cat1.gp_segment_id = cat2.gp_segment_id AND
cat1.{FK1} = cat2.{PK2} )
WHERE cat2.{PK2} is NULL
AND cat1.{FK1} != 0
UNION ALL
SELECT (case when cat1.{FK1} is not NULL then '{CATALOG2}' when cat2.{PK2} is not NULL then '{CATALOG1}' end) as missing_catalog,
(case when cat1.{FK1} is not NULL then '{FK1}' when cat2.{PK2} is not NULL then '{PK2}' end) as present_key,
-1 as gp_segment_id, {cat1_dot_pk}, cat1.{FK1} as {cat2_dot_pk}
FROM
{CATALOG1} cat1 LEFT OUTER JOIN
{CATALOG2} cat2
ON (cat1.gp_segment_id = cat2.gp_segment_id AND
cat1.{FK1} = cat2.{PK2} )
WHERE cat2.{PK2} is NULL
AND cat1.{FK1} != 0
ORDER BY {primary_key_alias}, gp_segment_id
) allresults
GROUP BY {primary_key_alias}, {cat2_dot_pk}, missing_catalog, present_key
""".format(FK1=fkeystr,
PK2=pkeystr,
CATALOG1=catname,
CATALOG2=pkcatname,
cat1_dot_pk=', '.join(cat1pkeys),
cat2_dot_pk='%s_%s' % (pkcatname, pkeystr),
primary_key_alias=', '.join(pkeys))
return qry
def get_fk_query_full_join(self, catname, pkcatname, fkeystr, pkeystr, pkeys, cat1pkeys, filter):
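        """Builds the two-sided (FULL OUTER JOIN) check query.
        The query returns entries present in only one of `catname` and
        `pkcatname` (subject to `filter`), checked on every segment and on the
        master (reported as gp_segment_id -1).
        """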
qry = """
SELECT {primary_key_alias}, missing_catalog, present_key, {cat2_dot_pk},
array_agg(gp_segment_id order by gp_segment_id) as segids
FROM (
SELECT (case when cat1.{FK1} is not NULL then '{CATALOG2}' when cat2.{PK2} is not NULL then '{CATALOG1}' end) as missing_catalog,
(case when cat1.{FK1} is not NULL then '{FK1}' when cat2.{PK2} is not NULL then '{PK2}' end) as present_key,
COALESCE(cat1.gp_segment_id,cat2.gp_segment_id) as gp_segment_id , {cat1_dot_pk}, COALESCE(cat1.{FK1}, cat2.{PK2}) as {cat2_dot_pk}
FROM
gp_dist_random('{CATALOG1}') cat1 FULL OUTER JOIN
gp_dist_random('{CATALOG2}') cat2
ON (cat1.gp_segment_id = cat2.gp_segment_id AND
cat1.{FK1} = cat2.{PK2} )
WHERE (cat2.{PK2} is NULL or cat1.{FK1} is NULL)
AND {filter}
UNION ALL
SELECT (case when cat1.{FK1} is not NULL then '{CATALOG2}' when cat2.{PK2} is not NULL then '{CATALOG1}' end) as missing_catalog,
(case when cat1.{FK1} is not NULL then '{FK1}' when cat2.{PK2} is not NULL then '{PK2}' end) as present_key,
-1, {cat1_dot_pk}, COALESCE(cat1.{FK1}, cat2.{PK2}) as {cat2_dot_pk}
FROM
{CATALOG1} cat1 FULL OUTER JOIN
{CATALOG2} cat2
ON (cat1.gp_segment_id = cat2.gp_segment_id AND
cat1.{FK1} = cat2.{PK2} )
WHERE (cat2.{PK2} is NULL or cat1.{FK1} is NULL)
AND {filter}
ORDER BY {primary_key_alias}, gp_segment_id
) allresults
GROUP BY {primary_key_alias}, {cat2_dot_pk}, missing_catalog, present_key
""".format(FK1=fkeystr,
PK2=pkeystr,
CATALOG1=catname,
CATALOG2=pkcatname,
cat1_dot_pk=', '.join(cat1pkeys),
cat2_dot_pk='%s_%s' % (pkcatname, pkeystr),
primary_key_alias=', '.join(pkeys),
filter=filter)
return qry
|
the-stack_106_26812 | from collections import OrderedDict
import dask.dataframe as dd
import pandas as pd
import pytest
from dask.dataframe.utils import tm
import ibis
import ibis.expr.datatypes as dt
from ... import connect, execute
@pytest.fixture(scope="module")
def value():
return OrderedDict([("fruit", "pear"), ("weight", 0)])
@pytest.fixture(scope="module")
def struct_client(value):
df = dd.from_pandas(
pd.DataFrame(
{
"s": [
OrderedDict([("fruit", "apple"), ("weight", None)]),
value,
OrderedDict([("fruit", "pear"), ("weight", 1)]),
],
"key": list("aab"),
"value": [1, 2, 3],
}
),
npartitions=1,
)
return connect({"t": df})
@pytest.fixture
def struct_table(struct_client):
return struct_client.table(
"t",
schema={
"s": dt.Struct.from_tuples(
[("fruit", dt.string), ("weight", dt.int8)]
)
},
)
def test_struct_field_literal(value):
struct = ibis.literal(value)
assert struct.type() == dt.Struct.from_tuples(
[("fruit", dt.string), ("weight", dt.int8)]
)
expr = struct['fruit']
result = execute(expr)
assert result == "pear"
expr = struct['weight']
result = execute(expr)
assert result == 0
def test_struct_field_series(struct_table):
t = struct_table
expr = t.s['fruit']
result = expr.execute()
expected = dd.from_pandas(
pd.Series(["apple", "pear", "pear"], name="fruit"), npartitions=1,
)
tm.assert_series_equal(result.compute(), expected.compute())
def test_struct_field_series_group_by_key(struct_table):
t = struct_table
expr = t.groupby(t.s['fruit']).aggregate(total=t.value.sum())
result = expr.execute()
expected = dd.from_pandas(
pd.DataFrame([("apple", 1), ("pear", 5)], columns=["fruit", "total"]),
npartitions=1,
)
tm.assert_frame_equal(result.compute(), expected.compute())
def test_struct_field_series_group_by_value(struct_table):
t = struct_table
expr = t.groupby(t.key).aggregate(total=t.s['weight'].sum())
result = expr.execute()
# these are floats because we have a NULL value in the input data
expected = dd.from_pandas(
pd.DataFrame([("a", 0.0), ("b", 1.0)], columns=["key", "total"]),
npartitions=1,
)
tm.assert_frame_equal(result.compute(), expected.compute())
|
the-stack_106_26814 | #!/usr/bin/env python3
import sys
import subprocess
# Custom Enum for Operations
dmt_counter=0
def dmtCounter(reset=False):
global dmt_counter
if reset:
dmt_counter = 0
result = dmt_counter
dmt_counter += 1
return result
#Operations
OP_PUSH=dmtCounter(True)
OP_PLUS=dmtCounter()
OP_MINUS=dmtCounter()
OP_DISPLAY=dmtCounter()
OP_RAWSTACK=dmtCounter()
OP_DUMP=dmtCounter()
COUNT_OPS=dmtCounter()
#Operations defined as usable code:
def mov(x):
return (OP_PUSH, x)
def add():
return (OP_PLUS, )
def sub():
return (OP_MINUS, )
def hlt():
return (OP_DUMP, )
def stk():
return(OP_RAWSTACK, )
def dis():
return (OP_DISPLAY, )
#Simulate code with an interpreter:
def simulateProgram(program):
stack = []
for op in program:
assert COUNT_OPS == 6, "Expected Operation Handling Error"
if op[0] == OP_PUSH:
stack.append(op[1])
elif op[0] == OP_PLUS:
a = stack.pop()
b = stack.pop()
stack.append(a + b)
elif op[0] == OP_MINUS:
a = stack.pop()
b = stack.pop()
stack.append(b - a)
elif op[0] == OP_DISPLAY:
z = stack.pop()
print(z)
elif op[0] == OP_RAWSTACK:
print("Current Stack:", stack)
elif op[0] == OP_DUMP:
a = stack.pop()
print(a)
else:
assert False, "Unreachable"
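# Quick illustration (added for clarity; not called anywhere in the original flow):
# mov pushes a value, add pops two and pushes the sum, hlt pops and prints. For the
# program [mov(34), mov(35), add(), hlt()] the stack evolves as
# [] -> [34] -> [34, 35] -> [69] -> [] and "69" is printed.
def _demo_simulation():
    # Uses only the helpers defined above; safe to delete.
    simulateProgram([mov(34), mov(35), add(), hlt()])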
#Compile code in Assembly for Execution
def compileProgram(program, out_file_path):
with open(out_file_path, "w") as out:
out.write("segment .text\n")
#Dump Func
out.write("global _start\n")
out.write("_start:\n")
for op in program:
assert COUNT_OPS == 6, "Expected Operation Handling Error"
if op[0] == OP_PUSH:
out.write(" ;; -- mov %d --\n" % op[1])
out.write(" push %d\n" % op[1])
elif op[0] == OP_PLUS:
out.write(" ;; -- add --\n")
out.write(" pop rax\n")
out.write(" pop rbx\n")
out.write(" add rax, rbx\n")
out.write(" push rax\n")
elif op[0] == OP_MINUS:
out.write(" ;; -- sub --\n")
out.write(" pop rax\n")
out.write(" pop rbx\n")
out.write(" sub rbx, rax\n")
                out.write("    push rbx\n")  # rbx holds b - a; pushing rax would push the wrong operand
elif op[0] == OP_RAWSTACK:
out.write(" ;; -- stk --\n")
out.write(" ;; -- TODO")
elif op[0] == OP_DISPLAY:
out.write(" ;; -- dis --\n")
out.write(" ;; -- TODO")
elif op[0] == OP_DUMP:
out.write(" ;; -- hlt --\n")
out.write(" pop rbx\n")
#out.write(" call dump\n")
else:
assert False, "Unreachable"
out.write(" mov rax, 60\n")
out.write(" mov rdi, 0\n")
out.write(" syscall\n")
#Code for Compilation & Simulation
# TODO: unhardcode program
program=[
stk(),
mov(34),
mov(35),
stk(),
add(),
hlt(),
]
def usage():
print("Usage: DMT <SUBCOMMAND> [ARGS]")
print("SUBCOMMANDS:")
print(" sim Simulate the program")
print(" com Compile the Program")
def call_cmd(cmd):
print(cmd)
subprocess.call(cmd)
if __name__ == '__main__':
if len(sys.argv) < 2:
usage()
print("Please provide a subcommand")
exit(1)
subcommand = sys.argv[1]
if subcommand == "sim":
simulateProgram(program)
elif subcommand == "com":
compileProgram(program, "test.asm")
call_cmd(["nasm", "-felf64", "test.asm"])
call_cmd(["ld", "-o", "test", "test.o"])
#call_cmd(["rm", "output.asm", "output.o"])
else:
usage()
        print("ERROR: Unknown subcommand %s" % (subcommand))
exit(1)
|
the-stack_106_26815 | #!/usr/bin/python3 -u
import zlib
from random import randint
import os
from Crypto.Cipher import Salsa20
flag = open("./flag").read()
def compress(text):
return zlib.compress(bytes(text.encode("utf-8")))
def encrypt(plaintext):
secret = os.urandom(32)
cipher = Salsa20.new(key=secret)
return cipher.nonce + cipher.encrypt(plaintext)
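# Note added for clarity (not part of the original challenge code): Salsa20 is a
# stream cipher, so the ciphertext length equals the compressed plaintext length.
# Because the flag is prepended to user input before zlib compression, the printed
# length leaks how well a guess matches the flag (a compression-oracle setup).
# A local, self-contained sketch of that length signal; the secret/guess values
# below are made up for illustration:
def _length_signal_demo(secret=b"demo{guess_me}", guess=b"demo{"):
    # Repeated substrings compress better, so a correct prefix guess tends to
    # yield a shorter compressed blob than an unrelated guess of equal length.
    matching = len(zlib.compress(secret + guess))
    unrelated = len(zlib.compress(secret + b"qwxyt"))
    return matching, unrelated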
def main():
while True:
usr_input = input("Enter your text to be encrypted: ")
compressed_text = compress(flag + usr_input)
encrypted = encrypt(compressed_text)
nonce = encrypted[:8]
encrypted_text = encrypted[8:]
print(nonce)
print(encrypted_text)
print(len(encrypted_text))
if __name__ == '__main__':
main()
|
the-stack_106_26816 | # -*- coding: utf-8 -*-
from bs4 import BeautifulSoup # HTML parsing
import re
import urllib.request
import xlwt # Excel spreadsheet output
import sqlite3 # SQLite database access
def main():
baseurl = 'https://movie.douban.com/top250?start='
datalist = getData(baseurl)
    savepath = 'douban_movie_top250.xls'
saveData(datalist,savepath)
def askURL(url):
head = {
        # fill in your own request headers here (e.g. User-Agent)
}
request = urllib.request.Request(url,headers=head)
try:
response = urllib.request.urlopen(request)
html = response.read().decode("utf-8")
# print(html)
except urllib.error.HTTPError as e:
        if hasattr(e,"code"): # check whether the exception object carries a status code
            print(e.code) # the code attribute lives on HTTPError
if hasattr(e,"reason"):
print(e.reason)
return html
# askURL('https://movie.douban.com/top250?start=')
def getData(baseurl):
datalist = []
    for i in range(0,10): # loop over all 10 pages of the douban movie top 250
        url = baseurl + str(i*25) # convert the int page offset to str for concatenation
html = askURL(url)
soup = BeautifulSoup(html,"html.parser")
        for item in soup.find_all('div',class_='item'): # class is a Python keyword, so BeautifulSoup uses class_ instead
# print(item)
# break
data = []
item = str(item)
link = re.findall(r'<a href="(.*?)">',item)[0]
data.append(link)
img = re.findall(r'<img.* src="(.*?)".*>', item, re.S)[0]
data.append(img)
title = re.findall(r'<span class="title">(.*?)</span>',item)
            if len(title) == 2: # both a Chinese and an English title are present
data.append(title[0])
data.append((title[1].replace("/","")))
else:
data.append(title[0])
data.append("None")
score = re.findall(r'<span.* property=.*>(.*)</span>', item)[0]
data.append(score)
judge = re.findall(r'<span>(\d*)人评价</span>', item)[0]
data.append(judge)
inq = re.findall(r'<span class="inq">(.*?)</span>',item)
if len(inq) != 0:
data.append(inq[0])
else:
data.append("None")
            # IndexError: list index out of range happens when the list is empty and list[0] is accessed
bd = re.findall(r'<p class="">(.*?)</p>', item, re.S)[0]
            bd = re.sub(r'\n', ' ', bd)  # drop newline characters
            bd = re.sub(r'<br(\S+)?/>(\S+)?', '', bd)  # strip <br> tags
            data.append(bd.strip())  # trim surrounding whitespace
# print(link)
# print(title)
# print(img)
# print(bd)
# print(score)
# print(judge)
datalist.append(data)
return datalist
def saveData(datelist,savepath):
    print("Scraping...")
    # style_compression=0 controls whether Excel cell styles may be modified
    document = xlwt.Workbook(encoding="utf-8", style_compression=0)
    # the second argument allows a cell value to be overwritten
    sheet = document.add_sheet('movies', cell_overwrite_ok=True)
    col = ("Rank","Link","Poster","Chinese title","English title","Score","Votes","Quote","Summary")
for i in range(0,9):
sheet.write(0,i,col[i])
for j in range(0,250):
        print(f"Saving record {j+1}")
movie_data = datelist[j]
sheet.write(j+1,0,j+1)
for n in range(0,8):
sheet.write(j+1,n+1,movie_data[n])
document.save(savepath)
if __name__ == '__main__':
main()
|
the-stack_106_26818 | # Circuit Playground Express Hot Potato
#
# Author: Carter Nelson
# MIT License (https://opensource.org/licenses/MIT)
import time
import random
import math
import board
from analogio import AnalogIn
from adafruit_circuitplayground.express import cpx
# This brings in the song to play
import melody
number_of_notes = len(melody.melody)
SHAKE_THRESHOLD = 30
def get_total_accel():
# Compute total acceleration
X = 0
Y = 0
Z = 0
for count in range(10):
x,y,z = cpx.acceleration
X = X + x
Y = Y + y
Z = Z + z
time.sleep(0.001)
X = X / 10
Y = Y / 10
Z = Z / 10
return math.sqrt(X*X + Y*Y + Z*Z)
# Seed the random function with noise
a4 = AnalogIn(board.A4)
a5 = AnalogIn(board.A5)
a6 = AnalogIn(board.A6)
a7 = AnalogIn(board.A7)
seed = a4.value
seed += a5.value
seed += a6.value
seed += a7.value
random.seed(seed)
# Set the NeoPixels all red
cpx.pixels.fill(0xFF0000)
# Loop forever
while True:
# Wait for shaking
while get_total_accel() < SHAKE_THRESHOLD:
pass # do nothing
# Game length
game_length = random.randint(number_of_notes, 6*number_of_notes)
# Game play with melody
note_to_play = 0
for game_step in range(game_length):
# Add some flare using the NeoPixels
cpx.pixels.fill(0)
cpx.pixels[random.randint(0,9)] = ( random.randint(0,255),
random.randint(0,255),
random.randint(0,255) )
# Play the note
note_duration = 1 / melody.tempo[note_to_play]
note = melody.melody[note_to_play]
note = note if note <= 3500 else 3500
if note == 0:
time.sleep(note_duration)
else:
cpx.play_tone(note, note_duration)
# Increment and check the note counter
note_to_play += 1
note_to_play = note_to_play if note_to_play < number_of_notes else 0
#
# GAME OVER
#
# Set the NeoPixels all red
cpx.pixels.fill(0xFF0000)
# Delay a bit so can't just reset with a shake
time.sleep(2) |
the-stack_106_26819 | import os
import cv2 as cv
import matplotlib.pylab as plt
import numpy as np
from console_progressbar import ProgressBar
from scipy.interpolate import interp1d
from scipy.signal import gaussian, convolve
from config import num_classes
def compute_class_prior(do_plot=False):
categories_folder = 'data/instance-level_human_parsing/Training/Category_ids'
names = [f for f in os.listdir(categories_folder) if f.lower().endswith('.png')]
num_samples = len(names)
prior_prob = np.zeros(num_classes)
pb = ProgressBar(total=num_samples, prefix='Compute class prior', suffix='', decimals=3, length=50, fill='=')
for i in range(num_samples):
name = names[i]
filename = os.path.join(categories_folder, name)
category = np.ravel(cv.imread(filename, 0))
counts = np.bincount(category)
idxs = np.nonzero(counts)[0]
prior_prob[idxs] += counts[idxs]
pb.print_progress_bar(i + 1)
prior_prob = prior_prob / (1.0 * np.sum(prior_prob))
# Save
np.save(os.path.join(data_dir, "prior_prob.npy"), prior_prob)
if do_plot:
plt.hist(prior_prob, bins=100)
plt.yscale("log")
plt.show()
def smooth_class_prior(sigma=5, do_plot=False):
prior_prob = np.load(os.path.join(data_dir, "prior_prob.npy"))
    # add an epsilon to prior prob to avoid 0 values and possible NaN
prior_prob += 1E-3 * np.min(prior_prob)
# renormalize
prior_prob = prior_prob / (1.0 * np.sum(prior_prob))
# Smooth with gaussian
f = interp1d(np.arange(prior_prob.shape[0]), prior_prob)
xx = np.linspace(0, prior_prob.shape[0] - 1, 1000)
yy = f(xx)
window = gaussian(2000, sigma) # 2000 pts in the window, sigma=5
smoothed = convolve(yy, window / window.sum(), mode='same')
fout = interp1d(xx, smoothed)
prior_prob_smoothed = np.array([fout(i) for i in range(prior_prob.shape[0])])
prior_prob_smoothed = prior_prob_smoothed / np.sum(prior_prob_smoothed)
# Save
file_name = os.path.join(data_dir, "prior_prob_smoothed.npy")
np.save(file_name, prior_prob_smoothed)
if do_plot:
plt.plot(prior_prob)
plt.plot(prior_prob_smoothed, "g--")
plt.plot(xx, smoothed, "r-")
plt.yscale("log")
plt.show()
def compute_prior_factor(gamma=0.5, alpha=1, do_plot=False):
file_name = os.path.join(data_dir, "prior_prob_smoothed.npy")
prior_prob_smoothed = np.load(file_name)
u = np.ones_like(prior_prob_smoothed)
u = u / np.sum(1.0 * u)
prior_factor = (1 - gamma) * prior_prob_smoothed + gamma * u
prior_factor = np.power(prior_factor, -alpha)
# renormalize
prior_factor = prior_factor / (np.sum(prior_factor * prior_prob_smoothed))
file_name = os.path.join(data_dir, "prior_factor.npy")
np.save(file_name, prior_factor)
if do_plot:
plt.plot(prior_factor)
plt.yscale("log")
plt.show()
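# Toy illustration (added; not used by the pipeline above): the same prior-factor
# reweighting applied to a hand-made 3-class distribution. Rare classes receive
# weights > 1 and common classes < 1, and the weighted mean under the smoothed
# prior stays 1 after renormalization.
def _prior_factor_toy_example(gamma=0.5, alpha=1):
    prior = np.array([0.7, 0.2, 0.1])          # pretend smoothed class prior
    u = np.ones_like(prior) / prior.size       # uniform mix-in
    factor = np.power((1 - gamma) * prior + gamma * u, -alpha)
    factor = factor / np.sum(factor * prior)   # renormalize as above
    return factor                              # approx. [0.75, 1.46, 1.80]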
if __name__ == '__main__':
data_dir = 'data/'
do_plot = True
compute_class_prior(do_plot=True)
smooth_class_prior(do_plot=True)
compute_prior_factor(do_plot=True)
|
the-stack_106_26820 | from zipfile import ZipFile
import io
from io import StringIO
from urllib import request
import csv
from .interface import ServiceInterface
from .logger import debug
class CsvZipServiceInterface(ServiceInterface):
TYPE = "csv-zip"
@classmethod
def key(cls):
return cls.TYPE.lower()
def __init__(self, name, url=None, filename=None, **kargs):
super(CsvZipServiceInterface, self).__init__(**kargs)
self.url = url
self.filename = filename
self.loaded = False
self.name = name
self.update_url = url
self.data = {}
self.name_data = {}
def update(self, **kargs):
if self.update_url is not None:
debug("Updating %s from url: %s" % (self.name, self.url))
r = request.urlopen(self.update_url)
self.load_from_file(r)
return True
return False
def load_from_url(self):
if self.url is not None:
debug("Loading %s from file: %s" % (self.name, self.url))
r = request.urlopen(self.url)
return self.load_from_file(r)
return self.name_data
def load_from_filename(self):
if self.filename is not None:
debug("Loading %s from file: %s" % (self.name, self.filename))
r = open(self.filename, 'rb')
return self.load_from_file(r)
return self.name_data
def load_from_file(self, file_obj):
raw_zip_bytes = file_obj.read()
zip_bytes = io.BytesIO(raw_zip_bytes)
zf = ZipFile(zip_bytes)
names = zf.namelist()
for name in names:
if name not in self.name_data:
self.name_data[name] = {}
for name in names:
raw_str_data = str(zf.read(name).decode('utf-8')).splitlines()
raw_csv_data = StringIO("\n".join(raw_str_data))
rows = [row for row in csv.reader(raw_csv_data)]
self.name_data[name] = dict([(d, r) for r, d in rows])
return self.name_data
def load(self, **kargs):
if self.filename is not None:
self.load_from_filename()
self.loaded = True
elif self.url is not None:
self.load_from_url()
self.loaded = True
return self.name_data
def check(self, domain, **kargs):
debug("Checking for %s in: %s" % (domain, self.name))
root_domain = domain
        if len(root_domain.split('.')) < 2:  # bail out on inputs that are not a dotted domain
return {}
elif len(root_domain.split('.')) > 2:
root_domain = ".".join(root_domain.split('.')[-2:])
for name, info_dict in self.name_data.items():
if root_domain in info_dict:
return {"rank": info_dict[root_domain],
"root_domain": root_domain}
return {}
@classmethod
def parse_toml(cls, toml_dict):
bt = toml_dict.get('type', None)
if bt is None or bt != cls.key():
raise Exception('Attempting to parse the wrong block type')
name = toml_dict.get('name', 'unknown')
filename = toml_dict.get('filename', None)
url = toml_dict.get('url', None)
if url is None and filename is None:
raise Exception('Must specify a valid source')
return cls(name, url=url, filename=filename)
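# Minimal self-contained sketch (added for illustration; not exercised by the class
# above): how a zip of "rank,domain"-style CSVs unpacks into the
# {filename: {domain: rank}} shape that load_from_file() builds. It uses only the
# stdlib and in-memory data, so it runs without a real top-sites download.
def _csv_zip_shape_demo():
    buf = io.BytesIO()
    with ZipFile(buf, "w") as zout:
        zout.writestr("top.csv", "1,example.com\n2,example.org\n")
    with ZipFile(io.BytesIO(buf.getvalue())) as zin:
        raw = zin.read("top.csv").decode("utf-8")
    rows = [row for row in csv.reader(StringIO(raw))]
    # Same shape as self.name_data in load_from_file(): filename -> {domain: rank}
    return {"top.csv": dict((domain, rank) for rank, domain in rows)}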
|
the-stack_106_26821 | import logging
import queue
import traceback
from http.server import BaseHTTPRequestHandler, HTTPServer
from multiprocessing import Process, Queue
from .pact_request_handler import PactRequestHandler
_providers = {}
log = logging.getLogger(__name__)
def getMockServer(pact):
if pact.provider.name not in _providers:
_providers[pact.provider.name] = Server(pact)
return _providers[pact.provider.name]
class Server:
def __init__(self, pact):
self.pact = pact
self.interactions = Queue()
self.results = Queue()
self.process = Process(target=run_server, args=(pact, self.interactions, self.results))
self.process.start()
def setup(self, interactions):
for interaction in interactions:
self.interactions.put_nowait(interaction)
def verify(self):
while not self.results.empty():
result = self.results.get()
if result['status'] == 'error':
raise MockServer.Error(result['reason'])
if result['status'] == 'failed':
raise AssertionError(result['reason'])
def terminate(self):
self.process.terminate()
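    # Typical lifecycle, sketched here for orientation (hypothetical caller code,
    # not part of this module): a test registers interactions, exercises the
    # consumer against http://localhost:<pact.port>, then checks the outcome.
    #
    #   server = getMockServer(pact)
    #   server.setup(pact_interactions)   # queue expectations for the child process
    #   ...                               # consumer makes real HTTP calls
    #   server.verify()                   # raises on mismatches from the results queue
    #   server.terminate()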
def run_server(pact, interactions, results):
httpd = MockServer(pact, interactions, results)
httpd.serve_forever()
class MockServer(HTTPServer):
def __init__(self, pact, interactions, results):
self.pact = pact
self.incoming_interactions = interactions
self.outgoing_results = results
server_address = ('', pact.port)
super().__init__(server_address, MockHTTPRequestHandler)
self.interactions = []
self.log = logging.getLogger(__name__ + '.' + pact.provider.name)
self.log.addHandler(logging.FileHandler(f'{pact.log_dir}/{pact.provider.name}.log'))
self.log.setLevel(logging.DEBUG)
self.log.propagate = False
class Error(Exception):
pass
class MockHTTPRequestHandler(BaseHTTPRequestHandler, PactRequestHandler):
def __init__(self, request, client_address, server):
self.response_status_code = None
self.response_headers = {}
self.response_body = None
PactRequestHandler.__init__(self, server.pact)
BaseHTTPRequestHandler.__init__(self, request, client_address, server)
def error_result(self, message, content='', status='error', status_code=500):
self.server.outgoing_results.put({'status': status, 'reason': message})
self.response_status_code = status_code
self.response_headers = {'Content-Type': 'text/plain; charset=utf-8'}
self.response_body = (content or message).encode('utf8')
def run_request(self, method):
try:
self.body = None
for header in self.headers:
if header.lower() == 'content-length':
self.body = self.rfile.read(int(self.headers[header]))
self.validate_request(method)
except AssertionError as e:
self.error_result(str(e))
except Exception as e:
self.error_result(f'Internal Error: {e}', traceback.format_exc())
self.send_response(self.response_status_code)
for header in self.response_headers:
self.send_header(header, self.response_headers[header])
self.end_headers()
if self.response_body:
self.wfile.write(self.response_body)
def get_interaction(self, path):
try:
interaction = self.server.incoming_interactions.get(False)
except queue.Empty:
raise AssertionError(f'Request at {path} received but no interaction registered') from None
return interaction
def handle_success(self, interaction):
self.server.outgoing_results.put({'status': 'success'})
def handle_failure(self, reason):
self.error_result(reason, status='failed', status_code=418)
def respond_for_interaction(self, interaction):
self.response_status_code = interaction['response']['status']
if 'headers' in interaction['response']:
self.response_headers.update(interaction['response']['headers'])
if 'body' in interaction['response']:
self.response_body = self.handle_response_encoding(interaction['response'], self.response_headers)
def do_DELETE(self):
self.run_request('DELETE')
def do_GET(self):
self.run_request('GET')
def do_HEAD(self):
self.run_request('HEAD')
def do_POST(self):
self.run_request('POST')
def do_PUT(self):
self.run_request('PUT')
def log_message(self, format, *args):
self.server.log.info("MockServer %s\n" % format % args)
|
the-stack_106_26822 |
import os
import queue
import shlex
import select
import threading as mt
import subprocess as sp
from .constants import RUNNING, DONE, FAILED
from .misc import is_string
# ------------------------------------------------------------------------------
#
def sh_callout(cmd, stdout=True, stderr=True, shell=False):
'''
call a shell command, return `[stdout, stderr, retval]`.
'''
# convert string into arg list if needed
if not shell and is_string(cmd): cmd = shlex.split(cmd)
if stdout: stdout = sp.PIPE
else : stdout = None
if stderr: stderr = sp.PIPE
else : stderr = None
p = sp.Popen(cmd, stdout=stdout, stderr=stderr, shell=shell)
if not stdout and not stderr:
ret = p.wait()
else:
stdout, stderr = p.communicate()
ret = p.returncode
    # streams that were not captured come back as None, so guard the decode
    out = stdout.decode("utf-8") if stdout is not None else ''
    err = stderr.decode("utf-8") if stderr is not None else ''
    return out, err, ret
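# Usage sketch (added; command strings are illustrative):
#
#   out, err, ret = sh_callout('uname -a')                 # args split via shlex
#   out, err, ret = sh_callout('echo $HOME', shell=True)   # let the shell expand
#   assert ret == 0
#
# With stdout/stderr capture disabled, only the return code is meaningful.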
# ------------------------------------------------------------------------------
#
def sh_callout_bg(cmd, stdout=None, stderr=None, shell=False):
'''
call a shell command in the background. Do not attempt to pipe STDOUT/ERR,
but only support writing to named files.
'''
# pipes won't work - see sh_callout_async
if stdout == sp.PIPE: raise ValueError('stdout pipe unsupported')
if stderr == sp.PIPE: raise ValueError('stderr pipe unsupported')
    # open file descriptors for I/O, if needed
if is_string(stdout): stdout = open(stdout, 'w')
if is_string(stderr): stderr = open(stderr, 'w')
# convert string into arg list if needed
if not shell and is_string(cmd): cmd = shlex.split(cmd)
sp.Popen(cmd, stdout=stdout, stderr=stderr, shell=shell)
return
# ------------------------------------------------------------------------------
#
def sh_callout_async(cmd, stdin=True, stdout=True, stderr=True, shell=False):
'''
Run a command, and capture stdout/stderr if so flagged. The call will
    return a PROC object instance on which the captured output can be retrieved
line by line (I/O is line buffered). When the process is done, a `None`
will be returned on the I/O queues.
Line breaks are stripped.
stdout/stderr: True [default], False, string
- False : discard I/O
- True : capture I/O as queue [default]
- string: capture I/O as queue, also write to named file
shell: True, False [default]
- pass to popen
PROC:
- PROC.stdout : `queue.Queue` instance delivering stdout lines
- PROC.stderr : `queue.Queue` instance delivering stderr lines
- PROC.state : ru.RUNNING, ru.DONE, ru.FAILED
- PROC.rc : returncode (None while ru.RUNNING)
- PROC.stdout_filename: name of stdout file (when available)
- PROC.stderr_filename: name of stderr file (when available)
'''
    # NOTE: Python screws up stdio buffering when threads are used,
    #       *even if the threads do not perform stdio*. It's possible that the
    #       logging module interferes, too. Either way, debugging this proved
    #       fruitless, so we give up: this method does not work for threaded
    #       python applications.
assert(False), 'this is broken for python apps'
# --------------------------------------------------------------------------
class _P(object):
'''
internal representation of a process
'''
# ----------------------------------------------------------------------
def __init__(self, cmd, stdin, stdout, stderr, shell):
cmd = cmd.strip()
self._in_c = bool(stdin) # flag stdin capture
self._out_c = bool(stdout) # flag stdout capture
self._err_c = bool(stderr) # flag stderr capture
self._in_r , self._in_w = os.pipe() # put stdin to child
self._out_r, self._out_w = os.pipe() # get stdout from child
self._err_r, self._err_w = os.pipe() # get stderr from child
self._in_o = os.fdopen(self._in_r) # file object for in ep
self._out_o = os.fdopen(self._out_r) # file object for out ep
self._err_o = os.fdopen(self._err_r) # file object for err ep
self._in_q = queue.Queue() # get stdin from parent
self._out_q = queue.Queue() # put stdout to parent
self._err_q = queue.Queue() # put stderr to parent
if is_string(stdout): self._out_f = open(stdout, 'w')
else : self._out_f = None
if is_string(stderr): self._err_f = open(stderr, 'w')
else : self._err_f = None
self.state = RUNNING
self._proc = sp.Popen(cmd, stdin=self._in_r,
stdout=self._out_w,
stderr=self._err_w,
shell=shell,
bufsize=1)
t = mt.Thread(target=self._watch)
t.daemon = True
t.start()
self.rc = None # return code
@property
def stdin(self):
if not self._in_c:
raise RuntimeError('stdin not captured')
return self._in_q
@property
def stdout(self):
if not self._out_c:
raise RuntimeError('stdout not captured')
return self._out_q
@property
def stderr(self):
if not self._err_c:
raise RuntimeError('stderr not captured')
return self._err_q
@property
def stdout_filename(self):
if not self._out_f:
raise RuntimeError('stdout not recorded')
return self._out_f.name
@property
def stderr_filename(self):
if not self._err_f:
raise RuntimeError('stderr not recorded')
return self._err_f.name
def kill(self):
self._proc.terminate()
# ----------------------------------------------------------------------
def _watch(self):
poller = select.poll()
poller.register(self._out_r, select.POLLIN | select.POLLHUP)
poller.register(self._err_r, select.POLLIN | select.POLLHUP)
# try forever to read stdin, stdout and stderr, stop only when
# either signals that process (parent or child) died
while True:
                # check for input; forward it to the child's stdin pipe
                try:
                    data = self._in_q.get_nowait()
                except queue.Empty:
                    data = None
                if data:
                    os.write(self._in_w, data if isinstance(data, bytes)
                                              else data.encode('utf-8'))
active = False
fds = poller.poll(100) # timeout configurable (ms)
for fd,mode in fds:
if mode & select.POLLHUP:
# fd died - grab data from other fds
continue
if fd == self._out_r:
o_in = self._out_o
q_out = self._out_q
f_out = self._out_f
elif fd == self._err_r:
o_in = self._err_o
q_out = self._err_q
f_out = self._err_f
line = o_in.readline() # `bufsize=1` in `popen`
if line:
# found valid data (active)
active = True
if q_out: q_out.put(line.rstrip('\n'))
if f_out: f_out.write(line)
# no data received - check process health
if not active and self._proc.poll() is not None:
# process is dead
self.rc = self._proc.returncode
if self.rc == 0: self.state = DONE
else : self.state = FAILED
if self._out_q: self._out_q.put(None) # signal EOF
if self._err_q: self._err_q.put(None) # signal EOF
if self._out_q: self._out_q.join() # ensure reads
if self._err_q: self._err_q.join() # ensure reads
return # finishes thread
# --------------------------------------------------------------------------
return _P(cmd=cmd, stdin=stdin, stdout=stdout, stderr=stderr, shell=shell)
# ------------------------------------------------------------------------------
|
the-stack_106_26827 | import os
import sys
Plugins = []
debug = True
def default_params():
return {'method':'GET','page':1}
def LoadPlugins(destdir='plugins'):
ss = [ f for f in os.listdir(destdir) if os.path.isfile(os.path.join(destdir,f)) and f!='__init__.py' ]
sys.path.insert( 0, destdir)
for s in ss:
print('Found plugin', s)
p = __import__(s.split('.')[0])
Plugins.append(p)
print(Plugins)
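# Hypothetical usage (added for illustration): given plugins/foo.py exposing a
# module-level API, LoadPlugins makes the directory importable and collects each
# module into the global Plugins list:
#
#   LoadPlugins('plugins')      # prints each discovered file
#   for plugin in Plugins:
#       params = default_params()
#       # ... dispatch to plugin-specific entry points here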
|
the-stack_106_26828 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import io
import os
import argparse
import pandas as pd
from PIL import Image
def pil_loader(image_path):
with open(image_path, "rb") as f:
image_bytes = f.read()
image = Image.open(io.BytesIO(image_bytes)).convert('RGB')
return image
def preprocess_cc(root_dir, caption_tsv, download_report_tsv):
"""
Not all the images in cc3m dataset downloaded can be opened by PIL. This function attempts to read
each image specified by the tsv file and filter out the ones that cannot be opened. The output is
processed_labels.csv that will be required in ConceptualCaptions dataset above.
"""
train_col_names = ["caption", "url"]
download_col_names = ["filename", "split", "type", "size", "status", "url"]
caption_file = os.path.join(root_dir, caption_tsv)
download_file = os.path.join(root_dir, download_report_tsv)
captions_df = pd.read_csv(caption_file, sep="\t", quotechar='"', names=train_col_names)
download_df = pd.read_csv(download_file, sep="\t", quotechar='"', names=download_col_names)[
["filename", "url"]]
data_df = captions_df.merge(download_df, on="url", how="inner")[["filename", "caption"]]
invalid_indices = []
for i in range(len(data_df)):
if i % 5000 == 0:
print("Loading {} / {}".format(i, len(data_df)))
try:
filename, caption = data_df.iloc[i]
image_path = os.path.join(root_dir, filename)
sample = pil_loader(image_path)
        except Exception:  # skip images PIL cannot open or decode
invalid_indices.append(i)
print("Number of invalid indices: {}".format(len(invalid_indices)))
data_df = data_df.drop(index=invalid_indices)
data_df.reset_index(drop=True, inplace=True)
label_dir = os.path.join(root_dir, "processed_labels.csv")
data_df.to_csv(label_dir)
def preprocess_train(root_dir):
preprocess_cc(root_dir,
"Train-GCC-training.tsv",
"downloaded_training_report.tsv")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Conceptual Captions Preprocessing', add_help=False)
parser.add_argument('--cc_root', default='/data/cc')
args = parser.parse_args()
preprocess_train(args.cc_root)
|
the-stack_106_26829 | import numpy as np
import networkx as nx
import opensfm.reconstruction
def test_triangulate_track_equirectangular():
graph = nx.Graph()
graph.add_node('im1', bipartite=0)
graph.add_node('im2', bipartite=0)
graph.add_node('1', bipartite=1)
graph.add_edge('im1', '1', feature=(0,0))
graph.add_edge('im2', '1', feature=(-0.1, 0))
reconstruction = {
"cameras": {
"theta": {
"projection_type": "equirectangular"
}
},
"shots" : {
'im1': {
"camera": "theta",
"rotation": [0.0, 0.0, 0.0],
"translation": [0.0, 0.0, 0.0],
},
'im2': {
"camera": "theta",
"rotation": [0, 0, 0.0],
"translation": [-1, 0, 0.0],
},
},
"points" : {
},
}
opensfm.reconstruction.triangulate_track(
'1', graph, reconstruction, {}, 0.01, 2.0)
assert '1' in reconstruction['points']
p = reconstruction['points']['1']['coordinates']
assert np.allclose(p, [0, 0, 1.3763819204711])
if __name__ == "__main__":
test_triangulate_track_equirectangular()
|
the-stack_106_26832 | import sys
import copy
import types
import inspect
__all__ = ['dataclass',
'field',
'Field',
'FrozenInstanceError',
'InitVar',
'MISSING',
# Helper functions.
'fields',
'asdict',
'astuple',
'make_dataclass',
'replace',
'is_dataclass',
]
# Conditions for adding methods. The boxes indicate what action the
# dataclass decorator takes. For all of these tables, when I talk
# about init=, repr=, eq=, order=, unsafe_hash=, or frozen=, I'm
# referring to the arguments to the @dataclass decorator. When
# checking if a dunder method already exists, I mean check for an
# entry in the class's __dict__. I never check to see if an
# attribute is defined in a base class.
# Key:
# +=========+=========================================+
# + Value | Meaning |
# +=========+=========================================+
# | <blank> | No action: no method is added. |
# +---------+-----------------------------------------+
# | add | Generated method is added. |
# +---------+-----------------------------------------+
# | raise | TypeError is raised. |
# +---------+-----------------------------------------+
# | None | Attribute is set to None. |
# +=========+=========================================+
# __init__
#
# +--- init= parameter
# |
# v | | |
# | no | yes | <--- class has __init__ in __dict__?
# +=======+=======+=======+
# | False | | |
# +-------+-------+-------+
# | True | add | | <- the default
# +=======+=======+=======+
# __repr__
#
# +--- repr= parameter
# |
# v | | |
# | no | yes | <--- class has __repr__ in __dict__?
# +=======+=======+=======+
# | False | | |
# +-------+-------+-------+
# | True | add | | <- the default
# +=======+=======+=======+
# __setattr__
# __delattr__
#
# +--- frozen= parameter
# |
# v | | |
# | no | yes | <--- class has __setattr__ or __delattr__ in __dict__?
# +=======+=======+=======+
# | False | | | <- the default
# +-------+-------+-------+
# | True | add | raise |
# +=======+=======+=======+
# Raise because not adding these methods would break the "frozen-ness"
# of the class.
# __eq__
#
# +--- eq= parameter
# |
# v | | |
# | no | yes | <--- class has __eq__ in __dict__?
# +=======+=======+=======+
# | False | | |
# +-------+-------+-------+
# | True | add | | <- the default
# +=======+=======+=======+
# __lt__
# __le__
# __gt__
# __ge__
#
# +--- order= parameter
# |
# v | | |
# | no | yes | <--- class has any comparison method in __dict__?
# +=======+=======+=======+
# | False | | | <- the default
# +-------+-------+-------+
# | True | add | raise |
# +=======+=======+=======+
# Raise because to allow this case would interfere with using
# functools.total_ordering.
# __hash__
# +------------------- unsafe_hash= parameter
# | +----------- eq= parameter
# | | +--- frozen= parameter
# | | |
# v v v | | |
# | no | yes | <--- class has explicitly defined __hash__
# +=======+=======+=======+========+========+
# | False | False | False | | | No __eq__, use the base class __hash__
# +-------+-------+-------+--------+--------+
# | False | False | True | | | No __eq__, use the base class __hash__
# +-------+-------+-------+--------+--------+
# | False | True | False | None | | <-- the default, not hashable
# +-------+-------+-------+--------+--------+
# | False | True | True | add | | Frozen, so hashable, allows override
# +-------+-------+-------+--------+--------+
# | True | False | False | add | raise | Has no __eq__, but hashable
# +-------+-------+-------+--------+--------+
# | True | False | True | add | raise | Has no __eq__, but hashable
# +-------+-------+-------+--------+--------+
# | True | True | False | add | raise | Not frozen, but hashable
# +-------+-------+-------+--------+--------+
# | True | True | True | add | raise | Frozen, so hashable
# +=======+=======+=======+========+========+
# For boxes that are blank, __hash__ is untouched and therefore
# inherited from the base class. If the base is object, then
# id-based hashing is used.
# Note that a class may already have __hash__=None if it specified an
# __eq__ method in the class body (not one that was created by
# @dataclass).
# See _hash_action (below) for a coded version of this table.
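# A compact illustration of the table above (added; mirrors the behaviour the
# decorator produces with its default and frozen/eq variants):
#
#   @dataclass                      # eq=True, frozen=False -> __hash__ set to None
#   class A: x: int                 # instances are unhashable
#
#   @dataclass(frozen=True)         # eq=True, frozen=True  -> __hash__ generated
#   class B: x: int                 # hash(B(1)) == hash(B(1))
#
#   @dataclass(eq=False)            # no __eq__ added -> object.__hash__ inherited
#   class C: x: int                 # id-based hashing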
# Raised when an attempt is made to modify a frozen class.
class FrozenInstanceError(AttributeError): pass
# A sentinel object for default values to signal that a
# default factory will be used.
# This is given a nice repr() which will appear in the function
# signature of dataclasses' constructors.
class _HAS_DEFAULT_FACTORY_CLASS:
def __repr__(self):
return '<factory>'
_HAS_DEFAULT_FACTORY = _HAS_DEFAULT_FACTORY_CLASS()
# A sentinel object to detect if a parameter is supplied or not. Use
# a class to give it a better repr.
class _MISSING_TYPE:
pass
MISSING = _MISSING_TYPE()
# Since most per-field metadata will be unused, create an empty
# read-only proxy that can be shared among all fields.
_EMPTY_METADATA = types.MappingProxyType({})
# Markers for the various kinds of fields and pseudo-fields.
_FIELD = object() # An actual field.
_FIELD_CLASSVAR = object() # Not a field, but a ClassVar.
_FIELD_INITVAR = object() # Not a field, but an InitVar.
# The name of an attribute on the class where we store the Field
# objects. Also used to check if a class is a Data Class.
_FIELDS = '__dataclass_fields__'
# The name of an attribute on the class that stores the parameters to
# @dataclass.
_PARAMS = '__dataclass_params__'
# The name of the function, that if it exists, is called at the end of
# __init__.
_POST_INIT_NAME = '__post_init__'
class _InitVarMeta(type):
def __getitem__(self, params):
return self
class InitVar(metaclass=_InitVarMeta):
pass
# Instances of Field are only ever created from within this module,
# and only from the field() function, although Field instances are
# exposed externally as (conceptually) read-only objects.
# name and type are filled in after the fact, not in __init__. They're
# not known at the time this class is instantiated, but it's
# convenient if they're available later.
# When cls._FIELDS is filled in with a list of Field objects, the name
# and type fields will have been populated.
class Field:
__slots__ = ('name',
'type',
'default',
'default_factory',
'repr',
'hash',
'init',
'compare',
'metadata',
'_field_type', # Private: not to be used by user code.
)
def __init__(self, default, default_factory, init, repr, hash, compare,
metadata):
self.name = None
self.type = None
self.default = default
self.default_factory = default_factory
self.init = init
self.repr = repr
self.hash = hash
self.compare = compare
self.metadata = (_EMPTY_METADATA
if metadata is None or len(metadata) == 0 else
types.MappingProxyType(metadata))
self._field_type = None
def __repr__(self):
return ('Field('
f'name={self.name!r},'
f'type={self.type},'
f'default={self.default},'
f'default_factory={self.default_factory},'
f'init={self.init},'
f'repr={self.repr},'
f'hash={self.hash},'
f'compare={self.compare},'
f'metadata={self.metadata}'
')')
# This is used to support the PEP 487 __set_name__ protocol in the
# case where we're using a field that contains a descriptor as a
    # default value. For details on __set_name__, see
# https://www.python.org/dev/peps/pep-0487/#implementation-details.
# Note that in _process_class, this Field object is overwritten with
# the default value, so the end result is a descriptor that had
# __set_name__ called on it at the right time.
def __set_name__(self, owner, name):
func = getattr(type(self.default), '__set_name__', None)
if func:
# There is a __set_name__ method on the descriptor,
# call it.
func(self.default, owner, name)
class _DataclassParams:
__slots__ = ('init',
'repr',
'eq',
'order',
'unsafe_hash',
'frozen',
)
def __init__(self, init, repr, eq, order, unsafe_hash, frozen):
self.init = init
self.repr = repr
self.eq = eq
self.order = order
self.unsafe_hash = unsafe_hash
self.frozen = frozen
def __repr__(self):
return ('_DataclassParams('
f'init={self.init},'
f'repr={self.repr},'
f'eq={self.eq},'
f'order={self.order},'
f'unsafe_hash={self.unsafe_hash},'
f'frozen={self.frozen}'
')')
# This function is used instead of exposing Field creation directly,
# so that a type checker can be told (via overloads) that this is a
# function whose type depends on its parameters.
def field(*, default=MISSING, default_factory=MISSING, init=True, repr=True,
hash=None, compare=True, metadata=None):
"""Return an object to identify dataclass fields.
default is the default value of the field. default_factory is a
0-argument function called to initialize a field's value. If init
is True, the field will be a parameter to the class's __init__()
function. If repr is True, the field will be included in the
object's repr(). If hash is True, the field will be included in
the object's hash(). If compare is True, the field will be used in
comparison functions. metadata, if specified, must be a mapping
which is stored but not otherwise examined by dataclass.
It is an error to specify both default and default_factory.
"""
if default is not MISSING and default_factory is not MISSING:
raise ValueError('cannot specify both default and default_factory')
return Field(default, default_factory, init, repr, hash, compare,
metadata)
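# Usage sketch for field() (added; matches the semantics described above):
#
#   @dataclass
#   class Inventory:
#       items: list = field(default_factory=list)    # fresh list per instance
#       secret: str = field(default='', repr=False)  # kept out of repr()
#
# Passing both default= and default_factory= raises ValueError, as enforced in
# the body above.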
def _tuple_str(obj_name, fields):
# Return a string representing each field of obj_name as a tuple
# member. So, if fields is ['x', 'y'] and obj_name is "self",
# return "(self.x,self.y)".
# Special case for the 0-tuple.
if not fields:
return '()'
# Note the trailing comma, needed if this turns out to be a 1-tuple.
return f'({",".join([f"{obj_name}.{f.name}" for f in fields])},)'
def _create_fn(name, args, body, *, globals=None, locals=None,
return_type=MISSING):
# Note that we mutate locals when exec() is called. Caller beware!
# The only callers are internal to this module, so no worries
# about external callers.
if locals is None:
locals = {}
return_annotation = ''
if return_type is not MISSING:
locals['_return_type'] = return_type
return_annotation = '->_return_type'
args = ','.join(args)
body = '\n'.join(f' {b}' for b in body)
# Compute the text of the entire function.
txt = f'def {name}({args}){return_annotation}:\n{body}'
exec(txt, globals, locals)
return locals[name]
def _field_assign(frozen, name, value, self_name):
# If we're a frozen class, then assign to our fields in __init__
# via object.__setattr__. Otherwise, just use a simple
# assignment.
# self_name is what "self" is called in this function: don't
# hard-code "self", since that might be a field name.
if frozen:
return f'object.__setattr__({self_name},{name!r},{value})'
return f'{self_name}.{name}={value}'
def _field_init(f, frozen, globals, self_name):
# Return the text of the line in the body of __init__ that will
# initialize this field.
default_name = f'_dflt_{f.name}'
if f.default_factory is not MISSING:
if f.init:
# This field has a default factory. If a parameter is
# given, use it. If not, call the factory.
globals[default_name] = f.default_factory
value = (f'{default_name}() '
f'if {f.name} is _HAS_DEFAULT_FACTORY '
f'else {f.name}')
else:
# This is a field that's not in the __init__ params, but
# has a default factory function. It needs to be
# initialized here by calling the factory function,
# because there's no other way to initialize it.
# For a field initialized with a default=defaultvalue, the
# class dict just has the default value
# (cls.fieldname=defaultvalue). But that won't work for a
# default factory, the factory must be called in __init__
# and we must assign that to self.fieldname. We can't
# fall back to the class dict's value, both because it's
# not set, and because it might be different per-class
# (which, after all, is why we have a factory function!).
globals[default_name] = f.default_factory
value = f'{default_name}()'
else:
# No default factory.
if f.init:
if f.default is MISSING:
# There's no default, just do an assignment.
value = f.name
elif f.default is not MISSING:
globals[default_name] = f.default
value = f.name
else:
# This field does not need initialization. Signify that to
# the caller by returning None.
return None
# Only test this now, so that we can create variables for the
# default. However, return None to signify that we're not going
# to actually do the assignment statement for InitVars.
if f._field_type == _FIELD_INITVAR:
return None
# Now, actually generate the field assignment.
return _field_assign(frozen, f.name, value, self_name)
def _init_param(f):
# Return the __init__ parameter string for this field.
# For example, the equivalent of 'x:int=3' (except instead of 'int',
# reference a variable set to int, and instead of '3', reference a
# variable set to 3).
if f.default is MISSING and f.default_factory is MISSING:
# There's no default, and no default_factory, just
# output the variable name and type.
default = ''
elif f.default is not MISSING:
# There's a default, this will be the name that's used to look it up.
default = f'=_dflt_{f.name}'
elif f.default_factory is not MISSING:
# There's a factory function. Set a marker.
default = '=_HAS_DEFAULT_FACTORY'
return f'{f.name}:_type_{f.name}{default}'
def _init_fn(fields, frozen, has_post_init, self_name):
# fields contains both real fields and InitVar pseudo-fields.
# Make sure we don't have fields without defaults following fields
# with defaults. This actually would be caught when exec-ing the
# function source code, but catching it here gives a better error
# message, and future-proofs us in case we build up the function
# using ast.
seen_default = False
for f in fields:
# Only consider fields in the __init__ call.
if f.init:
if not (f.default is MISSING and f.default_factory is MISSING):
seen_default = True
elif seen_default:
raise TypeError(f'non-default argument {f.name!r} '
'follows default argument')
globals = {'MISSING': MISSING,
'_HAS_DEFAULT_FACTORY': _HAS_DEFAULT_FACTORY}
body_lines = []
for f in fields:
line = _field_init(f, frozen, globals, self_name)
# line is None means that this field doesn't require
# initialization (it's a pseudo-field). Just skip it.
if line:
body_lines.append(line)
# Does this class have a post-init function?
if has_post_init:
params_str = ','.join(f.name for f in fields
if f._field_type is _FIELD_INITVAR)
body_lines.append(f'{self_name}.{_POST_INIT_NAME}({params_str})')
# If no body lines, use 'pass'.
if not body_lines:
body_lines = ['pass']
locals = {f'_type_{f.name}': f.type for f in fields}
return _create_fn('__init__',
[self_name] + [_init_param(f) for f in fields if f.init],
body_lines,
locals=locals,
globals=globals,
return_type=None)
def _repr_fn(fields):
return _create_fn('__repr__',
('self',),
['return self.__class__.__qualname__ + f"(' +
', '.join([f"{f.name}={{self.{f.name}!r}}"
for f in fields]) +
')"'])
def _frozen_get_del_attr(cls, fields):
# XXX: globals is modified on the first call to _create_fn, then the
# modified version is used in the second call. Is this okay?
globals = {'cls': cls,
'FrozenInstanceError': FrozenInstanceError}
if fields:
fields_str = '(' + ','.join(repr(f.name) for f in fields) + ',)'
else:
# Special case for the zero-length tuple.
fields_str = '()'
return (_create_fn('__setattr__',
('self', 'name', 'value'),
(f'if type(self) is cls or name in {fields_str}:',
' raise FrozenInstanceError(f"cannot assign to field {name!r}")',
f'super(cls, self).__setattr__(name, value)'),
globals=globals),
_create_fn('__delattr__',
('self', 'name'),
(f'if type(self) is cls or name in {fields_str}:',
' raise FrozenInstanceError(f"cannot delete field {name!r}")',
f'super(cls, self).__delattr__(name)'),
globals=globals),
)
def _cmp_fn(name, op, self_tuple, other_tuple):
# Create a comparison function. If the fields in the object are
# named 'x' and 'y', then self_tuple is the string
# '(self.x,self.y)' and other_tuple is the string
# '(other.x,other.y)'.
return _create_fn(name,
('self', 'other'),
[ 'if other.__class__ is self.__class__:',
f' return {self_tuple}{op}{other_tuple}',
'return NotImplemented'])
def _hash_fn(fields):
self_tuple = _tuple_str('self', fields)
return _create_fn('__hash__',
('self',),
[f'return hash({self_tuple})'])
def _get_field(cls, a_name, a_type):
# Return a Field object for this field name and type. ClassVars
# and InitVars are also returned, but marked as such (see
# f._field_type).
# If the default value isn't derived from Field, then it's
# only a normal default value. Convert it to a Field().
default = getattr(cls, a_name, MISSING)
if isinstance(default, Field):
f = default
else:
if isinstance(default, types.MemberDescriptorType):
# This is a field in __slots__, so it has no default value.
default = MISSING
f = field(default=default)
# Assume it's a normal field until proven otherwise.
f._field_type = _FIELD
# Only at this point do we know the name and the type. Set them.
f.name = a_name
f.type = a_type
# If typing has not been imported, then it's impossible for
# any annotation to be a ClassVar. So, only look for ClassVar
# if typing has been imported.
typing = sys.modules.get('typing')
if typing is not None:
# This test uses a typing internal class, but it's the best
# way to test if this is a ClassVar.
if (type(a_type) is typing._GenericAlias and
a_type.__origin__ is typing.ClassVar):
# This field is a ClassVar, so it's not a field.
f._field_type = _FIELD_CLASSVAR
if f._field_type is _FIELD:
# Check if this is an InitVar.
if a_type is InitVar:
# InitVars are not fields, either.
f._field_type = _FIELD_INITVAR
# Validations for fields. This is delayed until now, instead of
# in the Field() constructor, since only here do we know the field
# name, which allows better error reporting.
# Special restrictions for ClassVar and InitVar.
if f._field_type in (_FIELD_CLASSVAR, _FIELD_INITVAR):
if f.default_factory is not MISSING:
raise TypeError(f'field {f.name} cannot have a '
'default factory')
# Should I check for other field settings? default_factory
# seems the most serious to check for. Maybe add others.
# For example, how about init=False (or really,
# init=<not-the-default-init-value>)? It makes no sense for
# ClassVar and InitVar to specify init=<anything>.
# For real fields, disallow mutable defaults for known types.
if f._field_type is _FIELD and isinstance(f.default, (list, dict, set)):
raise ValueError(f'mutable default {type(f.default)} for field '
f'{f.name} is not allowed: use default_factory')
return f
def _set_new_attribute(cls, name, value):
# Never overwrites an existing attribute. Returns True if the
# attribute already exists.
if name in cls.__dict__:
return True
setattr(cls, name, value)
return False
# Decide if/how we're going to create a hash function. Key is
# (unsafe_hash, eq, frozen, does-hash-exist). Value is the action to
# take. The common case is to do nothing, so instead of providing a
# function that is a no-op, use None to signify that.
def _hash_set_none(cls, fields):
return None
def _hash_add(cls, fields):
flds = [f for f in fields if (f.compare if f.hash is None else f.hash)]
return _hash_fn(flds)
def _hash_exception(cls, fields):
# Raise an exception.
raise TypeError(f'Cannot overwrite attribute __hash__ '
f'in class {cls.__name__}')
#
# +-------------------------------------- unsafe_hash?
# | +------------------------------- eq?
# | | +------------------------ frozen?
# | | | +---------------- has-explicit-hash?
# | | | |
# | | | | +------- action
# | | | | |
# v v v v v
_hash_action = {(False, False, False, False): None,
(False, False, False, True ): None,
(False, False, True, False): None,
(False, False, True, True ): None,
(False, True, False, False): _hash_set_none,
(False, True, False, True ): None,
(False, True, True, False): _hash_add,
(False, True, True, True ): None,
(True, False, False, False): _hash_add,
(True, False, False, True ): _hash_exception,
(True, False, True, False): _hash_add,
(True, False, True, True ): _hash_exception,
(True, True, False, False): _hash_add,
(True, True, False, True ): _hash_exception,
(True, True, True, False): _hash_add,
(True, True, True, True ): _hash_exception,
}
# See https://bugs.python.org/issue32929#msg312829 for an if-statement
# version of this table.
def _process_class(cls, init, repr, eq, order, unsafe_hash, frozen):
# Now that dicts retain insertion order, there's no reason to use
# an ordered dict. I am leveraging that ordering here, because
# derived class fields overwrite base class fields, but the order
# is defined by the base class, which is found first.
fields = {}
setattr(cls, _PARAMS, _DataclassParams(init, repr, eq, order,
unsafe_hash, frozen))
# Find our base classes in reverse MRO order, and exclude
# ourselves. In reversed order so that more derived classes
# override earlier field definitions in base classes.
# As long as we're iterating over them, see if any are frozen.
any_frozen_base = False
has_dataclass_bases = False
for b in cls.__mro__[-1:0:-1]:
# Only process classes that have been processed by our
# decorator. That is, they have a _FIELDS attribute.
base_fields = getattr(b, _FIELDS, None)
if base_fields:
has_dataclass_bases = True
for f in base_fields.values():
fields[f.name] = f
if getattr(b, _PARAMS).frozen:
any_frozen_base = True
# Annotations that are defined in this class (not in base
# classes). If __annotations__ isn't present, then this class
# adds no new annotations. We use this to compute fields that
# are added by this class.
# Fields are found from cls_annotations, which is guaranteed to be
# ordered. Default values are from class attributes, if a field
# has a default. If the default value is a Field(), then it
# contains additional info beyond (and possibly including) the
# actual default value. Pseudo-fields ClassVars and InitVars are
# included, despite the fact that they're not real fields.
# That's dealt with later.
cls_annotations = cls.__dict__.get('__annotations__', {})
# Now find fields in our class. While doing so, validate some
# things, and set the default values (as class attributes)
# where we can.
cls_fields = [_get_field(cls, name, type)
for name, type in cls_annotations.items()]
for f in cls_fields:
fields[f.name] = f
# If the class attribute (which is the default value for
# this field) exists and is of type 'Field', replace it
# with the real default. This is so that normal class
# introspection sees a real default value, not a Field.
if isinstance(getattr(cls, f.name, None), Field):
if f.default is MISSING:
# If there's no default, delete the class attribute.
# This happens if we specify field(repr=False), for
# example (that is, we specified a field object, but
# no default value). Also if we're using a default
# factory. The class attribute should not be set at
# all in the post-processed class.
delattr(cls, f.name)
else:
setattr(cls, f.name, f.default)
# Do we have any Field members that don't also have annotations?
for name, value in cls.__dict__.items():
if isinstance(value, Field) and not name in cls_annotations:
raise TypeError(f'{name!r} is a field but has no type annotation')
# Check rules that apply if we are derived from any dataclasses.
if has_dataclass_bases:
# Raise an exception if any of our bases are frozen, but we're not.
if any_frozen_base and not frozen:
raise TypeError('cannot inherit non-frozen dataclass from a '
'frozen one')
# Raise an exception if we're frozen, but none of our bases are.
if not any_frozen_base and frozen:
raise TypeError('cannot inherit frozen dataclass from a '
'non-frozen one')
# Remember all of the fields on our class (including bases). This also
# marks this class as being a dataclass.
setattr(cls, _FIELDS, fields)
# Was this class defined with an explicit __hash__? Note that if
# __eq__ is defined in this class, then python will automatically
# set __hash__ to None. This is a heuristic, as it's possible
    # that such a __hash__ == None was not auto-generated, but it's
    # close enough.
class_hash = cls.__dict__.get('__hash__', MISSING)
has_explicit_hash = not (class_hash is MISSING or
(class_hash is None and '__eq__' in cls.__dict__))
# If we're generating ordering methods, we must be generating
# the eq methods.
if order and not eq:
raise ValueError('eq must be true if order is true')
if init:
# Does this class have a post-init function?
has_post_init = hasattr(cls, _POST_INIT_NAME)
# Include InitVars and regular fields (so, not ClassVars).
flds = [f for f in fields.values()
if f._field_type in (_FIELD, _FIELD_INITVAR)]
_set_new_attribute(cls, '__init__',
_init_fn(flds,
frozen,
has_post_init,
# The name to use for the "self" param
# in __init__. Use "self" if possible.
'__dataclass_self__' if 'self' in fields
else 'self',
))
# Get the fields as a list, and include only real fields. This is
# used in all of the following methods.
field_list = [f for f in fields.values() if f._field_type is _FIELD]
if repr:
flds = [f for f in field_list if f.repr]
_set_new_attribute(cls, '__repr__', _repr_fn(flds))
if eq:
        # Create __eq__ method. There's no need for a __ne__ method,
# since python will call __eq__ and negate it.
flds = [f for f in field_list if f.compare]
self_tuple = _tuple_str('self', flds)
other_tuple = _tuple_str('other', flds)
_set_new_attribute(cls, '__eq__',
_cmp_fn('__eq__', '==',
self_tuple, other_tuple))
if order:
# Create and set the ordering methods.
flds = [f for f in field_list if f.compare]
self_tuple = _tuple_str('self', flds)
other_tuple = _tuple_str('other', flds)
for name, op in [('__lt__', '<'),
('__le__', '<='),
('__gt__', '>'),
('__ge__', '>='),
]:
if _set_new_attribute(cls, name,
_cmp_fn(name, op, self_tuple, other_tuple)):
raise TypeError(f'Cannot overwrite attribute {name} '
f'in class {cls.__name__}. Consider using '
'functools.total_ordering')
if frozen:
for fn in _frozen_get_del_attr(cls, field_list):
if _set_new_attribute(cls, fn.__name__, fn):
raise TypeError(f'Cannot overwrite attribute {fn.__name__} '
f'in class {cls.__name__}')
# Decide if/how we're going to create a hash function.
hash_action = _hash_action[bool(unsafe_hash),
bool(eq),
bool(frozen),
has_explicit_hash]
if hash_action:
# No need to call _set_new_attribute here, since by the time
# we're here the overwriting is unconditional.
cls.__hash__ = hash_action(cls, field_list)
if not getattr(cls, '__doc__'):
# Create a class doc-string.
cls.__doc__ = (cls.__name__ +
str(inspect.signature(cls)).replace(' -> None', ''))
return cls
# _cls should never be specified by keyword, so start it with an
# underscore. The presence of _cls is used to detect if this
# decorator is being called with parameters or not.
def dataclass(_cls=None, *, init=True, repr=True, eq=True, order=False,
unsafe_hash=False, frozen=False):
"""Returns the same class as was passed in, with dunder methods
added based on the fields defined in the class.
Examines PEP 526 __annotations__ to determine fields.
If init is true, an __init__() method is added to the class. If
repr is true, a __repr__() method is added. If order is true, rich
comparison dunder methods are added. If unsafe_hash is true, a
__hash__() method function is added. If frozen is true, fields may
not be assigned to after instance creation.
"""
def wrap(cls):
return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen)
# See if we're being called as @dataclass or @dataclass().
if _cls is None:
# We're called with parens.
return wrap
# We're called as @dataclass without parens.
return wrap(_cls)
def fields(class_or_instance):
"""Return a tuple describing the fields of this dataclass.
Accepts a dataclass or an instance of one. Tuple elements are of
type Field.
"""
# Might it be worth caching this, per class?
try:
fields = getattr(class_or_instance, _FIELDS)
except AttributeError:
raise TypeError('must be called with a dataclass type or instance')
# Exclude pseudo-fields. Note that fields is sorted by insertion
# order, so the order of the tuple is as the fields were defined.
return tuple(f for f in fields.values() if f._field_type is _FIELD)
def _is_dataclass_instance(obj):
"""Returns True if obj is an instance of a dataclass."""
return not isinstance(obj, type) and hasattr(obj, _FIELDS)
def is_dataclass(obj):
"""Returns True if obj is a dataclass or an instance of a
dataclass."""
return hasattr(obj, _FIELDS)
def asdict(obj, *, dict_factory=dict):
"""Return the fields of a dataclass instance as a new dictionary mapping
field names to field values.
Example usage:
@dataclass
class C:
x: int
y: int
c = C(1, 2)
assert asdict(c) == {'x': 1, 'y': 2}
If given, 'dict_factory' will be used instead of built-in dict.
The function applies recursively to field values that are
dataclass instances. This will also look into built-in containers:
tuples, lists, and dicts.
"""
if not _is_dataclass_instance(obj):
raise TypeError("asdict() should be called on dataclass instances")
return _asdict_inner(obj, dict_factory)
def _asdict_inner(obj, dict_factory):
if _is_dataclass_instance(obj):
result = []
for f in fields(obj):
value = _asdict_inner(getattr(obj, f.name), dict_factory)
result.append((f.name, value))
return dict_factory(result)
elif isinstance(obj, (list, tuple)):
return type(obj)(_asdict_inner(v, dict_factory) for v in obj)
elif isinstance(obj, dict):
return type(obj)((_asdict_inner(k, dict_factory), _asdict_inner(v, dict_factory))
for k, v in obj.items())
else:
return copy.deepcopy(obj)
def astuple(obj, *, tuple_factory=tuple):
"""Return the fields of a dataclass instance as a new tuple of field values.
Example usage::
@dataclass
class C:
x: int
y: int
c = C(1, 2)
assert astuple(c) == (1, 2)
If given, 'tuple_factory' will be used instead of built-in tuple.
The function applies recursively to field values that are
dataclass instances. This will also look into built-in containers:
tuples, lists, and dicts.
"""
if not _is_dataclass_instance(obj):
raise TypeError("astuple() should be called on dataclass instances")
return _astuple_inner(obj, tuple_factory)
def _astuple_inner(obj, tuple_factory):
if _is_dataclass_instance(obj):
result = []
for f in fields(obj):
value = _astuple_inner(getattr(obj, f.name), tuple_factory)
result.append(value)
return tuple_factory(result)
elif isinstance(obj, (list, tuple)):
return type(obj)(_astuple_inner(v, tuple_factory) for v in obj)
elif isinstance(obj, dict):
return type(obj)((_astuple_inner(k, tuple_factory), _astuple_inner(v, tuple_factory))
for k, v in obj.items())
else:
return copy.deepcopy(obj)
def make_dataclass(cls_name, fields, *, bases=(), namespace=None, init=True,
repr=True, eq=True, order=False, unsafe_hash=False,
frozen=False):
"""Return a new dynamically created dataclass.
The dataclass name will be 'cls_name'. 'fields' is an iterable
of either (name), (name, type) or (name, type, Field) objects. If type is
omitted, use the string 'typing.Any'. Field objects are created by
the equivalent of calling 'field(name, type [, Field-info])'.
C = make_dataclass('C', ['x', ('y', int), ('z', int, field(init=False))], bases=(Base,))
is equivalent to:
@dataclass
class C(Base):
x: 'typing.Any'
y: int
z: int = field(init=False)
For the bases and namespace parameters, see the builtin type() function.
The parameters init, repr, eq, order, unsafe_hash, and frozen are passed to
dataclass().
"""
if namespace is None:
namespace = {}
else:
# Copy namespace since we're going to mutate it.
namespace = namespace.copy()
anns = {}
for item in fields:
if isinstance(item, str):
name = item
tp = 'typing.Any'
elif len(item) == 2:
name, tp, = item
elif len(item) == 3:
name, tp, spec = item
namespace[name] = spec
anns[name] = tp
namespace['__annotations__'] = anns
# We use `types.new_class()` instead of simply `type()` to allow dynamic creation
    # of generic dataclasses.
cls = types.new_class(cls_name, bases, {}, lambda ns: ns.update(namespace))
return dataclass(cls, init=init, repr=repr, eq=eq, order=order,
unsafe_hash=unsafe_hash, frozen=frozen)
def replace(obj, **changes):
"""Return a new object replacing specified fields with new values.
This is especially useful for frozen classes. Example usage:
@dataclass(frozen=True)
class C:
x: int
y: int
c = C(1, 2)
c1 = replace(c, x=3)
assert c1.x == 3 and c1.y == 2
"""
# We're going to mutate 'changes', but that's okay because it's a new
# dict, even if called with 'replace(obj, **my_changes)'.
if not _is_dataclass_instance(obj):
raise TypeError("replace() should be called on dataclass instances")
# It's an error to have init=False fields in 'changes'.
# If a field is not in 'changes', read its value from the provided obj.
for f in getattr(obj, _FIELDS).values():
if not f.init:
# Error if this field is specified in changes.
if f.name in changes:
raise ValueError(f'field {f.name} is declared with '
'init=False, it cannot be specified with '
'replace()')
continue
if f.name not in changes:
changes[f.name] = getattr(obj, f.name)
# Create the new object, which calls __init__() and
# __post_init__() (if defined), using all of the init fields
# we've added and/or left in 'changes'. If there are values
# supplied in changes that aren't fields, this will correctly
# raise a TypeError.
return obj.__class__(**changes)
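# --- illustrative self-check (not part of the module) ------------------------
# A minimal sketch of how replace() treats init=False fields, matching the
# ValueError branch above.  Guarded so it only runs when executed directly.
if __name__ == "__main__":
    @dataclass
    class _Demo:
        x: int
        y: int = field(default=0, init=False)

    d = _Demo(1)
    d2 = replace(d, x=5)      # y is re-created from its default, not copied over
    assert (d2.x, d2.y) == (5, 0)
    try:
        replace(d, y=3)       # init=False fields cannot be supplied to replace()
    except ValueError as exc:
        print("expected:", exc)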
|
the-stack_106_26834 | import os
DIRNAME = os.path.dirname(__file__)
DEBUG = True
DATABASE_ENGINE = 'sqlite3'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(DIRNAME, 'example.sqlite').replace('\\','/'),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
STATIC_URL = '/static/'
INTERNAL_IPS = ('127.0.0.1',)
SITE_ID = 1
SECRET_KEY = 'lolz'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.admin',
'django.contrib.sessions',
'django.contrib.sites',
'knowledge',
'django_coverage',
'mock',
)
ROOT_URLCONF = 'tests.urls'
COVERAGE_REPORT_HTML_OUTPUT_DIR = os.path.join(DIRNAME, 'reports').replace('\\','/')
TEMPLATE_DIRS = (
    os.path.join(DIRNAME, 'templates').replace('\\','/'),
)
LOGIN_REDIRECT_URL = '/admin/'
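# --- illustrative usage note (assumed invocation) ------------------------------
# A settings module like this is normally selected via DJANGO_SETTINGS_MODULE; the
# dotted path below is an assumption based on ROOT_URLCONF = 'tests.urls':
#
#     DJANGO_SETTINGS_MODULE=tests.settings django-admin test knowledge
# --------------------------------------------------------------------------------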
|
the-stack_106_26835 | import logging
from utils import Logger
import pandas as pd
from datetime import datetime
from typing import Any, Dict, IO, List, Tuple, Union
from pandas.io.parsers import TextFileReader
## import local files
from interfaces.src.DataInterface import DataInterface
class CSVInterface(DataInterface):
def __init__(self, game_id:str, filepath_or_buffer:Union[str, IO[bytes]], delim:str = ','):
# set up data from params
super().__init__(game_id=game_id)
self._file : Union[str, IO[bytes]] = filepath_or_buffer
self._delimiter : str = delim
# set up data from file
self._data : pd.DataFrame = pd.DataFrame()
def _open(self) -> bool:
try:
self._data = pd.read_csv(filepath_or_buffer=self._file, delimiter=self._delimiter, parse_dates=['server_time', 'client_time'])
self._is_open = True
return True
except FileNotFoundError as err:
Logger.Log(f"Could not find file {self._file}.", logging.ERROR)
return False
def _close(self) -> bool:
self._is_open = False
return True
def _allIDs(self) -> List[str]:
return self._data['session_id'].unique().tolist()
def _fullDateRange(self) -> Dict[str,datetime]:
min_time = pd.to_datetime(self._data['server_time'].min())
max_time = pd.to_datetime(self._data['server_time'].max())
return {'min':min_time, 'max':max_time}
def _rowsFromIDs(self, id_list: List[str], versions: Union[List[int],None]=None) -> List[Tuple]:
        if self.IsOpen() and not self._data.empty:
return list(self._data.loc[self._data['session_id'].isin(id_list)].itertuples(index=False, name=None))
else:
return []
def _IDsFromDates(self, min:datetime, max:datetime, versions: Union[List[int],None]=None) -> List[str]:
if not self._data.empty:
server_times = pd.to_datetime(self._data['server_time'])
            if versions is not None and versions != []:
mask = self._data.loc[(server_times >= min) & (server_times <= max) & (self._data['app_version'].isin(versions))]
else:
mask = self._data.loc[(server_times >= min) & (server_times <= max)]
return mask['session_id'].unique().tolist()
else:
return []
def _datesFromIDs(self, id_list:List[int], versions: Union[List[int],None]=None) -> Dict[str, datetime]:
min_date = self._data[self._data['session_id'].isin(id_list)]['server_time'].min()
max_date = self._data[self._data['session_id'].isin(id_list)]['server_time'].max()
return {'min':pd.to_datetime(min_date), 'max':pd.to_datetime(max_date)}
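# --- illustrative usage sketch (assumptions noted) ------------------------------
# The public wrappers (open/close, id and date queries) live in DataInterface and
# are not shown here, so this sketch only touches the methods defined above; the
# game id and file name are hypothetical.
#
#     interface = CSVInterface(game_id="EXAMPLE_GAME", filepath_or_buffer="events.csv")
#     if interface._open():
#         print(interface._allIDs()[:5])        # first few session ids
#         print(interface._fullDateRange())     # {'min': Timestamp(...), 'max': Timestamp(...)}
#         interface._close()
# ---------------------------------------------------------------------------------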
|
the-stack_106_26836 | import torch
from torch.nn import functional as F
from linear_nets import MLP,fc_layer
from exemplars import ExemplarHandler
from continual_learner import ContinualLearner
from replayer import Replayer
import utils
class Classifier(ContinualLearner, Replayer, ExemplarHandler):
'''Model for classifying images, "enriched" as "ContinualLearner"-, Replayer- and ExemplarHandler-object.'''
def __init__(self, image_size, image_channels, classes,
fc_layers=3, fc_units=1000, fc_drop=0, fc_bn=False, fc_nl="relu", gated=False,
bias=True, excitability=False, excit_buffer=False, binaryCE=False, binaryCE_distill=False, AGEM=False):
# configurations
super().__init__()
self.classes = classes
self.label = "Classifier"
self.fc_layers = fc_layers
# settings for training
self.binaryCE = binaryCE #-> use binary (instead of multiclass) prediction error
        self.binaryCE_distill = binaryCE_distill #-> for classes from previous tasks, use the probabilities predicted
                                                 #   by the previous model as binary targets (only in Class-IL with binaryCE)
self.AGEM = AGEM #-> use gradient of replayed data as inequality constraint for (instead of adding it to)
# the gradient of the current data (as in A-GEM, see Chaudry et al., 2019; ICLR)
# check whether there is at least 1 fc-layer
if fc_layers<1:
raise ValueError("The classifier needs to have at least 1 fully-connected layer.")
######------SPECIFY MODEL------######
# flatten image to 2D-tensor
self.flatten = utils.Flatten()
# fully connected hidden layers
self.fcE = MLP(input_size=image_channels*image_size**2, output_size=fc_units, layers=fc_layers-1,
hid_size=fc_units, drop=fc_drop, batch_norm=fc_bn, nl=fc_nl, bias=bias,
excitability=excitability, excit_buffer=excit_buffer, gated=gated)
mlp_output_size = fc_units if fc_layers>1 else image_channels*image_size**2
# classifier
self.classifier = fc_layer(mlp_output_size, classes, excit_buffer=True, nl='none', drop=fc_drop)
def list_init_layers(self):
'''Return list of modules whose parameters could be initialized differently (i.e., conv- or fc-layers).'''
list = []
list += self.fcE.list_init_layers()
list += self.classifier.list_init_layers()
return list
@property
def name(self):
return "{}_c{}".format(self.fcE.name, self.classes)
def forward(self, x):
final_features = self.fcE(self.flatten(x))
return self.classifier(final_features)
def feature_extractor(self, images):
return self.fcE(self.flatten(images))
def train_a_batch(self, x, y, scores=None, x_=None, y_=None, scores_=None, rnt=0.5, active_classes=None, task=1):
'''Train model for one batch ([x],[y]), possibly supplemented with replayed data ([x_],[y_/scores_]).
[x] <tensor> batch of inputs (could be None, in which case only 'replayed' data is used)
[y] <tensor> batch of corresponding labels
[scores] None or <tensor> 2Dtensor:[batch]x[classes] predicted "scores"/"logits" for [x]
NOTE: only to be used for "BCE with distill" (only when scenario=="class")
[x_] None or (<list> of) <tensor> batch of replayed inputs
[y_] None or (<list> of) <tensor> batch of corresponding "replayed" labels
[scores_] None or (<list> of) <tensor> 2Dtensor:[batch]x[classes] predicted "scores"/"logits" for [x_]
[rnt] <number> in [0,1], relative importance of new task
[active_classes] None or (<list> of) <list> with "active" classes
[task] <int>, for setting task-specific mask'''
# Set model to training-mode
self.train()
# Reset optimizer
self.optimizer.zero_grad()
# Should gradient be computed separately for each task? (needed when a task-mask is combined with replay)
gradient_per_task = True if ((self.mask_dict is not None) and (x_ is not None)) else False
##--(1)-- REPLAYED DATA --##
if x_ is not None:
# In the Task-IL scenario, [y_] or [scores_] is a list and [x_] needs to be evaluated on each of them
            # (in case of 'exact' or 'exemplar' replay, [x_] is also a list!)
TaskIL = (type(y_)==list) if (y_ is not None) else (type(scores_)==list)
if not TaskIL:
y_ = [y_]
scores_ = [scores_]
active_classes = [active_classes] if (active_classes is not None) else None
n_replays = len(y_) if (y_ is not None) else len(scores_)
# Prepare lists to store losses for each replay
loss_replay = [None]*n_replays
predL_r = [None]*n_replays
distilL_r = [None]*n_replays
# Run model (if [x_] is not a list with separate replay per task and there is no task-specific mask)
if (not type(x_)==list) and (self.mask_dict is None):
y_hat_all = self(x_)
            # Loop to evaluate predictions on replay according to each previous task
for replay_id in range(n_replays):
# -if [x_] is a list with separate replay per task, evaluate model on this task's replay
if (type(x_)==list) or (self.mask_dict is not None):
x_temp_ = x_[replay_id] if type(x_)==list else x_
if self.mask_dict is not None:
self.apply_XdGmask(task=replay_id+1)
y_hat_all = self(x_temp_)
# -if needed (e.g., Task-IL or Class-IL scenario), remove predictions for classes not in replayed task
y_hat = y_hat_all if (active_classes is None) else y_hat_all[:, active_classes[replay_id]]
# Calculate losses
if (y_ is not None) and (y_[replay_id] is not None):
if self.binaryCE:
binary_targets_ = utils.to_one_hot(y_[replay_id].cpu(), y_hat.size(1)).to(y_[replay_id].device)
predL_r[replay_id] = F.binary_cross_entropy_with_logits(
input=y_hat, target=binary_targets_, reduction='none'
).sum(dim=1).mean() #--> sum over classes, then average over batch
else:
predL_r[replay_id] = F.cross_entropy(y_hat, y_[replay_id], reduction='mean')
if (scores_ is not None) and (scores_[replay_id] is not None):
# n_classes_to_consider = scores.size(1) #--> with this version, no zeroes are added to [scores]!
n_classes_to_consider = y_hat.size(1) #--> zeros will be added to [scores] to make it this size!
kd_fn = utils.loss_fn_kd_binary if self.binaryCE else utils.loss_fn_kd
distilL_r[replay_id] = kd_fn(scores=y_hat[:, :n_classes_to_consider],
target_scores=scores_[replay_id], T=self.KD_temp)
# Weigh losses
if self.replay_targets=="hard":
loss_replay[replay_id] = predL_r[replay_id]
elif self.replay_targets=="soft":
loss_replay[replay_id] = distilL_r[replay_id]
# If needed, perform backward pass before next task-mask (gradients of all tasks will be accumulated)
if gradient_per_task:
weight = 1 if self.AGEM else (1 - rnt)
weighted_replay_loss_this_task = weight * loss_replay[replay_id] / n_replays
weighted_replay_loss_this_task.backward()
# Calculate total replay loss
loss_replay = None if (x_ is None) else sum(loss_replay) / n_replays
# If using A-GEM, calculate and store averaged gradient of replayed data
if self.AGEM and x_ is not None:
# Perform backward pass to calculate gradient of replayed batch (if not yet done)
if not gradient_per_task:
loss_replay.backward()
# Reorganize the gradient of the replayed batch as a single vector
grad_rep = []
for p in self.parameters():
if p.requires_grad:
grad_rep.append(p.grad.view(-1))
grad_rep = torch.cat(grad_rep)
# Reset gradients (with A-GEM, gradients of replayed batch should only be used as inequality constraint)
self.optimizer.zero_grad()
##--(2)-- CURRENT DATA --##
if x is not None:
# If requested, apply correct task-specific mask
if self.mask_dict is not None:
self.apply_XdGmask(task=task)
# Run model
y_hat = self(x)
# -if needed, remove predictions for classes not in current task
if active_classes is not None:
class_entries = active_classes[-1] if type(active_classes[0])==list else active_classes
y_hat = y_hat[:, class_entries]
# Calculate prediction loss
if self.binaryCE:
# -binary prediction loss
binary_targets = utils.to_one_hot(y.cpu(), y_hat.size(1)).to(y.device)
if self.binaryCE_distill and (scores is not None):
classes_per_task = int(y_hat.size(1) / task)
binary_targets = binary_targets[:, -(classes_per_task):]
binary_targets = torch.cat([torch.sigmoid(scores / self.KD_temp), binary_targets], dim=1)
predL = None if y is None else F.binary_cross_entropy_with_logits(
input=y_hat, target=binary_targets, reduction='none'
).sum(dim=1).mean() #--> sum over classes, then average over batch
else:
# -multiclass prediction loss
predL = None if y is None else F.cross_entropy(input=y_hat, target=y, reduction='mean')
# Weigh losses
loss_cur = predL
# Calculate training-precision
precision = None if y is None else (y == y_hat.max(1)[1]).sum().item() / x.size(0)
# If backward passes are performed per task (e.g., XdG combined with replay), perform backward pass
if gradient_per_task:
weighted_current_loss = rnt*loss_cur
weighted_current_loss.backward()
else:
precision = predL = None
# -> it's possible there is only "replay" [e.g., for offline with task-incremental learning]
# Combine loss from current and replayed batch
if x_ is None or self.AGEM:
loss_total = loss_cur
else:
loss_total = loss_replay if (x is None) else rnt*loss_cur+(1-rnt)*loss_replay
##--(3)-- ALLOCATION LOSSES --##
# Add SI-loss (Zenke et al., 2017)
surrogate_loss = self.surrogate_loss()
if self.si_c>0:
loss_total += self.si_c * surrogate_loss
# Add EWC-loss
ewc_loss = self.ewc_loss()
if self.ewc_lambda>0:
loss_total += self.ewc_lambda * ewc_loss
# Backpropagate errors (if not yet done)
if not gradient_per_task:
loss_total.backward()
# If using A-GEM, potentially change gradient:
if self.AGEM and x_ is not None:
# -reorganize gradient (of current batch) as single vector
grad_cur = []
for p in self.parameters():
if p.requires_grad:
grad_cur.append(p.grad.view(-1))
grad_cur = torch.cat(grad_cur)
            # -check the inequality constraint
angle = (grad_cur*grad_rep).sum()
if angle < 0:
# -if violated, project the gradient of the current batch onto the gradient of the replayed batch ...
length_rep = (grad_rep*grad_rep).sum()
grad_proj = grad_cur-(angle/length_rep)*grad_rep
# -...and replace all the gradients within the model with this projected gradient
index = 0
for p in self.parameters():
if p.requires_grad:
n_param = p.numel() # number of parameters in [p]
p.grad.copy_(grad_proj[index:index+n_param].view_as(p))
index += n_param
# Take optimization-step
self.optimizer.step()
# Return the dictionary with different training-loss split in categories
return {
'loss_total': loss_total.item(),
'loss_current': loss_cur.item() if x is not None else 0,
'loss_replay': loss_replay.item() if (loss_replay is not None) and (x is not None) else 0,
'pred': predL.item() if predL is not None else 0,
'pred_r': sum(predL_r).item()/n_replays if (x_ is not None and predL_r[0] is not None) else 0,
'distil_r': sum(distilL_r).item()/n_replays if (x_ is not None and distilL_r[0] is not None) else 0,
'ewc': ewc_loss.item(), 'si_loss': surrogate_loss.item(),
'precision': precision if precision is not None else 0.,
}
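# --- illustrative usage sketch (assumptions noted) -------------------------------
# One training step on random data.  This assumes the parent classes
# (ContinualLearner / Replayer / ExemplarHandler) initialise their attributes
# (mask_dict, si_c, ewc_lambda, ...) to usable defaults, and that an optimizer is
# attached as a plain attribute before calling train_a_batch(), as the surrounding
# training scripts are expected to do:
#
#     model = Classifier(image_size=28, image_channels=1, classes=10)
#     model.optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
#     x = torch.randn(8, 1, 28, 28)
#     y = torch.randint(0, 10, (8,))
#     stats = model.train_a_batch(x, y, active_classes=list(range(10)), task=1)
#     print(stats['loss_total'], stats['precision'])
# ----------------------------------------------------------------------------------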
|
the-stack_106_26838 | import pytest
from quart import Quart
@pytest.mark.asyncio
@pytest.mark.parametrize(
'debug, testing, present',
[(True, True, False), (True, False, True), (False, True, False), (False, False, False)],
)
async def test_debug(debug: bool, testing: bool, present: bool) -> None:
app = Quart(__name__)
app.debug = debug
app.testing = testing
@app.route('/')
async def error() -> None:
raise Exception('Unique error')
test_client = app.test_client()
response = await test_client.get('/')
assert response.status_code == 500
assert (b'Unique error' in (await response.get_data())) is present # type: ignore
|
the-stack_106_26840 | from setuptools import setup, find_packages, Command
from setuptools.command.build_py import build_py
from distutils import dir_util
from distutils.util import convert_path
from pathlib import Path
import os
import re
import string
import textwrap
from typing import Dict, NamedTuple, List, Sequence, Optional, TypeVar, Tuple
import ast
# NOTE: have to programmatically include third-party dependencies in `setup.py`.
RUAMEL_YAML_VERSION = "ruamel.yaml==0.16.5"
try:
import ruamel.yaml # noqa: F401
except ImportError:
import pip
pip.main(["install", RUAMEL_YAML_VERSION])
from ruamel.yaml import YAML
MARKO_VERSION = "marko==1.0.2"
try:
import marko # noqa: F401
except ImportError:
import pip
pip.main(["install", MARKO_VERSION])
from marko.block import Heading, FencedCode, LinkRefDef, BlankLine
from marko.inline import CodeSpan
from marko.ext.gfm import gfm
from marko.ext.gfm.elements import Table
# Definitions in context.py
PHASE0 = 'phase0'
ALTAIR = 'altair'
MERGE = 'merge'
specs = [PHASE0, ALTAIR, MERGE]
class ProtocolDefinition(NamedTuple):
# just function definitions currently. May expand with configuration vars in future.
functions: Dict[str, str]
class VariableDefinition(NamedTuple):
type_name: Optional[str]
value: str
comment: Optional[str] # e.g. "noqa: E501"
class SpecObject(NamedTuple):
functions: Dict[str, str]
protocols: Dict[str, ProtocolDefinition]
custom_types: Dict[str, str]
config_vars: Dict[str, VariableDefinition]
vars: Dict[str, VariableDefinition]
ssz_objects: Dict[str, str]
dataclasses: Dict[str, str]
class SpecConfig(NamedTuple):
md_files: List[Path]
py_prefix: List[Path]
py_suffix: List[Path]
overridden_functions: List[str]
predefined_vars: Dict[str, str]
def _get_name_from_heading(heading: Heading) -> Optional[str]:
last_child = heading.children[-1]
if isinstance(last_child, CodeSpan):
return last_child.children
return None
def _get_source_from_code_block(block: FencedCode) -> str:
return block.children[0].children.strip()
def _get_function_name_from_source(source: str) -> str:
fn = ast.parse(source).body[0]
assert isinstance(fn, ast.FunctionDef)
return fn.name
def _get_self_type_from_source(source: str) -> Optional[str]:
fn = ast.parse(source).body[0]
assert isinstance(fn, ast.FunctionDef)
args = fn.args.args
if len(args) == 0:
return None
if args[0].arg != 'self':
return None
if args[0].annotation is None:
return None
assert isinstance(args[0].annotation, ast.Name)
return args[0].annotation.id
def _get_class_info_from_source(source: str) -> Tuple[str, Optional[str]]:
class_def = ast.parse(source).body[0]
assert isinstance(class_def, ast.ClassDef)
base = class_def.bases[0]
if isinstance(base, ast.Name):
parent_class: Optional[str] = base.id
else:
# NOTE: SSZ definition derives from earlier phase...
# e.g. `phase0.SignedBeaconBlock`
# TODO: check for consistency with other phases
parent_class = None
return class_def.name, parent_class
def _is_constant_id(name: str) -> bool:
if name[0] not in string.ascii_uppercase + '_':
return False
return all(map(lambda c: c in string.ascii_uppercase + '_' + string.digits, name[1:]))
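# Illustrative examples (hypothetical names): constants are ALL_CAPS identifiers.
#     _is_constant_id("MAX_COMMITTEES_PER_SLOT")  # -> True
#     _is_constant_id("Slot")                     # -> False (contains lower-case letters)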
ETH2_SPEC_COMMENT_PREFIX = "eth2spec:"
def _get_eth2_spec_comment(child: LinkRefDef) -> Optional[str]:
_, _, title = child._parse_info
if not (title[0] == "(" and title[len(title) - 1] == ")"):
return None
title = title[1:len(title) - 1]
if not title.startswith(ETH2_SPEC_COMMENT_PREFIX):
return None
return title[len(ETH2_SPEC_COMMENT_PREFIX):].strip()
def _parse_value(name: str, typed_value: str) -> VariableDefinition:
comment = None
if name == "BLS12_381_Q":
comment = "noqa: E501"
typed_value = typed_value.strip()
if '(' not in typed_value or typed_value.startswith("get_generalized_index"):
return VariableDefinition(type_name=None, value=typed_value, comment=comment)
i = typed_value.index('(')
type_name = typed_value[:i]
return VariableDefinition(type_name=type_name, value=typed_value[i + 1:-1], comment=comment)
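# Illustrative behaviour of _parse_value (example inputs are assumptions):
#     _parse_value("SLOTS_PER_EPOCH", "uint64(2**5)")
#         -> VariableDefinition(type_name='uint64', value='2**5', comment=None)
#     _parse_value("GENESIS_SLOT", "Slot(0)")
#         -> VariableDefinition(type_name='Slot', value='0', comment=None)
#     _parse_value("SOME_UNTYPED_VALUE", "16")
#         -> VariableDefinition(type_name=None, value='16', comment=None)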
def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str]) -> SpecObject:
functions: Dict[str, str] = {}
protocols: Dict[str, ProtocolDefinition] = {}
config_vars: Dict[str, VariableDefinition] = {}
vars: Dict[str, VariableDefinition] = {}
ssz_objects: Dict[str, str] = {}
dataclasses: Dict[str, str] = {}
custom_types: Dict[str, str] = {}
with open(file_name) as source_file:
document = gfm.parse(source_file.read())
current_name = None
should_skip = False
for child in document.children:
if isinstance(child, BlankLine):
continue
if should_skip:
should_skip = False
continue
if isinstance(child, Heading):
current_name = _get_name_from_heading(child)
elif isinstance(child, FencedCode):
if child.lang != "python":
continue
source = _get_source_from_code_block(child)
if source.startswith("def"):
current_name = _get_function_name_from_source(source)
self_type_name = _get_self_type_from_source(source)
function_def = "\n".join(line.rstrip() for line in source.splitlines())
if self_type_name is None:
functions[current_name] = function_def
else:
if self_type_name not in protocols:
protocols[self_type_name] = ProtocolDefinition(functions={})
protocols[self_type_name].functions[current_name] = function_def
elif source.startswith("@dataclass"):
class_name, _ = _get_class_info_from_source(source)
assert class_name == current_name
dataclasses[class_name] = "\n".join(line.rstrip() for line in source.splitlines())
elif source.startswith("class"):
class_name, parent_class = _get_class_info_from_source(source)
# check consistency with spec
assert class_name == current_name
if parent_class:
assert parent_class == "Container"
# NOTE: trim whitespace from spec
ssz_objects[current_name] = "\n".join(line.rstrip() for line in source.splitlines())
else:
raise Exception("unrecognized python code element")
elif isinstance(child, Table):
for row in child.children:
cells = row.children
if len(cells) >= 2:
name_cell = cells[0]
name = name_cell.children[0].children
value_cell = cells[1]
value = value_cell.children[0].children
if isinstance(value, list):
# marko parses `**X**` as a list containing a X
value = value[0].children
if not _is_constant_id(name):
# Check for short type declarations
if (value.startswith("uint") or value.startswith("Bytes")
or value.startswith("ByteList") or value.startswith("Union")):
custom_types[name] = value
continue
value_def = _parse_value(name, value)
if name in preset:
vars[name] = VariableDefinition(value_def.type_name, preset[name], value_def.comment)
elif name in config:
config_vars[name] = VariableDefinition(value_def.type_name, config[name], value_def.comment)
else:
vars[name] = value_def
elif isinstance(child, LinkRefDef):
comment = _get_eth2_spec_comment(child)
if comment == "skip":
should_skip = True
return SpecObject(
functions=functions,
protocols=protocols,
custom_types=custom_types,
config_vars=config_vars,
vars=vars,
ssz_objects=ssz_objects,
dataclasses=dataclasses,
)
def is_spec_defined_type(value: str) -> bool:
return value.startswith('ByteList') or value.startswith('Union')
def objects_to_spec(preset_name: str,
spec_object: SpecObject,
fork: str,
ordered_class_objects: Dict[str, str],
spec_config: SpecConfig,
python_prefixes: List[str],
python_suffixes: List[str]) -> str:
"""
Given all the objects that constitute a spec, combine them into a single pyfile.
"""
new_type_definitions = (
'\n\n'.join(
[
f"class {key}({value}):\n pass\n"
for key, value in spec_object.custom_types.items()
if not is_spec_defined_type(value)
]
)
+ ('\n\n' if len([key for key, value in spec_object.custom_types.items()
if is_spec_defined_type(value)]) > 0 else '')
+ '\n\n'.join(
[
f"{key} = {value}\n"
for key, value in spec_object.custom_types.items()
if is_spec_defined_type(value)
]
)
)
def format_protocol(protocol_name: str, protocol_def: ProtocolDefinition) -> str:
protocol = f"class {protocol_name}(Protocol):"
for fn_source in protocol_def.functions.values():
fn_source = fn_source.replace("self: " + protocol_name, "self")
protocol += "\n\n" + textwrap.indent(fn_source, " ")
return protocol
protocols_spec = '\n\n\n'.join(format_protocol(k, v) for k, v in spec_object.protocols.items())
functions_spec = '\n\n\n'.join(spec_object.functions.values())
# Access global dict of config vars for runtime configurables
for name in spec_object.config_vars.keys():
functions_spec = functions_spec.replace(name, 'config.' + name)
def format_config_var(name: str, vardef: VariableDefinition) -> str:
if vardef.type_name is None:
out = f'{name}={vardef.value},'
else:
out = f'{name}={vardef.type_name}({vardef.value}),'
if vardef.comment is not None:
out += f' # {vardef.comment}'
return out
config_spec = 'class Configuration(NamedTuple):\n'
config_spec += ' PRESET_BASE: str\n'
config_spec += '\n'.join(f' {k}: {v.type_name if v.type_name is not None else "int"}'
for k, v in spec_object.config_vars.items())
config_spec += '\n\n\nconfig = Configuration(\n'
config_spec += f' PRESET_BASE="{preset_name}",\n'
config_spec += '\n'.join(' ' + format_config_var(k, v) for k, v in spec_object.config_vars.items())
config_spec += '\n)\n'
def format_constant(name: str, vardef: VariableDefinition) -> str:
if vardef.type_name is None:
out = f'{name} = {vardef.value}'
else:
out = f'{name} = {vardef.type_name}({vardef.value})'
if vardef.comment is not None:
out += f' # {vardef.comment}'
return out
vars_spec = '\n'.join(format_constant(k, v)
for k, v in spec_object.vars.items()
if k not in spec_config.predefined_vars)
predefined_vars_spec = '\n'.join(f'{k} = {v}' for k, v in spec_config.predefined_vars.items())
predefined_vars_check = '\n'.join(map(lambda x: 'assert %s == %s' % (x, spec_object.vars[x].value),
spec_config.predefined_vars.keys()))
ordered_class_objects_spec = '\n\n\n'.join(ordered_class_objects.values())
spec = (f'PRESET_NAME = "{preset_name}"\n\n'
+ '\n\n'.join(python_prefixes)
+ '\n\n' + f"fork = \'{fork}\'"
+ ('\n\n' + predefined_vars_spec if predefined_vars_spec != '' else '')
+ '\n\n\n' + new_type_definitions
+ '\n\n' + vars_spec
+ '\n\n\n' + config_spec
+ '\n\n' + ordered_class_objects_spec
+ ('\n\n\n' + protocols_spec if protocols_spec != '' else '')
+ '\n\n\n' + functions_spec
+ ('\n\n\n' + predefined_vars_check if predefined_vars_check != '' else '')
+ '\n\n\n' + '\n\n'.join(python_suffixes)
)
return spec
def combine_protocols(old_protocols: Dict[str, ProtocolDefinition],
new_protocols: Dict[str, ProtocolDefinition]) -> Dict[str, ProtocolDefinition]:
for key, value in new_protocols.items():
if key not in old_protocols:
old_protocols[key] = value
else:
functions = combine_dicts(old_protocols[key].functions, value.functions)
old_protocols[key] = ProtocolDefinition(functions=functions)
return old_protocols
T = TypeVar('T')
def combine_dicts(old_dict: Dict[str, T], new_dict: Dict[str, T]) -> Dict[str, T]:
return {**old_dict, **new_dict}
ignored_dependencies = [
'bit', 'boolean', 'Vector', 'List', 'Container', 'BLSPubkey', 'BLSSignature',
'Bytes1', 'Bytes4', 'Bytes20', 'Bytes32', 'Bytes48', 'Bytes96', 'Bitlist', 'Bitvector',
'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',
'bytes', 'byte', 'ByteList', 'ByteVector',
'Dict', 'dict', 'field', 'ceillog2', 'floorlog2', 'Set',
]
def dependency_order_class_objects(objects: Dict[str, str], custom_types: Dict[str, str]) -> None:
"""
Determines which SSZ Object is dependent on which other and orders them appropriately
"""
items = list(objects.items())
for key, value in items:
dependencies = []
for line in value.split('\n'):
if not re.match(r'\s+\w+: .+', line):
continue # skip whitespace etc.
line = line[line.index(':') + 1:] # strip of field name
if '#' in line:
line = line[:line.index('#')] # strip of comment
dependencies.extend(re.findall(r'(\w+)', line)) # catch all legible words, potential dependencies
dependencies = list(filter(lambda x: '_' not in x and x.upper() != x, dependencies)) # filter out constants
dependencies = list(filter(lambda x: x not in ignored_dependencies, dependencies))
dependencies = list(filter(lambda x: x not in custom_types, dependencies))
for dep in dependencies:
key_list = list(objects.keys())
for item in [dep, key] + key_list[key_list.index(dep) + 1:]:
objects[item] = objects.pop(item)
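# Illustrative reordering (hypothetical SSZ containers): if 'Foo' references 'Bar',
# the dict is rotated so that 'Bar' ends up before 'Foo':
#
#     objs = {'Foo': 'class Foo(Container):\n    bar: Bar',
#             'Bar': 'class Bar(Container):\n    x: uint64'}
#     dependency_order_class_objects(objs, custom_types={})
#     list(objs)   # -> ['Bar', 'Foo']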
def combine_spec_objects(spec0: SpecObject, spec1: SpecObject) -> SpecObject:
"""
Takes in two spec variants (as tuples of their objects) and combines them using the appropriate combiner function.
"""
protocols = combine_protocols(spec0.protocols, spec1.protocols)
functions = combine_dicts(spec0.functions, spec1.functions)
custom_types = combine_dicts(spec0.custom_types, spec1.custom_types)
vars = combine_dicts(spec0.vars, spec1.vars)
config_vars = combine_dicts(spec0.config_vars, spec1.config_vars)
ssz_objects = combine_dicts(spec0.ssz_objects, spec1.ssz_objects)
dataclasses = combine_dicts(spec0.dataclasses, spec1.dataclasses)
return SpecObject(
functions=functions,
protocols=protocols,
custom_types=custom_types,
vars=vars,
config_vars=config_vars,
ssz_objects=ssz_objects,
dataclasses=dataclasses,
)
def parse_config_vars(conf: Dict[str, str]) -> Dict[str, str]:
"""
Parses a dict of basic str/int/list types into a dict for insertion into the spec code.
"""
out: Dict[str, str] = dict()
for k, v in conf.items():
if isinstance(v, str) and (v.startswith("0x") or k == 'PRESET_BASE'):
# Represent byte data with string, to avoid misinterpretation as big-endian int.
# Everything is either byte data or an integer, with PRESET_BASE as one exception.
out[k] = f"'{v}'"
else:
out[k] = str(int(v))
return out
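# Illustrative behaviour of parse_config_vars (example config values are assumptions):
#     parse_config_vars({'PRESET_BASE': 'mainnet',
#                        'GENESIS_FORK_VERSION': '0x00000000',
#                        'MIN_GENESIS_ACTIVE_VALIDATOR_COUNT': '16384'})
#     # -> {'PRESET_BASE': "'mainnet'",
#     #     'GENESIS_FORK_VERSION': "'0x00000000'",
#     #     'MIN_GENESIS_ACTIVE_VALIDATOR_COUNT': '16384'}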
def load_preset(preset_files: Sequence[Path]) -> Dict[str, str]:
"""
    Loads a directory of preset files and merges the result into one preset.
"""
preset: Dict[str, str] = {}
for fork_file in preset_files:
yaml = YAML(typ='base')
fork_preset: dict = yaml.load(fork_file)
if fork_preset is None: # for empty YAML files
continue
if not set(fork_preset.keys()).isdisjoint(preset.keys()):
duplicates = set(fork_preset.keys()).intersection(set(preset.keys()))
raise Exception(f"duplicate config var(s) in preset files: {', '.join(duplicates)}")
preset.update(fork_preset)
assert preset != {}
return parse_config_vars(preset)
def load_config(config_path: Path) -> Dict[str, str]:
"""
Loads the given configuration file.
"""
yaml = YAML(typ='base')
config_data = yaml.load(config_path)
return parse_config_vars(config_data)
def build_spec(preset_name: str, fork: str, source_files: Sequence[Path], preset_files: Sequence[Path],
config_file: Path, spec_config: SpecConfig) -> str:
preset = load_preset(preset_files)
config = load_config(config_file)
all_specs = [get_spec(spec, preset, config) for spec in source_files]
python_prefixes = []
for path in spec_config.py_prefix:
with open(path) as file:
python_prefixes.append(file.read())
python_suffixes = []
for path in spec_config.py_suffix:
with open(path) as file:
python_suffixes.append(file.read())
spec_object = SpecObject(
functions={},
protocols={},
custom_types={},
vars={},
config_vars={},
ssz_objects={},
dataclasses={},
)
for value in all_specs:
spec_object = combine_spec_objects(spec_object, value)
for function in spec_config.overridden_functions:
if function in spec_object.functions:
del spec_object.functions[function]
class_objects = {**spec_object.ssz_objects, **spec_object.dataclasses}
dependency_order_class_objects(class_objects, spec_object.custom_types)
return objects_to_spec(preset_name, spec_object, fork, class_objects, spec_config,
python_prefixes, python_suffixes)
def combine_spec_configs(spec_config1: SpecConfig, spec_config2: SpecConfig) -> SpecConfig:
return SpecConfig(
md_files=spec_config1.md_files + spec_config2.md_files,
py_prefix=spec_config1.py_prefix + spec_config2.py_prefix,
py_suffix=spec_config1.py_suffix + spec_config2.py_suffix,
overridden_functions=spec_config1.overridden_functions + spec_config2.overridden_functions,
predefined_vars=combine_dicts(spec_config1.predefined_vars, spec_config2.predefined_vars),
)
def load_spec_config(spec_dot_yaml_path: Path) -> SpecConfig:
yaml = YAML(typ='base')
spec_dot_yaml = yaml.load(spec_dot_yaml_path)
if "depends" in spec_dot_yaml:
spec_config = load_spec_config(spec_dot_yaml_path.parent / spec_dot_yaml["depends"])
else:
spec_config = SpecConfig(
md_files=[],
py_prefix=[],
py_suffix=[],
overridden_functions=[],
predefined_vars={},
)
def amend_paths(paths: List[Path]) -> List[Path]:
return [spec_dot_yaml_path.parent / path for path in paths]
this_spec_config = SpecConfig(
md_files=amend_paths(spec_dot_yaml.get("md_files", [])),
py_prefix=amend_paths(spec_dot_yaml.get("py_prefix", [])),
py_suffix=amend_paths(spec_dot_yaml.get("py_suffix", [])),
overridden_functions=spec_dot_yaml.get("overridden_functions", []),
predefined_vars=spec_dot_yaml.get("predefined_vars", {}),
)
return combine_spec_configs(spec_config, this_spec_config)
class BuildTarget(NamedTuple):
name: str
preset_paths: List[Path]
config_path: Path
class PySpecCommand(Command):
"""Convert spec markdown files to a spec python file"""
description = "Convert spec markdown files to a spec python file"
spec_fork: str
md_doc_paths: List[Path]
parsed_md_doc_paths: List[str]
build_targets: str
parsed_build_targets: List[BuildTarget]
out_dir: str
spec_dir: Optional[Path]
spec_config: SpecConfig
# The format is (long option, short option, description).
user_options = [
('spec-fork=', None, "Spec fork to tag build with. Used to select spec-dir default."),
('build-targets=', None, "Names, directory paths of compile-time presets, and default config paths."),
('out-dir=', None, "Output directory to write spec package to"),
('spec-dir=', None, "Directory to find specification in")
]
def initialize_options(self) -> None:
"""Set default values for options."""
# Each user option must be listed here with their default value.
self.spec_fork = PHASE0
self.out_dir = 'pyspec_output'
self.build_targets = """
minimal:presets/minimal:configs/minimal.yaml
mainnet:presets/mainnet:configs/mainnet.yaml
"""
self.spec_dir = None
def finalize_options(self) -> None:
"""Post-process options."""
if self.spec_dir is None:
if self.spec_fork == PHASE0:
self.spec_dir = Path("specs/phase0/")
elif self.spec_fork == ALTAIR:
self.spec_dir = Path("specs/altair/")
elif self.spec_fork == MERGE:
self.spec_dir = Path("specs/merge/")
else:
raise Exception('spec dir not specified and spec fork "%s" is unknown', self.spec_fork)
self.spec_config = load_spec_config(self.spec_dir / "spec.yaml")
self.md_doc_paths = [path for path in self.spec_config.md_files]
for filename in self.md_doc_paths:
if not os.path.exists(filename):
raise Exception('Pyspec markdown input file "%s" does not exist.' % filename)
self.parsed_build_targets = []
for target in self.build_targets.split():
target = target.strip()
data = target.split(':')
if len(data) != 3:
raise Exception('invalid target, expected "name:preset_dir:config_file" format, but got: %s' % target)
name, preset_dir_path, config_path = data
if any((c not in string.digits + string.ascii_letters) for c in name):
raise Exception('invalid target name: "%s"' % name)
if not os.path.exists(preset_dir_path):
raise Exception('Preset dir "%s" does not exist' % preset_dir_path)
_, _, preset_file_names = next(os.walk(preset_dir_path))
preset_paths = [(Path(preset_dir_path) / name) for name in preset_file_names]
if not os.path.exists(config_path):
raise Exception('Config file "%s" does not exist' % config_path)
self.parsed_build_targets.append(BuildTarget(name, preset_paths, Path(config_path)))
def run(self) -> None:
if not self.dry_run:
dir_util.mkpath(self.out_dir)
for (name, preset_paths, config_path) in self.parsed_build_targets:
spec_str = build_spec(name, self.spec_fork, self.md_doc_paths, preset_paths,
config_path, self.spec_config)
if self.dry_run:
self.announce('dry run successfully prepared contents for spec.'
f' out dir: "{self.out_dir}", spec fork: "{self.spec_fork}", build target: "{name}"')
self.debug_print(spec_str)
else:
with open(os.path.join(self.out_dir, name + '.py'), 'w') as out:
out.write(spec_str)
if not self.dry_run:
with open(os.path.join(self.out_dir, '__init__.py'), 'w') as out:
# `mainnet` is the default spec.
out.write("from . import mainnet as spec # noqa:F401\n")
class BuildPyCommand(build_py):
"""Customize the build command to run the spec-builder on setup.py build"""
def initialize_options(self) -> None:
super(BuildPyCommand, self).initialize_options()
def run_pyspec_cmd(self, spec_fork: str, **opts: Dict) -> None:
cmd_obj: PySpecCommand = self.distribution.reinitialize_command("pyspec")
cmd_obj.spec_fork = spec_fork
cmd_obj.out_dir = os.path.join(self.build_lib, 'eth2spec', spec_fork)
for k, v in opts.items():
setattr(cmd_obj, k, v)
self.run_command('pyspec')
def run(self) -> None:
for fork in specs:
self.run_pyspec_cmd(spec_fork=fork)
super(BuildPyCommand, self).run()
class PyspecDevCommand(Command):
"""Build the markdown files in-place to their source location for testing."""
description = "Build the markdown files in-place to their source location for testing."
user_options: List[Tuple[str, Optional[str], str]] = []
def initialize_options(self) -> None:
pass
def finalize_options(self) -> None:
pass
def run_pyspec_cmd(self, spec_fork: str, **opts: Dict) -> None:
cmd_obj: PySpecCommand = self.distribution.reinitialize_command("pyspec")
cmd_obj.spec_fork = spec_fork
eth2spec_dir = convert_path(self.distribution.package_dir['eth2spec'])
cmd_obj.out_dir = os.path.join(eth2spec_dir, spec_fork)
for k, v in opts.items():
setattr(cmd_obj, k, v)
self.run_command('pyspec')
def run(self) -> None:
print("running build_py command")
for fork in specs:
self.run_pyspec_cmd(spec_fork=fork)
commands = {
'pyspec': PySpecCommand,
'build_py': BuildPyCommand,
'pyspecdev': PyspecDevCommand,
}
with open("README.md", "rt", encoding="utf8") as f:
readme = f.read()
# How to use "VERSION.txt" file:
# - dev branch contains "X.Y.Z.dev", where "X.Y.Z" is the target version to release dev into.
# -> Changed as part of 'master' backport to 'dev'
# - master branch contains "X.Y.Z", where "X.Y.Z" is the current version.
# -> Changed as part of 'dev' release (or other branch) into 'master'
# -> In case of a commit on master without git tag, target the next version
# with ".postN" (release candidate, numbered) suffixed.
# See https://www.python.org/dev/peps/pep-0440/#public-version-identifiers
with open(os.path.join('tests', 'core', 'pyspec', 'eth2spec', 'VERSION.txt')) as f:
spec_version = f.read().strip()
setup(
name='eth2spec',
version=spec_version,
description="Eth2 spec, provided as Python package for tooling and testing",
long_description=readme,
long_description_content_type="text/markdown",
author="ethereum",
url="https://github.com/ethereum/eth2.0-specs",
include_package_data=False,
package_data={'configs': ['*.yaml'],
'presets': ['*.yaml'],
'specs': ['**/*.md'],
'eth2spec': ['VERSION.txt']},
package_dir={
"eth2spec": "tests/core/pyspec/eth2spec",
"configs": "configs",
"presets": "presets",
"specs": "specs",
},
packages=find_packages(where='tests/core/pyspec') + ['configs', 'specs'],
py_modules=["eth2spec"],
cmdclass=commands,
python_requires=">=3.8, <4",
extras_require={
"test": ["pytest>=4.4", "pytest-cov", "pytest-xdist"],
"lint": ["flake8==3.7.7", "mypy==0.812"],
"generator": ["python-snappy==0.5.4"],
},
install_requires=[
"eth-utils>=1.3.0,<2",
"eth-typing>=2.1.0,<3.0.0",
"pycryptodome==3.9.4",
"py_ecc==5.2.0",
"milagro_bls_binding==1.6.3",
"dataclasses==0.6",
"remerkleable==0.1.21",
RUAMEL_YAML_VERSION,
"lru-dict==1.1.6",
MARKO_VERSION,
]
)
|
the-stack_106_26845 | ################################# LICENSE ##################################
# Copyright (c) 2009, South African Astronomical Observatory (SAAO) #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer #
# in the documentation and/or other materials provided with the #
# distribution. #
# * Neither the name of the South African Astronomical Observatory #
# (SAAO) nor the names of its contributors may be used to endorse #
# or promote products derived from this software without specific #
# prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE SAAO ''AS IS'' AND ANY EXPRESS OR #
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #
# DISCLAIMED. IN NO EVENT SHALL THE SAAO BE LIABLE FOR ANY #
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL #
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS #
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
############################################################################
"""SLOTUTC is a tool to fix the UTC time in SALTICAM slot mode imaging. Because
of a timing errror in OS being used, the clock loses time between seconds and
the UTC time is inaccurate between updates of the system clock (which is
currently updated at the beginning of each second.
As such this program reads in a set of slotmode data and calculates what the
true exposure plus deadtime is. Once calculated, it will go through the images
and correct the UTC times to the correct value. This process assumes that the
UTC time for an image right after the second turns is a fiducial time. In
addition, the program will fix the timing for a series of frames where the
timing of the frames is based on the reading out the wrong number of frames.
20080119
* Fixed bug that wrote negative times
20080217
* Added the addition fixed to the timing
.. note::
This information belongs in the SVN logfile.
"""
# Ensure python 2.5 compatibility
import os
import sys
import time
import re
from pyraf import iraf
import saltsafekey
import saltsafeio
import salttime
import slottool
import pyfits
import numpy as np
from saltsafelog import logging
from salterror import SaltError, SaltIOError
# Make sure the plotting functions work with an older version of matplotlib
try:
import matplotlib.pyplot as plt
except ImportError:
import matplotlib.pylab as plt
def slotutcfix(images,update,outfile,ampperccd,ignorexp,droplimit,inter,plotdata,logfile,verbose,debug):
with logging(logfile,debug) as log:
# set up the variables
utc_list = []
# is the input file specified?
saltsafeio.filedefined('Input',images)
# if the input file is a list, does it exist?
if images[0] == '@':
saltsafeio.listexists('Input',images)
# parse list of input files and place them in order
infiles=saltsafeio.listparse('Raw image',images,'','','')
infiles.sort()
# check input files exist
saltsafeio.filesexist(infiles,'','r')
# check to see if the output file exists and if so, clobber it
if os.path.isfile(outfile):
try:
os.remove(outfile)
except:
raise SaltIOError('File ' + outfile + ' can not be removed')
# open the outfile
if outfile:
try:
fout=open(outfile,'w')
except:
raise SaltIOError('File ' + outfile + ' can not be opened')
# get time of first exposure and basic information about the observations
infile=infiles[0]
struct=saltsafeio.openfits(infile)
# check to make sure slotmode data
detmode=saltsafekey.get('DETMODE',struct[0], infile)
if detmode != 'Slot Mode':
raise SaltIOError('Data are not Slot Mode Observations')
# Check to see if SLOTUTCFIX has already been run
# and print a warning if they have
if saltsafekey.found('SLOTUTC', struct[0]):
message='Data have already been processed by SLOTUTCFIX'
log.warning(message)
# check to make sure that it is the right version of the software
scamver=saltsafekey.get('DETSWV', struct[0], infile)
try:
scamver=float(scamver.split('-')[-1])
if 4.42 <= scamver <= 5.00:
pass
else:
raise SaltError('cannot currently correct this version of the SCAM software.')
except:
raise SaltError('Not able to read software version')
# requested exposure time
req_texp=saltsafekey.get('EXPTIME',struct[0],infile)
# how many extensions?
nextend=saltsafekey.get('NEXTEND',struct[0],infile)
# how many amplifiers
amplifiers=saltsafekey.get('NCCDS',struct[0],infile)
amplifiers = int(ampperccd*float(amplifiers))
if ampperccd>0:
nframes = nextend/amplifiers
nstep=amplifiers
else:
nframes = nextend
nstep=1
# how many total frame and unique times
ntotal=nextend*len(infiles)
nunique=len(infiles)*nframes-ignorexp+1
# Create arrays necessary for analysis
id_arr=np.arange(nunique)
utc_arr=np.zeros(nunique,dtype=float)
# Read in each file and make a list of the UTC values
if verbose:
log.message('Reading in files to create list of UTC values.')
j=0
for n,infile in enumerate(infiles):
# Show progress
if verbose:
percent=100.*float(n)/float(len(infiles))
ctext='Percentage Complete: %.2f\r' % percent
sys.stdout.write(ctext)
sys.stdout.flush()
struct=saltsafeio.openfits(infile)
if not len(struct)-1==nextend:
raise SaltIOError(infile,' has a different number of extensions from the first file')
# Skip through the frames and read in the utc
istart=1
if infile==infiles[0]:
istart=ignorexp*amplifiers+1
for i in range(istart,len(struct), amplifiers):
try:
utc_list.append(saltsafekey.get('UTC-OBS', struct[i], infile))
utc_arr[j]=slottool.getobstime(struct[i], infile)
j += 1
except Exception as e:
raise SaltIOError('Unable to create array of UTC times. Please check the number of extensions in the files')
# close FITS file
saltsafeio.closefits(struct)
# set up the other important arrays
try:
diff_arr=utc_arr.copy()
diff_arr[1:]=diff_arr[1:]-utc_arr[:-1]
diff_arr[0]=-1
dsec_arr=utc_arr-utc_arr.astype(int)
except:
raise SaltIOError('Unable to create timing arrays')
# calculate the real exposure time
if verbose:
log.message('Calculating real exposure time.')
real_expt, med_expt, t_start, t_arr, ysum_arr=calculate_realexptime(id_arr, utc_arr, dsec_arr, diff_arr, req_texp, utc_list)
# plot the results
if plotdata:
if verbose:
log.message('Plotting data.')
plt.ion()
plt.plot(t_arr,ysum_arr,linewidth=0.5,linestyle='-',marker='',color='b')
plt.xlabel('Time (s)')
plt.ylabel('Fit')
# Calculate the corrrect values
if verbose:
log.message('Calculating correct values')
i_start = abs(utc_arr-t_start).argmin()
t_diff=utc_arr*0.0+real_expt
nd=utc_arr*0.0
ndrop=0
for i in range(len(utc_arr)):
if utc_arr[i] >= t_start:
t_new=t_start+real_expt*(i-i_start+ndrop)
t_diff[i]=utc_arr[i]-t_new
while (t_diff[i]>real_expt and nd[i] < droplimit):
nd[i]+= 1
t_new=t_start+real_expt*(i-i_start+ndrop+nd[i])
t_diff[i]=utc_arr[i]-t_new
if (nd[i]<droplimit):
ndrop += nd[i]
else:
t_new=t_start+real_expt*(i-i_start)
t_diff[i]=utc_arr[i]-t_new
while (t_diff[i]>real_expt and nd[i] < droplimit):
nd[i]+= 1
t_new=t_start+real_expt*(i-i_start-nd[i])
t_diff[i]=utc_arr[i]-t_new
        # calculate the corrected timestamp by counting 6 recorded files forward and
        # 8 recorded + unrecorded files back--or just 8*t_exp forward.
# if the object is near the end of the run, then just replace it with
# the correct value assuming no dropped exposures.
# first make the array of new times
new_arr=utc_arr-t_diff
# Next loop through them to find the corrected time
corr_arr=utc_arr*0.0
for i in range(len(new_arr)):
if i+6 < len(new_arr)-1:
corr_arr[i]=new_arr[i+6]-8*real_expt
else:
corr_arr[i]=new_arr[i]-2*real_expt
t_diff=utc_arr-corr_arr
# write out the first results
msg="Dwell Time=%5.3f Requested Exposure Time=%5.3f Nobs = %i Dropped = %i" % (real_expt, req_texp, nunique, ndrop)
if verbose:
log.message(msg)
if outfile:
fout.write('#'+msg+'\n')
fout.write('#%23s %2s %12s %12s %10s %8s %4s \n' % ('File', 'N', 'UTC_old', 'UTC_new', 'UTC_new(s)', 'Diff', 'drop' ))
# Give the user a chance to update the value
if inter:
message='Update headers with a dwell time of %5.3f s [y/n]? ' % real_expt
update=saltsafeio.yn_ask(message)
if not update:
message='Set Dwell Time manually [y/n]? '
update=saltsafeio.yn_ask(message)
if update:
message='New Dwell Time: '
real_expt=saltsafeio.ask(message)
try:
real_expt=float(real_expt)
except Exception as e:
msg='Could not set user dwell time because %s' % e
raise SaltError(msg)
# If requested, update the UTC times
if update or outfile:
if verbose:
log.message('Updating UTC times')
j=0
for n,infile in enumerate(infiles):
# Show progress
if verbose:
percent=100.*float(n)/float(len(infiles))
ctext='Percentage Complete: %.2f\r' % percent
sys.stdout.write(ctext)
sys.stdout.flush()
struct=saltsafeio.openupdatefits(infile)
# Skip through the frames and read in the utc
istart=1
if infile==infiles[0]:
istart=ignorexp*amplifiers+1
for i in range(istart,len(struct), amplifiers):
for k in range(0,amplifiers):
if update:
struct[i+k]=updateheaders(struct[i+k],i+k, t_diff[j], real_expt, utc_list[j], infile)
if outfile:
utc_new=saltsafekey.get('UTC-OBS', struct[i+k], infile)
utc_new_sec=slottool.getobstime(struct[i], infile)
fout.write('%25s %2i %12s %12s %7.3f %5.4f %4i \n' % (infile, i, utc_list[j], utc_new, utc_new_sec, t_diff[j], nd[j] ))
j += 1
# Housekeeping key words
if update:
history = 'SALTUTCFIX -- '
history += 'images='+infile+' '
saltsafekey.housekeeping(struct[0],'SLOTUTC','UTC has been corrected',history,infile)
# update fits file
if update:
saltsafeio.updatefits(struct)
saltsafeio.closefits(struct)
# close outfile
if outfile:
fout.close()
# Keep plot window open
if plotdata:
plt.show()
def updateheaders(struct, ext, tdiff, real_expt, utc, infile):
# exit if tdiff wasn't updated
if tdiff == real_expt:
msg='No adequate correction found for frame %i in file %s' % (ext, infile)
raise SaltError(msg)
# calculate the new utc value
try:
ntime=salttime.sex2dec(utc)
ntime=ntime-tdiff/3600.0
newutc=salttime.dec2sex(ntime)
except Exception as e:
msg='Could not update UTC in %i header of image %s because %s' % (ext, infile, e)
raise SaltError(msg)
# update the headers
if utc==saltsafekey.get('UTC-OBS', struct):
expt_string='%5.4f' % real_expt
td_string='%5.4f' % tdiff
if not saltsafekey.found('DUTC', struct):
try:
saltsafekey.put('UTC-OBS', newutc, struct, infile)
saltsafekey.put('TIME-OBS', newutc, struct, infile)
saltsafekey.new('DWETIME', expt_string, 'Dwell Time', struct, infile)
saltsafekey.new('DUTC', td_string, 'Change in UTC time', struct, infile)
except Exception as e:
msg='Could not update %i header of image %s because %s' % (ext, infile, e)
raise SaltIOError(msg)
else:
try:
saltsafekey.put('UTC-OBS', newutc, struct, infile)
saltsafekey.put('TIME-OBS', newutc, struct, infile)
saltsafekey.put('DWETIME', real_expt, struct, infile)
saltsafekey.put('DUTC', tdiff, struct, infile)
except Exception as e:
msg='Could not update %i header of image %s because %s' % (ext, infile, e)
raise SaltError(msg)
else:
raise SaltIOError('Frame missing from list of times')
return struct
def calculate_realexptime(id_arr, utc_arr, dsec_arr, diff_arr, req_texp, utc_list):
"""Calculates the real exposure time.
This makes the following assumptions:
#. That the measurement after the turn of the second is a fiducial
#. That there is an integer number of frames between each fiducial exposure
    #. We then set up a metric which is Y=np.sum(abs(i-round(i))) where i=dt/t_exp
#. Then the minimum of Y is found between the requested exposure time and the median time difference
#. And the best exposure time is the time at that minimum
    returns the real exposure time, the median time difference, the start time, and the fit arrays
"""
t_exp=0
# calculate the median time
try:
t_wrong=np.median(diff_arr)
except:
raise SaltError('Unable to calculate median time difference')
# Compress the arrays to find those closest to the second mark
mask=(dsec_arr<t_wrong)*(diff_arr>0)
t=np.compress(mask,utc_arr)
s=np.compress(mask,dsec_arr)
id=np.compress(mask,id_arr)
# Now set up the components in the equation
try:
t_start=t[0]
dt=t[1:]-t[0]
except Exception as e:
msg='Unable to set up necessary arrays because %s' % e
raise SaltError(msg)
# Now find the one that minimizes the equation y=np.sum(i-int(i))
t_max=t_wrong*1.2
if t_max < req_texp+0.05:
t_max=req_texp+0.15
try:
t_arr, ysum_arr=find_real_time(dt, req_texp, t_max)
t_real=t_arr[ysum_arr.argmin()]
except Exception as e:
msg='Unable to calculate real dwell time because %s' % e
raise SaltError(msg)
return t_real, t_wrong, t_start, t_arr, ysum_arr
def find_real_time(dt, t_min, t_max):
"""Find the real exposure+dead time
    returns arrays of trial exposure times and the corresponding merit values
"""
t_e=np.arange(t_min,t_max,0.0001)
ysum=t_e*0.0
for i in range(len(t_e)):
ysum[i]=ntime_func(dt, t_e[i])
return t_e, ysum
def ntime_func(dt, t_e):
"""Merit function to determine best time
Weighted for the number of objects in each step
return float
"""
y=0
for j in range(len(dt)):
i=dt[j]/t_e
y += abs(i-round(i))
return y
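# --- illustrative sketch of the merit function (values are assumptions) ---------
# If the true dwell time were 0.7 s, the fiducial time differences are close to
# integer multiples of it, so y = sum(|i - round(i)|) with i = dt/t_e dips at the
# true value:
#
#     dt = np.array([0.7 * k for k in (3, 10, 27, 41)])
#     ntime_func(dt, 0.70)    # ~0.0 : every i lands on an integer
#     ntime_func(dt, 0.68)    # noticeably larger, so 0.70 wins in find_real_time
# ---------------------------------------------------------------------------------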
# -----------------------------------------------------------
# main code
parfile = iraf.osfn("slottools$slotutcfix.par")
t = iraf.IrafTaskFactory(taskname="slotutcfix",value=parfile,function=slotutcfix,pkgname='slottools')
|
the-stack_106_26846 | # Python test set -- built-in functions
import ast
import builtins
import collections
import decimal
import fractions
import io
import locale
import os
import pickle
import platform
import random
import re
import sys
import traceback
import types
import unittest
import warnings
from contextlib import ExitStack
from operator import neg
from test.support import (
EnvironmentVarGuard, TESTFN, check_warnings, swap_attr, unlink)
from test.support import check_impl_detail, cpython_only
from test.support.script_helper import assert_python_ok
from unittest.mock import MagicMock, patch
try:
import pty, signal
except ImportError:
pty = signal = None
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self): return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n += 1
return self.sofar[i]
class StrSquares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self):
return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max:
raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(str(n*n))
n += 1
return self.sofar[i]
class BitBucket:
def write(self, line):
pass
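# Tables of (input string, expected result) pairs for integer-conversion
# checks; an exception class as the expected result means the conversion
# should raise it.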
test_conv_no_sign = [
('0', 0),
('1', 1),
('9', 9),
('10', 10),
('99', 99),
('100', 100),
('314', 314),
(' 314', 314),
('314 ', 314),
(' \t\t 314 \t\t ', 314),
(repr(sys.maxsize), sys.maxsize),
(' 1x', ValueError),
(' 1 ', 1),
(' 1\02 ', ValueError),
('', ValueError),
(' ', ValueError),
(' \t\t ', ValueError),
(str(br'\u0663\u0661\u0664 ','raw-unicode-escape'), 314),
(chr(0x200), ValueError),
]
test_conv_sign = [
('0', 0),
('1', 1),
('9', 9),
('10', 10),
('99', 99),
('100', 100),
('314', 314),
(' 314', ValueError),
('314 ', 314),
(' \t\t 314 \t\t ', ValueError),
(repr(sys.maxsize), sys.maxsize),
(' 1x', ValueError),
(' 1 ', ValueError),
(' 1\02 ', ValueError),
('', ValueError),
(' ', ValueError),
(' \t\t ', ValueError),
(str(br'\u0663\u0661\u0664 ','raw-unicode-escape'), 314),
(chr(0x200), ValueError),
]
class TestFailingBool:
def __bool__(self):
raise RuntimeError
class TestFailingIter:
def __iter__(self):
raise RuntimeError
def filter_char(arg):
return ord(arg) > ord("d")
def map_char(arg):
return chr(ord(arg)+1)
class BuiltinTest(unittest.TestCase):
# Helper to check picklability
def check_iter_pickle(self, it, seq, proto):
itorg = it
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(type(itorg), type(it))
self.assertEqual(list(it), seq)
# test the iterator after dropping one item from it
it = pickle.loads(d)
try:
next(it)
except StopIteration:
return
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(list(it), seq[1:])
def test_import(self):
__import__('sys')
__import__('time')
__import__('string')
__import__(name='sys')
__import__(name='time', level=0)
self.assertRaises(ImportError, __import__, 'spamspam')
self.assertRaises(TypeError, __import__, 1, 2, 3, 4)
self.assertRaises(ValueError, __import__, '')
self.assertRaises(TypeError, __import__, 'sys', name='sys')
# Relative import outside of a package with no __package__ or __spec__ (bpo-37409).
with self.assertWarns(ImportWarning):
self.assertRaises(ImportError, __import__, '',
{'__package__': None, '__spec__': None, '__name__': '__main__'},
locals={}, fromlist=('foo',), level=1)
# embedded null character
self.assertRaises(ModuleNotFoundError, __import__, 'string\x00')
def test_abs(self):
# int
self.assertEqual(abs(0), 0)
self.assertEqual(abs(1234), 1234)
self.assertEqual(abs(-1234), 1234)
self.assertTrue(abs(-sys.maxsize-1) > 0)
# float
self.assertEqual(abs(0.0), 0.0)
self.assertEqual(abs(3.14), 3.14)
self.assertEqual(abs(-3.14), 3.14)
# str
self.assertRaises(TypeError, abs, 'a')
# bool
self.assertEqual(abs(True), 1)
self.assertEqual(abs(False), 0)
# other
self.assertRaises(TypeError, abs)
self.assertRaises(TypeError, abs, None)
class AbsClass(object):
def __abs__(self):
return -5
self.assertEqual(abs(AbsClass()), -5)
def test_all(self):
self.assertEqual(all([2, 4, 6]), True)
self.assertEqual(all([2, None, 6]), False)
self.assertRaises(RuntimeError, all, [2, TestFailingBool(), 6])
self.assertRaises(RuntimeError, all, TestFailingIter())
self.assertRaises(TypeError, all, 10) # Non-iterable
self.assertRaises(TypeError, all) # No args
self.assertRaises(TypeError, all, [2, 4, 6], []) # Too many args
self.assertEqual(all([]), True) # Empty iterator
self.assertEqual(all([0, TestFailingBool()]), False)# Short-circuit
S = [50, 60]
self.assertEqual(all(x > 42 for x in S), True)
S = [50, 40, 60]
self.assertEqual(all(x > 42 for x in S), False)
def test_any(self):
self.assertEqual(any([None, None, None]), False)
self.assertEqual(any([None, 4, None]), True)
self.assertRaises(RuntimeError, any, [None, TestFailingBool(), 6])
self.assertRaises(RuntimeError, any, TestFailingIter())
self.assertRaises(TypeError, any, 10) # Non-iterable
self.assertRaises(TypeError, any) # No args
self.assertRaises(TypeError, any, [2, 4, 6], []) # Too many args
self.assertEqual(any([]), False) # Empty iterator
self.assertEqual(any([1, TestFailingBool()]), True) # Short-circuit
S = [40, 60, 30]
self.assertEqual(any(x > 42 for x in S), True)
S = [10, 20, 30]
self.assertEqual(any(x > 42 for x in S), False)
def test_ascii(self):
self.assertEqual(ascii(''), '\'\'')
self.assertEqual(ascii(0), '0')
self.assertEqual(ascii(()), '()')
self.assertEqual(ascii([]), '[]')
self.assertEqual(ascii({}), '{}')
a = []
a.append(a)
self.assertEqual(ascii(a), '[[...]]')
a = {}
a[0] = a
self.assertEqual(ascii(a), '{0: {...}}')
# Advanced checks for unicode strings
def _check_uni(s):
self.assertEqual(ascii(s), repr(s))
_check_uni("'")
_check_uni('"')
_check_uni('"\'')
_check_uni('\0')
_check_uni('\r\n\t .')
# Unprintable non-ASCII characters
_check_uni('\x85')
_check_uni('\u1fff')
_check_uni('\U00012fff')
# Lone surrogates
_check_uni('\ud800')
_check_uni('\udfff')
# Issue #9804: surrogates should be joined even for printable
# wide characters (UCS-2 builds).
self.assertEqual(ascii('\U0001d121'), "'\\U0001d121'")
# All together
s = "'\0\"\n\r\t abcd\x85é\U00012fff\uD800\U0001D121xxx."
self.assertEqual(ascii(s),
r"""'\'\x00"\n\r\t abcd\x85\xe9\U00012fff\ud800\U0001d121xxx.'""")
def test_neg(self):
x = -sys.maxsize-1
self.assertTrue(isinstance(x, int))
self.assertEqual(-x, sys.maxsize+1)
def test_callable(self):
self.assertTrue(callable(len))
self.assertFalse(callable("a"))
self.assertTrue(callable(callable))
self.assertTrue(callable(lambda x, y: x + y))
self.assertFalse(callable(__builtins__))
def f(): pass
self.assertTrue(callable(f))
class C1:
def meth(self): pass
self.assertTrue(callable(C1))
c = C1()
self.assertTrue(callable(c.meth))
self.assertFalse(callable(c))
# __call__ is looked up on the class, not the instance
c.__call__ = None
self.assertFalse(callable(c))
c.__call__ = lambda self: 0
self.assertFalse(callable(c))
del c.__call__
self.assertFalse(callable(c))
class C2(object):
def __call__(self): pass
c2 = C2()
self.assertTrue(callable(c2))
c2.__call__ = None
self.assertTrue(callable(c2))
class C3(C2): pass
c3 = C3()
self.assertTrue(callable(c3))
def test_chr(self):
self.assertEqual(chr(32), ' ')
self.assertEqual(chr(65), 'A')
self.assertEqual(chr(97), 'a')
self.assertEqual(chr(0xff), '\xff')
self.assertRaises(ValueError, chr, 1<<24)
self.assertEqual(chr(sys.maxunicode),
str('\\U0010ffff'.encode("ascii"), 'unicode-escape'))
self.assertRaises(TypeError, chr)
self.assertEqual(chr(0x0000FFFF), "\U0000FFFF")
self.assertEqual(chr(0x00010000), "\U00010000")
self.assertEqual(chr(0x00010001), "\U00010001")
self.assertEqual(chr(0x000FFFFE), "\U000FFFFE")
self.assertEqual(chr(0x000FFFFF), "\U000FFFFF")
self.assertEqual(chr(0x00100000), "\U00100000")
self.assertEqual(chr(0x00100001), "\U00100001")
self.assertEqual(chr(0x0010FFFE), "\U0010FFFE")
self.assertEqual(chr(0x0010FFFF), "\U0010FFFF")
self.assertRaises(ValueError, chr, -1)
self.assertRaises(ValueError, chr, 0x00110000)
self.assertRaises((OverflowError, ValueError), chr, 2**32)
def test_cmp(self):
self.assertTrue(not hasattr(builtins, "cmp"))
def test_compile(self):
compile('print(1)\n', '', 'exec')
bom = b'\xef\xbb\xbf'
compile(bom + b'print(1)\n', '', 'exec')
compile(source='pass', filename='?', mode='exec')
compile(dont_inherit=0, filename='tmp', source='0', mode='eval')
compile('pass', '?', dont_inherit=1, mode='exec')
compile(memoryview(b"text"), "name", "exec")
self.assertRaises(TypeError, compile)
self.assertRaises(ValueError, compile, 'print(42)\n', '<string>', 'badmode')
self.assertRaises(ValueError, compile, 'print(42)\n', '<string>', 'single', 0xff)
self.assertRaises(ValueError, compile, chr(0), 'f', 'exec')
self.assertRaises(TypeError, compile, 'pass', '?', 'exec',
mode='eval', source='0', filename='tmp')
compile('print("\xe5")\n', '', 'exec')
self.assertRaises(ValueError, compile, chr(0), 'f', 'exec')
self.assertRaises(ValueError, compile, str('a = 1'), 'f', 'bad')
# test the optimize argument
codestr = '''def f():
"""doc"""
debug_enabled = False
if __debug__:
debug_enabled = True
try:
assert False
except AssertionError:
return (True, f.__doc__, debug_enabled, __debug__)
else:
return (False, f.__doc__, debug_enabled, __debug__)
'''
def f(): """doc"""
values = [(-1, __debug__, f.__doc__, __debug__, __debug__),
(0, True, 'doc', True, True),
(1, False, 'doc', False, False),
(2, False, None, False, False)]
for optval, *expected in values:
# test both direct compilation and compilation via AST
codeobjs = []
codeobjs.append(compile(codestr, "<test>", "exec", optimize=optval))
tree = ast.parse(codestr)
codeobjs.append(compile(tree, "<test>", "exec", optimize=optval))
for code in codeobjs:
ns = {}
exec(code, ns)
rv = ns['f']()
self.assertEqual(rv, tuple(expected))
def test_delattr(self):
sys.spam = 1
delattr(sys, 'spam')
self.assertRaises(TypeError, delattr)
def test_dir(self):
# dir(wrong number of arguments)
self.assertRaises(TypeError, dir, 42, 42)
# dir() - local scope
local_var = 1
self.assertIn('local_var', dir())
# dir(module)
self.assertIn('exit', dir(sys))
# dir(module_with_invalid__dict__)
class Foo(types.ModuleType):
__dict__ = 8
f = Foo("foo")
self.assertRaises(TypeError, dir, f)
# dir(type)
self.assertIn("strip", dir(str))
self.assertNotIn("__mro__", dir(str))
# dir(obj)
class Foo(object):
def __init__(self):
self.x = 7
self.y = 8
self.z = 9
f = Foo()
self.assertIn("y", dir(f))
# dir(obj_no__dict__)
class Foo(object):
__slots__ = []
f = Foo()
self.assertIn("__repr__", dir(f))
# dir(obj_no__class__with__dict__)
# (an ugly trick to cause getattr(f, "__class__") to fail)
class Foo(object):
__slots__ = ["__class__", "__dict__"]
def __init__(self):
self.bar = "wow"
f = Foo()
self.assertNotIn("__repr__", dir(f))
self.assertIn("bar", dir(f))
# dir(obj_using __dir__)
class Foo(object):
def __dir__(self):
return ["kan", "ga", "roo"]
f = Foo()
self.assertTrue(dir(f) == ["ga", "kan", "roo"])
# dir(obj__dir__tuple)
class Foo(object):
def __dir__(self):
return ("b", "c", "a")
res = dir(Foo())
self.assertIsInstance(res, list)
self.assertTrue(res == ["a", "b", "c"])
# dir(obj__dir__not_sequence)
class Foo(object):
def __dir__(self):
return 7
f = Foo()
self.assertRaises(TypeError, dir, f)
# dir(traceback)
try:
raise IndexError
except:
self.assertEqual(len(dir(sys.exc_info()[2])), 4)
# test that object has a __dir__()
self.assertEqual(sorted([].__dir__()), dir([]))
def test_divmod(self):
self.assertEqual(divmod(12, 7), (1, 5))
self.assertEqual(divmod(-12, 7), (-2, 2))
self.assertEqual(divmod(12, -7), (-2, -2))
self.assertEqual(divmod(-12, -7), (1, -5))
self.assertEqual(divmod(-sys.maxsize-1, -1), (sys.maxsize+1, 0))
for num, denom, exp_result in [ (3.25, 1.0, (3.0, 0.25)),
(-3.25, 1.0, (-4.0, 0.75)),
(3.25, -1.0, (-4.0, -0.75)),
(-3.25, -1.0, (3.0, -0.25))]:
result = divmod(num, denom)
self.assertAlmostEqual(result[0], exp_result[0])
self.assertAlmostEqual(result[1], exp_result[1])
self.assertRaises(TypeError, divmod)
def test_eval(self):
self.assertEqual(eval('1+1'), 2)
self.assertEqual(eval(' 1+1\n'), 2)
globals = {'a': 1, 'b': 2}
locals = {'b': 200, 'c': 300}
self.assertEqual(eval('a', globals) , 1)
self.assertEqual(eval('a', globals, locals), 1)
self.assertEqual(eval('b', globals, locals), 200)
self.assertEqual(eval('c', globals, locals), 300)
globals = {'a': 1, 'b': 2}
locals = {'b': 200, 'c': 300}
bom = b'\xef\xbb\xbf'
self.assertEqual(eval(bom + b'a', globals, locals), 1)
self.assertEqual(eval('"\xe5"', globals), "\xe5")
self.assertRaises(TypeError, eval)
self.assertRaises(TypeError, eval, ())
self.assertRaises(SyntaxError, eval, bom[:2] + b'a')
class X:
def __getitem__(self, key):
raise ValueError
self.assertRaises(ValueError, eval, "foo", {}, X())
def test_general_eval(self):
# Tests that general mappings can be used for the locals argument
class M:
"Test mapping interface versus possible calls from eval()."
def __getitem__(self, key):
if key == 'a':
return 12
raise KeyError
def keys(self):
return list('xyz')
m = M()
g = globals()
self.assertEqual(eval('a', g, m), 12)
self.assertRaises(NameError, eval, 'b', g, m)
self.assertEqual(eval('dir()', g, m), list('xyz'))
self.assertEqual(eval('globals()', g, m), g)
self.assertEqual(eval('locals()', g, m), m)
self.assertRaises(TypeError, eval, 'a', m)
class A:
"Non-mapping"
pass
m = A()
self.assertRaises(TypeError, eval, 'a', g, m)
# Verify that dict subclasses work as well
class D(dict):
def __getitem__(self, key):
if key == 'a':
return 12
return dict.__getitem__(self, key)
def keys(self):
return list('xyz')
d = D()
self.assertEqual(eval('a', g, d), 12)
self.assertRaises(NameError, eval, 'b', g, d)
self.assertEqual(eval('dir()', g, d), list('xyz'))
self.assertEqual(eval('globals()', g, d), g)
self.assertEqual(eval('locals()', g, d), d)
# Verify locals stores (used by list comps)
eval('[locals() for i in (2,3)]', g, d)
eval('[locals() for i in (2,3)]', g, collections.UserDict())
class SpreadSheet:
"Sample application showing nested, calculated lookups."
_cells = {}
def __setitem__(self, key, formula):
self._cells[key] = formula
def __getitem__(self, key):
return eval(self._cells[key], globals(), self)
ss = SpreadSheet()
ss['a1'] = '5'
ss['a2'] = 'a1*6'
ss['a3'] = 'a2*7'
self.assertEqual(ss['a3'], 210)
# Verify that dir() catches a non-list returned by eval
# SF bug #1004669
class C:
def __getitem__(self, item):
raise KeyError(item)
def keys(self):
return 1 # used to be 'a' but that's no longer an error
self.assertRaises(TypeError, eval, 'dir()', globals(), C())
def test_exec(self):
g = {}
exec('z = 1', g)
if '__builtins__' in g:
del g['__builtins__']
self.assertEqual(g, {'z': 1})
exec('z = 1+1', g)
if '__builtins__' in g:
del g['__builtins__']
self.assertEqual(g, {'z': 2})
g = {}
l = {}
with check_warnings():
warnings.filterwarnings("ignore", "global statement",
module="<string>")
exec('global a; a = 1; b = 2', g, l)
if '__builtins__' in g:
del g['__builtins__']
if '__builtins__' in l:
del l['__builtins__']
self.assertEqual((g, l), ({'a': 1}, {'b': 2}))
def test_exec_globals(self):
if check_impl_detail():
# strict __builtins__ compliance (CPython)
code = compile("print('Hello World!')", "", "exec")
# no builtin function
self.assertRaisesRegex(NameError, "name 'print' is not defined",
exec, code, {'__builtins__': {}})
# __builtins__ must be a mapping type
self.assertRaises(TypeError,
exec, code, {'__builtins__': 123})
# no __build_class__ function
code = compile("class A: pass", "", "exec")
if True:
self.assertRaisesRegex(NameError, "__build_class__ not found",
exec, code, {'__builtins__': {}})
class frozendict_error(Exception):
pass
class frozendict(dict):
def __setitem__(self, key, value):
raise frozendict_error("frozendict is readonly")
# read-only builtins
if isinstance(__builtins__, types.ModuleType):
frozen_builtins = frozendict(__builtins__.__dict__)
else:
frozen_builtins = frozendict(__builtins__)
code = compile("__builtins__['superglobal']=2; print(superglobal)", "test", "exec")
self.assertRaises(frozendict_error,
exec, code, {'__builtins__': frozen_builtins})
# read-only globals
namespace = frozendict({})
code = compile("x=1", "test", "exec")
self.assertRaises(frozendict_error,
exec, code, namespace)
def test_exec_redirected(self):
savestdout = sys.stdout
sys.stdout = None # Whatever that cannot flush()
try:
# Used to raise SystemError('error return without exception set')
exec('a')
except NameError:
pass
finally:
sys.stdout = savestdout
def test_filter(self):
self.assertEqual(list(filter(lambda c: 'a' <= c <= 'z', 'Hello World')), list('elloorld'))
self.assertEqual(list(filter(None, [1, 'hello', [], [3], '', None, 9, 0])), [1, 'hello', [3], 9])
self.assertEqual(list(filter(lambda x: x > 0, [1, -3, 9, 0, 2])), [1, 9, 2])
self.assertEqual(list(filter(None, Squares(10))), [1, 4, 9, 16, 25, 36, 49, 64, 81])
self.assertEqual(list(filter(lambda x: x%2, Squares(10))), [1, 9, 25, 49, 81])
def identity(item):
return 1
filter(identity, Squares(5))
self.assertRaises(TypeError, filter)
class BadSeq(object):
def __getitem__(self, index):
if index<4:
return 42
raise ValueError
self.assertRaises(ValueError, list, filter(lambda x: x, BadSeq()))
def badfunc():
pass
self.assertRaises(TypeError, list, filter(badfunc, range(5)))
# test bltinmodule.c::filtertuple()
self.assertEqual(list(filter(None, (1, 2))), [1, 2])
self.assertEqual(list(filter(lambda x: x>=3, (1, 2, 3, 4))), [3, 4])
self.assertRaises(TypeError, list, filter(42, (1, 2)))
def test_filter_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f1 = filter(filter_char, "abcdeabcde")
f2 = filter(filter_char, "abcdeabcde")
self.check_iter_pickle(f1, list(f2), proto)
def test_getattr(self):
self.assertTrue(getattr(sys, 'stdout') is sys.stdout)
self.assertRaises(TypeError, getattr, sys, 1)
self.assertRaises(TypeError, getattr, sys, 1, "foo")
self.assertRaises(TypeError, getattr)
self.assertRaises(AttributeError, getattr, sys, chr(sys.maxunicode))
# unicode surrogates are not encodable to the default encoding (utf8)
self.assertRaises(AttributeError, getattr, 1, "\uDAD1\uD51E")
def test_hasattr(self):
self.assertTrue(hasattr(sys, 'stdout'))
self.assertRaises(TypeError, hasattr, sys, 1)
self.assertRaises(TypeError, hasattr)
self.assertEqual(False, hasattr(sys, chr(sys.maxunicode)))
# Check that hasattr propagates all exceptions outside of
# AttributeError.
class A:
def __getattr__(self, what):
raise SystemExit
self.assertRaises(SystemExit, hasattr, A(), "b")
class B:
def __getattr__(self, what):
raise ValueError
self.assertRaises(ValueError, hasattr, B(), "b")
def test_hash(self):
hash(None)
self.assertEqual(hash(1), hash(1))
self.assertEqual(hash(1), hash(1.0))
hash('spam')
self.assertEqual(hash('spam'), hash(b'spam'))
hash((0,1,2,3))
def f(): pass
self.assertRaises(TypeError, hash, [])
self.assertRaises(TypeError, hash, {})
# Bug 1536021: Allow hash to return long objects
class X:
def __hash__(self):
return 2**100
self.assertEqual(type(hash(X())), int)
class Z(int):
def __hash__(self):
return self
self.assertEqual(hash(Z(42)), hash(42))
def test_hex(self):
self.assertEqual(hex(16), '0x10')
self.assertEqual(hex(-16), '-0x10')
self.assertRaises(TypeError, hex, {})
def test_id(self):
id(None)
id(1)
id(1.0)
id('spam')
id((0,1,2,3))
id([0,1,2,3])
id({'spam': 1, 'eggs': 2, 'ham': 3})
# Test input() later, alphabetized as if it were raw_input
def test_iter(self):
self.assertRaises(TypeError, iter)
self.assertRaises(TypeError, iter, 42, 42)
lists = [("1", "2"), ["1", "2"], "12"]
for l in lists:
i = iter(l)
self.assertEqual(next(i), '1')
self.assertEqual(next(i), '2')
self.assertRaises(StopIteration, next, i)
def test_isinstance(self):
class C:
pass
class D(C):
pass
class E:
pass
c = C()
d = D()
e = E()
self.assertTrue(isinstance(c, C))
self.assertTrue(isinstance(d, C))
self.assertTrue(not isinstance(e, C))
self.assertTrue(not isinstance(c, D))
self.assertTrue(not isinstance('foo', E))
self.assertRaises(TypeError, isinstance, E, 'foo')
self.assertRaises(TypeError, isinstance)
def test_issubclass(self):
class C:
pass
class D(C):
pass
class E:
pass
c = C()
d = D()
e = E()
self.assertTrue(issubclass(D, C))
self.assertTrue(issubclass(C, C))
self.assertTrue(not issubclass(C, D))
self.assertRaises(TypeError, issubclass, 'foo', E)
self.assertRaises(TypeError, issubclass, E, 'foo')
self.assertRaises(TypeError, issubclass)
def test_len(self):
self.assertEqual(len('123'), 3)
self.assertEqual(len(()), 0)
self.assertEqual(len((1, 2, 3, 4)), 4)
self.assertEqual(len([1, 2, 3, 4]), 4)
self.assertEqual(len({}), 0)
self.assertEqual(len({'a':1, 'b': 2}), 2)
class BadSeq:
def __len__(self):
raise ValueError
self.assertRaises(ValueError, len, BadSeq())
class InvalidLen:
def __len__(self):
return None
self.assertRaises(TypeError, len, InvalidLen())
class FloatLen:
def __len__(self):
return 4.5
self.assertRaises(TypeError, len, FloatLen())
class NegativeLen:
def __len__(self):
return -10
self.assertRaises(ValueError, len, NegativeLen())
class HugeLen:
def __len__(self):
return sys.maxsize + 1
self.assertRaises(OverflowError, len, HugeLen())
class HugeNegativeLen:
def __len__(self):
return -sys.maxsize-10
self.assertRaises(ValueError, len, HugeNegativeLen())
class NoLenMethod(object): pass
self.assertRaises(TypeError, len, NoLenMethod())
def test_map(self):
self.assertEqual(
list(map(lambda x: x*x, range(1,4))),
[1, 4, 9]
)
try:
from math import sqrt
except ImportError:
def sqrt(x):
return pow(x, 0.5)
self.assertEqual(
list(map(lambda x: list(map(sqrt, x)), [[16, 4], [81, 9]])),
[[4.0, 2.0], [9.0, 3.0]]
)
self.assertEqual(
list(map(lambda x, y: x+y, [1,3,2], [9,1,4])),
[10, 4, 6]
)
def plus(*v):
accu = 0
for i in v: accu = accu + i
return accu
self.assertEqual(
list(map(plus, [1, 3, 7])),
[1, 3, 7]
)
self.assertEqual(
list(map(plus, [1, 3, 7], [4, 9, 2])),
[1+4, 3+9, 7+2]
)
self.assertEqual(
list(map(plus, [1, 3, 7], [4, 9, 2], [1, 1, 0])),
[1+4+1, 3+9+1, 7+2+0]
)
self.assertEqual(
list(map(int, Squares(10))),
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
)
def Max(a, b):
if a is None:
return b
if b is None:
return a
return max(a, b)
self.assertEqual(
list(map(Max, Squares(3), Squares(2))),
[0, 1]
)
self.assertRaises(TypeError, map)
self.assertRaises(TypeError, map, lambda x: x, 42)
class BadSeq:
def __iter__(self):
raise ValueError
yield None
self.assertRaises(ValueError, list, map(lambda x: x, BadSeq()))
def badfunc(x):
raise RuntimeError
self.assertRaises(RuntimeError, list, map(badfunc, range(5)))
def test_map_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
m1 = map(map_char, "Is this the real life?")
m2 = map(map_char, "Is this the real life?")
self.check_iter_pickle(m1, list(m2), proto)
def test_max(self):
self.assertEqual(max('123123'), '3')
self.assertEqual(max(1, 2, 3), 3)
self.assertEqual(max((1, 2, 3, 1, 2, 3)), 3)
self.assertEqual(max([1, 2, 3, 1, 2, 3]), 3)
self.assertEqual(max(1, 2, 3.0), 3.0)
self.assertEqual(max(1, 2.0, 3), 3)
self.assertEqual(max(1.0, 2, 3), 3)
self.assertRaises(TypeError, max)
self.assertRaises(TypeError, max, 42)
self.assertRaises(ValueError, max, ())
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, max, BadSeq())
for stmt in (
"max(key=int)", # no args
"max(default=None)",
"max(1, 2, default=None)", # require container for default
"max(default=None, key=int)",
"max(1, key=int)", # single arg not iterable
"max(1, 2, keystone=int)", # wrong keyword
"max(1, 2, key=int, abc=int)", # two many keywords
"max(1, 2, key=1)", # keyfunc is not callable
):
try:
exec(stmt, globals())
except TypeError:
pass
else:
self.fail(stmt)
self.assertEqual(max((1,), key=neg), 1) # one elem iterable
self.assertEqual(max((1,2), key=neg), 1) # two elem iterable
self.assertEqual(max(1, 2, key=neg), 1) # two elems
self.assertEqual(max((), default=None), None) # zero elem iterable
self.assertEqual(max((1,), default=None), 1) # one elem iterable
self.assertEqual(max((1,2), default=None), 2) # two elem iterable
self.assertEqual(max((), default=1, key=neg), 1)
self.assertEqual(max((1, 2), default=3, key=neg), 1)
data = [random.randrange(200) for i in range(100)]
keys = dict((elem, random.randrange(50)) for elem in data)
f = keys.__getitem__
self.assertEqual(max(data, key=f),
sorted(reversed(data), key=f)[-1])
def test_min(self):
self.assertEqual(min('123123'), '1')
self.assertEqual(min(1, 2, 3), 1)
self.assertEqual(min((1, 2, 3, 1, 2, 3)), 1)
self.assertEqual(min([1, 2, 3, 1, 2, 3]), 1)
self.assertEqual(min(1, 2, 3.0), 1)
self.assertEqual(min(1, 2.0, 3), 1)
self.assertEqual(min(1.0, 2, 3), 1.0)
self.assertRaises(TypeError, min)
self.assertRaises(TypeError, min, 42)
self.assertRaises(ValueError, min, ())
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, min, BadSeq())
for stmt in (
"min(key=int)", # no args
"min(default=None)",
"min(1, 2, default=None)", # require container for default
"min(default=None, key=int)",
"min(1, key=int)", # single arg not iterable
"min(1, 2, keystone=int)", # wrong keyword
"min(1, 2, key=int, abc=int)", # two many keywords
"min(1, 2, key=1)", # keyfunc is not callable
):
try:
exec(stmt, globals())
except TypeError:
pass
else:
self.fail(stmt)
self.assertEqual(min((1,), key=neg), 1) # one elem iterable
self.assertEqual(min((1,2), key=neg), 2) # two elem iterable
self.assertEqual(min(1, 2, key=neg), 2) # two elems
self.assertEqual(min((), default=None), None) # zero elem iterable
self.assertEqual(min((1,), default=None), 1) # one elem iterable
self.assertEqual(min((1,2), default=None), 1) # two elem iterable
self.assertEqual(min((), default=1, key=neg), 1)
self.assertEqual(min((1, 2), default=1, key=neg), 2)
data = [random.randrange(200) for i in range(100)]
keys = dict((elem, random.randrange(50)) for elem in data)
f = keys.__getitem__
self.assertEqual(min(data, key=f),
sorted(data, key=f)[0])
def test_next(self):
it = iter(range(2))
self.assertEqual(next(it), 0)
self.assertEqual(next(it), 1)
self.assertRaises(StopIteration, next, it)
self.assertRaises(StopIteration, next, it)
self.assertEqual(next(it, 42), 42)
class Iter(object):
def __iter__(self):
return self
def __next__(self):
raise StopIteration
it = iter(Iter())
self.assertEqual(next(it, 42), 42)
self.assertRaises(StopIteration, next, it)
def gen():
yield 1
return
it = gen()
self.assertEqual(next(it), 1)
self.assertRaises(StopIteration, next, it)
self.assertEqual(next(it, 42), 42)
def test_oct(self):
self.assertEqual(oct(100), '0o144')
self.assertEqual(oct(-100), '-0o144')
self.assertRaises(TypeError, oct, ())
def write_testfile(self):
# NB the first 4 lines are also used to test input, below
fp = open(TESTFN, 'w')
self.addCleanup(unlink, TESTFN)
with fp:
fp.write('1+1\n')
fp.write('The quick brown fox jumps over the lazy dog')
fp.write('.\n')
fp.write('Dear John\n')
fp.write('XXX'*100)
fp.write('YYY'*100)
def test_open(self):
self.write_testfile()
fp = open(TESTFN, 'r')
with fp:
self.assertEqual(fp.readline(4), '1+1\n')
self.assertEqual(fp.readline(), 'The quick brown fox jumps over the lazy dog.\n')
self.assertEqual(fp.readline(4), 'Dear')
self.assertEqual(fp.readline(100), ' John\n')
self.assertEqual(fp.read(300), 'XXX'*100)
self.assertEqual(fp.read(1000), 'YYY'*100)
# embedded null bytes and characters
self.assertRaises(ValueError, open, 'a\x00b')
self.assertRaises(ValueError, open, b'a\x00b')
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_open_default_encoding(self):
old_environ = dict(os.environ)
try:
# try to get a user preferred encoding different than the current
# locale encoding to check that open() uses the current locale
# encoding and not the user preferred encoding
for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
if key in os.environ:
del os.environ[key]
self.write_testfile()
current_locale_encoding = locale.getpreferredencoding(False)
fp = open(TESTFN, 'w')
with fp:
self.assertEqual(fp.encoding, current_locale_encoding)
finally:
os.environ.clear()
os.environ.update(old_environ)
def test_open_non_inheritable(self):
fileobj = open(__file__)
with fileobj:
self.assertFalse(os.get_inheritable(fileobj.fileno()))
def test_ord(self):
self.assertEqual(ord(' '), 32)
self.assertEqual(ord('A'), 65)
self.assertEqual(ord('a'), 97)
self.assertEqual(ord('\x80'), 128)
self.assertEqual(ord('\xff'), 255)
self.assertEqual(ord(b' '), 32)
self.assertEqual(ord(b'A'), 65)
self.assertEqual(ord(b'a'), 97)
self.assertEqual(ord(b'\x80'), 128)
self.assertEqual(ord(b'\xff'), 255)
self.assertEqual(ord(chr(sys.maxunicode)), sys.maxunicode)
self.assertRaises(TypeError, ord, 42)
self.assertEqual(ord(chr(0x10FFFF)), 0x10FFFF)
self.assertEqual(ord("\U0000FFFF"), 0x0000FFFF)
self.assertEqual(ord("\U00010000"), 0x00010000)
self.assertEqual(ord("\U00010001"), 0x00010001)
self.assertEqual(ord("\U000FFFFE"), 0x000FFFFE)
self.assertEqual(ord("\U000FFFFF"), 0x000FFFFF)
self.assertEqual(ord("\U00100000"), 0x00100000)
self.assertEqual(ord("\U00100001"), 0x00100001)
self.assertEqual(ord("\U0010FFFE"), 0x0010FFFE)
self.assertEqual(ord("\U0010FFFF"), 0x0010FFFF)
def test_pow(self):
self.assertEqual(pow(0,0), 1)
self.assertEqual(pow(0,1), 0)
self.assertEqual(pow(1,0), 1)
self.assertEqual(pow(1,1), 1)
self.assertEqual(pow(2,0), 1)
self.assertEqual(pow(2,10), 1024)
self.assertEqual(pow(2,20), 1024*1024)
self.assertEqual(pow(2,30), 1024*1024*1024)
self.assertEqual(pow(-2,0), 1)
self.assertEqual(pow(-2,1), -2)
self.assertEqual(pow(-2,2), 4)
self.assertEqual(pow(-2,3), -8)
self.assertAlmostEqual(pow(0.,0), 1.)
self.assertAlmostEqual(pow(0.,1), 0.)
self.assertAlmostEqual(pow(1.,0), 1.)
self.assertAlmostEqual(pow(1.,1), 1.)
self.assertAlmostEqual(pow(2.,0), 1.)
self.assertAlmostEqual(pow(2.,10), 1024.)
self.assertAlmostEqual(pow(2.,20), 1024.*1024.)
self.assertAlmostEqual(pow(2.,30), 1024.*1024.*1024.)
self.assertAlmostEqual(pow(-2.,0), 1.)
self.assertAlmostEqual(pow(-2.,1), -2.)
self.assertAlmostEqual(pow(-2.,2), 4.)
self.assertAlmostEqual(pow(-2.,3), -8.)
for x in 2, 2.0:
for y in 10, 10.0:
for z in 1000, 1000.0:
if isinstance(x, float) or \
isinstance(y, float) or \
isinstance(z, float):
self.assertRaises(TypeError, pow, x, y, z)
else:
self.assertAlmostEqual(pow(x, y, z), 24.0)
self.assertAlmostEqual(pow(-1, 0.5), 1j)
self.assertAlmostEqual(pow(-1, 1/3), 0.5 + 0.8660254037844386j)
self.assertRaises(ValueError, pow, -1, -2, 3)
self.assertRaises(ValueError, pow, 1, 2, 0)
self.assertRaises(TypeError, pow)
def test_input(self):
self.write_testfile()
fp = open(TESTFN, 'r')
savestdin = sys.stdin
savestdout = sys.stdout # Eats the echo
try:
sys.stdin = fp
sys.stdout = BitBucket()
self.assertEqual(input(), "1+1")
self.assertEqual(input(), 'The quick brown fox jumps over the lazy dog.')
self.assertEqual(input('testing\n'), 'Dear John')
# SF 1535165: don't segfault on closed stdin
# sys.stdout must be a regular file to trigger the crash
sys.stdout = savestdout
sys.stdin.close()
self.assertRaises(ValueError, input)
sys.stdout = BitBucket()
sys.stdin = io.StringIO("NULL\0")
self.assertRaises(TypeError, input, 42, 42)
sys.stdin = io.StringIO(" 'whitespace'")
self.assertEqual(input(), " 'whitespace'")
sys.stdin = io.StringIO()
self.assertRaises(EOFError, input)
del sys.stdout
self.assertRaises(RuntimeError, input, 'prompt')
del sys.stdin
self.assertRaises(RuntimeError, input, 'prompt')
finally:
sys.stdin = savestdin
sys.stdout = savestdout
fp.close()
# test_int(): see test_int.py for tests of built-in function int().
def test_repr(self):
self.assertEqual(repr(''), '\'\'')
self.assertEqual(repr(0), '0')
self.assertEqual(repr(()), '()')
self.assertEqual(repr([]), '[]')
self.assertEqual(repr({}), '{}')
a = []
a.append(a)
self.assertEqual(repr(a), '[[...]]')
a = {}
a[0] = a
self.assertEqual(repr(a), '{0: {...}}')
def test_round(self):
self.assertEqual(round(0.0), 0.0)
self.assertEqual(type(round(0.0)), int)
self.assertEqual(round(1.0), 1.0)
self.assertEqual(round(10.0), 10.0)
self.assertEqual(round(1000000000.0), 1000000000.0)
self.assertEqual(round(1e20), 1e20)
self.assertEqual(round(-1.0), -1.0)
self.assertEqual(round(-10.0), -10.0)
self.assertEqual(round(-1000000000.0), -1000000000.0)
self.assertEqual(round(-1e20), -1e20)
self.assertEqual(round(0.1), 0.0)
self.assertEqual(round(1.1), 1.0)
self.assertEqual(round(10.1), 10.0)
self.assertEqual(round(1000000000.1), 1000000000.0)
self.assertEqual(round(-1.1), -1.0)
self.assertEqual(round(-10.1), -10.0)
self.assertEqual(round(-1000000000.1), -1000000000.0)
self.assertEqual(round(0.9), 1.0)
self.assertEqual(round(9.9), 10.0)
self.assertEqual(round(999999999.9), 1000000000.0)
self.assertEqual(round(-0.9), -1.0)
self.assertEqual(round(-9.9), -10.0)
self.assertEqual(round(-999999999.9), -1000000000.0)
self.assertEqual(round(-8.0, -1), -10.0)
self.assertEqual(type(round(-8.0, -1)), float)
self.assertEqual(type(round(-8.0, 0)), float)
self.assertEqual(type(round(-8.0, 1)), float)
# Check even / odd rounding behaviour
self.assertEqual(round(5.5), 6)
self.assertEqual(round(6.5), 6)
self.assertEqual(round(-5.5), -6)
self.assertEqual(round(-6.5), -6)
# Check behavior on ints
self.assertEqual(round(0), 0)
self.assertEqual(round(8), 8)
self.assertEqual(round(-8), -8)
self.assertEqual(type(round(0)), int)
self.assertEqual(type(round(-8, -1)), int)
self.assertEqual(type(round(-8, 0)), int)
self.assertEqual(type(round(-8, 1)), int)
# test new kwargs
self.assertEqual(round(number=-8.0, ndigits=-1), -10.0)
self.assertRaises(TypeError, round)
# test generic rounding delegation for reals
class TestRound:
def __round__(self):
return 23
class TestNoRound:
pass
self.assertEqual(round(TestRound()), 23)
self.assertRaises(TypeError, round, 1, 2, 3)
self.assertRaises(TypeError, round, TestNoRound())
t = TestNoRound()
t.__round__ = lambda *args: args
self.assertRaises(TypeError, round, t)
self.assertRaises(TypeError, round, t, 0)
# Some versions of glibc for alpha have a bug that affects
# float -> integer rounding (floor, ceil, rint, round) for
# values in the range [2**52, 2**53). See:
#
# http://sources.redhat.com/bugzilla/show_bug.cgi?id=5350
#
# We skip this test on Linux/alpha if it would fail.
linux_alpha = (platform.system().startswith('Linux') and
platform.machine().startswith('alpha'))
system_round_bug = round(5e15+1) != 5e15+1
@unittest.skipIf(linux_alpha and system_round_bug,
"test will fail; failure is probably due to a "
"buggy system round function")
def test_round_large(self):
# Issue #1869: integral floats should remain unchanged
self.assertEqual(round(5e15-1), 5e15-1)
self.assertEqual(round(5e15), 5e15)
self.assertEqual(round(5e15+1), 5e15+1)
self.assertEqual(round(5e15+2), 5e15+2)
self.assertEqual(round(5e15+3), 5e15+3)
def test_bug_27936(self):
# Verify that ndigits=None means the same as passing in no argument
for x in [1234,
1234.56,
decimal.Decimal('1234.56'),
fractions.Fraction(123456, 100)]:
self.assertEqual(round(x, None), round(x))
self.assertEqual(type(round(x, None)), type(round(x)))
def test_setattr(self):
setattr(sys, 'spam', 1)
self.assertEqual(sys.spam, 1)
self.assertRaises(TypeError, setattr, sys, 1, 'spam')
self.assertRaises(TypeError, setattr)
# test_str(): see test_unicode.py and test_bytes.py for str() tests.
def test_sum(self):
self.assertEqual(sum([]), 0)
self.assertEqual(sum(list(range(2,8))), 27)
self.assertEqual(sum(iter(list(range(2,8)))), 27)
self.assertEqual(sum(Squares(10)), 285)
self.assertEqual(sum(iter(Squares(10))), 285)
self.assertEqual(sum([[1], [2], [3]], []), [1, 2, 3])
self.assertRaises(TypeError, sum)
self.assertRaises(TypeError, sum, 42)
self.assertRaises(TypeError, sum, ['a', 'b', 'c'])
self.assertRaises(TypeError, sum, ['a', 'b', 'c'], '')
self.assertRaises(TypeError, sum, [b'a', b'c'], b'')
values = [bytearray(b'a'), bytearray(b'b')]
self.assertRaises(TypeError, sum, values, bytearray(b''))
self.assertRaises(TypeError, sum, [[1], [2], [3]])
self.assertRaises(TypeError, sum, [{2:3}])
self.assertRaises(TypeError, sum, [{2:3}]*2, {2:3})
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, sum, BadSeq())
empty = []
sum(([x] for x in range(10)), empty)
self.assertEqual(empty, [])
def test_type(self):
self.assertEqual(type(''), type('123'))
self.assertNotEqual(type(''), type(()))
# We don't want self in vars(), so these are static methods
@staticmethod
def get_vars_f0():
return vars()
@staticmethod
def get_vars_f2():
BuiltinTest.get_vars_f0()
a = 1
b = 2
return vars()
class C_get_vars(object):
def getDict(self):
return {'a':2}
__dict__ = property(fget=getDict)
def test_vars(self):
self.assertEqual(set(vars()), set(dir()))
self.assertEqual(set(vars(sys)), set(dir(sys)))
self.assertEqual(self.get_vars_f0(), {})
self.assertEqual(self.get_vars_f2(), {'a': 1, 'b': 2})
self.assertRaises(TypeError, vars, 42, 42)
self.assertRaises(TypeError, vars, 42)
self.assertEqual(vars(self.C_get_vars()), {'a':2})
def test_zip(self):
a = (1, 2, 3)
b = (4, 5, 6)
t = [(1, 4), (2, 5), (3, 6)]
self.assertEqual(list(zip(a, b)), t)
b = [4, 5, 6]
self.assertEqual(list(zip(a, b)), t)
b = (4, 5, 6, 7)
self.assertEqual(list(zip(a, b)), t)
class I:
def __getitem__(self, i):
if i < 0 or i > 2: raise IndexError
return i + 4
self.assertEqual(list(zip(a, I())), t)
self.assertEqual(list(zip()), [])
self.assertEqual(list(zip(*[])), [])
self.assertRaises(TypeError, zip, None)
class G:
pass
self.assertRaises(TypeError, zip, a, G())
self.assertRaises(RuntimeError, zip, a, TestFailingIter())
# Make sure zip doesn't try to allocate a billion elements for the
# result list when one of its arguments doesn't say how long it is.
# A MemoryError is the most likely failure mode.
class SequenceWithoutALength:
def __getitem__(self, i):
if i == 5:
raise IndexError
else:
return i
self.assertEqual(
list(zip(SequenceWithoutALength(), range(2**30))),
list(enumerate(range(5)))
)
class BadSeq:
def __getitem__(self, i):
if i == 5:
raise ValueError
else:
return i
self.assertRaises(ValueError, list, zip(BadSeq(), BadSeq()))
def test_zip_pickle(self):
a = (1, 2, 3)
b = (4, 5, 6)
t = [(1, 4), (2, 5), (3, 6)]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
z1 = zip(a, b)
self.check_iter_pickle(z1, t, proto)
def test_format(self):
# Test the basic machinery of the format() builtin. Don't test
# the specifics of the various formatters
self.assertEqual(format(3, ''), '3')
# Returns some classes to use for various tests. Only the new-style
# versions remain now that old-style classes are gone in Python 3.
def classes_new():
class A(object):
def __init__(self, x):
self.x = x
def __format__(self, format_spec):
return str(self.x) + format_spec
class DerivedFromA(A):
pass
class Simple(object): pass
class DerivedFromSimple(Simple):
def __init__(self, x):
self.x = x
def __format__(self, format_spec):
return str(self.x) + format_spec
class DerivedFromSimple2(DerivedFromSimple): pass
return A, DerivedFromA, DerivedFromSimple, DerivedFromSimple2
def class_test(A, DerivedFromA, DerivedFromSimple, DerivedFromSimple2):
self.assertEqual(format(A(3), 'spec'), '3spec')
self.assertEqual(format(DerivedFromA(4), 'spec'), '4spec')
self.assertEqual(format(DerivedFromSimple(5), 'abc'), '5abc')
self.assertEqual(format(DerivedFromSimple2(10), 'abcdef'),
'10abcdef')
class_test(*classes_new())
def empty_format_spec(value):
# test that:
# format(x, '') == str(x)
# format(x) == str(x)
self.assertEqual(format(value, ""), str(value))
self.assertEqual(format(value), str(value))
# for builtin types, format(x, "") == str(x)
empty_format_spec(17**13)
empty_format_spec(1.0)
empty_format_spec(3.1415e104)
empty_format_spec(-3.1415e104)
empty_format_spec(3.1415e-104)
empty_format_spec(-3.1415e-104)
empty_format_spec(object)
empty_format_spec(None)
# TypeError because self.__format__ returns the wrong type
class BadFormatResult:
def __format__(self, format_spec):
return 1.0
self.assertRaises(TypeError, format, BadFormatResult(), "")
# TypeError because format_spec is not unicode or str
self.assertRaises(TypeError, format, object(), 4)
self.assertRaises(TypeError, format, object(), object())
# tests for object.__format__ really belong elsewhere, but
# there's no good place to put them
x = object().__format__('')
self.assertTrue(x.startswith('<object object at'))
# first argument to object.__format__ must be string
self.assertRaises(TypeError, object().__format__, 3)
self.assertRaises(TypeError, object().__format__, object())
self.assertRaises(TypeError, object().__format__, None)
# --------------------------------------------------------------------
# Issue #7994: object.__format__ with a non-empty format string is
# disallowed
class A:
def __format__(self, fmt_str):
return format('', fmt_str)
self.assertEqual(format(A()), '')
self.assertEqual(format(A(), ''), '')
self.assertEqual(format(A(), 's'), '')
class B:
pass
class C(object):
pass
for cls in [object, B, C]:
obj = cls()
self.assertEqual(format(obj), str(obj))
self.assertEqual(format(obj, ''), str(obj))
with self.assertRaisesRegex(TypeError,
r'\b%s\b' % re.escape(cls.__name__)):
format(obj, 's')
# --------------------------------------------------------------------
# make sure we can take a subclass of str as a format spec
class DerivedFromStr(str): pass
self.assertEqual(format(0, DerivedFromStr('10')), ' 0')
def test_bin(self):
self.assertEqual(bin(0), '0b0')
self.assertEqual(bin(1), '0b1')
self.assertEqual(bin(-1), '-0b1')
self.assertEqual(bin(2**65), '0b1' + '0' * 65)
self.assertEqual(bin(2**65-1), '0b' + '1' * 65)
self.assertEqual(bin(-(2**65)), '-0b1' + '0' * 65)
self.assertEqual(bin(-(2**65-1)), '-0b' + '1' * 65)
def test_bytearray_translate(self):
x = bytearray(b"abc")
self.assertRaises(ValueError, x.translate, b"1", 1)
self.assertRaises(TypeError, x.translate, b"1"*256, 1)
def test_bytearray_extend_error(self):
array = bytearray()
bad_iter = map(int, "X")
self.assertRaises(ValueError, array.extend, bad_iter)
def test_construct_singletons(self):
for const in None, Ellipsis, NotImplemented:
tp = type(const)
self.assertIs(tp(), const)
self.assertRaises(TypeError, tp, 1, 2)
self.assertRaises(TypeError, tp, a=1, b=2)
class TestBreakpoint(unittest.TestCase):
def setUp(self):
# These tests require a clean slate environment. For example, if the
# test suite is run with $PYTHONBREAKPOINT set to something else, it
# will mess up these tests. Similarly for sys.breakpointhook.
# Cleaning the slate here means you can't use breakpoint() to debug
# these tests, but I think that's okay. Just use pdb.set_trace() if
# you must.
self.resources = ExitStack()
self.addCleanup(self.resources.close)
self.env = self.resources.enter_context(EnvironmentVarGuard())
del self.env['PYTHONBREAKPOINT']
self.resources.enter_context(
swap_attr(sys, 'breakpointhook', sys.__breakpointhook__))
def test_breakpoint(self):
with patch('pdb.set_trace') as mock:
breakpoint()
mock.assert_called_once()
def test_breakpoint_with_breakpointhook_set(self):
my_breakpointhook = MagicMock()
sys.breakpointhook = my_breakpointhook
breakpoint()
my_breakpointhook.assert_called_once_with()
def test_breakpoint_with_breakpointhook_reset(self):
my_breakpointhook = MagicMock()
sys.breakpointhook = my_breakpointhook
breakpoint()
my_breakpointhook.assert_called_once_with()
# Reset the hook and it will not be called again.
sys.breakpointhook = sys.__breakpointhook__
with patch('pdb.set_trace') as mock:
breakpoint()
mock.assert_called_once_with()
my_breakpointhook.assert_called_once_with()
def test_breakpoint_with_args_and_keywords(self):
my_breakpointhook = MagicMock()
sys.breakpointhook = my_breakpointhook
breakpoint(1, 2, 3, four=4, five=5)
my_breakpointhook.assert_called_once_with(1, 2, 3, four=4, five=5)
def test_breakpoint_with_passthru_error(self):
def my_breakpointhook():
pass
sys.breakpointhook = my_breakpointhook
self.assertRaises(TypeError, breakpoint, 1, 2, 3, four=4, five=5)
@unittest.skipIf(sys.flags.ignore_environment, '-E was given')
def test_envar_good_path_builtin(self):
self.env['PYTHONBREAKPOINT'] = 'int'
with patch('builtins.int') as mock:
breakpoint('7')
mock.assert_called_once_with('7')
@unittest.skipIf(sys.flags.ignore_environment, '-E was given')
def test_envar_good_path_other(self):
self.env['PYTHONBREAKPOINT'] = 'sys.exit'
with patch('sys.exit') as mock:
breakpoint()
mock.assert_called_once_with()
@unittest.skipIf(sys.flags.ignore_environment, '-E was given')
def test_envar_good_path_noop_0(self):
self.env['PYTHONBREAKPOINT'] = '0'
with patch('pdb.set_trace') as mock:
breakpoint()
mock.assert_not_called()
def test_envar_good_path_empty_string(self):
# PYTHONBREAKPOINT='' is the same as it not being set.
self.env['PYTHONBREAKPOINT'] = ''
with patch('pdb.set_trace') as mock:
breakpoint()
mock.assert_called_once_with()
@unittest.skipIf(sys.flags.ignore_environment, '-E was given')
def test_envar_unimportable(self):
for envar in (
'.', '..', '.foo', 'foo.', '.int', 'int.',
'.foo.bar', '..foo.bar', '/./',
'nosuchbuiltin',
'nosuchmodule.nosuchcallable',
):
with self.subTest(envar=envar):
self.env['PYTHONBREAKPOINT'] = envar
mock = self.resources.enter_context(patch('pdb.set_trace'))
w = self.resources.enter_context(check_warnings(quiet=True))
breakpoint()
self.assertEqual(
str(w.message),
f'Ignoring unimportable $PYTHONBREAKPOINT: "{envar}"')
self.assertEqual(w.category, RuntimeWarning)
mock.assert_not_called()
def test_envar_ignored_when_hook_is_set(self):
self.env['PYTHONBREAKPOINT'] = 'sys.exit'
with patch('sys.exit') as mock:
sys.breakpointhook = int
breakpoint()
mock.assert_not_called()
@unittest.skipUnless(pty, "the pty and signal modules must be available")
class PtyTests(unittest.TestCase):
"""Tests that use a pseudo terminal to guarantee stdin and stdout are
terminals in the test environment"""
@staticmethod
def handle_sighup(signum, frame):
# bpo-40140: if the process is the session leader, os.close(fd)
# of "pid, fd = pty.fork()" can raise SIGHUP signal:
# just ignore the signal.
pass
def run_child(self, child, terminal_input):
old_sighup = signal.signal(signal.SIGHUP, self.handle_sighup)
try:
return self._run_child(child, terminal_input)
finally:
signal.signal(signal.SIGHUP, old_sighup)
def _run_child(self, child, terminal_input):
r, w = os.pipe() # Pipe test results from child back to parent
try:
pid, fd = pty.fork()
except (OSError, AttributeError) as e:
os.close(r)
os.close(w)
self.skipTest("pty.fork() raised {}".format(e))
raise
if pid == 0:
# Child
try:
# Make sure we don't get stuck if there's a problem
signal.alarm(2)
os.close(r)
with open(w, "w") as wpipe:
child(wpipe)
except:
traceback.print_exc()
finally:
# We don't want to return to unittest...
os._exit(0)
# Parent
os.close(w)
os.write(fd, terminal_input)
# Get results from the pipe
with open(r, "r") as rpipe:
lines = []
while True:
line = rpipe.readline().strip()
if line == "":
# The other end was closed => the child exited
break
lines.append(line)
# Check that the result was received and that it corresponds to the user's terminal input
if len(lines) != 2:
# Something went wrong, try to get at stderr
# Beware of Linux raising EIO when the slave is closed
child_output = bytearray()
while True:
try:
chunk = os.read(fd, 3000)
except OSError: # Assume EIO
break
if not chunk:
break
child_output.extend(chunk)
os.close(fd)
child_output = child_output.decode("ascii", "ignore")
self.fail("got %d lines in pipe but expected 2, child output was:\n%s"
% (len(lines), child_output))
# bpo-40155: Close the PTY before waiting for the child process
# completion, otherwise the child process hangs on AIX.
os.close(fd)
# Wait until the child process completes
os.waitpid(pid, 0)
return lines
def check_input_tty(self, prompt, terminal_input, stdio_encoding=None):
if not sys.stdin.isatty() or not sys.stdout.isatty():
self.skipTest("stdin and stdout must be ttys")
def child(wpipe):
# Check the error handlers are accounted for
if stdio_encoding:
sys.stdin = io.TextIOWrapper(sys.stdin.detach(),
encoding=stdio_encoding,
errors='surrogateescape')
sys.stdout = io.TextIOWrapper(sys.stdout.detach(),
encoding=stdio_encoding,
errors='replace')
print("tty =", sys.stdin.isatty() and sys.stdout.isatty(), file=wpipe)
print(ascii(input(prompt)), file=wpipe)
lines = self.run_child(child, terminal_input + b"\r\n")
# Check we did exercise the GNU readline path
self.assertIn(lines[0], {'tty = True', 'tty = False'})
if lines[0] != 'tty = True':
self.skipTest("standard IO in should have been a tty")
input_result = eval(lines[1]) # ascii() -> eval() roundtrip
if stdio_encoding:
expected = terminal_input.decode(stdio_encoding, 'surrogateescape')
else:
expected = terminal_input.decode(sys.stdin.encoding) # what else?
self.assertEqual(input_result, expected)
def test_input_tty(self):
# Test input() functionality when wired to a tty (the code path
# is different and invokes GNU readline if available).
self.check_input_tty("prompt", b"quux")
def test_input_tty_non_ascii(self):
# Check stdin/stdout encoding is used when invoking GNU readline
self.check_input_tty("prompté", b"quux\xe9", "utf-8")
def test_input_tty_non_ascii_unicode_errors(self):
# Check stdin/stdout error handler is used when invoking GNU readline
self.check_input_tty("prompté", b"quux\xe9", "ascii")
def test_input_no_stdout_fileno(self):
# Issue #24402: If stdin is the original terminal but stdout.fileno()
# fails, do not use the original stdout file descriptor
def child(wpipe):
print("stdin.isatty():", sys.stdin.isatty(), file=wpipe)
sys.stdout = io.StringIO() # Does not support fileno()
input("prompt")
print("captured:", ascii(sys.stdout.getvalue()), file=wpipe)
lines = self.run_child(child, b"quux\r")
expected = (
"stdin.isatty(): True",
"captured: 'prompt'",
)
self.assertSequenceEqual(lines, expected)
class TestSorted(unittest.TestCase):
def test_basic(self):
data = list(range(100))
copy = data[:]
random.shuffle(copy)
self.assertEqual(data, sorted(copy))
self.assertNotEqual(data, copy)
data.reverse()
random.shuffle(copy)
self.assertEqual(data, sorted(copy, key=lambda x: -x))
self.assertNotEqual(data, copy)
random.shuffle(copy)
self.assertEqual(data, sorted(copy, reverse=1))
self.assertNotEqual(data, copy)
def test_bad_arguments(self):
# Issue #29327: The first argument is positional-only.
sorted([])
# pypy doesn't support positional only arguments
if check_impl_detail():
with self.assertRaises(TypeError):
sorted(iterable=[])
# Other arguments are keyword-only
sorted([], key=None)
with self.assertRaises(TypeError):
sorted([], None)
def test_inputtypes(self):
s = 'abracadabra'
types = [list, tuple, str]
for T in types:
self.assertEqual(sorted(s), sorted(T(s)))
s = ''.join(set(s)) # unique letters only
types = [str, set, frozenset, list, tuple, dict.fromkeys]
for T in types:
self.assertEqual(sorted(s), sorted(T(s)))
def test_baddecorator(self):
data = 'The quick Brown fox Jumped over The lazy Dog'.split()
self.assertRaises(TypeError, sorted, data, None, lambda x,y: 0)
class ShutdownTest(unittest.TestCase):
# PyPy doesn't do a gc.collect() at shutdown
@cpython_only
def test_cleanup(self):
# Issue #19255: builtins are still available at shutdown
code = """if 1:
import builtins
import sys
class C:
def __del__(self):
print("before")
# Check that builtins still exist
len(())
print("after")
c = C()
# Make this module survive until builtins and sys are cleaned
builtins.here = sys.modules[__name__]
sys.here = sys.modules[__name__]
# Create a reference loop so that this module needs to go
# through a GC phase.
here = sys.modules[__name__]
"""
# Issue #20599: Force ASCII encoding to get a codec implemented in C,
# otherwise the codec may be unloaded before C.__del__() is called, and
# so print("before") fails because the codec cannot be used to encode
# "before" to sys.stdout.encoding. For example, on Windows,
# sys.stdout.encoding is the OEM code page and these code pages are
# implemented in Python
rc, out, err = assert_python_ok("-c", code,
PYTHONIOENCODING="ascii")
self.assertEqual(["before", "after"], out.decode().splitlines())
class TestType(unittest.TestCase):
def test_new_type(self):
A = type('A', (), {})
self.assertEqual(A.__name__, 'A')
self.assertEqual(A.__qualname__, 'A')
self.assertEqual(A.__module__, __name__)
self.assertEqual(A.__bases__, (object,))
self.assertIs(A.__base__, object)
x = A()
self.assertIs(type(x), A)
self.assertIs(x.__class__, A)
class B:
def ham(self):
return 'ham%d' % self
C = type('C', (B, int), {'spam': lambda self: 'spam%s' % self})
self.assertEqual(C.__name__, 'C')
self.assertEqual(C.__qualname__, 'C')
self.assertEqual(C.__module__, __name__)
self.assertEqual(C.__bases__, (B, int))
self.assertIs(C.__base__, int)
self.assertIn('spam', C.__dict__)
self.assertNotIn('ham', C.__dict__)
x = C(42)
self.assertEqual(x, 42)
self.assertIs(type(x), C)
self.assertIs(x.__class__, C)
self.assertEqual(x.ham(), 'ham42')
self.assertEqual(x.spam(), 'spam42')
self.assertEqual(x.to_bytes(2, 'little'), b'\x2a\x00')
def test_type_nokwargs(self):
with self.assertRaises(TypeError):
type('a', (), {}, x=5)
with self.assertRaises(TypeError):
type('a', (), dict={})
def test_type_name(self):
for name in 'A', '\xc4', '\U0001f40d', 'B.A', '42', '':
with self.subTest(name=name):
A = type(name, (), {})
self.assertEqual(A.__name__, name)
self.assertEqual(A.__qualname__, name)
self.assertEqual(A.__module__, __name__)
with self.assertRaises(ValueError):
type('A\x00B', (), {})
with self.assertRaises(ValueError):
type('A\udcdcB', (), {})
with self.assertRaises(TypeError):
type(b'A', (), {})
C = type('C', (), {})
for name in 'A', '\xc4', '\U0001f40d', 'B.A', '42', '':
with self.subTest(name=name):
C.__name__ = name
self.assertEqual(C.__name__, name)
self.assertEqual(C.__qualname__, 'C')
self.assertEqual(C.__module__, __name__)
A = type('C', (), {})
with self.assertRaises(ValueError):
A.__name__ = 'A\x00B'
self.assertEqual(A.__name__, 'C')
with self.assertRaises(ValueError):
A.__name__ = 'A\udcdcB'
self.assertEqual(A.__name__, 'C')
with self.assertRaises(TypeError):
A.__name__ = b'A'
self.assertEqual(A.__name__, 'C')
def test_type_qualname(self):
A = type('A', (), {'__qualname__': 'B.C'})
self.assertEqual(A.__name__, 'A')
self.assertEqual(A.__qualname__, 'B.C')
self.assertEqual(A.__module__, __name__)
with self.assertRaises(TypeError):
type('A', (), {'__qualname__': b'B'})
self.assertEqual(A.__qualname__, 'B.C')
A.__qualname__ = 'D.E'
self.assertEqual(A.__name__, 'A')
self.assertEqual(A.__qualname__, 'D.E')
with self.assertRaises(TypeError):
A.__qualname__ = b'B'
self.assertEqual(A.__qualname__, 'D.E')
def test_type_doc(self):
for doc in 'x', '\xc4', '\U0001f40d', 'x\x00y', b'x', 42, None:
A = type('A', (), {'__doc__': doc})
self.assertEqual(A.__doc__, doc)
if check_impl_detail(): # CPython encodes __doc__ into tp_doc
with self.assertRaises(UnicodeEncodeError):
type('A', (), {'__doc__': 'x\udcdcy'})
A = type('A', (), {})
self.assertEqual(A.__doc__, None)
for doc in 'x', '\xc4', '\U0001f40d', 'x\x00y', 'x\udcdcy', b'x', 42, None:
A.__doc__ = doc
self.assertEqual(A.__doc__, doc)
def test_bad_args(self):
with self.assertRaises(TypeError):
type()
with self.assertRaises(TypeError):
type('A', ())
with self.assertRaises(TypeError):
type('A', (), {}, ())
with self.assertRaises(TypeError):
type('A', (), dict={})
with self.assertRaises(TypeError):
type('A', [], {})
with self.assertRaises(TypeError):
type('A', (), types.MappingProxyType({}))
with self.assertRaises(TypeError):
type('A', (None,), {})
with self.assertRaises(TypeError):
type('A', (bool,), {})
with self.assertRaises(TypeError):
type('A', (int, str), {})
def test_bad_slots(self):
with self.assertRaises(TypeError):
type('A', (), {'__slots__': b'x'})
if check_impl_detail(): # 'int' is variable-sized on CPython 3.x
with self.assertRaises(TypeError):
type('A', (int,), {'__slots__': 'x'})
with self.assertRaises(TypeError):
type('A', (), {'__slots__': ''})
with self.assertRaises(TypeError):
type('A', (), {'__slots__': '42'})
with self.assertRaises(TypeError):
type('A', (), {'__slots__': 'x\x00y'})
with self.assertRaises(ValueError):
type('A', (), {'__slots__': 'x', 'x': 0})
with self.assertRaises(TypeError):
type('A', (), {'__slots__': ('__dict__', '__dict__')})
with self.assertRaises(TypeError):
type('A', (), {'__slots__': ('__weakref__', '__weakref__')})
class B:
pass
with self.assertRaises(TypeError):
type('A', (B,), {'__slots__': '__dict__'})
with self.assertRaises(TypeError):
type('A', (B,), {'__slots__': '__weakref__'})
def test_namespace_order(self):
# bpo-34320: namespace should preserve order
od = collections.OrderedDict([('a', 1), ('b', 2)])
od.move_to_end('a')
expected = list(od.items())
C = type('C', (), od)
self.assertEqual(list(C.__dict__.items())[:2], [('b', 2), ('a', 1)])
def load_tests(loader, tests, pattern):
from doctest import DocTestSuite
tests.addTest(DocTestSuite(builtins))
return tests
if __name__ == "__main__":
unittest.main()
|
the-stack_106_26847 | import _plotly_utils.basevalidators
class YsideValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name='yside', parent_name='layout.grid', **kwargs
):
super(YsideValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'plot'),
role=kwargs.pop('role', 'info'),
values=kwargs.pop(
'values', ['left', 'left plot', 'right plot', 'right']
),
**kwargs
)
|
the-stack_106_26851 | try:
import usocket as socket
except:
import socket
import ussl as ssl
CONTENT = b"""\
HTTP/1.0 200 OK

Hello #%d from MicroPython!
"""
def main(use_stream=True):
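# use_stream selects between the stream API (read()/write()) and the raw
# socket API (recv()/send()) when talking to the client below.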
s = socket.socket()
# Binding to all interfaces - server will be accessible to other hosts!
ai = socket.getaddrinfo("0.0.0.0", 8443)
print("Bind address info:", ai)
addr = ai[0][-1]
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
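# Reusing the address lets the server restart immediately without waiting
# for the previous socket to leave the TIME_WAIT state.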
s.bind(addr)
s.listen(5)
print("Listening, connect your browser to https://<this_host>:8443/")
counter = 0
while True:
res = s.accept()
client_s = res[0]
client_addr = res[1]
print("Client address:", client_addr)
print("Client socket:", client_s)
client_s = ssl.wrap_socket(client_s, server_side=True)
print(client_s)
print("Request:")
if use_stream:
# Both CPython and MicroPython SSLSocket objects support read() and
# write() methods.
            # Browsers are prone to terminate the SSL connection abruptly if
            # they see an unknown certificate, etc. We must continue in such
            # cases - the next request they issue will likely be better
            # behaved and will succeed.
try:
req = client_s.read(4096)
print(req)
if req:
client_s.write(CONTENT % counter)
except Exception as e:
print("Exception serving request:", e)
else:
print(client_s.recv(4096))
client_s.send(CONTENT % counter)
client_s.close()
counter += 1
print()
main()
|
the-stack_106_26853 | from setuptools import find_packages, setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="simpletransformers",
version="0.15.6",
author="Thilina Rajapakse",
author_email="[email protected]",
description="An easy-to-use wrapper library for the Transformers library.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ThilinaRajapakse/simpletransformers/",
packages=find_packages(),
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
python_requires=">=3.6",
install_requires=[
"numpy",
"requests",
"tqdm",
"regex",
"transformers",
"scipy",
"scikit-learn",
"seqeval",
"tensorboardx"
],
) |
the-stack_106_26854 | """
Knapsack problem.
Recursive implementation with memoization (top-down dynamic programming).
Without the memo, trying all combinations is exponential (2^n) where n is the number of items.
"""
# Uncomment the following lines if you want to play around with more than 2000 items
# import sys
# sys.setrecursionlimit(2500)
memo = {}
def ks(capacity_left, n, weights, values, clear_memo=False):
"""
capacity_left (int): remaining storage capacity of a bag
n (int): current item position
weights (list): list of item weights
values (list): list of item values
    clear_memo (bool): clear the shared memo before solving; required when reusing the
        function with different weights/values (also used for performance tests)
"""
if clear_memo:
memo.clear()
if n == -1 or capacity_left == 0:
# No more items to add
return 0
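    # The memo key packs (capacity_left, n) into a single int; the factor of
    # 2000 assumes fewer than 2000 items (see the recursion-limit note above).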
h = capacity_left * 2000 + n
if h in memo:
# print("memo", capacity_left, n)
return memo[h]
if weights[n] > capacity_left:
# Current item is too heavy for remaining capacity, ignore it and continue
return ks(capacity_left, n-1, weights, values)
else:
# Do not add item, just move the pointer to the left
_without = ks(capacity_left, n-1, weights, values)
# Add item into bag
_with = values[n] + ks(capacity_left-weights[n], n-1, weights, values)
# Save value into memory
val = max(_with, _without)
memo[h] = val
return val
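# Illustrative usage (not part of the original module): a tiny inline instance
# that can be solved without dataset.json; the expected optimum is 110
# (items with weights 4 and 5).
#
#     ks(10, 2, weights=[3, 4, 5], values=[30, 50, 60], clear_memo=True)  # -> 110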
if __name__ == "__main__":
import json
with open("dataset.json", "r") as f:
data = json.load(f)
n = 25
w = data["weights"][:n]
v = data["values"][:n]
c = data["capacities"][n]
best = ks(c, n-1, w, v)
print("Best: ", best, "| Expected:", data["bests"][n])
|
the-stack_106_26855 | """Project models."""
import fnmatch
import logging
import os
import re
from urllib.parse import urlparse
from django.conf import settings
from django.contrib.auth.models import User
from django.core.files.storage import get_storage_class
from django.db import models
from django.db.models import Prefetch
from django.urls import NoReverseMatch, reverse
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.models import TimeStampedModel
from guardian.shortcuts import assign
from six.moves import shlex_quote
from taggit.managers import TaggableManager
from readthedocs.api.v2.client import api
from readthedocs.builds.constants import LATEST, STABLE
from readthedocs.core.resolver import resolve, resolve_domain
from readthedocs.core.utils import broadcast, slugify
from readthedocs.projects import constants
from readthedocs.projects.exceptions import ProjectConfigurationError
from readthedocs.projects.managers import HTMLFileManager
from readthedocs.projects.querysets import (
ChildRelatedProjectQuerySet,
FeatureQuerySet,
ProjectQuerySet,
RelatedProjectQuerySet,
)
from readthedocs.projects.templatetags.projects_tags import sort_version_aware
from readthedocs.projects.validators import (
validate_domain_name,
validate_repository_url,
)
from readthedocs.projects.version_handling import determine_stable_version
from readthedocs.search.parse_json import process_file
from readthedocs.vcs_support.backends import backend_cls
from readthedocs.vcs_support.utils import Lock, NonBlockingLock
log = logging.getLogger(__name__)
storage = get_storage_class()()
class ProjectRelationship(models.Model):
"""
Project to project relationship.
This is used for subprojects
"""
parent = models.ForeignKey(
'Project',
verbose_name=_('Parent'),
related_name='subprojects',
)
child = models.ForeignKey(
'Project',
verbose_name=_('Child'),
related_name='superprojects',
)
alias = models.SlugField(
_('Alias'),
max_length=255,
null=True,
blank=True,
db_index=False,
)
objects = ChildRelatedProjectQuerySet.as_manager()
def __str__(self):
return '{} -> {}'.format(self.parent, self.child)
def save(self, *args, **kwargs): # pylint: disable=arguments-differ
if not self.alias:
self.alias = self.child.slug
super().save(*args, **kwargs)
# HACK
def get_absolute_url(self):
return resolve(self.child)
class Project(models.Model):
"""Project model."""
# Auto fields
pub_date = models.DateTimeField(_('Publication date'), auto_now_add=True)
modified_date = models.DateTimeField(_('Modified date'), auto_now=True)
# Generally from conf.py
users = models.ManyToManyField(
User,
verbose_name=_('User'),
related_name='projects',
)
# A DNS label can contain up to 63 characters.
name = models.CharField(_('Name'), max_length=63)
slug = models.SlugField(_('Slug'), max_length=63, unique=True)
description = models.TextField(
_('Description'),
blank=True,
help_text=_(
'The reStructuredText '
'description of the project',
),
)
repo = models.CharField(
_('Repository URL'),
max_length=255,
validators=[validate_repository_url],
help_text=_('Hosted documentation repository URL'),
db_index=True,
)
repo_type = models.CharField(
_('Repository type'),
max_length=10,
choices=constants.REPO_CHOICES,
default='git',
)
project_url = models.URLField(
_('Project homepage'),
blank=True,
help_text=_('The project\'s homepage'),
)
canonical_url = models.URLField(
_('Canonical URL'),
blank=True,
help_text=_('URL that documentation is expected to serve from'),
)
single_version = models.BooleanField(
_('Single version'),
default=False,
help_text=_(
'A single version site has no translations and only your '
'"latest" version, served at the root of the domain. Use '
'this with caution, only turn it on if you will <b>never</b> '
'have multiple versions of your docs.',
),
)
default_version = models.CharField(
_('Default version'),
max_length=255,
default=LATEST,
help_text=_('The version of your project that / redirects to'),
)
# In default_branch, None means the backend should choose the
# appropriate branch. Eg 'master' for git
default_branch = models.CharField(
_('Default branch'),
max_length=255,
default=None,
null=True,
blank=True,
help_text=_(
'What branch "latest" points to. Leave empty '
'to use the default value for your VCS (eg. '
'<code>trunk</code> or <code>master</code>).',
),
)
requirements_file = models.CharField(
_('Requirements file'),
max_length=255,
default=None,
null=True,
blank=True,
help_text=_(
'A <a '
'href="https://pip.pypa.io/en/latest/user_guide.html#requirements-files">'
'pip requirements file</a> needed to build your documentation. '
'Path from the root of your project.',
),
)
documentation_type = models.CharField(
_('Documentation type'),
max_length=20,
choices=constants.DOCUMENTATION_CHOICES,
default='sphinx',
help_text=_(
'Type of documentation you are building. <a href="'
'http://www.sphinx-doc.org/en/stable/builders.html#sphinx.builders.html.'
'DirectoryHTMLBuilder">More info on sphinx builders</a>.',
),
)
# Project features
cdn_enabled = models.BooleanField(_('CDN Enabled'), default=False)
analytics_code = models.CharField(
_('Analytics code'),
max_length=50,
null=True,
blank=True,
help_text=_(
'Google Analytics Tracking ID '
'(ex. <code>UA-22345342-1</code>). '
'This may slow down your page loads.',
),
)
container_image = models.CharField(
_('Alternative container image'),
max_length=64,
null=True,
blank=True,
)
container_mem_limit = models.CharField(
_('Container memory limit'),
max_length=10,
null=True,
blank=True,
help_text=_(
'Memory limit in Docker format '
'-- example: <code>512m</code> or <code>1g</code>',
),
)
container_time_limit = models.IntegerField(
_('Container time limit in seconds'),
null=True,
blank=True,
)
build_queue = models.CharField(
_('Alternate build queue id'),
max_length=32,
null=True,
blank=True,
)
allow_promos = models.BooleanField(
_('Allow paid advertising'),
default=True,
help_text=_('If unchecked, users will still see community ads.'),
)
ad_free = models.BooleanField(
_('Ad-free'),
default=False,
help_text='If checked, do not show advertising for this project',
)
show_version_warning = models.BooleanField(
_('Show version warning'),
default=False,
help_text=_('Show warning banner in non-stable nor latest versions.'),
)
# Sphinx specific build options.
enable_epub_build = models.BooleanField(
_('Enable EPUB build'),
default=True,
help_text=_(
'Create a EPUB version of your documentation with each build.',
),
)
enable_pdf_build = models.BooleanField(
_('Enable PDF build'),
default=True,
help_text=_(
'Create a PDF version of your documentation with each build.',
),
)
# Other model data.
path = models.CharField(
_('Path'),
max_length=255,
editable=False,
help_text=_(
'The directory where '
'<code>conf.py</code> lives',
),
)
conf_py_file = models.CharField(
_('Python configuration file'),
max_length=255,
default='',
blank=True,
help_text=_(
'Path from project root to <code>conf.py</code> file '
'(ex. <code>docs/conf.py</code>). '
'Leave blank if you want us to find it for you.',
),
)
featured = models.BooleanField(_('Featured'), default=False)
skip = models.BooleanField(_('Skip'), default=False)
install_project = models.BooleanField(
_('Install Project'),
help_text=_(
'Install your project inside a virtualenv using <code>setup.py '
'install</code>',
),
default=False,
)
# This model attribute holds the python interpreter used to create the
# virtual environment
python_interpreter = models.CharField(
_('Python Interpreter'),
max_length=20,
choices=constants.PYTHON_CHOICES,
default='python3',
help_text=_(
'The Python interpreter used to create the virtual '
'environment.',
),
)
use_system_packages = models.BooleanField(
_('Use system packages'),
help_text=_(
'Give the virtual environment access to the global '
'site-packages dir.',
),
default=False,
)
privacy_level = models.CharField(
_('Privacy Level'),
max_length=20,
choices=constants.PRIVACY_CHOICES,
default=settings.DEFAULT_PRIVACY_LEVEL,
help_text=_(
'Level of privacy that you want on the repository. '
'Protected means public but not in listings.',
),
)
version_privacy_level = models.CharField(
_('Version Privacy Level'),
max_length=20,
choices=constants.PRIVACY_CHOICES,
default=settings.DEFAULT_PRIVACY_LEVEL,
help_text=_(
'Default level of privacy you want on built '
'versions of documentation.',
),
)
# Subprojects
related_projects = models.ManyToManyField(
'self',
verbose_name=_('Related projects'),
blank=True,
symmetrical=False,
through=ProjectRelationship,
)
# Language bits
language = models.CharField(
_('Language'),
max_length=20,
default='en',
help_text=_(
'The language the project '
'documentation is rendered in. '
"Note: this affects your project's URL.",
),
choices=constants.LANGUAGES,
)
programming_language = models.CharField(
_('Programming Language'),
max_length=20,
default='words',
help_text=_(
'The primary programming language the project is written in.',
),
choices=constants.PROGRAMMING_LANGUAGES,
blank=True,
)
# A subproject pointed at its main language, so it can be tracked
main_language_project = models.ForeignKey(
'self',
related_name='translations',
on_delete=models.SET_NULL,
blank=True,
null=True,
)
has_valid_webhook = models.BooleanField(
default=False,
help_text=_('This project has been built with a webhook'),
)
has_valid_clone = models.BooleanField(
default=False,
help_text=_('This project has been successfully cloned'),
)
tags = TaggableManager(blank=True)
objects = ProjectQuerySet.as_manager()
all_objects = models.Manager()
class Meta:
ordering = ('slug',)
permissions = (
# Translators: Permission around whether a user can view the
# project
('view_project', _('View Project')),
)
def __str__(self):
return self.name
def save(self, *args, **kwargs): # pylint: disable=arguments-differ
from readthedocs.projects import tasks
first_save = self.pk is None
if not self.slug:
# Subdomains can't have underscores in them.
self.slug = slugify(self.name)
if not self.slug:
raise Exception(_('Model must have slug'))
super().save(*args, **kwargs)
for owner in self.users.all():
assign('view_project', owner, self)
try:
latest = self.versions.filter(slug=LATEST).first()
default_branch = self.get_default_branch()
if latest and latest.identifier != default_branch:
latest.identifier = default_branch
latest.save()
except Exception:
log.exception('Failed to update latest identifier')
try:
if not first_save:
log.info(
'Re-symlinking project and subprojects: project=%s',
self.slug,
)
broadcast(
type='app',
task=tasks.symlink_project,
args=[self.pk],
)
log.info(
'Re-symlinking superprojects: project=%s',
self.slug,
)
for relationship in self.superprojects.all():
broadcast(
type='app',
task=tasks.symlink_project,
args=[relationship.parent.pk],
)
except Exception:
log.exception('failed to symlink project')
try:
if not first_save:
broadcast(
type='app',
task=tasks.update_static_metadata,
args=[self.pk],
)
except Exception:
log.exception('failed to update static metadata')
try:
branch = self.default_branch or self.vcs_repo().fallback_branch
if not self.versions.filter(slug=LATEST).exists():
self.versions.create_latest(identifier=branch)
except Exception:
log.exception('Error creating default branches')
def get_absolute_url(self):
return reverse('projects_detail', args=[self.slug])
def get_docs_url(self, version_slug=None, lang_slug=None, private=None):
"""
Return a URL for the docs.
Always use http for now, to avoid content warnings.
"""
return resolve(
project=self,
version_slug=version_slug,
language=lang_slug,
private=private,
)
def get_builds_url(self):
return reverse(
'builds_project_list',
kwargs={
'project_slug': self.slug,
},
)
def get_canonical_url(self):
if settings.DONT_HIT_DB:
return api.project(self.pk).canonical_url().get()['url']
return self.get_docs_url()
def get_subproject_urls(self):
"""
List subproject URLs.
This is used in search result linking
"""
if settings.DONT_HIT_DB:
return [(proj['slug'], proj['canonical_url']) for proj in
(api.project(self.pk).subprojects().get()['subprojects'])]
return [(proj.child.slug, proj.child.get_docs_url())
for proj in self.subprojects.all()]
def get_storage_path(self, type_, version_slug=LATEST, include_file=True):
"""
Get a path to a build artifact for use with Django's storage system.
:param type_: Media content type, ie - 'pdf', 'htmlzip'
:param version_slug: Project version slug for lookup
:param include_file: Include file name in return
:return: the path to an item in storage
(can be used with ``storage.url`` to get the URL)
"""
folder_path = '{}/{}/{}'.format(
type_,
self.slug,
version_slug,
)
if include_file:
extension = type_.replace('htmlzip', 'zip')
return '{}/{}.{}'.format(
folder_path,
self.slug,
extension,
)
return folder_path
def get_production_media_path(self, type_, version_slug, include_file=True):
"""
Used to see if these files exist so we can offer them for download.
:param type_: Media content type, ie - 'pdf', 'zip'
:param version_slug: Project version slug for lookup
:param include_file: Include file name in return
:type include_file: bool
:returns: Full path to media file or path
"""
if settings.DEFAULT_PRIVACY_LEVEL == 'public' or settings.DEBUG:
path = os.path.join(
settings.MEDIA_ROOT,
type_,
self.slug,
version_slug,
)
else:
path = os.path.join(
settings.PRODUCTION_MEDIA_ARTIFACTS,
type_,
self.slug,
version_slug,
)
if include_file:
path = os.path.join(
path,
'{}.{}'.format(self.slug, type_.replace('htmlzip', 'zip')),
)
return path
def get_production_media_url(self, type_, version_slug, full_path=True):
"""Get the URL for downloading a specific media file."""
try:
path = reverse(
'project_download_media',
kwargs={
'project_slug': self.slug,
'type_': type_,
'version_slug': version_slug,
},
)
except NoReverseMatch:
return ''
if full_path:
path = '//{}{}'.format(settings.PRODUCTION_DOMAIN, path)
return path
def subdomain(self):
"""Get project subdomain from resolver."""
return resolve_domain(self)
def get_downloads(self):
downloads = {}
downloads['htmlzip'] = self.get_production_media_url(
'htmlzip',
self.get_default_version(),
)
downloads['epub'] = self.get_production_media_url(
'epub',
self.get_default_version(),
)
downloads['pdf'] = self.get_production_media_url(
'pdf',
self.get_default_version(),
)
return downloads
@property
def clean_repo(self):
if self.repo.startswith('http://github.com'):
return self.repo.replace('http://github.com', 'https://github.com')
return self.repo
# Doc PATH:
# MEDIA_ROOT/slug/checkouts/version/<repo>
@property
def doc_path(self):
return os.path.join(settings.DOCROOT, self.slug.replace('_', '-'))
def checkout_path(self, version=LATEST):
return os.path.join(self.doc_path, 'checkouts', version)
@property
def pip_cache_path(self):
"""Path to pip cache."""
if settings.GLOBAL_PIP_CACHE and settings.DEBUG:
return settings.GLOBAL_PIP_CACHE
return os.path.join(self.doc_path, '.cache', 'pip')
#
# Paths for symlinks in project doc_path.
#
def translations_symlink_path(self, language=None):
"""Path in the doc_path that we symlink translations."""
if not language:
language = self.language
return os.path.join(self.doc_path, 'translations', language)
#
# End symlink paths
#
def full_doc_path(self, version=LATEST):
"""The path to the documentation root in the project."""
doc_base = self.checkout_path(version)
for possible_path in ['docs', 'doc', 'Doc']:
if os.path.exists(os.path.join(doc_base, '%s' % possible_path)):
return os.path.join(doc_base, '%s' % possible_path)
# No docs directory, docs are at top-level.
return doc_base
def artifact_path(self, type_, version=LATEST):
"""The path to the build html docs in the project."""
return os.path.join(self.doc_path, 'artifacts', version, type_)
def full_build_path(self, version=LATEST):
"""The path to the build html docs in the project."""
return os.path.join(self.conf_dir(version), '_build', 'html')
def full_latex_path(self, version=LATEST):
"""The path to the build LaTeX docs in the project."""
return os.path.join(self.conf_dir(version), '_build', 'latex')
def full_epub_path(self, version=LATEST):
"""The path to the build epub docs in the project."""
return os.path.join(self.conf_dir(version), '_build', 'epub')
# There is currently no support for building man/dash formats, but we keep
# the support there for existing projects. They might have already existing
# legacy builds.
def full_man_path(self, version=LATEST):
"""The path to the build man docs in the project."""
return os.path.join(self.conf_dir(version), '_build', 'man')
def full_dash_path(self, version=LATEST):
"""The path to the build dash docs in the project."""
return os.path.join(self.conf_dir(version), '_build', 'dash')
def full_json_path(self, version=LATEST):
"""The path to the build json docs in the project."""
json_path = os.path.join(self.conf_dir(version), '_build', 'json')
return json_path
def full_singlehtml_path(self, version=LATEST):
"""The path to the build singlehtml docs in the project."""
return os.path.join(self.conf_dir(version), '_build', 'singlehtml')
def rtd_build_path(self, version=LATEST):
"""The destination path where the built docs are copied."""
return os.path.join(self.doc_path, 'rtd-builds', version)
def static_metadata_path(self):
"""The path to the static metadata JSON settings file."""
return os.path.join(self.doc_path, 'metadata.json')
def conf_file(self, version=LATEST):
"""Find a ``conf.py`` file in the project checkout."""
if self.conf_py_file:
conf_path = os.path.join(
self.checkout_path(version),
self.conf_py_file,
)
if os.path.exists(conf_path):
log.info('Inserting conf.py file path from model')
return conf_path
log.warning("Conf file specified on model doesn't exist")
files = self.find('conf.py', version)
if not files:
files = self.full_find('conf.py', version)
if len(files) == 1:
return files[0]
for filename in files:
            # When there are multiple conf.py files, we return the first one
            # whose path contains the word `doc`
if filename.find('doc', 70) != -1:
return filename
# If the project has more than one conf.py file but none of them have
# the `doc` word in the path, we raise an error informing this to the user
if len(files) > 1:
raise ProjectConfigurationError(
ProjectConfigurationError.MULTIPLE_CONF_FILES,
)
raise ProjectConfigurationError(ProjectConfigurationError.NOT_FOUND)
def conf_dir(self, version=LATEST):
conf_file = self.conf_file(version)
if conf_file:
return os.path.dirname(conf_file)
@property
def is_imported(self):
return bool(self.repo)
@property
def has_good_build(self):
# Check if there is `_good_build` annotation in the Queryset.
# Used for Database optimization.
if hasattr(self, '_good_build'):
return self._good_build
return self.builds.filter(success=True).exists()
@property
def has_versions(self):
return self.versions.exists()
@property
def has_aliases(self):
return self.aliases.exists()
def has_pdf(self, version_slug=LATEST):
path = self.get_production_media_path(
type_='pdf', version_slug=version_slug
)
storage_path = self.get_storage_path(
type_='pdf', version_slug=version_slug
)
return os.path.exists(path) or storage.exists(storage_path)
def has_epub(self, version_slug=LATEST):
path = self.get_production_media_path(
type_='epub', version_slug=version_slug
)
storage_path = self.get_storage_path(
type_='epub', version_slug=version_slug
)
return os.path.exists(path) or storage.exists(storage_path)
def has_htmlzip(self, version_slug=LATEST):
path = self.get_production_media_path(
type_='htmlzip', version_slug=version_slug
)
storage_path = self.get_storage_path(
type_='htmlzip', version_slug=version_slug
)
return os.path.exists(path) or storage.exists(storage_path)
@property
def sponsored(self):
return False
def vcs_repo(self, version=LATEST, environment=None):
"""
Return a Backend object for this project able to handle VCS commands.
:param environment: environment to run the commands
:type environment: doc_builder.environments.BuildEnvironment
:param version: version slug for the backend (``LATEST`` by default)
:type version: str
"""
# TODO: this seems to be the only method that receives a
# ``version.slug`` instead of a ``Version`` instance (I prefer an
# instance here)
backend = backend_cls.get(self.repo_type)
if not backend:
repo = None
else:
repo = backend(self, version, environment)
return repo
def repo_nonblockinglock(self, version, max_lock_age=None):
"""
Return a ``NonBlockingLock`` to acquire the lock via context manager.
:param version: project's version that want to get the lock for.
:param max_lock_age: time (in seconds) to consider the lock's age is old
and grab it anyway. It default to the ``container_time_limit`` of
the project or the default ``DOCKER_LIMITS['time']`` or
``REPO_LOCK_SECONDS`` or 30
"""
if max_lock_age is None:
max_lock_age = (
self.container_time_limit or
settings.DOCKER_LIMITS.get('time') or
settings.REPO_LOCK_SECONDS
)
return NonBlockingLock(
project=self,
version=version,
max_lock_age=max_lock_age,
)
def repo_lock(self, version, timeout=5, polling_interval=5):
return Lock(self, version, timeout, polling_interval)
def find(self, filename, version):
"""
Find files inside the project's ``doc`` path.
:param filename: Filename to search for in project checkout
:param version: Version instance to set version checkout path
"""
matches = []
for root, __, filenames in os.walk(self.full_doc_path(version)):
for match in fnmatch.filter(filenames, filename):
matches.append(os.path.join(root, match))
return matches
def full_find(self, filename, version):
"""
Find files inside a project's checkout path.
:param filename: Filename to search for in project checkout
:param version: Version instance to set version checkout path
"""
matches = []
for root, __, filenames in os.walk(self.checkout_path(version)):
for match in fnmatch.filter(filenames, filename):
matches.append(os.path.join(root, match))
return matches
def get_latest_build(self, finished=True):
"""
Get latest build for project.
:param finished: Return only builds that are in a finished state
"""
# Check if there is `_latest_build` attribute in the Queryset.
# Used for Database optimization.
if hasattr(self, '_latest_build'):
if self._latest_build:
return self._latest_build[0]
return None
kwargs = {'type': 'html'}
if finished:
kwargs['state'] = 'finished'
return self.builds.filter(**kwargs).first()
def api_versions(self):
from readthedocs.builds.models import APIVersion
ret = []
        for version_data in api.project(self.pk).active_versions.get()['versions']:
version = APIVersion(**version_data)
ret.append(version)
return sort_version_aware(ret)
def active_versions(self):
from readthedocs.builds.models import Version
versions = Version.objects.public(project=self, only_active=True)
return (
versions.filter(built=True, active=True) |
versions.filter(active=True, uploaded=True)
)
def ordered_active_versions(self, user=None):
from readthedocs.builds.models import Version
kwargs = {
'project': self,
'only_active': True,
}
if user:
kwargs['user'] = user
versions = Version.objects.public(**kwargs).select_related(
'project',
'project__main_language_project',
).prefetch_related(
Prefetch(
'project__superprojects',
ProjectRelationship.objects.all().select_related('parent'),
to_attr='_superprojects',
),
Prefetch(
'project__domains',
Domain.objects.filter(canonical=True),
to_attr='_canonical_domains',
),
)
return sort_version_aware(versions)
def all_active_versions(self):
"""
Get queryset with all active versions.
.. note::
This is a temporary workaround for activate_versions filtering out
things that were active, but failed to build
:returns: :py:class:`Version` queryset
"""
return self.versions.filter(active=True)
def get_stable_version(self):
return self.versions.filter(slug=STABLE).first()
def update_stable_version(self):
"""
Returns the version that was promoted to be the new stable version.
        Return ``None`` if no update was made or if there is no version on the
project that can be considered stable.
"""
versions = self.versions.all()
new_stable = determine_stable_version(versions)
if new_stable:
current_stable = self.get_stable_version()
if current_stable:
identifier_updated = (
new_stable.identifier != current_stable.identifier
)
if identifier_updated and current_stable.machine:
log.info(
'Update stable version: %(project)s:%(version)s',
{
'project': self.slug,
'version': new_stable.identifier,
}
)
current_stable.identifier = new_stable.identifier
current_stable.save()
return new_stable
else:
log.info(
'Creating new stable version: %(project)s:%(version)s',
{
'project': self.slug,
'version': new_stable.identifier,
}
)
current_stable = self.versions.create_stable(
type=new_stable.type,
identifier=new_stable.identifier,
)
return new_stable
def versions_from_branch_name(self, branch):
return (
self.versions.filter(identifier=branch) |
self.versions.filter(identifier='remotes/origin/%s' % branch) |
self.versions.filter(identifier='origin/%s' % branch) |
self.versions.filter(verbose_name=branch)
)
def get_default_version(self):
"""
Get the default version (slug).
Returns self.default_version if the version with that slug actually
exists (is built and published). Otherwise returns 'latest'.
"""
# latest is a special case where we don't have to check if it exists
if self.default_version == LATEST:
return self.default_version
# check if the default_version exists
version_qs = self.versions.filter(
slug=self.default_version,
active=True,
)
if version_qs.exists():
return self.default_version
return LATEST
def get_default_branch(self):
"""Get the version representing 'latest'."""
if self.default_branch:
return self.default_branch
return self.vcs_repo().fallback_branch
def add_subproject(self, child, alias=None):
subproject, __ = ProjectRelationship.objects.get_or_create(
parent=self,
child=child,
alias=alias,
)
return subproject
def remove_subproject(self, child):
ProjectRelationship.objects.filter(parent=self, child=child).delete()
def get_parent_relationship(self):
"""Get the parent project relationship or None if this is a top level project"""
if hasattr(self, '_superprojects'):
# Cached parent project relationship
if self._superprojects:
return self._superprojects[0]
return None
return self.superprojects.select_related('parent').first()
def get_canonical_custom_domain(self):
"""Get the canonical custom domain or None."""
if hasattr(self, '_canonical_domains'):
# Cached custom domains
if self._canonical_domains:
return self._canonical_domains[0]
return None
return self.domains.filter(canonical=True).first()
@property
def features(self):
return Feature.objects.for_project(self)
def has_feature(self, feature_id):
"""
        Check whether the project has the given feature flag.
If the feature has a historical True value before the feature was added,
we consider the project to have the flag. This is used for deprecating a
feature or changing behavior for new projects
"""
return self.features.filter(feature_id=feature_id).exists()
def get_feature_value(self, feature, positive, negative):
"""
Look up project feature, return corresponding value.
If a project has a feature, return ``positive``, otherwise return
``negative``
"""
return positive if self.has_feature(feature) else negative
@property
def show_advertising(self):
"""
        Whether advertising should be shown for this project.
:returns: ``True`` if advertising should be shown and ``False`` otherwise
:rtype: bool
"""
if self.ad_free or self.gold_owners.exists():
return False
return True
@property
def environment_variables(self):
"""
Environment variables to build this particular project.
:returns: dictionary with all the variables {name: value}
:rtype: dict
"""
return {
variable.name: variable.value
for variable in self.environmentvariable_set.all()
}
class APIProject(Project):
"""
Project proxy model for API data deserialization.
This replaces the pattern where API data was deserialized into a mocked
:py:class:`Project` object. This pattern was confusing, as it was not explicit
as to what form of object you were working with -- API backed or database
backed.
This model preserves the Project model methods, allowing for overrides on
model field differences. This model pattern will generally only be used on
builder instances, where we are interacting solely with API data.
"""
features = []
class Meta:
proxy = True
def __init__(self, *args, **kwargs):
self.features = kwargs.pop('features', [])
environment_variables = kwargs.pop('environment_variables', {})
ad_free = (not kwargs.pop('show_advertising', True))
# These fields only exist on the API return, not on the model, so we'll
# remove them to avoid throwing exceptions due to unexpected fields
for key in ['users', 'resource_uri', 'absolute_url', 'downloads',
'main_language_project', 'related_projects']:
try:
del kwargs[key]
except KeyError:
pass
super().__init__(*args, **kwargs)
# Overwrite the database property with the value from the API
self.ad_free = ad_free
self._environment_variables = environment_variables
def save(self, *args, **kwargs):
return 0
def has_feature(self, feature_id):
return feature_id in self.features
@property
def show_advertising(self):
"""Whether this project is ad-free (don't access the database)"""
return not self.ad_free
@property
def environment_variables(self):
return self._environment_variables
class ImportedFile(models.Model):
"""
Imported files model.
This tracks files that are output from documentation builds, useful for
things like CDN invalidation.
"""
project = models.ForeignKey(
'Project',
verbose_name=_('Project'),
related_name='imported_files',
)
version = models.ForeignKey(
'builds.Version',
verbose_name=_('Version'),
related_name='imported_files',
null=True,
)
name = models.CharField(_('Name'), max_length=255)
slug = models.SlugField(_('Slug'))
# max_length is set to 4096 because linux has a maximum path length
# of 4096 characters for most filesystems (including EXT4).
# https://github.com/rtfd/readthedocs.org/issues/5061
path = models.CharField(_('Path'), max_length=4096)
md5 = models.CharField(_('MD5 checksum'), max_length=255)
commit = models.CharField(_('Commit'), max_length=255)
modified_date = models.DateTimeField(_('Modified date'), auto_now=True)
def get_absolute_url(self):
return resolve(
project=self.project,
version_slug=self.version.slug,
filename=self.path,
)
def __str__(self):
return '{}: {}'.format(self.name, self.project)
class HTMLFile(ImportedFile):
"""
Imported HTML file Proxy model.
This tracks only the HTML files for indexing to search.
"""
class Meta:
proxy = True
objects = HTMLFileManager()
def get_processed_json(self):
"""
Get the parsed JSON for search indexing.
Check for two paths for each index file
This is because HTMLDir can generate a file from two different places:
* foo.rst
* foo/index.rst
Both lead to `foo/index.html`
https://github.com/rtfd/readthedocs.org/issues/5368
"""
paths = []
basename = os.path.splitext(self.path)[0]
paths.append(basename + '.fjson')
if basename.endswith('/index'):
new_basename = re.sub(r'\/index$', '', basename)
paths.append(new_basename + '.fjson')
full_json_path = self.project.get_production_media_path(
type_='json', version_slug=self.version.slug, include_file=False
)
try:
for path in paths:
file_path = os.path.join(full_json_path, path)
if os.path.exists(file_path):
return process_file(file_path)
except Exception:
log.warning(
'Unhandled exception during search processing file: %s',
file_path,
)
return {
'headers': [],
'content': '',
'path': file_path,
'title': '',
'sections': [],
}
@cached_property
def processed_json(self):
return self.get_processed_json()
class Notification(models.Model):
project = models.ForeignKey(Project, related_name='%(class)s_notifications')
objects = RelatedProjectQuerySet.as_manager()
class Meta:
abstract = True
class EmailHook(Notification):
email = models.EmailField()
def __str__(self):
return self.email
class WebHook(Notification):
url = models.URLField(
max_length=600,
blank=True,
help_text=_('URL to send the webhook to'),
)
def __str__(self):
return self.url
class Domain(models.Model):
"""A custom domain name for a project."""
project = models.ForeignKey(Project, related_name='domains')
domain = models.CharField(
_('Domain'),
unique=True,
max_length=255,
validators=[validate_domain_name],
)
machine = models.BooleanField(
default=False,
help_text=_('This Domain was auto-created'),
)
cname = models.BooleanField(
default=False,
help_text=_('This Domain is a CNAME for the project'),
)
canonical = models.BooleanField(
default=False,
help_text=_(
'This Domain is the primary one where the documentation is '
'served from',
),
)
https = models.BooleanField(
_('Use HTTPS'),
default=False,
help_text=_('Always use HTTPS for this domain'),
)
count = models.IntegerField(
default=0,
help_text=_('Number of times this domain has been hit'),
)
objects = RelatedProjectQuerySet.as_manager()
class Meta:
ordering = ('-canonical', '-machine', 'domain')
def __str__(self):
return '{domain} pointed at {project}'.format(
domain=self.domain,
project=self.project.name,
)
def save(self, *args, **kwargs): # pylint: disable=arguments-differ
from readthedocs.projects import tasks
parsed = urlparse(self.domain)
if parsed.scheme or parsed.netloc:
self.domain = parsed.netloc
else:
self.domain = parsed.path
super().save(*args, **kwargs)
broadcast(
type='app',
task=tasks.symlink_domain,
args=[self.project.pk, self.domain],
)
def delete(self, *args, **kwargs): # pylint: disable=arguments-differ
from readthedocs.projects import tasks
broadcast(
type='app',
task=tasks.symlink_domain,
args=[self.project.pk, self.domain, True],
)
super().delete(*args, **kwargs)
class Feature(models.Model):
"""
Project feature flags.
Features should generally be added here as choices, however features may
also be added dynamically from a signal in other packages. Features can be
added by external packages with the use of signals::
@receiver(pre_init, sender=Feature)
def add_features(sender, **kwargs):
sender.FEATURES += (('blah', 'BLAH'),)
The FeatureForm will grab the updated list on instantiation.
"""
    # Feature constants - this is not an exhaustive list of features; features
    # may be added by other packages
USE_SPHINX_LATEST = 'use_sphinx_latest'
ALLOW_DEPRECATED_WEBHOOKS = 'allow_deprecated_webhooks'
PIP_ALWAYS_UPGRADE = 'pip_always_upgrade'
SKIP_SUBMODULES = 'skip_submodules'
DONT_OVERWRITE_SPHINX_CONTEXT = 'dont_overwrite_sphinx_context'
MKDOCS_THEME_RTD = 'mkdocs_theme_rtd'
API_LARGE_DATA = 'api_large_data'
DONT_SHALLOW_CLONE = 'dont_shallow_clone'
USE_TESTING_BUILD_IMAGE = 'use_testing_build_image'
SHARE_SPHINX_DOCTREE = 'share_sphinx_doctree'
USE_PDF_LATEXMK = 'use_pdf_latexmk'
DEFAULT_TO_MKDOCS_0_17_3 = 'default_to_mkdocs_0_17_3'
FEATURES = (
(USE_SPHINX_LATEST, _('Use latest version of Sphinx')),
(USE_PDF_LATEXMK, _('Use latexmk to build the PDF')),
(ALLOW_DEPRECATED_WEBHOOKS, _('Allow deprecated webhook views')),
(PIP_ALWAYS_UPGRADE, _('Always run pip install --upgrade')),
(SKIP_SUBMODULES, _('Skip git submodule checkout')),
(
DONT_OVERWRITE_SPHINX_CONTEXT,
_(
'Do not overwrite context vars in conf.py with Read the Docs context',
),
),
(
MKDOCS_THEME_RTD,
_('Use Read the Docs theme for MkDocs as default theme'),
),
(
DONT_SHALLOW_CLONE,
_('Do not shallow clone when cloning git repos'),
),
(
USE_TESTING_BUILD_IMAGE,
_('Use Docker image labelled as `testing` to build the docs'),
),
(
API_LARGE_DATA,
_('Try alternative method of posting large data'),
),
(
SHARE_SPHINX_DOCTREE,
_('Use shared directory for doctrees'),
),
(
DEFAULT_TO_MKDOCS_0_17_3,
_('Install mkdocs 0.17.3 by default')
),
)
projects = models.ManyToManyField(
Project,
blank=True,
)
# Feature is not implemented as a ChoiceField, as we don't want validation
# at the database level on this field. Arbitrary values are allowed here.
feature_id = models.CharField(
_('Feature identifier'),
max_length=32,
unique=True,
)
add_date = models.DateTimeField(
_('Date feature was added'),
auto_now_add=True,
)
default_true = models.BooleanField(
_('Historical default is True'),
default=False,
)
objects = FeatureQuerySet.as_manager()
def __str__(self):
        return '{} feature'.format(self.get_feature_display())
def get_feature_display(self):
"""
Implement display name field for fake ChoiceField.
Because the field is not a ChoiceField here, we need to manually
implement this behavior.
"""
return dict(self.FEATURES).get(self.feature_id, self.feature_id)
class EnvironmentVariable(TimeStampedModel, models.Model):
name = models.CharField(
max_length=128,
help_text=_('Name of the environment variable'),
)
value = models.CharField(
max_length=2048,
help_text=_('Value of the environment variable'),
)
project = models.ForeignKey(
Project,
on_delete=models.CASCADE,
help_text=_('Project where this variable will be used'),
)
def __str__(self):
return self.name
def save(self, *args, **kwargs): # pylint: disable=arguments-differ
self.value = shlex_quote(self.value)
return super().save(*args, **kwargs)
|
the-stack_106_26856 | import torch
import numpy as np
import multiprocessing
import os
from torch import optim
import utils
from torch.autograd import Variable
from model import Dense_Net, ImgNet
class MHN(object):
def __init__(self, config, train_dataloader, view):
self.args = config
self.output_shape = config.output_shape
self.seed = config.seed
# self.data, self.labels, self.train_inx, self.query_inx, self.retrieval_inx = view_data
self.train_dataloader = train_dataloader
self.input_shape = self.train_dataloader.dataset.data.shape[1]
self.view = view
self.num_classes = self.train_dataloader.dataset.labels.shape[1]
if 'raw' in config.datasets:
if self.view == 0:
self.model = ImgNet(out_dim=self.output_shape)
else:
self.model = Dense_Net(input_dim=self.input_shape, out_dim=self.output_shape)
else:
self.model = Dense_Net(input_dim=self.input_shape, out_dim=self.output_shape)
# train_dataset = data_loader.NDataset(self.data, self.train_inx, self.labels, transform=train_transform)
# self.train_dataloader = data.DataLoader(train_dataset, batch_size=self.batch_sizes[self.view], shuffle=True, num_workers=num_workers, drop_last=False)
# retrieval_dataset = data_loader.NDataset(self.data, self.retrieval_inx, self.labels, transform=test_transform)
# self.retrieval_dataloader = data.DataLoader(retrieval_dataset, batch_size=self.batch_sizes[self.view], shuffle=False, num_workers=num_workers, drop_last=False)
# query_dataset = data_loader.NDataset(self.data, self.query_inx, self.labels, transform=test_transform)
# self.query_dataloader = data.DataLoader(query_dataset, batch_size=self.batch_sizes[self.view], shuffle=False, num_workers=num_workers, drop_last=False)
self.lr = config.lr[view]
self.beta1 = config.beta1
self.beta2 = config.beta2
self.batch_sizes = config.batch_sizes
self.epochs = config.epochs
self.available_num = config.available_num
self.alpha = config.alpha
self.gama = config.gama
self.W = utils.getSHAM(self.num_classes, self.output_shape, self.gama, self.available_num)
self.checkpoint_file = '{}_last_checkpoint_V{}_O{}_A{}.pth.tar'.format(self.args.datasets, self.view, self.output_shape, self.available_num)
def to_var(self, x, cuda_id):
"""Converts numpy to variable."""
if not isinstance(x, torch.Tensor):
x = torch.Tensor(x)
if torch.cuda.is_available():
x = x.cuda(cuda_id)
return Variable(x) # torch.autograd.Variable
def to_data(self, x):
"""Converts variable to numpy."""
try:
if torch.cuda.is_available():
x = x.cpu()
return x.data.numpy()
except Exception as e:
return x
def to_hashing(self, y, W):
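        # Maps labels to +/-1 hash targets via W: integer labels index rows of
        # W, while one-/multi-hot label matrices are projected with labels @ W
        # and then signed.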
if isinstance(y, torch.Tensor):
if len(y.shape) == 1 or y.shape[1] == 1:
tmp_ = (W[y] > 0).float() * 2 - 1
train_y = tmp_
else:
train_y = ((y.float().mm(W) > 0.).float() * 2. - 1).detach()
# train_y = y.float().mm(W).sign().detach()
# train_y = (train_y / math.sqrt(train_y.shape[1])).detach()
train_y.requires_grad = False
else:
if len(y.shape) == 1 or y.shape[1] == 1:
tmp_ = (self.W[y] > 0) * 2 - 1
train_y = tmp_
else:
train_y = (np.dot(y.reshape([-1, self.W.shape[0]]), self.W) > 0) * 2 - 1
return train_y
def criterion(self, x, y, labels, W):
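        # Loss sketch: a pairwise similarity term,
        # log(1 + exp(<x, y>/2)) - sim * <x, y>/2, combined (weighted by
        # alpha) with an L2 term pulling x @ W^T towards the labels.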
l2 = lambda _x, _y: ((_x - _y) ** 2).sum(1).mean()
if isinstance(x, torch.Tensor):
dist = x.mm(y.t()) / 2.
sim = (labels.float().mm(labels.float().t()) > 0).float()
loss1 = ((1. + dist.double().exp()).log() - (sim * dist).float()).sum(1).mean().float()
loss2 = l2(x.mm(W.t()), labels)
return self.alpha * loss1 + (1 - self.alpha) * loss2
else:
return (1 - self.alpha) * l2(x, y) + self.alpha * l2(np.dot(x, self.W.T), labels)
def train_view(self, cuda_id):
print('Start %d-th MHN!' % self.view)
seed = self.seed
import numpy as np
np.random.seed(seed)
import random as rn
import torch
rn.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
import time
start = time.time()
if torch.cuda.is_available():
self.model.cuda(cuda_id)
model_params = []
for name, params in self.model.named_parameters():
if 'vgg' in name:
model_params += [{'params': [params], 'lr': self.lr * 1e-1}]
pass
else:
model_params += [{'params': [params]}]
optimizer = optim.Adam(model_params, self.lr, [self.beta1, self.beta2])
losses = []
W = torch.tensor(self.W, requires_grad=False).cuda(cuda_id).float()
criterion = lambda x, y, la: self.criterion(x, y, la, W)
batch_count = len(self.train_dataloader)
for epoch in range(self.epochs):
print(('\nView ID: %d, Epoch %d/%d') % (self.view, epoch + 1, self.epochs))
self.model.train()
mean_loss = []
# for batch_idx in range(batch_count):
for batch_idx, (train_x, train_lab) in enumerate(self.train_dataloader):
train_x = self.to_var(train_x, cuda_id)
train_lab = self.to_var(train_lab, cuda_id)
train_y = self.to_hashing(train_lab, W).float()
optimizer.zero_grad()
loss = criterion(self.model(train_x)[-1], train_y, train_lab)
loss.backward()
optimizer.step()
mean_loss.append(self.to_data(loss))
utils.show_progressbar([batch_idx, batch_count], loss=(loss.item() if batch_idx < batch_count - 1 else np.mean(mean_loss)))
losses.append(np.mean(mean_loss))
utils.save_checkpoint({
'epoch': epoch,
'model': self.model.state_dict(),
'opt': self.args,
'loss': np.array(losses)
}, filename=self.checkpoint_file, prefix=self.args.prefix)
self.adjust_learning_rate(optimizer, epoch + 1)
print('Training time: %.3f' % (time.time() - start))
# query_pre = (utils.predict(lambda x: self.model(x)[-1].view([x.shape[0], -1]), self.query_dataloader, cuda_id=cuda_id).reshape([self.query_data[self.view].shape[0], -1]) > 0) * 2 - 1
# retrieval_pre = (utils.predict(lambda x: self.model(x)[-1].view([x.shape[0], -1]), self.retrieval_dataloader, cuda_id=cuda_id).reshape([self.retrieval_data[self.view].shape[0], -1]) > 0) * 2 - 1
return self.model
def eval(self, eval_dataloader, cuda_id):
self.model = self.model.cuda(cuda_id)
self.model.eval()
ret, lab = utils.predict(lambda x: self.model(x)[-1].view([x.shape[0], -1]), eval_dataloader, cuda_id=cuda_id)
return (ret > 0) * 2 - 1, lab
def adjust_learning_rate(self, optimizer, epoch):
"""
        Decay the learning rate by a factor of 10 every ``opt.lr_update`` epochs.
"""
if (epoch % self.args.lr_update) == 0:
for param_group in optimizer.param_groups:
param_group['lr'] *= 0.1
def load_checkpoint(self, checkpoint_file=None):
checkpoint_file = os.path.join(self.args.prefix, self.checkpoint_file) if checkpoint_file is None else checkpoint_file
ckp = torch.load(checkpoint_file)
self.model.load_state_dict(ckp['model'])
print('Load pretrained model at %d-th epoch.' % ckp['epoch'])
print(ckp['opt'])
return ckp['epoch'], ckp['model'], ckp['opt'], ckp['loss']
|
the-stack_106_26857 | # Copyright (C) 2013-2018 Internet Systems Consortium.
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Author: Wlodzimierz Wencel
import os
import sys
import time
import logging
from shutil import rmtree
import subprocess
import importlib
from Crypto.Random.random import randint
from scapy.config import conf
from scapy.layers.dhcp6 import DUID_LLT
from forge_cfg import world, step
from softwaresupport.multi_server_functions import fabric_download_file, make_tarfile, archive_file_name,\
fabric_remove_file_command, fabric_run_command
import logging_facility
from srv_control import start_srv
log = logging.getLogger('forge')
values_v6 = {"T1": 0, # IA_NA IA_PD
"T2": 0, # IA_NA IA_PD
"address": "::",
"IA_Address": "::",
"prefix": "::",
"plen": 0, # prefix; plz remember, to add prefix and prefix length!
"preflft": 0, # IA_Address IA_Prefix
"validlft": 0, # IA_Address IA_Prefix
"enterprisenum": 0, # vendor
"vendor_class_data": "",
"linkaddr": world.f_cfg.srv_ipv6_addr_global, # relay
"peeraddr": world.f_cfg.cli_link_local, # relay
"ifaceid": "15", # relay
"DUID": None,
"FQDN_flags": "",
"FQDN_domain_name": "",
"address_type": 1, # dhcpv6 mac addr type, option 79
"link_local_mac_addr": world.f_cfg.cli_mac,
"remote_id": "",
"subscriber_id": "",
"ia_id": 0,
"ia_pd": 0,
"prefval": 1,
"elapsedtime": 1,
"srvaddr": "::",
"statuscode": 0,
"statusmsg": "",
"reconfigure_msg_type": 5,
"reqopts": 7,
"paaaddr": "::",
"iitype": 0,
"iimajor": 0,
"iiminor": 0,
"archtypes": 1,
"erpdomain": "",
"user_class_data": ""}
srv_values_v6 = {"T1": 1000,
"T2": 2000,
"preferred-lifetime": 3000,
"valid-lifetime": 4000,
"prefix": "3000::",
"prefix-len": 64,
"timer": 10,
"dst_addr": ()}
values_dns = {"qname": "",
"qtype": "",
"qclass": ""}
# time values, please do not change them here.
# there is a test step to do this
server_times_v6 = {"renew-timer": 1000,
"rebind-timer": 2000,
"preferred-lifetime": 3000,
"valid-lifetime": 4000,
"rapid-commit": False # yes that little odd, but let us keep it that way,
# rapid-commit it's only option that is used
# only in server configuration
}
server_times_v4 = {"renew-timer": 1000,
"rebind-timer": 2000,
"valid-lifetime": 4000}
values_v4 = {"ciaddr": "0.0.0.0",
"yiaddr": "0.0.0.0",
"siaddr": "0.0.0.0",
"giaddr": "0.0.0.0",
"htype": 1,
"broadcastBit": False,
"hops": 0,
"chaddr": None,
"FQDN_flags": "",
"FQDN_domain_name": ""}
# we should consider transferring most of these functions to separate v4 and v6 files
# TODO: make separate files after branch merge
def _set_values():
# this function is called after each message send.
if world.f_cfg.proto == "v6":
world.cfg["values"] = values_v6.copy()
world.cfg["server_times"] = server_times_v6.copy()
# reset values to 'default for scenario'
world.cfg["values"]["cli_duid"] = world.cfg["cli_duid"]
world.cfg["values"]["server_id"] = ""
world.cfg["values"]["ia_id"] = world.cfg["ia_id"]
world.cfg["values"]["ia_pd"] = world.cfg["ia_pd"]
else:
world.cfg["values"] = values_v4.copy()
world.cfg["server_times"] = server_times_v4.copy()
world.set_values = _set_values
def client_id(mac):
world.cfg["cli_duid"] = DUID_LLT(timeval = int(time.time()), lladdr = mac)
if "values" in world.cfg:
world.cfg["values"]["cli_duid"] = world.cfg["cli_duid"]
def ia_id():
world.cfg["ia_id"] = randint(1, 99999)
if "values" in world.cfg:
world.cfg["values"]["ia_id"] = world.cfg["ia_id"]
def ia_pd():
world.cfg["ia_pd"] = randint(1, 99999)
if "values" in world.cfg:
world.cfg["values"]["ia_pd"] = world.cfg["ia_pd"]
def _v4_initialize():
# Setup scapy for v4
# conf.iface = IFACE
conf.checkIPaddr = False # DHCPv4 is sent from 0.0.0.0, so response matching may confuse scapy
world.cfg["srv4_addr"] = world.f_cfg.srv4_addr
world.cfg["rel4_addr"] = world.f_cfg.rel4_addr
world.cfg["giaddr4"] = world.f_cfg.giaddr4
world.cfg["space"] = "dhcp4"
world.cfg["source_port"] = 68
world.cfg["destination_port"] = 67
world.cfg["source_IP"] = "0.0.0.0"
world.cfg["destination_IP"] = "255.255.255.255"
world.dhcp_enable = True
def _v6_initialize():
world.dhcp_enable = True
    # RFC 3315 defines two addresses:
# All_DHCP_Relay_Agents_and_Servers = ff02::1:2
# All DHCP_Servers ff05::1:3.
world.cfg["address_v6"] = "ff02::1:2"
world.cfg["cli_link_local"] = world.f_cfg.cli_link_local
world.cfg["unicast"] = False
world.cfg["relay"] = False
world.cfg["space"] = "dhcp6"
world.cfg["source_port"] = 546
world.cfg["destination_port"] = 547
# Setup scapy for v6
conf.iface6 = world.f_cfg.iface
conf.use_pcap = True
# those values should be initialized once each test
# if you are willing to change it use 'client set value' steps
client_id(world.f_cfg.cli_mac)
ia_id()
ia_pd()
def _dns_initialize():
world.cfg["dns_iface"] = world.f_cfg.dns_iface
world.cfg["dns4_addr"] = world.f_cfg.dns4_addr
world.cfg["dns6_addr"] = world.f_cfg.dns6_addr
world.cfg["dns_port"] = world.f_cfg.dns_port
world.dns_enable = True
def _define_software(dhcp_version):
# unfortunately we have to do this every single time
world.cfg["dhcp_under_test"] = ""
world.cfg["dns_under_test"] = ""
for name in world.f_cfg.software_under_test:
if name in world.f_cfg.dhcp_used:
world.cfg["dhcp_under_test"] = name.replace('6', '4') if dhcp_version == 'v4' else name.replace('4', '6')
# world.cfg["dns_under_test"] = ""
elif name in world.f_cfg.dns_used:
world.cfg["dns_under_test"] = name
# world.cfg["dhcp_under_test"] = ""
def declare_all(dhcp_version=None):
world.climsg = [] # Message(s) to be sent
world.srvmsg = [] # Server's response(s)
world.rlymsg = [] # Server's response(s) Relayed by Relay Agent
world.tmpmsg = [] # container for temporary stored messages
world.cliopts = [] # Option(s) to be included in the next message sent
world.relayopts = [] # option(s) to be included in Relay Forward message.
world.rsoo = [] # List of relay-supplied-options
world.savedmsg = {0: []} # Saved option(s)
world.define = [] # temporary define variables
proto = dhcp_version if dhcp_version else world.f_cfg.proto
world.proto = world.f_cfg.proto = proto
world.oro = None
world.vendor = []
world.iaad = []
world.iapd = []
world.opts = []
world.subopts = []
world.message_fields = []
world.subnet_add = True
world.control_channel = None # last received response from any communication channel
world.cfg = {}
world.f_cfg.multiple_tested_servers = [world.f_cfg.mgmt_address]
# dictionary that will keep multiple configs for various servers
# mainly for testing multiple kea servers in the single test,
    # multiple servers have to be configured exactly identically.
# supported only for Kea servers
world.configClass = None
# list that will keep configuration class from which mysql/postgres/netconf
# configuration script will be generated
# in future it's designed to clear JSON configuration process as well
world.configString = ""
world.cfg['leases'] = os.path.join(world.f_cfg.software_install_path,
'var/lib/kea/kea-leases%s.csv' % world.proto[1])
world.cfg['kea_logs'] = os.path.join(world.f_cfg.software_install_path + '/var/log/kea.log')
world.cfg["dhcp_log_file"] = "~/none_file"
world.loops = {"active": False,
"save_leases_details": False}
world.scapy_verbose = 99
world.dns_enable = False
world.dhcp_enable = False
world.ddns_enable = False
world.ctrl_enable = False
world.fuzzing = False
# clear tmp DB values to use default from configuration
world.f_cfg.db_type = world.f_cfg.db_type_bk
world.f_cfg.db_host = world.f_cfg.db_host_bk
world.f_cfg.db_name = world.f_cfg.db_name_bk
world.f_cfg.db_passwd = world.f_cfg.db_passwd_bk
world.f_cfg.db_user = world.f_cfg.db_user_bk
#@before.all
def test_start():
"""
    Prepare the test environment before testing starts (clear results, stop servers, set up databases).
"""
# clear tests results
if os.path.exists('tests_results'):
rmtree('tests_results')
os.makedirs('tests_results')
if not os.path.exists('tests_results_archive') and world.f_cfg.auto_archive:
os.makedirs('tests_results_archive')
world.result = []
# Initialize the common logger.
logging_facility.logger_initialize(world.f_cfg.loglevel)
if not world.f_cfg.no_server_management:
for each in world.f_cfg.software_under_test:
sut = importlib.import_module("softwaresupport.%s.functions" % each)
# True passed to stop_srv is to hide output in console.
sut.stop_srv(True)
if hasattr(sut, 'db_setup'):
sut.db_setup()
#@before.each_scenario
def initialize(scenario):
# try to automagically detect DHCP version based on fixture presence
# or marker presence
try:
dhcp_version = scenario._request.getfixturevalue('dhcp_version')
except:
if scenario.get_closest_marker('v4'):
dhcp_version = 'v4'
elif scenario.get_closest_marker('v6'):
dhcp_version = 'v6'
else:
dhcp_version = None
# Declare all default values
declare_all(dhcp_version)
_define_software(dhcp_version)
world.cfg["iface"] = world.f_cfg.iface
# world.cfg["server_type"] = SOFTWARE_UNDER_TEST for now I'll leave it here,
# now we use world.cfg["dhcp_under_test"] and world.cfg["dns_under_test"] (in function _define_software)
# it is being filled with values in srv_control
world.cfg["wait_interval"] = world.f_cfg.packet_wait_interval
world.cfg["cfg_file"] = "server.cfg"
world.cfg["cfg_file_2"] = "second_server.cfg"
world.cfg["conf"] = "" # Just empty config for now
# additional config structure [subnet, client class/simple options, options, pools, host reservation]:
world.subcfg = [["", "", "", "", "", "", ""]]
world.shared_subcfg = []
world.shared_subnets = []
world.shared_subnets_tmp = []
world.kea_ha = [[], [], [], []]
world.hooks = []
world.classification = []
world.reservation_backend = ""
test_result_dir = str(scenario.name).replace(".", "_").replace('[', '_').replace(']', '_').replace('/', '_')
world.cfg["test_result_dir"] = os.path.join('tests_results', test_result_dir)
world.cfg["subnet"] = ""
world.cfg["server-id"] = ""
world.cfg["csv-format"] = "true"
world.cfg["tr_id"] = None
world.name = scenario.name
world.srvopts = []
world.pref = None
world.time = None
# append single timestamp to list
world.timestamps = []
# response times list
world.RTlist = []
# time ranges that response time must fit in
world.RTranges = []
world.RTranges.append([0.9, 1.1])
world.c = 0
world.notSolicit = 0
world.saved = []
world.iaid = []
if "dhcp_under_test" in world.cfg:
# IPv6:
if world.proto == "v6":
_v6_initialize()
# IPv4:
if world.proto == "v4":
_v4_initialize()
if "dns_under_test" in world.cfg:
_dns_initialize()
world.set_values()
world.cfg["values"]["tr_id"] = world.cfg["tr_id"]
    # to create separate files for each test we need to
    # create a new directory for that test:
if not os.path.exists(world.cfg["test_result_dir"]):
os.makedirs(world.cfg["test_result_dir"])
if not os.path.exists(world.cfg["test_result_dir"] + '/dns') and world.dns_enable:
os.makedirs(world.cfg["test_result_dir"] + '/dns')
if world.f_cfg.tcpdump:
cmd = world.f_cfg.tcpdump_path + 'tcpdump'
args = [cmd, "-U", "-w", world.cfg["test_result_dir"] + "/capture.pcap",
"-s", str(65535), "-i", world.cfg["iface"]]
subprocess.Popen(args)
        # potential problems with two instances of tcpdump running
# TODO make sure it works properly!
if world.dhcp_enable and world.dns_enable:
if world.cfg["dns_iface"] != world.cfg["iface"]:
cmd2 = world.f_cfg.tcpdump_path + 'tcpdump'
args2 = [cmd2, "-U", "-w", world.cfg["test_result_dir"] + "/capture_dns.pcap",
"-s", str(65535), "-i", world.cfg["dns_iface"]]
subprocess.Popen(args2)
_clear_remainings()
#@after.each_scenario
def cleanup(scenario):
"""
Global cleanup for each scenario. Implemented within tests by "Server is started."
"""
info = str(scenario.name) + '\n' + str(scenario.failed)
if 'outline' not in info:
world.result.append(info)
# stop dhcp server
start_srv('DHCP', 'stopped')
if world.f_cfg.tcpdump:
time.sleep(1)
args = ["killall tcpdump"]
subprocess.call(args, shell=True)
# TODO: log output in debug mode
if not world.f_cfg.no_server_management:
for each_remote_server in world.f_cfg.multiple_tested_servers:
for each in world.f_cfg.software_under_test:
functions = importlib.import_module("softwaresupport.%s.functions" % each)
# try:
if world.f_cfg.save_leases:
                    # save leases; if there are no leases in your software, just put "pass" in this function.
functions.save_leases(destination_address=each_remote_server)
if world.f_cfg.save_logs:
functions.save_logs(destination_address=each_remote_server)
_clear_remainings()
def _clear_remainings():
if not world.f_cfg.no_server_management:
for each_remote_server in world.f_cfg.multiple_tested_servers:
for each in world.f_cfg.software_under_test:
functions = importlib.import_module("softwaresupport.%s.functions" % each)
                # each software has something different to clear; put whatever you need in clear_all()
                # (an illustrative stub of such a module is sketched right after this function)
functions.clear_all(destination_address=each_remote_server)
# except: # TODO this should be on multi_server_functions level!
# log.info("Remote location " + each_remote_server + " unreachable!")
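# Illustrative sketch (not part of the framework; the signatures below are
# inferred from the calls made in this file and should be treated as an
# assumption, not the authoritative plugin API): a minimal
# softwaresupport.<name>.functions module only needs to expose the hooks used
# above, e.g.
#
#     def stop_srv(value=False, destination_address=None):
#         pass  # stop the server under test; value=True hides console output
#
#     def db_setup():
#         pass  # optional; prepare database backends if the software needs them
#
#     def save_leases(destination_address=None):
#         pass  # copy lease files; just "pass" if the software keeps no leases
#
#     def save_logs(destination_address=None):
#         pass  # copy log files from the tested server
#
#     def clear_all(destination_address=None):
#         pass  # remove leftover configuration, leases and logs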
#@after.all
def say_goodbye():
"""
Server stopping after whole work
"""
if world.f_cfg.history:
result = open('result', 'w')
for item in world.result:
result.write(str(item) + '\n')
result.close()
if not world.f_cfg.no_server_management:
for each_remote_server in world.f_cfg.multiple_tested_servers:
for each in world.f_cfg.software_under_test:
stop = importlib.import_module("softwaresupport.%s.functions" % each)
# True passed to stop_srv is to hide output in console.
try:
stop.stop_srv(value=True, destination_address=each_remote_server)
except:
pass
if world.f_cfg.auto_archive:
name = ""
if world.cfg["dhcp_under_test"] != "":
name += world.cfg["dhcp_under_test"]
if world.cfg["dns_under_test"] != "":
if name != "":
name += "_"
            name += world.cfg["dns_under_test"]
archive_name = world.f_cfg.proto + '_' + name + '_' + time.strftime("%Y-%m-%d-%H:%M")
archive_name = archive_file_name(1, 'tests_results_archive/' + archive_name)
make_tarfile(archive_name + '.tar.gz', 'tests_results')
|
the-stack_106_26869 | from manim2.imports import *
from from_3b1b.old.hilbert.curves import *
class Intro(TransformOverIncreasingOrders):
@staticmethod
def args_to_string(*args):
return ""
@staticmethod
def string_to_args(string):
raise Exception("string_to_args Not Implemented!")
def construct(self):
words1 = TextMobject(
"If you watched my video about Hilbert's space-filling curve\\dots"
)
words2 = TextMobject(
"\\dots you might be curious to see what a few other space-filling curves look like."
)
words2.scale(0.8)
for words in words1, words2:
words.to_edge(UP, buff = 0.2)
self.setup(HilbertCurve)
self.play(ShimmerIn(words1))
for x in range(4):
self.increase_order()
self.remove(words1)
self.increase_order(
ShimmerIn(words2)
)
for x in range(4):
self.increase_order()
class BringInPeano(Intro):
def construct(self):
words1 = TextMobject("""
For each one, see if you can figure out what
the pattern of construction is.
""")
words2 = TextMobject("""
This one is the Peano curve.
""")
words3 = TextMobject("""
It is the original space-filling curve.
""")
self.setup(PeanoCurve)
self.play(ShimmerIn(words1))
self.wait(5)
self.remove(words1)
self.add(words2.to_edge(UP))
for x in range(3):
self.increase_order()
self.remove(words2)
self.increase_order(ShimmerIn(words3.to_edge(UP)))
for x in range(2):
self.increase_order()
class FillOtherShapes(Intro):
def construct(self):
words1 = TextMobject("""
But of course, there's no reason we should limit
ourselves to filling in squares.
""")
words2 = TextMobject("""
Here's a simple triangle-filling curve I defined
in a style reflective of a Hilbert curve.
""")
words1.to_edge(UP)
words2.scale(0.8).to_edge(UP, buff = 0.2)
self.setup(TriangleFillingCurve)
self.play(ShimmerIn(words1))
for x in range(3):
self.increase_order()
self.remove(words1)
self.add(words2)
for x in range(5):
self.increase_order()
class SmallerFlowSnake(FlowSnake):
CONFIG = {
"radius" : 4
}
class MostDelightfulName(Intro):
def construct(self):
words1 = TextMobject("""
This one has the most delightful name,
thanks to mathematician/programmer Bill Gosper:
""")
words2 = TextMobject("``Flow Snake''")
words3 = TextMobject("""
What makes this one particularly interesting
is that the boundary itself is a fractal.
""")
for words in words1, words2, words3:
words.to_edge(UP)
self.setup(SmallerFlowSnake)
self.play(ShimmerIn(words1))
for x in range(3):
self.increase_order()
self.remove(words1)
self.add(words2)
for x in range(3):
self.increase_order()
self.remove(words2)
self.play(ShimmerIn(words3))
class SurpriseFractal(Intro):
def construct(self):
words = TextMobject("""
It might come as a surprise how some well-known
fractals can be described with curves.
""")
words.to_edge(UP)
self.setup(Sierpinski)
self.add(TextMobject("Speaking of other fractals\\dots"))
self.wait(3)
self.clear()
self.play(ShimmerIn(words))
for x in range(9):
self.increase_order()
class IntroduceKoch(Intro):
def construct(self):
words = list(map(TextMobject, [
"This is another famous fractal.",
"The ``Koch Snowflake''",
"Let's finish things off by seeing how to turn \
this into a space-filling curve"
]))
for text in words:
text.to_edge(UP)
self.setup(KochCurve)
self.add(words[0])
for x in range(3):
self.increase_order()
self.remove(words[0])
self.add(words[1])
for x in range(4):
self.increase_order()
self.remove(words[1])
self.add(words[2])
self.wait(6)
class StraightKoch(KochCurve):
CONFIG = {
"axiom" : "A"
}
class SharperKoch(StraightKoch):
CONFIG = {
"angle" : 0.9*np.pi/2,
}
class DullerKoch(StraightKoch):
CONFIG = {
"angle" : np.pi/6,
}
class SpaceFillingKoch(StraightKoch):
CONFIG = {
"angle" : np.pi/2,
}
class FromKochToSpaceFilling(Scene):
def construct(self):
self.max_order = 7
self.revisit_koch()
self.show_angles()
self.show_change_side_by_side()
def revisit_koch(self):
words = list(map(TextMobject, [
"First, look at how one section of this curve is made.",
"This pattern of four lines is the ``seed''",
"With each iteration, every straight line is \
replaced with an appropriately small copy of the seed",
]))
for text in words:
text.to_edge(UP)
self.add(words[0])
curve = StraightKoch(order = self.max_order)
self.play(Transform(
curve,
StraightKoch(order = 1),
run_time = 5
))
self.remove(words[0])
self.add(words[1])
self.wait(4)
self.remove(words[1])
self.add(words[2])
self.wait(3)
for order in range(2, self.max_order):
self.play(Transform(
curve,
StraightKoch(order = order)
))
if order == 2:
self.wait(2)
elif order == 3:
self.wait()
self.clear()
def show_angles(self):
words = TextMobject("""
Let's see what happens as we change
the angle in this seed
""")
words.to_edge(UP)
koch, sharper_koch, duller_koch = curves = [
CurveClass(order = 1)
for CurveClass in (StraightKoch, SharperKoch, DullerKoch)
]
arcs = [
Arc(
2*(np.pi/2 - curve.angle),
radius = r,
start_angle = np.pi+curve.angle
            ).shift(curve.points[curve.get_num_points() // 2])
for curve, r in zip(curves, [0.6, 0.7, 0.4])
]
theta = TexMobject("\\theta")
theta.shift(arcs[0].get_center()+2.5*DOWN)
arrow = Arrow(theta, arcs[0])
self.add(words, koch)
self.play(ShowCreation(arcs[0]))
self.play(
ShowCreation(arrow),
ShimmerIn(theta)
)
self.wait(2)
self.remove(theta, arrow)
self.play(
Transform(koch, duller_koch),
Transform(arcs[0], arcs[2]),
)
self.play(
Transform(koch, sharper_koch),
Transform(arcs[0], arcs[1]),
)
self.clear()
def show_change_side_by_side(self):
seed = TextMobject("Seed")
seed.shift(3*LEFT+2*DOWN)
fractal = TextMobject("Fractal")
fractal.shift(3*RIGHT+2*DOWN)
words = list(map(TextMobject, [
"A sharper angle results in a richer curve",
"A more obtuse angle gives a sparser curve",
"And as the angle approaches 0\\dots",
"We have a new space-filling curve."
]))
for text in words:
text.to_edge(UP)
sharper, duller, space_filling = [
CurveClass(order = 1).shift(3*LEFT)
for CurveClass in (SharperKoch, DullerKoch, SpaceFillingKoch)
]
        sharper_f, duller_f, space_filling_f = [
CurveClass(order = self.max_order).shift(3*RIGHT)
for CurveClass in (SharperKoch, DullerKoch, SpaceFillingKoch)
]
self.add(words[0])
left_curve = SharperKoch(order = 1)
right_curve = SharperKoch(order = 1)
self.play(
Transform(left_curve, sharper),
ApplyMethod(right_curve.shift, 3*RIGHT),
)
self.play(
Transform(
right_curve,
SharperKoch(order = 2).shift(3*RIGHT)
),
ShimmerIn(seed),
ShimmerIn(fractal)
)
for order in range(3, self.max_order):
self.play(Transform(
right_curve,
SharperKoch(order = order).shift(3*RIGHT)
))
self.remove(words[0])
self.add(words[1])
kwargs = {
"run_time" : 4,
}
self.play(
Transform(left_curve, duller, **kwargs),
Transform(right_curve, duller_f, **kwargs)
)
self.wait()
kwargs["run_time"] = 7
kwargs["rate_func"] = None
self.remove(words[1])
self.add(words[2])
self.play(
Transform(left_curve, space_filling, **kwargs),
Transform(right_curve, space_filling_f, **kwargs)
)
self.remove(words[2])
self.add(words[3])
self.wait()
|
the-stack_106_26870 | """Making sure we are running the right version of python"""
import sys
if sys.version_info[0] >= 3:
raise Exception("Must be using Python 2")
"""Making sure soar library path environment is set
Remember to set the environment variable to point to where soar build is located, e.g.:
export LD_LIBRARY_PATH=~/Desktop/Soar/out
"""
from os import environ as env, fsync
import sys
if "DYLD_LIBRARY_PATH" in env:
LIB_PATH = env["DYLD_LIBRARY_PATH"]
elif "LD_LIBRARY_PATH" in env:
LIB_PATH = env["LD_LIBRARY_PATH"]
else:
print("Soar LIBRARY_PATH environment variable not set; quitting")
exit(1)
sys.path.append(LIB_PATH)
import Python_sml_ClientInterface as sml # Python interface to SOAR
import rospy # ROS library
import json
from nav_msgs.msg import Odometry
from sensor_msgs.msg import LaserScan
import message_filters
from std_msgs.msg import String
from geometry_msgs.msg import Twist
""" Callback functions to help us see what is happening inside agent's mind"""
def register_print_callback(kernel, agent, function, user_data=None):
agent.RegisterForPrintEvent(sml.smlEVENT_PRINT, function, user_data)
def callback_print_message(mid, user_data, agent, message):
print(message.strip())
""" Client to interact with agent's mind"""
def cli(agent):
cmd = raw_input("soar> ")
while cmd not in ("exit", "quit"):
if cmd:
print(agent.ExecuteCommandLine(cmd).strip())
cmd = raw_input("soar> ")
from random import *
#class ToyEnv(object):
# """
# A very simple 'environment': sensors return two random numbers and expects a single number as actuation.
# """
# def __init__(self):
# """Return a new toy env object."""
# def get_sensors(self):
# """"""
# a=randint(1, 10)
# b=randint(1, 10)
# sensors=[a,b]
# #print("---> Environment sensed: ",sensors)
# return sensors
#
# def set_actuators(self, act):
# """"""
# print("---> Environment acted:",act)
## ROS variables
pub=None #pub should be visible by main and callbacks
sub=None #sub should be visible by main and callbacks
vel_output_pub=None
vel_msg=None
## SOAR variables #TODO is there a better way of doing this?
kernel=None
agent=None
te=None
input_link=None
a_value=None
b_value=None
output_link=None
tasks_wme=None
tasks_data_json=json.dumps({})
previous_tasks_data_json=json.dumps({})
seq_wme=None
secs_wme=None
nsecs_wme=None
frame_id_wme=None
child_frame_id_wme=None
xp_wme=None
yp_wme=None
zp_wme=None
xo_wme=None
yo_wme=None
zo_wme=None
wo_wme=None
xtl_wme=None
ytl_wme=None
ztl_wme=None
xta_wme=None
yta_wme=None
zta_wme=None
loop_counter=0
def topic_callback(odom_data, scan_data,tasks_data):
print("________________________")
print("Enter ROS topic_callback")
input_link=agent.GetInputLink()
# rospy.loginfo('odom_stamp: ' + str(odom_data.header.stamp.to_sec()) + ' ; scan_stamp: ' + str(scan_data.header.stamp.to_sec())+ ' ; tasks_data: ' + str(tasks_data))
# for i in range(len(tasks_data_json['plan'])):
# print('=====<<<<<< tasks_data_json: ', tasks_data_json['plan'][i])
# tasks_data_json['plan'][i]
# tasks_wme_list.append(tasks_data_json['plan'][i])
# print("tasks_wme_list: ",tasks_wme_list)
# if(tasks_wme!=None):
# tasks_wme=agent.DestroyWME(tasks_wme)
#
# tasks_wme=agent.CreateIdWME(input_link,"tasks")
# tasks_wme = agent.CreateIdWME(input_link,"tasks")
# task_wme=agent.CreateIdWME(tasks_wme,"next_task")
# task_wme_name=agent.CreateStringWME(task_wme,"name","move")
# task_wme_waypoint=agent.CreateStringWME(task_wme,"waypoint","Waypoint_SH#1")
#Do something with data
#dict=eval(data.data)
## Cognitive cycle goes here
# a_value_temp=float(dict['a_value'])
# b_value_temp=float(dict['b_value'])
## 2) push senses to soar
# a_value.Update(a_value_temp)
# b_value.Update(b_value_temp)
#Odometer
xp_wme.Update(float(odom_data.pose.pose.position.x))
yp_wme.Update(float(odom_data.pose.pose.position.y))
zp_wme.Update(float(odom_data.pose.pose.position.z))
xo_wme.Update(float(odom_data.pose.pose.orientation.x))
yo_wme.Update(float(odom_data.pose.pose.orientation.y))
zo_wme.Update(float(odom_data.pose.pose.orientation.z))
wo_wme.Update(float(odom_data.pose.pose.orientation.w))
xtl_wme.Update(float(odom_data.twist.twist.linear.x))
ytl_wme.Update(float(odom_data.twist.twist.linear.y))
ztl_wme.Update(float(odom_data.twist.twist.linear.z))
xta_wme.Update(float(odom_data.twist.twist.angular.x))
yta_wme.Update(float(odom_data.twist.twist.angular.y))
zta_wme.Update(float(odom_data.twist.twist.angular.z))
#Tasks List
global tasks_wme,tasks_data_json,previous_tasks_data_json
tasks_data_json = json.loads(str(tasks_data.data))
if(previous_tasks_data_json!=tasks_data_json): #Checking if list of tasks has changed before sending it to soar
print("##########################")
if(tasks_wme!=None):
#print("DestroyWME: ",tasks_wme)
tasks_wme=agent.DestroyWME(tasks_wme)
tasks_wme = agent.CreateIdWME(input_link,"tasks")
tasks_wme_list=[]
previous_task_wme=tasks_wme
for i in range(len(tasks_data_json['plan'])):
task_wme=agent.CreateIdWME(previous_task_wme,"next_task")
action_name=str(tasks_data_json['plan'][i][0])
agent.CreateStringWME(task_wme,"name",action_name)
if(action_name=='move'):
agent.CreateStringWME(task_wme,"waypoint",str(tasks_data_json['plan'][i][2]))
elif(action_name=='pick'):
agent.CreateStringWME(task_wme,"shelf",str(tasks_data_json['plan'][i][2]))
agent.CreateStringWME(task_wme,"product",str(tasks_data_json['plan'][i][3]))
elif(action_name=='drop'):
agent.CreateStringWME(task_wme,"conveyor_belt",str(tasks_data_json['plan'][i][2]))
agent.CreateStringWME(task_wme,"product",str(tasks_data_json['plan'][i][3]))
else:
print('unknown action: ',action_name)
previous_task_wme=task_wme
previous_tasks_data_json = tasks_data_json
## 3) make soar think about it
result=0
#run_result=agent.RunSelf(1) #Run agent for one step (should run until output?)
run_result=agent.RunSelfTilOutput() #TODO see why so many substates are being created
## 4) TODO get results from soar
output_link=agent.GetOutputLink()## returns an Identifier
if output_link!= None:
result_output_wme = output_link.FindByAttribute("result", 0) # returns a WMElement of the form (<output_link> ^result <val>)
result=None
if result_output_wme != None:
result = float(result_output_wme.GetValueAsString())
print("Result: ",result)
## 5) send result to environment
# te.set_actuators(result)
#rospy.loginfo("output result log: "+str(result))
# pub.publish("output result topic: "+str(result)) # Here goes the output
vel_msg=Twist()
vel_msg.linear.x=0.0
vel_msg.linear.y=0.0
vel_msg.linear.z=0.0
vel_msg.angular.x=0.0
vel_msg.angular.y=0.0
vel_msg.angular.z=0.0
vel_output_pub.publish(vel_msg)
#print("vel_msg: ",vel_msg)
##TODO Improve soar_robot.soar based on Move-north operator
global loop_counter
print("loop_counter: ",loop_counter)
loop_counter=loop_counter+1
if(loop_counter>3):
cli(agent) #open client to interact with agent
""" =============================================================== """
""" === Main program === """
""" =============================================================== """
if __name__ == "__main__":
json_string = """
{
"id": "xyz",
"init": "initURI",
"goal": "finalURI",
"plan":[
["move", "youBot#0", "Waypoint_SH#1"],
["pick", "youBot#0", "Shelf#1", "productRed"],
["move", "youBot#0", "Waypoint_CB"],
["drop", "youBot#0", "ConveyorBelt", "productRed"]
]
}
"""
temp_tasks_dummy_data = json.loads(json_string)
tasks_dummy_data=str(json.dumps(temp_tasks_dummy_data)) #makes sure it is a valid json when converting to str
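    # Illustrative sketch (not part of the original script; the grouping shown
    # here is an assumption and is not used anywhere): a per-robot task list
    # could be derived from such a plan with
    #     plan = json.loads(tasks_dummy_data)["plan"]
    #     tasks_per_robot = {}
    #     for action in plan:
    #         # action[0] = action name, action[1] = robot, the rest = its arguments
    #         tasks_per_robot.setdefault(action[1], []).append([action[0]] + action[2:])
    # which mirrors how topic_callback() walks tasks_data_json['plan'] above.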
#-- SOAR AGENT INITIALIZATION-----------------------------------
print("Initializing SOAR Agent")
#Instantiate link to environment
#te = ToyEnv()
#Create soar kernel and agent
kernel = sml.Kernel.CreateKernelInCurrentThread()
agent = kernel.CreateAgent("agent")
register_print_callback(kernel, agent, callback_print_message, None)
#Load soar sources
agent.ExecuteCommandLine("source soar_robot.soar")
agent.ExecuteCommandLine("soar wait-snc")
#Get input link and create input structure
input_link=agent.GetInputLink()
a_value=agent.CreateFloatWME(input_link, "a", -1.0)
b_value=agent.CreateFloatWME(input_link, "b", -1.0)
#TODO How about doing this automatically based on the topic's structure?
odom_wme=agent.CreateIdWME(input_link,"odom")
header_wme=agent.CreateIdWME(odom_wme,"header")
stamp_wme=agent.CreateIdWME(header_wme,"stamp")
pose_wme=agent.CreateIdWME(odom_wme,"pose")
pose2_wme=agent.CreateIdWME(pose_wme,"pose") #Weird, but that's how ROS odom does it
position_wme=agent.CreateIdWME(pose2_wme,"position")
orientation_wme=agent.CreateIdWME(pose2_wme,"orientation")
twist_wme=agent.CreateIdWME(odom_wme,"twist")
twist2_wme=agent.CreateIdWME(twist_wme,"twist") #Weird, but that's how ROS odom does it
twist_linear_wme=agent.CreateIdWME(twist2_wme,"linear")
twist_angular_wme=agent.CreateIdWME(twist2_wme,"angular")
#Leaves
seq_wme=agent.CreateIntWME(header_wme,"seq",-1)
secs_wme=agent.CreateFloatWME(stamp_wme,"secs",-1)
nsecs_wme=agent.CreateFloatWME(stamp_wme,"nsecs",-1)
frame_id_wme=agent.CreateStringWME(header_wme,"frame_id","empty")
child_frame_id_wme=agent.CreateStringWME(odom_wme,"child_frame_id","empty")
xp_wme=agent.CreateFloatWME(position_wme,"x",-1.0)
yp_wme=agent.CreateFloatWME(position_wme,"y",-1.0)
zp_wme=agent.CreateFloatWME(position_wme,"z",-1.0)
xo_wme=agent.CreateFloatWME(orientation_wme,"x",-1.0)
yo_wme=agent.CreateFloatWME(orientation_wme,"y",-1.0)
zo_wme=agent.CreateFloatWME(orientation_wme,"z",-1.0)
wo_wme=agent.CreateFloatWME(orientation_wme,"w",-1.0)
xtl_wme=agent.CreateFloatWME(twist_linear_wme,"x",-1.0)
ytl_wme=agent.CreateFloatWME(twist_linear_wme,"y",-1.0)
ztl_wme=agent.CreateFloatWME(twist_linear_wme,"z",-1.0)
xta_wme=agent.CreateFloatWME(twist_angular_wme,"x",-1.0)
yta_wme=agent.CreateFloatWME(twist_angular_wme,"y",-1.0)
zta_wme=agent.CreateFloatWME(twist_angular_wme,"z",-1.0)
# tasks_wme=agent.CreateIdWME(input_link,"tasks")
# task_wme=agent.CreateIdWME(tasks_wme,"next_task")
# task_wme_name=agent.CreateStringWME(task_wme,"name","move")
# task_wme_waypoint=agent.CreateStringWME(task_wme,"waypoint","Waypoint_SH#1")
#Get output link
output_link=agent.GetOutputLink()
#-- ROS SOAR NODE INITIALIZATION-----------------------------------
print("Initializing ROS SOAR node")
rospy.init_node("soar_ros_node",anonymous=True) #Always first
## SUBSCRIBERS
# Creates a subscriber object for each topic
    # The messages to be synced must have the 'header' field, or
    # 'allow_headerless=True' must be passed to the synchronizer
    # if this field is not present
#sub=message_filters.Subscriber("/turtlebot2i/soar_sub_topic", String)
odom_sub = message_filters.Subscriber('/turtlebot2i/odom', Odometry)
scan_sub = message_filters.Subscriber('/turtlebot2i/lidar/scan', LaserScan)
tasks_sub = message_filters.Subscriber('/turtlebot2i/tasks', String)
# Make the topics sync through ApproximateTimeSynchronizer with 0.1s of tolerance
ts = message_filters.ApproximateTimeSynchronizer([odom_sub, scan_sub,tasks_sub], 10, 0.1, allow_headerless=True)
# Associate the synchronizer with a callback
ts.registerCallback(topic_callback)
## PUBLISHERS
#pub=rospy.Publisher("soar_pub_topic", String, queue_size=10)
    ##TODO decide whether to go with direct input to velocities or use navigation package
vel_output_pub=rospy.Publisher("/turtlebot2i/commands/velocity",Twist, queue_size=10) #velocity output publisher
vel_msg=Twist()
    #dummy_pub=rospy.Publisher("/turtlebot2i/soar_sub_topic",String, queue_size=10) #dummy pubs are used for inputting debug data to input topics
dummy_tasks_pub=rospy.Publisher("/turtlebot2i/tasks", String, queue_size=10)
# sub=rospy.Subscriber("soar_sub_topic", String, topic_callback)
# pub=rospy.Publisher("soar_pub_topic",String, queue_size=10)
#
# dummy_pub=rospy.Publisher("soar_sub_topic",String, queue_size=10) #used for inputing debug data to soar_sub_topic
#
# tasks_list_topic=rospy.Subscriber("soar_tasks_list_topic", String, topic_callback)
# dummy_tasks_list_topic=rospy.Publisher("soar_tasks_list_topic",String, queue_size=10) #used for inputing debug data to soar_tasks_list_topic
#-- INPUT UPDATE LOOP (for debug purposes)-----------------------------------
rate = rospy.Rate(1)
while not rospy.is_shutdown(): #loop that updates agent's inputs
print(".")
# sense=te.get_sensors()
# new_input=dict()
# new_input['a_value']=str(sense[0])
# new_input['b_value']=str(sense[1])
# current_time=str(rospy.get_time())
# new_input['time']=str(current_time)
# dummy_pub.publish(str(new_input))
# rospy.loginfo("Log new_input: "+ str(new_input))
dummy_tasks_pub.publish(str(tasks_dummy_data))
rate.sleep()
rospy.spin()
"""
#Instantiate link to environment
te = ToyEnv()
#Create soar kernel and agent
kernel = sml.Kernel.CreateKernelInCurrentThread()
agent = kernel.CreateAgent("agent")
register_print_callback(kernel, agent, callback_print_message, None)
#Load soar sources
agent.ExecuteCommandLine("source soar_robot.soar")
#Get input link and create input structure
input_link=agent.GetInputLink()
a_value=agent.CreateFloatWME(input_link, "a", -1.0)
b_value=agent.CreateFloatWME(input_link, "b", -1.0)
#Get output link
output_link=agent.GetOutputLink()
### Start Soar cognitive cycle ###
#
for i in range(0,3): # replace by a "while True:" to run forever
print(" ------------- Soar cycle: ",i," ------------- ")
# 1) sense the environment
sense=te.get_sensors()
# 2) push senses to soar
a_value.Update(sense[0])
b_value.Update(sense[1])
# 3) make soar think about it
result=0
run_result=agent.RunSelf(1) #Run agent for one step
# 4) get results from soar
output_link=agent.GetOutputLink()## returns an Identifier
if output_link!= None:
result_output_wme = output_link.FindByAttribute("result", 0) # returns a WMElement of the form (<output_link> ^result <val>)
result=None
if result_output_wme != None:
result = float(result_output_wme.GetValueAsString())
#5) send result to environment
te.set_actuators(result)
#
### End Soar cognitive cycle###
cli(agent) #open client to interact with agent
#Close agent and kernel
kernel.DestroyAgent(agent)
kernel.Shutdown()
"""
"""
---
header:
seq: 68884
stamp:
secs: 1531936453
nsecs: 639370680
frame_id: "turtlebot2i/odom"
child_frame_id: "turtlebot2ikinect"
pose:
pose:
position:
x: 2.27206683159
y: -0.190607577562
z: 1.34853923321
orientation:
x: -0.00055553537095
y: 0.000350441696355
z: 0.709878265858
w: 0.704324126244
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
twist:
twist:
linear:
x: 0.000438690185547
y: 0.000247657299042
z: -0.000138282775879
angular:
x: 0.00346004939638
y: 0.00924359634519
z: 0.000447982223704
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
---
"""
"""
^Cklaus@klaus-VirtualBox:~$ rostopic pub /turtlebot2i/commands/velocity geometry_msgs/Twist "linear:
x: 0.0
y: 0.0
z: 0.0
angular:
x: 0.0
y: 0.0
z: 0.0"
publishing and latching message. Press ctrl-C to terminate
"""
"""
Planning service output is a JSON object which looks like
{
"id": "xyz",
"init": "initURI",
"goal": "finalURI",
"plan":[
["move", "youBot#0", "Waypoint_SH#1"],
["pick", "youBot#0", "Shelf#1", "productRed"],
["pick", "youBot#0", "Shelf#1", "productGreen"],
["move", "youBot#0", "Waypoint_CB"],
["drop", "youBot#0", "ConveyorBelt", "productRed"],
["move", "youBot#0", "Waypoint_CB#0"],
["drop", "youBot#0", "ConveyorBelt#0", "productGreen"],
["move", "youBot#1", "Waypoint_SH#0"],
["pick", "youBot#1", "Shelf#0", "productYellow"],
["move", "youBot#1", "Waypoint_CB#0"],
["drop", "youBot#1", "ConveyorBelt#0", "productYellow"],
["move", "youBot#1", "Waypoint_CB#1"],
["drop", "youBot#1", "ConveyorBelt#1", "productGreen"]
]
}
"""
|
the-stack_106_26871 | import sys
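# Brute-force enumeration: starting from [1, 2, ..., M], the loop below counts
# upward with carries (a value of N+1 triggers the carry) and prints every
# length-M sequence of distinct numbers from 1..N in lexicographic order,
# skipping candidates that contain duplicate values.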
N,M = map(int,sys.stdin.readline().split())
array = list(range(1,M+1))
while(1):
    if N+1 in array: # a value equal to N+1 exists
index = array.index(N+1)
        if index == 0: # the N+1 value is the first element
sys.exit()
        else: # the N+1 value is not the first element
array[index-1] += 1
for i in range(index,M):
array[i] = 1
    else: # no value equals N+1
        if len(array) != len(set(array)): # there are duplicate values
array[len(array)-1] += 1
else:
            for element in array: # no duplicate values
sys.stdout.write(str(element)+' ')
sys.stdout.write('\n')
array[len(array)-1] += 1 |
the-stack_106_26872 | if __name__ == '__main__':
def fac(n):
ans = 1
for i in range(1, n + 1):
ans *= i
ans = str(ans)
mysum = 0
for i in ans:
mysum += int(i)
return [ans, mysum]
n = int(input('Enter a number: '))
li = fac(n)
print(str(n) + '! = ' + str(li[0]))
for i in range(0, len(li[0])):
if i != (len(li[0]) - 1):
print(str(li[0][i]) + '+', end = '')
else:
print(str(li[0][i]) + '=' + str(li[1]))
'''
Enter a number: 6
6! = 720
7+2+0=9
'''
|
the-stack_106_26873 | import os
from pathlib import Path
from huey import SqliteHuey
from workoutizer.logger import get_logging_config
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
INITIAL_TRACE_DATA_DIR = os.path.join(BASE_DIR, "setup", "initial_trace_data")
if os.getenv("WKZ_ENV", None) == "devel":
WORKOUTIZER_DIR = BASE_DIR
else:
USER_HOME = Path.home()
WORKOUTIZER_DIR = os.path.join(str(USER_HOME), ".wkz")
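# In development mode (WKZ_ENV=devel) all runtime data stays next to the code;
# otherwise it is kept in a ".wkz" directory inside the user's home.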
SQLITE_FILE = "db.sqlite3"
WORKOUTIZER_DB_PATH = os.path.join(WORKOUTIZER_DIR, SQLITE_FILE)
TRACKS_DIR = os.path.join(WORKOUTIZER_DIR, "tracks")
STATIC_ROOT = os.path.join(BASE_DIR, "static")
STATIC_URL = "/static/"
# SECURITY WARNING: keep the secret key used in production secret!
# however, as long as workoutizer is only used locally this is not an issue
SECRET_KEY = "h#ppx^(%ya18qrm+hgzf-vxr^t=r57k_65_hr73f^-n)@qc9as"
DEBUG = os.getenv("DJANGO_DEBUG", False)
ALLOWED_HOSTS = ["*"]
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"wkz",
"colorfield",
"rest_framework",
"channels",
"django_eventstream",
"huey.contrib.djhuey",
]
MIDDLEWARE = [
"django_grip.GripMiddleware",
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "workoutizer.urls"
MESSAGE_STORAGE = "django.contrib.messages.storage.cookie.CookieStorage"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "workoutizer.wsgi.application"
ASGI_APPLICATION = "workoutizer.asgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": WORKOUTIZER_DB_PATH,
}
}
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
LANGUAGE_CODE = "en-us"
TIME_ZONE = "Europe/Berlin"
USE_I18N = True
USE_L10N = True
USE_TZ = True
PLOT_WIDTH = 1110
PLOT_HEIGHT = 300
REST_FRAMEWORK = {
"DEFAULT_PERMISSION_CLASSES": [
"rest_framework.permissions.AllowAny",
],
}
# set auto primary key to BigAutoField explicitly to suppress warning
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
# plotting
trace_line_width = 3.5
trace_line_opacity = 0.9
LOGGING = get_logging_config(
django_log_level=os.getenv("DJANGO_LOG_LEVEL", "WARNING"),
wkz_log_level=os.getenv("WKZ_LOG_LEVEL", "INFO"),
huey_log_level=os.getenv("HUEY_LOG_LEVEL", "WARNING"),
path_to_log_dir=WORKOUTIZER_DIR,
)
HUEY = SqliteHuey(filename="/tmp/huey.db")
|
the-stack_106_26874 | """
Experiment configuration for:
Model: Dhingra et al 2018 -- https://arxiv.org/abs/1804.00720
Benchmark: Tacred
"""
from reflex.qa_runner import QARunner
from reflex.utils import setup_experiment
import os
ex = setup_experiment('Dhingra Tacred')
@ex.config
def conf():
qa_path = os.path.join(os.environ['BASE_PATH'], 'weights/dhingra-latest') # Path to trained weights
relations_filepath = os.path.join(os.environ['BASE_PATH'], 'data/tacred_relations.jsonl') # Path to relations file
data_directory = os.path.join(os.environ['BASE_PATH'], 'data/tacred/test') # Path to underlying data
batch_size = 16
must_choose_answer = True
device = 'cuda'
trained_to_reject = False
@ex.automain
def main(qa_path, relations_filepath, data_directory, batch_size, must_choose_answer, device, trained_to_reject):
runner = QARunner(qa_path, relations_filepath, data_directory, batch_size, must_choose_answer, device, trained_to_reject, calculate_single_error=False)
em, f1, per_relation_metrics = runner.predict()
print(f'Total samples: {runner.total_samples}')
return {'em': em, 'f1': f1, 'per_relation_metrics': per_relation_metrics}
|
the-stack_106_26875 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# For a list of real-valued elements, compute:
# 1. the number of list elements greater than C;
# 2. the product of the list elements located after the element with the
# maximum absolute value.
if __name__ == '__main__':
lst = [0] * 10
count = 0
d = 1
    c = int(input("Enter c"))
for i in range(10):
        print("Enter element", i + 1)
lst[i] = int(input())
for i in range(10):
if lst[i] > c:
count += 1
    print("The number of list elements greater than C is", count)
    b = lst.index(max(lst, key=abs)) + 1  # maximum by absolute value, as the task statement requires
while(b<10):
d *= lst[b]
b+=1
    print("The product of the list elements after the maximum-absolute-value element is", d) |
the-stack_106_26876 | """Simple test script for 4.2" 400x300 black and white displays.
Supported products:
* WaveShare 4.2" Black and White
* https://www.waveshare.com/product/modules/oleds-lcds/e-paper/4.2inch-e-paper.htm
* https://www.waveshare.com/product/modules/oleds-lcds/e-paper/4.2inch-e-paper-module.htm
"""
import time
import board
import displayio
import adafruit_il0398
displayio.release_displays()
# This pinout works on a Feather M4 and may need to be altered for other boards.
spi = board.SPI() # Uses SCK and MOSI
epd_cs = board.D9
epd_dc = board.D10
epd_reset = board.D5
epd_busy = board.D6
display_bus = displayio.FourWire(
spi, command=epd_dc, chip_select=epd_cs, reset=epd_reset, baudrate=1000000
)
time.sleep(1)
display = adafruit_il0398.IL0398(
display_bus, width=400, height=300, seconds_per_frame=20, busy_pin=epd_busy
)
g = displayio.Group()
f = open("/display-ruler.bmp", "rb")
pic = displayio.OnDiskBitmap(f)
t = displayio.TileGrid(pic, pixel_shader=displayio.ColorConverter())
g.append(t)
display.show(g)
display.refresh()
time.sleep(120)
|
the-stack_106_26877 | from __future__ import print_function
from setuptools import Command
import sys
from os.path import realpath, join, exists, dirname, curdir, basename, split
from os import makedirs
from glob import glob
from shutil import rmtree, copyfile
def argv_contains(t):
for arg in sys.argv:
if arg.startswith(t):
return True
return False
class BdistAPK(Command):
description = 'Create an APK with python-for-android'
user_options = []
def initialize_options(self):
for option in self.user_options:
setattr(self, option[0].strip('=').replace('-', '_'), None)
option_dict = self.distribution.get_option_dict('apk')
# This is a hack, we probably aren't supposed to loop through
# the option_dict so early because distutils does exactly the
# same thing later to check that we support the
# options. However, it works...
for (option, (source, value)) in option_dict.items():
setattr(self, option, str(value))
def finalize_options(self):
setup_options = self.distribution.get_option_dict('apk')
for (option, (source, value)) in setup_options.items():
if source == 'command line':
continue
if not argv_contains('--' + option):
                # allow 'permissions': ['permission', 'permission'] in apk
if option == 'permissions':
for perm in value:
sys.argv.append('--permission={}'.format(perm))
elif value in (None, 'None'):
sys.argv.append('--{}'.format(option))
else:
sys.argv.append('--{}={}'.format(option, value))
# Inject some argv options from setup.py if the user did not
# provide them
if not argv_contains('--name'):
name = self.distribution.get_name()
sys.argv.append('--name="{}"'.format(name))
self.name = name
if not argv_contains('--package'):
package = 'org.test.{}'.format(self.name.lower().replace(' ', ''))
print('WARNING: You did not supply an Android package '
'identifier, trying {} instead.'.format(package))
print(' This may fail if this is not a valid identifier')
sys.argv.append('--package={}'.format(package))
if not argv_contains('--version'):
version = self.distribution.get_version()
sys.argv.append('--version={}'.format(version))
if not argv_contains('--arch'):
arch = 'armeabi'
self.arch = arch
sys.argv.append('--arch={}'.format(arch))
def run(self):
self.prepare_build_dir()
from pythonforandroid.toolchain import main
sys.argv[1] = 'apk'
main()
def prepare_build_dir(self):
if argv_contains('--private') and not argv_contains('--launcher'):
print('WARNING: Received --private argument when this would '
'normally be generated automatically.')
print(' This is probably bad unless you meant to do '
'that.')
bdist_dir = 'build/bdist.android-{}'.format(self.arch)
if exists(bdist_dir):
rmtree(bdist_dir)
makedirs(bdist_dir)
globs = []
for directory, patterns in self.distribution.package_data.items():
for pattern in patterns:
globs.append(join(directory, pattern))
filens = []
for pattern in globs:
filens.extend(glob(pattern))
main_py_dirs = []
if not argv_contains('--launcher'):
for filen in filens:
new_dir = join(bdist_dir, dirname(filen))
if not exists(new_dir):
makedirs(new_dir)
print('Including {}'.format(filen))
copyfile(filen, join(bdist_dir, filen))
if basename(filen) in ('main.py', 'main.pyo'):
main_py_dirs.append(filen)
# This feels ridiculous, but how else to define the main.py dir?
# Maybe should just fail?
if not main_py_dirs and not argv_contains('--launcher'):
print('ERROR: Could not find main.py, so no app build dir defined')
print('You should name your app entry point main.py')
exit(1)
if len(main_py_dirs) > 1:
print('WARNING: Multiple main.py dirs found, using the shortest path')
main_py_dirs = sorted(main_py_dirs, key=lambda j: len(split(j)))
if not argv_contains('--launcher'):
sys.argv.append('--private={}'.format(
join(realpath(curdir), bdist_dir, dirname(main_py_dirs[0])))
)
def _set_user_options():
# This seems like a silly way to do things, but not sure if there's a
# better way to pass arbitrary options onwards to p4a
user_options = [('requirements=', None, None), ]
for i, arg in enumerate(sys.argv):
if arg.startswith('--'):
if ('=' in arg or
(i < (len(sys.argv) - 1) and not sys.argv[i+1].startswith('-'))):
user_options.append((arg[2:].split('=')[0] + '=', None, None))
else:
user_options.append((arg[2:], None, None))
BdistAPK.user_options = user_options
_set_user_options()
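# Illustrative usage (an assumption based on how finalize_options() reads the
# 'apk' option dict and sys.argv; not an official example): a project could
# configure this command from its setup.py and then run "python setup.py apk",
# e.g.
#
#     from setuptools import setup
#     setup(
#         name='My App',
#         version='0.1',
#         package_data={'myapp': ['*.py', '*.kv', '*.png']},
#         options={'apk': {'requirements': 'python2,kivy',
#                          'permissions': ['INTERNET', 'VIBRATE']}},
#     )
#
# Options given on the command line take precedence over the setup.py values.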
|
the-stack_106_26879 | #
# File:
# TRANS_read_ASCII.py
#
# Synopsis:
# Illustrates how to read an ASCII file
#
# Categories:
# I/O
#
# Author:
# Karin Meier-Fleischer, based on NCL example
#
# Date of initial publication:
# September 2018
#
# Description:
# This example shows how to read an ASCII file.
#
# Effects illustrated:
# o Read ASCII data
#
# Output:
# -
#
# Notes: The data for this example can be downloaded from
# http://www.ncl.ucar.edu/Document/Manuals/NCL_User_Guide/Data/
#
"""
Transition Guide Python Example: TRANS_read_ASCII.py
- read netCDF file
- retrieve variable informations
Input file: Test_6h.csv
2.00;3.50;5.10;8.20
2.40;3.10;4.80;8.90
2.60;3.70;5.30;10.10
2.75;3.90;5.55;10.25
3.00;4.10;6.05;10.50
2018-08-28 kmf
"""
from __future__ import print_function
import numpy as np
import Ngl
#-- data file name
diri = "/Users/k204045/local/miniconda2/envs/pyn_env/lib/ncarg/data/nug/"
fili = "Test_6h.csv"
#-- number of lines and columns in input file
nrows = 5
ncols = 4
#-- read all data
vals = Ngl.asciiread(diri+fili,(nrows,ncols),"float",sep=';')
#-- print information
print("vals: " + str(vals))
print("")
print("--> rank of vals: " + str(len(vals.shape)))
print("--> shape vals: " + str(vals.shape))
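#-- illustrative extra step (not part of the original example): vals is a plain
#-- NumPy array, so standard NumPy operations apply directly, e.g. per-column means
col_means = np.mean(vals, axis=0)
print("--> column means: " + str(col_means))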
exit()
|
the-stack_106_26881 | import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
process.maxEvents.input = 3
process.source = cms.Source("EmptySource",
firstLuminosityBlockForEachRun = cms.untracked.VLuminosityBlockID(
cms.LuminosityBlockID(10,1),
cms.LuminosityBlockID(20,2),
cms.LuminosityBlockID(30,3)
),
numberEventsInLuminosityBlock =cms.untracked.uint32(1)
)
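# With maxEvents.input = 3 and one event per luminosity block, this source emits
# one event in each of lumis (10,1), (20,2) and (30,3), so the analyzer below
# sees exactly one RunInfo payload (avg_current 1., 2. and 3.) per run.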
process.add_( cms.ESProducer("RunInfoTestESProducer",
runInfos = cms.VPSet(cms.PSet(run = cms.int32(10), avg_current = cms.double(1.)),
cms.PSet(run = cms.int32(20), avg_current = cms.double(2.)),
cms.PSet(run = cms.int32(30), avg_current = cms.double(3.)) ) ) )
process.riSource = cms.ESSource("EmptyESSource", recordName = cms.string("RunInfoRcd"),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(10,20,30))
process.test = cms.EDAnalyzer("RunInfoESAnalyzer")
process.p = cms.Path(process.test)
|
the-stack_106_26883 | import sys
sys.path.append(".")
sys.path.append("../../.")
import os
import pathlib
import torch
from torch import nn
from torch.utils.data import TensorDataset, DataLoader
from src.models.models.cnn import Encoder, Decoder
from src.models.models.DenoisingAutoencoder import DenoisingAutoencoder
from src.data.dataloader import snps_to_one_hot
def test_forward():
p = pathlib.Path(__file__).parent.parent.parent.resolve()
x = torch.load(os.path.join(p, "data", "processed", "tensors", "x_mhcuvps.pt"))
target = x.T[:32].type(torch.LongTensor)
x = snps_to_one_hot(target)
x = torch.unsqueeze(x, 1)
x = x.permute(0, 1, 3, 2)
x = x.type(torch.FloatTensor) # as it needs to be a float
ds_ae = TensorDataset(x, target)
loader = DataLoader(ds_ae, batch_size=4, shuffle=True)
encoder = Encoder(
input_size=(x.shape[-3], x.shape[-2], x.shape[-1]),
encode_size=128,
conv_kernels=[(4, 9), (2, 9), (1, 9)],
n_filters=[32, 16, 1],
strides=[1, 1, 1],
maxpool_kernels=[(2, 4), (2, 4), (1, 3)],
)
decoder = Decoder(
input_size=128,
output=x.shape[-1],
conv_kernels=[(1, 9), (2, 9), (4, 9)],
upsampling_kernels=[(1, 3), (2, 4), (2, 4)],
n_filters=[60, 60, 1],
strides=[1, 1, 1],
)
dae = DenoisingAutoencoder(encoder, decoder)
x_ = dae(x)
x = torch.squeeze(x, 1)
loss = nn.CrossEntropyLoss()
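    # Sanity check: the reconstruction from the untrained autoencoder should score
    # a higher cross-entropy against the target than the exact one-hot encoding of
    # the target itself (the squeezed input x), which is what the assertion checks.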
assert loss(x_, target) > loss(x, target)
|
the-stack_106_26885 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Pytest configuration."""
from __future__ import absolute_import, print_function
import json
import os
import shutil
import tempfile
import pytest
from flask import Flask
from flask_babelex import Babel
from flask_mail import Mail
from flask_menu import Menu as FlaskMenu
from flask_oauthlib.client import OAuth as FlaskOAuth
from flask_oauthlib.client import OAuthResponse
from invenio_accounts import InvenioAccounts
from invenio_db import InvenioDB, db
from invenio_userprofiles import InvenioUserProfiles, UserProfile
from invenio_userprofiles.views import blueprint_ui_init
from sqlalchemy_utils.functions import create_database, database_exists, \
drop_database
from invenio_oauthclient import InvenioOAuthClient
from invenio_oauthclient.contrib.cern import REMOTE_APP as CERN_REMOTE_APP
from invenio_oauthclient.contrib.github import REMOTE_APP as GITHUB_REMOTE_APP
from invenio_oauthclient.contrib.orcid import REMOTE_APP as ORCID_REMOTE_APP
from invenio_oauthclient.views.client import blueprint as blueprint_client
from invenio_oauthclient.views.settings import blueprint as blueprint_settings
@pytest.fixture
def base_app(request):
"""Flask application fixture without OAuthClient initialized."""
instance_path = tempfile.mkdtemp()
base_app = Flask('testapp')
base_app.config.update(
TESTING=True,
WTF_CSRF_ENABLED=False,
LOGIN_DISABLED=False,
CACHE_TYPE='simple',
OAUTHCLIENT_REMOTE_APPS=dict(
cern=CERN_REMOTE_APP,
orcid=ORCID_REMOTE_APP,
github=GITHUB_REMOTE_APP,
),
GITHUB_APP_CREDENTIALS=dict(
consumer_key='github_key_changeme',
consumer_secret='github_secret_changeme',
),
ORCID_APP_CREDENTIALS=dict(
consumer_key='orcid_key_changeme',
consumer_secret='orcid_secret_changeme',
),
CERN_APP_CREDENTIALS=dict(
consumer_key='cern_key_changeme',
consumer_secret='cern_secret_changeme',
),
# use local memory mailbox
EMAIL_BACKEND='flask_email.backends.locmem.Mail',
SQLALCHEMY_DATABASE_URI=os.getenv('SQLALCHEMY_DATABASE_URI',
'sqlite://'),
SERVER_NAME='localhost',
DEBUG=False,
SECRET_KEY='TEST',
SECURITY_DEPRECATED_PASSWORD_SCHEMES=[],
SECURITY_PASSWORD_HASH='plaintext',
SECURITY_PASSWORD_SCHEMES=['plaintext'],
)
FlaskMenu(base_app)
Babel(base_app)
Mail(base_app)
InvenioDB(base_app)
InvenioAccounts(base_app)
with base_app.app_context():
if str(db.engine.url) != 'sqlite://' and \
not database_exists(str(db.engine.url)):
create_database(str(db.engine.url))
db.create_all()
def teardown():
with base_app.app_context():
db.session.close()
if str(db.engine.url) != 'sqlite://':
drop_database(str(db.engine.url))
shutil.rmtree(instance_path)
request.addfinalizer(teardown)
base_app.test_request_context().push()
return base_app
def _init_app(app_):
"""Init OAuth app."""
FlaskOAuth(app_)
InvenioOAuthClient(app_)
app_.register_blueprint(blueprint_client)
app_.register_blueprint(blueprint_settings)
return app_
@pytest.fixture
def app(base_app):
"""Flask application fixture."""
base_app.config.update(
WTF_CSRF_ENABLED=False,
)
return _init_app(base_app)
@pytest.fixture
def app_with_csrf(base_app):
"""Flask application fixture with CSRF enabled."""
base_app.config.update(
WTF_CSRF_ENABLED=True,
)
return _init_app(base_app)
def _init_userprofiles(app_):
"""Init userprofiles module."""
InvenioUserProfiles(app_)
app_.register_blueprint(blueprint_ui_init)
return app_
@pytest.fixture
def app_with_userprofiles(app):
"""Configure userprofiles module with CSRF disabled."""
app.config.update(
USERPROFILES_EXTEND_SECURITY_FORMS=True,
WTF_CSRF_ENABLED=False,
)
return _init_userprofiles(app)
@pytest.fixture
def app_with_userprofiles_csrf(app):
"""Configure userprofiles module with CSRF enabled."""
app.config.update(
USERPROFILES_EXTEND_SECURITY_FORMS=True,
WTF_CSRF_ENABLED=True,
)
return _init_userprofiles(app)
@pytest.fixture
def models_fixture(app):
"""Flask app with example data used to test models."""
with app.app_context():
datastore = app.extensions['security'].datastore
datastore.create_user(
email='[email protected]',
password='tester',
active=True
)
datastore.create_user(
email='[email protected]',
password='tester',
active=True
)
datastore.create_user(
email='[email protected]',
password='tester',
active=True
)
datastore.commit()
return app
@pytest.fixture
def params():
"""Fixture for remote app params."""
def params(x):
return dict(
request_token_params={'scope': ''},
base_url='https://foo.bar/',
request_token_url=None,
access_token_url='https://foo.bar/oauth/access_token',
authorize_url='https://foo.bar/oauth/authorize',
consumer_key=x,
consumer_secret='testsecret',
)
return params
@pytest.fixture
def remote():
"""Fixture for remote app."""
return type('test_remote', (), dict(
name='example_remote',
request_token_params={'scope': ''},
base_url='https://foo.bar/',
request_token_url=None,
access_token_url='https://foo.bar/oauth/access_token',
authorize_url='https://foo.bar/oauth/authorize',
consumer_key='testkey',
consumer_secret='testsecret',
))()
@pytest.fixture
def views_fixture(base_app, params):
"""Flask application with example data used to test views."""
with base_app.app_context():
datastore = base_app.extensions['security'].datastore
datastore.create_user(
email='[email protected]',
password='tester',
active=True
)
datastore.create_user(
email='[email protected]',
password='tester',
active=True
)
datastore.create_user(
email='[email protected]',
password='tester',
active=True
)
datastore.commit()
base_app.config['OAUTHCLIENT_REMOTE_APPS'].update(
dict(
test=dict(
authorized_handler=lambda *args, **kwargs: 'TEST',
params=params('testid'),
title='MyLinkedTestAccount',
),
test_invalid=dict(
authorized_handler=lambda *args, **kwargs: 'TEST',
params=params('test_invalidid'),
title='Test Invalid',
),
full=dict(
params=params('fullid'),
title='Full',
),
)
)
FlaskOAuth(base_app)
InvenioOAuthClient(base_app)
base_app.register_blueprint(blueprint_client)
base_app.register_blueprint(blueprint_settings)
return base_app
@pytest.fixture
def example_github(request):
    """GitHub example data."""
return {
'name': 'Josiah Carberry',
'expires_in': 3599,
'access_token': 'test_access_token',
'refresh_token': 'test_refresh_token',
'scope': '/authenticate',
'token_type': 'bearer',
}
@pytest.fixture
def example_orcid(request):
"""ORCID example data."""
return {
'name': 'Josiah Carberry',
'expires_in': 3599,
'orcid': '0000-0002-1825-0097',
'access_token': 'test_access_token',
'refresh_token': 'test_refresh_token',
'scope': '/authenticate',
'token_type': 'bearer'
}, dict(external_id='0000-0002-1825-0097',
external_method='orcid',
user=dict(
profile=dict(
full_name='Josiah Carberry'
)
)
)
@pytest.fixture()
def example_cern(request):
"""CERN example data."""
file_path = os.path.join(os.path.dirname(__file__),
'data/oauth_response_content.json')
with open(file_path) as response_file:
json_data = response_file.read()
return OAuthResponse(
resp=None,
content=json_data,
content_type='application/json'
), dict(
access_token='test_access_token',
token_type='bearer',
expires_in=1199,
refresh_token='test_refresh_token'
), dict(
user=dict(
email='[email protected]',
profile=dict(username='taccount', full_name='Test Account'),
),
external_id='123456', external_method='cern',
active=True
)
@pytest.fixture(scope='session')
def orcid_bio():
"""ORCID response fixture."""
file_path = os.path.join(os.path.dirname(__file__), 'data/orcid_bio.json')
with open(file_path) as response_file:
data = json.load(response_file)
return data
@pytest.fixture()
def user(app_with_userprofiles):
"""Create users."""
with db.session.begin_nested():
datastore = app_with_userprofiles.extensions['security'].datastore
user1 = datastore.create_user(email='[email protected]',
password='tester', active=True)
profile = UserProfile(username='mynick', user=user1)
db.session.add(profile)
db.session.commit()
return user1
@pytest.fixture()
def form_test_data():
"""Test data to fill a registration form."""
return dict(
email='[email protected]',
profile=dict(
full_name='Test Tester',
username='test123',
),
)
|
the-stack_106_26886 | """
This is an FSLeyes plugin script that integrates AxonDeepSeg tools into FSLeyes.
Author : Stoyan I. Asenov
"""
import wx
import wx.lib.agw.hyperlink as hl
import fsleyes.controls.controlpanel as ctrlpanel
import fsleyes.actions.loadoverlay as ovLoad
import numpy as np
import nibabel as nib
from PIL import Image, ImageDraw, ImageOps
import scipy.misc
import json
from pathlib import Path
import AxonDeepSeg
from AxonDeepSeg.apply_model import axon_segmentation
from AxonDeepSeg.segment import segment_image
import AxonDeepSeg.morphometrics.compute_morphometrics as compute_morphs
from AxonDeepSeg import postprocessing, params, ads_utils
from config import axonmyelin_suffix, axon_suffix, myelin_suffix
import math
from scipy import ndimage as ndi
from skimage import measure, morphology, feature
import tempfile
import openpyxl
import pandas as pd
import imageio
from AxonDeepSeg.morphometrics.compute_morphometrics import *
VERSION = "0.2.16"
class ADScontrol(ctrlpanel.ControlPanel):
"""
This class is the object corresponding to the AxonDeepSeg control panel.
"""
def __init__(self, ortho, *args, **kwargs):
"""
This function initializes the control panel. It generates the widgets and adds them to the panel. It also sets
the initial position of the panel to the left
:param ortho: This is used to access the ortho ops in order to turn off the X and Y canvas as well as the cursor
"""
ctrlpanel.ControlPanel.__init__(self, ortho, *args, **kwargs)
# Add a sizer to the control panel
# This sizer will contain the buttons
sizer_h = wx.BoxSizer(wx.VERTICAL)
# Add the logo to the control panel
ADS_logo = self.get_logo()
sizer_h.Add(ADS_logo, flag=wx.SHAPED, proportion=1)
# Add the citation to the control panel
citation_box = wx.TextCtrl(
self, value=self.get_citation(), size=(100, 50), style=wx.TE_MULTILINE
)
sizer_h.Add(citation_box, flag=wx.SHAPED, proportion=1)
# Add a hyperlink to the documentation
hyper = hl.HyperLinkCtrl(
self, -1, label="Need help? Read the documentation", URL="https://axondeepseg.readthedocs.io/en/latest/"
)
sizer_h.Add(hyper, flag=wx.SHAPED, proportion=1)
# Define the color of button labels
button_label_color = (0, 0, 0)
# Add the image loading button
load_png_button = wx.Button(self, label="Load PNG or TIF file")
load_png_button.SetForegroundColour(button_label_color)
load_png_button.Bind(wx.EVT_BUTTON, self.on_load_png_button)
load_png_button.SetToolTip(wx.ToolTip("Loads a .png or .tif file into FSLeyes"))
sizer_h.Add(load_png_button, flag=wx.SHAPED, proportion=1)
# Add the mask loading button
load_mask_button = wx.Button(self, label="Load existing mask")
load_mask_button.SetForegroundColour(button_label_color)
load_mask_button.Bind(wx.EVT_BUTTON, self.on_load_mask_button)
load_mask_button.SetToolTip(
wx.ToolTip(
"Loads an existing axonmyelin mask into FSLeyes. "
"The selected image should contain both the axon and myelin masks. "
"The regions on the image should have an intensity of 0 for the background, "
"127 for the myelin and 255 for the axons. "
)
)
sizer_h.Add(load_mask_button, flag=wx.SHAPED, proportion=1)
# Add the model choice combobox
self.model_combobox = wx.ComboBox(
self,
choices=ads_utils.get_existing_models_list(),
size=(100, 20),
value="Select the modality",
)
self.model_combobox.SetForegroundColour(button_label_color)
self.model_combobox.SetToolTip(
wx.ToolTip("Select the modality used to acquire the image")
)
sizer_h.Add(self.model_combobox, flag=wx.SHAPED, proportion=1)
# Add the button that applies the prediction model
apply_model_button = wx.Button(self, label="Apply ADS prediction model")
apply_model_button.SetForegroundColour(button_label_color)
apply_model_button.Bind(wx.EVT_BUTTON, self.on_apply_model_button)
apply_model_button.SetToolTip(
wx.ToolTip("Applies the prediction model and displays the masks")
)
sizer_h.Add(apply_model_button, flag=wx.SHAPED, proportion=1)
# The Watershed button's purpose isn't clear. It is unavailable for now.
# # Add the button that runs the watershed algorithm
# run_watershed_button = wx.Button(self, label="Run Watershed")
# run_watershed_button.Bind(wx.EVT_BUTTON, self.on_run_watershed_button)
# run_watershed_button.SetToolTip(
# wx.ToolTip(
# "Uses a watershed algorithm to find the different axon+myelin"
# "objects. This is used to see if where are connections"
# " between two axon+myelin objects."
# )
# )
# sizer_h.Add(run_watershed_button, flag=wx.SHAPED, proportion=1)
# Add the fill axon tool
fill_axons_button = wx.Button(self, label="Fill axons")
fill_axons_button.SetForegroundColour(button_label_color)
fill_axons_button.Bind(wx.EVT_BUTTON, self.on_fill_axons_button)
fill_axons_button.SetToolTip(
wx.ToolTip(
"Automatically fills the axons inside myelin objects."
" THE MYELIN OBJECTS NEED TO BE CLOSED AND SEPARATED FROM EACH "
"OTHER (THEY MUST NOT TOUCH) FOR THIS TOOL TO WORK CORRECTLY."
)
)
sizer_h.Add(fill_axons_button, flag=wx.SHAPED, proportion=1)
# Add the save Segmentation button
save_segmentation_button = wx.Button(self, label="Save segmentation")
save_segmentation_button.SetForegroundColour(button_label_color)
save_segmentation_button.Bind(wx.EVT_BUTTON, self.on_save_segmentation_button)
save_segmentation_button.SetToolTip(
wx.ToolTip("Saves the axon and myelin masks in the selected folder")
)
sizer_h.Add(save_segmentation_button, flag=wx.SHAPED, proportion=1)
# Add compute morphometrics button
compute_morphometrics_button = wx.Button(self, label="Compute morphometrics")
compute_morphometrics_button.SetForegroundColour(button_label_color)
compute_morphometrics_button.Bind(wx.EVT_BUTTON, self.on_compute_morphometrics_button)
compute_morphometrics_button.SetToolTip(
wx.ToolTip(
"Calculates and saves the morphometrics to an excel and csv file. "
"Shows the numbers of the axons at the coordinates specified in the morphometrics file."
)
)
sizer_h.Add(compute_morphometrics_button, flag=wx.SHAPED, proportion=1)
# Add the settings button
settings_button = wx.Button(self, label="Settings")
settings_button.SetForegroundColour(button_label_color)
settings_button.Bind(wx.EVT_BUTTON, self.on_settings_button)
sizer_h.Add(settings_button, flag=wx.SHAPED, proportion=1)
# Set the sizer of the control panel
self.SetSizer(sizer_h)
# Initialize the variables that are used to track the active image
self.png_image_name = []
self.image_dir_path = []
self.most_recent_watershed_mask_name = None
# Toggle off the X and Y canvas
oopts = ortho.sceneOpts
oopts.showXCanvas = False
oopts.showYCanvas = False
# Toggle off the cursor
oopts.showCursor = False
# Toggle off the radiological orientation
self.displayCtx.radioOrientation = False
# Invert the Y display
self.frame.viewPanels[0].frame.viewPanels[0].getZCanvas().opts.invertY = True
# Create a temporary directory that will hold the NIfTI files
self.ads_temp_dir_var = tempfile.TemporaryDirectory() #This variable needs to stay loaded to keep the temporary
# directory from being destroyed
self.ads_temp_dir = Path(self.ads_temp_dir_var.name)
# Check the version
self.verrify_version()
#TODO: move the settings to another class
self.overlap_value = 25 # TODO: Move this to a more appropriate place later
self.model_resolution = 0.01
self.use_custom_resolution = False
self.custom_resolution = 0.07
self.zoom_factor = 1.0
def on_load_png_button(self, event):
"""
This function is called when the user presses on the Load Png button. It allows the user to select a PNG or TIF
image, convert it into a NIfTI and load it into FSLeyes.
"""
# Ask the user which file he wants to convert
with wx.FileDialog(
self, "select Image file", style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST
) as file_dialog:
if (
file_dialog.ShowModal() == wx.ID_CANCEL
): # The user cancelled the operation
return
in_file = Path(file_dialog.GetPath())
# Check if the image format is valid
image_extension = in_file.suffix
valid_extensions = [".png", ".tif", ".jpg", ".jpeg"]
if image_extension not in valid_extensions:
self.show_message("Invalid file extension")
return
# Store the directory path and image name for later use in the application of the prediction model
self.image_dir_path.append(in_file.parents[0])
self.png_image_name.append(in_file.name)
        # Call the function that converts and loads the PNG or TIF image
self.load_png_image_from_path(in_file)
def on_load_mask_button(self, event):
"""
This function is called when the user presses on the loadMask button. It allows the user to select an existing
PNG mask, convert it into a NIfTI and load it into FSLeyes.
The mask needs to contain an axon + myelin mask. The Axons should have an intensity > 200. The myelin should
have an intensity between 100 and 200. The data should be in uint8.
"""
# Ask the user to select the mask image
with wx.FileDialog(
self, "select mask .png file", style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST
) as file_dialog:
if (
file_dialog.ShowModal() == wx.ID_CANCEL
): # The user cancelled the operation
return
in_file = Path(file_dialog.GetPath())
# Check if the image format is valid
image_extension = in_file.suffix
valid_extensions = [".png", ".tif", ".jpg", ".jpeg"]
if image_extension not in valid_extensions:
self.show_message("Invalid file extension")
return
# Get the image data
img_png2D = ads_utils.imread(in_file)
image_name = in_file.stem
# Extract the Axon mask
axon_mask = img_png2D > 200
axon_mask = params.intensity['binary'] * np.array(axon_mask, dtype=np.uint8)
# Extract the Myelin mask
myelin_mask = (img_png2D > 100) & (img_png2D < 200)
myelin_mask = params.intensity['binary'] * np.array(myelin_mask, dtype=np.uint8)
# Load the masks into FSLeyes
axon_outfile = self.ads_temp_dir / (image_name + "-axon.png")
ads_utils.imwrite(axon_outfile, axon_mask)
self.load_png_image_from_path(axon_outfile, is_mask=True, colormap="blue")
myelin_outfile = self.ads_temp_dir / (image_name + "-myelin.png")
ads_utils.imwrite(myelin_outfile, myelin_mask)
self.load_png_image_from_path(myelin_outfile, is_mask=True, colormap="red")
def on_apply_model_button(self, event):
"""
This function is called when the user presses on the ApplyModel button. It is used to apply the prediction model
selected in the combobox. The segmentation masks are then loaded into FSLeyes
"""
# Declare the default resolution of the model
resolution = 0.1
# Get the image name and directory
image_overlay = self.get_visible_image_overlay()
        if image_overlay is None:
return
        n_loaded_images = len(self.png_image_name)
image_name = None
image_directory = None
for i in range(n_loaded_images):
if image_overlay.name == (Path(self.png_image_name[i])).stem:
image_name = self.png_image_name[i]
image_directory = self.image_dir_path[i]
if (image_name is None) or (image_directory is None):
self.show_message(
"Couldn't find the path to the loaded image. "
"Please use the plugin's image loader to import the image you wish to segment. "
)
return
image_path = image_directory / image_name
image_name_no_extension = Path(image_name).stem
# Get the selected model
selected_model = self.model_combobox.GetStringSelection()
if selected_model == "":
self.show_message("Please select a model")
return
# Get the path of the selected model
if any(selected_model in models for models in ads_utils.get_existing_models_list()):
dir_path = Path(AxonDeepSeg.__file__).parents[0]
model_path = dir_path / "models" / selected_model
else:
self.show_message("Please select a model")
return
# If the TEM model is selected, modify the resolution
if "TEM" in selected_model.upper():
resolution = 0.01
        # Check if the pixel size txt file exists in the imageDirPath
pixel_size_exists = (image_directory / "pixel_size_in_micrometer.txt").exists()
# if it doesn't exist, ask the user to input the pixel size
if pixel_size_exists is False:
with wx.TextEntryDialog(
self, "Enter the pixel size in micrometer", value="0.07"
) as text_entry:
if text_entry.ShowModal() == wx.ID_CANCEL:
return
pixel_size_str = text_entry.GetValue()
pixel_size_float = float(pixel_size_str)
        else:  # read the pixel size
            with open(str(image_directory / "pixel_size_in_micrometer.txt"), 'r') as resolution_file:
                pixel_size_float = float(resolution_file.read())
# Load model configs and apply prediction
model_configfile = model_path / "config_network.json"
with open(model_configfile.__str__(), "r") as fd:
config_network = json.loads(fd.read())
segment_image(
image_path,
model_path,
self.overlap_value,
config_network,
resolution,
acquired_resolution=pixel_size_float * self.zoom_factor,
verbosity_level=3
)
# The axon_segmentation function creates the segmentation masks and stores them as PNG files in the same folder
# as the original image file.
# Load the axon and myelin masks into FSLeyes
axon_mask_path = image_directory / (image_name_no_extension + str(axon_suffix))
myelin_mask_path = image_directory / (image_name_no_extension + str(myelin_suffix))
self.load_png_image_from_path(axon_mask_path, is_mask=True, colormap="blue")
self.load_png_image_from_path(myelin_mask_path, is_mask=True, colormap="red")
self.pixel_size_float = pixel_size_float
return self
def on_save_segmentation_button(self, event):
"""
This function saves the active myelin and axon masks as PNG images. Three (3) images are generated in a folder
        selected by the user: one with the axon mask, one with the myelin mask, and one with both.
"""
# Find the visible myelin and axon masks
axon_mask_overlay = self.get_corrected_axon_overlay()
if axon_mask_overlay is None:
axon_mask_overlay = self.get_visible_axon_overlay()
myelin_mask_overlay = self.get_visible_myelin_overlay()
if (axon_mask_overlay is None) or (myelin_mask_overlay is None):
return
# Ask the user where to save the segmentation
with wx.DirDialog(
self,
"select the directory in which the segmentation will be save",
defaultPath="",
style=wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST,
) as file_dialog:
if file_dialog.ShowModal() == wx.ID_CANCEL:
return
save_dir = Path(file_dialog.GetPath())
# store the data of the masks in variables as numpy arrays.
# Note: since PIL uses a different convention for the X and Y coordinates, some array manipulation has to be
# done.
# Note 2 : The image array loaded in FSLeyes is flipped. We need to flip it back
myelin_array = np.array(
myelin_mask_overlay[:, :, 0], copy=True, dtype=np.uint8
)
myelin_array = np.flipud(myelin_array)
myelin_array = np.rot90(myelin_array, k=1, axes=(1, 0))
axon_array = np.array(
axon_mask_overlay[:, :, 0], copy=True, dtype=np.uint8
)
axon_array = np.flipud(axon_array)
axon_array = np.rot90(axon_array, k=1, axes=(1, 0))
# Make sure the masks have the same size
if myelin_array.shape != axon_array.shape:
self.show_message("invalid visible masks dimensions")
return
# Remove the intersection
myelin_array, axon_array, intersection = postprocessing.remove_intersection(
myelin_array, axon_array, priority=1, return_overlap=True)
if intersection.sum() > 0:
self.show_message(
"There is an overlap between the axon mask and the myelin mask. The myelin will have priority.")
# Scale the pixel values of the masks to 255 for image saving
myelin_array = myelin_array * params.intensity['binary']
axon_array = axon_array * params.intensity['binary']
image_name = myelin_mask_overlay.name[:-len("_seg-myelin")]
myelin_and_axon_array = (myelin_array // 2 + axon_array).astype(np.uint8)
ads_utils.imwrite(filename=save_dir / (image_name + str(axonmyelin_suffix)), img=myelin_and_axon_array)
ads_utils.imwrite(filename=save_dir / (image_name + str(myelin_suffix)), img=myelin_array)
ads_utils.imwrite(filename=save_dir / (image_name + str(axon_suffix)), img=axon_array)
def on_run_watershed_button(self, event):
"""
        This function is called when the user presses the runWatershed button. It creates a watershed mask that is
        used to locate the connections between the axon-myelin objects.
        The runWatershed button is currently commented out, so this function is unused at the moment.
"""
# Find the visible myelin and axon masks
axon_mask_overlay = self.get_visible_axon_overlay()
myelin_mask_overlay = self.get_visible_myelin_overlay()
if (axon_mask_overlay is None) or (myelin_mask_overlay is None):
return
# Extract the data from the overlays
axon_array = axon_mask_overlay[:, :, 0]
myelin_array = myelin_mask_overlay[:, :, 0]
# Make sure the masks have the same size
if myelin_array.shape != axon_array.shape:
self.show_message("invalid visible masks dimensions")
return
# If a watershed mask already exists, remove it.
for an_overlay in self.overlayList:
if (self.most_recent_watershed_mask_name is not None) and (
an_overlay.name == self.most_recent_watershed_mask_name
):
self.overlayList.remove(an_overlay)
# Compute the watershed mask
watershed_data = self.get_watershed_segmentation(axon_array, myelin_array)
# Save the watershed mask as a png then load it as an overlay
watershed_image_array = np.rot90(watershed_data, k=3, axes=(1, 0))
watershed_image = Image.fromarray(watershed_image_array)
file_name = self.ads_temp_dir.name + "/watershed_mask.png"
watershed_image.save(file_name)
        watershed_mask_overlay = self.load_png_image_from_path(
            file_name, add_to_overlayList=False
        )
        watershed_mask_overlay[:, :, 0] = watershed_data
        self.overlayList.append(watershed_mask_overlay)
        # Apply a "random" colour mapping to the watershed mask
        opts = self.displayCtx.getOpts(watershed_mask_overlay)
opts.cmap = "random"
self.most_recent_watershed_mask_name = "watershed_mask"
def on_fill_axons_button(self, event):
"""
This function is called when the fillAxon button is pressed by the user. It uses a flood fill algorithm to fill
the inside of the myelin objects with the axon mask
"""
# Find the visible myelin and axon mask
myelin_mask_overlay = self.get_visible_myelin_overlay()
axon_mask_overlay = self.get_visible_axon_overlay()
if myelin_mask_overlay is None:
return
if axon_mask_overlay is None:
return
# Extract the data from the overlays
myelin_array = myelin_mask_overlay[:, :, 0]
axon_array = axon_mask_overlay[:, :, 0]
# Perform the floodfill operation
axon_extracted_array = postprocessing.floodfill_axons(axon_array, myelin_array)
axon_corr_array = np.flipud(axon_extracted_array)
axon_corr_array = params.intensity['binary'] * np.rot90(axon_corr_array, k=1, axes=(1, 0))
file_name = self.ads_temp_dir / (myelin_mask_overlay.name[:-len("-myelin")] + "-axon-corr.png")
ads_utils.imwrite(filename=file_name, img=axon_corr_array)
self.load_png_image_from_path(file_name, is_mask=True, colormap="blue")
def on_compute_morphometrics_button(self, event):
"""
Compute morphometrics and save them to an Excel file.
"""
# Get pixel size
try:
pixel_size = self.pixel_size_float
        except AttributeError:
with wx.TextEntryDialog(
self, "Enter the pixel size in micrometer", value="0.07"
) as text_entry:
if text_entry.ShowModal() == wx.ID_CANCEL:
return
pixel_size_str = text_entry.GetValue()
pixel_size = float(pixel_size_str)
# Find the visible myelin and axon masks
axon_mask_overlay = self.get_corrected_axon_overlay()
if axon_mask_overlay is None:
axon_mask_overlay = self.get_visible_axon_overlay()
myelin_mask_overlay = self.get_visible_myelin_overlay()
if (axon_mask_overlay is None) or (myelin_mask_overlay is None):
return
# store the data of the masks in variables as numpy arrays.
# Note: since PIL uses a different convention for the X and Y coordinates, some array manipulation has to be
# done.
# Note 2 : The image array loaded in FSLeyes is flipped. We need to flip it back
myelin_array = np.array(
myelin_mask_overlay[:, :, 0] * params.intensity['binary'], copy=True, dtype=np.uint8
)
myelin_array = np.flipud(myelin_array)
myelin_array = np.rot90(myelin_array, k=1, axes=(1, 0))
axon_array = np.array(
axon_mask_overlay[:, :, 0] * params.intensity['binary'], copy=True, dtype=np.uint8
)
axon_array = np.flipud(axon_array)
axon_array = np.rot90(axon_array, k=1, axes=(1, 0))
# Make sure the masks have the same size
if myelin_array.shape != axon_array.shape:
self.show_message("invalid visible masks dimensions")
return
        # Build the composite prediction array and derive binary axon and myelin masks from it
pred = (myelin_array // 2 + axon_array).astype(np.uint8)
pred_axon = pred > 200
pred_myelin = np.logical_and(pred >= 50, pred <= 200)
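        # Note (illustrative, assuming params.intensity['binary'] is 255): myelin-only pixels of
        # `pred` then sit around 127 and axon pixels at 255, which is why the 50-200 band recovers
        # myelin and the >200 threshold recovers axons.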
x = np.array([], dtype=[
('x0', 'f4'),
('y0', 'f4'),
('gratio','f4'),
('axon_area','f4'),
('axon_perimeter','f4'),
('myelin_area','f4'),
('axon_diam','f4'),
('myelin_thickness','f4'),
('axonmyelin_area','f4'),
('axonmyelin_perimeter','f4'),
('solidity','f4'),
('eccentricity','f4'),
('orientation','f4')
]
)
# Compute statistics
stats_array = get_axon_morphometrics(im_axon=pred_axon, im_myelin=pred_myelin, pixel_size=pixel_size)
for stats in stats_array:
x = np.append(x,
np.array(
[(
stats['x0'],
stats['y0'],
stats['gratio'],
stats['axon_area'],
stats['axon_perimeter'],
stats['myelin_area'],
stats['axon_diam'],
stats['myelin_thickness'],
stats['axonmyelin_area'],
stats['axonmyelin_perimeter'],
stats['solidity'],
stats['eccentricity'],
stats['orientation']
)],
dtype=x.dtype)
)
with wx.FileDialog(self, "Save morphometrics file", wildcard="Excel files (*.xlsx)|*.xlsx",
defaultFile="axon_morphometrics.xlsx", style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return # the user changed their mind
# save the current contents in the file
pathname = fileDialog.GetPath()
if not (pathname.lower().endswith((".xlsx", ".csv"))): # If the user didn't add the extension, add it here
pathname = pathname + ".xlsx"
try:
# Export to excel
pd.DataFrame(x).to_excel(pathname)
except IOError:
wx.LogError("Cannot save current data in file '%s'." % pathname)
# Create the axon coordinate array
mean_diameter_in_pixel = np.average(x['axon_diam']) / pixel_size
axon_indexes = np.arange(x.size)
number_array = postprocessing.generate_axon_numbers_image(axon_indexes, x['x0'], x['y0'],
tuple(reversed(axon_array.shape)),
mean_diameter_in_pixel)
# Load the axon coordinate image into FSLeyes
number_outfile = self.ads_temp_dir / "numbers.png"
ads_utils.imwrite(number_outfile, number_array)
self.load_png_image_from_path(number_outfile, is_mask=False, colormap="yellow")
return
def on_settings_button(self, event):
#TODO: Add a class for the settings. Perhaps even one for the window
self.settings_frame = wx.Frame(self, title="Settings", size=(600, 300))
frame_sizer_h = wx.BoxSizer(wx.VERTICAL)
# Since the axon shape doesn't do anything yet, I will just comment it
# frame_sizer_axon_choice = wx.BoxSizer(wx.HORIZONTAL)
# frame_sizer_axon_choice.Add(wx.StaticText(settings_frame, label="Axon Shape: "))
# self.axon_shape_choices = ["circle", "ellipse"]
# self.axon_shape_combobox = wx.ComboBox(
# settings_frame,
# choices=self.axon_shape_choices,
# size=(100, 20),
# value=self.axon_shape_choices[0] # TODO: show the one currently selected
# )
# self.axon_shape_combobox.SetToolTip(
# wx.ToolTip(
# 'Select what is the shape of the axons that will be considered when computing the morphometrics'
# '. "circle" will use the mean diameter of the axons. "ellipse" will use minor axis of the axons.'
# )
# )
# frame_sizer_axon_choice.Add(self.axon_shape_combobox, flag=wx.SHAPED, proportion=1)
# frame_sizer_h.Add(frame_sizer_axon_choice)
# Add the overlap value to the settings menu
sizer_overlap_value = wx.BoxSizer(wx.HORIZONTAL)
sizer_overlap_value.Add(wx.StaticText(self.settings_frame, label="Overlap value (pixels): "))
self.overlap_value_spinCtrl = wx.SpinCtrl(self.settings_frame, min=0, max=100, initial=self.overlap_value)
self.overlap_value_spinCtrl.Bind(wx.EVT_SPINCTRL, self.on_overlap_value_changed)
sizer_overlap_value.Add(self.overlap_value_spinCtrl, flag=wx.SHAPED, proportion=1)
frame_sizer_h.Add(sizer_overlap_value)
# # Add the model resolution to the settings menu
# sizer_model_resolution = wx.BoxSizer(wx.HORIZONTAL)
# sizer_model_resolution.Add(wx.StaticText(settings_frame, label="Model resolution (um/pixel): "))
# self.model_resolution_spinCtrlDouble = wx.SpinCtrlDouble(
# settings_frame, initial=self.model_resolution, inc=0.0001)
# self.model_resolution_spinCtrlDouble.Bind(wx.EVT_SPINCTRLDOUBLE, self.on_model_resolution_value_changed)
# sizer_model_resolution.Add(self.model_resolution_spinCtrlDouble, flag=wx.SHAPED, proportion=1)
# frame_sizer_h.Add(sizer_model_resolution)
# # Add the option to use a custom image resolution
# sizer_custom_resolution = wx.BoxSizer(wx.HORIZONTAL)
# self.use_custom_resolution_checkbox = wx.CheckBox(
# settings_frame, label="Use custom image resolution (um/pixel): ")
# self.use_custom_resolution_checkbox.Bind(wx.EVT_CHECKBOX, self.on_use_custom_resolution_state_change)
# sizer_custom_resolution.Add(self.use_custom_resolution_checkbox)
# self.custom_resolution_spinCtrlDouble = wx.SpinCtrlDouble(
# settings_frame, initial=self.custom_resolution, inc=0.0001)
# self.custom_resolution_spinCtrlDouble.Bind(wx.EVT_SPINCTRLDOUBLE, self.on_custom_resolution_value_changed)
# sizer_custom_resolution.Add(self.custom_resolution_spinCtrlDouble)
# frame_sizer_h.Add(sizer_custom_resolution)
# Add the zoom factor to the settings menu
sizer_zoom_factor = wx.BoxSizer(wx.HORIZONTAL)
sizer_zoom_factor.Add(wx.StaticText(self.settings_frame, label="Zoom factor: "))
self.zoom_factor_spinCtrlDouble = wx.SpinCtrlDouble(
self.settings_frame, initial=self.zoom_factor, inc=0.0001)
self.zoom_factor_spinCtrlDouble.Bind(wx.EVT_SPINCTRLDOUBLE, self.on_zoom_factor_changed)
sizer_zoom_factor.Add(self.zoom_factor_spinCtrlDouble, flag=wx.SHAPED, proportion=1)
frame_sizer_h.Add(sizer_zoom_factor)
# Add the done button
sizer_done_button = wx.BoxSizer(wx.HORIZONTAL)
done_button = wx.Button(self.settings_frame, label="Done")
done_button.Bind(wx.EVT_BUTTON, self.on_done_button)
sizer_done_button.Add(done_button, flag=wx.SHAPED, proportion=1)
frame_sizer_h.Add(sizer_done_button)
self.settings_frame.SetSizer(frame_sizer_h)
self.settings_frame.Show()
def on_overlap_value_changed(self, event):
self.overlap_value = self.overlap_value_spinCtrl.GetValue()
print(self.overlap_value)
def on_model_resolution_value_changed(self, event):
self.model_resolution = self.model_resolution_spinCtrlDouble.GetValue()
def on_use_custom_resolution_state_change(self, event):
self.use_custom_resolution = self.use_custom_resolution_checkbox.GetValue()
print(self.use_custom_resolution)
def on_custom_resolution_value_changed(self, event):
self.custom_resolution = self.custom_resolution_spinCtrlDouble.GetValue()
print(self.custom_resolution)
def on_zoom_factor_changed(self, event):
self.zoom_factor = self.zoom_factor_spinCtrlDouble.GetValue()
def on_done_button(self, event):
# TODO: make sure every setting is saved
self.settings_frame.Close()
def get_watershed_segmentation(self, im_axon, im_myelin, return_centroids=False):
"""
Parts of this function were copied from the code found in this document :
https://github.com/neuropoly/axondeepseg/blob/master/AxonDeepSeg/morphometrics/compute_morphometrics.py
In the future, the referenced script should be modified in order to avoid repetition.
:param im_axon: the binary mask corresponding to axons
:type im_axon: ndarray
:param im_myelin: the binary mask corresponding to the myelin
:type im_myelin: ndarray
:param return_centroids: (optional) if this is set to true, the function will also return the centroids of the
axon objects as a list of tuples
:type return_centroids: bool
:return: the label corresponding to the axon+myelin objects
:rtype: ndarray
"""
# Label each axon object
im_axon_label = measure.label(im_axon)
# Measure properties for each axon object
axon_objects = measure.regionprops(im_axon_label)
# Deal with myelin mask
if im_myelin is not None:
# sum axon and myelin masks
im_axonmyelin = im_axon + im_myelin
            # Compute the distance between each pixel and the background. Note: this distance is calculated from
            # im_axon, not from the im_axonmyelin image, because we know that each axon object is already isolated,
            # therefore the distance metric will be more useful for the watershed algorithm below.
distance = ndi.distance_transform_edt(im_axon)
# local_maxi = feature.peak_local_max(distance, indices=False, footprint=np.ones((31, 31)), labels=axonmyelin)
# Get axon centroid as int (not float) to be used as index
ind_centroid = (
[int(props.centroid[0]) for props in axon_objects],
[int(props.centroid[1]) for props in axon_objects],
)
            # Create an image with axon centroids, whose value corresponds to the value of the axon object
im_centroid = np.zeros_like(im_axon, dtype="uint16")
for i in range(len(ind_centroid[0])):
# Note: The value "i" corresponds to the label number of im_axon_label
im_centroid[ind_centroid[0][i], ind_centroid[1][i]] = i + 1
# Watershed segmentation of axonmyelin using distance map
im_axonmyelin_label = morphology.watershed(
-distance, im_centroid, mask=im_axonmyelin
)
if return_centroids is True:
return im_axonmyelin_label, ind_centroid
else:
return im_axonmyelin_label
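    # Usage sketch (hypothetical call, not part of the original code): given two binary masks of equal shape,
    #   labels, centroids = self.get_watershed_segmentation(axon_mask, myelin_mask, return_centroids=True)
    # yields one integer label per axon+myelin object plus the axon centroid indices.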
def load_png_image_from_path(
self, image_path, is_mask=False, add_to_overlayList=True, colormap="greyscale"
):
"""
This function converts a 2D image into a NIfTI image and loads it as an overlay.
        The parameter add_to_overlayList controls whether the overlay is displayed in FSLeyes.
:param image_path: The location of the image, including the name and the .extension
:type image_path: Path
:param is_mask: (optional) Whether or not this is a segmentation mask. It will be treated as a normal
image by default.
:type is_mask: bool
:param add_to_overlayList: (optional) Whether or not to add the image to the overlay list. If so, the image will
be displayed in the application. This parameter is True by default.
:type add_to_overlayList: bool
:param colormap: (optional) the colormap of image that will be displayed. This parameter is set to greyscale by
default.
:type colormap: string
:return: the FSLeyes overlay corresponding to the loaded image.
:rtype: overlay
"""
# Open the 2D image
img_png2D = ads_utils.imread(image_path)
if is_mask is True:
img_png2D = img_png2D // params.intensity['binary'] # Segmentation masks should be binary
# Flip the image on the Y axis so that the morphometrics file shows the right coordinates
img_png2D = np.flipud(img_png2D)
# Convert image data into a NIfTI image
# Note: PIL and NiBabel use different axis conventions, so some array manipulation has to be done.
img_NIfTI = nib.Nifti1Image(
np.rot90(img_png2D, k=1, axes=(1, 0)), np.eye(4)
)
# Save the NIfTI image in a temporary directory
img_name = image_path.stem
out_file = self.ads_temp_dir.__str__() + "/" + img_name + ".nii.gz"
nib.save(img_NIfTI, out_file)
# Load the NIfTI image as an overlay
img_overlay = ovLoad.loadOverlays(paths=[out_file], inmem=True, blocking=True)[
0
]
# Display the overlay
if add_to_overlayList is True:
self.overlayList.append(img_overlay)
opts = self.displayCtx.getOpts(img_overlay)
opts.cmap = colormap
return img_overlay
def get_visible_overlays(self):
"""
        This function returns a list containing every overlay that is visible in FSLeyes.
:return: The list of the visible overlays
:rtype: list
"""
visible_overlay_list = []
for an_overlay in self.overlayList:
an_overlay_display = self.displayCtx.getDisplay(an_overlay)
if an_overlay_display.enabled is True:
visible_overlay_list.append(an_overlay)
return visible_overlay_list
def get_visible_image_overlay(self):
"""
This function is used to find the active microscopy image. This image should be visible and should NOT have the
        following keywords in its name: axon, myelin, Myelin, watershed, Watershed.
:return: The visible microscopy image
:rtype: overlay
"""
visible_overlay_list = self.get_visible_overlays()
image_overlay = None
n_found_overlays = 0
        if len(visible_overlay_list) == 0:
self.show_message("No overlays are displayed")
return None
        if len(visible_overlay_list) == 1:
return visible_overlay_list[0]
for an_overlay in visible_overlay_list:
if (
("watershed" not in an_overlay.name)
and ("Watershed" not in an_overlay.name)
and (not an_overlay.name.endswith("-myelin"))
and (not an_overlay.name.endswith("-Myelin"))
and (not an_overlay.name.endswith("-Axon"))
and (not an_overlay.name.endswith("-axon"))
):
n_found_overlays = n_found_overlays + 1
image_overlay = an_overlay
if n_found_overlays > 1:
self.show_message("More than one microscopy image has been found")
return None
        if n_found_overlays == 0:
self.show_message("No visible microscopy image has been found")
return None
return image_overlay
def get_visible_axon_overlay(self):
"""
This method finds the currently visible axon overlay
:return: The visible overlay that corresponds to the axon mask
:rtype: overlay
"""
visible_overlay_list = self.get_visible_overlays()
axon_overlay = None
n_found_overlays = 0
        if len(visible_overlay_list) == 0:
self.show_message("No overlays are displayed")
return None
for an_overlay in visible_overlay_list:
if (an_overlay.name.endswith("-axon")) or (an_overlay.name.endswith("-Axon")):
n_found_overlays = n_found_overlays + 1
axon_overlay = an_overlay
if n_found_overlays > 1:
self.show_message("More than one axon mask has been found")
return None
        if n_found_overlays == 0:
self.show_message("No visible axon mask has been found")
return None
return axon_overlay
def get_corrected_axon_overlay(self):
"""
        This method finds the visible corrected axon overlay, if it exists.
        :return: The visible corrected axon overlay
        :rtype: overlay
"""
visible_overlay_list = self.get_visible_overlays()
axon_overlay = None
n_found_overlays = 0
        if len(visible_overlay_list) == 0:
self.show_message("No overlays are displayed")
return None
for an_overlay in visible_overlay_list:
if (an_overlay.name.endswith("-axon-corr")) or (an_overlay.name.endswith("-Axon-corr")):
n_found_overlays = n_found_overlays + 1
axon_overlay = an_overlay
if n_found_overlays > 1:
self.show_message("More than one corrected axon mask has been found")
return None
        if n_found_overlays == 0:
return None
return axon_overlay
def get_visible_myelin_overlay(self):
"""
This method finds the currently visible myelin overlay
:return: The visible overlay that corresponds to the myelin mask
:rtype: overlay
"""
visible_overlay_list = self.get_visible_overlays()
myelin_overlay = None
n_found_overlays = 0
        if len(visible_overlay_list) == 0:
self.show_message("No overlays are displayed")
return None
for an_overlay in visible_overlay_list:
if (an_overlay.name.endswith("-myelin")) or (an_overlay.name.endswith("-Myelin")):
n_found_overlays = n_found_overlays + 1
myelin_overlay = an_overlay
if n_found_overlays > 1:
self.show_message("More than one myelin mask has been found")
return None
        if n_found_overlays == 0:
self.show_message("No visible myelin mask has been found")
return None
return myelin_overlay
def show_message(self, message, caption="Error"):
"""
This function is used to show a popup message on the FSLeyes interface.
:param message: The message to be displayed.
:type message: String
:param caption: (Optional) The caption of the message box.
:type caption: String
"""
with wx.MessageDialog(
self,
message,
caption=caption,
style=wx.OK | wx.CENTRE,
pos=wx.DefaultPosition,
) as msg:
msg.ShowModal()
def verrify_version(self):
"""
This function checks if the plugin version is the same as the one in the AxonDeepSeg directory
"""
ads_path = Path(AxonDeepSeg.__file__).parents[0]
plugin_path_parts = ads_path.parts[:-1]
plugin_path = Path(*plugin_path_parts)
plugin_file = plugin_path / "ads_plugin.py"
# Check if the plugin file exists
plugin_file_exists = plugin_file.exists()
if plugin_file_exists is False:
return
# Check the version of the plugin
with open(plugin_file.__str__()) as plugin_file_reader:
plugin_file_lines = plugin_file_reader.readlines()
plugin_file_lines = [x.strip() for x in plugin_file_lines]
version_line = 'VERSION = "' + VERSION + '"'
plugin_is_up_to_date = True
version_found = False
for lines in plugin_file_lines:
if (lines.startswith("VERSION = ")):
version_found = True
if not (lines == version_line):
plugin_is_up_to_date = False
if (version_found is False) or (plugin_is_up_to_date is False):
message = (
"A more recent version of the AxonDeepSeg plugin was found in your AxonDeepSeg installation folder. "
"You will need to replace the current FSLeyes plugin which the new one. "
"To proceed, go to: file -> load plugin -> ads_plugin.py. Then, restart FSLeyes."
)
self.show_message(message, "Warning")
return
def get_citation(self):
"""
This function returns the AxonDeepSeg paper citation.
:return: The AxonDeepSeg citation
:rtype: string
"""
return (
"If you use this work in your research, please cite it as follows: \n"
"Zaimi, A., Wabartha, M., Herman, V., Antonsanti, P.-L., Perone, C. S., & Cohen-Adad, J. (2018). "
"AxonDeepSeg: automatic axon and myelin segmentation from microscopy data using convolutional "
"neural networks. Scientific Reports, 8(1), 3816. "
"Link to paper: https://doi.org/10.1038/s41598-018-22181-4. \n"
"Copyright (c) 2018 NeuroPoly (Polytechnique Montreal)"
)
def get_logo(self):
"""
This function finds the AxonDeepSeg logo saved as a png image and returns it as a wx bitmap image.
:return: The AxonDeepSeg logo
:rtype: wx.StaticBitmap
"""
ads_path = Path(AxonDeepSeg.__file__).parents[0]
logo_file = ads_path / "logo_ads-alpha_small.png"
png = wx.Image(str(logo_file), wx.BITMAP_TYPE_ANY).ConvertToBitmap()
png.SetSize((png.GetWidth(), png.GetHeight()))
logo_image = wx.StaticBitmap(
self, -1, png, wx.DefaultPosition, (png.GetWidth(), png.GetHeight())
)
return logo_image
@staticmethod
def supportedViews():
"""
        Returns the FSLeyes view panels that this control supports (here, only the OrthoPanel).
"""
from fsleyes.views.orthopanel import OrthoPanel
return [OrthoPanel]
@staticmethod
def defaultLayout():
"""
This method makes the control panel appear on the left of the FSLeyes window.
"""
return {"location": wx.LEFT}
|
the-stack_106_26887 | # Copyright 2013 Evan Hazlett and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import patterns, url
urlpatterns = patterns('applications.views',
url(r'^$', 'index', name='applications.index'),
url(r'^create/$', 'create', name='applications.create'),
url(r'^details/(?P<app_uuid>\w{32})/$', 'details',
name='applications.details'),
url(r'^edit/$', 'edit', name='applications.edit'),
url(r'^(?P<app_uuid>\w{32})/delete/$', 'delete',
name='applications.delete'),
url(r'^(?P<app_uuid>\w{32})/containers/attach/$',
'attach_containers', name='applications.attach_containers'),
url(r'^(?P<app_uuid>\w{32})/containers/(?P<container_id>\w{12})/remove/$',
'remove_container', name='applications.remove_container'),
)
|
the-stack_106_26888 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# CODE NAME HERE
# CODE DESCRIPTION HERE
Created on 2019-01-17 at 14:31
@author: cook
"""
from apero.science.velocity import general
__all__ = ['compute_ccf_fp', 'compute_ccf_science', 'locate_reference_file',
'measure_fp_peaks', 'remove_telluric_domain', 'remove_wide_peaks',
'write_ccf']
# =============================================================================
# Define functions
# =============================================================================
compute_ccf_fp = general.compute_ccf_fp
compute_ccf_science = general.compute_ccf_science
fit_fp_peaks = general.fit_fp_peaks
locate_reference_file = general.locate_reference_file
measure_fp_peaks = general.measure_fp_peaks
remove_telluric_domain = general.remove_telluric_domain
remove_wide_peaks = general.remove_wide_peaks
write_ccf = general.write_ccf
# =============================================================================
# End of code
# =============================================================================
|
the-stack_106_26889 | # Good kraken and python resource:
# https://github.com/zertrin/clikraken/tree/master/clikraken
import base64
import hashlib
import hmac
import json
import logging
import time
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
from urllib.parse import urlencode
import gevent
import requests
from gevent.lock import Semaphore
from requests import Response
from rotkehlchen.assets.converters import KRAKEN_TO_WORLD, asset_from_kraken
from rotkehlchen.constants import KRAKEN_API_VERSION, KRAKEN_BASE_URL
from rotkehlchen.constants.assets import A_DAI, A_ETH
from rotkehlchen.constants.misc import ZERO
from rotkehlchen.errors import (
DeserializationError,
RemoteError,
UnknownAsset,
UnprocessableTradePair,
)
from rotkehlchen.exchanges.data_structures import (
AssetMovement,
Trade,
get_pair_position_asset,
trade_pair_from_assets,
)
from rotkehlchen.exchanges.exchange import ExchangeInterface
from rotkehlchen.fval import FVal
from rotkehlchen.inquirer import Inquirer
from rotkehlchen.logging import RotkehlchenLogsAdapter
from rotkehlchen.serialization.deserialize import (
deserialize_asset_amount,
deserialize_asset_amount_force_positive,
deserialize_asset_movement_category,
deserialize_fee,
deserialize_price,
deserialize_timestamp_from_kraken,
deserialize_trade_type,
pair_get_assets,
)
from rotkehlchen.typing import ApiKey, ApiSecret, Location, Timestamp, TradePair
from rotkehlchen.user_messages import MessagesAggregator
from rotkehlchen.utils.interfaces import cache_response_timewise, protect_with_lock
from rotkehlchen.utils.misc import ts_now
from rotkehlchen.utils.serialization import rlk_jsonloads_dict
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
KRAKEN_DELISTED = ('XDAO', 'XXVN', 'ZKRW', 'XNMC', 'BSV', 'XICN')
KRAKEN_PUBLIC_METHODS = ('AssetPairs', 'Assets')
KRAKEN_QUERY_TRIES = 8
MAX_CALL_COUNTER_INCREASE = 2 # Trades and Ledger produce the max increase
def kraken_to_world_pair(pair: str) -> TradePair:
"""Turns a pair from kraken to our pair type
Can throw:
    - UnknownAsset if one of the assets of the pair is not known
    - UnprocessableTradePair if the pair can't be processed and
      split into its base/quote assets
"""
# handle dark pool pairs
if pair[-2:] == '.d':
pair = pair[:-2]
if len(pair) == 6 and pair[0:3] in ('EUR', 'USD', 'AUD'):
# This is for the FIAT to FIAT pairs that kraken introduced
base_asset_str = pair[0:3]
quote_asset_str = pair[3:]
elif pair == 'ETHDAI':
return trade_pair_from_assets(base=A_ETH, quote=A_DAI)
elif pair[0:2] in KRAKEN_TO_WORLD:
base_asset_str = pair[0:2]
quote_asset_str = pair[2:]
elif pair[0:3] in KRAKEN_TO_WORLD:
base_asset_str = pair[0:3]
quote_asset_str = pair[3:]
elif pair[0:3] in ('XBT', 'ETH', 'XDG', 'LTC', 'XRP'):
# Some assets can have the 'X' prefix omitted for some pairs
base_asset_str = pair[0:3]
quote_asset_str = pair[3:]
elif pair[0:4] in KRAKEN_TO_WORLD:
base_asset_str = pair[0:4]
quote_asset_str = pair[4:]
elif pair[0:5] in KRAKEN_TO_WORLD:
base_asset_str = pair[0:5]
quote_asset_str = pair[5:]
else:
raise UnprocessableTradePair(pair)
base_asset = asset_from_kraken(base_asset_str)
quote_asset = asset_from_kraken(quote_asset_str)
return trade_pair_from_assets(base_asset, quote_asset)
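# Illustrative sketch (assuming the KRAKEN_TO_WORLD table maps 'XXBT' to bitcoin and 'ZEUR' to euro):
# kraken_to_world_pair('XXBTZEUR') resolves the base/quote assets to the BTC/EUR trade pair, and the
# dark-pool variant 'XXBTZEUR.d' gives the same result because the '.d' suffix is stripped first.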
def world_to_kraken_pair(tradeable_pairs: List[str], pair: TradePair) -> str:
base_asset, quote_asset = pair_get_assets(pair)
base_asset_str = base_asset.to_kraken()
quote_asset_str = quote_asset.to_kraken()
pair1 = base_asset_str + quote_asset_str
pair2 = quote_asset_str + base_asset_str
# In some pairs, XXBT is XBT and ZEUR is EUR ...
pair3 = None
if 'XXBT' in pair1:
pair3 = pair1.replace('XXBT', 'XBT')
pair4 = None
if 'XXBT' in pair2:
pair4 = pair2.replace('XXBT', 'XBT')
if 'ZEUR' in pair1:
pair3 = pair1.replace('ZEUR', 'EUR')
pair4 = None
if 'ZEUR' in pair2:
pair4 = pair2.replace('ZEUR', 'EUR')
if pair1 in tradeable_pairs:
new_pair = pair1
elif pair2 in tradeable_pairs:
new_pair = pair2
elif pair3 in tradeable_pairs:
new_pair = pair3
elif pair4 in tradeable_pairs:
new_pair = pair4
else:
raise ValueError(
            f'Unknown pair "{pair}" provided. Could not find {base_asset_str + quote_asset_str}'
f' or {quote_asset_str + base_asset_str} in tradeable pairs',
)
return new_pair
def trade_from_kraken(kraken_trade: Dict[str, Any]) -> Trade:
"""Turn a kraken trade returned from kraken trade history to our common trade
history format
- Can raise UnknownAsset due to kraken_to_world_pair
- Can raise UnprocessableTradePair due to kraken_to_world_pair
- Can raise DeserializationError due to dict entries not being as expected
- Can raise KeyError due to dict entries missing an expected entry
"""
currency_pair = kraken_to_world_pair(kraken_trade['pair'])
quote_currency = get_pair_position_asset(currency_pair, 'second')
timestamp = deserialize_timestamp_from_kraken(kraken_trade['time'])
amount = deserialize_asset_amount(kraken_trade['vol'])
cost = deserialize_price(kraken_trade['cost'])
fee = deserialize_fee(kraken_trade['fee'])
order_type = deserialize_trade_type(kraken_trade['type'])
rate = deserialize_price(kraken_trade['price'])
# pylint does not seem to see that Price is essentially FVal
if not cost.is_close(amount * rate): # pylint: disable=no-member
log.warning(f'cost ({cost}) != amount ({amount}) * rate ({rate}) for kraken trade')
log.debug(
'Processing kraken Trade',
sensitive_log=True,
timestamp=timestamp,
order_type=order_type,
kraken_pair=kraken_trade['pair'],
pair=currency_pair,
quote_currency=quote_currency,
amount=amount,
cost=cost,
fee=fee,
rate=rate,
)
# Kraken trades can have the same ordertxid and postxid for different trades ..
# Also note postxid is optional and can be missing
# The only thing that could differentiate them is timestamps in the milliseconds range
# For example here are parts of two different kraken_trade:
# {'ordertxid': 'AM4ZOZ-GLEMD-ZICOGR', 'postxid': 'AKH2SE-M7IF5-CFI7AT',
# 'pair': 'XXBTZEUR', 'time': FVal(1561161486.2955)
# {'ordertxid': 'AM4ZOZ-GLEMD-ZICOGR', 'postxid': 'AKH2SE-M7IF5-CFI7AT',
# 'pair': 'XXBTZEUR', 'time': FVal(1561161486.3005)
#
# In order to counter this for the unique exchange trade link we are going
# to use a concatenation of the above
exchange_uuid = (
str(kraken_trade['ordertxid']) +
str(kraken_trade.get('postxid', '')) + # postxid is optional
str(kraken_trade['time'])
)
return Trade(
timestamp=timestamp,
location=Location.KRAKEN,
pair=currency_pair,
trade_type=order_type,
amount=amount,
rate=rate,
fee=fee,
fee_currency=quote_currency,
link=exchange_uuid,
)
def _check_and_get_response(response: Response, method: str) -> Union[str, Dict]:
"""Checks the kraken response and if it's succesfull returns the result.
If there is recoverable error a string is returned explaining the error
May raise:
- RemoteError if there is an unrecoverable/unexpected remote error
"""
if response.status_code in (520, 525, 504):
log.debug(f'Kraken returned status code {response.status_code}')
return 'Usual kraken 5xx shenanigans'
elif response.status_code != 200:
raise RemoteError(
'Kraken API request {} for {} failed with HTTP status '
'code: {}'.format(
response.url,
method,
response.status_code,
))
try:
decoded_json = rlk_jsonloads_dict(response.text)
except json.decoder.JSONDecodeError as e:
raise RemoteError(f'Invalid JSON in Kraken response. {e}')
try:
if decoded_json['error']:
if isinstance(decoded_json['error'], list):
error = decoded_json['error'][0]
else:
error = decoded_json['error']
if 'Rate limit exceeded' in error:
log.debug(f'Kraken: Got rate limit exceeded error: {error}')
return 'Rate limited exceeded'
else:
raise RemoteError(error)
result = decoded_json['result']
except KeyError as e:
raise RemoteError(f'Unexpected format of Kraken response. Missing key: {e}')
return result
class KrakenAccountType(Enum):
STARTER = 0
INTERMEDIATE = 1
PRO = 2
def __str__(self) -> str:
if self == KrakenAccountType.STARTER:
return 'starter'
elif self == KrakenAccountType.INTERMEDIATE:
return 'intermediate'
elif self == KrakenAccountType.PRO:
return 'pro'
        raise RuntimeError(f'Corrupt value {self} for KrakenAccountType -- Should never happen')
def serialize(self) -> str:
return str(self)
@staticmethod
def deserialize(symbol: str) -> 'KrakenAccountType':
if symbol == 'starter':
return KrakenAccountType.STARTER
elif symbol == 'intermediate':
return KrakenAccountType.INTERMEDIATE
elif symbol == 'pro':
return KrakenAccountType.PRO
        raise DeserializationError(f'Tried to deserialize an invalid kraken account type: {symbol}')
class Kraken(ExchangeInterface):
def __init__(
self,
api_key: ApiKey,
secret: ApiSecret,
database: 'DBHandler',
msg_aggregator: MessagesAggregator,
account_type: KrakenAccountType = KrakenAccountType.STARTER,
):
super(Kraken, self).__init__('kraken', api_key, secret, database)
self.msg_aggregator = msg_aggregator
self.session.headers.update({
'API-Key': self.api_key,
})
self.nonce_lock = Semaphore()
self.set_account_type(account_type)
self.call_counter = 0
self.last_query_ts = 0
def set_account_type(self, account_type: KrakenAccountType) -> None:
self.account_type = account_type
if self.account_type == KrakenAccountType.STARTER:
self.call_limit = 15
self.reduction_every_secs = 3
elif self.account_type == KrakenAccountType.INTERMEDIATE:
self.call_limit = 20
self.reduction_every_secs = 2
else: # Pro
self.call_limit = 20
self.reduction_every_secs = 1
def validate_api_key(self) -> Tuple[bool, str]:
"""Validates that the Kraken API Key is good for usage in Rotkehlchen
        Makes sure that the following permissions are given to the key:
- Ability to query funds
- Ability to query open/closed trades
- Ability to query ledgers
"""
valid, msg = self._validate_single_api_key_action('Balance')
if not valid:
return False, msg
valid, msg = self._validate_single_api_key_action(
method_str='TradesHistory',
req={'start': 0, 'end': 0},
)
if not valid:
return False, msg
valid, msg = self._validate_single_api_key_action(
method_str='Ledgers',
req={'start': 0, 'end': 0, 'type': 'deposit'},
)
if not valid:
return False, msg
return True, ''
def _validate_single_api_key_action(
self,
method_str: str,
req: Optional[Dict[str, Any]] = None,
) -> Tuple[bool, str]:
try:
self.api_query(method_str, req)
except (RemoteError, ValueError) as e:
error = str(e)
if 'Incorrect padding' in error:
return False, 'Provided API Key or secret is invalid'
elif 'EAPI:Invalid key' in error:
return False, 'Provided API Key is invalid'
elif 'EGeneral:Permission denied' in error:
msg = (
'Provided API Key does not have appropriate permissions. Make '
'sure that the "Query Funds", "Query Open/Closed Order and Trades"'
'and "Query Ledger Entries" actions are allowed for your Kraken API Key.'
)
return False, msg
else:
log.error(f'Kraken API key validation error: {str(e)}')
msg = (
'Unknown error at Kraken API key validation. Perhaps API '
'Key/Secret combination invalid?'
)
return False, msg
return True, ''
def first_connection(self) -> None:
self.first_connection_made = True
def _manage_call_counter(self, method: str) -> None:
self.last_query_ts = ts_now()
if method in ('Ledgers', 'TradesHistory'):
self.call_counter += 2
else:
self.call_counter += 1
def _query_public(self, method: str, req: Optional[dict] = None) -> Union[Dict, str]:
"""API queries that do not require a valid key/secret pair.
Arguments:
method -- API method name (string, no default)
req -- additional API request parameters (default: {})
"""
if req is None:
req = {}
urlpath = f'{KRAKEN_BASE_URL}/{KRAKEN_API_VERSION}/public/{method}'
try:
response = self.session.post(urlpath, data=req)
except requests.exceptions.RequestException as e:
raise RemoteError(f'Kraken API request failed due to {str(e)}')
self._manage_call_counter(method)
return _check_and_get_response(response, method)
def api_query(self, method: str, req: Optional[dict] = None) -> dict:
tries = KRAKEN_QUERY_TRIES
query_method = (
self._query_public if method in KRAKEN_PUBLIC_METHODS else self._query_private
)
while tries > 0:
if self.call_counter + MAX_CALL_COUNTER_INCREASE > self.call_limit:
# If we are close to the limit, check how much our call counter reduced
# https://www.kraken.com/features/api#api-call-rate-limit
secs_since_last_call = ts_now() - self.last_query_ts
self.call_counter = max(
0,
self.call_counter - int(secs_since_last_call / self.reduction_every_secs),
)
# If still at limit, sleep for an amount big enough for smallest tier reduction
if self.call_counter + MAX_CALL_COUNTER_INCREASE > self.call_limit:
backoff_in_seconds = self.reduction_every_secs * 2
log.debug(
f'Doing a Kraken API call would now exceed our call counter limit. '
f'Backing off for {backoff_in_seconds} seconds',
call_counter=self.call_counter,
)
gevent.sleep(backoff_in_seconds)
continue
log.debug(
'Kraken API query',
method=method,
data=req,
call_counter=self.call_counter,
)
result = query_method(method, req)
if isinstance(result, str):
# Got a recoverable error
backoff_in_seconds = int(15 / tries)
log.debug(
f'Got recoverable error {result} in a Kraken query of {method}. Will backoff '
f'for {backoff_in_seconds} seconds',
)
gevent.sleep(backoff_in_seconds)
continue
# else success
return result
raise RemoteError(
            f'After {KRAKEN_QUERY_TRIES} tries, the kraken query for {method} could still not be completed',
)
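    # Rough worked example of the rate-limit accounting above (illustrative only): on a starter
    # account call_limit is 15 and the counter decays by 1 every 3 seconds, so with the counter at
    # 14 another 'Ledgers' call (which costs 2) would exceed the limit and the loop backs off for
    # reduction_every_secs * 2 = 6 seconds before retrying.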
def _query_private(self, method: str, req: Optional[dict] = None) -> Union[Dict, str]:
"""API queries that require a valid key/secret pair.
Arguments:
method -- API method name (string, no default)
req -- additional API request parameters (default: {})
"""
if req is None:
req = {}
urlpath = '/' + KRAKEN_API_VERSION + '/private/' + method
with self.nonce_lock:
# Protect this section, or else, non increasing nonces will be rejected
req['nonce'] = int(1000 * time.time())
post_data = urlencode(req)
# any unicode strings must be turned to bytes
hashable = (str(req['nonce']) + post_data).encode()
message = urlpath.encode() + hashlib.sha256(hashable).digest()
signature = hmac.new(
base64.b64decode(self.secret),
message,
hashlib.sha512,
)
self.session.headers.update({
'API-Sign': base64.b64encode(signature.digest()), # type: ignore
})
try:
response = self.session.post(
KRAKEN_BASE_URL + urlpath,
data=post_data.encode(),
)
except requests.exceptions.RequestException as e:
raise RemoteError(f'Kraken API request failed due to {str(e)}')
self._manage_call_counter(method)
return _check_and_get_response(response, method)
# ---- General exchanges interface ----
@protect_with_lock()
@cache_response_timewise()
def query_balances(self) -> Tuple[Optional[dict], str]:
try:
old_balances = self.api_query('Balance', req={})
except RemoteError as e:
if "Missing key: 'result'" in str(e):
# handle https://github.com/rotki/rotki/issues/946
old_balances = {}
else:
msg = (
'Kraken API request failed. Could not reach kraken due '
'to {}'.format(e)
)
log.error(msg)
return None, msg
balances = {}
for k, v in old_balances.items():
v = FVal(v)
if v == FVal(0):
continue
try:
our_asset = asset_from_kraken(k)
except UnknownAsset as e:
self.msg_aggregator.add_warning(
f'Found unsupported/unknown kraken asset {e.asset_name}. '
f' Ignoring its balance query.',
)
continue
except DeserializationError:
self.msg_aggregator.add_error(
f'Found kraken asset with non-string type {type(k)}. '
f' Ignoring its balance query.',
)
continue
entry = {}
entry['amount'] = v
if k == 'KFEE':
# There is no price value for KFEE. TODO: Shouldn't we then just skip the balance?
entry['usd_value'] = ZERO
else:
try:
usd_price = Inquirer().find_usd_price(our_asset)
except RemoteError as e:
self.msg_aggregator.add_error(
f'Error processing kraken balance entry due to inability to '
f'query USD price: {str(e)}. Skipping balance entry',
)
continue
entry['usd_value'] = FVal(v * usd_price)
if our_asset not in balances:
balances[our_asset] = entry
else: # Some assets may appear twice in kraken balance query for different locations
# Spot/staking for example
balances[our_asset]['amount'] += entry['amount']
balances[our_asset]['usd_value'] += entry['usd_value']
log.debug(
'kraken balance query result',
sensitive_log=True,
currency=our_asset,
amount=entry['amount'],
usd_value=entry['usd_value'],
)
return balances, ''
def query_until_finished(
self,
endpoint: str,
keyname: str,
start_ts: Timestamp,
end_ts: Timestamp,
extra_dict: Optional[dict] = None,
) -> List:
""" Abstracting away the functionality of querying a kraken endpoint where
you need to check the 'count' of the returned results and provide sufficient
calls with enough offset to gather all the data of your query.
"""
result: List = []
log.debug(
f'Querying Kraken {endpoint} from {start_ts} to '
f'{end_ts} with extra_dict {extra_dict}',
)
response = self._query_endpoint_for_period(
endpoint=endpoint,
start_ts=start_ts,
end_ts=end_ts,
extra_dict=extra_dict,
)
count = response['count']
offset = len(response[keyname])
result.extend(response[keyname].values())
log.debug(f'Kraken {endpoint} Query Response with count:{count}')
while offset < count:
log.debug(
f'Querying Kraken {endpoint} from {start_ts} to {end_ts} '
f'with offset {offset} and extra_dict {extra_dict}',
)
response = self._query_endpoint_for_period(
endpoint=endpoint,
start_ts=start_ts,
end_ts=end_ts,
offset=offset,
extra_dict=extra_dict,
)
assert count == response['count']
response_length = len(response[keyname])
offset += response_length
if response_length == 0 and offset != count:
# If we have provided specific filtering then this is a known
# issue documented below, so skip the warning logging
# https://github.com/rotki/rotki/issues/116
if extra_dict:
break
# it is possible that kraken misbehaves and either does not
# send us enough results or thinks it has more than it really does
log.warning(
'Missing {} results when querying kraken endpoint {}'.format(
count - offset, endpoint),
)
break
result.extend(response[keyname].values())
return result
def query_online_trade_history(
self,
start_ts: Timestamp,
end_ts: Timestamp,
) -> List[Trade]:
result = self.query_until_finished('TradesHistory', 'trades', start_ts, end_ts)
# And now turn it from kraken trade to our own trade format
trades = []
for raw_data in result:
try:
trades.append(trade_from_kraken(raw_data))
except UnknownAsset as e:
self.msg_aggregator.add_warning(
f'Found kraken trade with unknown asset '
f'{e.asset_name}. Ignoring it.',
)
continue
except UnprocessableTradePair as e:
self.msg_aggregator.add_error(
f'Found kraken trade with unprocessable pair '
f'{e.pair}. Ignoring it.',
)
continue
except (DeserializationError, KeyError) as e:
msg = str(e)
if isinstance(e, KeyError):
msg = f'Missing key entry for {msg}.'
self.msg_aggregator.add_error(
'Error processing a kraken trade. Check logs '
'for details. Ignoring it.',
)
log.error(
'Error processing a kraken trade',
trade=raw_data,
error=msg,
)
continue
return trades
def _query_endpoint_for_period(
self,
endpoint: str,
start_ts: Timestamp,
end_ts: Timestamp,
offset: Optional[int] = None,
extra_dict: Optional[dict] = None,
) -> dict:
request: Dict[str, Union[Timestamp, int]] = {}
request['start'] = start_ts
request['end'] = end_ts
if offset is not None:
request['ofs'] = offset
if extra_dict is not None:
request.update(extra_dict)
result = self.api_query(endpoint, request)
return result
def query_online_deposits_withdrawals(
self,
start_ts: Timestamp,
end_ts: Timestamp,
) -> List[AssetMovement]:
result = self.query_until_finished(
endpoint='Ledgers',
keyname='ledger',
start_ts=start_ts,
end_ts=end_ts,
extra_dict={'type': 'deposit'},
)
result.extend(self.query_until_finished(
endpoint='Ledgers',
keyname='ledger',
start_ts=start_ts,
end_ts=end_ts,
extra_dict={'type': 'withdrawal'},
))
log.debug('Kraken deposit/withdrawals query result', num_results=len(result))
movements = []
for movement in result:
try:
asset = asset_from_kraken(movement['asset'])
movement_type = movement['type']
if movement_type not in ('deposit', 'withdrawal'):
# Other known types: 'transfer'
continue # Can be for moving funds from spot to stake etc.
movements.append(AssetMovement(
location=Location.KRAKEN,
category=deserialize_asset_movement_category(movement_type),
timestamp=deserialize_timestamp_from_kraken(movement['time']),
address=None, # no data from kraken ledger endpoint
transaction_id=None, # no data from kraken ledger endpoint
asset=asset,
amount=deserialize_asset_amount_force_positive(movement['amount']),
fee_asset=asset,
fee=deserialize_fee(movement['fee']),
link=str(movement['refid']),
))
except UnknownAsset as e:
self.msg_aggregator.add_warning(
f'Found unknown kraken asset {e.asset_name}. '
f'Ignoring its deposit/withdrawals query.',
)
continue
except (DeserializationError, KeyError) as e:
msg = str(e)
if isinstance(e, KeyError):
msg = f'Missing key entry for {msg}.'
self.msg_aggregator.add_error(
'Failed to deserialize a kraken deposit/withdrawals. '
'Check logs for details. Ignoring it.',
)
log.error(
'Error processing a kraken deposit/withdrawal.',
raw_asset_movement=movement,
error=msg,
)
continue
return movements
|
the-stack_106_26890 | """
Firmware de dispositivo de colecta de dados
y storage em SSD. Enviando a la nuve segun
jerarquia de filas definidas.
Autor: Eng. Francis Benjamin
Fecha: 09/03/2021
"""
import uasyncio
import btree
from time import sleep
from machine import Pin
import i2c
# I2C objects
adress_i2c = i2c.com_i2c().scan_i2c()
i2c_bus = i2c.com_i2c().i2c_bus
oled = i2c.lcd(adress_i2c[0],i2c_bus)  # first i2c address
rtc = i2c.rtc(i2c_bus)
async def blink(led):
led_ = Pin(led, Pin.OUT)
while True:
led_.on()
await uasyncio.sleep_ms(500)
led_.off()
await uasyncio.sleep_ms(500)
async def print_test():
while True:
posiciones = [ (0,0), (10,0),(15,0)]
templates = ["1", "2", "3"]
oled.text_template(posiciones,templates)
await uasyncio.sleep(1)
oled.simple_text(0,0,"teste")
await uasyncio.sleep(1)
def print_infos(PinNumber):
pass
#####################
######
async def main(led):
uasyncio.create_task(blink(led))
uasyncio.create_task(print_test())
await uasyncio.sleep_ms(100000) #2000000
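    # main() has to stay alive (the sleep above) because uasyncio.run() stops scheduling the
    # spawned blink/print_test tasks once this coroutine returns.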
uasyncio.run(main(2))
|
the-stack_106_26891 | # Django Imports
from django.conf.urls import patterns, url
from django.views.generic import RedirectView
from django.views.generic.base import TemplateView
# WaW Imports
from wawmembers import views, interactions, news, policies, ajax
'''
Dispatches URL requests to functions.
'''
urlpatterns = patterns("",
url(r'^index/$', views.index, name='index'),
url(r'^main/$', views.main, name='main'),
url(r'^main/spies/$', views.spies, name='spies'),
url(r'^main/warlogs/$', views.warlogs, name='warlogs'),
url(r'^main/reslogs/$', views.reslogs, name='reslogs'),
url(r'^main/fleets/$', views.fleet_management, name='fleet_management'),
url(r'^main/fleets/logs/$', views.fleet_logs, name='fleet_logs'),
url(r'^main/fleets/exchange/$', views.exchange_ships, name='ship_exchange'),
url(r'^settings/$', views.settings, name='settings'),
url(r'^worldnews/$', news.world_news, name='world_news'),
url(r'^communiques/$', views.communiques, name='communiques'),
url(r'^communiques/sent$', views.sentcomms, name='sentcomms'),
url(r'^tasks/$', views.tasks, name='tasks'),
url(r'^stats/$', views.stats, name='stats'),
url(r'^stats/(?P<page>\d+)/$', views.statspage, name='statspage'),
url(r'^world/(?P<url>[-\w]+)/$', views.world_page, name='stats_ind'),
url(r'^newworld/$', views.new_world, name='new_world'),
url(r'^federations/$', views.alliances, name='alliances'),
url(r'^federations/(?P<page>\d+)/$', views.alliancespage, name='alliancespage'),
url(r'^federation/(?P<allid>\d+)/$', views.alliances_ind, name='alliances_ind'),
url(r'^federation/(?P<allid>\d+)/banklogs/$', views.alliances_logs, name='alliances_logs'),
url(r'^federation/(?P<allid>\d+)/memberlogs/$', views.alliances_memberlogs, name='alliances_memberlogs'),
url(r'^federation/(?P<allid>\d+)/stats/$', views.alliances_stats, name='alliances_stats'),
url(r'^federation/(?P<allid>\d+)/admin/$', views.alliances_admin, name='alliances_admin'),
url(r'^federations/new/$', views.new_alliance, name='new_alliance'),
url(r'^policies/$', RedirectView.as_view(url='/policies/economics', permanent=True)),
url(r'^policies/economics/$', policies.policies_econ, name='policies_econ'),
url(r'^policies/domestic/$', policies.policies_domestic, name='policies_domestic'),
url(r'^policies/diplomacy/$', policies.policies_diplomacy, name='policies_diplomacy'),
url(r'^policies/fleet/$', policies.policies_military, name='policies_military'),
url(r'^trades/$', views.trades, name='trades'),
url(r'^trades/new$', views.newtrade, name='newtrade'),
url(r'^galacticnews/$', views.galacticnews, name='galactic_news'),
url(r'^donate/$', TemplateView.as_view(template_name='donate.html'), name='donate'),
url(r'^irc/$', TemplateView.as_view(template_name='irc.html'), name='irc'),
url(r'^legal/$', TemplateView.as_view(template_name='legal.html'), name='legal'),
url(r'^about/$', TemplateView.as_view(template_name='about.html'), name='about'),
# ajax
url(r'^ajax/username/$', ajax.username, name='ajaxusername'),
url(r'^ajax/email/$', ajax.email, name='ajaxemail'),
url(r'^ajax/worldname/$', ajax.worldname, name='ajaxworldname'),
url(r'^ajax/avatar/$', ajax.avatar, name='ajaxavatar'),
url(r'^ajax/flag/$', ajax.flag, name='ajaxflag'),
url(r'^ajax/background/$', ajax.background, name='ajaxbackground'),
url(r'^ajax/personalship/$', ajax.personalship, name='ajaxpersonalship'),
# misc redirects
url(r'^forum/$', RedirectView.as_view(url='/forums', permanent=True)),
# url(r'^video/$', TemplateView.as_view(template_name='video.html'), name='video'),
url(r'^snake/$', TemplateView.as_view(template_name='snake.html'), name='snake'),
url(r'^guide/$', RedirectView.as_view(url='http://wawgame.eu/forums/index.php?topic=16.0', permanent=True), name='guide'),
url(r'^rules/$', RedirectView.as_view(url='http://wawgame.eu/forums/index.php?topic=650.0', permanent=True), name='rules'),
)
|
the-stack_106_26892 | from collections import Counter
from typing import List
class Solution:
def countElements(self, arr: List[int]) -> int:
c = Counter(arr)
res = 0
for n,n_count in c.items():
if n+1 in c:
res += n_count
return res
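# Example (hedged illustration): Solution().countElements([1, 2, 3]) returns 2,
# since 1 and 2 each have n + 1 present in the array, while 3 does not.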
|
the-stack_106_26899 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import pytest
import numpy as np
from utils_cv.classification.plot import (
plot_roc_curve,
plot_precision_recall_curve,
plot_pr_roc_curves,
plot_thresholds,
)
from utils_cv.classification.model import hamming_accuracy, zero_one_accuracy
def test_plot_threshold(multilabel_result):
""" Test the plot_loss_threshold function """
y_pred, y_true = multilabel_result
plot_thresholds(hamming_accuracy, y_pred, y_true)
plot_thresholds(zero_one_accuracy, y_pred, y_true)
@pytest.fixture(scope="module")
def binaryclass_result_1():
# Binary-class classification testcase 1
BINARY_Y_TRUE = [0, 0, 1, 1]
BINARY_Y_SCORE = [0.1, 0.4, 0.35, 0.8]
BINARY_CLASSES = [0, 1]
return np.array(BINARY_Y_TRUE), np.array(BINARY_Y_SCORE), BINARY_CLASSES
@pytest.fixture(scope="module")
def binaryclass_result_2():
# Binary-class classification testcase 2
BINARY_Y_TRUE = [0, 0, 1, 1]
BINARY_Y_SCORE = [[0.1, 0.9], [0.4, 0.6], [0.35, 0.65], [0.8, 0.2]]
BINARY_CLASSES = [0, 1]
return np.array(BINARY_Y_TRUE), np.array(BINARY_Y_SCORE), BINARY_CLASSES
@pytest.fixture(scope="module")
def multiclass_result():
# Multi-class classification testcase
MULTI_Y_TRUE = [0, 0, 1, 1, 2, 2]
MULTI_Y_SCORE = [
[0.1, 0.9, 0.0],
[0.4, 0.2, 0.4],
[0.35, 0.15, 0.5],
[0.1, 0.8, 0.1],
[0.2, 0.5, 0.3],
[0.0, 0.1, 0.9],
]
MULTI_CLASSES = [0, 1, 2]
return np.array(MULTI_Y_TRUE), np.array(MULTI_Y_SCORE), MULTI_CLASSES
def test_plot_roc_curve(
binaryclass_result_1, binaryclass_result_2, multiclass_result
):
# Binary-class plot
y_true, y_score, classes = binaryclass_result_1
plot_roc_curve(y_true, y_score, classes, False)
y_true, y_score, classes = binaryclass_result_2
plot_roc_curve(y_true, y_score, classes, False)
# Multi-class plot
y_true, y_score, classes = multiclass_result
plot_roc_curve(y_true, y_score, classes, False)
def test_plot_precision_recall_curve(
binaryclass_result_1, binaryclass_result_2, multiclass_result
):
# Binary-class plot
y_true, y_score, classes = binaryclass_result_1
plot_precision_recall_curve(y_true, y_score, classes, False)
y_true, y_score, classes = binaryclass_result_2
plot_precision_recall_curve(y_true, y_score, classes, False)
# Multi-class plot
y_true, y_score, classes = multiclass_result
plot_precision_recall_curve(y_true, y_score, classes, False)
def test_plot_pr_roc_curves(
binaryclass_result_1, binaryclass_result_2, multiclass_result
):
# Binary-class plot
y_true, y_score, classes = binaryclass_result_1
plot_pr_roc_curves(y_true, y_score, classes, False, (1, 1))
y_true, y_score, classes = binaryclass_result_2
plot_pr_roc_curves(y_true, y_score, classes, False, (1, 1))
# Multi-class plot
y_true, y_score, classes = multiclass_result
plot_pr_roc_curves(y_true, y_score, classes, False, (1, 1))
|
the-stack_106_26900 | """
=================
Wavelet denoising
=================
The discrete wavelet transform is not `shift-invariant`_. Shift invariance can
be achieved through an undecimated wavelet transform (also called stationary
wavelet transform), at the cost of increased redundancy (i.e. more wavelet
coefficients than input image pixels). An alternative way to approximate
shift-invariance in the context of image denoising with the discrete wavelet
transform is to use the technique known as "cycle spinning". This involves
averaging the results of the following 3-step procedure for multiple spatial
shifts, n:
1.) (circularly) shift the signal by an amount, n
2.) apply denoising
3.) apply the inverse shift
For 2D image denoising, we demonstrate here that such cycle-spinning can
provide a substantial increase in quality, with much of the gain being
achieved simply by averaging shifts of only n=0 and n=1 on each axis.
.. _`shift-invariant`: https://en.wikipedia.org/wiki/Shift-invariant_system
"""
import matplotlib.pyplot as plt
from skimage.restoration import denoise_wavelet, cycle_spin
from skimage import data, img_as_float
from skimage.util import random_noise
from skimage.measure import compare_psnr
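# --- Hedged sketch (not part of the original gallery example) ---------------
# The three-step cycle-spinning procedure from the module docstring can be
# written by hand in a few lines. `denoise_fn` and `max_shift` are
# illustrative names, not scikit-image API; `cycle_spin` (used below)
# performs the same shift / denoise / unshift averaging.
import numpy as np
def cycle_spin_manual(image, denoise_fn, max_shift=1):
    # Average denoised copies of `image` over all circular shifts
    # 0..max_shift along the first two axes.
    shifts = range(max_shift + 1)
    acc = np.zeros_like(image, dtype=float)
    for sx in shifts:
        for sy in shifts:
            shifted = np.roll(image, (sx, sy), axis=(0, 1))    # 1) shift
            denoised = denoise_fn(shifted)                     # 2) denoise
            acc += np.roll(denoised, (-sx, -sy), axis=(0, 1))  # 3) unshift
    return acc / len(shifts) ** 2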
original = img_as_float(data.chelsea()[100:250, 50:300])
sigma = 0.155
noisy = random_noise(original, var=sigma**2)
fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(10, 4),
sharex=False, sharey=False)
ax = ax.ravel()
psnr_noisy = compare_psnr(original, noisy)
ax[0].imshow(noisy)
ax[0].axis('off')
ax[0].set_title('Noisy\nPSNR={:0.4g}'.format(psnr_noisy))
# Repeat denoising with different amounts of cycle spinning, e.g.
# max_shift = 0 -> no cycle spinning
# max_shift = 1 -> shifts of (0, 1) along each axis
# max_shift = 3 -> shifts of (0, 1, 2, 3) along each axis
# etc...
denoise_kwargs = dict(multichannel=True, convert2ycbcr=True, wavelet='db1')
all_psnr = []
max_shifts = [0, 1, 3, 5]
for n, s in enumerate(max_shifts):
im_bayescs = cycle_spin(noisy, func=denoise_wavelet, max_shifts=s,
func_kw=denoise_kwargs, multichannel=True)
ax[n+1].imshow(im_bayescs)
ax[n+1].axis('off')
psnr = compare_psnr(original, im_bayescs)
if s == 0:
ax[n+1].set_title(
"Denoised: no cycle shifts\nPSNR={:0.4g}".format(psnr))
else:
ax[n+1].set_title(
"Denoised: {0}x{0} shifts\nPSNR={1:0.4g}".format(s+1, psnr))
all_psnr.append(psnr)
# plot PSNR as a function of the degree of cycle shifting
ax[5].plot(max_shifts, all_psnr, 'k.-')
ax[5].set_ylabel('PSNR (dB)')
ax[5].set_xlabel('max cycle shift along each axis')
ax[5].grid(True)
plt.subplots_adjust(wspace=0.35, hspace=0.35)
# Annotate the 6x6-shift case and the no-cycle-shift case with a cyan arrow
# to highlight a region where block-like artifacts are reduced by cycle shifting
arrowprops = dict(arrowstyle="simple,tail_width=0.1,head_width=0.5",
connectionstyle="arc3",
color='c')
for i in [1, 4]:
ax[i].annotate("", xy=(101, 39), xycoords='data',
xytext=(70, 70), textcoords='data',
arrowprops=arrowprops)
plt.show()
|