repo_name           string, lengths 7 .. 94
repo_path           string, lengths 4 .. 237
repo_head_hexsha    string, lengths 40 .. 40
content             string, lengths 10 .. 680k
apis                string, lengths 2 .. 680k
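Each record below carries these five fields: the repository name, the path of the file inside it, the commit hash the file was taken from, the raw file contents, and the API-call tuples extracted from those contents. A minimal sketch of iterating over such records, assuming they are stored as JSON Lines (the file name is hypothetical, not part of the dataset):

import json

# Hypothetical file name; the JSON Lines storage format is an assumption.
with open("python_api_corpus.jsonl") as fh:
    for line in fh:
        record = json.loads(line)
        print(record["repo_name"], record["repo_path"], record["repo_head_hexsha"])
        source_code = record["content"]   # full source file as a single string
        api_calls = record["apis"]        # extracted API-call tuples for that file
        break  # inspect just the first record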
Ipgnosis/tic_tac_toe
ttt_package/libs/best_move.py
e1519b702531965cc647ff37c1c46d72f4b3b24e
# refactored from make_play to simplify
# by Russell on 3/5/21

#from ttt_package.libs.move_utils import get_open_cells
from ttt_package.libs.compare import get_transposed_games, reorient_games
from ttt_package.libs.calc_game_bound import calc_game_bound
from ttt_package.libs.maxi_min import maximin


# find the best move for this agent, based on prior games in the game_history
def best_move(this_board, agent, ttt_base, probs_calc):

    candidate_games = []
    lower_bound = 0
    upper_bound = 0
    # note that len gives the number of the move about to be made
    num_moves = len(this_board)
    bounds_list = []

    #print("best_move - this_board:", this_board)

    # TRANSPOSE the current game state into 8 different games and store in a list
    # the returned value is a list of dictionaries that contain the transposed game
    # and the source function, to allow the game to be transposed back
    tg_list = get_transposed_games(this_board)
    #print("best_move: tg_list =", tg_list)

    # for each of the 8 transposed versions of the current game in question
    # build a list of lower and upper bound tuples for the tg_list using calc_game_bound
    for tgame in tg_list:
        lower_bound = calc_game_bound(tgame["transpose"], agent, 'L')
        upper_bound = calc_game_bound(tgame["transpose"], agent, 'U')
        bounds_tuple = (lower_bound, upper_bound)
        bounds_list.append(bounds_tuple)
        #print("best_move: bounds_tuple =", bounds_tuple)

    # fetch the list of candidate games from the game history
    # we need to look at losing and drawing games so that we can thoroughly explore the action space
    # we must avoid overlooking a good move made early that resulted in a draw/loss because of a
    # later bad move - these will be resolved later via backpropagation
    candidate_games = ttt_base.get_games_list(bounds_list)
    #print("best_move: candidate_games =", candidate_games)

    # if there is at least one game that matches the current game state
    if candidate_games != False:

        # this is the list of games that match the transposed game list
        # de-transpose the candidate games to get the right cell for the next move
        # get a list of the matching detransposition games of the current game
        reoriented_candidates = reorient_games(tg_list, candidate_games)
        #print("best_move: number of reoriented_candidates games = ", len(reoriented_candidates))
        #print("best_move: number of candidate games = ", len(candidate_games))
        #print('best_move: reoriented_candidates =', reoriented_candidates)
        #print('best_move: candidate_games =', candidate_games)

        maximin_list = []

        # iterate through the game candidates
        for this_game in range(len(reoriented_candidates)):
            these_probs = []
            # get the probability element for the next move of this game candidate
            these_probs = reoriented_candidates[this_game]["probs"][num_moves].copy()
            # tack on the cell # of the move
            these_probs.append(reoriented_candidates[this_game]["game"][num_moves])
            # append the game submission data to the list to be submitted to maximin
            maximin_list.append(these_probs)
            #print("maximin_list:", maximin_list)

        # send the list of probabilities of the detransposed recorded games for the next move
        recommended_move = maximin(maximin_list)
        #print("best_move: move = ", recommended_move)

        return recommended_move

    else:
        # there are no matching games in the game history
        #print("best_move: random choice...")
        # return random_move(this_board)

        # estimate the optimal next move
        optimal_move = probs_calc.calc_next_move(this_board)
        #print("This board =", this_board)
        #print("Calculating optimal move =", optimal_move)

        return optimal_move
[((925, 957), 'ttt_package.libs.compare.get_transposed_games', 'get_transposed_games', (['this_board'], {}), '(this_board)\n', (945, 957), False, 'from ttt_package.libs.compare import get_transposed_games, reorient_games\n'), ((1217, 1264), 'ttt_package.libs.calc_game_bound.calc_game_bound', 'calc_game_bound', (["tgame['transpose']", 'agent', '"""L"""'], {}), "(tgame['transpose'], agent, 'L')\n", (1232, 1264), False, 'from ttt_package.libs.calc_game_bound import calc_game_bound\n'), ((1287, 1334), 'ttt_package.libs.calc_game_bound.calc_game_bound', 'calc_game_bound', (["tgame['transpose']", 'agent', '"""U"""'], {}), "(tgame['transpose'], agent, 'U')\n", (1302, 1334), False, 'from ttt_package.libs.calc_game_bound import calc_game_bound\n'), ((2311, 2351), 'ttt_package.libs.compare.reorient_games', 'reorient_games', (['tg_list', 'candidate_games'], {}), '(tg_list, candidate_games)\n', (2325, 2351), False, 'from ttt_package.libs.compare import get_transposed_games, reorient_games\n'), ((3463, 3484), 'ttt_package.libs.maxi_min.maximin', 'maximin', (['maximin_list'], {}), '(maximin_list)\n', (3470, 3484), False, 'from ttt_package.libs.maxi_min import maximin\n')]
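The matching step in best_move compares the current position against all eight symmetries of the 3x3 board before querying the game history (get_transposed_games itself is not shown in this record). A minimal illustrative sketch of generating those eight cell-index mappings, assuming cells are numbered 0-8 row by row; the helper names here are hypothetical:

def rotate90(cells):
    # 90-degree clockwise rotation: new (row, col) reads old (2 - col, row)
    return [cells[3 * (2 - c) + r] for r in range(3) for c in range(3)]

def mirror(cells):
    # horizontal mirror: new (row, col) reads old (row, 2 - col)
    return [cells[3 * r + (2 - c)] for r in range(3) for c in range(3)]

def all_symmetries(cells):
    # 4 rotations, each with and without a mirror -> 8 boards in total
    results = []
    current = list(cells)
    for _ in range(4):
        results.append(current)
        results.append(mirror(current))
        current = rotate90(current)
    return results

# Starting from list(range(9)), each result is a permutation of cell numbers,
# which can be applied move by move to a recorded game.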
paser4se/bbxyard
yard/skills/66-python/cookbook/yvhai/demo/mt/raw_thread.py
d09bc6efb75618b2cef047bad9c8b835043446cb
#!/usr/bin/env python3
# Python thread test

import _thread
import time

from yvhai.demo.base import YHDemo


def print_time(thread_name, interval, times):
    for cnt in range(times):
        time.sleep(interval)
        print(" -- %s: %s" % (thread_name, time.ctime(time.time())))


class RawThreadDemo(YHDemo):

    def __init__(self):
        super(RawThreadDemo, self).__init__('_thread')

    @staticmethod
    def main():
        try:
            _thread.start_new_thread(print_time, ("Thread-01", 1, 10))
            _thread.start_new_thread(print_time, ("Thread-02", 2, 6))
        except:
            print("Error: unable to start thread")

        # the main thread waits forever
        while 1:
            pass

    @staticmethod
    def demo(args=[]):
        RawThreadDemo.main()


if __name__ == '__main__':
    RawThreadDemo.demo()
[((187, 207), 'time.sleep', 'time.sleep', (['interval'], {}), '(interval)\n', (197, 207), False, 'import time\n'), ((447, 505), '_thread.start_new_thread', '_thread.start_new_thread', (['print_time', "('Thread-01', 1, 10)"], {}), "(print_time, ('Thread-01', 1, 10))\n", (471, 505), False, 'import _thread\n'), ((518, 575), '_thread.start_new_thread', '_thread.start_new_thread', (['print_time', "('Thread-02', 2, 6)"], {}), "(print_time, ('Thread-02', 2, 6))\n", (542, 575), False, 'import _thread\n'), ((262, 273), 'time.time', 'time.time', ([], {}), '()\n', (271, 273), False, 'import time\n')]
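The demo above drives the low-level _thread module and keeps the main thread alive with a busy loop. For comparison, a minimal sketch of the same two timed printers written with the higher-level threading module (not part of the original repository); join() replaces the infinite wait:

import threading
import time

def print_time(thread_name, interval, times):
    for _ in range(times):
        time.sleep(interval)
        print(" -- %s: %s" % (thread_name, time.ctime()))

workers = [
    threading.Thread(target=print_time, args=("Thread-01", 1, 10)),
    threading.Thread(target=print_time, args=("Thread-02", 2, 6)),
]
for worker in workers:
    worker.start()
for worker in workers:
    worker.join()  # block until both printers finish instead of spinning forever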
praneethgb/rasa
rasa/utils/tensorflow/constants.py
5bf227f165d0b041a367d2c0bbf712ebb6a54792
# constants for configuration parameters of our tensorflow models

LABEL = "label"
IDS = "ids"
# LABEL_PAD_ID is used to pad multi-label training examples.
# It should be < 0 to avoid index out of bounds errors by tf.one_hot.
LABEL_PAD_ID = -1
HIDDEN_LAYERS_SIZES = "hidden_layers_sizes"
SHARE_HIDDEN_LAYERS = "share_hidden_layers"
TRANSFORMER_SIZE = "transformer_size"
NUM_TRANSFORMER_LAYERS = "number_of_transformer_layers"
NUM_HEADS = "number_of_attention_heads"
UNIDIRECTIONAL_ENCODER = "unidirectional_encoder"
KEY_RELATIVE_ATTENTION = "use_key_relative_attention"
VALUE_RELATIVE_ATTENTION = "use_value_relative_attention"
MAX_RELATIVE_POSITION = "max_relative_position"
BATCH_SIZES = "batch_size"
BATCH_STRATEGY = "batch_strategy"
EPOCHS = "epochs"
RANDOM_SEED = "random_seed"
LEARNING_RATE = "learning_rate"
DENSE_DIMENSION = "dense_dimension"
CONCAT_DIMENSION = "concat_dimension"
EMBEDDING_DIMENSION = "embedding_dimension"
ENCODING_DIMENSION = "encoding_dimension"
SIMILARITY_TYPE = "similarity_type"
LOSS_TYPE = "loss_type"
NUM_NEG = "number_of_negative_examples"
MAX_POS_SIM = "maximum_positive_similarity"
MAX_NEG_SIM = "maximum_negative_similarity"
USE_MAX_NEG_SIM = "use_maximum_negative_similarity"
SCALE_LOSS = "scale_loss"
REGULARIZATION_CONSTANT = "regularization_constant"
NEGATIVE_MARGIN_SCALE = "negative_margin_scale"
DROP_RATE = "drop_rate"
DROP_RATE_ATTENTION = "drop_rate_attention"
DROP_RATE_DIALOGUE = "drop_rate_dialogue"
DROP_RATE_LABEL = "drop_rate_label"
CONSTRAIN_SIMILARITIES = "constrain_similarities"
WEIGHT_SPARSITY = "weight_sparsity"  # Deprecated and superseded by CONNECTION_DENSITY
CONNECTION_DENSITY = "connection_density"
EVAL_NUM_EPOCHS = "evaluate_every_number_of_epochs"
EVAL_NUM_EXAMPLES = "evaluate_on_number_of_examples"
INTENT_CLASSIFICATION = "intent_classification"
ENTITY_RECOGNITION = "entity_recognition"
MASKED_LM = "use_masked_language_model"
SPARSE_INPUT_DROPOUT = "use_sparse_input_dropout"
DENSE_INPUT_DROPOUT = "use_dense_input_dropout"
RANKING_LENGTH = "ranking_length"
MODEL_CONFIDENCE = "model_confidence"
BILOU_FLAG = "BILOU_flag"
RETRIEVAL_INTENT = "retrieval_intent"
USE_TEXT_AS_LABEL = "use_text_as_label"
SOFTMAX = "softmax"
MARGIN = "margin"
AUTO = "auto"
INNER = "inner"
LINEAR_NORM = "linear_norm"
COSINE = "cosine"
CROSS_ENTROPY = "cross_entropy"
BALANCED = "balanced"
SEQUENCE = "sequence"
SEQUENCE_LENGTH = f"{SEQUENCE}_lengths"
SENTENCE = "sentence"
POOLING = "pooling"
MAX_POOLING = "max"
MEAN_POOLING = "mean"
TENSORBOARD_LOG_DIR = "tensorboard_log_directory"
TENSORBOARD_LOG_LEVEL = "tensorboard_log_level"
SEQUENCE_FEATURES = "sequence_features"
SENTENCE_FEATURES = "sentence_features"
FEATURIZERS = "featurizers"
CHECKPOINT_MODEL = "checkpoint_model"
MASK = "mask"
IGNORE_INTENTS_LIST = "ignore_intents_list"
TOLERANCE = "tolerance"
POSITIVE_SCORES_KEY = "positive_scores"
NEGATIVE_SCORES_KEY = "negative_scores"
RANKING_KEY = "label_ranking"
QUERY_INTENT_KEY = "query_intent"
SCORE_KEY = "score"
THRESHOLD_KEY = "threshold"
SEVERITY_KEY = "severity"
NAME = "name"
EPOCH_OVERRIDE = "epoch_override"
[]
GamesCreatorsClub/GCC-Rover
client/canyons-of-mars/maze.py
25a69f62a1bb01fc421924ec39f180f50d6a640b
# # Copyright 2016-2019 Games Creators Club # # MIT License # import math import pyroslib import pyroslib.logging import time from pyroslib.logging import log, LOG_LEVEL_ALWAYS, LOG_LEVEL_INFO, LOG_LEVEL_DEBUG from rover import WheelOdos, WHEEL_NAMES from rover import normaiseAngle, angleDiference from challenge_utils import Action, PID SQRT2 = math.sqrt(2) PIhalf = math.pi / 2 class MazeAttitude: UNKNOWN = 0 LEFT_WALL = 1 RIGHT_WALL = 2 FRONT_WALL = 4 BACK_WALL = 8 NO_GAP = 0 FORWARD_GAP = 1 SIDE_GAP = 2 POINTS = [0, 45, 90, 135, 180, 225, 270, 315] WALLS = [90, 270, 0, 180] L0_45 = 0 L45_90 = 45 L90_135 = 90 L135_180 = 135 L180_225 = 180 L225_270 = 225 L270_315 = 270 L315_0 = 315 LINES = [L0_45, L45_90, L90_135, L135_180, L180_225, L225_270, L270_315, L315_0] ANGLE_TOLLERANCE = 1.075 @staticmethod def normAngle(a): if a > PIhalf: a = a - math.pi elif a <= -PIhalf: a = a + math.pi return a class Line: def __init__(self, line_index, long_point_index, short_point_index, factor, adjust): self.line_index = line_index self.short_point_index = short_point_index self.long_point_index = long_point_index self.factor = factor self.adjust = adjust self.angle = None def calcAngle(self, distances): long_distance = distances[self.long_point_index] short_distance = distances[self.short_point_index] if long_distance is not None and short_distance is not None: lsqrt2 = long_distance / SQRT2 self.angle = MazeAttitude.normAngle(math.atan2(lsqrt2, lsqrt2 - short_distance) * self.factor + self.adjust) else: self.angle = None class Wall: def __init__(self, distance_sensor_angle, distance_sensor_index, wall_point_kind, left_mid_point_index, left_point_index, mid_point_index, right_point_index): self.ds_angle = distance_sensor_angle self.ds_index = distance_sensor_index self.wall_point_kind = wall_point_kind self.left_mid_point_index = left_mid_point_index self.left_point_index = left_point_index self.mid_point_index = mid_point_index self.right_point_index = right_point_index self.is_front_or_back = self.ds_angle == 0 or self.ds_angle == 180 self.selected_line = None self.angle = None self.distance = None def setAngle(self, angle, distances): self.angle = angle distance = distances[self.mid_point_index] if distance < 1: self.distance = 0 else: if self.is_front_or_back: self.distance = abs(int(math.sin(angle) * distance)) else: self.distance = abs(int(math.cos(angle) * distance)) def setAngleAndDistance(self, angle, distance): self.angle = angle self.distance = distance def tryFindingWall(self, distances, lines, points): lmline = lines[self.left_mid_point_index] lline = lines[self.left_point_index] mline = lines[self.mid_point_index] rline = lines[self.right_point_index] dlong1 = distances[lline.long_point_index] dmid = distances[mline.short_point_index] dlong2 = distances[mline.long_point_index] plong1 = points[self.left_point_index] pmid = points[self.mid_point_index] plong2 = points[self.right_point_index] if dlong1 < dlong2 and plong1 != MazeAttitude.UNKNOWN and lmline.angle * MazeAttitude.ANGLE_TOLLERANCE >= lline.angle >= lmline.angle / MazeAttitude.ANGLE_TOLLERANCE: points[self.mid_point_index] = points[lline.long_point_index] angle = MazeAttitude.normAngle(mline.angle - PIhalf) distance = distances[self.right_point_index] * abs(math.sin(mline.angle) / SQRT2) self.setAngleAndDistance(angle, distance) elif dlong1 >= dlong2 and plong2 != MazeAttitude.UNKNOWN and mline.angle * MazeAttitude.ANGLE_TOLLERANCE >= rline.angle >= mline.angle / MazeAttitude.ANGLE_TOLLERANCE: points[self.mid_point_index] = 
points[rline.long_point_index] angle = MazeAttitude.normAngle(mline.angle + PIhalf) distance = distances[self.left_point_index] * abs(math.sin(mline.angle) / SQRT2) self.setAngleAndDistance(angle, distance) elif lline.angle is not None and mline.angle is not None: if lline.angle * MazeAttitude.ANGLE_TOLLERANCE >= mline.angle >= lline.angle / MazeAttitude.ANGLE_TOLLERANCE: if plong1 == MazeAttitude.UNKNOWN: points[self.left_point_index] = self.wall_point_kind if pmid == MazeAttitude.UNKNOWN: points[self.mid_point_index] = self.wall_point_kind if plong2 == MazeAttitude.UNKNOWN: points[self.right_point_index] = self.wall_point_kind self.setAngle(mline.angle, distances) else: if dlong1 < dlong2 and plong1 == MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN: points[self.left_point_index] = self.wall_point_kind points[self.mid_point_index] = self.wall_point_kind self.setAngle(lline.angle, distances) elif dlong1 >= dlong2 and plong2 == MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN: points[self.mid_point_index] = self.wall_point_kind points[self.right_point_index] = self.wall_point_kind self.setAngle(mline.angle, distances) elif plong1 == MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN and plong2 != MazeAttitude.UNKNOWN: points[self.left_point_index] = self.wall_point_kind points[self.mid_point_index] = self.wall_point_kind self.setAngle(lline.angle, distances) elif plong1 != MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN and plong2 == MazeAttitude.UNKNOWN: points[self.mid_point_index] = self.wall_point_kind points[self.right_point_index] = self.wall_point_kind self.setAngle(mline.angle, distances) elif lline.angle is not None and plong1 == MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN: points[self.left_point_index] = self.wall_point_kind points[self.mid_point_index] = self.wall_point_kind self.setAngle(lline.angle, distances) elif mline.angle is not None and pmid == MazeAttitude.UNKNOWN and plong2 == MazeAttitude.UNKNOWN: points[self.mid_point_index] = self.wall_point_kind points[self.right_point_index] = self.wall_point_kind self.setAngle(mline.angle, distances) def __init__(self): self.lines = {self.L315_0: self.Line(self.L315_0, 315, 0, -1, math.pi), self.L0_45: self.Line(self.L0_45, 45, 0, 1, -math.pi), self.L45_90: self.Line(self.L45_90, 45, 90, -1, PIhalf), self.L90_135: self.Line(self.L90_135, 135, 90, 1, -PIhalf), self.L135_180: self.Line(self.L135_180, 135, 180, -1, math.pi), self.L180_225: self.Line(self.L180_225, 225, 180, 1, -math.pi), self.L225_270: self.Line(self.L225_270, 225, 270, -1, PIhalf), self.L270_315: self.Line(self.L270_315, 315, 270, 1, -PIhalf)} self.right_wall = self.Wall(90, 2, self.RIGHT_WALL, 0, 45, 90, 135) self.left_wall = self.Wall(270, 6, self.LEFT_WALL, 180, 225, 270, 315) self.front_wall = self.Wall(0, 0, self.FRONT_WALL, 270, 315, 0, 45) self.back_wall = self.Wall(180, 4, self.BACK_WALL, 90, 135, 180, 225) self.left_gap = self.NO_GAP self.right_gap = self.NO_GAP self.walls = {self.right_wall.ds_angle: self.right_wall, self.left_wall.ds_angle: self.left_wall, self.front_wall.ds_angle: self.front_wall, self.back_wall.ds_angle: self.back_wall} self.points = {0: 0, 45: 0, 90: 0, 135: 0, 180: 0, 225: 0, 270: 0, 315: 0} self.distances = {0: 0, 45: 0, 90: 0, 135: 0, 180: 0, 225: 0, 270: 0, 315: 0} def calculate(self, state): def getPointDistance(state, angle): distance = state.radar.radar[angle] status = state.radar.status[angle] if status == 0: return distance last_distance = state.radar.last_radar[angle] if abs(distance - 
last_distance) < 100: return distance return None def updateUndefinedWall(wall, preferable_wall, wall_adjust, second_wall): if wall.angle is None and self.distances[wall.ds_angle] is not None: if preferable_wall.angle is not None: wall.setAngleAndDistance(self.normAngle(preferable_wall.angle + wall_adjust), self.distances[wall.mid_point_index]) else: wall.setAngleAndDistance(self.normAngle(second_wall.angle - wall_adjust), self.distances[wall.mid_point_index]) self.points[wall.ds_angle] = wall.wall_point_kind self.distances = {p: getPointDistance(state, p) for p in self.POINTS} for line in self.lines: self.lines[line].calcAngle(self.distances) wls = [self.walls[w_ds_angle] for w_ds_angle in self.WALLS if self.distances[w_ds_angle] is not None] wall_processing_order = sorted(wls, key=lambda wall: self.distances[wall.ds_angle]) for wall in wall_processing_order: wall.tryFindingWall(self.distances, self.lines, self.points) updateUndefinedWall(self.front_wall, self.right_wall, -PIhalf, self.left_wall) updateUndefinedWall(self.back_wall, self.right_wall, PIhalf, self.left_wall) updateUndefinedWall(self.right_wall, self.front_wall, PIhalf, self.back_wall) updateUndefinedWall(self.left_wall, self.front_wall, -PIhalf, self.back_wall) # TODO calc gaps class MoveForwardOnOdo(Action): def __init__(self, agent, stop_action=None): super(MoveForwardOnOdo, self).__init__(agent) self.stop_action = stop_action self.required_odo = {'fl': 0, 'fr': 0, 'bl': 0, 'br': 0} def setRequiredOdo(self, distance): for wheel_name in WHEEL_NAMES: self.required_odo[wheel_name] = distance def start(self): super(MoveForwardOnOdo, self).start() state = self.rover.getRoverState() for wheel in self.required_odo: self.required_odo[wheel] = WheelOdos.normaliseOdo(state.wheel_odos[wheel] + self.required_odo[wheel]) log(LOG_LEVEL_DEBUG, "Reset odo to " + str(self.required_odo) + "; starting...") self.rover.command(pyroslib.publish, 300, 120) # pyroslib.publish("move/steer", "300 120") def end(self): super(MoveForwardOnOdo, self).end() def next(self): state = self.rover.getRoverState() do_stop = False log(LOG_LEVEL_DEBUG, "Driving to " + str(self.required_odo)) for wheel_name in WHEEL_NAMES: if state.wheel_odos[wheel_name] >= self.required_odo[wheel_name]: do_stop = True if state.radar.radar[0] < 1.0 or state.radar.radar[315] < 1.0 or state.radar.radar[45] < 1.0: do_stop = True if do_stop: return self.stop_action else: return self def execute(self): pass def getActionName(self): return "Forward ODO" class MazeAction(Action): LEFT = -1 RIGHT = 1 def __init__(self, agent): super(MazeAction, self).__init__(agent) def check_next_action_conditions(self): return self class ChicaneAction(MazeAction): def __init__(self, agent, left_or_right, distance, speed, next_action=None): super(ChicaneAction, self).__init__(agent) self.left_or_right = left_or_right self.distance = distance self.speed = speed self.next_action = next_action if self.left_or_right == MazeAction.RIGHT: self.a1 = 45 self.a2 = 90 self.a3 = 135 else: self.a1 = 315 self.a2 = 270 self.a3 = 225 self.left_corner_action = MazeTurnAroundCornerAction(self, self.LEFT, self.distance, self.speed, self) self.right_corner_action = MazeTurnAroundCornerAction(self, self.RIGHT, self.distance, self.speed, DriverForwardForTimeAction(self, 10, self.speed, None)) def start(self): super(ChicaneAction, self).start() def end(self): super(ChicaneAction, self).end() def next(self): if self.left_or_right == self.LEFT: diagonal_distance = state.radar.radar[45] else: diagonal_distance = 
state.radar.radar[315] if self.left_or_right == self.LEFT and diagonal_distance > 800: log(LOG_LEVEL_INFO, "Found second part of chicane, rfd={: 4d}".format(int(diagonal_distance))) self.left_or_right = self.RIGHT elif self.left_or_right == self.RIGHT and diagonal_distance > 800: log(LOG_LEVEL_INFO, "Found end ofchicane - leaging, rfd={: 4d}".format(int(diagonal_distance))) return self.next_action return self def execute(self): state = self.rover.getRoverState() front_distance = state.radar.radar[0] gain = 60 offset = 150 # Values that worked speed=150, steer=5-7, dist=4 # self.speed = 150 # 150 speed = 50 # mm/second - TODO use odo to update to correct value! speed_steer_fudge_factor = 5 # 5-7 speed_distance_fudge_factor = 4 # 4 min_angle = 1 * math.pi / 180 steer_speed = speed * speed_steer_fudge_factor distance_speed = speed * speed_distance_fudge_factor if self.left_or_right == self.RIGHT: distance = -1000000000 distance_from_wall = state.radar.radar[90] distance_error = distance_from_wall - self.distance angle = 0 if abs(distance_error) < 10: angle = 0 elif distance_error > 0 and distance_error > distance_speed: angle = math.pi / 4 if front_distance < 450: angle += math.pi * (450 - front_distance) / 1800 # divide with 10 and by 180 -> 450/10 - 45deg elif distance_error < 0 and distance_error < -distance_speed: angle = -math.pi / 4 if front_distance < 450: angle -= math.pi * (450 - front_distance) / 1800 # divide with 10 and by 180 -> 450/10 - 45deg else: try: angle = math.asin(distance_error / distance_speed) except BaseException as ex: log(LOG_LEVEL_ALWAYS, "Domain error wa={: 3d} dw={: 4d} de={: 4d} d={: 4d} s={: 3d}".format(int(0), int(distance_from_wall), int(distance_error), int(distance), int(speed))) else: distance = 1000000000 distance_from_wall = state.radar.radar[270] distance_error = distance_from_wall - self.distance angle = 0 if abs(distance_error) < 10: angle = 0 elif distance_error > 0 and distance_error > distance_speed: angle = -math.pi / 4 if front_distance < 450: angle -= math.pi * (450 - front_distance) / 1800 # divide with 10 and by 180 -> 450/10 - 45deg elif distance_error < 0 and distance_error < -distance_speed: angle = math.pi / 4 if front_distance < 450: angle += math.pi * (450 - front_distance) / 1800 # divide with 10 and by 180 -> 450/10 - 45deg else: try: angle = -math.asin(distance_error / distance_speed) except BaseException as ex: log(LOG_LEVEL_ALWAYS, "Domain error wa={: 3d} dw={: 4d} de={: 4d} d={: 4d} s={: 3d}".format(int(0), int(distance_from_wall), int(distance_error), int(distance), int(speed))) distance = int(distance) angle = int(angle * 180 / math.pi) self.rover.command(pyroslib.publish, self.speed, angle, distance) # pyroslib.publish("move/steer", str(distance) + " " + str(self.speed) + " " + str(angle)) wheel_orientations = state.wheel_odos.odos log(LOG_LEVEL_INFO, "{:16.3f}: dist_f={: 4d} wa={: 3d} dist_w={: 4d} dist_err={: 3d} la={: 3d} ld={: 3d} ra={: 3d} rd={: 3d} s_spd={: 3d} dist_spd={: 3d} dist={: 4d} angle={: 3d} heading={: 3d} odo={:7.2f}".format( float(time.time()), int(front_distance), int(0 * 180 / math.pi), int(distance_from_wall), int(distance_error), int(0 * 180 / math.pi), int(0), int(0 * 180 / math.pi), int(0), int(steer_speed), int(distance_speed), int(distance), int(angle), int(state.heading.heading), float(state.wheel_orientations.orientations['fl']) )) def getActionName(self): return "Chicane " + ("L" if self.left_or_right == self.LEFT else "R") class MazeCorridorAction(MazeAction): def __init__(self, agent, 
left_or_right, distance, speed, next_action=None): super(MazeCorridorAction, self).__init__(agent) self.left_or_right = left_or_right self.distance = distance self.speed = speed self.next_action = next_action if self.left_or_right == MazeAction.RIGHT: self.a1 = 45 self.a2 = 90 self.a3 = 135 else: self.a1 = 315 self.a2 = 270 self.a3 = 225 self.left_corner_action = MazeTurnAroundCornerAction(self, self.LEFT, int(self.distance * 1), self.speed, self) self.right_corner_action = MazeTurnAroundCornerAction(self, self.RIGHT, int(self.distance * 1), self.speed, self) # self.right_corner_action = MazeTurnAroundCornerAction(self.odo, self.radar, self.heading, self.RIGHT, self.distance, self.speed, DriverForwardForTimeActoun(10, self.speed, None)) self.been_in_chicane = False def start(self): super(MazeCorridorAction, self).start() self.been_in_chicane = False def end(self): super(MazeCorridorAction, self).end() def next(self): left_diagonal_distance = state.radar.radar[315] front_distance = state.radar.radar[0] if state.radar.status[0] != 0 and abs(state.radar.radar_deltas[0]) > 100: log(LOG_LEVEL_INFO, "Front distance not correct: d={:4d} s={:2d} delta={:4d}".format(front_distance, state.radar.status[0], state.radar.radar_deltas[0])) else: if state.left_front_distance_of_wall > 100 and front_distance < 550: expected_diagonal_distance = 0 if state.left_wall_angle < 0: expected_diagonal_distance = front_distance * 2 * math.cos(math.pi / 4 + state.left_wall_angle) else: expected_diagonal_distance = front_distance * math.cos(state.left_wall_angle) * SQRT2 if False and not self.been_in_chicane and front_distance > 300 and left_diagonal_distance > expected_diagonal_distance * 1.2: log(LOG_LEVEL_INFO, "Found chicane... lfd={: 4d} fd={: 4d} dd={: 4d} ed={: 4d}".format(int(state.left_front_distance_of_wall), int(front_distance), int(left_diagonal_distance), int(expected_diagonal_distance))) self.been_in_chicane = True return ChicaneAction(self, self.LEFT, self.distance, self.speed, next_action=self) else: log(LOG_LEVEL_INFO, "Found corner - turning, lfd={: 4d} fd={: 4d} dd={: 4d} ed={: 4d}".format(int(state.left_front_distance_of_wall), int(front_distance), int(left_diagonal_distance), int(expected_diagonal_distance))) return self.left_corner_action if front_distance < 550 and state.radar.radar_deltas[0] < 0: left_distances = state.radar.radar[270] + state.radar.radar[315] right_distances = state.radar.radar[90] + state.radar.radar[45] if left_distances > right_distances: log(LOG_LEVEL_INFO, "Found corner 2 - turning left, fd={: 4d} ld={: 4d} rd={: 4d}".format(int(front_distance), int(left_distances), int(right_distances))) return self.left_corner_action else: log(LOG_LEVEL_INFO, "Found corner 2 - turning left, fd={: 4d} ld={: 4d} rd={: 4d}".format(int(front_distance), int(left_distances), int(right_distances))) return self.right_corner_action if state.right_front_distance_of_wall > 100 and state.left_front_distance_of_wall > 100 and front_distance < 700: log(LOG_LEVEL_INFO, "Found final corner - turning to finish, rfd={: 4d} fd={: 4d} ".format(int(state.right_front_distance_of_wall), int(front_distance))) return self.right_corner_action return self def execute(self): state = self.rover.getRoverState() left_diagonal_distance = state.radar.radar[315] front_distance = state.radar.radar[0] gain = 60 offset = 150 # Values that worked speed=150, steer=5-7, dist=4 # self.speed = 150 # 150 speed = 50 # mm/second - TODO use odo to update to correct value! 
speed_steer_fudge_factor = 5 # 5-7 speed_distance_fudge_factor = 4 # 4 min_angle = 1 * math.pi / 180 steer_speed = speed * speed_steer_fudge_factor distance_speed = speed * speed_distance_fudge_factor if self.left_or_right == self.RIGHT: wall_angle = state.right_wall_angle if -min_angle < state.right_wall_angle < min_angle: distance = 1000000000 else: distance = steer_speed / state.right_wall_angle if 0 <= distance < 150: distance = 150 elif -150 < distance < 0: distance = -150 distance = -distance distance_from_wall = state.right_wall_distance distance_error = distance_from_wall - self.distance angle = 0 if abs(distance_error) < 10: angle = 0 elif distance_error > 0 and distance_error > distance_speed: angle = math.pi / 4 elif distance_error < 0 and distance_error < -distance_speed: angle = -math.pi / 4 else: try: angle = math.asin(distance_error / distance_speed) except BaseException as ex: log(LOG_LEVEL_ALWAYS, "Domain error wa={: 3d} dw={: 4d} de={: 4d} d={: 4d} s={: 3d}".format(int(wall_angle), int(distance_from_wall), int(distance_error), int(distance), int(speed))) else: wall_angle = state.left_wall_angle if -min_angle < state.left_wall_angle < min_angle: distance = 1000000000 else: distance = steer_speed / state.left_wall_angle if 0 <= distance < 150: distance = 150 elif -150 < distance < 0: distance = -150 distance_from_wall = state.left_wall_distance distance_error = distance_from_wall - self.distance angle = 0 if abs(distance_error) < 10: angle = 0 elif distance_error > 0 and distance_error > distance_speed: angle = -math.pi / 4 elif distance_error < 0 and distance_error < -distance_speed: angle = math.pi / 4 else: try: angle = -math.asin(distance_error / distance_speed) except BaseException as ex: log(LOG_LEVEL_ALWAYS, "Domain error wa={: 3d} dw={: 4d} de={: 4d} d={: 4d} s={: 3d}".format(int(wall_angle), int(distance_from_wall), int(distance_error), int(distance), int(speed))) distance = int(distance) angle = int(angle * 180 / math.pi) self.rover.command(pyroslib.publish, self.speed, angle, distance) # pyroslib.publish("move/steer", str(distance) + " " + str(self.speed) + " " + str(angle)) wheel_orientations = state.wheel_odos.odos # log(LOG_LEVEL_INFO, "{:16.3f}: dist_f={: 4d} wa={: 3d} dist_w={: 4d} dist_err={: 3d} la={: 3d} ld={: 3d} ra={: 3d} rd={: 3d} s_spd={: 3d} dist_spd={: 3d} dist={: 4d} angle={: 3d} heading={: 3d} odo={:7.2f}".format( float(time.time()), int(front_distance), int(wall_angle * 180 / math.pi), int(distance_from_wall), int(distance_error), int(state.left_wall_angle * 180 / math.pi), int(state.left_front_distance_of_wall), int(state.right_wall_angle * 180 / math.pi), int(state.right_front_distance_of_wall), int(steer_speed), int(distance_speed), int(distance), int(angle), int(state.heading.heading), float(state.wheel_orientations.orientations['fl']) )) def getActionName(self): return "Corridor" class MazeTurnAroundCornerAction(MazeAction): def __init__(self, agent, left_or_right, distance, speed, next_action=None): super(MazeTurnAroundCornerAction, self).__init__(agent) self.left_or_right = left_or_right self.distance = distance * (1 if left_or_right == self.RIGHT else -1) self.speed = speed self.start_heading = 0 self.last_heading = 0 self.requested_heading = 0 self.pid = None self.next_action = next_action self.error = 0 def start(self): super(MazeTurnAroundCornerAction, self).start() state = self.rover.getRoverState() self.start_heading = state.heading.heading self.requested_heading = normaiseAngle(self.start_heading + 80 * -(1 if self.left_or_right == 
self.RIGHT else -1)) self.pid = PID(1, 0.0, 0.05, 1, 0, diff_method=angleDiference) self.pid.process(self.requested_heading, self.start_heading) log(LOG_LEVEL_INFO, "Starting to turn around corner at distance {:04d} at speed {:04d}, start heading {:07.3f}, requested heading {:07.3f}".format(self.distance, self.speed, self.start_heading, self.requested_heading)) self.rover.command(pyroslib.publish, self.speed, 0, self.distance) # pyroslib.publish("move/steer", str(self.distance) + " " + str(self.speed)) def end(self): super(MazeTurnAroundCornerAction, self).end() def next(self): heading = state.heading.heading self.error = self.pid.process(self.requested_heading, heading) if self.left_or_right == self.LEFT and self.error > 0: return self elif self.left_or_right == self.RIGHT and self.error < 0: return self else: if self.next_action is not None: log(LOG_LEVEL_INFO, "Finished turning around the corner - invoking next action " + self.next_action.getActionName()) else: log(LOG_LEVEL_INFO, "Finishing turning - no next action spectified.") return self.next_action def execute(self): state = self.rover.getRoverState() heading = state.heading.heading last_heading = self.last_heading self.last_heading = heading log(LOG_LEVEL_INFO, "Turning speed={:04d} h={:07.3f} lh={:07.3f} dh={:07.3f} rh={:07.3f} e={:07.3f}" .format(self.speed, heading, last_heading, angleDiference(heading, last_heading), self.requested_heading, self.error)) def getActionName(self): return "Turn-Around-Corner" class DriverForwardForTimeAction(Action): def __init__(self, agent, time, speed, next_action): super(DriverForwardForTimeAction, self).__init__(agent) self.time = time self.speed = speed self.next_action = next_action def start(self): self.rover.command(pyroslib.publish, self.speed, 0) # pyroslib.publish("move/drive", "0 " + str(self.speed)) log(LOG_LEVEL_INFO, "Going forward for " + str(self.time) + " ticks.") def end(self): pass def next(self): if self.time > 0: self.time -= 1 log(LOG_LEVEL_INFO, "Going forward for " + str(self.time) + " ticks.") return self return self.next_action if __name__ == "__main__": from rover import Radar, RoverState radar_values = {0: 10, 45: SQRT2 * 10, 90: 10, 135: SQRT2 * 10, 180: 10, 225: SQRT2 * 10, 270: 10, 315: SQRT2 * 10} radar_last_values = {0: 10, 45: SQRT2 * 10, 90: 10, 135: SQRT2 * 10, 180: 10, 225: SQRT2 * 10, 270: 10, 315: SQRT2 * 10} radar_status = {0: 0, 45: 0, 90: 0, 135: 0, 180: 0, 225: 0, 270: 0, 315: 0} attitude = MazeAttitude() radar = Radar(0, radar_values, radar_status, Radar(0, radar_last_values, radar_status)) state = RoverState(None, None, None, radar, None, None) def printWallLines(a): if attitude.lines[a].angle is None: print("{:3d} -> point too far - not calculated".format(a)) else: angle = int(attitude.lines[a].angle * 180 / math.pi) point = attitude.points[a] if point is None: print("{:3d} -> line at {:3d} angle".format(a, angle)) else: if point == MazeAttitude.LEFT_WALL: wall = "left wall" elif point == MazeAttitude.RIGHT_WALL: wall = "right wall" elif point == MazeAttitude.FRONT_WALL: wall = "front wall" elif point == MazeAttitude.BACK_WALL: wall = "back wall" else: wall = "no wall" print("{:3d} -> line at {:3d} angle belogs to {:s}".format(a, angle, wall)) def printWall(w): if w.angle is None: print("Wall {:3d} -> is too far - not calculated".format(w.ds_angle)) else: if w.distance is None: print("Wall {:3d} -> has angle {:3d} but is too far - distance not calculated".format(w.ds_angle, int(w.angle * 180 / math.pi))) else: print("Wall {:3d} -> has angle {:3d} and 
is at {:3d}".format(w.ds_angle, int(w.angle * 180 / math.pi), w.distance)) def printWalls(): for p in attitude.points: printWallLines(p) for w in attitude.walls: printWall(w) print("----------------------------------------------------------") # attitude.calculate(state) # printWalls() # # state.radar.radar[0] = 5 # state.radar.radar[45] = SQRT2 * 5 * 0.9 # state.radar.radar[315] = SQRT2 * 17 # state.radar.radar[270] = SQRT2 * 13 # state.radar.radar[225] = SQRT2 * 12 # attitude.calculate(state) # printWalls() state.radar.radar[180] = 50 state.radar.radar[315] = 30 attitude.calculate(state) printWalls()
[((352, 364), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (361, 364), False, 'import math\n'), ((29948, 29995), 'rover.RoverState', 'RoverState', (['None', 'None', 'None', 'radar', 'None', 'None'], {}), '(None, None, None, radar, None, None)\n', (29958, 29995), False, 'from rover import Radar, RoverState\n'), ((26861, 26955), 'rover.normaiseAngle', 'normaiseAngle', (['(self.start_heading + 80 * -(1 if self.left_or_right == self.RIGHT else -1))'], {}), '(self.start_heading + 80 * -(1 if self.left_or_right == self.\n RIGHT else -1))\n', (26874, 26955), False, 'from rover import normaiseAngle, angleDiference\n'), ((26971, 27022), 'challenge_utils.PID', 'PID', (['(1)', '(0.0)', '(0.05)', '(1)', '(0)'], {'diff_method': 'angleDiference'}), '(1, 0.0, 0.05, 1, 0, diff_method=angleDiference)\n', (26974, 27022), False, 'from challenge_utils import Action, PID\n'), ((29892, 29933), 'rover.Radar', 'Radar', (['(0)', 'radar_last_values', 'radar_status'], {}), '(0, radar_last_values, radar_status)\n', (29897, 29933), False, 'from rover import Radar, RoverState\n'), ((11086, 11160), 'rover.WheelOdos.normaliseOdo', 'WheelOdos.normaliseOdo', (['(state.wheel_odos[wheel] + self.required_odo[wheel])'], {}), '(state.wheel_odos[wheel] + self.required_odo[wheel])\n', (11108, 11160), False, 'from rover import WheelOdos, WHEEL_NAMES\n'), ((28546, 28583), 'rover.angleDiference', 'angleDiference', (['heading', 'last_heading'], {}), '(heading, last_heading)\n', (28560, 28583), False, 'from rover import normaiseAngle, angleDiference\n'), ((17277, 17288), 'time.time', 'time.time', ([], {}), '()\n', (17286, 17288), False, 'import time\n'), ((25469, 25480), 'time.time', 'time.time', ([], {}), '()\n', (25478, 25480), False, 'import time\n'), ((28090, 28159), 'pyroslib.logging.log', 'log', (['LOG_LEVEL_INFO', '"""Finishing turning - no next action spectified."""'], {}), "(LOG_LEVEL_INFO, 'Finishing turning - no next action spectified.')\n", (28093, 28159), False, 'from pyroslib.logging import log, LOG_LEVEL_ALWAYS, LOG_LEVEL_INFO, LOG_LEVEL_DEBUG\n'), ((19689, 19734), 'math.cos', 'math.cos', (['(math.pi / 4 + state.left_wall_angle)'], {}), '(math.pi / 4 + state.left_wall_angle)\n', (19697, 19734), False, 'import math\n'), ((1747, 1790), 'math.atan2', 'math.atan2', (['lsqrt2', '(lsqrt2 - short_distance)'], {}), '(lsqrt2, lsqrt2 - short_distance)\n', (1757, 1790), False, 'import math\n'), ((4134, 4155), 'math.sin', 'math.sin', (['mline.angle'], {}), '(mline.angle)\n', (4142, 4155), False, 'import math\n'), ((15291, 15333), 'math.asin', 'math.asin', (['(distance_error / distance_speed)'], {}), '(distance_error / distance_speed)\n', (15300, 15333), False, 'import math\n'), ((19823, 19854), 'math.cos', 'math.cos', (['state.left_wall_angle'], {}), '(state.left_wall_angle)\n', (19831, 19854), False, 'import math\n'), ((23420, 23462), 'math.asin', 'math.asin', (['(distance_error / distance_speed)'], {}), '(distance_error / distance_speed)\n', (23429, 23462), False, 'import math\n'), ((2912, 2927), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (2920, 2927), False, 'import math\n'), ((3007, 3022), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (3015, 3022), False, 'import math\n'), ((4616, 4637), 'math.sin', 'math.sin', (['mline.angle'], {}), '(mline.angle)\n', (4624, 4637), False, 'import math\n'), ((16435, 16477), 'math.asin', 'math.asin', (['(distance_error / distance_speed)'], {}), '(distance_error / distance_speed)\n', (16444, 16477), False, 'import math\n'), ((24609, 24651), 'math.asin', 'math.asin', 
(['(distance_error / distance_speed)'], {}), '(distance_error / distance_speed)\n', (24618, 24651), False, 'import math\n')]
jean1042/monitoring
src/spaceone/monitoring/conf/proto_conf.py
0585a1ea52ec13285eaca81cc5b19fa3f7a1fba4
PROTO = {
    'spaceone.monitoring.interface.grpc.v1.data_source': ['DataSource'],
    'spaceone.monitoring.interface.grpc.v1.metric': ['Metric'],
    'spaceone.monitoring.interface.grpc.v1.project_alert_config': ['ProjectAlertConfig'],
    'spaceone.monitoring.interface.grpc.v1.escalation_policy': ['EscalationPolicy'],
    'spaceone.monitoring.interface.grpc.v1.event_rule': ['EventRule'],
    'spaceone.monitoring.interface.grpc.v1.webhook': ['Webhook'],
    'spaceone.monitoring.interface.grpc.v1.maintenance_window': ['MaintenanceWindow'],
    'spaceone.monitoring.interface.grpc.v1.alert': ['Alert'],
    'spaceone.monitoring.interface.grpc.v1.note': ['Note'],
    'spaceone.monitoring.interface.grpc.v1.event': ['Event'],
}
[]
PirosB3/django
tests/delete_regress/models.py
9b729ddd8f2040722971ccfb3b12f7d8162633d1
from django.contrib.contenttypes.fields import (
    GenericForeignKey, GenericRelation
)
from django.contrib.contenttypes.models import ContentType
from django.db import models


class Award(models.Model):
    name = models.CharField(max_length=25)
    object_id = models.PositiveIntegerField()
    content_type = models.ForeignKey(ContentType)
    content_object = GenericForeignKey()

class AwardNote(models.Model):
    award = models.ForeignKey(Award)
    note = models.CharField(max_length=100)

class Person(models.Model):
    name = models.CharField(max_length=25)
    awards = GenericRelation(Award)

class Book(models.Model):
    pagecount = models.IntegerField()

class Toy(models.Model):
    name = models.CharField(max_length=50)

class Child(models.Model):
    name = models.CharField(max_length=50)
    toys = models.ManyToManyField(Toy, through='PlayedWith')

class PlayedWith(models.Model):
    child = models.ForeignKey(Child)
    toy = models.ForeignKey(Toy)
    date = models.DateField(db_column='date_col')

class PlayedWithNote(models.Model):
    played = models.ForeignKey(PlayedWith)
    note = models.TextField()

class Contact(models.Model):
    label = models.CharField(max_length=100)

class Email(Contact):
    email_address = models.EmailField(max_length=100)

class Researcher(models.Model):
    contacts = models.ManyToManyField(Contact, related_name="research_contacts")

class Food(models.Model):
    name = models.CharField(max_length=20, unique=True)

class Eaten(models.Model):
    food = models.ForeignKey(Food, to_field="name")
    meal = models.CharField(max_length=20)

# Models for #15776
class Policy(models.Model):
    policy_number = models.CharField(max_length=10)

class Version(models.Model):
    policy = models.ForeignKey(Policy)

class Location(models.Model):
    version = models.ForeignKey(Version, blank=True, null=True)

class Item(models.Model):
    version = models.ForeignKey(Version)
    location = models.ForeignKey(Location, blank=True, null=True)

# Models for #16128
class File(models.Model):
    pass

class Image(File):
    class Meta:
        proxy = True

class Photo(Image):
    class Meta:
        proxy = True

class FooImage(models.Model):
    my_image = models.ForeignKey(Image)

class FooFile(models.Model):
    my_file = models.ForeignKey(File)

class FooPhoto(models.Model):
    my_photo = models.ForeignKey(Photo)

class FooFileProxy(FooFile):
    class Meta:
        proxy = True

class OrgUnit(models.Model):
    name = models.CharField(max_length=64, unique=True)

class Login(models.Model):
    description = models.CharField(max_length=32)
    orgunit = models.ForeignKey(OrgUnit)

class House(models.Model):
    address = models.CharField(max_length=32)

class OrderedPerson(models.Model):
    name = models.CharField(max_length=32)
    lives_in = models.ForeignKey(House)

    class Meta:
        ordering = ['name']
[((218, 249), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(25)'}), '(max_length=25)\n', (234, 249), False, 'from django.db import models\n'), ((266, 295), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {}), '()\n', (293, 295), False, 'from django.db import models\n'), ((315, 345), 'django.db.models.ForeignKey', 'models.ForeignKey', (['ContentType'], {}), '(ContentType)\n', (332, 345), False, 'from django.db import models\n'), ((367, 386), 'django.contrib.contenttypes.fields.GenericForeignKey', 'GenericForeignKey', ([], {}), '()\n', (384, 386), False, 'from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation\n'), ((432, 456), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Award'], {}), '(Award)\n', (449, 456), False, 'from django.db import models\n'), ((468, 500), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (484, 500), False, 'from django.db import models\n'), ((542, 573), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(25)'}), '(max_length=25)\n', (558, 573), False, 'from django.db import models\n'), ((587, 609), 'django.contrib.contenttypes.fields.GenericRelation', 'GenericRelation', (['Award'], {}), '(Award)\n', (602, 609), False, 'from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation\n'), ((654, 675), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (673, 675), False, 'from django.db import models\n'), ((714, 745), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (730, 745), False, 'from django.db import models\n'), ((786, 817), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (802, 817), False, 'from django.db import models\n'), ((829, 878), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Toy'], {'through': '"""PlayedWith"""'}), "(Toy, through='PlayedWith')\n", (851, 878), False, 'from django.db import models\n'), ((925, 949), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Child'], {}), '(Child)\n', (942, 949), False, 'from django.db import models\n'), ((960, 982), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Toy'], {}), '(Toy)\n', (977, 982), False, 'from django.db import models\n'), ((994, 1032), 'django.db.models.DateField', 'models.DateField', ([], {'db_column': '"""date_col"""'}), "(db_column='date_col')\n", (1010, 1032), False, 'from django.db import models\n'), ((1084, 1113), 'django.db.models.ForeignKey', 'models.ForeignKey', (['PlayedWith'], {}), '(PlayedWith)\n', (1101, 1113), False, 'from django.db import models\n'), ((1125, 1143), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1141, 1143), False, 'from django.db import models\n'), ((1187, 1219), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1203, 1219), False, 'from django.db import models\n'), ((1264, 1297), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1281, 1297), False, 'from django.db import models\n'), ((1347, 1412), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Contact'], {'related_name': '"""research_contacts"""'}), "(Contact, related_name='research_contacts')\n", (1369, 1412), False, 'from django.db import models\n'), ((1452, 1496), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': 
'(20)', 'unique': '(True)'}), '(max_length=20, unique=True)\n', (1468, 1496), False, 'from django.db import models\n'), ((1537, 1577), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Food'], {'to_field': '"""name"""'}), "(Food, to_field='name')\n", (1554, 1577), False, 'from django.db import models\n'), ((1589, 1620), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (1605, 1620), False, 'from django.db import models\n'), ((1693, 1724), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)'}), '(max_length=10)\n', (1709, 1724), False, 'from django.db import models\n'), ((1769, 1794), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Policy'], {}), '(Policy)\n', (1786, 1794), False, 'from django.db import models\n'), ((1841, 1890), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Version'], {'blank': '(True)', 'null': '(True)'}), '(Version, blank=True, null=True)\n', (1858, 1890), False, 'from django.db import models\n'), ((1933, 1959), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Version'], {}), '(Version)\n', (1950, 1959), False, 'from django.db import models\n'), ((1975, 2025), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Location'], {'blank': '(True)', 'null': '(True)'}), '(Location, blank=True, null=True)\n', (1992, 2025), False, 'from django.db import models\n'), ((2248, 2272), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Image'], {}), '(Image)\n', (2265, 2272), False, 'from django.db import models\n'), ((2318, 2341), 'django.db.models.ForeignKey', 'models.ForeignKey', (['File'], {}), '(File)\n', (2335, 2341), False, 'from django.db import models\n'), ((2389, 2413), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Photo'], {}), '(Photo)\n', (2406, 2413), False, 'from django.db import models\n'), ((2524, 2568), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'unique': '(True)'}), '(max_length=64, unique=True)\n', (2540, 2568), False, 'from django.db import models\n'), ((2616, 2647), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)'}), '(max_length=32)\n', (2632, 2647), False, 'from django.db import models\n'), ((2662, 2688), 'django.db.models.ForeignKey', 'models.ForeignKey', (['OrgUnit'], {}), '(OrgUnit)\n', (2679, 2688), False, 'from django.db import models\n'), ((2732, 2763), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)'}), '(max_length=32)\n', (2748, 2763), False, 'from django.db import models\n'), ((2812, 2843), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)'}), '(max_length=32)\n', (2828, 2843), False, 'from django.db import models\n'), ((2859, 2883), 'django.db.models.ForeignKey', 'models.ForeignKey', (['House'], {}), '(House)\n', (2876, 2883), False, 'from django.db import models\n')]
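The Award/Person pair above demonstrates Django's contenttypes machinery: Award stores a content_type plus object_id and exposes them through a GenericForeignKey, while Person points back with a GenericRelation. A minimal usage sketch, assuming the app containing these models is installed and migrated:

from django.contrib.contenttypes.models import ContentType

person = Person.objects.create(name="Ada")
award = Award.objects.create(
    name="Pioneer",
    content_type=ContentType.objects.get_for_model(Person),
    object_id=person.pk,
)
# The GenericForeignKey resolves back to the person,
# and the reverse GenericRelation sees the same row.
assert award.content_object == person
assert list(person.awards.all()) == [award]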
TheoSaify/Yolo-Detector
All_Program.py
f1ac387370982de323a4fc09109c57736b8ce8d6
import cv2
from cv2 import *
import numpy as np
from matplotlib import pyplot as plt


############################### SIFT MATCH Function #################################
def SIFTMATCH(img1, img2):
    # Initiate SIFT detector
    sift = cv2.xfeatures2d.SIFT_create()

    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)

    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    # store all the good matches as per Lowe's ratio test.
    good = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good.append(m)

    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        matchesMask = mask.ravel().tolist()

        h, w = img1.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)

        img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
    else:
        print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
        matchesMask = None

    draw_params = dict(matchColor=(0, 255, 0),  # draw matches in green color
                       singlePointColor=None,
                       matchesMask=matchesMask,  # draw only inliers
                       flags=2)

    img3 = cv2.drawMatches(img1, kp1, img2, kp2, good, None, **draw_params)

    cv2.moveWindow('output', 150, 150)  # Move it to (150,150)
    cv2.imshow('output', img3)
    cv2.waitKey(0)  # The function waits for specified milliseconds for any keyboard event
    cv2.destroyAllWindows()  # cv2.destroyAllWindows() simply destroys all the windows we created


###################################################################################################
################################# Function #########################
def CercleDetection(img1):
    # Read Image
    raw_image = cv2.imread(img1)

    # Bilateral filtering forms a very good way to preserve edges. It is a non-linear filter and helps reduce noise.
    # The parameters used are: the image, window size for averaging the neighbour, sigmaColor (sigma value in the color space).
    bilateral_filtered_image = cv2.bilateralFilter(raw_image, 5, 175, 175)

    # Canny edge detector to detect edges in the image. It takes 3 parameters: image, lower threshold and upper threshold.
    edge_detected_image = cv2.Canny(bilateral_filtered_image, 75, 200)

    # Find Contours
    _, contours, hierarchy = cv2.findContours(edge_detected_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    contour_list = []
    for contour in contours:
        approx = cv2.approxPolyDP(contour, 0.01 * cv2.arcLength(contour, True), True)
        area = cv2.contourArea(contour)
        if (len(approx) > 8) & (len(approx) < 23) & (area > 50000):
            contour_list.append(contour)
            print("area %.3f" % (area))

            M = cv2.moments(contour)
            # calculate x,y coordinate of center
            if M["m00"] != 0:
                cX = int(M["m10"] / M["m00"])
                cY = int(M["m01"] / M["m00"])
            else:
                cX, cY = 0, 0
            cv2.circle(raw_image, (cX, cY), 5, (255, 255, 255), -1)
            cv2.putText(raw_image, "centroid", (cX - 25, cY - 25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

    # Draw Contours of circles
    cv2.drawContours(raw_image, contour_list, -1, (0, 255, 0), 3)

    # Display Images
    cv2.imshow("Objects Detected", raw_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    return cX, cY


############################################################
########################### MAIN #############################

MIN_MATCH_COUNT = 10

e1 = cv2.getTickCount()

# # initialize the camera
# cam = VideoCapture(0)   # 0 -> index of camera
# s, img1 = cam.read()
# ret = cam.set(3,1920);
# ret = cam.set(4,1080);
# if s:    # frame captured without any errors
#     cv2.namedWindow("output", cv2.WINDOW_NORMAL)
#     cv2.imshow("cam-test",img1)
#     waitKey(0)
#     destroyWindow("cam-test")
#     imwrite("Scene.jpg",img1)  # save image
# del(cam)

# Scene image in Grayscale
# imgray = cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)
imgray = cv2.imread('Scene.jpg', 0)  # queryImage

# Reference Piece Image
img1 = cv2.imread('img3.jpg', 0)  # queryImage

# SIFT Algorithm for Object Detection
SIFTMATCH(img1, imgray)

# reference image
cX, cY = CercleDetection('img3.jpg')
print('cX = %.3f , cY =%.3f' % (cX, cY))

# Image Webcam
cX2, cY2 = CercleDetection('img3.jpg')
print('cX2 = %.3f , cY2 =%.3f' % (cX2, cY2))

deltaX = (cX2 - cX)
deltaY = -(cY2 - cY)

# Write X and Y values to File
file = open("values.txt", "w")
file.write("%.3f \n" % deltaX)
file.write("%.3f \n" % deltaY)
file.close()

# Calculate time of execution
e2 = cv2.getTickCount()
time = (e2 - e1) / cv2.getTickFrequency()
print('time needed to execute')
print(time)
[((4277, 4295), 'cv2.getTickCount', 'cv2.getTickCount', ([], {}), '()\n', (4293, 4295), False, 'import cv2\n'), ((4792, 4818), 'cv2.imread', 'cv2.imread', (['"""Scene.jpg"""', '(0)'], {}), "('Scene.jpg', 0)\n", (4802, 4818), False, 'import cv2\n'), ((4869, 4894), 'cv2.imread', 'cv2.imread', (['"""img3.jpg"""', '(0)'], {}), "('img3.jpg', 0)\n", (4879, 4894), False, 'import cv2\n'), ((5432, 5450), 'cv2.getTickCount', 'cv2.getTickCount', ([], {}), '()\n', (5448, 5450), False, 'import cv2\n'), ((252, 281), 'cv2.xfeatures2d.SIFT_create', 'cv2.xfeatures2d.SIFT_create', ([], {}), '()\n', (279, 281), False, 'import cv2\n'), ((588, 638), 'cv2.FlannBasedMatcher', 'cv2.FlannBasedMatcher', (['index_params', 'search_params'], {}), '(index_params, search_params)\n', (609, 638), False, 'import cv2\n'), ((1808, 1872), 'cv2.drawMatches', 'cv2.drawMatches', (['img1', 'kp1', 'img2', 'kp2', 'good', 'None'], {}), '(img1, kp1, img2, kp2, good, None, **draw_params)\n', (1823, 1872), False, 'import cv2\n'), ((1876, 1910), 'cv2.moveWindow', 'cv2.moveWindow', (['"""output"""', '(150)', '(150)'], {}), "('output', 150, 150)\n", (1890, 1910), False, 'import cv2\n'), ((1937, 1963), 'cv2.imshow', 'cv2.imshow', (['"""output"""', 'img3'], {}), "('output', img3)\n", (1947, 1963), False, 'import cv2\n'), ((1968, 1982), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1979, 1982), False, 'import cv2\n'), ((2062, 2085), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2083, 2085), False, 'import cv2\n'), ((2394, 2410), 'cv2.imread', 'cv2.imread', (['img1'], {}), '(img1)\n', (2404, 2410), False, 'import cv2\n'), ((2691, 2734), 'cv2.bilateralFilter', 'cv2.bilateralFilter', (['raw_image', '(5)', '(175)', '(175)'], {}), '(raw_image, 5, 175, 175)\n', (2710, 2734), False, 'import cv2\n'), ((2887, 2931), 'cv2.Canny', 'cv2.Canny', (['bilateral_filtered_image', '(75)', '(200)'], {}), '(bilateral_filtered_image, 75, 200)\n', (2896, 2931), False, 'import cv2\n'), ((2987, 3064), 'cv2.findContours', 'cv2.findContours', (['edge_detected_image', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(edge_detected_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (3003, 3064), False, 'import cv2\n'), ((3906, 3967), 'cv2.drawContours', 'cv2.drawContours', (['raw_image', 'contour_list', '(-1)', '(0, 255, 0)', '(3)'], {}), '(raw_image, contour_list, -1, (0, 255, 0), 3)\n', (3922, 3967), False, 'import cv2\n'), ((4000, 4041), 'cv2.imshow', 'cv2.imshow', (['"""Objects Detected"""', 'raw_image'], {}), "('Objects Detected', raw_image)\n", (4010, 4041), False, 'import cv2\n'), ((4046, 4060), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4057, 4060), False, 'import cv2\n'), ((4066, 4089), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4087, 4089), False, 'import cv2\n'), ((5470, 5492), 'cv2.getTickFrequency', 'cv2.getTickFrequency', ([], {}), '()\n', (5490, 5492), False, 'import cv2\n'), ((1085, 1138), 'cv2.findHomography', 'cv2.findHomography', (['src_pts', 'dst_pts', 'cv2.RANSAC', '(5.0)'], {}), '(src_pts, dst_pts, cv2.RANSAC, 5.0)\n', (1103, 1138), False, 'import cv2\n'), ((1305, 1337), 'cv2.perspectiveTransform', 'cv2.perspectiveTransform', (['pts', 'M'], {}), '(pts, M)\n', (1329, 1337), False, 'import cv2\n'), ((3218, 3242), 'cv2.contourArea', 'cv2.contourArea', (['contour'], {}), '(contour)\n', (3233, 3242), False, 'import cv2\n'), ((3413, 3433), 'cv2.moments', 'cv2.moments', (['contour'], {}), '(contour)\n', (3424, 3433), False, 'import cv2\n'), ((3663, 3718), 'cv2.circle', 'cv2.circle', 
(['raw_image', '(cX, cY)', '(5)', '(255, 255, 255)', '(-1)'], {}), '(raw_image, (cX, cY), 5, (255, 255, 255), -1)\n', (3673, 3718), False, 'import cv2\n'), ((3732, 3842), 'cv2.putText', 'cv2.putText', (['raw_image', '"""centroid"""', '(cX - 25, cY - 25)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(255, 255, 255)', '(2)'], {}), "(raw_image, 'centroid', (cX - 25, cY - 25), cv2.\n FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)\n", (3743, 3842), False, 'import cv2\n'), ((915, 961), 'numpy.float32', 'np.float32', (['[kp1[m.queryIdx].pt for m in good]'], {}), '([kp1[m.queryIdx].pt for m in good])\n', (925, 961), True, 'import numpy as np\n'), ((999, 1045), 'numpy.float32', 'np.float32', (['[kp2[m.trainIdx].pt for m in good]'], {}), '([kp2[m.trainIdx].pt for m in good])\n', (1009, 1045), True, 'import numpy as np\n'), ((1226, 1286), 'numpy.float32', 'np.float32', (['[[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]'], {}), '([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]])\n', (1236, 1286), True, 'import numpy as np\n'), ((1375, 1388), 'numpy.int32', 'np.int32', (['dst'], {}), '(dst)\n', (1383, 1388), True, 'import numpy as np\n'), ((3168, 3196), 'cv2.arcLength', 'cv2.arcLength', (['contour', '(True)'], {}), '(contour, True)\n', (3181, 3196), False, 'import cv2\n')]
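The centroid step in the script above uses the standard image-moment formulas cX = m10/m00 and cY = m01/m00. A minimal self-contained sketch of that idea on a synthetic mask (the circle position and radius are made up purely for illustration; only OpenCV and NumPy are assumed):

import cv2
import numpy as np

# Hypothetical binary mask containing one filled circle centred at (120, 80).
mask = np.zeros((200, 200), dtype=np.uint8)
cv2.circle(mask, (120, 80), 30, 255, -1)

# Image moments of the mask give the centroid of the white region.
M = cv2.moments(mask)
cX = int(M["m10"] / M["m00"]) if M["m00"] != 0 else 0
cY = int(M["m01"] / M["m00"]) if M["m00"] != 0 else 0
print(cX, cY)  # should print values close to (120, 80)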
industrial-optimization-group/researchers-night
apps/UI_phone_mcdm.py
68f2fcb8530032e157badda772a795e1f3bb2c4b
import dash from dash.exceptions import PreventUpdate import dash_core_components as dcc import dash_html_components as html from dash.dependencies import Input, Output, State import dash_bootstrap_components as dbc import dash_table import plotly.express as ex import plotly.graph_objects as go import pandas as pd import numpy as np data = pd.read_csv("./data/Phone_dataset_new.csv", header=0) details = pd.read_csv("./data/Phone_details.csv", header=0) names = details.loc[0] data = data.rename(columns=names) details = details.rename(columns=names) maxi = details.loc[1].astype(int) details_on_card = details.loc[2].astype(int) details_on_card = details.columns[details_on_card == 1] fitness_columns = { "Memory": -1, "RAM": -1, "Camera (MP)": -1, "Price (Euros)": 1, } fitness_data = data[fitness_columns] * maxi[fitness_columns].values external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"] app = dash.Dash( __name__, external_stylesheets=[dbc.themes.LITERA], eager_loading=True, suppress_callback_exceptions=True, ) app.layout = html.Div( children=[ # .container class is fixed, .container.scalable is scalable dbc.Row( [ dbc.Col( html.H1( children="What is your optimal phone?", className="text-center mt-4", ) ) ] ), dbc.Row( [ dbc.Col( children=[ # Top card with details(?) dbc.Card( children=[ dbc.CardBody( [ html.H4( "Researcher's Night Event", className="card-title text-center", ), html.P( ( "This app uses decision support tools to " "quickly and easily find phones which reflect " "the user's desires. Input your preferences " "below. The box on top right shows the phone " "which matches the preferences the best. " "The box on bottom right provides some " "close alternatives." ), className="card-text", ), ] ) ], className="mr-3 ml-3 mb-2 mt-2", ), dbc.Form( [ dbc.FormGroup( children=[ dbc.Label( "Choose desired operating system", html_for="os-choice", ), dbc.RadioItems( options=[ { "label": "Android", "value": "Android", }, {"label": "iOS", "value": "IOS"}, { "label": "No preference", "value": "both", }, ], id="os-choice", value="both", inline=True, # className="text-center mt-4", ), ], className="mr-3 ml-3 mb-2 mt-2", ), dbc.FormGroup( children=[ dbc.Label( "Choose desired Memory capacity (GB)", html_for="memory-choice", ), dcc.Slider( id="memory-choice", min=16, max=256, step=None, included=False, value=256, marks={ 16: "16", 32: "32", 64: "64", 128: "128", 256: "256", }, # className="text-center mt-5", ), ], className="mr-3 ml-3 mb-2 mt-2", ), dbc.FormGroup( children=[ dbc.Label( "Choose desired RAM capacity (GB)", html_for="ram-choice", ), dcc.Slider( id="ram-choice", min=2, max=12, step=1, value=12, included=False, marks={ 2: "2", 3: "3", 4: "4", 5: "5", 6: "6", 7: "7", 8: "8", 9: "9", 10: "10", 11: "11", 12: "12", }, className="text-center mt-5", ), ], className="mr-3 ml-3 mb-2 mt-2", ), dbc.FormGroup( children=[ dbc.Label( "Choose desired camera resolution (MP)", html_for="cam-choice", ), dcc.Slider( id="cam-choice", min=0, max=130, step=1, included=False, value=70, marks={ 0: "0", 10: "10", 30: "30", 50: "50", 70: "70", 90: "90", 110: "110", 130: "130", }, className="text-center mt-5", ), ], className="mr-3 ml-3 mb-2 mt-2", ), dbc.FormGroup( children=[ dbc.Label( "Choose desired budget (Euros)", html_for="cost-choice", ), dcc.Slider( id="cost-choice", min=0, max=1400, step=1, included=False, value=100, marks={ 0: "0", 200: "200", 400: "400", 600: "600", 800: "800", 1000: "1000", 1200: "1200", 1400: "1400", }, className="text-center mt-5", ), ], className="mr-3 ml-3 mb-2 
mt-2", ), ], style={"maxHeight": "560px", "overflow": "auto"}, ), ], width={"size": 5, "offset": 1}, ), dbc.Col( children=[ dbc.Card( children=[ dbc.CardHeader("The best phone for you is:"), dbc.CardBody(id="results"), ], className="mb-4", ), dbc.Card( children=[ dbc.CardHeader("Other great phones:"), dbc.CardBody( id="other-results", children=( [ html.P( html.Span( f"{i}. ", id=f"other-results-list-{i}", ) ) for i in range(2, 6) ] + [ dbc.Tooltip( id=f"other-results-tooltip-{i}", target=f"other-results-list-{i}", placement="right", style={ "maxWidth": 700, "background-color": "white", "color": "white", "border-style": "solid", "border-color": "black", }, ) for i in range(2, 6) ] ), ), ], className="mt-4", ), html.Div(id="tooltips"), ], width={"size": 5, "offset": 0}, className="mb-2 mt-2", ), ] ), dbc.Row([html.Div(id="callback-dump")]), ], ) @app.callback( [ Output("results", "children"), *[Output(f"other-results-list-{i}", "children") for i in range(2, 6)], *[Output(f"other-results-tooltip-{i}", "children") for i in range(2, 6)], ], [ Input(f"{attr}-choice", "value") for attr in ["os", "memory", "ram", "cam", "cost"] ], ) def results(*choices): if choices[0] == "both": choice_data = data elif choices[0] == "IOS": choice_data = data[[True if "IOS" in st else False for st in data["OS"]]] if choices[0] == "Android": choice_data = data[[True if "Android" in st else False for st in data["OS"]]] relevant_data = choice_data[ ["Memory", "RAM", "Camera (MP)", "Price (Euros)",] ].reset_index(drop=True) card_data = choice_data[details_on_card].reset_index(drop=True) maxi = np.asarray([-1, -1, -1, 1]) relevant_data = relevant_data * maxi ideal = relevant_data.min().values nadir = relevant_data.max().values aspirations = choices[1:] * maxi distance = (aspirations - relevant_data) / (ideal - nadir) distance = distance.max(axis=1) distance_order = np.argsort(distance) best = table_from_data(card_data.loc[distance_order.values[0]], choices[1:]) total_number = len(distance_order) if total_number >= 4: others, tooltips = other_options(card_data.loc[distance_order.values[1:5]]) else: others, tooltips = other_options( card_data.loc[distance_order.values[1:total_number]] ) others = others + [f"{i}. -" for i in range(len(others) + 2, 6)] tooltips = tooltips + [None for i in range(len(tooltips) + 2, 6)] return (best, *others, *tooltips) """@app.callback(Output("tooltips", "children"), [Input("callback-dump", "children")]) def tooltips(tooldict): num = len(tooldict["ids"]) content = [] for i in range(num): content.append(dbc.Tooltip(tooldict["tables"][i], target=tooldict["ids"][i])) return content""" def table_from_data(data, choices): # print(choices) to_compare = ["Memory", "RAM", "Camera (MP)", "Price (Euros)"] # print(data[to_compare].values) diff = (data[to_compare].values - choices) * [1, 1, 1, -1] colors = [None, None, None] + ["green" if x >= 0 else "red" for x in diff] # print(np.sign(diff)) return dbc.Table( [ html.Tbody( [ html.Tr( [ html.Th(col), html.Td([str(data[col]),],), html.Td([html.Span(" ▉", style={"color": c,},)],), ] ) for (col, c) in zip(data.index, colors) ] ) ] ) def table_from_data_horizontal(data): header = [html.Thead(html.Tr([html.Th(col) for col in data.index]))] body = [html.Tbody([html.Tr([html.Td(data[col]) for col in data.index])])] return dbc.Table(header + body) def other_options(data): contents = [] tables = [] ids = [] i = 2 for index, row in data.iterrows(): contents.append(f"{i}. 
{row['Model']}") tables.append(table_from_data_horizontal(row)) i = i + 1 return contents, tables if __name__ == "__main__": app.run_server(debug=False)
[((345, 398), 'pandas.read_csv', 'pd.read_csv', (['"""./data/Phone_dataset_new.csv"""'], {'header': '(0)'}), "('./data/Phone_dataset_new.csv', header=0)\n", (356, 398), True, 'import pandas as pd\n'), ((409, 458), 'pandas.read_csv', 'pd.read_csv', (['"""./data/Phone_details.csv"""'], {'header': '(0)'}), "('./data/Phone_details.csv', header=0)\n", (420, 458), True, 'import pandas as pd\n'), ((947, 1068), 'dash.Dash', 'dash.Dash', (['__name__'], {'external_stylesheets': '[dbc.themes.LITERA]', 'eager_loading': '(True)', 'suppress_callback_exceptions': '(True)'}), '(__name__, external_stylesheets=[dbc.themes.LITERA], eager_loading\n =True, suppress_callback_exceptions=True)\n', (956, 1068), False, 'import dash\n'), ((14486, 14513), 'numpy.asarray', 'np.asarray', (['[-1, -1, -1, 1]'], {}), '([-1, -1, -1, 1])\n', (14496, 14513), True, 'import numpy as np\n'), ((14790, 14810), 'numpy.argsort', 'np.argsort', (['distance'], {}), '(distance)\n', (14800, 14810), True, 'import numpy as np\n'), ((16645, 16669), 'dash_bootstrap_components.Table', 'dbc.Table', (['(header + body)'], {}), '(header + body)\n', (16654, 16669), True, 'import dash_bootstrap_components as dbc\n'), ((13663, 13692), 'dash.dependencies.Output', 'Output', (['"""results"""', '"""children"""'], {}), "('results', 'children')\n", (13669, 13692), False, 'from dash.dependencies import Input, Output, State\n'), ((13876, 13908), 'dash.dependencies.Input', 'Input', (['f"""{attr}-choice"""', '"""value"""'], {}), "(f'{attr}-choice', 'value')\n", (13881, 13908), False, 'from dash.dependencies import Input, Output, State\n'), ((13704, 13749), 'dash.dependencies.Output', 'Output', (['f"""other-results-list-{i}"""', '"""children"""'], {}), "(f'other-results-list-{i}', 'children')\n", (13710, 13749), False, 'from dash.dependencies import Input, Output, State\n'), ((13783, 13831), 'dash.dependencies.Output', 'Output', (['f"""other-results-tooltip-{i}"""', '"""children"""'], {}), "(f'other-results-tooltip-{i}', 'children')\n", (13789, 13831), False, 'from dash.dependencies import Input, Output, State\n'), ((13591, 13619), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""callback-dump"""'}), "(id='callback-dump')\n", (13599, 13619), True, 'import dash_html_components as html\n'), ((16516, 16528), 'dash_html_components.Th', 'html.Th', (['col'], {}), '(col)\n', (16523, 16528), True, 'import dash_html_components as html\n'), ((1268, 1345), 'dash_html_components.H1', 'html.H1', ([], {'children': '"""What is your optimal phone?"""', 'className': '"""text-center mt-4"""'}), "(children='What is your optimal phone?', className='text-center mt-4')\n", (1275, 1345), True, 'import dash_html_components as html\n'), ((16588, 16606), 'dash_html_components.Td', 'html.Td', (['data[col]'], {}), '(data[col])\n', (16595, 16606), True, 'import dash_html_components as html\n'), ((16136, 16148), 'dash_html_components.Th', 'html.Th', (['col'], {}), '(col)\n', (16143, 16148), True, 'import dash_html_components as html\n'), ((13387, 13410), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""tooltips"""'}), "(id='tooltips')\n", (13395, 13410), True, 'import dash_html_components as html\n'), ((16244, 16279), 'dash_html_components.Span', 'html.Span', (['""" ▉"""'], {'style': "{'color': c}"}), "(' ▉', style={'color': c})\n", (16253, 16279), True, 'import dash_html_components as html\n'), ((11217, 11261), 'dash_bootstrap_components.CardHeader', 'dbc.CardHeader', (['"""The best phone for you is:"""'], {}), "('The best phone for you is:')\n", (11231, 11261), True, 
'import dash_bootstrap_components as dbc\n'), ((11295, 11321), 'dash_bootstrap_components.CardBody', 'dbc.CardBody', ([], {'id': '"""results"""'}), "(id='results')\n", (11307, 11321), True, 'import dash_bootstrap_components as dbc\n'), ((11532, 11569), 'dash_bootstrap_components.CardHeader', 'dbc.CardHeader', (['"""Other great phones:"""'], {}), "('Other great phones:')\n", (11546, 11569), True, 'import dash_bootstrap_components as dbc\n'), ((1795, 1866), 'dash_html_components.H4', 'html.H4', (['"""Researcher\'s Night Event"""'], {'className': '"""card-title text-center"""'}), '("Researcher\'s Night Event", className=\'card-title text-center\')\n', (1802, 1866), True, 'import dash_html_components as html\n'), ((2039, 2351), 'dash_html_components.P', 'html.P', (['"""This app uses decision support tools to quickly and easily find phones which reflect the user\'s desires. Input your preferences below. The box on top right shows the phone which matches the preferences the best. The box on bottom right provides some close alternatives."""'], {'className': '"""card-text"""'}), '(\n "This app uses decision support tools to quickly and easily find phones which reflect the user\'s desires. Input your preferences below. The box on top right shows the phone which matches the preferences the best. The box on bottom right provides some close alternatives."\n , className=\'card-text\')\n', (2045, 2351), True, 'import dash_html_components as html\n'), ((3265, 3331), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Choose desired operating system"""'], {'html_for': '"""os-choice"""'}), "('Choose desired operating system', html_for='os-choice')\n", (3274, 3331), True, 'import dash_bootstrap_components as dbc\n'), ((3504, 3700), 'dash_bootstrap_components.RadioItems', 'dbc.RadioItems', ([], {'options': "[{'label': 'Android', 'value': 'Android'}, {'label': 'iOS', 'value': 'IOS'},\n {'label': 'No preference', 'value': 'both'}]", 'id': '"""os-choice"""', 'value': '"""both"""', 'inline': '(True)'}), "(options=[{'label': 'Android', 'value': 'Android'}, {'label':\n 'iOS', 'value': 'IOS'}, {'label': 'No preference', 'value': 'both'}],\n id='os-choice', value='both', inline=True)\n", (3518, 3700), True, 'import dash_bootstrap_components as dbc\n'), ((4767, 4841), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Choose desired Memory capacity (GB)"""'], {'html_for': '"""memory-choice"""'}), "('Choose desired Memory capacity (GB)', html_for='memory-choice')\n", (4776, 4841), True, 'import dash_bootstrap_components as dbc\n'), ((5014, 5180), 'dash_core_components.Slider', 'dcc.Slider', ([], {'id': '"""memory-choice"""', 'min': '(16)', 'max': '(256)', 'step': 'None', 'included': '(False)', 'value': '(256)', 'marks': "{(16): '16', (32): '32', (64): '64', (128): '128', (256): '256'}"}), "(id='memory-choice', min=16, max=256, step=None, included=False,\n value=256, marks={(16): '16', (32): '32', (64): '64', (128): '128', (\n 256): '256'})\n", (5024, 5180), True, 'import dash_core_components as dcc\n'), ((6154, 6222), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Choose desired RAM capacity (GB)"""'], {'html_for': '"""ram-choice"""'}), "('Choose desired RAM capacity (GB)', html_for='ram-choice')\n", (6163, 6222), True, 'import dash_bootstrap_components as dbc\n'), ((6395, 6638), 'dash_core_components.Slider', 'dcc.Slider', ([], {'id': '"""ram-choice"""', 'min': '(2)', 'max': '(12)', 'step': '(1)', 'value': '(12)', 'included': '(False)', 'marks': "{(2): '2', (3): '3', (4): '4', (5): '5', (6): '6', (7): 
'7', (8): '8', (9):\n '9', (10): '10', (11): '11', (12): '12'}", 'className': '"""text-center mt-5"""'}), "(id='ram-choice', min=2, max=12, step=1, value=12, included=False,\n marks={(2): '2', (3): '3', (4): '4', (5): '5', (6): '6', (7): '7', (8):\n '8', (9): '9', (10): '10', (11): '11', (12): '12'}, className=\n 'text-center mt-5')\n", (6405, 6638), True, 'import dash_core_components as dcc\n'), ((7852, 7925), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Choose desired camera resolution (MP)"""'], {'html_for': '"""cam-choice"""'}), "('Choose desired camera resolution (MP)', html_for='cam-choice')\n", (7861, 7925), True, 'import dash_bootstrap_components as dbc\n'), ((8098, 8321), 'dash_core_components.Slider', 'dcc.Slider', ([], {'id': '"""cam-choice"""', 'min': '(0)', 'max': '(130)', 'step': '(1)', 'included': '(False)', 'value': '(70)', 'marks': "{(0): '0', (10): '10', (30): '30', (50): '50', (70): '70', (90): '90', (110\n ): '110', (130): '130'}", 'className': '"""text-center mt-5"""'}), "(id='cam-choice', min=0, max=130, step=1, included=False, value=\n 70, marks={(0): '0', (10): '10', (30): '30', (50): '50', (70): '70', (\n 90): '90', (110): '110', (130): '130'}, className='text-center mt-5')\n", (8108, 8321), True, 'import dash_core_components as dcc\n'), ((9400, 9466), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Choose desired budget (Euros)"""'], {'html_for': '"""cost-choice"""'}), "('Choose desired budget (Euros)', html_for='cost-choice')\n", (9409, 9466), True, 'import dash_bootstrap_components as dbc\n'), ((9639, 9885), 'dash_core_components.Slider', 'dcc.Slider', ([], {'id': '"""cost-choice"""', 'min': '(0)', 'max': '(1400)', 'step': '(1)', 'included': '(False)', 'value': '(100)', 'marks': "{(0): '0', (200): '200', (400): '400', (600): '600', (800): '800', (1000):\n '1000', (1200): '1200', (1400): '1400'}", 'className': '"""text-center mt-5"""'}), "(id='cost-choice', min=0, max=1400, step=1, included=False, value\n =100, marks={(0): '0', (200): '200', (400): '400', (600): '600', (800):\n '800', (1000): '1000', (1200): '1200', (1400): '1400'}, className=\n 'text-center mt-5')\n", (9649, 9885), True, 'import dash_core_components as dcc\n'), ((12308, 12540), 'dash_bootstrap_components.Tooltip', 'dbc.Tooltip', ([], {'id': 'f"""other-results-tooltip-{i}"""', 'target': 'f"""other-results-list-{i}"""', 'placement': '"""right"""', 'style': "{'maxWidth': 700, 'background-color': 'white', 'color': 'white',\n 'border-style': 'solid', 'border-color': 'black'}"}), "(id=f'other-results-tooltip-{i}', target=\n f'other-results-list-{i}', placement='right', style={'maxWidth': 700,\n 'background-color': 'white', 'color': 'white', 'border-style': 'solid',\n 'border-color': 'black'})\n", (12319, 12540), True, 'import dash_bootstrap_components as dbc\n'), ((11862, 11911), 'dash_html_components.Span', 'html.Span', (['f"""{i}. """'], {'id': 'f"""other-results-list-{i}"""'}), "(f'{i}. ', id=f'other-results-list-{i}')\n", (11871, 11911), True, 'import dash_html_components as html\n')]
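The results() callback in the record above ranks phones by their largest normalized shortfall from the user's aspirations (a Chebyshev-style achievement score) and picks the smallest score as the best match. A minimal NumPy sketch of that same ranking on made-up data (the column signs follow the app's convention of negating the attributes to be maximized):

import numpy as np

# Hypothetical alternatives: columns are (-Memory, -RAM, -Camera MP, Price)
data = np.array([
    [-64.0, -4.0, -12.0, 300.0],
    [-128.0, -6.0, -48.0, 550.0],
    [-256.0, -12.0, -108.0, 900.0],
])
aspirations = np.array([-128.0, -8.0, -48.0, 400.0])

ideal = data.min(axis=0)   # best achievable value per column
nadir = data.max(axis=0)   # worst value per column

# The largest normalized gap to the aspiration decides the rank (smaller is better).
score = ((aspirations - data) / (ideal - nadir)).max(axis=1)
order = np.argsort(score)
print(order)  # phone indices from best match to worst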
k-j-m/Pyxon
pyxon/utils.py
a7f9b3ce524f2441e952c47acd199dd4024d2322
import pyxon.decode as pd


def unobjectify(obj):
    """
    Turns a python object (must be a class instance)
    into the corresponding JSON data.

    Example:
    >>> @sprop.a  # sprop annotations are needed to tell the
    >>> @sprop.b  # unobjectify function what parameter need
    >>> @sprop.c  # to be written out.
    >>> class Baz(object): pass
    >>> def __init__(self, a, b, c):
    >>>     self.a = a
    >>>     self.b = b
    >>>     self.c = c
    >>>
    >>> baz = Baz(a=1, b=2, c='three')
    >>> unobjectify(baz)
    { 'a':1, 'b':2, 'c':'three' }
    """
    cls = obj.__class__

    # Create empty data
    data = {}

    sprops, cprops = _get_registered_props(cls)

    # Add simple properties
    for p in sprops:
        data[p] = getattr(obj, p)

    # Add calculated data
    for p in cprops:
        f2 = cprops[p][1]
        data[p] = f2(getattr(obj, p))

    data = pd.add_type_property(data, cls)
    return data


def _get_registered_props(cls):
    """
    Returns all of the registered properties for a given class.
    Recursively calls up to parent classes that are inherited from.
    """
    sprops = pd.class_sprops.get(cls, {})  # [name]
    cprops = pd.class_cprops.get(cls, {})  # {name:(fn, inv_fn)}

    if cls in pd.conc_to_abstract:  # {ConcreteClass: (AbstractClass, _)}
        parent_cls = pd.conc_to_abstract[cls][0]
        parent_sprops, parent_cprops = _get_registered_props(parent_cls)

        sprops = list(set(sprops).union(set(parent_sprops)))

        cprops2 = parent_cprops.copy()
        cprops2.update(cprops)
        cprops = cprops2

    return sprops, cprops


def obj(cls):
    """
    Helper function returns a closure turning objectify into a single
    argument function. This cuts down the amount of code needed in class
    annotations by removing the need to write lambda functions.
    """
    return lambda d: objectify(d, cls)


def objectify(data, cls):
    """
    Function takes JSON data and a target class as arguments and returns
    an instance of the class created using the JSON data.

    I'm not sure whether it is a great idea to keep (un)objectify separate
    from the decode module, since they need to access some of the
    module-level parameters.
    """
    # Create empty class
    concrete_cls = pd.conc2(data, cls)
    obj = concrete_cls()

    sprops, cprops = _get_registered_props(cls)

    # Add simple properties from data
    for p in sprops:
        setattr(obj, p, data[p])

    # Add calculated properties from data
    for p in cprops:
        f1 = cprops[p][0]
        setattr(obj, p, f1(data[p]))

    return obj


def transform_map(kfun=lambda x: x, vfun=lambda x: x):
    """
    Function that takes two functions as arguments and returns a function
    that applies those functions over all of the keys and values in a map
    and returns the transformed version of the map.

    kfun: function applied to all keys (default identity)
    vfun: function applied to all values (default identity)

    (k -> k') -> (v -> v') -> ((k, v) -> (k', v'))
    """
    return lambda dct: dict([(kfun(k), vfun(v)) for k, v in dct.items()])


def transform_list(item_decoder=lambda x: x):
    return lambda lst: map(item_decoder, lst)


def identity(x):
    """
    Identity function is needed when performing transformations on maps
    where some operation is needed on either the keys or values, but not both.
    """
    return x
[((910, 941), 'pyxon.decode.add_type_property', 'pd.add_type_property', (['data', 'cls'], {}), '(data, cls)\n', (930, 941), True, 'import pyxon.decode as pd\n'), ((1161, 1189), 'pyxon.decode.class_sprops.get', 'pd.class_sprops.get', (['cls', '{}'], {}), '(cls, {})\n', (1180, 1189), True, 'import pyxon.decode as pd\n'), ((1211, 1239), 'pyxon.decode.class_cprops.get', 'pd.class_cprops.get', (['cls', '{}'], {}), '(cls, {})\n', (1230, 1239), True, 'import pyxon.decode as pd\n'), ((2332, 2351), 'pyxon.decode.conc2', 'pd.conc2', (['data', 'cls'], {}), '(data, cls)\n', (2340, 2351), True, 'import pyxon.decode as pd\n')]
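transform_map above builds a dictionary transformer out of separate key and value functions. A tiny standalone usage sketch (it repeats only the one-line definition from the module so the snippet runs on its own):

def transform_map(kfun=lambda x: x, vfun=lambda x: x):
    return lambda dct: dict([(kfun(k), vfun(v)) for k, v in dct.items()])

upper_keys = transform_map(kfun=str.upper)
print(upper_keys({"a": 1, "b": 2}))  # {'A': 1, 'B': 2}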
sophie685/newfileplzworklord
AxonDeepSeg/segment.py
fbbb03c44dc9e4b0409364b49265f453ac80d3c0
# Segmentation script # ------------------- # This script lets the user segment automatically one or many images based on the default segmentation models: SEM or # TEM. # # Maxime Wabartha - 2017-08-30 # Imports import sys from pathlib import Path import json import argparse from argparse import RawTextHelpFormatter from tqdm import tqdm import pkg_resources import AxonDeepSeg import AxonDeepSeg.ads_utils as ads from AxonDeepSeg.apply_model import axon_segmentation from AxonDeepSeg.ads_utils import convert_path # Global variables SEM_DEFAULT_MODEL_NAME = "default_SEM_model_v1" TEM_DEFAULT_MODEL_NAME = "default_TEM_model_v1" MODELS_PATH = pkg_resources.resource_filename('AxonDeepSeg', 'models') MODELS_PATH = Path(MODELS_PATH) default_SEM_path = MODELS_PATH / SEM_DEFAULT_MODEL_NAME default_TEM_path = MODELS_PATH / TEM_DEFAULT_MODEL_NAME default_overlap = 25 # Definition of the functions def segment_image(path_testing_image, path_model, overlap_value, config, resolution_model, acquired_resolution = None, verbosity_level=0): ''' Segment the image located at the path_testing_image location. :param path_testing_image: the path of the image to segment. :param path_model: where to access the model :param overlap_value: the number of pixels to be used for overlap when doing prediction. Higher value means less border effects but more time to perform the segmentation. :param config: dict containing the configuration of the network :param resolution_model: the resolution the model was trained on. :param verbosity_level: Level of verbosity. The higher, the more information is given about the segmentation process. :return: Nothing. ''' # If string, convert to Path objects path_testing_image = convert_path(path_testing_image) path_model = convert_path(path_model) if path_testing_image.exists(): # Extracting the image name and its folder path from the total path. path_parts = path_testing_image.parts acquisition_name = Path(path_parts[-1]) path_acquisition = Path(*path_parts[:-1]) # Get type of model we are using selected_model = path_model.name # Read image img = ads.imread(str(path_testing_image)) # Generate tmp file fp = open(path_acquisition / '__tmp_segment__.png', 'wb+') img_name_original = acquisition_name.stem if selected_model == "default_TEM_model_v1": ads.imwrite(fp,255-img, format='png') else: ads.imwrite(fp, img, format='png') acquisition_name = Path(fp.name).name segmented_image_name = img_name_original + '_seg-axonmyelin' + '.png' # Performing the segmentation axon_segmentation(path_acquisitions_folders=path_acquisition, acquisitions_filenames=[acquisition_name], path_model_folder=path_model, config_dict=config, ckpt_name='model', inference_batch_size=1, overlap_value=overlap_value, segmentations_filenames=segmented_image_name, resampled_resolutions=resolution_model, verbosity_level=verbosity_level, acquired_resolution=acquired_resolution, prediction_proba_activate=False, write_mode=True) if verbosity_level >= 1: print(("Image {0} segmented.".format(path_testing_image))) # Remove temporary file used for the segmentation fp.close() (path_acquisition / '__tmp_segment__.png').unlink() else: print(("The path {0} does not exist.".format(path_testing_image))) return None def segment_folders(path_testing_images_folder, path_model, overlap_value, config, resolution_model, acquired_resolution = None, verbosity_level=0): ''' Segments the images contained in the image folders located in the path_testing_images_folder. 
:param path_testing_images_folder: the folder where all image folders are located (the images to segment are located in those image folders) :param path_model: where to access the model. :param overlap_value: the number of pixels to be used for overlap when doing prediction. Higher value means less border effects but more time to perform the segmentation. :param config: dict containing the configuration of the network :param resolution_model: the resolution the model was trained on. :param verbosity_level: Level of verbosity. The higher, the more information is given about the segmentation process. :return: Nothing. ''' # If string, convert to Path objects path_testing_images_folder = convert_path(path_testing_images_folder) path_model = convert_path(path_model) # Update list of images to segment by selecting only image files (not already segmented or not masks) img_files = [file for file in path_testing_images_folder.iterdir() if (file.suffix.lower() in ('.png','.jpg','.jpeg','.tif','.tiff')) and (not str(file).endswith(('_seg-axonmyelin.png','_seg-axon.png','_seg-myelin.png','mask.png')))] # Pre-processing: convert to png if not already done and adapt to model contrast for file_ in tqdm(img_files, desc="Segmentation..."): print(path_testing_images_folder / file_) try: height, width, _ = ads.imread(str(path_testing_images_folder / file_)).shape except: try: height, width = ads.imread(str(path_testing_images_folder / file_)).shape except Exception as e: raise e image_size = [height, width] minimum_resolution = config["trainingset_patchsize"] * resolution_model / min(image_size) if acquired_resolution < minimum_resolution: print("EXCEPTION: The size of one of the images ({0}x{1}) is too small for the provided pixel size ({2}).\n".format(height, width, acquired_resolution), "The image size must be at least {0}x{0} after resampling to a resolution of {1} to create standard sized patches.\n".format(config["trainingset_patchsize"], resolution_model), "One of the dimensions of the image has a size of {0} after resampling to that resolution.\n".format(round(acquired_resolution * min(image_size) / resolution_model)), "Image file location: {0}".format(str(path_testing_images_folder / file_)) ) sys.exit(2) selected_model = path_model.name # Read image for conversion img = ads.imread(str(path_testing_images_folder / file_)) # Generate tmpfile for segmentation pipeline fp = open(path_testing_images_folder / '__tmp_segment__.png', 'wb+') img_name_original = file_.stem if selected_model == "default_TEM_model_v1": ads.imwrite(fp,255-img, format='png') else: ads.imwrite(fp,img, format='png') acquisition_name = Path(fp.name).name segmented_image_name = img_name_original + '_seg-axonmyelin' + '.png' axon_segmentation(path_acquisitions_folders=path_testing_images_folder, acquisitions_filenames=[acquisition_name], path_model_folder=path_model, config_dict=config, ckpt_name='model', inference_batch_size=1, overlap_value=overlap_value, segmentations_filenames=[segmented_image_name], acquired_resolution=acquired_resolution, verbosity_level=verbosity_level, resampled_resolutions=resolution_model, prediction_proba_activate=False, write_mode=True) if verbosity_level >= 1: tqdm.write("Image {0} segmented.".format(str(path_testing_images_folder / file_))) # Remove temporary file used for the segmentation fp.close() (path_testing_images_folder / '__tmp_segment__.png').unlink() return None def generate_default_parameters(type_acquisition, new_path): ''' Generates the parameters used for segmentation for the default 
model corresponding to the type_model acquisition. :param type_model: String, the type of model to get the parameters from. :param new_path: Path to the model to use. :return: the config dictionary. ''' # If string, convert to Path objects new_path = convert_path(new_path) # Building the path of the requested model if it exists and was supplied, else we load the default model. if type_acquisition == 'SEM': if (new_path is not None) and new_path.exists(): path_model = new_path else: path_model = MODELS_PATH / SEM_DEFAULT_MODEL_NAME elif type_acquisition == 'TEM': if (new_path is not None) and new_path.exists(): path_model = new_path else: path_model = MODELS_PATH / TEM_DEFAULT_MODEL_NAME path_config_file = path_model / 'config_network.json' config = generate_config_dict(path_config_file) return path_model, config def generate_config_dict(path_to_config_file): ''' Generates the dictionary version of the configuration file from the path where it is located. :param path_to_config: relative path where the file config_network.json is located. :return: dict containing the configuration of the network, or None if no configuration file was found at the mentioned path. ''' # If string, convert to Path objects path_to_config_file = convert_path(path_to_config_file) try: with open(path_to_config_file, 'r') as fd: config_network = json.loads(fd.read()) except: raise ValueError("No configuration file available at this path.") return config_network def generate_resolution(type_acquisition, model_input_size): ''' Generates the resolution to use related to the trained modeL. :param type_acquisition: String, "SEM" or "TEM" :param model_input_size: String or Int, the size of the input. :return: Float, the resolution of the model. ''' dict_size = { "SEM":{ "512":0.1, "256":0.2 }, "TEM":{ "512":0.01 } } return dict_size[str(type_acquisition)][str(model_input_size)] # Main loop def main(argv=None): ''' Main loop. :return: Exit code. 0: Success 2: Invalid argument value 3: Missing value or file ''' print(('AxonDeepSeg v.{}'.format(AxonDeepSeg.__version__))) ap = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter) requiredName = ap.add_argument_group('required arguments') # Setting the arguments of the segmentation requiredName.add_argument('-t', '--type', required=True, choices=['SEM','TEM'], help='Type of acquisition to segment. \n'+ 'SEM: scanning electron microscopy samples. \n'+ 'TEM: transmission electron microscopy samples. ') requiredName.add_argument('-i', '--imgpath', required=True, nargs='+', help='Path to the image to segment or path to the folder \n'+ 'where the image(s) to segment is/are located.') ap.add_argument("-m", "--model", required=False, help='Folder where the model is located. \n'+ 'The default SEM model path is: \n'+str(default_SEM_path)+'\n'+ 'The default TEM model path is: \n'+str(default_TEM_path)+'\n') ap.add_argument('-s', '--sizepixel', required=False, help='Pixel size of the image(s) to segment, in micrometers. \n'+ 'If no pixel size is specified, a pixel_size_in_micrometer.txt \n'+ 'file needs to be added to the image folder path. The pixel size \n'+ 'in that file will be used for the segmentation.', default=None) ap.add_argument('-v', '--verbose', required=False, type=int, choices=list(range(0,4)), help='Verbosity level. \n'+ '0 (default) : Displays the progress bar for the segmentation. \n'+ '1: Also displays the path of the image(s) being segmented. \n'+ '2: Also displays the information about the prediction step \n'+ ' for the segmentation of current sample. 
\n'+ '3: Also displays the patch number being processed in the current sample.', default=0) ap.add_argument('-o', '--overlap', required=False, type=int, help='Overlap value (in pixels) of the patches when doing the segmentation. \n'+ 'Higher values of overlap can improve the segmentation at patch borders, \n'+ 'but also increase the segmentation time. \n'+ 'Default value: '+str(default_overlap)+'\n'+ 'Recommended range of values: [10-100]. \n', default=25) ap._action_groups.reverse() # Processing the arguments args = vars(ap.parse_args(argv)) type_ = str(args["type"]) verbosity_level = int(args["verbose"]) overlap_value = int(args["overlap"]) if args["sizepixel"] is not None: psm = float(args["sizepixel"]) else: psm = None path_target_list = [Path(p) for p in args["imgpath"]] new_path = Path(args["model"]) if args["model"] else None # Preparing the arguments to axon_segmentation function path_model, config = generate_default_parameters(type_, new_path) resolution_model = generate_resolution(type_, config["trainingset_patchsize"]) # Tuple of valid file extensions validExtensions = ( ".jpeg", ".jpg", ".tif", ".tiff", ".png" ) # Going through all paths passed into arguments for current_path_target in path_target_list: if not current_path_target.is_dir(): if current_path_target.suffix.lower() in validExtensions: # Handle cases if no resolution is provided on the CLI if psm == None: # Check if a pixel size file exists, if so read it. if (current_path_target.parent / 'pixel_size_in_micrometer.txt').exists(): resolution_file = open(current_path_target.parent / 'pixel_size_in_micrometer.txt', 'r') psm = float(resolution_file.read()) else: print("ERROR: No pixel size is provided, and there is no pixel_size_in_micrometer.txt file in image folder. ", "Please provide a pixel size (using argument -s), or add a pixel_size_in_micrometer.txt file ", "containing the pixel size value." ) sys.exit(3) # Check that image size is large enough for given resolution to reach minimum patch size after resizing. try: height, width, _ = ads.imread(str(current_path_target)).shape except: try: height, width = ads.imread(str(current_path_target)).shape except Exception as e: raise e image_size = [height, width] minimum_resolution = config["trainingset_patchsize"] * resolution_model / min(image_size) if psm < minimum_resolution: print("EXCEPTION: The size of one of the images ({0}x{1}) is too small for the provided pixel size ({2}).\n".format(height, width, psm), "The image size must be at least {0}x{0} after resampling to a resolution of {1} to create standard sized patches.\n".format(config["trainingset_patchsize"], resolution_model), "One of the dimensions of the image has a size of {0} after resampling to that resolution.\n".format(round(psm * min(image_size) / resolution_model)), "Image file location: {0}".format(current_path_target) ) sys.exit(2) # Performing the segmentation over the image segment_image(current_path_target, path_model, overlap_value, config, resolution_model, acquired_resolution=psm, verbosity_level=verbosity_level) print("Segmentation finished.") else: print("The path(s) specified is/are not image(s). Please update the input path(s) and try again.") break else: # Handle cases if no resolution is provided on the CLI if psm == None: # Check if a pixel size file exists, if so read it. 
if (current_path_target / 'pixel_size_in_micrometer.txt').exists(): resolution_file = open(current_path_target / 'pixel_size_in_micrometer.txt', 'r') psm = float(resolution_file.read()) else: print("ERROR: No pixel size is provided, and there is no pixel_size_in_micrometer.txt file in image folder. ", "Please provide a pixel size (using argument -s), or add a pixel_size_in_micrometer.txt file ", "containing the pixel size value." ) sys.exit(3) # Performing the segmentation over all folders in the specified folder containing acquisitions to segment. segment_folders(current_path_target, path_model, overlap_value, config, resolution_model, acquired_resolution=psm, verbosity_level=verbosity_level) print("Segmentation finished.") sys.exit(0) # Calling the script if __name__ == '__main__': main()
[((652, 708), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""AxonDeepSeg"""', '"""models"""'], {}), "('AxonDeepSeg', 'models')\n", (683, 708), False, 'import pkg_resources\n'), ((723, 740), 'pathlib.Path', 'Path', (['MODELS_PATH'], {}), '(MODELS_PATH)\n', (727, 740), False, 'from pathlib import Path\n'), ((1811, 1843), 'AxonDeepSeg.ads_utils.convert_path', 'convert_path', (['path_testing_image'], {}), '(path_testing_image)\n', (1823, 1843), False, 'from AxonDeepSeg.ads_utils import convert_path\n'), ((1861, 1885), 'AxonDeepSeg.ads_utils.convert_path', 'convert_path', (['path_model'], {}), '(path_model)\n', (1873, 1885), False, 'from AxonDeepSeg.ads_utils import convert_path\n'), ((4787, 4827), 'AxonDeepSeg.ads_utils.convert_path', 'convert_path', (['path_testing_images_folder'], {}), '(path_testing_images_folder)\n', (4799, 4827), False, 'from AxonDeepSeg.ads_utils import convert_path\n'), ((4845, 4869), 'AxonDeepSeg.ads_utils.convert_path', 'convert_path', (['path_model'], {}), '(path_model)\n', (4857, 4869), False, 'from AxonDeepSeg.ads_utils import convert_path\n'), ((5335, 5374), 'tqdm.tqdm', 'tqdm', (['img_files'], {'desc': '"""Segmentation..."""'}), "(img_files, desc='Segmentation...')\n", (5339, 5374), False, 'from tqdm import tqdm\n'), ((8561, 8583), 'AxonDeepSeg.ads_utils.convert_path', 'convert_path', (['new_path'], {}), '(new_path)\n', (8573, 8583), False, 'from AxonDeepSeg.ads_utils import convert_path\n'), ((9693, 9726), 'AxonDeepSeg.ads_utils.convert_path', 'convert_path', (['path_to_config_file'], {}), '(path_to_config_file)\n', (9705, 9726), False, 'from AxonDeepSeg.ads_utils import convert_path\n'), ((10729, 10790), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'RawTextHelpFormatter'}), '(formatter_class=RawTextHelpFormatter)\n', (10752, 10790), False, 'import argparse\n'), ((19040, 19051), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (19048, 19051), False, 'import sys\n'), ((2074, 2094), 'pathlib.Path', 'Path', (['path_parts[-1]'], {}), '(path_parts[-1])\n', (2078, 2094), False, 'from pathlib import Path\n'), ((2122, 2144), 'pathlib.Path', 'Path', (['*path_parts[:-1]'], {}), '(*path_parts[:-1])\n', (2126, 2144), False, 'from pathlib import Path\n'), ((2785, 3247), 'AxonDeepSeg.apply_model.axon_segmentation', 'axon_segmentation', ([], {'path_acquisitions_folders': 'path_acquisition', 'acquisitions_filenames': '[acquisition_name]', 'path_model_folder': 'path_model', 'config_dict': 'config', 'ckpt_name': '"""model"""', 'inference_batch_size': '(1)', 'overlap_value': 'overlap_value', 'segmentations_filenames': 'segmented_image_name', 'resampled_resolutions': 'resolution_model', 'verbosity_level': 'verbosity_level', 'acquired_resolution': 'acquired_resolution', 'prediction_proba_activate': '(False)', 'write_mode': '(True)'}), "(path_acquisitions_folders=path_acquisition,\n acquisitions_filenames=[acquisition_name], path_model_folder=path_model,\n config_dict=config, ckpt_name='model', inference_batch_size=1,\n overlap_value=overlap_value, segmentations_filenames=\n segmented_image_name, resampled_resolutions=resolution_model,\n verbosity_level=verbosity_level, acquired_resolution=\n acquired_resolution, prediction_proba_activate=False, write_mode=True)\n", (2802, 3247), False, 'from AxonDeepSeg.apply_model import axon_segmentation\n'), ((7191, 7664), 'AxonDeepSeg.apply_model.axon_segmentation', 'axon_segmentation', ([], {'path_acquisitions_folders': 'path_testing_images_folder', 'acquisitions_filenames': 
'[acquisition_name]', 'path_model_folder': 'path_model', 'config_dict': 'config', 'ckpt_name': '"""model"""', 'inference_batch_size': '(1)', 'overlap_value': 'overlap_value', 'segmentations_filenames': '[segmented_image_name]', 'acquired_resolution': 'acquired_resolution', 'verbosity_level': 'verbosity_level', 'resampled_resolutions': 'resolution_model', 'prediction_proba_activate': '(False)', 'write_mode': '(True)'}), "(path_acquisitions_folders=path_testing_images_folder,\n acquisitions_filenames=[acquisition_name], path_model_folder=path_model,\n config_dict=config, ckpt_name='model', inference_batch_size=1,\n overlap_value=overlap_value, segmentations_filenames=[\n segmented_image_name], acquired_resolution=acquired_resolution,\n verbosity_level=verbosity_level, resampled_resolutions=resolution_model,\n prediction_proba_activate=False, write_mode=True)\n", (7208, 7664), False, 'from AxonDeepSeg.apply_model import axon_segmentation\n'), ((14322, 14329), 'pathlib.Path', 'Path', (['p'], {}), '(p)\n', (14326, 14329), False, 'from pathlib import Path\n'), ((14371, 14390), 'pathlib.Path', 'Path', (["args['model']"], {}), "(args['model'])\n", (14375, 14390), False, 'from pathlib import Path\n'), ((2513, 2553), 'AxonDeepSeg.ads_utils.imwrite', 'ads.imwrite', (['fp', '(255 - img)'], {'format': '"""png"""'}), "(fp, 255 - img, format='png')\n", (2524, 2553), True, 'import AxonDeepSeg.ads_utils as ads\n'), ((2577, 2611), 'AxonDeepSeg.ads_utils.imwrite', 'ads.imwrite', (['fp', 'img'], {'format': '"""png"""'}), "(fp, img, format='png')\n", (2588, 2611), True, 'import AxonDeepSeg.ads_utils as ads\n'), ((2640, 2653), 'pathlib.Path', 'Path', (['fp.name'], {}), '(fp.name)\n', (2644, 2653), False, 'from pathlib import Path\n'), ((6565, 6576), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (6573, 6576), False, 'import sys\n'), ((6959, 6999), 'AxonDeepSeg.ads_utils.imwrite', 'ads.imwrite', (['fp', '(255 - img)'], {'format': '"""png"""'}), "(fp, 255 - img, format='png')\n", (6970, 6999), True, 'import AxonDeepSeg.ads_utils as ads\n'), ((7023, 7057), 'AxonDeepSeg.ads_utils.imwrite', 'ads.imwrite', (['fp', 'img'], {'format': '"""png"""'}), "(fp, img, format='png')\n", (7034, 7057), True, 'import AxonDeepSeg.ads_utils as ads\n'), ((7085, 7098), 'pathlib.Path', 'Path', (['fp.name'], {}), '(fp.name)\n', (7089, 7098), False, 'from pathlib import Path\n'), ((17273, 17284), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (17281, 17284), False, 'import sys\n'), ((18618, 18629), 'sys.exit', 'sys.exit', (['(3)'], {}), '(3)\n', (18626, 18629), False, 'import sys\n'), ((15969, 15980), 'sys.exit', 'sys.exit', (['(3)'], {}), '(3)\n', (15977, 15980), False, 'import sys\n')]
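A quick worked example of the image-size check in segment_folders above, using the SEM defaults from generate_resolution (512-pixel training patches at a model resolution of 0.1 µm/pixel): a 2000×1500 image needs an acquired pixel size of at least 512 × 0.1 / 1500 ≈ 0.034 µm, because an image acquired at, say, 0.02 µm/pixel would shrink to 0.02 × 1500 / 0.1 = 300 pixels on its short side after resampling to the model resolution, which is smaller than one patch and triggers the exit with code 2.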
aplested/DC_Pyps
tests/test_hedges.py
da33fc7d0e7365044e368488d1c7cbbae7473cc7
from dcstats.hedges import Hedges_d
from dcstats.statistics_EJ import simple_stats as mean_SD

import random
import math


def generate_sample(length, mean, sigma):
    # generate a list of normal distributed samples
    sample = []
    for n in range(length):
        sample.append(random.gauss(mean, sigma))
    return sample


def close_enough(a, b, count_error):
    if math.fabs(a - b) < math.fabs((a + b) / (count_error * 2)):
        return True
    else:
        return False


def gaussian_case(sig):
    sample_size = 200
    count_error = math.sqrt(sample_size)
    m1 = 1
    m2 = 2

    s1 = generate_sample(sample_size, m1, sig)
    s2 = generate_sample(sample_size, m2, sig)

    h_testing = Hedges_d(s1, s2)
    h_testing.hedges_d_unbiased()  # answer is in self.d

    approx_95CI_lower, approx_95CI_upper = h_testing.approx_CI()
    bs_95CI_lower, bs_95CI_upper = h_testing.bootstrap_CI(5000)

    print(mean_SD(s1), mean_SD(s2))
    print("h_testing.d, analytic, correction = ", h_testing.d, (m2 - m1) / sig, h_testing.correction)
    print("lower: approx, bootstrap", approx_95CI_lower, bs_95CI_lower)
    print("upper: approx, bootstrap", approx_95CI_upper, bs_95CI_upper)

    # bootstrap is similar at high d but gives wider intervals at low d
    assert close_enough(approx_95CI_lower, bs_95CI_lower, count_error)
    assert close_enough(approx_95CI_upper, bs_95CI_upper, count_error)
    assert close_enough(h_testing.d, (m2 - m1) / sig, count_error)


### tests

def test_gaussian_case_low():
    gaussian_case(0.2)  # expect d = 5


def test_gaussian_case_med():
    gaussian_case(0.5)  # expect d = 2


def test_gaussian_case_high():
    gaussian_case(1.0)  # expect d = 1, fail
[((551, 573), 'math.sqrt', 'math.sqrt', (['sample_size'], {}), '(sample_size)\n', (560, 573), False, 'import math\n'), ((723, 739), 'dcstats.hedges.Hedges_d', 'Hedges_d', (['s1', 's2'], {}), '(s1, s2)\n', (731, 739), False, 'from dcstats.hedges import Hedges_d\n'), ((373, 389), 'math.fabs', 'math.fabs', (['(a - b)'], {}), '(a - b)\n', (382, 389), False, 'import math\n'), ((393, 431), 'math.fabs', 'math.fabs', (['((a + b) / (count_error * 2))'], {}), '((a + b) / (count_error * 2))\n', (402, 431), False, 'import math\n'), ((959, 970), 'dcstats.statistics_EJ.simple_stats', 'mean_SD', (['s1'], {}), '(s1)\n', (966, 970), True, 'from dcstats.statistics_EJ import simple_stats as mean_SD\n'), ((972, 983), 'dcstats.statistics_EJ.simple_stats', 'mean_SD', (['s2'], {}), '(s2)\n', (979, 983), True, 'from dcstats.statistics_EJ import simple_stats as mean_SD\n'), ((281, 306), 'random.gauss', 'random.gauss', (['mean', 'sigma'], {}), '(mean, sigma)\n', (293, 306), False, 'import random\n')]
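The assertions in the test above compare the Hedges_d estimate against the analytic effect size (m2 - m1) / sigma. For readers without dcstats installed, here is a minimal sketch of the usual pooled-SD Cohen's d with the standard small-sample (Hedges) correction J ≈ 1 - 3/(4n - 9), on data generated the same way; the formula is the textbook approximation, not taken from dcstats itself:

import math
import random

def hedges_g(x, y):
    nx, ny = len(x), len(y)
    mx, my = sum(x) / nx, sum(y) / ny
    vx = sum((v - mx) ** 2 for v in x) / (nx - 1)
    vy = sum((v - my) ** 2 for v in y) / (ny - 1)
    pooled_sd = math.sqrt(((nx - 1) * vx + (ny - 1) * vy) / (nx + ny - 2))
    d = (my - mx) / pooled_sd
    j = 1 - 3 / (4 * (nx + ny) - 9)   # approximate small-sample bias correction
    return d * j

s1 = [random.gauss(1, 0.5) for _ in range(200)]
s2 = [random.gauss(2, 0.5) for _ in range(200)]
print(hedges_g(s1, s2))  # expected to be roughly (2 - 1) / 0.5 = 2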
MustafaAbbas110/FinalProject
src/FYP/fifaRecords/urls.py
30d371f06a8a1875285cfd4a8940ca3610ec1274
from django.urls import path

from . import views

urlpatterns = [
    path('', views.Records, name="fRec"),
]
[((70, 106), 'django.urls.path', 'path', (['""""""', 'views.Records'], {'name': '"""fRec"""'}), "('', views.Records, name='fRec')\n", (74, 106), False, 'from django.urls import path\n')]
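The single pattern above only resolves relative to wherever this URLconf is included. A hypothetical project-level include might look like the following; the "fifa/" prefix and the dotted module path are assumptions for illustration, not taken from the repository:

# Hypothetical project-level urls.py
from django.urls import include, path

urlpatterns = [
    path("fifa/", include("fifaRecords.urls")),
]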
KennethEnevoldsen/spacy-transformers
spacy_transformers/tests/regression/test_spacy_issue6401.py
fa39a94ba276ae3681d14a4b376ea50fadd574b3
import pytest
from spacy.training.example import Example
from spacy.util import make_tempdir
from spacy import util
from thinc.api import Config

TRAIN_DATA = [
    ("I'm so happy.", {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}}),
    ("I'm so angry", {"cats": {"POSITIVE": 0.0, "NEGATIVE": 1.0}}),
]

cfg_string = """
[nlp]
lang = "en"
pipeline = ["transformer","textcat"]

[components]

[components.textcat]
factory = "textcat"

[components.textcat.model]
@architectures = "spacy.TextCatEnsemble.v2"

[components.textcat.model.tok2vec]
@architectures = "spacy-transformers.TransformerListener.v1"
grad_factor = 1.0

[components.textcat.model.tok2vec.pooling]
@layers = "reduce_mean.v1"

[components.transformer]
factory = "transformer"
"""


# Xfail this until the new spaCy rc is up.
@pytest.mark.xfail
def test_transformer_pipeline_textcat():
    """Test that a pipeline with just a transformer+textcat runs and trains properly.
    This used to throw an error because of shape inference issues -
    cf https://github.com/explosion/spaCy/issues/6401"""
    orig_config = Config().from_str(cfg_string)
    nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
    assert nlp.pipe_names == ["transformer", "textcat"]
    train_examples = []
    for text, annotations in TRAIN_DATA:
        train_examples.append(Example.from_dict(nlp.make_doc(text), annotations))
    optimizer = nlp.initialize(get_examples=lambda: train_examples)
    for i in range(2):
        losses = {}
        nlp.update(train_examples, sgd=optimizer, losses=losses)
    doc = nlp("We're interested at underwater basket weaving.")
    cats1 = doc.cats

    # ensure IO goes OK
    with make_tempdir() as d:
        file_path = d / "trained_nlp"
        nlp.to_disk(file_path)
        nlp2 = util.load_model_from_path(file_path)
        doc2 = nlp2("We're interested at underwater basket weaving.")
        cats2 = doc2.cats
        assert cats1 == cats2
[((1181, 1252), 'spacy.util.load_model_from_config', 'util.load_model_from_config', (['orig_config'], {'auto_fill': '(True)', 'validate': '(True)'}), '(orig_config, auto_fill=True, validate=True)\n', (1208, 1252), False, 'from spacy import util\n'), ((1754, 1768), 'spacy.util.make_tempdir', 'make_tempdir', ([], {}), '()\n', (1766, 1768), False, 'from spacy.util import make_tempdir\n'), ((1859, 1895), 'spacy.util.load_model_from_path', 'util.load_model_from_path', (['file_path'], {}), '(file_path)\n', (1884, 1895), False, 'from spacy import util\n'), ((1141, 1149), 'thinc.api.Config', 'Config', ([], {}), '()\n', (1147, 1149), False, 'from thinc.api import Config\n')]
rpacholek/hydra
hydra/client/repl.py
60e3c2eec5ab1fd1dde8e510baa5175173c66a6a
import asyncio

from ..core.common.io import input
from .action_creator import ActionCreator


class REPL:
    def __init__(self, action_queue, config, *args, **kwargs):
        self.action_queue = action_queue
        self.config = config

    async def run(self):
        await asyncio.sleep(1)
        print("Insert command: ")
        action_creator = ActionCreator()
        while True:
            input_data = await input("~> ")
            if not input_data:
                for task in asyncio.all_tasks():
                    task.cancel()
                break
            action = action_creator.parse(*input_data.split())
            if action:
                self.action_queue.push_action(action)
[((280, 296), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (293, 296), False, 'import asyncio\n'), ((495, 514), 'asyncio.all_tasks', 'asyncio.all_tasks', ([], {}), '()\n', (512, 514), False, 'import asyncio\n')]
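The shutdown path in run() above cancels every task returned by asyncio.all_tasks() when the user submits an empty line. A standalone sketch of that cancellation pattern in plain asyncio (none of hydra's own classes are used; the guard against cancelling the current task is an addition for the sketch, the original cancels everything):

import asyncio

async def worker():
    try:
        await asyncio.sleep(3600)
    except asyncio.CancelledError:
        print("worker cancelled")
        raise

async def main():
    task = asyncio.create_task(worker())
    await asyncio.sleep(0.1)
    # Cancel every other running task, as the REPL does on an empty command.
    for t in asyncio.all_tasks():
        if t is not asyncio.current_task():
            t.cancel()
    await asyncio.gather(task, return_exceptions=True)

asyncio.run(main())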
drat/Neural-Voice-Cloning-With-Few-Samples
train_dv3.py
4febde43ccc143fc88d74d5fa0c5a117636778b4
"""Trainining script for seq2seq text-to-speech synthesis model. usage: train.py [options] options: --data-root=<dir> Directory contains preprocessed features. --checkpoint-dir=<dir> Directory where to save model checkpoints [default: checkpoints]. --hparams=<parmas> Hyper parameters [default: ]. --checkpoint=<path> Restore model from checkpoint path if given. --checkpoint-seq2seq=<path> Restore seq2seq model from checkpoint path. --checkpoint-postnet=<path> Restore postnet model from checkpoint path. --train-seq2seq-only Train only seq2seq model. --train-postnet-only Train only postnet model. --restore-parts=<path> Restore part of the model. --log-event-path=<name> Log event path. --reset-optimizer Reset optimizer. --load-embedding=<path> Load embedding from checkpoint. --speaker-id=<N> Use specific speaker of data in case for multi-speaker datasets. -h, --help Show this help message and exit """ from docopt import docopt import sys from os.path import dirname, join from tqdm import tqdm, trange from datetime import datetime # The deepvoice3 model from dv3.deepvoice3_pytorch import frontend, builder import dv3.audio import dv3.lrschedule import torch from torch.utils import data as data_utils from torch.autograd import Variable from torch import nn from torch import optim import torch.backends.cudnn as cudnn from torch.utils import data as data_utils from torch.utils.data.sampler import Sampler import numpy as np from numba import jit from nnmnkwii.datasets import FileSourceDataset, FileDataSource from os.path import join, expanduser import random import librosa.display from matplotlib import pyplot as plt import sys import os from tensorboardX import SummaryWriter from matplotlib import cm from warnings import warn from dv3.hparams import hparams, hparams_debug_string fs = hparams.sample_rate global_step = 0 global_epoch = 0 use_cuda = torch.cuda.is_available() if use_cuda: cudnn.benchmark = False _frontend = None # to be set later def _pad(seq, max_len, constant_values=0): return np.pad(seq, (0, max_len - len(seq)), mode='constant', constant_values=constant_values) def _pad_2d(x, max_len, b_pad=0): x = np.pad(x, [(b_pad, max_len - len(x) - b_pad), (0, 0)], mode="constant", constant_values=0) return x def plot_alignment(alignment, path, info=None): fig, ax = plt.subplots() im = ax.imshow( alignment, aspect='auto', origin='lower', interpolation='none') fig.colorbar(im, ax=ax) xlabel = 'Decoder timestep' if info is not None: xlabel += '\n\n' + info plt.xlabel(xlabel) plt.ylabel('Encoder timestep') plt.tight_layout() plt.savefig(path, format='png') plt.close() class TextDataSource(FileDataSource): def __init__(self, data_root, speaker_id=None): self.data_root = data_root self.speaker_ids = None self.multi_speaker = False # If not None, filter by speaker_id self.speaker_id = speaker_id def collect_files(self): meta = join(self.data_root, "train.txt") with open(meta, "rb") as f: lines = f.readlines() l = lines[0].decode("utf-8").split("|") assert len(l) == 4 or len(l) == 5 self.multi_speaker = len(l) == 5 texts = list(map(lambda l: l.decode("utf-8").split("|")[3], lines)) if self.multi_speaker: speaker_ids = list(map(lambda l: int(l.decode("utf-8").split("|")[-1]), lines)) # Filter by speaker_id # using multi-speaker dataset as a single speaker dataset if self.speaker_id is not None: indices = np.array(speaker_ids) == self.speaker_id texts = list(np.array(texts)[indices]) self.multi_speaker = False return texts return texts, speaker_ids else: return texts def collect_features(self, *args): if 
self.multi_speaker: text, speaker_id = args else: text = args[0] seq = _frontend.text_to_sequence(text, p=hparams.replace_pronunciation_prob) if self.multi_speaker: return np.asarray(seq, dtype=np.int32), int(speaker_id) else: return np.asarray(seq, dtype=np.int32) class _NPYDataSource(FileDataSource): def __init__(self, data_root, col, speaker_id=None): self.data_root = data_root self.col = col self.frame_lengths = [] self.speaker_id = speaker_id def collect_files(self): meta = join(self.data_root, "train.txt") with open(meta, "rb") as f: lines = f.readlines() l = lines[0].decode("utf-8").split("|") assert len(l) == 4 or len(l) == 5 multi_speaker = len(l) == 5 self.frame_lengths = list( map(lambda l: int(l.decode("utf-8").split("|")[2]), lines)) paths = list(map(lambda l: l.decode("utf-8").split("|")[self.col], lines)) paths = list(map(lambda f: join(self.data_root, f), paths)) if multi_speaker and self.speaker_id is not None: speaker_ids = list(map(lambda l: int(l.decode("utf-8").split("|")[-1]), lines)) # Filter by speaker_id # using multi-speaker dataset as a single speaker dataset indices = np.array(speaker_ids) == self.speaker_id paths = list(np.array(paths)[indices]) self.frame_lengths = list(np.array(self.frame_lengths)[indices]) # aha, need to cast numpy.int64 to int self.frame_lengths = list(map(int, self.frame_lengths)) return paths def collect_features(self, path): return np.load(path) class MelSpecDataSource(_NPYDataSource): def __init__(self, data_root, speaker_id=None): super(MelSpecDataSource, self).__init__(data_root, 1, speaker_id) class LinearSpecDataSource(_NPYDataSource): def __init__(self, data_root, speaker_id=None): super(LinearSpecDataSource, self).__init__(data_root, 0, speaker_id) class PartialyRandomizedSimilarTimeLengthSampler(Sampler): """Partially randmoized sampler 1. Sort by lengths 2. Pick a small patch and randomize it 3. 
Permutate mini-batchs """ def __init__(self, lengths, batch_size=16, batch_group_size=None, permutate=True): self.lengths, self.sorted_indices = torch.sort(torch.LongTensor(lengths)) self.batch_size = batch_size if batch_group_size is None: batch_group_size = min(batch_size * 32, len(self.lengths)) if batch_group_size % batch_size != 0: batch_group_size -= batch_group_size % batch_size self.batch_group_size = batch_group_size assert batch_group_size % batch_size == 0 self.permutate = permutate def __iter__(self): indices = self.sorted_indices.clone() batch_group_size = self.batch_group_size s, e = 0, 0 for i in range(len(indices) // batch_group_size): s = i * batch_group_size e = s + batch_group_size random.shuffle(indices[s:e]) # Permutate batches if self.permutate: perm = np.arange(len(indices[:e]) // self.batch_size) random.shuffle(perm) indices[:e] = indices[:e].view(-1, self.batch_size)[perm, :].view(-1) # Handle last elements s += batch_group_size if s < len(indices): random.shuffle(indices[s:]) return iter(indices) def __len__(self): return len(self.sorted_indices) class PyTorchDataset(object): def __init__(self, X, Mel, Y): self.X = X self.Mel = Mel self.Y = Y # alias self.multi_speaker = X.file_data_source.multi_speaker def __getitem__(self, idx): if self.multi_speaker: text, speaker_id = self.X[idx] return text, self.Mel[idx], self.Y[idx], speaker_id else: return self.X[idx], self.Mel[idx], self.Y[idx] def __len__(self): return len(self.X) def sequence_mask(sequence_length, max_len=None): if max_len is None: max_len = sequence_length.data.max() batch_size = sequence_length.size(0) seq_range = torch.arange(0, max_len).long() seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len) seq_range_expand = Variable(seq_range_expand) if sequence_length.is_cuda: seq_range_expand = seq_range_expand.cuda() seq_length_expand = sequence_length.unsqueeze(1) \ .expand_as(seq_range_expand) return (seq_range_expand < seq_length_expand).float() class MaskedL1Loss(nn.Module): def __init__(self): super(MaskedL1Loss, self).__init__() self.criterion = nn.L1Loss(size_average=False) def forward(self, input, target, lengths=None, mask=None, max_len=None): if lengths is None and mask is None: raise RuntimeError("Should provide either lengths or mask") # (B, T, 1) if mask is None: mask = sequence_mask(lengths, max_len).unsqueeze(-1) # (B, T, D) mask_ = mask.expand_as(input) loss = self.criterion(input * mask_, target * mask_) return loss / mask_.sum() def collate_fn(batch): """Create batch""" r = hparams.outputs_per_step downsample_step = hparams.downsample_step multi_speaker = len(batch[0]) == 4 # Lengths input_lengths = [len(x[0]) for x in batch] max_input_len = max(input_lengths) target_lengths = [len(x[1]) for x in batch] max_target_len = max(target_lengths) if max_target_len % r != 0: max_target_len += r - max_target_len % r assert max_target_len % r == 0 if max_target_len % downsample_step != 0: max_target_len += downsample_step - max_target_len % downsample_step assert max_target_len % downsample_step == 0 # Set 0 for zero beginning padding # imitates initial decoder states b_pad = r max_target_len += b_pad * downsample_step a = np.array([_pad(x[0], max_input_len) for x in batch], dtype=np.int) x_batch = torch.LongTensor(a) input_lengths = torch.LongTensor(input_lengths) target_lengths = torch.LongTensor(target_lengths) b = np.array([_pad_2d(x[1], max_target_len, b_pad=b_pad) for x in batch], dtype=np.float32) mel_batch = torch.FloatTensor(b) c = np.array([_pad_2d(x[2], max_target_len, 
b_pad=b_pad) for x in batch], dtype=np.float32) y_batch = torch.FloatTensor(c) # text positions text_positions = np.array([_pad(np.arange(1, len(x[0]) + 1), max_input_len) for x in batch], dtype=np.int) text_positions = torch.LongTensor(text_positions) max_decoder_target_len = max_target_len // r // downsample_step # frame positions s, e = 1, max_decoder_target_len + 1 # if b_pad > 0: # s, e = s - 1, e - 1 frame_positions = torch.arange(s, e).long().unsqueeze(0).expand( len(batch), max_decoder_target_len) # done flags done = np.array([_pad(np.zeros(len(x[1]) // r // downsample_step - 1), max_decoder_target_len, constant_values=1) for x in batch]) done = torch.FloatTensor(done).unsqueeze(-1) if multi_speaker: speaker_ids = torch.LongTensor([x[3] for x in batch]) else: speaker_ids = None return x_batch, input_lengths, mel_batch, y_batch, \ (text_positions, frame_positions), done, target_lengths, speaker_ids def time_string(): return datetime.now().strftime('%Y-%m-%d %H:%M') def save_alignment(path, attn): plot_alignment(attn.T, path, info="{}, {}, step={}".format( hparams.builder, time_string(), global_step)) def prepare_spec_image(spectrogram): # [0, 1] spectrogram = (spectrogram - np.min(spectrogram)) / (np.max(spectrogram) - np.min(spectrogram)) spectrogram = np.flip(spectrogram, axis=1) # flip against freq axis return np.uint8(cm.magma(spectrogram.T) * 255) def eval_model(global_step, writer, model, checkpoint_dir, ismultispeaker): # harded coded texts = [ "Scientists at the CERN laboratory say they have discovered a new particle.", "There's a way to measure the acute emotional intelligence that has never gone out of style.", "President Trump met with other leaders at the Group of 20 conference.", "Generative adversarial network or variational auto-encoder.", "Please call Stella.", "Some have accepted this as a miracle without any physical explanation.", ] import dv3.synthesis synthesis._frontend = _frontend eval_output_dir = join(checkpoint_dir, "eval") os.makedirs(eval_output_dir, exist_ok=True) # hard coded speaker_ids = [0, 1, 10] if ismultispeaker else [None] for speaker_id in speaker_ids: speaker_str = "multispeaker{}".format(speaker_id) if speaker_id is not None else "single" for idx, text in enumerate(texts): signal, alignment, _, mel = synthesis.tts( model, text, p=0, speaker_id=speaker_id, fast=False) signal /= np.max(np.abs(signal)) # Alignment path = join(eval_output_dir, "step{:09d}_text{}_{}_alignment.png".format( global_step, idx, speaker_str)) save_alignment(path, alignment) tag = "eval_averaged_alignment_{}_{}".format(idx, speaker_str) writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step) # Mel writer.add_image("(Eval) Predicted mel spectrogram text{}_{}".format(idx, speaker_str), prepare_spec_image(mel), global_step) # Audio path = join(eval_output_dir, "step{:09d}_text{}_{}_predicted.wav".format( global_step, idx, speaker_str)) dv3.audio.save_wav(signal, path) try: writer.add_audio("(Eval) Predicted audio signal {}_{}".format(idx, speaker_str), signal, global_step, sample_rate=fs) except Exception as e: warn(str(e)) pass def save_states(global_step, writer, mel_outputs, linear_outputs, attn, mel, y, input_lengths, checkpoint_dir=None): print("Save intermediate states at step {}".format(global_step)) # idx = np.random.randint(0, len(input_lengths)) idx = min(1, len(input_lengths) - 1) input_length = input_lengths[idx] # Alignment # Multi-hop attention if attn is not None and attn.dim() == 4: for i, alignment in enumerate(attn): 
alignment = alignment[idx].cpu().data.numpy() tag = "alignment_layer{}".format(i + 1) writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step) # save files as well for now alignment_dir = join(checkpoint_dir, "alignment_layer{}".format(i + 1)) os.makedirs(alignment_dir, exist_ok=True) path = join(alignment_dir, "step{:09d}_layer_{}_alignment.png".format( global_step, i + 1)) save_alignment(path, alignment) # Save averaged alignment alignment_dir = join(checkpoint_dir, "alignment_ave") os.makedirs(alignment_dir, exist_ok=True) path = join(alignment_dir, "step{:09d}_alignment.png".format(global_step)) alignment = attn.mean(0)[idx].cpu().data.numpy() save_alignment(path, alignment) tag = "averaged_alignment" writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step) # Predicted mel spectrogram if mel_outputs is not None: mel_output = mel_outputs[idx].cpu().data.numpy() mel_output = prepare_spec_image(dv3.audio._denormalize(mel_output)) writer.add_image("Predicted mel spectrogram", mel_output, global_step) # Predicted spectrogram if linear_outputs is not None: linear_output = linear_outputs[idx].cpu().data.numpy() spectrogram = prepare_spec_image(dv3.audio._denormalize(linear_output)) writer.add_image("Predicted linear spectrogram", spectrogram, global_step) # Predicted audio signal signal = dv3.audio.inv_spectrogram(linear_output.T) signal /= np.max(np.abs(signal)) path = join(checkpoint_dir, "step{:09d}_predicted.wav".format( global_step)) try: writer.add_audio("Predicted audio signal", signal, global_step, sample_rate=fs) except Exception as e: warn(str(e)) pass dv3.audio.save_wav(signal, path) # Target mel spectrogram if mel_outputs is not None: mel_output = mel[idx].cpu().data.numpy() mel_output = prepare_spec_image(dv3.audio._denormalize(mel_output)) writer.add_image("Target mel spectrogram", mel_output, global_step) # Target spectrogram if linear_outputs is not None: linear_output = y[idx].cpu().data.numpy() spectrogram = prepare_spec_image(dv3.audio._denormalize(linear_output)) writer.add_image("Target linear spectrogram", spectrogram, global_step) def logit(x, eps=1e-8): return torch.log(x + eps) - torch.log(1 - x + eps) def masked_mean(y, mask): # (B, T, D) mask_ = mask.expand_as(y) return (y * mask_).sum() / mask_.sum() def spec_loss(y_hat, y, mask, priority_bin=None, priority_w=0): masked_l1 = MaskedL1Loss() l1 = nn.L1Loss() w = hparams.masked_loss_weight # L1 loss if w > 0: assert mask is not None l1_loss = w * masked_l1(y_hat, y, mask=mask) + (1 - w) * l1(y_hat, y) else: assert mask is None l1_loss = l1(y_hat, y) # Priority L1 loss if priority_bin is not None and priority_w > 0: if w > 0: priority_loss = w * masked_l1( y_hat[:, :, :priority_bin], y[:, :, :priority_bin], mask=mask) \ + (1 - w) * l1(y_hat[:, :, :priority_bin], y[:, :, :priority_bin]) else: priority_loss = l1(y_hat[:, :, :priority_bin], y[:, :, :priority_bin]) l1_loss = (1 - priority_w) * l1_loss + priority_w * priority_loss # Binary divergence loss if hparams.binary_divergence_weight <= 0: binary_div = Variable(y.data.new(1).zero_()) else: y_hat_logits = logit(y_hat) z = -y * y_hat_logits + torch.log(1 + torch.exp(y_hat_logits)) if w > 0: binary_div = w * masked_mean(z, mask) + (1 - w) * z.mean() else: binary_div = z.mean() return l1_loss, binary_div @jit(nopython=True) def guided_attention(N, max_N, T, max_T, g): W = np.zeros((max_N, max_T), dtype=np.float32) for n in range(N): for t in range(T): W[n, t] = 1 - np.exp(-(n / N - t / T)**2 / (2 * g * g)) 
return W def guided_attentions(input_lengths, target_lengths, max_target_len, g=0.2): B = len(input_lengths) max_input_len = input_lengths.max() W = np.zeros((B, max_target_len, max_input_len), dtype=np.float32) for b in range(B): W[b] = guided_attention(input_lengths[b], max_input_len, target_lengths[b], max_target_len, g).T return W def train(model, data_loader, optimizer, writer, init_lr=0.002, checkpoint_dir=None, checkpoint_interval=None, nepochs=None, clip_thresh=1.0, train_seq2seq=True, train_postnet=True): if use_cuda: model = model.cuda() linear_dim = model.linear_dim r = hparams.outputs_per_step downsample_step = hparams.downsample_step current_lr = init_lr binary_criterion = nn.BCELoss() assert train_seq2seq or train_postnet global global_step, global_epoch while global_epoch < nepochs: running_loss = 0. for step, (x, input_lengths, mel, y, positions, done, target_lengths, speaker_ids) \ in tqdm(enumerate(data_loader)): model.train() ismultispeaker = speaker_ids is not None # Learning rate schedule if hparams.lr_schedule is not None: lr_schedule_f = getattr(dv3.lrschedule, hparams.lr_schedule) current_lr = lr_schedule_f( init_lr, global_step, **hparams.lr_schedule_kwargs) for param_group in optimizer.param_groups: param_group['lr'] = current_lr optimizer.zero_grad() # Used for Position encoding text_positions, frame_positions = positions # Downsample mel spectrogram if downsample_step > 1: mel = mel[:, 0::downsample_step, :].contiguous() # Lengths input_lengths = input_lengths.long().numpy() decoder_lengths = target_lengths.long().numpy() // r // downsample_step # Feed data x, mel, y = Variable(x), Variable(mel), Variable(y) text_positions = Variable(text_positions) frame_positions = Variable(frame_positions) done = Variable(done) target_lengths = Variable(target_lengths) speaker_ids = Variable(speaker_ids) if ismultispeaker else None if use_cuda: if train_seq2seq: x = x.cuda() text_positions = text_positions.cuda() frame_positions = frame_positions.cuda() if train_postnet: y = y.cuda() mel = mel.cuda() done, target_lengths = done.cuda(), target_lengths.cuda() speaker_ids = speaker_ids.cuda() if ismultispeaker else None # Create mask if we use masked loss if hparams.masked_loss_weight > 0: # decoder output domain mask decoder_target_mask = sequence_mask( target_lengths / (r * downsample_step), max_len=mel.size(1)).unsqueeze(-1) if downsample_step > 1: # spectrogram-domain mask target_mask = sequence_mask( target_lengths, max_len=y.size(1)).unsqueeze(-1) else: target_mask = decoder_target_mask # shift mask decoder_target_mask = decoder_target_mask[:, r:, :] target_mask = target_mask[:, r:, :] else: decoder_target_mask, target_mask = None, None # Apply model if train_seq2seq and train_postnet: mel_outputs, linear_outputs, attn, done_hat = model( x, mel, speaker_ids=speaker_ids, text_positions=text_positions, frame_positions=frame_positions, input_lengths=input_lengths) elif train_seq2seq: assert speaker_ids is None mel_outputs, attn, done_hat, _ = model.seq2seq( x, mel, text_positions=text_positions, frame_positions=frame_positions, input_lengths=input_lengths) # reshape mel_outputs = mel_outputs.view(len(mel), -1, mel.size(-1)) linear_outputs = None elif train_postnet: assert speaker_ids is None linear_outputs = model.postnet(mel) mel_outputs, attn, done_hat = None, None, None # Losses w = hparams.binary_divergence_weight # mel: if train_seq2seq: mel_l1_loss, mel_binary_div = spec_loss( mel_outputs[:, :-r, :], mel[:, r:, :], decoder_target_mask) mel_loss = (1 - w) * mel_l1_loss + 
w * mel_binary_div # done: if train_seq2seq: done_loss = binary_criterion(done_hat, done) # linear: if train_postnet: n_priority_freq = int(hparams.priority_freq / (fs * 0.5) * linear_dim) linear_l1_loss, linear_binary_div = spec_loss( linear_outputs[:, :-r, :], y[:, r:, :], target_mask, priority_bin=n_priority_freq, priority_w=hparams.priority_freq_weight) linear_loss = (1 - w) * linear_l1_loss + w * linear_binary_div # Combine losses if train_seq2seq and train_postnet: loss = mel_loss + linear_loss + done_loss elif train_seq2seq: loss = mel_loss + done_loss elif train_postnet: loss = linear_loss # attention if train_seq2seq and hparams.use_guided_attention: soft_mask = guided_attentions(input_lengths, decoder_lengths, attn.size(-2), g=hparams.guided_attention_sigma) soft_mask = Variable(torch.from_numpy(soft_mask)) soft_mask = soft_mask.cuda() if use_cuda else soft_mask attn_loss = (attn * soft_mask).mean() loss += attn_loss if global_step > 0 and global_step % checkpoint_interval == 0: save_states( global_step, writer, mel_outputs, linear_outputs, attn, mel, y, input_lengths, checkpoint_dir) save_checkpoint( model, optimizer, global_step, checkpoint_dir, global_epoch, train_seq2seq, train_postnet) if global_step > 0 and global_step % hparams.eval_interval == 0: eval_model(global_step, writer, model, checkpoint_dir, ismultispeaker) # Update loss.backward() if clip_thresh > 0: grad_norm = torch.nn.utils.clip_grad_norm( model.get_trainable_parameters(), clip_thresh) optimizer.step() # Logs writer.add_scalar("loss", float(loss.data[0]), global_step) if train_seq2seq: writer.add_scalar("done_loss", float(done_loss.data[0]), global_step) writer.add_scalar("mel loss", float(mel_loss.data[0]), global_step) writer.add_scalar("mel_l1_loss", float(mel_l1_loss.data[0]), global_step) writer.add_scalar("mel_binary_div_loss", float(mel_binary_div.data[0]), global_step) if train_postnet: writer.add_scalar("linear_loss", float(linear_loss.data[0]), global_step) writer.add_scalar("linear_l1_loss", float(linear_l1_loss.data[0]), global_step) writer.add_scalar("linear_binary_div_loss", float( linear_binary_div.data[0]), global_step) if train_seq2seq and hparams.use_guided_attention: writer.add_scalar("attn_loss", float(attn_loss.data[0]), global_step) if clip_thresh > 0: writer.add_scalar("gradient norm", grad_norm, global_step) writer.add_scalar("learning rate", current_lr, global_step) global_step += 1 running_loss += loss.data[0] averaged_loss = running_loss / (len(data_loader)) writer.add_scalar("loss (per epoch)", averaged_loss, global_epoch) print("Loss: {}".format(running_loss / (len(data_loader)))) global_epoch += 1 def save_checkpoint(model, optimizer, step, checkpoint_dir, epoch, train_seq2seq, train_postnet): if train_seq2seq and train_postnet: suffix = "" m = model elif train_seq2seq: suffix = "_seq2seq" m = model.seq2seq elif train_postnet: suffix = "_postnet" m = model.postnet checkpoint_path = join( checkpoint_dir, "checkpoint_step{:09d}{}.pth".format(global_step, suffix)) optimizer_state = optimizer.state_dict() if hparams.save_optimizer_state else None torch.save({ "state_dict": m.state_dict(), "optimizer": optimizer_state, "global_step": step, "global_epoch": epoch, }, checkpoint_path) print("Saved checkpoint:", checkpoint_path) def build_model(): model = getattr(builder, hparams.builder)( n_speakers=hparams.n_speakers, speaker_embed_dim=hparams.speaker_embed_dim, n_vocab=_frontend.n_vocab, embed_dim=hparams.text_embed_dim, mel_dim=hparams.num_mels, linear_dim=hparams.fft_size // 2 + 1, 
r=hparams.outputs_per_step, downsample_step=hparams.downsample_step, padding_idx=hparams.padding_idx, dropout=hparams.dropout, kernel_size=hparams.kernel_size, encoder_channels=hparams.encoder_channels, decoder_channels=hparams.decoder_channels, converter_channels=hparams.converter_channels, use_memory_mask=hparams.use_memory_mask, trainable_positional_encodings=hparams.trainable_positional_encodings, force_monotonic_attention=hparams.force_monotonic_attention, use_decoder_state_for_postnet_input=hparams.use_decoder_state_for_postnet_input, max_positions=hparams.max_positions, speaker_embedding_weight_std=hparams.speaker_embedding_weight_std, freeze_embedding=hparams.freeze_embedding, window_ahead=hparams.window_ahead, window_backward=hparams.window_backward, key_projection=hparams.key_projection, value_projection=hparams.value_projection, ) return model def load_checkpoint(path, model, optimizer, reset_optimizer): global global_step global global_epoch print("Load checkpoint from: {}".format(path)) checkpoint = torch.load(path) model.load_state_dict(checkpoint["state_dict"]) if not reset_optimizer: optimizer_state = checkpoint["optimizer"] if optimizer_state is not None: print("Load optimizer state from {}".format(path)) optimizer.load_state_dict(checkpoint["optimizer"]) global_step = checkpoint["global_step"] global_epoch = checkpoint["global_epoch"] return model def _load_embedding(path, model): state = torch.load(path)["state_dict"] key = "seq2seq.encoder.embed_tokens.weight" model.seq2seq.encoder.embed_tokens.weight.data = state[key] # https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/3 def restore_parts(path, model): print("Restore part of the model from: {}".format(path)) state = torch.load(path)["state_dict"] model_dict = model.state_dict() valid_state_dict = {k: v for k, v in state.items() if k in model_dict} model_dict.update(valid_state_dict) model.load_state_dict(model_dict) if __name__ == "__main__": args = docopt(__doc__) print("Command line args:\n", args) checkpoint_dir = args["--checkpoint-dir"] checkpoint_path = args["--checkpoint"] checkpoint_seq2seq_path = args["--checkpoint-seq2seq"] checkpoint_postnet_path = args["--checkpoint-postnet"] load_embedding = args["--load-embedding"] checkpoint_restore_parts = args["--restore-parts"] speaker_id = args["--speaker-id"] speaker_id = int(speaker_id) if speaker_id is not None else None data_root = args["--data-root"] if data_root is None: data_root = join(dirname(__file__), "data", "ljspeech") log_event_path = args["--log-event-path"] reset_optimizer = args["--reset-optimizer"] # Which model to be trained train_seq2seq = args["--train-seq2seq-only"] train_postnet = args["--train-postnet-only"] # train both if not specified if not train_seq2seq and not train_postnet: print("Training whole model") train_seq2seq, train_postnet = True, True if train_seq2seq: print("Training seq2seq model") elif train_postnet: print("Training postnet model") else: assert False, "must be specified wrong args" # Override hyper parameters hparams.parse(args["--hparams"]) print(hparams_debug_string()) assert hparams.name == "deepvoice3" # Presets if hparams.preset is not None and hparams.preset != "": preset = hparams.presets[hparams.preset] import json hparams.parse_json(json.dumps(preset)) print("Override hyper parameters with preset \"{}\": {}".format( hparams.preset, json.dumps(preset, indent=4))) _frontend = getattr(frontend, hparams.frontend) os.makedirs(checkpoint_dir, exist_ok=True) # Input dataset definitions X = 
FileSourceDataset(TextDataSource(data_root, speaker_id)) Mel = FileSourceDataset(MelSpecDataSource(data_root, speaker_id)) Y = FileSourceDataset(LinearSpecDataSource(data_root, speaker_id)) # Prepare sampler frame_lengths = Mel.file_data_source.frame_lengths sampler = PartialyRandomizedSimilarTimeLengthSampler( frame_lengths, batch_size=hparams.batch_size) # Dataset and Dataloader setup dataset = PyTorchDataset(X, Mel, Y) data_loader = data_utils.DataLoader( dataset, batch_size=hparams.batch_size, num_workers=hparams.num_workers, sampler=sampler, collate_fn=collate_fn, pin_memory=hparams.pin_memory) print("dataloader_prepared") # Model model = build_model() if use_cuda: model = model.cuda() optimizer = optim.Adam(model.get_trainable_parameters(), lr=hparams.initial_learning_rate, betas=( hparams.adam_beta1, hparams.adam_beta2), eps=hparams.adam_eps, weight_decay=hparams.weight_decay) if checkpoint_restore_parts is not None: restore_parts(checkpoint_restore_parts, model) # Load checkpoints if checkpoint_postnet_path is not None: load_checkpoint(checkpoint_postnet_path, model.postnet, optimizer, reset_optimizer) if checkpoint_seq2seq_path is not None: load_checkpoint(checkpoint_seq2seq_path, model.seq2seq, optimizer, reset_optimizer) if checkpoint_path is not None: load_checkpoint(checkpoint_path, model, optimizer, reset_optimizer) # Load embedding if load_embedding is not None: print("Loading embedding from {}".format(load_embedding)) _load_embedding(load_embedding, model) # Setup summary writer for tensorboard if log_event_path is None: log_event_path = "log/run-test" + str(datetime.now()).replace(" ", "_") print("Los event path: {}".format(log_event_path)) writer = SummaryWriter(log_dir=log_event_path) # Train! try: train(model, data_loader, optimizer, writer, init_lr=hparams.initial_learning_rate, checkpoint_dir=checkpoint_dir, checkpoint_interval=hparams.checkpoint_interval, nepochs=hparams.nepochs, clip_thresh=hparams.clip_thresh, train_seq2seq=train_seq2seq, train_postnet=train_postnet) except KeyboardInterrupt: save_checkpoint( model, optimizer, global_step, checkpoint_dir, global_epoch, train_seq2seq, train_postnet) print("Finished") sys.exit(0)
[((2046, 2071), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2069, 2071), False, 'import torch\n'), ((18998, 19016), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (19001, 19016), False, 'from numba import jit\n'), ((2538, 2552), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2550, 2552), True, 'from matplotlib import pyplot as plt\n'), ((2790, 2808), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (2800, 2808), True, 'from matplotlib import pyplot as plt\n'), ((2813, 2843), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Encoder timestep"""'], {}), "('Encoder timestep')\n", (2823, 2843), True, 'from matplotlib import pyplot as plt\n'), ((2848, 2866), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2864, 2866), True, 'from matplotlib import pyplot as plt\n'), ((2871, 2902), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {'format': '"""png"""'}), "(path, format='png')\n", (2882, 2902), True, 'from matplotlib import pyplot as plt\n'), ((2907, 2918), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2916, 2918), True, 'from matplotlib import pyplot as plt\n'), ((8617, 8643), 'torch.autograd.Variable', 'Variable', (['seq_range_expand'], {}), '(seq_range_expand)\n', (8625, 8643), False, 'from torch.autograd import Variable\n'), ((10376, 10395), 'torch.LongTensor', 'torch.LongTensor', (['a'], {}), '(a)\n', (10392, 10395), False, 'import torch\n'), ((10417, 10448), 'torch.LongTensor', 'torch.LongTensor', (['input_lengths'], {}), '(input_lengths)\n', (10433, 10448), False, 'import torch\n'), ((10470, 10502), 'torch.LongTensor', 'torch.LongTensor', (['target_lengths'], {}), '(target_lengths)\n', (10486, 10502), False, 'import torch\n'), ((10633, 10653), 'torch.FloatTensor', 'torch.FloatTensor', (['b'], {}), '(b)\n', (10650, 10653), False, 'import torch\n'), ((10782, 10802), 'torch.FloatTensor', 'torch.FloatTensor', (['c'], {}), '(c)\n', (10799, 10802), False, 'import torch\n'), ((10988, 11020), 'torch.LongTensor', 'torch.LongTensor', (['text_positions'], {}), '(text_positions)\n', (11004, 11020), False, 'import torch\n'), ((12218, 12246), 'numpy.flip', 'np.flip', (['spectrogram'], {'axis': '(1)'}), '(spectrogram, axis=1)\n', (12225, 12246), True, 'import numpy as np\n'), ((12979, 13007), 'os.path.join', 'join', (['checkpoint_dir', '"""eval"""'], {}), "(checkpoint_dir, 'eval')\n", (12983, 13007), False, 'from os.path import join, expanduser\n'), ((13012, 13055), 'os.makedirs', 'os.makedirs', (['eval_output_dir'], {'exist_ok': '(True)'}), '(eval_output_dir, exist_ok=True)\n', (13023, 13055), False, 'import os\n'), ((17852, 17863), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (17861, 17863), False, 'from torch import nn\n'), ((19070, 19112), 'numpy.zeros', 'np.zeros', (['(max_N, max_T)'], {'dtype': 'np.float32'}), '((max_N, max_T), dtype=np.float32)\n', (19078, 19112), True, 'import numpy as np\n'), ((19398, 19460), 'numpy.zeros', 'np.zeros', (['(B, max_target_len, max_input_len)'], {'dtype': 'np.float32'}), '((B, max_target_len, max_input_len), dtype=np.float32)\n', (19406, 19460), True, 'import numpy as np\n'), ((20067, 20079), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (20077, 20079), False, 'from torch import nn\n'), ((30176, 30192), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (30186, 30192), False, 'import torch\n'), ((31232, 31247), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (31238, 31247), False, 'from docopt import 
docopt\n'), ((32452, 32484), 'dv3.hparams.hparams.parse', 'hparams.parse', (["args['--hparams']"], {}), "(args['--hparams'])\n", (32465, 32484), False, 'from dv3.hparams import hparams, hparams_debug_string\n'), ((32940, 32982), 'os.makedirs', 'os.makedirs', (['checkpoint_dir'], {'exist_ok': '(True)'}), '(checkpoint_dir, exist_ok=True)\n', (32951, 32982), False, 'import os\n'), ((33506, 33681), 'torch.utils.data.DataLoader', 'data_utils.DataLoader', (['dataset'], {'batch_size': 'hparams.batch_size', 'num_workers': 'hparams.num_workers', 'sampler': 'sampler', 'collate_fn': 'collate_fn', 'pin_memory': 'hparams.pin_memory'}), '(dataset, batch_size=hparams.batch_size, num_workers=\n hparams.num_workers, sampler=sampler, collate_fn=collate_fn, pin_memory\n =hparams.pin_memory)\n', (33527, 33681), True, 'from torch.utils import data as data_utils\n'), ((34964, 35001), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'log_event_path'}), '(log_dir=log_event_path)\n', (34977, 35001), False, 'from tensorboardX import SummaryWriter\n'), ((35594, 35605), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (35602, 35605), False, 'import sys\n'), ((3239, 3272), 'os.path.join', 'join', (['self.data_root', '"""train.txt"""'], {}), "(self.data_root, 'train.txt')\n", (3243, 3272), False, 'from os.path import join, expanduser\n'), ((4760, 4793), 'os.path.join', 'join', (['self.data_root', '"""train.txt"""'], {}), "(self.data_root, 'train.txt')\n", (4764, 4793), False, 'from os.path import join, expanduser\n'), ((5891, 5904), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (5898, 5904), True, 'import numpy as np\n'), ((9004, 9033), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {'size_average': '(False)'}), '(size_average=False)\n', (9013, 9033), False, 'from torch import nn\n'), ((11610, 11649), 'torch.LongTensor', 'torch.LongTensor', (['[x[3] for x in batch]'], {}), '([x[3] for x in batch])\n', (11626, 11649), False, 'import torch\n'), ((15596, 15633), 'os.path.join', 'join', (['checkpoint_dir', '"""alignment_ave"""'], {}), "(checkpoint_dir, 'alignment_ave')\n", (15600, 15633), False, 'from os.path import join, expanduser\n'), ((15642, 15683), 'os.makedirs', 'os.makedirs', (['alignment_dir'], {'exist_ok': '(True)'}), '(alignment_dir, exist_ok=True)\n', (15653, 15683), False, 'import os\n'), ((17585, 17603), 'torch.log', 'torch.log', (['(x + eps)'], {}), '(x + eps)\n', (17594, 17603), False, 'import torch\n'), ((17606, 17628), 'torch.log', 'torch.log', (['(1 - x + eps)'], {}), '(1 - x + eps)\n', (17615, 17628), False, 'import torch\n'), ((30645, 30661), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (30655, 30661), False, 'import torch\n'), ((30972, 30988), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (30982, 30988), False, 'import torch\n'), ((32495, 32517), 'dv3.hparams.hparams_debug_string', 'hparams_debug_string', ([], {}), '()\n', (32515, 32517), False, 'from dv3.hparams import hparams, hparams_debug_string\n'), ((4459, 4490), 'numpy.asarray', 'np.asarray', (['seq'], {'dtype': 'np.int32'}), '(seq, dtype=np.int32)\n', (4469, 4490), True, 'import numpy as np\n'), ((6610, 6635), 'torch.LongTensor', 'torch.LongTensor', (['lengths'], {}), '(lengths)\n', (6626, 6635), False, 'import torch\n'), ((7318, 7346), 'random.shuffle', 'random.shuffle', (['indices[s:e]'], {}), '(indices[s:e])\n', (7332, 7346), False, 'import random\n'), ((7481, 7501), 'random.shuffle', 'random.shuffle', (['perm'], {}), '(perm)\n', (7495, 7501), False, 'import random\n'), ((7687, 7714), 'random.shuffle', 
'random.shuffle', (['indices[s:]'], {}), '(indices[s:])\n', (7701, 7714), False, 'import random\n'), ((8488, 8512), 'torch.arange', 'torch.arange', (['(0)', 'max_len'], {}), '(0, max_len)\n', (8500, 8512), False, 'import torch\n'), ((11527, 11550), 'torch.FloatTensor', 'torch.FloatTensor', (['done'], {}), '(done)\n', (11544, 11550), False, 'import torch\n'), ((11854, 11868), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (11866, 11868), False, 'from datetime import datetime\n'), ((12133, 12152), 'numpy.min', 'np.min', (['spectrogram'], {}), '(spectrogram)\n', (12139, 12152), True, 'import numpy as np\n'), ((12157, 12176), 'numpy.max', 'np.max', (['spectrogram'], {}), '(spectrogram)\n', (12163, 12176), True, 'import numpy as np\n'), ((12179, 12198), 'numpy.min', 'np.min', (['spectrogram'], {}), '(spectrogram)\n', (12185, 12198), True, 'import numpy as np\n'), ((12293, 12316), 'matplotlib.cm.magma', 'cm.magma', (['spectrogram.T'], {}), '(spectrogram.T)\n', (12301, 12316), False, 'from matplotlib import cm\n'), ((15331, 15372), 'os.makedirs', 'os.makedirs', (['alignment_dir'], {'exist_ok': '(True)'}), '(alignment_dir, exist_ok=True)\n', (15342, 15372), False, 'import os\n'), ((16682, 16696), 'numpy.abs', 'np.abs', (['signal'], {}), '(signal)\n', (16688, 16696), True, 'import numpy as np\n'), ((21406, 21430), 'torch.autograd.Variable', 'Variable', (['text_positions'], {}), '(text_positions)\n', (21414, 21430), False, 'from torch.autograd import Variable\n'), ((21461, 21486), 'torch.autograd.Variable', 'Variable', (['frame_positions'], {}), '(frame_positions)\n', (21469, 21486), False, 'from torch.autograd import Variable\n'), ((21506, 21520), 'torch.autograd.Variable', 'Variable', (['done'], {}), '(done)\n', (21514, 21520), False, 'from torch.autograd import Variable\n'), ((21550, 21574), 'torch.autograd.Variable', 'Variable', (['target_lengths'], {}), '(target_lengths)\n', (21558, 21574), False, 'from torch.autograd import Variable\n'), ((31791, 31808), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (31798, 31808), False, 'from os.path import dirname, join\n'), ((32730, 32748), 'json.dumps', 'json.dumps', (['preset'], {}), '(preset)\n', (32740, 32748), False, 'import json\n'), ((4377, 4408), 'numpy.asarray', 'np.asarray', (['seq'], {'dtype': 'np.int32'}), '(seq, dtype=np.int32)\n', (4387, 4408), True, 'import numpy as np\n'), ((5527, 5548), 'numpy.array', 'np.array', (['speaker_ids'], {}), '(speaker_ids)\n', (5535, 5548), True, 'import numpy as np\n'), ((13463, 13477), 'numpy.abs', 'np.abs', (['signal'], {}), '(signal)\n', (13469, 13477), True, 'import numpy as np\n'), ((19189, 19232), 'numpy.exp', 'np.exp', (['(-(n / N - t / T) ** 2 / (2 * g * g))'], {}), '(-(n / N - t / T) ** 2 / (2 * g * g))\n', (19195, 19232), True, 'import numpy as np\n'), ((21337, 21348), 'torch.autograd.Variable', 'Variable', (['x'], {}), '(x)\n', (21345, 21348), False, 'from torch.autograd import Variable\n'), ((21350, 21363), 'torch.autograd.Variable', 'Variable', (['mel'], {}), '(mel)\n', (21358, 21363), False, 'from torch.autograd import Variable\n'), ((21365, 21376), 'torch.autograd.Variable', 'Variable', (['y'], {}), '(y)\n', (21373, 21376), False, 'from torch.autograd import Variable\n'), ((21601, 21622), 'torch.autograd.Variable', 'Variable', (['speaker_ids'], {}), '(speaker_ids)\n', (21609, 21622), False, 'from torch.autograd import Variable\n'), ((32851, 32879), 'json.dumps', 'json.dumps', (['preset'], {'indent': '(4)'}), '(preset, indent=4)\n', (32861, 32879), False, 'import 
json\n'), ((3848, 3869), 'numpy.array', 'np.array', (['speaker_ids'], {}), '(speaker_ids)\n', (3856, 3869), True, 'import numpy as np\n'), ((5216, 5239), 'os.path.join', 'join', (['self.data_root', 'f'], {}), '(self.data_root, f)\n', (5220, 5239), False, 'from os.path import join, expanduser\n'), ((5593, 5608), 'numpy.array', 'np.array', (['paths'], {}), '(paths)\n', (5601, 5608), True, 'import numpy as np\n'), ((5657, 5685), 'numpy.array', 'np.array', (['self.frame_lengths'], {}), '(self.frame_lengths)\n', (5665, 5685), True, 'import numpy as np\n'), ((18801, 18824), 'torch.exp', 'torch.exp', (['y_hat_logits'], {}), '(y_hat_logits)\n', (18810, 18824), False, 'import torch\n'), ((25424, 25451), 'torch.from_numpy', 'torch.from_numpy', (['soft_mask'], {}), '(soft_mask)\n', (25440, 25451), False, 'import torch\n'), ((3918, 3933), 'numpy.array', 'np.array', (['texts'], {}), '(texts)\n', (3926, 3933), True, 'import numpy as np\n'), ((34862, 34876), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (34874, 34876), False, 'from datetime import datetime\n'), ((11225, 11243), 'torch.arange', 'torch.arange', (['s', 'e'], {}), '(s, e)\n', (11237, 11243), False, 'import torch\n'), ((15950, 15971), 'numpy.flip', 'np.flip', (['alignment', '(1)'], {}), '(alignment, 1)\n', (15957, 15971), True, 'import numpy as np\n'), ((13811, 13832), 'numpy.flip', 'np.flip', (['alignment', '(1)'], {}), '(alignment, 1)\n', (13818, 13832), True, 'import numpy as np\n'), ((15147, 15168), 'numpy.flip', 'np.flip', (['alignment', '(1)'], {}), '(alignment, 1)\n', (15154, 15168), True, 'import numpy as np\n')]
alcinnz/Historical-Twin
magic_mirror.py
54a9ab5dc130aaeb2e00058bbaeace7377e2ff3d
#! /usr/bin/python2 import time start = time.time() import pygame, numpy import pygame.camera # Init display screen = pygame.display.set_mode((0,0), pygame.FULLSCREEN) pygame.display.set_caption("Magic Mirror") #pygame.mouse.set_visible(False) # Init font pygame.font.init() font_colour = 16, 117, 186 fonts = {40: pygame.font.Font("Futura.ttc", 40)} def font(font_size = 40): if font_size not in fonts: fonts[font_size] = pygame.font.Font("Futura.ttc", font_size) return fonts[font_size] def write(text, colour = font_colour, font_size = 40): return font(font_size).render(str(text), True, colour) # Init AI import recognition import sys, os def find_faces(pygame_capture): capture = numpy.array(pygame.surfarray.pixels3d(pygame_capture)) capture = numpy.swapaxes(capture, 0, 1) return recognition.align.getAllFaceBoundingBoxes(capture), capture index = recognition.MultiBinaryTree() imgdir = sys.argv[1] if len(sys.argv) > 1 else "images" photo_samples = [] screen.blit(write("Loading index... %fs" % (time.time() - start)), (0,0)) pygame.display.flip() with open(os.path.join(imgdir, "index.tsv")) as f: for line in f: line = line.strip().split("\t") img = os.path.join(imgdir, line[0]) description = numpy.array([float(n) for n in line[1:]]) index.insert(description, img) screen.blit(write("Loading images... %fs" % (time.time() - start)), (0,50)) pygame.display.flip() for img in os.listdir(os.path.join(imgdir, "thumbnails")): photo_samples.append(pygame.image.load(os.path.join(imgdir, "thumbnails", img))) # Init clock clock = pygame.time.Clock() # Init camera pygame.camera.init() cameras = pygame.camera.list_cameras() if not cameras: pygame.quit() print "No cameras found, exiting!" sys.exit(1) camera = pygame.camera.Camera(cameras[0]) camera.start() # Mainloop def recognize(capture, faces): fullscreen = pygame.Rect(0, 0, screen.get_width(), screen.get_height()) pygame.draw.rect(screen, (255, 255, 255), fullscreen) pygame.display.flip() face = recognition.average(recognition.getRepBBox(capture, face) for face in faces) img = index.nearest(face) screen.blit(pygame.image.load(img), (0,0)) pygame.display.flip() pygame.time.wait(10*1000) # 30s def main(): countdown = 10 lastFaceCount = 0 while True: clock.tick(60) for event in pygame.event.get(): if event.type in (pygame.QUIT, pygame.KEYDOWN): return capture = camera.get_image() faces, capture_data = find_faces(capture) for bbox in faces: rect = pygame.Rect(bbox.left(), bbox.top(), bbox.width(), bbox.height()) pygame.draw.rect(capture, (255, 0, 0), rect, 2) capture = pygame.transform.flip(capture, True, False) screen.blit(pygame.transform.smoothscale(capture, screen.get_size()), (0,0)) if len(faces) == 0 or len(faces) != lastFaceCount: countdown = 10 lastFaceCount = len(faces) elif countdown == 0: recognize(capture_data, faces) countdown = 10 else: screen.blit(write(countdown), (0,0)) countdown -= 1 pygame.display.flip() pygame.quit() if __name__ == "__main__": main()
[]
plojyon/resolwe
resolwe/__init__.py
1bee6f0860fdd087534adf1680e9350d79ab97cf
""".. Ignore pydocstyle D400. ======= Resolwe ======= Open source enterprise dataflow engine in Django. """ from resolwe.__about__ import ( # noqa: F401 __author__, __copyright__, __email__, __license__, __summary__, __title__, __url__, __version__, )
[]
andremsouza/swine_sound_analysis
audio_som64_u_grupo1.py
5583bf91b18e8ad2dcaccb30a94c134e2eab34a5
# %% [markdown] # # Testing python-som with audio dataset # %% [markdown] # # Imports # %% import matplotlib.pyplot as plt # import librosa as lr # import librosa.display as lrdisp import numpy as np import pandas as pd import pickle import seaborn as sns import sklearn.preprocessing from python_som import SOM FILE_PREFIX = 'som64_u_grupo1' # %% [markdown] # # Loading dataset # %% df = pd.read_csv('features_means.csv', index_col=0, verbose=True) df.index = pd.to_datetime(df.index) df['rac'] = False df.loc['2020-09-22':, 'rac'] = True # type: ignore df.sort_index(inplace=True) # %% [markdown] # ## Checking for and dropping duplicates # %% # Resetting index for duplicate analysis df.reset_index(inplace=True) print("Duplicates by filename:", df.duplicated(subset=['file_name']).value_counts(), sep='\n') df.drop_duplicates(subset=['file_name'], inplace=True) print("Duplicates by (datetime, ala, grupo):", df.duplicated(subset=['datetime', 'ala', 'grupo']).value_counts(), sep='\n') df.drop_duplicates(subset=['datetime', 'ala', 'grupo'], inplace=True) # Rebuilding dataframe index df.set_index('datetime', inplace=True) # %% # Filtering dataset by 'group' df = df[df['grupo'] == 1] # %% # Dropping tail of dataset for class balancing # tail_size = abs( # len(df[df['rac'].astype(int) == 1]) - len(df[df['rac'].astype(int) == 0])) # df.drop(df.tail(tail_size).index, inplace=True) # %% [markdown] # ## Visualizing distribution of sample dates # %% df_tmp = pd.DataFrame(df['file_name'].resample('1D').count()) df_tmp['count'] = df_tmp['file_name'] del df_tmp['file_name'] df_tmp['rac'] = False df_tmp.loc['2020-09-22':, 'rac'] = True # type: ignore plt.figure(figsize=(10, 10)) sns.set(style="whitegrid", palette=sns.color_palette("muted", n_colors=6, desat=1.0)) sns.barplot(y=df_tmp.index, x=df_tmp['count'], hue=df_tmp['rac']) plt.draw() df_tmp = pd.DataFrame(df['file_name'].resample('1H').count()) df_tmp['count'] = df_tmp['file_name'] del df_tmp['file_name'] df_tmp['rac'] = False df_tmp.loc['2020-09-22':, 'rac'] = True # type: ignore df_tmp = df_tmp.reset_index() df_tmp['hour'] = df_tmp['datetime'].dt.hour plt.figure(figsize=(10, 10)) sns.set(style="whitegrid", palette=sns.color_palette("muted", n_colors=6, desat=1.0)) sns.barplot(y=df_tmp['hour'], x=df_tmp['count'], hue=df_tmp['rac'], orient='h') plt.draw() # %% df_melt = pd.melt(df, value_vars=['rac'], value_name='ractopamine') plt.figure(figsize=(10, 10)) sns.set(style="whitegrid", palette=sns.color_palette("muted", n_colors=6, desat=1.0)) ax = sns.countplot(data=df_melt, x='ractopamine', hue='ractopamine') for p in ax.patches: ax.annotate(f'\n{p.get_height()}', (p.get_x() + 0.2, p.get_height()), ha='center', va='top', color='white', size=18) plt.draw() # %% # using sklearn's MinMaxScaler scaler = sklearn.preprocessing.MinMaxScaler(feature_range=(0, 1)) df_train = df.iloc[:, 3:-1].copy() df_train = scaler.fit_transform(df_train) # %% # Defining first element of SOM shape # Second element will be assigned based on the ratio between the # first two principal components of the train dataset som_x: int = 64 try: with open(f'./{FILE_PREFIX}.obj', 'rb') as f: som = pickle.load(f) except FileNotFoundError: som = SOM(x=som_x, y=None, input_len=df_train.shape[1], learning_rate=0.5, neighborhood_radius=1.0, neighborhood_function='gaussian', cyclic_x=True, cyclic_y=True, data=df_train) # Training SOM som.weight_initialization(mode='linear', data=df_train) som.train(data=df_train, mode='random', verbose=True) with open(f'./{FILE_PREFIX}.obj', 'wb') as f: 
pickle.dump(som, f) # %% som_x, som_y = som.get_shape() print('SOM shape:', (som_x, som_y)) # %% # Visualizing distance matrix and activation matrix umatrix = som.distance_matrix() fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 9)) sns.heatmap(umatrix.T, cmap='bone_r', ax=ax1, robust=True) sns.heatmap(som.activation_matrix(data=df_train).T, cmap='mako', ax=ax2, robust=True) ax1.invert_yaxis() ax2.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix_activation.png', bbox_inches='tight', transparent=True) plt.draw() # %% # Visualizing distance matrix anc activation matrix separately fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(umatrix.T, cmap='bone_r', robust=True) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix.png', bbox_inches='tight', transparent=True) fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(som.activation_matrix(data=df_train).T, cmap='mako', robust=True) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_activation_matrix.png', bbox_inches='tight', transparent=True) # %% [markdown] # ## Visualizing distribution of features # %% for column in df.iloc[:, 3:-1].columns: hmap = som.get_weights()[:, :, df.iloc[:, 3:-1].columns.get_loc(column)].T fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, robust=True, cmap='BrBG') ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True) plt.close(fig=fig) # %% [markdown] # ## Visualizing distribution of audios by metadata (day, hour, ...) # Each node is colorized according to its most frequent label # %% df['days'] = df.index.date df['days'] = (df['days'] - df['days'][0]) df['days'] = df['days'].apply(lambda x: x.days) df['hour'] = df.index.hour # %% # Visualizing 'rac' distribution class_assignments = som.label_map(np.array(df_train), np.array(df['rac'])) hmap = np.zeros((som_x, som_y)) for i, j in sorted(class_assignments.keys()): try: hmap[i][j] = class_assignments[(i, j)].most_common()[0][0] + 1 except Exception: continue hmap = hmap.T fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, cmap=sns.color_palette(palette=["#000000", "blue", "orange"], n_colors=3), cbar_kws={'ticks': [0, 1, 2]}) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_rac.png', bbox_inches='tight', transparent=True) plt.show() # %% # Visualizing by 'grupo' print(df.groupby('grupo')['rac'].count()) column = 'grupo' class_assignments = som.label_map(np.array(df_train), np.array(df[column])) hmap = np.zeros((som_x, som_y)) for i, j in sorted(class_assignments.keys()): try: hmap[i][j] = class_assignments[(i, j)].most_common()[0][0] except Exception: hmap[i][j] = 0 hmap = hmap.T fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, cmap=sns.color_palette(palette=["#000000", "blue", "orange"], n_colors=3), cbar_kws={'ticks': [0, 1, 2]}) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True) plt.show() # %% # Visualizing by 'days' print(df.groupby('days')['rac'].count()) column = 'days' class_assignments = som.label_map(np.array(df_train), np.array(df[column])) hmap = np.zeros((som_x, som_y)) for i, j in sorted(class_assignments.keys()): try: hmap[i][j] = class_assignments[(i, j)].most_common()[0][0] except Exception: hmap[i][j] = -1 hmap = hmap.T fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, cmap='viridis') ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', 
transparent=True) plt.show() # %% # Visualizing by 'hour' print(df.groupby('hour')['rac'].count()) column = 'hour' class_assignments = som.label_map(np.array(df_train), np.array(df[column])) hmap = np.zeros((som_x, som_y)) for i, j in sorted(class_assignments.keys()): try: hmap[i][j] = class_assignments[(i, j)].most_common()[0][0] except Exception: hmap[i][j] = -1 hmap = hmap.T fig = plt.figure(figsize=(16, 9)) ax = sns.heatmap(hmap, cmap=sns.diverging_palette(150, 250, s=100, l=20, sep=1, n=26, center='light'), center=12) ax.invert_yaxis() fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png', bbox_inches='tight', transparent=True) plt.show() # %%
[((395, 455), 'pandas.read_csv', 'pd.read_csv', (['"""features_means.csv"""'], {'index_col': '(0)', 'verbose': '(True)'}), "('features_means.csv', index_col=0, verbose=True)\n", (406, 455), True, 'import pandas as pd\n'), ((467, 491), 'pandas.to_datetime', 'pd.to_datetime', (['df.index'], {}), '(df.index)\n', (481, 491), True, 'import pandas as pd\n'), ((1699, 1727), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (1709, 1727), True, 'import matplotlib.pyplot as plt\n'), ((1822, 1887), 'seaborn.barplot', 'sns.barplot', ([], {'y': 'df_tmp.index', 'x': "df_tmp['count']", 'hue': "df_tmp['rac']"}), "(y=df_tmp.index, x=df_tmp['count'], hue=df_tmp['rac'])\n", (1833, 1887), True, 'import seaborn as sns\n'), ((1888, 1898), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (1896, 1898), True, 'import matplotlib.pyplot as plt\n'), ((2177, 2205), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (2187, 2205), True, 'import matplotlib.pyplot as plt\n'), ((2300, 2379), 'seaborn.barplot', 'sns.barplot', ([], {'y': "df_tmp['hour']", 'x': "df_tmp['count']", 'hue': "df_tmp['rac']", 'orient': '"""h"""'}), "(y=df_tmp['hour'], x=df_tmp['count'], hue=df_tmp['rac'], orient='h')\n", (2311, 2379), True, 'import seaborn as sns\n'), ((2380, 2390), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (2388, 2390), True, 'import matplotlib.pyplot as plt\n'), ((2407, 2464), 'pandas.melt', 'pd.melt', (['df'], {'value_vars': "['rac']", 'value_name': '"""ractopamine"""'}), "(df, value_vars=['rac'], value_name='ractopamine')\n", (2414, 2464), True, 'import pandas as pd\n'), ((2465, 2493), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (2475, 2493), True, 'import matplotlib.pyplot as plt\n'), ((2593, 2656), 'seaborn.countplot', 'sns.countplot', ([], {'data': 'df_melt', 'x': '"""ractopamine"""', 'hue': '"""ractopamine"""'}), "(data=df_melt, x='ractopamine', hue='ractopamine')\n", (2606, 2656), True, 'import seaborn as sns\n'), ((2865, 2875), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (2873, 2875), True, 'import matplotlib.pyplot as plt\n'), ((4037, 4072), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(16, 9)'}), '(1, 2, figsize=(16, 9))\n', (4049, 4072), True, 'import matplotlib.pyplot as plt\n'), ((4073, 4131), 'seaborn.heatmap', 'sns.heatmap', (['umatrix.T'], {'cmap': '"""bone_r"""', 'ax': 'ax1', 'robust': '(True)'}), "(umatrix.T, cmap='bone_r', ax=ax1, robust=True)\n", (4084, 4131), True, 'import seaborn as sns\n'), ((4431, 4441), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (4439, 4441), True, 'import matplotlib.pyplot as plt\n'), ((4517, 4544), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (4527, 4544), True, 'import matplotlib.pyplot as plt\n'), ((4550, 4600), 'seaborn.heatmap', 'sns.heatmap', (['umatrix.T'], {'cmap': '"""bone_r"""', 'robust': '(True)'}), "(umatrix.T, cmap='bone_r', robust=True)\n", (4561, 4600), True, 'import seaborn as sns\n'), ((4754, 4781), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (4764, 4781), True, 'import matplotlib.pyplot as plt\n'), ((5935, 5959), 'numpy.zeros', 'np.zeros', (['(som_x, som_y)'], {}), '((som_x, som_y))\n', (5943, 5959), True, 'import numpy as np\n'), ((6145, 6172), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (6155, 6172), 
True, 'import matplotlib.pyplot as plt\n'), ((6518, 6528), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6526, 6528), True, 'import matplotlib.pyplot as plt\n'), ((6702, 6726), 'numpy.zeros', 'np.zeros', (['(som_x, som_y)'], {}), '((som_x, som_y))\n', (6710, 6726), True, 'import numpy as np\n'), ((6914, 6941), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (6924, 6941), True, 'import matplotlib.pyplot as plt\n'), ((7292, 7302), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7300, 7302), True, 'import matplotlib.pyplot as plt\n'), ((7473, 7497), 'numpy.zeros', 'np.zeros', (['(som_x, som_y)'], {}), '((som_x, som_y))\n', (7481, 7497), True, 'import numpy as np\n'), ((7686, 7713), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (7696, 7713), True, 'import matplotlib.pyplot as plt\n'), ((7719, 7752), 'seaborn.heatmap', 'sns.heatmap', (['hmap'], {'cmap': '"""viridis"""'}), "(hmap, cmap='viridis')\n", (7730, 7752), True, 'import seaborn as sns\n'), ((7900, 7910), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7908, 7910), True, 'import matplotlib.pyplot as plt\n'), ((8081, 8105), 'numpy.zeros', 'np.zeros', (['(som_x, som_y)'], {}), '((som_x, som_y))\n', (8089, 8105), True, 'import numpy as np\n'), ((8294, 8321), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (8304, 8321), True, 'import matplotlib.pyplot as plt\n'), ((8881, 8891), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8889, 8891), True, 'import matplotlib.pyplot as plt\n'), ((5249, 5276), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (5259, 5276), True, 'import matplotlib.pyplot as plt\n'), ((5286, 5329), 'seaborn.heatmap', 'sns.heatmap', (['hmap'], {'robust': '(True)', 'cmap': '"""BrBG"""'}), "(hmap, robust=True, cmap='BrBG')\n", (5297, 5329), True, 'import seaborn as sns\n'), ((5497, 5515), 'matplotlib.pyplot.close', 'plt.close', ([], {'fig': 'fig'}), '(fig=fig)\n', (5506, 5515), True, 'import matplotlib.pyplot as plt\n'), ((5887, 5905), 'numpy.array', 'np.array', (['df_train'], {}), '(df_train)\n', (5895, 5905), True, 'import numpy as np\n'), ((5907, 5926), 'numpy.array', 'np.array', (["df['rac']"], {}), "(df['rac'])\n", (5915, 5926), True, 'import numpy as np\n'), ((6653, 6671), 'numpy.array', 'np.array', (['df_train'], {}), '(df_train)\n', (6661, 6671), True, 'import numpy as np\n'), ((6673, 6693), 'numpy.array', 'np.array', (['df[column]'], {}), '(df[column])\n', (6681, 6693), True, 'import numpy as np\n'), ((7424, 7442), 'numpy.array', 'np.array', (['df_train'], {}), '(df_train)\n', (7432, 7442), True, 'import numpy as np\n'), ((7444, 7464), 'numpy.array', 'np.array', (['df[column]'], {}), '(df[column])\n', (7452, 7464), True, 'import numpy as np\n'), ((8032, 8050), 'numpy.array', 'np.array', (['df_train'], {}), '(df_train)\n', (8040, 8050), True, 'import numpy as np\n'), ((8052, 8072), 'numpy.array', 'np.array', (['df[column]'], {}), '(df[column])\n', (8060, 8072), True, 'import numpy as np\n'), ((1771, 1820), 'seaborn.color_palette', 'sns.color_palette', (['"""muted"""'], {'n_colors': '(6)', 'desat': '(1.0)'}), "('muted', n_colors=6, desat=1.0)\n", (1788, 1820), True, 'import seaborn as sns\n'), ((2249, 2298), 'seaborn.color_palette', 'sns.color_palette', (['"""muted"""'], {'n_colors': '(6)', 'desat': '(1.0)'}), "('muted', n_colors=6, desat=1.0)\n", (2266, 2298), True, 'import seaborn as sns\n'), 
((2537, 2586), 'seaborn.color_palette', 'sns.color_palette', (['"""muted"""'], {'n_colors': '(6)', 'desat': '(1.0)'}), "('muted', n_colors=6, desat=1.0)\n", (2554, 2586), True, 'import seaborn as sns\n'), ((3305, 3319), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3316, 3319), False, 'import pickle\n'), ((3356, 3537), 'python_som.SOM', 'SOM', ([], {'x': 'som_x', 'y': 'None', 'input_len': 'df_train.shape[1]', 'learning_rate': '(0.5)', 'neighborhood_radius': '(1.0)', 'neighborhood_function': '"""gaussian"""', 'cyclic_x': '(True)', 'cyclic_y': '(True)', 'data': 'df_train'}), "(x=som_x, y=None, input_len=df_train.shape[1], learning_rate=0.5,\n neighborhood_radius=1.0, neighborhood_function='gaussian', cyclic_x=\n True, cyclic_y=True, data=df_train)\n", (3359, 3537), False, 'from python_som import SOM\n'), ((6218, 6286), 'seaborn.color_palette', 'sns.color_palette', ([], {'palette': "['#000000', 'blue', 'orange']", 'n_colors': '(3)'}), "(palette=['#000000', 'blue', 'orange'], n_colors=3)\n", (6235, 6286), True, 'import seaborn as sns\n'), ((6987, 7055), 'seaborn.color_palette', 'sns.color_palette', ([], {'palette': "['#000000', 'blue', 'orange']", 'n_colors': '(3)'}), "(palette=['#000000', 'blue', 'orange'], n_colors=3)\n", (7004, 7055), True, 'import seaborn as sns\n'), ((8367, 8440), 'seaborn.diverging_palette', 'sns.diverging_palette', (['(150)', '(250)'], {'s': '(100)', 'l': '(20)', 'sep': '(1)', 'n': '(26)', 'center': '"""light"""'}), "(150, 250, s=100, l=20, sep=1, n=26, center='light')\n", (8388, 8440), True, 'import seaborn as sns\n'), ((3836, 3855), 'pickle.dump', 'pickle.dump', (['som', 'f'], {}), '(som, f)\n', (3847, 3855), False, 'import pickle\n')]
dallinb/footy
footy/engine/UpdateEngine.py
d6879481a85b4a84023805bf29bd7dff32afa67f
"""Prediction Engine - Update the data model with the most resent fixtures and results.""" from footy.domain import Competition class UpdateEngine: """Prediction Engine - Update the data model with the most resent fixtures and results.""" def __init__(self): """Construct a UpdateEngine object.""" def get_competition(self, code): """ Retrieve data for the supplied competition code. Returns ------- Competition A Competition object with the most recent fixtures and results for the supplied competition code. """ # return Competition return Competition def update_competition(self, competition): """ Retrieve data and enrich the supplied competition with the most recent fixtures and results. Returns ------- Competition A Competition object with the most recent fixtures and results for the supplied competition code. """ return Competition
[]
marsupialmarcos/deck.gl
bindings/pydeck/docs/scripts/embed_examples.py
c9867c1db87e492253865353f68c985019c7c613
"""Script to embed pydeck examples into .rst pages with code These populate the files you see once you click into a grid cell on the pydeck gallery page """ from multiprocessing import Pool import os import subprocess import sys from const import DECKGL_URL_BASE, EXAMPLE_GLOB, GALLERY_DIR, HTML_DIR, HOSTED_STATIC_PATH from utils import to_presentation_name, to_snake_case_string from templates import DOC_TEMPLATE if not os.environ.get("MAPBOX_API_KEY"): # If running for rtfd.io, set this variable from the Admin panel raise Exception("MAPBOX_API_KEY not set") def create_rst(pydeck_example_file_name): asset_name = to_snake_case_string(file_name=pydeck_example_file_name) deckgl_docs_layer_name = asset_name.replace("_", "-") deckgl_doc_url = None if "layer" in deckgl_docs_layer_name: # Don't add a deck.gl docs link if we're not referencing a layer # Obviously very rough, should change this eventually to handle views etc deckgl_doc_url = DECKGL_URL_BASE + deckgl_docs_layer_name # Create new .html examples html_fname = os.path.basename(pydeck_example_file_name).replace(".py", ".html") # Run the pydeck example and move the .html output subprocess.call( "{python} {fname}; mv {html_src} {html_dest}".format( python=sys.executable, fname=pydeck_example_file_name, html_src=html_fname, html_dest=HTML_DIR ), shell=True, ) python_code = open(pydeck_example_file_name, "r").read() doc_source = DOC_TEMPLATE.render( page_title=to_presentation_name(asset_name), snake_name=asset_name, python_code=python_code, hosted_html_path=os.path.join(HOSTED_STATIC_PATH, html_fname), deckgl_doc_url=deckgl_doc_url, ) rst_path = os.path.join(GALLERY_DIR, asset_name + ".rst") f = open(rst_path, "w+") print("* Converted %s to %s" % (pydeck_example_file_name, rst_path)) f.write(doc_source) f.close() def main(): pool = Pool(processes=4) candidate_files = [f for f in EXAMPLE_GLOB] if not candidate_files: raise Exception("No files found to convert") subprocess.call("mkdir -p %s" % HTML_DIR, shell=True) pool.map(create_rst, candidate_files) if __name__ == "__main__": main()
[((428, 460), 'os.environ.get', 'os.environ.get', (['"""MAPBOX_API_KEY"""'], {}), "('MAPBOX_API_KEY')\n", (442, 460), False, 'import os\n'), ((638, 694), 'utils.to_snake_case_string', 'to_snake_case_string', ([], {'file_name': 'pydeck_example_file_name'}), '(file_name=pydeck_example_file_name)\n', (658, 694), False, 'from utils import to_presentation_name, to_snake_case_string\n'), ((1787, 1833), 'os.path.join', 'os.path.join', (['GALLERY_DIR', "(asset_name + '.rst')"], {}), "(GALLERY_DIR, asset_name + '.rst')\n", (1799, 1833), False, 'import os\n'), ((1999, 2016), 'multiprocessing.Pool', 'Pool', ([], {'processes': '(4)'}), '(processes=4)\n', (2003, 2016), False, 'from multiprocessing import Pool\n'), ((2150, 2203), 'subprocess.call', 'subprocess.call', (["('mkdir -p %s' % HTML_DIR)"], {'shell': '(True)'}), "('mkdir -p %s' % HTML_DIR, shell=True)\n", (2165, 2203), False, 'import subprocess\n'), ((1091, 1133), 'os.path.basename', 'os.path.basename', (['pydeck_example_file_name'], {}), '(pydeck_example_file_name)\n', (1107, 1133), False, 'import os\n'), ((1558, 1590), 'utils.to_presentation_name', 'to_presentation_name', (['asset_name'], {}), '(asset_name)\n', (1578, 1590), False, 'from utils import to_presentation_name, to_snake_case_string\n'), ((1681, 1725), 'os.path.join', 'os.path.join', (['HOSTED_STATIC_PATH', 'html_fname'], {}), '(HOSTED_STATIC_PATH, html_fname)\n', (1693, 1725), False, 'import os\n')]
mharding01/augmented-neuromuscular-RT-running
symbolicR/python/forward_kin.py
7e1ef00d3fdf9cfa9d59fc4f3a6a0e6dd792a834
import numpy as np
import sympy as sp
import re
import os

######################
#                    #
#    17   16   21    #
#    18   15   22    #
#    19   14   23    #
#    20   01   24    #
#      02    08      #
#      03    09      #
#      04    10      #
#      05    11      #
#      06    12      #
#      07    13      #
#                    #
######################
#
# origin: in the waist, middle point between the two pitch hip rotations
# inertial frame: located at the origin (waist), but aligned with the ground (info from IMU)
#
# Di  : position vector from the anchor point of the previous body to the current body i
#       (previous body is not always body i-1), expressed in the relative
#       frame of the previous body
# DGi : position vector from the anchor point of body i to its COM (center of mass) G_i,
#       expressed in the relative frame of the current body i
# Omi : rotational vector from the previous body to the current body i
#       (previous body is not always body i-1), expressed in the relative
#       frame of the previous body
# Rdi : rotational matrix between body i and its predecessor
# si  : sine of the relative angle before body i
# ci  : cosine of the relative angle before body i
#
# xi   : absolute position vector (from origin, expressed in the inertial frame)
#        of the anchor point of body i
# xgi  : absolute position vector of the COM G_i of body i
# xpi  : derivative of xi
# xgpi : derivative of xgi
# omi  : absolute rotational vector of body i
# Ri   : absolute rotational matrix
# Rti  : transpose matrix of Ri
# xji  : jacobian of 'xi'
# xgji : jacobian of 'xgi'
# Rji  : jacobian of 'Ri'


# return true if it is a float
def isInt(value):
    try:
        int(value)
        return True
    except:
        return False


# return true if it has a shape 'R%a_%b%c' (indexes %a, %b, %c also returned)
def isRot(value):
    try:
        a = int(value.split('_')[0].split('R')[1])
        b = int(value.split('_')[1][0])
        c = int(value.split('_')[1][1])
        return True, a, b, c
    except:
        return False, -1, -1, -1


# return true if it has a shape 'x%a_%b' (indexes %a, %b also returned)
def isVec(value):
    try:
        a = int(value.split('_')[0].split('x')[1])
        b = int(value.split('_')[1])
        return True, a, b
    except:
        return False, -1, -1


# count the number of 'elem' in the file
def count_elem(in_file, elem):
    count = 0
    with open(in_file, 'r') as f:
        # loop on all the lines
        for line in f:
            cut_line = line.split(elem)
            if len(cut_line) == 2:
                count += 1
    return count


# print the declaration of an element
def print_declaration_elem(in_file, out_write, elem, nb_max_line):
    if count_elem(in_file, '{}'.format(elem)) >= 1:
        count = 0
        with open(in_file,'r') as f:
            # loop on all the lines
            for line in f:
                cut_line_1 = line.split(elem)
                cut_line_2 = line.split(' = ')
                if len(cut_line_1) == 2 and len(cut_line_2) == 2:
                    if len(cut_line_2[0].split('[')) == 1:
                        if count == 0:
                            out_write.write(' double {}'.format(cut_line_2[0].strip()))
                        else:
                            out_write.write(', {}'.format(cut_line_2[0].strip()))
                        count += 1
                        if count >= nb_max_line:
                            out_write.write(';\n')
                            count = 0
        if count != 0:
            out_write.write(';\n')


# print all declarations
def print_all_declaration(in_file, out_write, nb_max_char):
    count = 0
    with open(in_file,'r') as f:
        # loop on all the lines
        for line in f:
            cut_line = line.split(' = ')
            if len(cut_line) == 2:
                if len(cut_line[0].split('[')) == 1:
                    if count == 0:
                        out_write.write(' double {}'.format(cut_line[0].strip()))
                    else:
                        out_write.write(', {}'.format(cut_line[0].strip()))
                    count += len(cut_line[0].strip()) + 2
                    if count >= nb_max_char:
                        out_write.write(';\n')
                        count = 0
    if count != 0:
        out_write.write(';\n')


# get tilde matrix
def get_tilde(v):
    return np.array([[0.0, -v[2], v[1]],
                     [v[2], 0.0, -v[0]],
                     [-v[1], v[0], 0.0]])


# get rotation matrix
def 
get_rotation_matrix(axis, direct, cosine, sine): if direct: if axis == 1: return np.array([[1.0, 0.0, 0.0], [0.0, cosine, sine], [0.0, -sine, cosine]]) elif axis == 2: return np.array([[cosine, 0.0, -sine], [0.0, 1.0, 0.0], [sine, 0.0, cosine]]) elif axis == 3: return np.array([[cosine, sine, 0.0], [-sine, cosine, 0.0], [0.0, 0.0, 1.0]]) else: return np.array([]) else: if axis == 1: return np.array([[1.0, 0.0, 0.0], [0.0, cosine, -sine], [0.0, sine, cosine]]) elif axis == 2: return np.array([[cosine, 0.0, sine], [0.0, 1.0, 0.0], [-sine, 0.0, cosine]]) elif axis == 3: return np.array([[cosine, -sine, 0.0], [sine, cosine, 0.0], [0.0, 0.0, 1.0]]) else: return np.array([]) # get vector axis def get_vector_axis(axis, direct, elem): if direct: if axis == 1: return np.array([[elem], [0.0], [0.0]]) elif axis == 2: return np.array([[0.0], [elem], [0.0]]) elif axis == 3: return np.array([[0.0], [0.0], [elem]]) else: return np.array([]) else: if axis == 1: return np.array([[-elem], [0.0], [0.0]]) elif axis == 2: return np.array([[0.0], [-elem], [0.0]]) elif axis == 3: return np.array([[0.0], [0.0], [-elem]]) else: return np.array([]) # compute the derivative of an element (for jacobian) def der_elem(elem_str, Rj, xj, xgj, der_var): # element to derive (string) elem_str = elem_str.replace('- ','-').strip() # derivative axis der_q = int(der_var.replace('q','')) # detect positive/negative elem_split = elem_str.split('-') cur_len = len(elem_split) if cur_len == 1: # positive neg_flag = 0 pos_str = elem_split[0] elif cur_len == 2: # negative neg_flag = 1 pos_str = elem_split[1] else: print('Error: {} instead of 1 or 2 in negative detection !'.format(cur_len)) exit() # compute derivative result = 0 # cosine if pos_str == 'c{}'.format(der_q): result += -sp.Symbol('s{}'.format(der_q)) # sine elif pos_str == 's{}'.format(der_q): result += sp.Symbol('c{}'.format(der_q)) # other else: [rot_flag, a, b, c] = isRot(pos_str) [vec_flag, d, e] = isVec(pos_str) # rotation matrix if rot_flag: result += Rj[a-1][der_q-1][(b-1)*3+(c-1)] # vector elif vec_flag: result += xj[d-1][der_q-1][e-1] # apply negative if neg_flag: result = -result return result # compute the derivative of an expression (for jacobian) def symbolic_jacob_der(Rj, xj, xgj, symb_var, der_var): # list of all terms term_list = str(symb_var).replace('- ','-').replace('-','+-').split('+') if term_list[0] == '': term_list.pop(0) result = 0 # loop on all terms for cur_term in term_list: # detect products cur_term_split = cur_term.split('*') cur_len = len(cur_term_split) # no product if cur_len == 1: result += der_elem(cur_term_split[0], Rj, xj, xgj, der_var) # one product elif cur_len == 2: result += der_elem(cur_term_split[0], Rj, xj, xgj, der_var)*sp.Symbol(cur_term_split[1].strip()) result += der_elem(cur_term_split[1], Rj, xj, xgj, der_var)*sp.Symbol(cur_term_split[0].strip()) # other else: print('Error: {} * counted , only implemented for 0 or 1 !'.format(cur_len-1)) exit() return result # write the beginning of the file def write_file_beginning(out_file, joint_id_names): out_file.write('/*! 
\n') out_file.write(' * \\author Nicolas Van der Noot\n') out_file.write(' * \\file forward_kinematics.cc\n') out_file.write(' * \\brief forward kinematics computation for the COMAN model\n') out_file.write(' */\n\n') out_file.write('// joints enumeration\n') out_file.write('enum {') count = 0 for i in range(1, len(joint_id_names)): count += 1 if i == 1: out_file.write('{}'.format(get_string_enum(joint_id_names[i]))) elif count >= 6: count = 0 out_file.write(',\n {}'.format(get_string_enum(joint_id_names[i]))) else: out_file.write(', {}'.format(get_string_enum(joint_id_names[i]))) out_file.write('};\n\n') out_file.write('/*! \\brief main kinematics computation\n') out_file.write(' *\n') out_file.write(' * \\param[in,out] in_out inputs and outputs class\n') out_file.write(' *\n') out_file.write(' * computation of:\n') out_file.write(' * COM (center of mass) position and velocity\n') out_file.write(' * feet position, velocity and orientation\n') out_file.write(' * waist and torso orientaion angles and derivatives\n') out_file.write(' *\n') out_file.write(' * ////////////////////////\n') out_file.write(' * // //\n') out_file.write(' * // 17 16 21 //\n') out_file.write(' * // 18 15 22 //\n') out_file.write(' * // 19 14 23 //\n') out_file.write(' * // 20 01 24 //\n') out_file.write(' * // 02 08 //\n') out_file.write(' * // 03 09 //\n') out_file.write(' * // 04 10 //\n') out_file.write(' * // 05 11 //\n') out_file.write(' * // 06 12 //\n') out_file.write(' * // 07 13 //\n') out_file.write(' * // //\n') out_file.write(' * ////////////////////////\n') out_file.write(' *\n') out_file.write(' * origin: in the waist, middle point between the two pitch hip rotations\n') out_file.write(' * inertial frame: located at the origin (waist), but aligned with the ground (info from IMU)\n') out_file.write(' *\n') out_file.write(' * Di : position vector from the anchor point of the previous body to the current body i \n') out_file.write(' * (previous body is not always body i-1), expressed in the relative\n') out_file.write(' * frame of the previous body\n') out_file.write(' * DGi : position vector from the anchor point of body i to its COM (center of mass) G_i,\n') out_file.write(' * expressed in the relative frame of the current body i\n') out_file.write(' * Omi : rotational vector from the previous body to the current body i \n') out_file.write(' * (previous body is not always body i-1), expressed in the relative\n') out_file.write(' * frame of the previous body\n') out_file.write(' * Rdi : rotational matrix between body i and its predecessor\n') out_file.write(' * si : sine of the relative angle before body i\n') out_file.write(' * ci : cosine of the relative angle before body i\n') out_file.write(' *\n') out_file.write(' * xi : absolute position vector (from origin, expressed in the inertial frame)\n') out_file.write(' * of the anchor point of body i\n') out_file.write(' * xgi : absolute position vector of the COM G_i of body i\n') out_file.write(' * xpi : derivative of xi\n') out_file.write(' * xgpi : derivative of xgi\n') out_file.write(' * omi : absolute rotational vector of body i\n') out_file.write(' * Ri : absolute rotational matrix\n') out_file.write(' * Rti : transpose matrix of Ri\n') out_file.write(' * xji : jacobian of \'xi\'\n') out_file.write(' * xgji : jacobian of \'xgi\'\n') out_file.write(' * Rji : jacobian of \'Ri\'\n') out_file.write(' */\n') out_file.write('void ForwardKinematics::main_kinematics(KinematicsInOut &in_out)\n{\n') # compute the center of mass position and velocity def 
com_compute(out_file, nb_bodies, joint_id_names, M, xg, xgp, xgj): out_file.write(' m_tot = ') for i in range(0, nb_bodies): out_file.write('{}'.format(M[i])) if i == nb_bodies-1: out_file.write(';\n\n') else: out_file.write(' + ') out_file.write(' // global com absolute position\n') for i in range(0, 3): out_file.write(' in_out.r_COM[{}] = '.format(i)) flag_first = 0 for j in range(0, nb_bodies): if flag_first: out_file.write(' + {}*{}'.format(M[j], xg[j][i])) else: flag_first = 1 out_file.write('({}*xg{}_{}'.format(M[j], j+1, i+1)) if j == nb_bodies-1: if flag_first: out_file.write(')/m_tot;\n') else: out_file.write('0.0;\n') out_file.write('\n') out_file.write(' // global com absolute velocity\n') for i in range(0, 3): out_file.write(' in_out.rp_COM[{}] = '.format(i)) flag_first = 0 for j in range(0, nb_bodies): if flag_first: out_file.write(' + {}*xgp{}_{}'.format(M[j], j+1, i+1)) else: flag_first = 1 out_file.write('({}*xgp{}_{}'.format(M[j], j+1, i+1)) if j == nb_bodies-1: if flag_first: out_file.write(')/m_tot;\n') else: out_file.write('0.0;\n') out_file.write('\n') out_file.write(' // global com jacobian\n') out_file.write(' if (flag_jacob)\n {\n') for i in range(1, nb_bodies): for j in range(0, 3): out_file.write(' in_out.r_COM_der[{}][{}] = '.format(get_string_enum(joint_id_names[i]), j)) flag_first = 0 for k in range(0, nb_bodies): if xgj[k][i][j] != 0: if flag_first: out_file.write(' + {}*{}'.format(M[k], str(xgj[k][i][j]))) else: flag_first = 1 out_file.write('({}*{}'.format(M[k], str(xgj[k][i][j]))) if k == nb_bodies-1: if flag_first: out_file.write(')/m_tot;\n') else: out_file.write('0.0;\n') if i != nb_bodies-1: out_file.write('\n') else: out_file.write(' }\n\n') # from an orientation matrix, compute the roll, pitch, yaw angles (and derivative) def yaw_pitch_roll_angles(out_file, angle_name, R_matrix, epsilon): if epsilon > 0: # epsilon = 1 -> pitch angle in [-pi/2 ; pi/2] out_file.write(' in_out.{}[0] = atan2({}, {});\n'.format(angle_name, R_matrix[5], R_matrix[8])) out_file.write(' in_out.{}[1] = atan2(-{}, sqrt({}*{} + {}*{}));\n'.format(angle_name, R_matrix[2], R_matrix[0], R_matrix[0], R_matrix[1], R_matrix[1])) out_file.write(' in_out.{}[2] = atan2({}, {});\n'.format(angle_name, R_matrix[1], R_matrix[0])) else: # epsilon = -1 -> pitch angle in [pi/2 ; 3*pi/2] out_file.write(' in_out.{}[0] = atan2(-{}, -{});\n'.format(angle_name, R_matrix[5], R_matrix[8])) out_file.write(' in_out.{}[1] = atan2(-{}, -sqrt({}*{} + {}*{}));\n'.format(angle_name, R_matrix[2], R_matrix[0], R_matrix[0], R_matrix[1], R_matrix[1])) out_file.write(' in_out.{}[2] = atan2(-{}, -{});\n'.format(angle_name, R_matrix[1], R_matrix[0])) # compute the time derivatives of 'yaw_pitch_roll_angles' def theta_dot_compute(out_file, omega_in, omega_out, body_part): out_file.write(' in_out.{}[0] = inv_c_y_{} * (c_z_{}*{} + s_z_{}*{});\n'.format(omega_out, body_part, body_part, omega_in[0], body_part, omega_in[1])) out_file.write(' in_out.{}[1] = c_z_{}*{} - s_z_{}*{};\n'.format(omega_out, body_part, omega_in[1], body_part, omega_in[0])) out_file.write(' in_out.{}[2] = inv_c_y_{} * s_y_{} * (s_z_{}*{} + c_z_{}*{}) + {};\n'.format(omega_out, body_part, body_part, body_part, omega_in[1], body_part, omega_in[0], omega_in[2])) # angles (position and derivative) of the waist and the torso def torso_waist_angles(out_file, R, om, waist_id, torso_id): out_file.write(' // waist orientation matrix as angles [rad]\n') yaw_pitch_roll_angles(out_file, 'theta_waist', R[waist_id], 1) out_file.write('\n') out_file.write(' // 
torso orientation matrix as angles [rad]\n') yaw_pitch_roll_angles(out_file, 'theta_torso', R[torso_id], 1) out_file.write('\n') out_file.write(' c_y_waist = cos(in_out.theta_waist[1]);\n') out_file.write(' c_y_torso = cos(in_out.theta_torso[1]);\n') out_file.write(' c_z_waist = cos(in_out.theta_waist[2]);\n') out_file.write(' c_z_torso = cos(in_out.theta_torso[2]);\n\n') out_file.write(' s_y_waist = sin(in_out.theta_waist[1]);\n') out_file.write(' s_y_torso = sin(in_out.theta_torso[1]);\n') out_file.write(' s_z_waist = sin(in_out.theta_waist[2]);\n') out_file.write(' s_z_torso = sin(in_out.theta_torso[2]);\n\n') out_file.write(' if ((!c_y_waist) || (!c_y_torso))\n {\n') out_file.write(' return;\n }\n\n') out_file.write(' inv_c_y_waist = 1.0 / c_y_waist;\n') out_file.write(' inv_c_y_torso = 1.0 / c_y_torso;\n\n') out_file.write(' // waist orientation angle derivatives [rad/s]\n') theta_dot_compute(out_file, om[waist_id], 'omega_waist', 'waist') out_file.write('\n') out_file.write(' // torso orientation angle derivatives [rad/s]\n') theta_dot_compute(out_file, om[torso_id], 'omega_torso', 'torso') # compute the feet position, velocity and orientation def feet_compute(out_file, joint_id_names, R, x, xp, om, Rj, xj, xgj, r_foot_id, l_foot_id, x_min, x_max, y_min, y_max): # symbolic variables declarations nb_contacts = 4 x_r_foot = x[r_foot_id] x_l_foot = x[l_foot_id] xp_r_foot = xp[r_foot_id] xp_l_foot = xp[l_foot_id] om_r_foot = om[r_foot_id] om_l_foot = om[l_foot_id] R_r_foot = R[r_foot_id] R_l_foot = R[l_foot_id] Dpt_r_foot = sp.zeros(3, 1) Dpt_l_foot = sp.zeros(3, 1) Dpt_r_foot[2] = sp.Symbol('DPT_3_16') Dpt_l_foot[2] = sp.Symbol('DPT_3_29') Dpt_r_foot_cont = nb_contacts * [None] Dpt_l_foot_cont = nb_contacts * [None] for i in range(0, nb_contacts): Dpt_r_foot_cont[i] = sp.zeros(3, 1) Dpt_l_foot_cont[i] = sp.zeros(3, 1) Dpt_r_foot_cont[0][0] = x_min Dpt_r_foot_cont[1][0] = x_min Dpt_r_foot_cont[2][0] = x_max Dpt_r_foot_cont[3][0] = x_max Dpt_r_foot_cont[0][1] = y_min Dpt_r_foot_cont[1][1] = y_max Dpt_r_foot_cont[2][1] = y_min Dpt_r_foot_cont[3][1] = y_max for i in range(0, nb_contacts): Dpt_r_foot_cont[i][2] = sp.Symbol('DPT_3_16') for i in range(0, nb_contacts): for j in range(0, 3): Dpt_l_foot_cont[i][j] = Dpt_r_foot_cont[i][j] x_r_cont = nb_contacts * [None] x_l_cont = nb_contacts * [None] # computation om_tilde_r_foot = get_tilde(om_r_foot) om_tilde_l_foot = get_tilde(om_l_foot) x_r = x_r_foot + R_r_foot.T * Dpt_r_foot x_l = x_l_foot + R_l_foot.T * Dpt_l_foot xp_r = xp_r_foot + om_tilde_r_foot * (R_r_foot.T * Dpt_r_foot) xp_l = xp_l_foot + om_tilde_l_foot * (R_l_foot.T * Dpt_l_foot) for i in range(0, nb_contacts): x_r_cont[i] = x_r_foot + R_r_foot.T * Dpt_r_foot_cont[i] x_l_cont[i] = x_l_foot + R_l_foot.T * Dpt_l_foot_cont[i] # writing outputs out_file.write(' // right foot absolute position\n') for i in range(0,3): out_file.write(' in_out.r_Rfoot[{}] = {};\n'.format(i, x_r[i])) out_file.write('\n') out_file.write(' // right foot absolute velocity\n') for i in range(0,3): out_file.write(' in_out.rp_Rfoot[{}] = {};\n'.format(i, xp_r[i])) out_file.write('\n') out_file.write(' // right foot jacobian\n') out_file.write(' if (flag_jacob)\n {\n') flag_first = 0 for i in range (1, nb_bodies): flag_print = 0 for j in range(0, 3): cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_r[j], 'q{}'.format(i+1)) if cur_jac != 0: if not flag_first: flag_first = 1 flag_print = 1 elif not flag_print: flag_print = 1 out_file.write('\n') out_file.write(' in_out.r_Rfoot_der[{}][{}] = 
{};\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\n\n') out_file.write(' // left foot absolute position\n') for i in range(0,3): out_file.write(' in_out.r_Lfoot[{}] = {};\n'.format(i, x_l[i])) out_file.write('\n') out_file.write(' // left foot absolute velocity\n') for i in range(0,3): out_file.write(' in_out.rp_Lfoot[{}] = {};\n'.format(i, xp_l[i])) out_file.write('\n') out_file.write(' // left foot jacobian\n') out_file.write(' if (flag_jacob)\n {\n') flag_first = 0 for i in range (1, nb_bodies): flag_print = 0 for j in range(0, 3): cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_l[j], 'q{}'.format(i+1)) if cur_jac != 0: if not flag_first: flag_first = 1 flag_print = 1 elif not flag_print: flag_print = 1 out_file.write('\n') out_file.write(' in_out.r_Lfoot_der[{}][{}] = {};\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\n\n') out_file.write(' // right foot contact points absolute position\n') for i in range(0, nb_contacts): for j in range(0, 3): out_file.write(' in_out.r_Rfoot_cont[{}][{}] = {};\n'.format(i, j, x_r_cont[i][j])) out_file.write('\n') out_file.write(' // right foot contact points jacobian\n') out_file.write(' if (flag_jacob)\n {\n') flag_first = 0 for i in range(0, nb_contacts): for j in range (1, nb_bodies): flag_print = 0 for k in range(0, 3): cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_r_cont[i][k], 'q{}'.format(j+1)) if cur_jac != 0: if not flag_first: flag_first = 1 flag_print = 1 elif not flag_print: flag_print = 1 out_file.write('\n') out_file.write(' in_out.r_Rfoot_cont_der[{}][{}][{}] = {};\n'.format(i, get_string_enum(joint_id_names[j]), k, cur_jac)) out_file.write(' }\n\n') out_file.write(' // left foot contact points absolute position\n') for i in range(0, nb_contacts): for j in range(0, 3): out_file.write(' in_out.r_Lfoot_cont[{}][{}] = {};\n'.format(i, j, x_l_cont[i][j])) out_file.write('\n') out_file.write(' // left foot contact points jacobian\n') out_file.write(' if (flag_jacob)\n {\n') flag_first = 0 for i in range(0, nb_contacts): for j in range (1, nb_bodies): flag_print = 0 for k in range(0, 3): cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_l_cont[i][k], 'q{}'.format(j+1)) if cur_jac != 0: if not flag_first: flag_first = 1 flag_print = 1 elif not flag_print: flag_print = 1 out_file.write('\n') out_file.write(' in_out.r_Lfoot_cont_der[{}][{}][{}] = {};\n'.format(i, get_string_enum(joint_id_names[j]), k, cur_jac)) out_file.write(' }\n\n') out_file.write(' // feet absolute orientation\n') for i in range(0, 9): out_file.write(' in_out.Rfoot_or[{}] = {};\n'.format(i, R_r_foot[i])) out_file.write('\n') for i in range(0, 9): out_file.write(' in_out.Lfoot_or[{}] = {};\n'.format(i, R_l_foot[i])) out_file.write('\n') out_file.write(' // right foot absolute orientation jacobian\n') out_file.write(' if (flag_jacob)\n {\n') flag_first = 0 for i in range (1, nb_bodies): flag_print = 0 for j in range(0,9): cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_r_foot[j], 'q{}'.format(i+1)) if cur_jac != 0: if not flag_first: flag_first = 1 flag_print = 1 elif not flag_print: flag_print = 1 out_file.write('\n') out_file.write(' in_out.Rfoot_or_der[{}][{}] = {};\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\n\n') out_file.write(' // left foot absolute orientation jacobian\n') out_file.write(' if (flag_jacob)\n {\n') flag_first = 0 for i in range (1, nb_bodies): flag_print = 0 for j in range(0,9): cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_l_foot[j], 'q{}'.format(i+1)) if cur_jac != 0: if not 
flag_first: flag_first = 1 flag_print = 1 elif not flag_print: flag_print = 1 out_file.write('\n') out_file.write(' in_out.Lfoot_or_der[{}][{}] = {};\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\n\n') out_file.write(' // right foot orientation matrix as angles [rad]\n') yaw_pitch_roll_angles(out_file, 'theta_Rfoot', R[r_foot_id], 1) out_file.write('\n') out_file.write(' // left foot orientation matrix as angles [rad]\n') yaw_pitch_roll_angles(out_file, 'theta_Lfoot', R[l_foot_id], 1) out_file.write('\n') out_file.write(' c_y_Rfoot = cos(in_out.theta_Rfoot[1]);\n') out_file.write(' c_y_Lfoot = cos(in_out.theta_Lfoot[1]);\n') out_file.write(' c_z_Rfoot = cos(in_out.theta_Rfoot[2]);\n') out_file.write(' c_z_Lfoot = cos(in_out.theta_Lfoot[2]);\n\n') out_file.write(' s_y_Rfoot = sin(in_out.theta_Rfoot[1]);\n') out_file.write(' s_y_Lfoot = sin(in_out.theta_Lfoot[1]);\n') out_file.write(' s_z_Rfoot = sin(in_out.theta_Rfoot[2]);\n') out_file.write(' s_z_Lfoot = sin(in_out.theta_Lfoot[2]);\n\n') out_file.write(' if ((!c_y_Rfoot) || (!c_y_Lfoot))\n {\n') out_file.write(' return;\n }\n\n') out_file.write(' inv_c_y_Rfoot = 1.0 / c_y_Rfoot;\n') out_file.write(' inv_c_y_Lfoot = 1.0 / c_y_Lfoot;\n\n') out_file.write(' // right foot orientation angle derivatives [rad/s]\n') theta_dot_compute(out_file, om[r_foot_id], 'omega_Rfoot', 'Rfoot') out_file.write('\n') out_file.write(' // left foot orientation angle derivatives [rad/s]\n') theta_dot_compute(out_file, om[l_foot_id], 'omega_Lfoot', 'Lfoot') out_file.write('\n') # compute the wrists position, velocity and orientation def wrists_compute(out_file, joint_id_names, R, x, xp, om, Rj, xj, xgj, r_elb_id, l_elb_id, r_wrist_x, r_wrist_y, r_wrist_z): # symbolic variables declarations x_r_elb = x[r_elb_id] x_l_elb = x[l_elb_id] xp_r_elb = xp[r_elb_id] xp_l_elb = xp[l_elb_id] om_r_elb = om[r_elb_id] om_l_elb = om[l_elb_id] R_r_elb = R[r_elb_id] R_l_elb = R[l_elb_id] Dpt_r_wrist = sp.zeros(3, 1) Dpt_l_wrist = sp.zeros(3, 1) Dpt_r_wrist[0] = r_wrist_x Dpt_r_wrist[1] = r_wrist_y Dpt_r_wrist[2] = r_wrist_z Dpt_l_wrist[0] = r_wrist_x Dpt_l_wrist[1] = -r_wrist_y Dpt_l_wrist[2] = r_wrist_z # computation om_tilde_r_elb = get_tilde(om_r_elb) om_tilde_l_elb = get_tilde(om_l_elb) x_r = x_r_elb + R_r_elb.T * Dpt_r_wrist x_l = x_l_elb + R_l_elb.T * Dpt_l_wrist xp_r = xp_r_elb + om_tilde_r_elb * (R_r_elb.T * Dpt_r_wrist) xp_l = xp_l_elb + om_tilde_l_elb * (R_l_elb.T * Dpt_l_wrist) # writing outputs out_file.write(' // right wrist absolute position\n') for i in range(0,3): out_file.write(' in_out.r_Rwrist[{}] = {};\n'.format(i, x_r[i])) out_file.write('\n') out_file.write(' // right wrist absolute velocity\n') for i in range(0,3): out_file.write(' in_out.rp_Rwrist[{}] = {};\n'.format(i, xp_r[i])) out_file.write('\n') out_file.write(' // right wrist jacobian\n') out_file.write(' if (flag_jacob)\n {\n') flag_first = 0 for i in range (1, nb_bodies): flag_print = 0 for j in range(0, 3): cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_r[j], 'q{}'.format(i+1)) if cur_jac != 0: if not flag_first: flag_first = 1 flag_print = 1 elif not flag_print: flag_print = 1 out_file.write('\n') out_file.write(' in_out.r_Rwrist_der[{}][{}] = {};\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\n\n') out_file.write(' // left wrist absolute position\n') for i in range(0,3): out_file.write(' in_out.r_Lwrist[{}] = {};\n'.format(i, x_l[i])) out_file.write('\n') out_file.write(' // left wrist absolute velocity\n') for i in range(0,3): 
out_file.write(' in_out.rp_Lwrist[{}] = {};\n'.format(i, xp_l[i])) out_file.write('\n') out_file.write(' // left wrist jacobian\n') out_file.write(' if (flag_jacob)\n {\n') flag_first = 0 for i in range (1, nb_bodies): flag_print = 0 for j in range(0, 3): cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_l[j], 'q{}'.format(i+1)) if cur_jac != 0: if not flag_first: flag_first = 1 flag_print = 1 elif not flag_print: flag_print = 1 out_file.write('\n') out_file.write(' in_out.r_Lwrist_der[{}][{}] = {};\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\n\n') out_file.write(' // wrists absolute orientation\n') for i in range(0, 9): out_file.write(' in_out.Rwrist_or[{}] = {};\n'.format(i, R_r_elb[i])) out_file.write('\n') for i in range(0, 9): out_file.write(' in_out.Lwrist_or[{}] = {};\n'.format(i, R_l_elb[i])) out_file.write('\n') out_file.write(' // right wrist absolute orientation jacobian\n') out_file.write(' if (flag_jacob)\n {\n') flag_first = 0 for i in range (1, nb_bodies): flag_print = 0 for j in range(0,9): cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_r_elb[j], 'q{}'.format(i+1)) if cur_jac != 0: if not flag_first: flag_first = 1 flag_print = 1 elif not flag_print: flag_print = 1 out_file.write('\n') out_file.write(' in_out.Rwrist_or_der[{}][{}] = {};\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\n\n') out_file.write(' // left wrist absolute orientation jacobian\n') out_file.write(' if (flag_jacob)\n {\n') flag_first = 0 for i in range (1, nb_bodies): flag_print = 0 for j in range(0,9): cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_l_elb[j], 'q{}'.format(i+1)) if cur_jac != 0: if not flag_first: flag_first = 1 flag_print = 1 elif not flag_print: flag_print = 1 out_file.write('\n') out_file.write(' in_out.Lwrist_or_der[{}][{}] = {};\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac)) out_file.write(' }\n\n') # get a string for the enumeration of joints def get_string_enum(cur_string): cur_split = cur_string.split('_') if len(cur_split) >= 2: new_string = cur_split[0] for i in range(1, len(cur_split)-1): new_string = '{}{}'.format(new_string, cur_split[i]) else: new_string = cur_string cur_split = filter(None, re.split("([A-Z][^A-Z]*)", new_string)) new_string = cur_split[0].upper() for i in range(1, len(cur_split)): new_string = '{}_{}'.format(new_string, cur_split[i].upper()) return new_string # write the end of the file def write_file_end(out_file): out_file.write('}\n') # print matrix components declaration def write_matrix_declaration(out_file, prefix): out_file.write(' double ') for i in range(0,3): for j in range(0,3): out_file.write('{}{}{}'.format(prefix, i+1, j+1)) if i == 2 and j == 2: out_file.write(';\n') else: out_file.write(', ') # print variables declaration def write_variables_declaration(out_file, prefix, min, max): out_file.write(' double ') for i in range(min, max+1): out_file.write('{}{}'.format(prefix, i)) if i == max: out_file.write(';\n') else: out_file.write(', ') # variables initialization def write_intialization(out_file, nb_bodies, joint_id_names): out_file.write(' // -- variables initialization -- //\n') out_file.write('\n // IMU - rotation matrices\n') for i in range(0, 3): for j in range(0, 3): out_file.write(' IMU{}{} = in_out.IMU_Orientation[{}];\n'.format(i+1, j+1, 3*i+j)) out_file.write('\n // IMU - angles velocity\n') for i in range(0, 3): out_file.write(' omega_{} = in_out.IMU_Angular_Rate[{}];\n'.format(i+1, i)) out_file.write('\n // joint cosines\n') for i in range(1, nb_bodies): 
out_file.write(' c{} = cos(in_out.q_mot[{}]);\n'.format(i+1, joint_id_names[i])) out_file.write('\n // joint sines\n') for i in range(1, nb_bodies): out_file.write(' s{} = sin(in_out.q_mot[{}]);\n'.format(i+1, joint_id_names[i])) out_file.write('\n // joint relative velocities\n') for i in range(1, nb_bodies): out_file.write(' Om{} = in_out.qd_mot[{}];\n'.format(i+1, joint_id_names[i])) # write symbolic vector and replace symbolic variable by its name def write_symb_vector(out_file, vector, start_name, end_name): new_vector = sp.zeros(3, 1) flag_print = 0 for i in range(0,3): if vector[i] == 0 or vector[i] == 1: new_vector[i] = vector[i] else: flag_print = 1 elem_name = '{}{}{}'.format(start_name, i+1, end_name) out_file.write(' {} = {};\n'.format(elem_name, vector[i]).replace('1.0*','')) new_vector[i] = sp.Symbol(elem_name) if flag_print: out_file.write('\n') return new_vector # write symbolic matrix and replace symbolic variable by its name def write_symb_matrix(out_file, matrix, start_name, end_name): new_matrix = sp.zeros(3, 3) flag_print = 0 for i in range(0,3): for j in range(0,3): if matrix[i,j] == 0 or matrix[i,j] == 1: new_matrix[i,j] = matrix[i,j] else: flag_print = 1 elem_name = '{}{}{}{}'.format(start_name, i+1, j+1, end_name) out_file.write(' {} = {};\n'.format(elem_name, matrix[i,j]).replace('1.0*','')) new_matrix[i,j] = sp.Symbol(elem_name) if flag_print: out_file.write('\n') return new_matrix # save the symbolic vector for print def print_save_symb_vector(vector, start_name, end_name): new_vector = sp.zeros(3, 1) save_vector = 3 * [None] for i in range(0,3): if vector[i] == 0 or vector[i] == 1: new_vector[i] = vector[i] save_vector[i] = None else: elem_name = '{}{}{}'.format(start_name, i+1, end_name) save_vector[i] = ' {} = {};\n'.format(elem_name, vector[i]).replace('1.0*','') new_vector[i] = sp.Symbol(elem_name) return new_vector, save_vector # save the symbolic matrix for print def print_save_symb_matrix(matrix, start_name, end_name): new_matrix = sp.zeros(3, 3) save_matrix = 9 * [None] for i in range(0,3): for j in range(0,3): if matrix[i,j] == 0 or matrix[i,j] == 1: new_matrix[i,j] = matrix[i,j] save_matrix[3*i+j] = None else: elem_name = '{}{}{}{}'.format(start_name, i+1, j+1, end_name) save_matrix[3*i+j] = ' {} = {};\n'.format(elem_name, matrix[i,j]).replace('1.0*','') new_matrix[i,j] = sp.Symbol(elem_name) return new_matrix, save_matrix # write symbolic jacobian of a rotation matrix def write_symb_Rj(nb_bodies, Rj, xj, xgj, Rj_print, R_matrix, index): # loop on all the joints for i in range (1, nb_bodies): new_matrix = sp.zeros(3, 3) # loop on all the matrix elements for j in range(0, 9): new_matrix[j] = symbolic_jacob_der(Rj, xj, xgj, R_matrix[j], 'q{}'.format(i+1)) [Rj[index-1][i], Rj_print[index-1][i]] = print_save_symb_matrix(new_matrix, 'R{}_'.format(index), '_d{}'.format(i+1)) # write symbolic jacobian of an anchor point def write_symb_xj(nb_bodies, Rj, xj, xgj, xj_print, x_vector, index): # loop on all the joints for i in range (1, nb_bodies): new_vector = sp.zeros(3, 1) # loop on all the vector elements for j in range(0, 3): new_vector[j] = symbolic_jacob_der(Rj, xj, xgj, x_vector[j], 'q{}'.format(i+1)) [xj[index-1][i], xj_print[index-1][i]] = print_save_symb_vector(new_vector, 'x{}_'.format(index), '_d{}'.format(i+1)) # write symbolic jacobian of a com point def write_symb_xgj(nb_bodies, Rj, xj, xgj, xgj_print, x_vector, index): # loop on all the joints for i in range (1, nb_bodies): new_vector = sp.zeros(3, 1) # loop on all the vector elements for j in 
range(0, 3): new_vector[j] = symbolic_jacob_der(Rj, xj, xgj, x_vector[j], 'q{}'.format(i+1)) [xgj[index-1][i], xgj_print[index-1][i]] = print_save_symb_vector(new_vector, 'xg{}_'.format(index), '_d{}'.format(i+1)) # symbolic computation def symbolic_computation(out_file, nb_bodies, joint_id_names, rot_axis, parent_body_index, Dpt, Dg, M): out_file.write('\n\n // -- symbolic computation -- //\n') # Rj, xj, xgj and xgj (jacobian) Rj = nb_bodies*[None] xj = nb_bodies*[None] xgj = nb_bodies*[None] Rj_print = nb_bodies*[None] xj_print = nb_bodies*[None] xgj_print = nb_bodies*[None] for i in range(0, nb_bodies): Rj[i] = nb_bodies*[None] xj[i] = nb_bodies*[None] xgj[i] = nb_bodies*[None] Rj_print[i] = nb_bodies*[None] xj_print[i] = nb_bodies*[None] xgj_print[i] = nb_bodies*[None] for j in range(0, nb_bodies-1): Rj[i][j] = sp.zeros(3, 3) xj[i][j] = sp.zeros(3, 1) xgj[i][j] = sp.zeros(3, 1) Rj_print[i][j] = 9 * [None] xj_print[i][j] = 3 * [None] xgj_print[i][j] = 3 * [None] # rotation matrices out_file.write('\n // rotation matrices\n') R = nb_bodies*[None] Rt = nb_bodies*[None] Rd = nb_bodies*[None] Rd[0] = sp.zeros(3, 3) R[0] = sp.zeros(3, 3) for i in range(0, 3): for j in range(0, 3): R[0][i,j] = sp.Symbol('IMU{}{}'.format(i+1, j+1)) write_symb_Rj(nb_bodies, Rj, xj, xgj, Rj_print, R[0], 1) R[0] = write_symb_matrix(out_file, R[0], 'R1_', '') Rt[0] = R[0].T for i in range(1, nb_bodies): Rd[i] = get_rotation_matrix(rot_axis[i], 1, sp.Symbol('c{}'.format(i+1)), sp.Symbol('s{}'.format(i+1))) R[i] = Rd[i] * R[parent_body_index[i]] write_symb_Rj(nb_bodies, Rj, xj, xgj, Rj_print, R[i], i+1) R[i] = write_symb_matrix(out_file, R[i], 'R{}_'.format(i+1), '') Rt[i] = R[i].T # jacobian rotation matrices out_file.write('\n // jacobian rotation matrices\n') out_file.write(' if (flag_jacob)\n {\n') flag_first = 0 for i in range(0, nb_bodies): for j in range(1, nb_bodies): flag_print = 0 for k in range(0, 9): if Rj_print[i][j][k] != None: if not flag_first: flag_first = 1 flag_print = 1 elif not flag_print: flag_print = 1 out_file.write('\n') out_file.write('{}'.format(Rj_print[i][j][k])) out_file.write(' }\n') # omega out_file.write('\n // joint absolute velocities\n') Om = nb_bodies*[None] om = nb_bodies*[None] om_tilde = nb_bodies*[None] Om[0] = sp.zeros(3, 1) om[0] = sp.zeros(3, 1) for i in range(0,3): om[0][i] = sp.Symbol('omega_{}'.format(i+1)) om[0] = write_symb_vector(out_file, om[0], 'om1_', '') om_tilde[0] = get_tilde(om[0]) for i in range(1, nb_bodies): parent_id = parent_body_index[i] Om[i] = get_vector_axis(rot_axis[i], 1, sp.Symbol('Om{}'.format(i+1))) om[i] = om[parent_id] + Rt[parent_id] * Om[i] om[i] = write_symb_vector(out_file, om[i], 'om{}_'.format(i+1), '') om_tilde[i] = get_tilde(om[i]) # x & xp out_file.write('\n // anchor point absolute positions and velocities\n') x = nb_bodies*[None] xp = nb_bodies*[None] x[0] = Rt[0] * Dpt[0] xp[0] = om_tilde[0] * (Rt[0] * Dpt[0]) write_symb_xj(nb_bodies, Rj, xj, xgj, xj_print, x[0], 1) x[0] = write_symb_vector(out_file, x[0], 'x1_', '') xp[0] = write_symb_vector(out_file, xp[0], 'xp1_', '') for i in range(1, nb_bodies): parent_id = parent_body_index[i] x[i] = x[parent_id] + Rt[parent_id] * Dpt[i] xp[i] = xp[parent_id] + om_tilde[parent_id] * (Rt[parent_id] * Dpt[i]) write_symb_xj(nb_bodies, Rj, xj, xgj, xj_print, x[i], i+1) x[i] = write_symb_vector(out_file, x[i], 'x{}_'.format(i+1), '') xp[i] = write_symb_vector(out_file, xp[i], 'xp{}_'.format(i+1), '') # jacobian x out_file.write('\n // jacobian anchor point positions\n') out_file.write(' if 
(flag_jacob)\n {\n') flag_first = 0 for i in range(0, nb_bodies): for j in range(1, nb_bodies): flag_print = 0 for k in range(0, 3): if xj_print[i][j][k] != None: if not flag_first: flag_first = 1 flag_print = 1 elif not flag_print: flag_print = 1 out_file.write('\n') out_file.write('{}'.format(xj_print[i][j][k])) out_file.write(' }\n') # xg & xgp out_file.write('\n // com absolute positions and velocities\n') xg = nb_bodies*[None] xgp = nb_bodies*[None] for i in range(0, nb_bodies): xg[i] = x[i] + Rt[i] * Dg[i] xgp[i] = xp[i] + om_tilde[i] * (Rt[i] * Dg[i]) write_symb_xgj(nb_bodies, Rj, xj, xgj, xgj_print, xg[i], i+1) xg[i] = write_symb_vector(out_file, xg[i], 'xg{}_'.format(i+1), '') xgp[i] = write_symb_vector(out_file, xgp[i], 'xgp{}_'.format(i+1), '') # jacobian xg out_file.write('\n // jacobian com absolute positions\n') out_file.write(' if (flag_jacob)\n {\n') flag_first = 0 for i in range(0, nb_bodies): for j in range(1, nb_bodies): flag_print = 0 for k in range(0, 3): if xgj_print[i][j][k] != None: if not flag_first: flag_first = 1 flag_print = 1 elif not flag_print: flag_print = 1 out_file.write('\n') out_file.write('{}'.format(xgj_print[i][j][k])) out_file.write(' }\n') # results out_file.write('\n // -- Collecting results -- //\n\n') com_compute(out_file, nb_bodies, joint_id_names, M, xg, xgp, xgj) feet_compute(out_file, joint_id_names, R, x, xp, om, Rj, xj, xgj, 6, 12, -0.06, 0.08, -0.045, 0.045) wrists_compute(out_file, joint_id_names, R, x, xp, om, Rj, xj, xgj, 19, 23, -0.02, -0.005, -0.225) torso_waist_angles(out_file, R, om, 0, 15) # generate the symbolic output file def gen_symbolic_out(out_file_name, nb_bodies, rot_axis, parent_body_index, joint_id_names, Dpt, Dg, M): # temporary file in_temp = './{}_temp.cc'.format(out_file_name) file_temp = open(in_temp, 'w') # beginning of the file write_file_beginning(file_temp, joint_id_names) # variables initialization write_intialization(file_temp, nb_bodies, joint_id_names) # symbolic computation symbolic_computation(file_temp, nb_bodies, joint_id_names, rot_axis, parent_body_index, Dpt, Dg, M) # end of the file write_file_end(file_temp) file_temp.close() # output file out_file = open('./{}.cc'.format(out_file_name), 'w') with open(in_temp, 'r') as f: # loop on all the lines for line in f: # declaration if len(line.split('// -- variables initialization -- //')) != 1: out_file.write(' // -- variables declaration -- //\n\n') print_all_declaration(in_temp, out_file, 100) out_file.write('\n\n') # copy temporary file out_file.write(line) out_file.close() # remove temporary file os.remove(in_temp) # main script # rotation axis for each joint before body i (1:x, 2:y, 3:z) rot_axis = np.array([0, # waist 2, 1, 3, 2, 1, 2, # right leg 2, 1, 3, 2, 1, 2, # left leg 1, 2, 3, # trunk 2, 1, 3, 2, # right arm 2, 1, 3, 2 # left arm ]) # parent index parent_body_index = np.array([ -1, # waist 0, 1, 2, 3, 4, 5, # right leg 0, 7, 8, 9, 10, 11, # left leg 0, 13, 14, # trunk 15, 16, 17, 18, # right arm 15, 20, 21, 22 # left arm ]) nb_bodies = len(parent_body_index) ## anchor point positions Dpt = nb_bodies*[None] # waist Dpt[0] = sp.Matrix([0.0, 0.0, 0.0]) # right leg Dpt[1] = sp.Matrix([0.0, sp.Symbol('DPT_2_2'), 0.0]) Dpt[2] = sp.Matrix([0.0, sp.Symbol('DPT_2_6'), 0.0]) Dpt[3] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_8')]) Dpt[4] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_10')]) Dpt[5] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_12')]) Dpt[6] = sp.Matrix([0.0, 0.0, 0.0]) # left leg Dpt[7] = sp.Matrix([0.0, sp.Symbol('DPT_2_3'), 0.0]) Dpt[8] = 
sp.Matrix([0.0, sp.Symbol('DPT_2_18'), 0.0]) Dpt[9] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_20')]) Dpt[10] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_22')]) Dpt[11] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_24')]) Dpt[12] = sp.Matrix([0.0, 0.0, 0.0]) # trunk Dpt[13] = sp.Matrix([sp.Symbol('DPT_1_4'), 0.0, sp.Symbol('DPT_3_4')]) Dpt[14] = sp.Matrix([0.0, 0.0, 0.0]) Dpt[15] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_32')]) # right arm Dpt[16] = sp.Matrix([sp.Symbol('DPT_1_36'), sp.Symbol('DPT_2_36'), sp.Symbol('DPT_3_36')]) Dpt[17] = sp.Matrix([0.0, sp.Symbol('DPT_2_39'), 0.0]) Dpt[18] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_41')]) Dpt[19] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_43')]) # left arm Dpt[20] = sp.Matrix([sp.Symbol('DPT_1_37'), sp.Symbol('DPT_2_37'), sp.Symbol('DPT_3_37')]) Dpt[21] = sp.Matrix([0.0, sp.Symbol('DPT_2_46'), 0.0]) Dpt[22] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_48')]) Dpt[23] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_50')]) ## COM positions Dg = nb_bodies*[None] # waist Dg[0] = sp.Matrix([sp.Symbol('L_1_6'), sp.Symbol('L_2_6'), sp.Symbol('L_3_6')]) # right leg Dg[1] = sp.Matrix([sp.Symbol('L_1_7') , sp.Symbol('L_2_7') , sp.Symbol('L_3_7')]) Dg[2] = sp.Matrix([sp.Symbol('L_1_8') , sp.Symbol('L_2_8') , sp.Symbol('L_3_8')]) Dg[3] = sp.Matrix([sp.Symbol('L_1_9') , sp.Symbol('L_2_9') , sp.Symbol('L_3_9')]) Dg[4] = sp.Matrix([sp.Symbol('L_1_10'), sp.Symbol('L_2_10'), sp.Symbol('L_3_10')]) Dg[5] = sp.Matrix([sp.Symbol('L_1_11'), sp.Symbol('L_2_11'), sp.Symbol('L_3_11')]) Dg[6] = sp.Matrix([sp.Symbol('L_1_12'), 0.0 , sp.Symbol('L_3_12')]) # left leg Dg[7] = sp.Matrix([sp.Symbol('L_1_13'), sp.Symbol('L_2_13'), sp.Symbol('L_3_13')]) Dg[8] = sp.Matrix([sp.Symbol('L_1_14'), sp.Symbol('L_2_14'), sp.Symbol('L_3_14')]) Dg[9] = sp.Matrix([sp.Symbol('L_1_15'), sp.Symbol('L_2_15'), sp.Symbol('L_3_15')]) Dg[10] = sp.Matrix([sp.Symbol('L_1_16'), sp.Symbol('L_2_16'), sp.Symbol('L_3_16')]) Dg[11] = sp.Matrix([sp.Symbol('L_1_17'), sp.Symbol('L_2_17'), sp.Symbol('L_3_17')]) Dg[12] = sp.Matrix([sp.Symbol('L_1_18'), 0.0 , sp.Symbol('L_3_18')]) # trunk Dg[13] = sp.Matrix([sp.Symbol('L_1_19'), sp.Symbol('L_2_19'), sp.Symbol('L_3_19')]) Dg[14] = sp.Matrix([sp.Symbol('L_1_20'), sp.Symbol('L_2_20'), sp.Symbol('L_3_20')]) Dg[15] = sp.Matrix([sp.Symbol('L_1_21'), sp.Symbol('L_2_21'), sp.Symbol('L_3_21')]) # right arm Dg[16] = sp.Matrix([sp.Symbol('L_1_22'), sp.Symbol('L_2_22'), sp.Symbol('L_3_22')]) Dg[17] = sp.Matrix([sp.Symbol('L_1_23'), sp.Symbol('L_2_23'), sp.Symbol('L_3_23')]) Dg[18] = sp.Matrix([sp.Symbol('L_1_24'), sp.Symbol('L_2_24'), sp.Symbol('L_3_24')]) Dg[19] = sp.Matrix([sp.Symbol('L_1_25'), sp.Symbol('L_2_25'), sp.Symbol('L_3_25')]) # left arm Dg[20] = sp.Matrix([sp.Symbol('L_1_26'), sp.Symbol('L_2_26'), sp.Symbol('L_3_26')]) Dg[21] = sp.Matrix([sp.Symbol('L_1_27'), sp.Symbol('L_2_27'), sp.Symbol('L_3_27')]) Dg[22] = sp.Matrix([sp.Symbol('L_1_28'), sp.Symbol('L_2_28'), sp.Symbol('L_3_28')]) Dg[23] = sp.Matrix([sp.Symbol('L_1_29'), sp.Symbol('L_2_29'), sp.Symbol('L_3_29')]) # masses M = np.array([ 'M_6', # waist 'M_7' , 'M_8' , 'M_9' , 'M_10', 'M_11', 'M_12', # right leg 'M_13', 'M_14', 'M_15', 'M_16', 'M_17', 'M_18', # left leg 'M_19', 'M_20', 'M_21', # trunk 'M_22', 'M_23', 'M_24', 'M_25', # right arm 'M_26', 'M_27', 'M_28', 'M_29' # left arm ]) # joint names joint_id_names = np.array(['0', # waist 'RightHipPitch_id', 'RightHipRoll_id', 'RightHipYaw_id', 'RightKneePitch_id', 'RightFootRoll_id', 'RightFootPitch_id', # right leg 'LeftHipPitch_id' , 'LeftHipRoll_id' , 'LeftHipYaw_id' , 
                           'LeftKneePitch_id', 'LeftFootRoll_id', 'LeftFootPitch_id',                 # left leg
                           'TorsoRoll_id', 'TorsoPitch_id', 'TorsoYaw_id',                            # trunk
                           'RightShPitch_id', 'RightShRoll_id', 'RightShYaw_id', 'RightElbPitch_id',  # right arm
                           'LeftShPitch_id', 'LeftShRoll_id', 'LeftShYaw_id', 'LeftElbPitch_id'       # left arm
                           ])

out_file_name = 'forward_kinematics'

gen_symbolic_out(out_file_name, nb_bodies, rot_axis, parent_body_index, joint_id_names, Dpt, Dg, M)
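Throughout the generator above, absolute velocities are obtained by multiplying the skew-symmetric "tilde" matrix of an angular velocity with a rotated position offset, e.g. xp[i] = xp[parent_id] + om_tilde[parent_id] * (Rt[parent_id] * Dpt[i]). The short check below is not part of the generator; it only verifies numerically the identity this relies on: for any 3-vectors om and v, tilde(om) @ v equals the cross product om x v.

import numpy as np

def tilde(v):
    # same layout as get_tilde() in the generator
    return np.array([[0.0, -v[2], v[1]],
                     [v[2], 0.0, -v[0]],
                     [-v[1], v[0], 0.0]])

om = np.array([0.1, -0.2, 0.3])
v = np.array([0.5, 0.4, -0.7])
assert np.allclose(tilde(om) @ v, np.cross(om, v))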
[((40420, 40506), 'numpy.array', 'np.array', (['[0, 2, 1, 3, 2, 1, 2, 2, 1, 3, 2, 1, 2, 1, 2, 3, 2, 1, 3, 2, 2, 1, 3, 2]'], {}), '([0, 2, 1, 3, 2, 1, 2, 2, 1, 3, 2, 1, 2, 1, 2, 3, 2, 1, 3, 2, 2, 1,\n 3, 2])\n', (40428, 40506), True, 'import numpy as np\n'), ((40648, 40748), 'numpy.array', 'np.array', (['[-1, 0, 1, 2, 3, 4, 5, 0, 7, 8, 9, 10, 11, 0, 13, 14, 15, 16, 17, 18, 15, \n 20, 21, 22]'], {}), '([-1, 0, 1, 2, 3, 4, 5, 0, 7, 8, 9, 10, 11, 0, 13, 14, 15, 16, 17, \n 18, 15, 20, 21, 22])\n', (40656, 40748), True, 'import numpy as np\n'), ((40977, 41003), 'sympy.Matrix', 'sp.Matrix', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (40986, 41003), True, 'import sympy as sp\n'), ((41293, 41319), 'sympy.Matrix', 'sp.Matrix', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (41302, 41319), True, 'import sympy as sp\n'), ((41616, 41642), 'sympy.Matrix', 'sp.Matrix', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (41625, 41642), True, 'import sympy as sp\n'), ((41733, 41759), 'sympy.Matrix', 'sp.Matrix', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (41742, 41759), True, 'import sympy as sp\n'), ((44477, 44683), 'numpy.array', 'np.array', (["['M_6', 'M_7', 'M_8', 'M_9', 'M_10', 'M_11', 'M_12', 'M_13', 'M_14', 'M_15',\n 'M_16', 'M_17', 'M_18', 'M_19', 'M_20', 'M_21', 'M_22', 'M_23', 'M_24',\n 'M_25', 'M_26', 'M_27', 'M_28', 'M_29']"], {}), "(['M_6', 'M_7', 'M_8', 'M_9', 'M_10', 'M_11', 'M_12', 'M_13',\n 'M_14', 'M_15', 'M_16', 'M_17', 'M_18', 'M_19', 'M_20', 'M_21', 'M_22',\n 'M_23', 'M_24', 'M_25', 'M_26', 'M_27', 'M_28', 'M_29'])\n", (44485, 44683), True, 'import numpy as np\n'), ((44796, 45259), 'numpy.array', 'np.array', (["['0', 'RightHipPitch_id', 'RightHipRoll_id', 'RightHipYaw_id',\n 'RightKneePitch_id', 'RightFootRoll_id', 'RightFootPitch_id',\n 'LeftHipPitch_id', 'LeftHipRoll_id', 'LeftHipYaw_id',\n 'LeftKneePitch_id', 'LeftFootRoll_id', 'LeftFootPitch_id',\n 'TorsoRoll_id', 'TorsoPitch_id', 'TorsoYaw_id', 'RightShPitch_id',\n 'RightShRoll_id', 'RightShYaw_id', 'RightElbPitch_id', 'LeftShPitch_id',\n 'LeftShRoll_id', 'LeftShYaw_id', 'LeftElbPitch_id']"], {}), "(['0', 'RightHipPitch_id', 'RightHipRoll_id', 'RightHipYaw_id',\n 'RightKneePitch_id', 'RightFootRoll_id', 'RightFootPitch_id',\n 'LeftHipPitch_id', 'LeftHipRoll_id', 'LeftHipYaw_id',\n 'LeftKneePitch_id', 'LeftFootRoll_id', 'LeftFootPitch_id',\n 'TorsoRoll_id', 'TorsoPitch_id', 'TorsoYaw_id', 'RightShPitch_id',\n 'RightShRoll_id', 'RightShYaw_id', 'RightElbPitch_id', 'LeftShPitch_id',\n 'LeftShRoll_id', 'LeftShYaw_id', 'LeftElbPitch_id'])\n", (44804, 45259), True, 'import numpy as np\n'), ((3891, 3961), 'numpy.array', 'np.array', (['[[0.0, -v[2], v[1]], [v[2], 0.0, -v[0]], [-v[1], v[0], 0.0]]'], {}), '([[0.0, -v[2], v[1]], [v[2], 0.0, -v[0]], [-v[1], v[0], 0.0]])\n', (3899, 3961), True, 'import numpy as np\n'), ((16556, 16570), 'sympy.zeros', 'sp.zeros', (['(3)', '(1)'], {}), '(3, 1)\n', (16564, 16570), True, 'import sympy as sp\n'), ((16585, 16599), 'sympy.zeros', 'sp.zeros', (['(3)', '(1)'], {}), '(3, 1)\n', (16593, 16599), True, 'import sympy as sp\n'), ((16618, 16639), 'sympy.Symbol', 'sp.Symbol', (['"""DPT_3_16"""'], {}), "('DPT_3_16')\n", (16627, 16639), True, 'import sympy as sp\n'), ((16657, 16678), 'sympy.Symbol', 'sp.Symbol', (['"""DPT_3_29"""'], {}), "('DPT_3_29')\n", (16666, 16678), True, 'import sympy as sp\n'), ((24536, 24550), 'sympy.zeros', 'sp.zeros', (['(3)', '(1)'], {}), '(3, 1)\n', (24544, 24550), True, 'import sympy as sp\n'), ((24566, 24580), 'sympy.zeros', 'sp.zeros', (['(3)', '(1)'], {}), '(3, 
1)\n', (24574, 24580), True, 'import sympy as sp\n'), ((30534, 30548), 'sympy.zeros', 'sp.zeros', (['(3)', '(1)'], {}), '(3, 1)\n', (30542, 30548), True, 'import sympy as sp\n'), ((31070, 31084), 'sympy.zeros', 'sp.zeros', (['(3)', '(3)'], {}), '(3, 3)\n', (31078, 31084), True, 'import sympy as sp\n'), ((31622, 31636), 'sympy.zeros', 'sp.zeros', (['(3)', '(1)'], {}), '(3, 1)\n', (31630, 31636), True, 'import sympy as sp\n'), ((32116, 32130), 'sympy.zeros', 'sp.zeros', (['(3)', '(3)'], {}), '(3, 3)\n', (32124, 32130), True, 'import sympy as sp\n'), ((34947, 34961), 'sympy.zeros', 'sp.zeros', (['(3)', '(3)'], {}), '(3, 3)\n', (34955, 34961), True, 'import sympy as sp\n'), ((34971, 34985), 'sympy.zeros', 'sp.zeros', (['(3)', '(3)'], {}), '(3, 3)\n', (34979, 34985), True, 'import sympy as sp\n'), ((36188, 36202), 'sympy.zeros', 'sp.zeros', (['(3)', '(1)'], {}), '(3, 1)\n', (36196, 36202), True, 'import sympy as sp\n'), ((36212, 36226), 'sympy.zeros', 'sp.zeros', (['(3)', '(1)'], {}), '(3, 1)\n', (36220, 36226), True, 'import sympy as sp\n'), ((40312, 40330), 'os.remove', 'os.remove', (['in_temp'], {}), '(in_temp)\n', (40321, 40330), False, 'import os\n'), ((16817, 16831), 'sympy.zeros', 'sp.zeros', (['(3)', '(1)'], {}), '(3, 1)\n', (16825, 16831), True, 'import sympy as sp\n'), ((16855, 16869), 'sympy.zeros', 'sp.zeros', (['(3)', '(1)'], {}), '(3, 1)\n', (16863, 16869), True, 'import sympy as sp\n'), ((17180, 17201), 'sympy.Symbol', 'sp.Symbol', (['"""DPT_3_16"""'], {}), "('DPT_3_16')\n", (17189, 17201), True, 'import sympy as sp\n'), ((28587, 28625), 're.split', 're.split', (['"""([A-Z][^A-Z]*)"""', 'new_string'], {}), "('([A-Z][^A-Z]*)', new_string)\n", (28595, 28625), False, 'import re\n'), ((32747, 32761), 'sympy.zeros', 'sp.zeros', (['(3)', '(3)'], {}), '(3, 3)\n', (32755, 32761), True, 'import sympy as sp\n'), ((33219, 33233), 'sympy.zeros', 'sp.zeros', (['(3)', '(1)'], {}), '(3, 1)\n', (33227, 33233), True, 'import sympy as sp\n'), ((33688, 33702), 'sympy.zeros', 'sp.zeros', (['(3)', '(1)'], {}), '(3, 1)\n', (33696, 33702), True, 'import sympy as sp\n'), ((41042, 41062), 'sympy.Symbol', 'sp.Symbol', (['"""DPT_2_2"""'], {}), "('DPT_2_2')\n", (41051, 41062), True, 'import sympy as sp\n'), ((41095, 41115), 'sympy.Symbol', 'sp.Symbol', (['"""DPT_2_6"""'], {}), "('DPT_2_6')\n", (41104, 41115), True, 'import sympy as sp\n'), ((41153, 41173), 'sympy.Symbol', 'sp.Symbol', (['"""DPT_3_8"""'], {}), "('DPT_3_8')\n", (41162, 41173), True, 'import sympy as sp\n'), ((41206, 41227), 'sympy.Symbol', 'sp.Symbol', (['"""DPT_3_10"""'], {}), "('DPT_3_10')\n", (41215, 41227), True, 'import sympy as sp\n'), ((41260, 41281), 'sympy.Symbol', 'sp.Symbol', (['"""DPT_3_12"""'], {}), "('DPT_3_12')\n", (41269, 41281), True, 'import sympy as sp\n'), ((41358, 41378), 'sympy.Symbol', 'sp.Symbol', (['"""DPT_2_3"""'], {}), "('DPT_2_3')\n", (41367, 41378), True, 'import sympy as sp\n'), ((41412, 41433), 'sympy.Symbol', 'sp.Symbol', (['"""DPT_2_18"""'], {}), "('DPT_2_18')\n", (41421, 41433), True, 'import sympy as sp\n'), ((41472, 41493), 'sympy.Symbol', 'sp.Symbol', (['"""DPT_3_20"""'], {}), "('DPT_3_20')\n", (41481, 41493), True, 'import sympy as sp\n'), ((41527, 41548), 'sympy.Symbol', 'sp.Symbol', (['"""DPT_3_22"""'], {}), "('DPT_3_22')\n", (41536, 41548), True, 'import sympy as sp\n'), ((41582, 41603), 'sympy.Symbol', 'sp.Symbol', (['"""DPT_3_24"""'], {}), "('DPT_3_24')\n", (41591, 41603), True, 'import sympy as sp\n'), ((41673, 41693), 'sympy.Symbol', 'sp.Symbol', (['"""DPT_1_4"""'], {}), "('DPT_1_4')\n", (41682, 
41693), True, 'import sympy as sp\n'), ((41700, 41720), 'sympy.Symbol', 'sp.Symbol', (['"""DPT_3_4"""'], {}), "('DPT_3_4')\n", (41709, 41720), True, 'import sympy as sp\n'), ((41791, 41812), 'sympy.Symbol', 'sp.Symbol', (['"""DPT_3_32"""'], {}), "('DPT_3_32')\n", (41800, 41812), True, 'import sympy as sp\n'), ((41849, 41870), 'sympy.Symbol', 'sp.Symbol', (['"""DPT_1_36"""'], {}), "('DPT_1_36')\n", (41858, 41870), True, 'import sympy as sp\n'), ((41872, 41893), 'sympy.Symbol', 'sp.Symbol', (['"""DPT_2_36"""'], {}), "('DPT_2_36')\n", (41881, 41893), True, 'import sympy as sp\n'), ((41895, 41916), 'sympy.Symbol', 'sp.Symbol', (['"""DPT_3_36"""'], {}), "('DPT_3_36')\n", (41904, 41916), True, 'import sympy as sp\n'), ((41945, 41966), 'sympy.Symbol', 'sp.Symbol', (['"""DPT_2_39"""'], {}), "('DPT_2_39')\n", (41954, 41966), True, 'import sympy as sp\n'), ((42005, 42026), 'sympy.Symbol', 'sp.Symbol', (['"""DPT_3_41"""'], {}), "('DPT_3_41')\n", (42014, 42026), True, 'import sympy as sp\n'), ((42060, 42081), 'sympy.Symbol', 'sp.Symbol', (['"""DPT_3_43"""'], {}), "('DPT_3_43')\n", (42069, 42081), True, 'import sympy as sp\n'), ((42117, 42138), 'sympy.Symbol', 'sp.Symbol', (['"""DPT_1_37"""'], {}), "('DPT_1_37')\n", (42126, 42138), True, 'import sympy as sp\n'), ((42140, 42161), 'sympy.Symbol', 'sp.Symbol', (['"""DPT_2_37"""'], {}), "('DPT_2_37')\n", (42149, 42161), True, 'import sympy as sp\n'), ((42163, 42184), 'sympy.Symbol', 'sp.Symbol', (['"""DPT_3_37"""'], {}), "('DPT_3_37')\n", (42172, 42184), True, 'import sympy as sp\n'), ((42213, 42234), 'sympy.Symbol', 'sp.Symbol', (['"""DPT_2_46"""'], {}), "('DPT_2_46')\n", (42222, 42234), True, 'import sympy as sp\n'), ((42273, 42294), 'sympy.Symbol', 'sp.Symbol', (['"""DPT_3_48"""'], {}), "('DPT_3_48')\n", (42282, 42294), True, 'import sympy as sp\n'), ((42328, 42349), 'sympy.Symbol', 'sp.Symbol', (['"""DPT_3_50"""'], {}), "('DPT_3_50')\n", (42337, 42349), True, 'import sympy as sp\n'), ((42420, 42438), 'sympy.Symbol', 'sp.Symbol', (['"""L_1_6"""'], {}), "('L_1_6')\n", (42429, 42438), True, 'import sympy as sp\n'), ((42440, 42458), 'sympy.Symbol', 'sp.Symbol', (['"""L_2_6"""'], {}), "('L_2_6')\n", (42449, 42458), True, 'import sympy as sp\n'), ((42460, 42478), 'sympy.Symbol', 'sp.Symbol', (['"""L_3_6"""'], {}), "('L_3_6')\n", (42469, 42478), True, 'import sympy as sp\n'), ((42513, 42531), 'sympy.Symbol', 'sp.Symbol', (['"""L_1_7"""'], {}), "('L_1_7')\n", (42522, 42531), True, 'import sympy as sp\n'), ((42534, 42552), 'sympy.Symbol', 'sp.Symbol', (['"""L_2_7"""'], {}), "('L_2_7')\n", (42543, 42552), True, 'import sympy as sp\n'), ((42555, 42573), 'sympy.Symbol', 'sp.Symbol', (['"""L_3_7"""'], {}), "('L_3_7')\n", (42564, 42573), True, 'import sympy as sp\n'), ((42595, 42613), 'sympy.Symbol', 'sp.Symbol', (['"""L_1_8"""'], {}), "('L_1_8')\n", (42604, 42613), True, 'import sympy as sp\n'), ((42616, 42634), 'sympy.Symbol', 'sp.Symbol', (['"""L_2_8"""'], {}), "('L_2_8')\n", (42625, 42634), True, 'import sympy as sp\n'), ((42637, 42655), 'sympy.Symbol', 'sp.Symbol', (['"""L_3_8"""'], {}), "('L_3_8')\n", (42646, 42655), True, 'import sympy as sp\n'), ((42677, 42695), 'sympy.Symbol', 'sp.Symbol', (['"""L_1_9"""'], {}), "('L_1_9')\n", (42686, 42695), True, 'import sympy as sp\n'), ((42698, 42716), 'sympy.Symbol', 'sp.Symbol', (['"""L_2_9"""'], {}), "('L_2_9')\n", (42707, 42716), True, 'import sympy as sp\n'), ((42719, 42737), 'sympy.Symbol', 'sp.Symbol', (['"""L_3_9"""'], {}), "('L_3_9')\n", (42728, 42737), True, 'import sympy as sp\n'), ((42759, 42778), 
'sympy.Symbol', 'sp.Symbol', (['"""L_1_10"""'], {}), "('L_1_10')\n", (42768, 42778), True, 'import sympy as sp\n'), ((42780, 42799), 'sympy.Symbol', 'sp.Symbol', (['"""L_2_10"""'], {}), "('L_2_10')\n", (42789, 42799), True, 'import sympy as sp\n'), ((42801, 42820), 'sympy.Symbol', 'sp.Symbol', (['"""L_3_10"""'], {}), "('L_3_10')\n", (42810, 42820), True, 'import sympy as sp\n'), ((42842, 42861), 'sympy.Symbol', 'sp.Symbol', (['"""L_1_11"""'], {}), "('L_1_11')\n", (42851, 42861), True, 'import sympy as sp\n'), ((42863, 42882), 'sympy.Symbol', 'sp.Symbol', (['"""L_2_11"""'], {}), "('L_2_11')\n", (42872, 42882), True, 'import sympy as sp\n'), ((42884, 42903), 'sympy.Symbol', 'sp.Symbol', (['"""L_3_11"""'], {}), "('L_3_11')\n", (42893, 42903), True, 'import sympy as sp\n'), ((42925, 42944), 'sympy.Symbol', 'sp.Symbol', (['"""L_1_12"""'], {}), "('L_1_12')\n", (42934, 42944), True, 'import sympy as sp\n'), ((42967, 42986), 'sympy.Symbol', 'sp.Symbol', (['"""L_3_12"""'], {}), "('L_3_12')\n", (42976, 42986), True, 'import sympy as sp\n'), ((43021, 43040), 'sympy.Symbol', 'sp.Symbol', (['"""L_1_13"""'], {}), "('L_1_13')\n", (43030, 43040), True, 'import sympy as sp\n'), ((43042, 43061), 'sympy.Symbol', 'sp.Symbol', (['"""L_2_13"""'], {}), "('L_2_13')\n", (43051, 43061), True, 'import sympy as sp\n'), ((43063, 43082), 'sympy.Symbol', 'sp.Symbol', (['"""L_3_13"""'], {}), "('L_3_13')\n", (43072, 43082), True, 'import sympy as sp\n'), ((43105, 43124), 'sympy.Symbol', 'sp.Symbol', (['"""L_1_14"""'], {}), "('L_1_14')\n", (43114, 43124), True, 'import sympy as sp\n'), ((43126, 43145), 'sympy.Symbol', 'sp.Symbol', (['"""L_2_14"""'], {}), "('L_2_14')\n", (43135, 43145), True, 'import sympy as sp\n'), ((43147, 43166), 'sympy.Symbol', 'sp.Symbol', (['"""L_3_14"""'], {}), "('L_3_14')\n", (43156, 43166), True, 'import sympy as sp\n'), ((43189, 43208), 'sympy.Symbol', 'sp.Symbol', (['"""L_1_15"""'], {}), "('L_1_15')\n", (43198, 43208), True, 'import sympy as sp\n'), ((43210, 43229), 'sympy.Symbol', 'sp.Symbol', (['"""L_2_15"""'], {}), "('L_2_15')\n", (43219, 43229), True, 'import sympy as sp\n'), ((43231, 43250), 'sympy.Symbol', 'sp.Symbol', (['"""L_3_15"""'], {}), "('L_3_15')\n", (43240, 43250), True, 'import sympy as sp\n'), ((43273, 43292), 'sympy.Symbol', 'sp.Symbol', (['"""L_1_16"""'], {}), "('L_1_16')\n", (43282, 43292), True, 'import sympy as sp\n'), ((43294, 43313), 'sympy.Symbol', 'sp.Symbol', (['"""L_2_16"""'], {}), "('L_2_16')\n", (43303, 43313), True, 'import sympy as sp\n'), ((43315, 43334), 'sympy.Symbol', 'sp.Symbol', (['"""L_3_16"""'], {}), "('L_3_16')\n", (43324, 43334), True, 'import sympy as sp\n'), ((43357, 43376), 'sympy.Symbol', 'sp.Symbol', (['"""L_1_17"""'], {}), "('L_1_17')\n", (43366, 43376), True, 'import sympy as sp\n'), ((43378, 43397), 'sympy.Symbol', 'sp.Symbol', (['"""L_2_17"""'], {}), "('L_2_17')\n", (43387, 43397), True, 'import sympy as sp\n'), ((43399, 43418), 'sympy.Symbol', 'sp.Symbol', (['"""L_3_17"""'], {}), "('L_3_17')\n", (43408, 43418), True, 'import sympy as sp\n'), ((43441, 43460), 'sympy.Symbol', 'sp.Symbol', (['"""L_1_18"""'], {}), "('L_1_18')\n", (43450, 43460), True, 'import sympy as sp\n'), ((43483, 43502), 'sympy.Symbol', 'sp.Symbol', (['"""L_3_18"""'], {}), "('L_3_18')\n", (43492, 43502), True, 'import sympy as sp\n'), ((43534, 43553), 'sympy.Symbol', 'sp.Symbol', (['"""L_1_19"""'], {}), "('L_1_19')\n", (43543, 43553), True, 'import sympy as sp\n'), ((43555, 43574), 'sympy.Symbol', 'sp.Symbol', (['"""L_2_19"""'], {}), "('L_2_19')\n", (43564, 43574), True, 
'import sympy as sp\n'), ((43576, 43595), 'sympy.Symbol', 'sp.Symbol', (['"""L_3_19"""'], {}), "('L_3_19')\n", (43585, 43595), True, 'import sympy as sp\n'), ((43618, 43637), 'sympy.Symbol', 'sp.Symbol', (['"""L_1_20"""'], {}), "('L_1_20')\n", (43627, 43637), True, 'import sympy as sp\n'), ((43639, 43658), 'sympy.Symbol', 'sp.Symbol', (['"""L_2_20"""'], {}), "('L_2_20')\n", (43648, 43658), True, 'import sympy as sp\n'), ((43660, 43679), 'sympy.Symbol', 'sp.Symbol', (['"""L_3_20"""'], {}), "('L_3_20')\n", (43669, 43679), True, 'import sympy as sp\n'), ((43702, 43721), 'sympy.Symbol', 'sp.Symbol', (['"""L_1_21"""'], {}), "('L_1_21')\n", (43711, 43721), True, 'import sympy as sp\n'), ((43723, 43742), 'sympy.Symbol', 'sp.Symbol', (['"""L_2_21"""'], {}), "('L_2_21')\n", (43732, 43742), True, 'import sympy as sp\n'), ((43744, 43763), 'sympy.Symbol', 'sp.Symbol', (['"""L_3_21"""'], {}), "('L_3_21')\n", (43753, 43763), True, 'import sympy as sp\n'), ((43799, 43818), 'sympy.Symbol', 'sp.Symbol', (['"""L_1_22"""'], {}), "('L_1_22')\n", (43808, 43818), True, 'import sympy as sp\n'), ((43820, 43839), 'sympy.Symbol', 'sp.Symbol', (['"""L_2_22"""'], {}), "('L_2_22')\n", (43829, 43839), True, 'import sympy as sp\n'), ((43841, 43860), 'sympy.Symbol', 'sp.Symbol', (['"""L_3_22"""'], {}), "('L_3_22')\n", (43850, 43860), True, 'import sympy as sp\n'), ((43883, 43902), 'sympy.Symbol', 'sp.Symbol', (['"""L_1_23"""'], {}), "('L_1_23')\n", (43892, 43902), True, 'import sympy as sp\n'), ((43904, 43923), 'sympy.Symbol', 'sp.Symbol', (['"""L_2_23"""'], {}), "('L_2_23')\n", (43913, 43923), True, 'import sympy as sp\n'), ((43925, 43944), 'sympy.Symbol', 'sp.Symbol', (['"""L_3_23"""'], {}), "('L_3_23')\n", (43934, 43944), True, 'import sympy as sp\n'), ((43967, 43986), 'sympy.Symbol', 'sp.Symbol', (['"""L_1_24"""'], {}), "('L_1_24')\n", (43976, 43986), True, 'import sympy as sp\n'), ((43988, 44007), 'sympy.Symbol', 'sp.Symbol', (['"""L_2_24"""'], {}), "('L_2_24')\n", (43997, 44007), True, 'import sympy as sp\n'), ((44009, 44028), 'sympy.Symbol', 'sp.Symbol', (['"""L_3_24"""'], {}), "('L_3_24')\n", (44018, 44028), True, 'import sympy as sp\n'), ((44051, 44070), 'sympy.Symbol', 'sp.Symbol', (['"""L_1_25"""'], {}), "('L_1_25')\n", (44060, 44070), True, 'import sympy as sp\n'), ((44072, 44091), 'sympy.Symbol', 'sp.Symbol', (['"""L_2_25"""'], {}), "('L_2_25')\n", (44081, 44091), True, 'import sympy as sp\n'), ((44093, 44112), 'sympy.Symbol', 'sp.Symbol', (['"""L_3_25"""'], {}), "('L_3_25')\n", (44102, 44112), True, 'import sympy as sp\n'), ((44147, 44166), 'sympy.Symbol', 'sp.Symbol', (['"""L_1_26"""'], {}), "('L_1_26')\n", (44156, 44166), True, 'import sympy as sp\n'), ((44168, 44187), 'sympy.Symbol', 'sp.Symbol', (['"""L_2_26"""'], {}), "('L_2_26')\n", (44177, 44187), True, 'import sympy as sp\n'), ((44189, 44208), 'sympy.Symbol', 'sp.Symbol', (['"""L_3_26"""'], {}), "('L_3_26')\n", (44198, 44208), True, 'import sympy as sp\n'), ((44231, 44250), 'sympy.Symbol', 'sp.Symbol', (['"""L_1_27"""'], {}), "('L_1_27')\n", (44240, 44250), True, 'import sympy as sp\n'), ((44252, 44271), 'sympy.Symbol', 'sp.Symbol', (['"""L_2_27"""'], {}), "('L_2_27')\n", (44261, 44271), True, 'import sympy as sp\n'), ((44273, 44292), 'sympy.Symbol', 'sp.Symbol', (['"""L_3_27"""'], {}), "('L_3_27')\n", (44282, 44292), True, 'import sympy as sp\n'), ((44315, 44334), 'sympy.Symbol', 'sp.Symbol', (['"""L_1_28"""'], {}), "('L_1_28')\n", (44324, 44334), True, 'import sympy as sp\n'), ((44336, 44355), 'sympy.Symbol', 'sp.Symbol', (['"""L_2_28"""'], {}), 
"('L_2_28')\n", (44345, 44355), True, 'import sympy as sp\n'), ((44357, 44376), 'sympy.Symbol', 'sp.Symbol', (['"""L_3_28"""'], {}), "('L_3_28')\n", (44366, 44376), True, 'import sympy as sp\n'), ((44399, 44418), 'sympy.Symbol', 'sp.Symbol', (['"""L_1_29"""'], {}), "('L_1_29')\n", (44408, 44418), True, 'import sympy as sp\n'), ((44420, 44439), 'sympy.Symbol', 'sp.Symbol', (['"""L_2_29"""'], {}), "('L_2_29')\n", (44429, 44439), True, 'import sympy as sp\n'), ((44441, 44460), 'sympy.Symbol', 'sp.Symbol', (['"""L_3_29"""'], {}), "('L_3_29')\n", (44450, 44460), True, 'import sympy as sp\n'), ((4077, 4147), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.0, cosine, sine], [0.0, -sine, cosine]]'], {}), '([[1.0, 0.0, 0.0], [0.0, cosine, sine], [0.0, -sine, cosine]])\n', (4085, 4147), True, 'import numpy as np\n'), ((4410, 4480), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.0, cosine, -sine], [0.0, sine, cosine]]'], {}), '([[1.0, 0.0, 0.0], [0.0, cosine, -sine], [0.0, sine, cosine]])\n', (4418, 4480), True, 'import numpy as np\n'), ((4809, 4841), 'numpy.array', 'np.array', (['[[elem], [0.0], [0.0]]'], {}), '([[elem], [0.0], [0.0]])\n', (4817, 4841), True, 'import numpy as np\n'), ((5028, 5061), 'numpy.array', 'np.array', (['[[-elem], [0.0], [0.0]]'], {}), '([[-elem], [0.0], [0.0]])\n', (5036, 5061), True, 'import numpy as np\n'), ((30844, 30864), 'sympy.Symbol', 'sp.Symbol', (['elem_name'], {}), '(elem_name)\n', (30853, 30864), True, 'import sympy as sp\n'), ((31950, 31970), 'sympy.Symbol', 'sp.Symbol', (['elem_name'], {}), '(elem_name)\n', (31959, 31970), True, 'import sympy as sp\n'), ((34627, 34641), 'sympy.zeros', 'sp.zeros', (['(3)', '(3)'], {}), '(3, 3)\n', (34635, 34641), True, 'import sympy as sp\n'), ((34657, 34671), 'sympy.zeros', 'sp.zeros', (['(3)', '(1)'], {}), '(3, 1)\n', (34665, 34671), True, 'import sympy as sp\n'), ((34687, 34701), 'sympy.zeros', 'sp.zeros', (['(3)', '(1)'], {}), '(3, 1)\n', (34695, 34701), True, 'import sympy as sp\n'), ((4176, 4246), 'numpy.array', 'np.array', (['[[cosine, 0.0, -sine], [0.0, 1.0, 0.0], [sine, 0.0, cosine]]'], {}), '([[cosine, 0.0, -sine], [0.0, 1.0, 0.0], [sine, 0.0, cosine]])\n', (4184, 4246), True, 'import numpy as np\n'), ((4509, 4579), 'numpy.array', 'np.array', (['[[cosine, 0.0, sine], [0.0, 1.0, 0.0], [-sine, 0.0, cosine]]'], {}), '([[cosine, 0.0, sine], [0.0, 1.0, 0.0], [-sine, 0.0, cosine]])\n', (4517, 4579), True, 'import numpy as np\n'), ((4870, 4902), 'numpy.array', 'np.array', (['[[0.0], [elem], [0.0]]'], {}), '([[0.0], [elem], [0.0]])\n', (4878, 4902), True, 'import numpy as np\n'), ((5090, 5123), 'numpy.array', 'np.array', (['[[0.0], [-elem], [0.0]]'], {}), '([[0.0], [-elem], [0.0]])\n', (5098, 5123), True, 'import numpy as np\n'), ((31429, 31449), 'sympy.Symbol', 'sp.Symbol', (['elem_name'], {}), '(elem_name)\n', (31438, 31449), True, 'import sympy as sp\n'), ((32500, 32520), 'sympy.Symbol', 'sp.Symbol', (['elem_name'], {}), '(elem_name)\n', (32509, 32520), True, 'import sympy as sp\n'), ((4275, 4345), 'numpy.array', 'np.array', (['[[cosine, sine, 0.0], [-sine, cosine, 0.0], [0.0, 0.0, 1.0]]'], {}), '([[cosine, sine, 0.0], [-sine, cosine, 0.0], [0.0, 0.0, 1.0]])\n', (4283, 4345), True, 'import numpy as np\n'), ((4364, 4376), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4372, 4376), True, 'import numpy as np\n'), ((4608, 4678), 'numpy.array', 'np.array', (['[[cosine, -sine, 0.0], [sine, cosine, 0.0], [0.0, 0.0, 1.0]]'], {}), '([[cosine, -sine, 0.0], [sine, cosine, 0.0], [0.0, 0.0, 1.0]])\n', (4616, 4678), True, 
'import numpy as np\n'), ((4697, 4709), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4705, 4709), True, 'import numpy as np\n'), ((4931, 4963), 'numpy.array', 'np.array', (['[[0.0], [0.0], [elem]]'], {}), '([[0.0], [0.0], [elem]])\n', (4939, 4963), True, 'import numpy as np\n'), ((4982, 4994), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4990, 4994), True, 'import numpy as np\n'), ((5152, 5185), 'numpy.array', 'np.array', (['[[0.0], [0.0], [-elem]]'], {}), '([[0.0], [0.0], [-elem]])\n', (5160, 5185), True, 'import numpy as np\n'), ((5204, 5216), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5212, 5216), True, 'import numpy as np\n')]
0xiso/PyMISP
examples/last.py
20a340414422714dcf31389957343c663550ed1a
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from pymisp import PyMISP
from keys import misp_url, misp_key, misp_verifycert
import argparse
import os
import json

# Usage for pipe masters: ./last.py -l 5h | jq .


def init(url, key):
    return PyMISP(url, key, misp_verifycert, 'json')


def download_last(m, last, out=None):
    result = m.download_last(last)
    if out is None:
        if 'response' in result:
            print(json.dumps(result['response']))
        else:
            print('No results for that time period')
            exit(0)
    else:
        with open(out, 'w') as f:
            f.write(json.dumps(result['response']))


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Download latest events from a MISP instance.')
    parser.add_argument("-l", "--last", required=True, help="can be defined in days, hours, minutes (for example 5d or 12h or 30m).")
    parser.add_argument("-o", "--output", help="Output file")
    args = parser.parse_args()

    if args.output is not None and os.path.exists(args.output):
        print('Output file already exists, abord.')
        exit(0)

    misp = init(misp_url, misp_key)

    download_last(misp, args.last, args.output)
[((248, 289), 'pymisp.PyMISP', 'PyMISP', (['url', 'key', 'misp_verifycert', '"""json"""'], {}), "(url, key, misp_verifycert, 'json')\n", (254, 289), False, 'from pymisp import PyMISP\n'), ((692, 780), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Download latest events from a MISP instance."""'}), "(description=\n 'Download latest events from a MISP instance.')\n", (715, 780), False, 'import argparse\n'), ((1040, 1067), 'os.path.exists', 'os.path.exists', (['args.output'], {}), '(args.output)\n', (1054, 1067), False, 'import os\n'), ((436, 466), 'json.dumps', 'json.dumps', (["result['response']"], {}), "(result['response'])\n", (446, 466), False, 'import json\n'), ((619, 649), 'json.dumps', 'json.dumps', (["result['response']"], {}), "(result['response'])\n", (629, 649), False, 'import json\n')]
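For orientation, each element of the apis column pairs character offsets into the content field with the resolved call. The field meanings below are inferred from the examples in this dump rather than from a published schema, so treat the unpacking as an illustration only.

# Unpacking the first apis entry of the record above (variable names are guesses):
entry = ((248, 289), 'pymisp.PyMISP', 'PyMISP',
         (['url', 'key', 'misp_verifycert', '"""json"""'], {}),
         "(url, key, misp_verifycert, 'json')\n",
         (254, 260), False, 'from pymisp import PyMISP\n')
span_a, api_name, call_name, (args, kwargs), call_text, span_b, aliased, import_stmt = entry
# span_a and span_b both appear to be character offsets into content; api_name is the
# fully qualified API, call_name is how it was written at the call site.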
Chaoslecion123/Diver
saleor/dashboard/urls.py
8c5c493701422eada49cbf95b0b0add08f1ea561
from django.conf.urls import include, url
from django.views.generic.base import TemplateView

from . import views as core_views
from .category.urls import urlpatterns as category_urls
from .collection.urls import urlpatterns as collection_urls
from .customer.urls import urlpatterns as customer_urls
from .discount.urls import urlpatterns as discount_urls
from .menu.urls import urlpatterns as menu_urls
from .order.urls import urlpatterns as order_urls
from .page.urls import urlpatterns as page_urls
from .product.urls import urlpatterns as product_urls
from .search.urls import urlpatterns as search_urls
from .shipping.urls import urlpatterns as shipping_urls
from .sites.urls import urlpatterns as site_urls
from .staff.urls import urlpatterns as staff_urls
from .taxes.urls import urlpatterns as taxes_urls

# BEGIN :: SoftButterfly Extensions --------------------------------------------
from .brand.urls import urlpatterns as brand_urls
from .widget.slider.urls import urlpatterns as slider_urls
from .widget.banner.urls import urlpatterns as banner_urls
from .widget.scene.urls import urlpatterns as scene_urls
from .widget.benefit.urls import urlpatterns as benefit_urls
from .store.physical_store.urls import urlpatterns as store_urls
from .store.social_network.urls import urlpatterns as social_network_urls
from .store.special_page.urls import urlpatterns as special_page_urls
from .store.bank_account.urls import urlpatterns as bank_account_urls
from .store.footer_item.urls import urlpatterns as footer_item_urls
# END :: SoftButterfly Extensions ----------------------------------------------

urlpatterns = [
    url(r'^$', core_views.index, name='index'),
    url(r'^categories/', include(category_urls)),
    url(r'^collections/', include(collection_urls)),
    url(r'^orders/', include(order_urls)),
    url(r'^page/', include(page_urls)),
    url(r'^products/', include(product_urls)),
    url(r'^customers/', include(customer_urls)),
    url(r'^staff/', include(staff_urls)),
    url(r'^discounts/', include(discount_urls)),
    url(r'^settings/', include(
        site_urls + social_network_urls + special_page_urls +
        bank_account_urls + footer_item_urls)),  # Extensions
    url(r'^menu/', include(menu_urls)),
    url(r'^shipping/', include(shipping_urls)),
    url(r'^style-guide/', core_views.styleguide, name='styleguide'),
    url(r'^search/', include(search_urls)),
    url(r'^taxes/', include(taxes_urls)),
    url(r'^next/', TemplateView.as_view(template_name='dashboard/next.html')),
    # BEGIN :: SoftButterfly Extensions ----------------------------------------
    url(r'^brand/', include(brand_urls)),
    url(r'^slider/', include(slider_urls)),
    url(r'^banner/', include(banner_urls)),
    url(r'^scene/', include(scene_urls)),
    url(r'^store/', include(store_urls)),
    url(r'^benefit/', include(benefit_urls)),
    # END :: SoftButterfly Extensions ------------------------------------------
]
[((1630, 1671), 'django.conf.urls.url', 'url', (['"""^$"""', 'core_views.index'], {'name': '"""index"""'}), "('^$', core_views.index, name='index')\n", (1633, 1671), False, 'from django.conf.urls import include, url\n'), ((2295, 2357), 'django.conf.urls.url', 'url', (['"""^style-guide/"""', 'core_views.styleguide'], {'name': '"""styleguide"""'}), "('^style-guide/', core_views.styleguide, name='styleguide')\n", (2298, 2357), False, 'from django.conf.urls import include, url\n'), ((1699, 1721), 'django.conf.urls.include', 'include', (['category_urls'], {}), '(category_urls)\n', (1706, 1721), False, 'from django.conf.urls import include, url\n'), ((1750, 1774), 'django.conf.urls.include', 'include', (['collection_urls'], {}), '(collection_urls)\n', (1757, 1774), False, 'from django.conf.urls import include, url\n'), ((1798, 1817), 'django.conf.urls.include', 'include', (['order_urls'], {}), '(order_urls)\n', (1805, 1817), False, 'from django.conf.urls import include, url\n'), ((1839, 1857), 'django.conf.urls.include', 'include', (['page_urls'], {}), '(page_urls)\n', (1846, 1857), False, 'from django.conf.urls import include, url\n'), ((1883, 1904), 'django.conf.urls.include', 'include', (['product_urls'], {}), '(product_urls)\n', (1890, 1904), False, 'from django.conf.urls import include, url\n'), ((1931, 1953), 'django.conf.urls.include', 'include', (['customer_urls'], {}), '(customer_urls)\n', (1938, 1953), False, 'from django.conf.urls import include, url\n'), ((1976, 1995), 'django.conf.urls.include', 'include', (['staff_urls'], {}), '(staff_urls)\n', (1983, 1995), False, 'from django.conf.urls import include, url\n'), ((2022, 2044), 'django.conf.urls.include', 'include', (['discount_urls'], {}), '(discount_urls)\n', (2029, 2044), False, 'from django.conf.urls import include, url\n'), ((2070, 2173), 'django.conf.urls.include', 'include', (['(site_urls + social_network_urls + special_page_urls + bank_account_urls +\n footer_item_urls)'], {}), '(site_urls + social_network_urls + special_page_urls +\n bank_account_urls + footer_item_urls)\n', (2077, 2173), False, 'from django.conf.urls import include, url\n'), ((2222, 2240), 'django.conf.urls.include', 'include', (['menu_urls'], {}), '(menu_urls)\n', (2229, 2240), False, 'from django.conf.urls import include, url\n'), ((2266, 2288), 'django.conf.urls.include', 'include', (['shipping_urls'], {}), '(shipping_urls)\n', (2273, 2288), False, 'from django.conf.urls import include, url\n'), ((2381, 2401), 'django.conf.urls.include', 'include', (['search_urls'], {}), '(search_urls)\n', (2388, 2401), False, 'from django.conf.urls import include, url\n'), ((2424, 2443), 'django.conf.urls.include', 'include', (['taxes_urls'], {}), '(taxes_urls)\n', (2431, 2443), False, 'from django.conf.urls import include, url\n'), ((2465, 2522), 'django.views.generic.base.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""dashboard/next.html"""'}), "(template_name='dashboard/next.html')\n", (2485, 2522), False, 'from django.views.generic.base import TemplateView\n'), ((2626, 2645), 'django.conf.urls.include', 'include', (['brand_urls'], {}), '(brand_urls)\n', (2633, 2645), False, 'from django.conf.urls import include, url\n'), ((2669, 2689), 'django.conf.urls.include', 'include', (['slider_urls'], {}), '(slider_urls)\n', (2676, 2689), False, 'from django.conf.urls import include, url\n'), ((2713, 2733), 'django.conf.urls.include', 'include', (['banner_urls'], {}), '(banner_urls)\n', (2720, 2733), False, 'from django.conf.urls import include, 
url\n'), ((2756, 2775), 'django.conf.urls.include', 'include', (['scene_urls'], {}), '(scene_urls)\n', (2763, 2775), False, 'from django.conf.urls import include, url\n'), ((2798, 2817), 'django.conf.urls.include', 'include', (['store_urls'], {}), '(store_urls)\n', (2805, 2817), False, 'from django.conf.urls import include, url\n'), ((2842, 2863), 'django.conf.urls.include', 'include', (['benefit_urls'], {}), '(benefit_urls)\n', (2849, 2863), False, 'from django.conf.urls import include, url\n')]
Jf-Chen/FRN-main
experiments/CUB_fewshot_raw/FRN/ResNet-12/train.py
5b57b9e0d7368058a8e3ba41a53c460b54ab9b91
import os
import sys
import torch
import yaml
from functools import partial

sys.path.append('../../../../')
from trainers import trainer, frn_train
from datasets import dataloaders
from models.FRN import FRN

args = trainer.train_parser()

with open('../../../../config.yml', 'r') as f:
    temp = yaml.safe_load(f)
data_path = os.path.abspath(temp['data_path'])
fewshot_path = os.path.join(data_path,'CUB_fewshot_raw')

pm = trainer.Path_Manager(fewshot_path=fewshot_path,args=args)

train_way = args.train_way
shots = [args.train_shot, args.train_query_shot]

train_loader = dataloaders.meta_train_dataloader(data_path=pm.train,
                                                  way=train_way,
                                                  shots=shots,
                                                  transform_type=args.train_transform_type)

model = FRN(way=train_way,
            shots=[args.train_shot, args.train_query_shot],
            resnet=args.resnet)

train_func = partial(frn_train.default_train,train_loader=train_loader)

tm = trainer.Train_Manager(args,path_manager=pm,train_func=train_func)

tm.train(model)

tm.evaluate(model)
[((76, 107), 'sys.path.append', 'sys.path.append', (['"""../../../../"""'], {}), "('../../../../')\n", (91, 107), False, 'import sys\n'), ((217, 239), 'trainers.trainer.train_parser', 'trainer.train_parser', ([], {}), '()\n', (237, 239), False, 'from trainers import trainer, frn_train\n'), ((328, 362), 'os.path.abspath', 'os.path.abspath', (["temp['data_path']"], {}), "(temp['data_path'])\n", (343, 362), False, 'import os\n'), ((378, 420), 'os.path.join', 'os.path.join', (['data_path', '"""CUB_fewshot_raw"""'], {}), "(data_path, 'CUB_fewshot_raw')\n", (390, 420), False, 'import os\n'), ((426, 484), 'trainers.trainer.Path_Manager', 'trainer.Path_Manager', ([], {'fewshot_path': 'fewshot_path', 'args': 'args'}), '(fewshot_path=fewshot_path, args=args)\n', (446, 484), False, 'from trainers import trainer, frn_train\n'), ((577, 705), 'datasets.dataloaders.meta_train_dataloader', 'dataloaders.meta_train_dataloader', ([], {'data_path': 'pm.train', 'way': 'train_way', 'shots': 'shots', 'transform_type': 'args.train_transform_type'}), '(data_path=pm.train, way=train_way, shots=\n shots, transform_type=args.train_transform_type)\n', (610, 705), False, 'from datasets import dataloaders\n'), ((854, 945), 'models.FRN.FRN', 'FRN', ([], {'way': 'train_way', 'shots': '[args.train_shot, args.train_query_shot]', 'resnet': 'args.resnet'}), '(way=train_way, shots=[args.train_shot, args.train_query_shot], resnet=\n args.resnet)\n', (857, 945), False, 'from models.FRN import FRN\n'), ((979, 1038), 'functools.partial', 'partial', (['frn_train.default_train'], {'train_loader': 'train_loader'}), '(frn_train.default_train, train_loader=train_loader)\n', (986, 1038), False, 'from functools import partial\n'), ((1044, 1111), 'trainers.trainer.Train_Manager', 'trainer.Train_Manager', (['args'], {'path_manager': 'pm', 'train_func': 'train_func'}), '(args, path_manager=pm, train_func=train_func)\n', (1065, 1111), False, 'from trainers import trainer, frn_train\n'), ((298, 315), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (312, 315), False, 'import yaml\n')]
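The training script above reads only one key, data_path, from ../../../../config.yml. A minimal config that would satisfy it could look like the sketch below; the path is made up, and the real config may carry more keys than this script uses.

# Minimal sketch of the expected config.yml content (hypothetical path):
import yaml
example_cfg = "data_path: /home/user/datasets\n"
temp = yaml.safe_load(example_cfg)
print(temp['data_path'])   # -> /home/user/datasets, joined with 'CUB_fewshot_raw' above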
zzzace2000/dropout-feature-rankin
exp/DFRdatasets/simulate.py
7769ce822f3c0a6d23167d11f1569f59e56b1266
import argparse
import argparse
import os

import numpy as np
import torch

from dataloaders.LoaderBase import LoaderBase
import exp.feature.feature_utils as feature_utils


def run_with_identifier(unit, corr_val, datasize, rank_names, loader, show_ols=True):
    loader.clear_cache()

    # Ex: 'nn_rank:0.01'. Then extract nn_rank and 0.01 seperately
    result = {}
    performance = {}
    ranks = {}
    for rank_name in rank_names:
        the_rank_func_name = rank_name
        if ':' in rank_name:
            tmp = rank_name.split(':')
            the_rank_func_name = tmp[0]
            loader.bdnet_hyperparams['reg_coef'] = float(tmp[1])

        # Run different datasizes / correlations.
        # Ex: datasizes => [100_0, 200_0, 1000]
        # Ex: correlations => [1000_0.1, 1000_0.2]
        identifier = '%d_%f' % (datasize, corr_val)

        # return a dictionary but not a rank!!!
        rank_dict = getattr(loader, the_rank_func_name)(testfold=identifier)
        if 'metrics' in rank_dict:
            for attr_name in rank_dict['metrics']:
                performance['%s_%s' % (the_rank_func_name, attr_name)] = rank_dict

        metrics = loader.evaluate(rank_dict['rank'])
        for attr_name in metrics:
            result['%s_%s' % (the_rank_func_name, attr_name)] = metrics[attr_name]
        ranks['%s_rank' % the_rank_func_name] = rank_dict['rank']

    if show_ols:
        performance['ols'] = loader.get_ols_error()

    return result, performance, ranks


def run(mode, args):
    # Get the loader
    loader = LoaderBase.create(
        args.dataset, {'visdom_enabled': args.visdom_enabled,
                       'cuda_enabled': args.cuda,
                       'nn_cache': args.nn_cache
                       })

    default_params = {
        'datasize': 1000,
        'corr_val': -1,
        'rank_names': args.rank_func,
        'show_ols': False,
        'loader': loader,
    }

    if mode == 'correlation':
        corr_vals = np.arange(0., 1.0, 0.1)
        # corr_vals = [0., 0.1]
        containers = feature_utils.run_std_err_params(
            'corr_val', values=corr_vals, repeat=args.repeat,
            val_func=run_with_identifier, default_params=default_params,
            num_output_table=2, kept_raw=True)
    else:
        datasizes = [100, 200, 1000, 3000]
        containers = feature_utils.run_std_err_params(
            'datasize', values=datasizes, repeat=args.repeat,
            val_func=run_with_identifier, default_params=default_params,
            num_output_table=2, kept_raw=True)

    raw = containers.pop()

    # Save containers and rank
    folder = 'results/{}'.format(args.dataset)
    if not os.path.exists(folder):
        os.mkdir(folder)

    filename = args.identifier + '-%s' % mode
    torch.save(containers, '{}/{}.pth'.format(folder, filename))
    torch.save(raw, '{}/{}_raw.pth'.format(folder, filename))


def parse_args():
    # Training settings
    parser = argparse.ArgumentParser(description='train rnn to predict')
    # parser.add_argument('--lr', type=float, default=0.001)
    # parser.add_argument('--epochs', type=int, default=100)
    # parser.add_argument('--reg_coef', type=float, default=0.001)
    # parser.add_argument('--batch-size', type=int, default=32)
    # parser.add_argument('--batch-print', type=int, default=30)
    # parser.add_argument('--save-freq', type=int, default=1)
    parser.add_argument('--no-cuda', action='store_true', default=False)
    parser.add_argument('--resume', type=str, default=None)
    parser.add_argument('--gpu-ids', nargs='+', type=int, default=[0],
                        help='number of gpus to produce')
    parser.add_argument('--identifier', type=str, default='0201')
    # parser.add_argument('--reg_coef', type=float, default=None, help='vbd regularization coef!')
    parser.add_argument('--dataset', type=str, default='GaussSimulation',
                        help='["wineqaulity", "OnlineNewsPopularity", '
                             '"ClassificationONPLoader", "RegSupport2Loader"]')
    parser.add_argument('--seed', type=int, default='1234')
    parser.add_argument('--repeat', type=int, default=1)
    # parser.add_argument('--lookahead', type=int, default=5)
    # parser.add_argument('--weighted', action='store_true', default=False)
    # parser.add_argument('--reuse-rnn', action='store_true', default=False)
    parser.add_argument('--modes', nargs='+', type=str, default=['correlation'],
                        help='correlation / sizes')
    parser.add_argument('--rank_func', nargs='+', type=str, default=['vbd_linear_rank'],
                        help='nn_rank')
    # parser.add_argument('--test_func', nargs='+', type=str, default=['no_test'],
    #                     help='["nn_test_zero", "nn_test_retrain"]')
    parser.add_argument('--visdom_enabled', action='store_true', default=True)
    parser.add_argument('--no_rank_cache', action='store_true', default=False)
    parser.add_argument('--no_nn_cache', action='store_true', default=False)
    # parser.add_argument('--start_val', type=int, default=2)

    args = parser.parse_args()
    args.nn_cache = (not args.no_nn_cache)
    args.rank_cache = (not args.no_rank_cache)
    args.cuda = (not args.no_cuda) and torch.cuda.is_available()

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.cuda:
        print('gpu current device:', torch.cuda.current_device())
        torch.cuda.manual_seed(args.seed)

    if len(args.gpu_ids) > 0:
        print('start using gpu device:', args.gpu_ids)
        torch.cuda.set_device(args.gpu_ids[0])

    print('args:', args)
    print('==================== Start =====================')
    print('')
    return args


if __name__ == '__main__':
    args = parse_args()

    if 'other_ranks' in args.rank_func:
        args.rank_func.remove('other_ranks')
        args.rank_func += ['marginal_rank', 'rf_rank', 'zero_rank', 'shuffle_rank',
                           'random_rank', 'enet_rank', 'lasso_rank']

    for mode in args.modes:
        run(mode, args)
[((1550, 1680), 'dataloaders.LoaderBase.LoaderBase.create', 'LoaderBase.create', (['args.dataset', "{'visdom_enabled': args.visdom_enabled, 'cuda_enabled': args.cuda,\n 'nn_cache': args.nn_cache}"], {}), "(args.dataset, {'visdom_enabled': args.visdom_enabled,\n 'cuda_enabled': args.cuda, 'nn_cache': args.nn_cache})\n", (1567, 1680), False, 'from dataloaders.LoaderBase import LoaderBase\n'), ((2917, 2976), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""train rnn to predict"""'}), "(description='train rnn to predict')\n", (2940, 2976), False, 'import argparse\n'), ((5245, 5270), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (5259, 5270), True, 'import numpy as np\n'), ((5275, 5303), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (5292, 5303), False, 'import torch\n'), ((1961, 1985), 'numpy.arange', 'np.arange', (['(0.0)', '(1.0)', '(0.1)'], {}), '(0.0, 1.0, 0.1)\n', (1970, 1985), True, 'import numpy as np\n'), ((2038, 2225), 'exp.feature.feature_utils.run_std_err_params', 'feature_utils.run_std_err_params', (['"""corr_val"""'], {'values': 'corr_vals', 'repeat': 'args.repeat', 'val_func': 'run_with_identifier', 'default_params': 'default_params', 'num_output_table': '(2)', 'kept_raw': '(True)'}), "('corr_val', values=corr_vals, repeat=args.\n repeat, val_func=run_with_identifier, default_params=default_params,\n num_output_table=2, kept_raw=True)\n", (2070, 2225), True, 'import exp.feature.feature_utils as feature_utils\n'), ((2316, 2503), 'exp.feature.feature_utils.run_std_err_params', 'feature_utils.run_std_err_params', (['"""datasize"""'], {'values': 'datasizes', 'repeat': 'args.repeat', 'val_func': 'run_with_identifier', 'default_params': 'default_params', 'num_output_table': '(2)', 'kept_raw': '(True)'}), "('datasize', values=datasizes, repeat=args.\n repeat, val_func=run_with_identifier, default_params=default_params,\n num_output_table=2, kept_raw=True)\n", (2348, 2503), True, 'import exp.feature.feature_utils as feature_utils\n'), ((2637, 2659), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (2651, 2659), False, 'import os\n'), ((2669, 2685), 'os.mkdir', 'os.mkdir', (['folder'], {}), '(folder)\n', (2677, 2685), False, 'import os\n'), ((5215, 5240), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5238, 5240), False, 'import torch\n'), ((5397, 5430), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (5419, 5430), False, 'import torch\n'), ((5360, 5387), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (5385, 5387), False, 'import torch\n'), ((5536, 5574), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.gpu_ids[0]'], {}), '(args.gpu_ids[0])\n', (5557, 5574), False, 'import torch\n')]
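Two small string conventions drive run_with_identifier above: a rank function name may carry its regularisation coefficient after a colon, and the test-fold identifier packs datasize and correlation into one string. A quick illustration, not part of the repository:

rank_name = 'nn_rank:0.01'
func_name, coef = rank_name.split(':')   # -> 'nn_rank', '0.01'; coef feeds bdnet_hyperparams['reg_coef']
identifier = '%d_%f' % (1000, 0.3)       # -> '1000_0.300000', passed as testfold=identifier
print(func_name, float(coef), identifier)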
JiangZehua/gym-pcgrl
gym_pcgrl/envs/reps/wide_3D_rep.py
80ddbde173803e81060578c2c4167d8d1f5cacba
from gym_pcgrl.envs.reps.representation3D import Representation3D
from PIL import Image
from gym import spaces
import numpy as np
from gym_pcgrl.envs.probs.minecraft.mc_render import reps_3D_render

"""
The wide representation where the agent can pick the tile position and tile value at each update.
"""
class Wide3DRepresentation(Representation3D):
    """
    Initialize all the parameters used by that representation
    """
    def __init__(self):
        super().__init__()

    """
    Gets the action space used by the wide representation

    Parameters:
        length: the current map length
        width: the current map width
        height: the current map height
        num_tiles: the total number of the tile values

    Returns:
        MultiDiscrete: the action space used by that wide representation which consists
        of the x position, y position, z position and the tile value
    """
    def get_action_space(self, length, width, height, num_tiles):
        return spaces.MultiDiscrete([length, width, height, num_tiles])

    """
    Get the observation space used by the wide representation

    Parameters:
        length: the current map length
        width: the current map width
        height: the current map height
        num_tiles: the total number of the tile values

    Returns:
        Box: the observation space used by that representation. A 3D array of tile numbers
    """
    def get_observation_space(self, length, width, height, num_tiles):
        return spaces.Dict({
            "map": spaces.Box(low=0, high=num_tiles-1, dtype=np.uint8, shape=(height, width, length))
        })

    """
    Get the current representation observation object at the current moment

    Returns:
        observation: the current observation at the current moment. A 3D array of tile numbers
    """
    def get_observation(self):
        return {
            "map": self._map.copy()
        }

    """
    Update the wide representation with the input action

    Parameters:
        action: an action that is used to advance the environment (same as action space)

    Returns:
        boolean: True if the action change the map, False if nothing changed
    """
    def update(self, action):
        change = [0,1][self._map[action[2]][action[1]][action[0]] != action[3]]
        self._map[action[2]][action[1]][action[0]] = action[3]
        return change, action[0], action[1], action[2]
[((994, 1050), 'gym.spaces.MultiDiscrete', 'spaces.MultiDiscrete', (['[length, width, height, num_tiles]'], {}), '([length, width, height, num_tiles])\n', (1014, 1050), False, 'from gym import spaces\n'), ((1541, 1629), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(num_tiles - 1)', 'dtype': 'np.uint8', 'shape': '(height, width, length)'}), '(low=0, high=num_tiles - 1, dtype=np.uint8, shape=(height, width,\n length))\n', (1551, 1629), False, 'from gym import spaces\n')]
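A small usage sketch of the representation above. It assumes the parent Representation3D class keeps the level in a _map array that reset() would normally populate; here the array is filled in by hand purely for illustration.

import numpy as np
from gym_pcgrl.envs.reps.wide_3D_rep import Wide3DRepresentation

rep = Wide3DRepresentation()
length, width, height, num_tiles = 7, 7, 7, 3
rep._map = np.zeros((height, width, length), dtype=np.uint8)   # stand-in for reset()

print(rep.get_action_space(length, width, height, num_tiles))   # MultiDiscrete([7 7 7 3])
change, x, y, z = rep.update([2, 3, 4, 1])                       # place tile 1 at x=2, y=3, z=4
print(change, rep.get_observation()["map"][z][y][x])             # 1 1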
thinkitdata/ucsmsdk
ucsmsdk/mometa/adaptor/AdaptorMenloQStats.py
da6599e1dbc1207a30eabe548a7e5791af5f476b
"""This module contains the general information for AdaptorMenloQStats ManagedObject.""" from ...ucsmo import ManagedObject from ...ucscoremeta import MoPropertyMeta, MoMeta from ...ucsmeta import VersionMeta class AdaptorMenloQStatsConsts: MENLO_QUEUE_COMPONENT_N = "N" MENLO_QUEUE_COMPONENT_CPU = "cpu" MENLO_QUEUE_COMPONENT_ETH = "eth" MENLO_QUEUE_COMPONENT_FC = "fc" MENLO_QUEUE_COMPONENT_UNKNOWN = "unknown" MENLO_QUEUE_INDEX_0 = "0" MENLO_QUEUE_INDEX_0_A = "0_A" MENLO_QUEUE_INDEX_0_B = "0_B" MENLO_QUEUE_INDEX_1 = "1" MENLO_QUEUE_INDEX_1_A = "1_A" MENLO_QUEUE_INDEX_1_B = "1_B" MENLO_QUEUE_INDEX_UNKNOWN = "unknown" SUSPECT_FALSE = "false" SUSPECT_NO = "no" SUSPECT_TRUE = "true" SUSPECT_YES = "yes" class AdaptorMenloQStats(ManagedObject): """This is AdaptorMenloQStats class.""" consts = AdaptorMenloQStatsConsts() naming_props = set([u'menloQueueComponent', u'menloQueueIndex']) mo_meta = MoMeta("AdaptorMenloQStats", "adaptorMenloQStats", "menlo-q-stats-comp-[menlo_queue_component]index-[menlo_queue_index]", VersionMeta.Version111j, "OutputOnly", 0xf, [], ["admin", "operations", "read-only"], [u'adaptorUnit'], [u'adaptorMenloQStatsHist'], ["Get"]) prop_meta = { "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111j, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []), "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []), "drop_overrun_n0": MoPropertyMeta("drop_overrun_n0", "dropOverrunN0", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []), "drop_overrun_n0_delta": MoPropertyMeta("drop_overrun_n0_delta", "dropOverrunN0Delta", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []), "drop_overrun_n0_delta_avg": MoPropertyMeta("drop_overrun_n0_delta_avg", "dropOverrunN0DeltaAvg", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []), "drop_overrun_n0_delta_max": MoPropertyMeta("drop_overrun_n0_delta_max", "dropOverrunN0DeltaMax", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []), "drop_overrun_n0_delta_min": MoPropertyMeta("drop_overrun_n0_delta_min", "dropOverrunN0DeltaMin", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []), "drop_overrun_n1": MoPropertyMeta("drop_overrun_n1", "dropOverrunN1", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []), "drop_overrun_n1_delta": MoPropertyMeta("drop_overrun_n1_delta", "dropOverrunN1Delta", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []), "drop_overrun_n1_delta_avg": MoPropertyMeta("drop_overrun_n1_delta_avg", "dropOverrunN1DeltaAvg", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []), "drop_overrun_n1_delta_max": MoPropertyMeta("drop_overrun_n1_delta_max", "dropOverrunN1DeltaMax", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []), "drop_overrun_n1_delta_min": MoPropertyMeta("drop_overrun_n1_delta_min", "dropOverrunN1DeltaMin", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []), "intervals": MoPropertyMeta("intervals", "intervals", "uint", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []), "menlo_queue_component": 
MoPropertyMeta("menlo_queue_component", "menloQueueComponent", "string", VersionMeta.Version111j, MoPropertyMeta.NAMING, None, None, None, None, ["N", "cpu", "eth", "fc", "unknown"], []), "menlo_queue_index": MoPropertyMeta("menlo_queue_index", "menloQueueIndex", "string", VersionMeta.Version111j, MoPropertyMeta.NAMING, None, None, None, None, ["0", "0_A", "0_B", "1", "1_A", "1_B", "unknown"], []), "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []), "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []), "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version111j, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []), "suspect": MoPropertyMeta("suspect", "suspect", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, ["false", "no", "true", "yes"], []), "thresholded": MoPropertyMeta("thresholded", "thresholded", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []), "time_collected": MoPropertyMeta("time_collected", "timeCollected", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", [], []), "truncate_overrun_n0": MoPropertyMeta("truncate_overrun_n0", "truncateOverrunN0", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []), "truncate_overrun_n0_delta": MoPropertyMeta("truncate_overrun_n0_delta", "truncateOverrunN0Delta", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []), "truncate_overrun_n0_delta_avg": MoPropertyMeta("truncate_overrun_n0_delta_avg", "truncateOverrunN0DeltaAvg", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []), "truncate_overrun_n0_delta_max": MoPropertyMeta("truncate_overrun_n0_delta_max", "truncateOverrunN0DeltaMax", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []), "truncate_overrun_n0_delta_min": MoPropertyMeta("truncate_overrun_n0_delta_min", "truncateOverrunN0DeltaMin", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []), "truncate_overrun_n1": MoPropertyMeta("truncate_overrun_n1", "truncateOverrunN1", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []), "truncate_overrun_n1_delta": MoPropertyMeta("truncate_overrun_n1_delta", "truncateOverrunN1Delta", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []), "truncate_overrun_n1_delta_avg": MoPropertyMeta("truncate_overrun_n1_delta_avg", "truncateOverrunN1DeltaAvg", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []), "truncate_overrun_n1_delta_max": MoPropertyMeta("truncate_overrun_n1_delta_max", "truncateOverrunN1DeltaMax", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []), "truncate_overrun_n1_delta_min": MoPropertyMeta("truncate_overrun_n1_delta_min", "truncateOverrunN1DeltaMin", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []), "update": MoPropertyMeta("update", "update", "uint", 
VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []), } prop_map = { "childAction": "child_action", "dn": "dn", "dropOverrunN0": "drop_overrun_n0", "dropOverrunN0Delta": "drop_overrun_n0_delta", "dropOverrunN0DeltaAvg": "drop_overrun_n0_delta_avg", "dropOverrunN0DeltaMax": "drop_overrun_n0_delta_max", "dropOverrunN0DeltaMin": "drop_overrun_n0_delta_min", "dropOverrunN1": "drop_overrun_n1", "dropOverrunN1Delta": "drop_overrun_n1_delta", "dropOverrunN1DeltaAvg": "drop_overrun_n1_delta_avg", "dropOverrunN1DeltaMax": "drop_overrun_n1_delta_max", "dropOverrunN1DeltaMin": "drop_overrun_n1_delta_min", "intervals": "intervals", "menloQueueComponent": "menlo_queue_component", "menloQueueIndex": "menlo_queue_index", "rn": "rn", "sacl": "sacl", "status": "status", "suspect": "suspect", "thresholded": "thresholded", "timeCollected": "time_collected", "truncateOverrunN0": "truncate_overrun_n0", "truncateOverrunN0Delta": "truncate_overrun_n0_delta", "truncateOverrunN0DeltaAvg": "truncate_overrun_n0_delta_avg", "truncateOverrunN0DeltaMax": "truncate_overrun_n0_delta_max", "truncateOverrunN0DeltaMin": "truncate_overrun_n0_delta_min", "truncateOverrunN1": "truncate_overrun_n1", "truncateOverrunN1Delta": "truncate_overrun_n1_delta", "truncateOverrunN1DeltaAvg": "truncate_overrun_n1_delta_avg", "truncateOverrunN1DeltaMax": "truncate_overrun_n1_delta_max", "truncateOverrunN1DeltaMin": "truncate_overrun_n1_delta_min", "update": "update", } def __init__(self, parent_mo_or_dn, menlo_queue_component, menlo_queue_index, **kwargs): self._dirty_mask = 0 self.menlo_queue_component = menlo_queue_component self.menlo_queue_index = menlo_queue_index self.child_action = None self.drop_overrun_n0 = None self.drop_overrun_n0_delta = None self.drop_overrun_n0_delta_avg = None self.drop_overrun_n0_delta_max = None self.drop_overrun_n0_delta_min = None self.drop_overrun_n1 = None self.drop_overrun_n1_delta = None self.drop_overrun_n1_delta_avg = None self.drop_overrun_n1_delta_max = None self.drop_overrun_n1_delta_min = None self.intervals = None self.sacl = None self.status = None self.suspect = None self.thresholded = None self.time_collected = None self.truncate_overrun_n0 = None self.truncate_overrun_n0_delta = None self.truncate_overrun_n0_delta_avg = None self.truncate_overrun_n0_delta_max = None self.truncate_overrun_n0_delta_min = None self.truncate_overrun_n1 = None self.truncate_overrun_n1_delta = None self.truncate_overrun_n1_delta_avg = None self.truncate_overrun_n1_delta_max = None self.truncate_overrun_n1_delta_min = None self.update = None ManagedObject.__init__(self, "AdaptorMenloQStats", parent_mo_or_dn, **kwargs)
[]
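For orientation, the managed object above is keyed by its two naming properties. Constructing it directly looks roughly like the sketch below; the parent dn is a made-up example, and since the class is marked OutputOnly the object is normally returned by a query rather than created by hand.

from ucsmsdk.mometa.adaptor.AdaptorMenloQStats import AdaptorMenloQStats

mo = AdaptorMenloQStats(parent_mo_or_dn="sys/chassis-1/blade-1/adaptor-1",   # hypothetical adaptorUnit dn
                        menlo_queue_component="eth",                        # one of the Consts values above
                        menlo_queue_index="0")
# the rn pattern in mo_meta suggests this resolves to "menlo-q-stats-comp-ethindex-0"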
XelaRellum/old_password
python/old_password_test.py
b461941069bc7f1187776a992f86c89317ab215e
import unittest

import pytest

from old_password import old_password
import csv
import re


@pytest.mark.parametrize("password,expected_hash", [
    (None, None),
    ("", ""),
    ("a", "60671c896665c3fa"),
    ("abc", "7cd2b5942be28759"),
    ("ä", "0751368d49315f7f"),
])
def test_old_password(password, expected_hash):
    assert old_password(password) == expected_hash


def test_password_with_space():
    """ spaces in password are skipped """
    assert old_password("pass word") == old_password("password")


def test_password_with_tab():
    """ tabs in password are skipped """
    assert old_password("pass\tword") == old_password("password")


def test_password_from_testdata():
    with open("../testdata.csv", "r") as file:
        for line in file:
            line = line.strip()
            password, expected_hash = line.split(";")
            hash = old_password(password)
            assert hash == expected_hash, "password: %s" % password
[((93, 259), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""password,expected_hash"""', "[(None, None), ('', ''), ('a', '60671c896665c3fa'), ('abc',\n '7cd2b5942be28759'), ('ä', '0751368d49315f7f')]"], {}), "('password,expected_hash', [(None, None), ('', ''),\n ('a', '60671c896665c3fa'), ('abc', '7cd2b5942be28759'), ('ä',\n '0751368d49315f7f')])\n", (116, 259), False, 'import pytest\n'), ((334, 356), 'old_password.old_password', 'old_password', (['password'], {}), '(password)\n', (346, 356), False, 'from old_password import old_password\n'), ((471, 496), 'old_password.old_password', 'old_password', (['"""pass word"""'], {}), "('pass word')\n", (483, 496), False, 'from old_password import old_password\n'), ((500, 524), 'old_password.old_password', 'old_password', (['"""password"""'], {}), "('password')\n", (512, 524), False, 'from old_password import old_password\n'), ((618, 644), 'old_password.old_password', 'old_password', (['"""pass\tword"""'], {}), "('pass\\tword')\n", (630, 644), False, 'from old_password import old_password\n'), ((648, 672), 'old_password.old_password', 'old_password', (['"""password"""'], {}), "('password')\n", (660, 672), False, 'from old_password import old_password\n'), ((888, 910), 'old_password.old_password', 'old_password', (['password'], {}), '(password)\n', (900, 910), False, 'from old_password import old_password\n')]
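test_password_from_testdata above implies the fixture file's layout: one password;expected_hash pair per line, split on ';'. A tiny illustration of that assumed layout, reusing only vectors already listed in the parametrize block:

sample = "a;60671c896665c3fa\nabc;7cd2b5942be28759\n"   # same shape the test expects in ../testdata.csv
for line in sample.splitlines():
    password, expected_hash = line.strip().split(";")
    print(password, expected_hash)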
CindyChen1995/MKR
src/RS_model/train_mlp.py
f9ae37903dcf43b6d101dfc08644ce4a29ecbf9d
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
   Description :
   Author :       cmy
   date:          2020/1/2
-------------------------------------------------
"""
import datetime
import heapq
import numpy as np
import tensorflow as tf
import time

from metrics import ndcg_at_k
from train import get_user_record
from DMF import DMF
import os

os.environ["CUDA_VISIBLE_DEVICES"] = "1"
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5  # maximun alloc gpu50% of MEM
config.gpu_options.allow_growth = True  #allocate dynamically


def train(args, data, show_loss, show_topk, log_dir):
    n_user, n_item = data[0], data[1]
    train_data, eval_data, test_data = data[2], data[3], data[4]

    model = DMF(args, n_user, n_item)

    user_num = 100
    k_list = [1, 2, 5, 10, 20, 50, 100]
    train_record = get_user_record(train_data, True)
    test_record = get_user_record(test_data, False)
    user_list = list(set(train_record.keys()) & set(test_record.keys()))
    if len(user_list) > user_num:
        user_list = np.random.choice(user_list, size=user_num, replace=False)
    item_set = set(list(range(n_item)))

    with tf.Session(config=config) as sess,\
            open(log_dir + 'result_' + str(args.epochs) + '_' + str(args.lr) + '_' + str(int(time.time())) + '.txt', 'w') as f_result:
        sess.run(tf.global_variables_initializer())
        for step in range(args.epochs):
            f_result.write('**************************epoch_i:' + str(step) + '********************' + '\n')
            # RS training
            np.random.shuffle(train_data)
            start = 0
            batch_i = 0
            while start < train_data.shape[0]:
                _, loss = model.train_dmf(sess, get_feed_dict_for_dmf(model, train_data, start, start + args.batch_size, 0.5))
                start += args.batch_size
                if show_loss:
                    if (step * (len(train_data) // args.batch_size) + batch_i) % 20 == 0:
                        time_str = datetime.datetime.now().isoformat()
                        print('{}: Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
                            time_str, step, batch_i, (len(train_data) // args.batch_size), loss))
                        # print(loss)
                batch_i += 1

            # CTR evaluation
            # train_auc, train_acc = model.eval(sess, get_feed_dict_for_dmf(model, train_data, 0, train_data.shape[0]))
            eval_auc, eval_acc = model.eval(sess, get_feed_dict_for_dmf(model, eval_data, 0, eval_data.shape[0]))
            test_auc, test_acc = model.eval(sess, get_feed_dict_for_dmf(model, test_data, 0, test_data.shape[0]))
            # eval_str = 'epoch %d train auc: %.4f acc: %.4f eval auc: %.4f acc: %.4f test auc: %.4f acc: %.4f' \
            #            % (step, train_auc, train_acc, eval_auc, eval_acc, test_auc, test_acc)
            eval_str = 'epoch %d eval auc: %.4f acc: %.4f test auc: %.4f acc: %.4f' \
                       % (step, eval_auc, eval_acc, test_auc, test_acc)
            print(eval_str)
            f_result.write(eval_str + '\n')

            # top-K evaluation
            if show_topk:
                topk_str = ''
                precision, recall, f1, hr, ndcg = topk_eval(
                    sess, model, user_list, train_record, test_record, item_set, k_list)
                print('precision: ', end='')
                topk_str += 'precision: '
                for i in precision:
                    print('%.4f\t' % i, end='')
                    topk_str += '%.4f\t' % i
                print()
                print('recall: ', end='')
                topk_str += '\n' + 'recall: '
                for i in recall:
                    print('%.4f\t' % i, end='')
                    topk_str += '%.4f\t' % i
                print()
                print('f1: ', end='')
                topk_str += '\n' + 'f1: '
                for i in f1:
                    print('%.4f\t' % i, end='')
                    topk_str += '%.4f\t' % i
                print()
                print('hr: ', end='')
                topk_str += '\n' + 'hr: '
                for i in hr:
                    print('%.4f\t' % i, end='')
                    topk_str += '%.4f\t' % i
                print()
                print('ndcg: ', end='')
                topk_str += '\n' + 'ndcg: '
                for i in ndcg:
                    print('%.4f\t' % i, end='')
                    topk_str += '%.4f\t' % i
                print()
                f_result.write(topk_str + '\n')


def get_feed_dict_for_dmf(model, data, start, end, keep_drop=0.0):
    feed_dict = {model.user_indices: data[start:end, 0],
                 model.item_indices: data[start:end, 1],
                 model.labels: data[start:end, 2],
                 model.keep_drop: keep_drop}
    return feed_dict


def topk_eval(sess, model, user_list, train_record, test_record, item_set, k_list):
    precision_list = {k: [] for k in k_list}
    recall_list = {k: [] for k in k_list}
    hr_list = {k: [] for k in k_list}
    ndcg_list = {k: [] for k in k_list}
    total_test = 0

    for user in user_list:
        test_item_list = list(item_set - train_record[user])
        item_score_map = dict()
        items, scores = model.get_scores(sess, {model.user_indices: [user] * len(test_item_list),
                                                model.item_indices: test_item_list,
                                                model.keep_drop: 0.0})
        for item, score in zip(items, scores):
            item_score_map[item] = score
        item_score_pair_sorted = sorted(item_score_map.items(), key=lambda x: x[1], reverse=True)
        item_sorted = [i[0] for i in item_score_pair_sorted]
        K_max_item_score = heapq.nlargest(k_list[-1], item_score_map, key=item_score_map.get)
        r = []
        for i in K_max_item_score:
            if i in test_record[user]:
                r.append(1)
            else:
                r.append(0)
        for k in k_list:
            hit_num = len(set(item_sorted[:k]) & test_record[user])
            precision_list[k].append(hit_num / k)
            recall_list[k].append(hit_num / len(test_record[user]))
            hr_list[k].append(hit_num)
            ndcg_list[k].append(ndcg_at_k(r, k))
        total_test += len(test_record[user])

    precision = [np.mean(precision_list[k]) for k in k_list]
    recall = [np.mean(recall_list[k]) for k in k_list]
    f1 = [2 / (1 / precision[i] + 1 / recall[i]) for i in range(len(k_list))]
    hr = [np.sum(hr_list[k]) / total_test for k in k_list]
    ndcg = [np.mean(ndcg_list[k]) for k in k_list]

    return precision, recall, f1, hr, ndcg
[((429, 445), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (443, 445), True, 'import tensorflow as tf\n'), ((766, 791), 'DMF.DMF', 'DMF', (['args', 'n_user', 'n_item'], {}), '(args, n_user, n_item)\n', (769, 791), False, 'from DMF import DMF\n'), ((870, 903), 'train.get_user_record', 'get_user_record', (['train_data', '(True)'], {}), '(train_data, True)\n', (885, 903), False, 'from train import get_user_record\n'), ((922, 955), 'train.get_user_record', 'get_user_record', (['test_data', '(False)'], {}), '(test_data, False)\n', (937, 955), False, 'from train import get_user_record\n'), ((1083, 1140), 'numpy.random.choice', 'np.random.choice', (['user_list'], {'size': 'user_num', 'replace': '(False)'}), '(user_list, size=user_num, replace=False)\n', (1099, 1140), True, 'import numpy as np\n'), ((1191, 1216), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (1201, 1216), True, 'import tensorflow as tf\n'), ((5904, 5970), 'heapq.nlargest', 'heapq.nlargest', (['k_list[-1]', 'item_score_map'], {'key': 'item_score_map.get'}), '(k_list[-1], item_score_map, key=item_score_map.get)\n', (5918, 5970), False, 'import heapq\n'), ((6499, 6525), 'numpy.mean', 'np.mean', (['precision_list[k]'], {}), '(precision_list[k])\n', (6506, 6525), True, 'import numpy as np\n'), ((6557, 6580), 'numpy.mean', 'np.mean', (['recall_list[k]'], {}), '(recall_list[k])\n', (6564, 6580), True, 'import numpy as np\n'), ((6747, 6768), 'numpy.mean', 'np.mean', (['ndcg_list[k]'], {}), '(ndcg_list[k])\n', (6754, 6768), True, 'import numpy as np\n'), ((1375, 1408), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1406, 1408), True, 'import tensorflow as tf\n'), ((1597, 1626), 'numpy.random.shuffle', 'np.random.shuffle', (['train_data'], {}), '(train_data)\n', (1614, 1626), True, 'import numpy as np\n'), ((6686, 6704), 'numpy.sum', 'np.sum', (['hr_list[k]'], {}), '(hr_list[k])\n', (6692, 6704), True, 'import numpy as np\n'), ((6418, 6433), 'metrics.ndcg_at_k', 'ndcg_at_k', (['r', 'k'], {}), '(r, k)\n', (6427, 6433), False, 'from metrics import ndcg_at_k\n'), ((1316, 1327), 'time.time', 'time.time', ([], {}), '()\n', (1325, 1327), False, 'import time\n'), ((2043, 2066), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2064, 2066), False, 'import datetime\n')]
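topk_eval above hands ndcg_at_k a binary relevance list r and a cutoff k, but metrics.ndcg_at_k itself is not part of this file. A common definition consistent with that call signature is sketched below; the repository's own implementation may differ.

import numpy as np

def dcg_at_k(r, k):
    # discounted cumulative gain over the first k relevance scores
    r = np.asfarray(r)[:k]
    if r.size == 0:
        return 0.0
    return np.sum(r / np.log2(np.arange(2, r.size + 2)))

def ndcg_at_k(r, k):
    # normalize by the DCG of an ideally ordered relevance list
    idcg = dcg_at_k(sorted(r, reverse=True), k)
    return dcg_at_k(r, k) / idcg if idcg > 0 else 0.0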
glass-w/PyLipID
pylipid.py
ee29f92ba6187cd22b9554a599177152ebed9c4c
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Aug 28 19:28:17 2019 @author: Wanling Song """ import mdtraj as md import numpy as np import pandas as pd import argparse import sys from collections import defaultdict import pickle import os import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import networkx as nx import seaborn as sns from matplotlib.ticker import MultipleLocator from scipy.optimize import curve_fit from scipy.sparse import coo_matrix from scipy import sparse from statsmodels.nonparametric.kernel_density import KDEMultivariate import community import warnings from shutil import copyfile import datetime from itertools import product import logomaker import re warnings.simplefilter(action='ignore', category=FutureWarning) warnings.filterwarnings('ignore') np.seterr(all='ignore') ################################### ###### Parameter settings ####### ################################### parser = argparse.ArgumentParser() parser.add_argument("-f", nargs="+", metavar="./run/md.xtc", help="List of trajectories, seperated by space, \ Supports xtc, gro format. Used by mdtraj.load()") parser.add_argument("-c", nargs="+", metavar="./run/system.gro", \ help="List of coordinates of trajectory, in the same order as -f, required when inputs of -f are xtc trajectories, \ Supported format: gro, pdb, etc., Used by mdtraj.load()") parser.add_argument("-stride", default=1, metavar=1, help="Striding through trajectories. Only every stride-th will be analized." ) parser.add_argument("-dt", default=None, metavar="None", help="The time interval between two adjacent frames in the trajectories. \ If not specified, the mdtraj will deduce from the trajectories. This works for trajectories in format of e.g. xtc which \ include timestep information. For trajectories in dcd format, users have to provide the time interval manually, \ in a time unite consistent with -tu") parser.add_argument("-tu", default="us", choices=["ns", "us"], metavar="us", \ help="Time unit for interaction duration calculation. Available options: ns, us. This will affect the unit of koff as well.") parser.add_argument("-save_dir", default=None, metavar="None", help="The directory where all the generated results will be put in. \ The directory will be created if not existing. Using the current working directory if not specified.") parser.add_argument("-cutoffs", nargs=2, default=(0.55, 1.0), metavar=(0.55, 1.0), \ help="Double cutoff seperated by space. In unit of nm. Default is 0.55 1.0. The double cutoffs are used to define lipid \ interactions. A continuous lipid contact with a given residue starts when the lipid moves to the given residue \ closer than the smaller cutoff; and ends when the lipid moves farther than the larger cutoff. The standard single \ cutoff can be acheived by setting the same value for both cutoffs.") parser.add_argument("-lipids", nargs="+", metavar="POPC", default="POPC CHOL POP2", \ help="Lipid species to check, seperated by space. Should be consistent with residue names in your trajectories.") parser.add_argument("-lipid_atoms", nargs="+", metavar="PO4", default=None, \ help="Lipid atoms to check, seperated by space. Should be consistent with the atom names in your trajectories.") parser.add_argument("-radii", nargs="+", default=None, metavar="BB:0.26 SC1:0.23", help="Change/Define the radius of atoms/beads \ that is used for the calculation of binding site surface area. Values need to be in the unit of nm. 
Supported syntax is \ BB:0.26, which defines the radius of bead BB as 0.26 nm, or CA:0.12 which defines the radius of atom CA as 0.12 nm. For \ atomistic simulations, the default radii are taken from \ mdtraj https://github.com/mdtraj/mdtraj/blob/master/mdtraj/geometry/sasa.py#L56. For coarse-grained \ simulations, this script defines the radius of the MARTINI 2 beads of BB as 0.26 nm and SC1/2/3 as 0.23 nm.") parser.add_argument("-nprot", default=1, metavar="1", \ help="num. of proteins (or chains) in the simulation system. The calculated results will be averaged among these proteins \ (or chains). The proteins (or chains) need to be identical, otherwise the averaging will fail.") parser.add_argument("-resi_offset", default=0, metavar="0", help="Shifting the residue index. It is useful if you need to change the residue \ index in your trajectories. For example, to change the residue indeces from 5,6,7,..., to 10,11,12,..., use -resi_offset 4. \ All the outputs, including plotted figures and saved coordinates, will be changed by this.") parser.add_argument("-resi_list", nargs="+", default=[], metavar="1-10 20-30", help="The indices of residues on which the calculations are done. \ This option is useful for those proteins with large regions that don't require calculation. Skipping those calculations could \ save time and memory. Accepted syntax include 1/ defining a range, like 1-10 (both ends included); 2/ single residue index, \ like 25 26 17. All the selections are seperated by space. For example, -resi_list 1-10 20-30 40 45 46 means selecting \ residues 1-10, 20-30, 40, 45 and 46 for calculation. The residue indices are not affected by -resi_offset, i.e. they \ should be consistent with the indices in your trajectories.") parser.add_argument("-chain_breaks", nargs="+", default=[], metavar="100 281 420", help="Start a new chain at the X-th residue (starting at 1) in \ the trajectory topology. This identifier is independent of the residue index but checks the residue order in the topology. \ Multiple chain breaks are supported. This option is useful when the simulation system contains \ multiple differnt chains, or users want to see the difference between chains even if these chains are identical. Using this flag \ will generate seperate figures for each of the chains. But the binding site detection will still treat the proteins in the \ system collectively, i.e. those binding sites that cover at multiple chains will be identified.") parser.add_argument("-nbootstrap", default=10, metavar=10, help="The number of samples for bootstrapping the calcultion of koff. \ The default is 10. The larger the number, the more time-consuming the calculation will be. The closer the bootstrapped \ residence time/koffs are to the original values, the more reliable those original values are. The bootstrapped results \ are ploted in each of the koff plots and plotted apposed to the original values in the figure showing residence time. ") parser.add_argument("-save_dataset", nargs="?", default=True, const=True, metavar="True", help="Save dataset in Pickle. Default is True") parser.add_argument("-gen_binding_poses", default=5, metavar=5, help="The num. of top-scored lipid binding poses to be generated for each binding \ site. The default is 5. A scoring function is generated for each binding site based on the sum of the probability density function of each atom/bead \ the lipid molecule. Score = sum(PDF(atom_i) * Weight(atom_i)) for atom_i in the lipid molecule. 
The weight function Weight(atom_i) \ is specified by the flag -score_weights.") parser.add_argument("-save_pose_format", default="gro", metavar="gro", help="The format the generated lipid binding poses are written into. This function \ is carried out by mdtraj.save(), hence supports the formats that are included by mdtraj. ") parser.add_argument("-score_weights", nargs="+", default=None, metavar="PO4:1 C1:1", help="The weight of each of the lipid atom/bead contributes to the scoring function. \ Top-rated lipid binding poses can be generated based on users' specification. The bounds poses of each binding site are scored based \ on the scoring function Score = sum(PDF(atom_i) * Weight(atom_i)) for atom_i in the lipid molecule.") parser.add_argument("-letter_map", nargs="+", default=None, metavar="ARG:K GLY:G", help="Map the three-letter amino acids to one letter. This map is \ used in making logomaker figures (https://logomaker.readthedocs.io/en/latest/). The common 20 amino acids are defined \ by this script. Users need to use this flag to define maps for uncommon amino acids in their systems.") parser.add_argument("-pdb", default=None, metavar="None", help="Provide a PDB structure onto which the binding site information will be mapped. \ Using this flag will generate a 'show_binding_site_info.py' file in the -save_dir directory, which allows users to check the \ mapped binding site information in PyMol. Users can run the generated script by 'python show_binding_site_info.py' \ to open such a PyMol session.") parser.add_argument("-pymol_gui", nargs="?", default=True, const=True, metavar="True", help="Show the PyMol session of binding site information \ at the end of the calcution. Need to be used in conjuction with -pdb.") args = parser.parse_args(sys.argv[1:]) ########################################## ########## assisting functions ########### ########################################## def get_atom_index_for_lipid(lipid, traj, part=None): whole_atom_index = traj.top.select("resname {}".format(lipid)) if part != None: parts_atom_index = [traj.topology.atom(idx).index for idx in whole_atom_index if traj.topology.atom(idx).name in part] return parts_atom_index else: return whole_atom_index class Durations(): def __init__(self, contact_residues_low, contact_residue_high, dt): self.contact_low = contact_residues_low self.contact_high = contact_residue_high self.dt = dt def cal_duration(self): self.pointer = [np.zeros_like(self.contact_high[idx], dtype=np.int) for idx in range(len(self.contact_high))] durations = [] for i in range(len(self.contact_low)): for j in range(len(self.contact_low[i])): pos = np.where(self.contact_high[i] == self.contact_low[i][j])[0][0] if self.pointer[i][pos] == 0: durations.append(self.get_duration(i, pos)) if len(durations) == 0: return [0] else: return durations def get_duration(self, i, j): count = 1 self.pointer[i][j] = 1 lipid_to_search = self.contact_high[i][j] for k in range(i+1, len(self.contact_high)): locations = np.where(self.contact_high[k] == lipid_to_search)[0] if len(locations) == 0: return count * self.dt else: pos = locations[0] self.pointer[k][pos] = 1 count +=1 return (count - 1) * self.dt def cal_interaction_intensity(contact_residues_high): """ The probablily of finding the lipids around the selected residue plus the number of lipids found around the selected residue, the average number of lipid per contact """ contact_counts = [len(item) for item in contact_residues_high] mask = np.array(contact_counts) > 0 contact_counts_nonzero = 
np.array(contact_counts)[mask] return 100 * len(contact_counts_nonzero)/len(contact_residues_high), np.nan_to_num(contact_counts_nonzero.mean()) def cal_sigma(durations, num_of_lipids, T_total, delta_t_range): sigma = {} for delta_t in delta_t_range: if delta_t == 0: sigma[delta_t] = 1 sigma0 = float(sum([restime - delta_t for restime in durations if restime >= delta_t])) / ((T_total - delta_t) * num_of_lipids) else: try: sigma[delta_t] = float(sum([restime - delta_t for restime in durations if restime >= delta_t])) / ((T_total - delta_t) * num_of_lipids * sigma0) except ZeroDivisionError: sigma[delta_t] = 0 return sigma def cal_restime_koff(sigma, initial_guess): """ fit the exponential curve y=A*e^(-k1*x)+B*e^(-k2*x) """ delta_t_range = list(sigma.keys()) delta_t_range.sort() # x hist_values = np.nan_to_num([sigma[delta_t] for delta_t in delta_t_range]) # y try: popt, pcov = curve_fit(bi_expo, np.array(delta_t_range, dtype=np.float128), np.array(hist_values, dtype=np.float128), p0=initial_guess, maxfev=100000) n_fitted = bi_expo(np.array(delta_t_range, dtype=np.float128), *popt) r_squared = 1 - np.sum((np.nan_to_num(n_fitted) - np.nan_to_num(hist_values))**2)/np.sum((hist_values - np.mean(hist_values))**2) ks = [abs(k) for k in popt[:2]] koff = np.min(ks) restime = 1/koff except RuntimeError: koff = 0 restime = 0 r_squared = 0 popt = [0, 0, 0, 0] return restime, koff, r_squared, popt def bi_expo(x, k1, k2, A, B): return A*np.exp(-k1*x) + B*np.exp(-k2*x) def check_dir(save_dir, suffix=None): if save_dir == None: save_dir = os.getcwd() else: save_dir = os.path.abspath(save_dir) if suffix != None: save_dir = os.path.join(save_dir, suffix) if not os.path.isdir(save_dir): print("Creating new director: {}".format(save_dir)) os.makedirs(save_dir) return save_dir def sparse_corrcoef(A, B=None): if B is not None: A = sparse.vstack((A, B), format='csr') A = A.astype(np.float64) n = A.shape[1] # Compute the covariance matrix rowsum = A.sum(1) centering = rowsum.dot(rowsum.T.conjugate()) / n C = (A.dot(A.T.conjugate()) - centering) / (n - 1) # The correlation coefficients are given by # C_{i,j} / sqrt(C_{i} * C_{j}) d = np.diag(C) coeffs = C / np.sqrt(np.outer(d, d)) return coeffs ##################################### ####### Main Class object ########### ##################################### class LipidInteraction(): def __init__(self, trajfile_list, grofile_list=None, stride=1, dt=None, cutoff=[0.55, 1.0], \ lipid="POPC", lipid_atoms=None, nprot=1, resi_list=[], resi_offset=0, save_dir=None, timeunit="us"): if grofile_list != None: assert len(trajfile_list) == len(grofile_list), \ "List of coordinates should be in the same order and length of list of trajectories!" 
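        # The two values in `cutoff` act as a dual-cutoff scheme (sorted into [lower, upper] below):
        # a contact is opened when a lipid comes within the lower cutoff and is only closed once it
        # leaves the upper cutoff (see the Durations class above), which suppresses rapid rebinding
        # noise in the duration statistics.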
self.save_dir = check_dir(save_dir) self.trajfile_list = trajfile_list self.grofile_list = grofile_list self.dt = dt self.nrepeats = len(self.trajfile_list) self.cutoff = np.sort(np.array(cutoff, dtype=float)) self.lipid = lipid self.lipid_atoms = lipid_atoms self.nprot = int(nprot) self.timeunit = timeunit self.koff = {} self.sigmas = {} self.params = {} self.r_squared = {} self.res_time = {} self.koff_b = {} self.koff_b_cv = {} self.res_time_b = {} self.res_time_b_cv = {} self.r_squared_b = {} self.interaction_duration = defaultdict(list) self.interaction_occupancy = defaultdict(list) self.lipid_count = defaultdict(list) self.contact_residues_high = defaultdict(list) self.contact_residues_low = defaultdict(list) self.stride = int(stride) self.resi_offset = resi_offset self.resi_list = resi_list self.residue_set = [] self._protein_ref = None self._lipid_ref = None return def _get_traj_stats(self, traj, lipid, lipid_atoms): lipid_atom_indices = traj.top.select("resn {}".format(self.lipid)) lipid_resi_indices = set() for atom in lipid_atom_indices: lipid_resi_indices.add(traj.top.atom(atom).residue.index) num_of_lipids = len(lipid_resi_indices) lipid_resi_indices = list(lipid_resi_indices) lipid_resi_indices.sort() lipid_resi_indices_original = lipid_resi_indices if self._lipid_ref == None: one_lipid_indices = [] for lipid_id in np.sort(traj.top.select("resn {}".format(self.lipid))): if len(one_lipid_indices) == 0: one_lipid_indices.append(lipid_id) elif traj.top.atom(lipid_id).residue.index != traj.top.atom(one_lipid_indices[-1]).residue.index: break else: one_lipid_indices.append(lipid_id) self._lipid_ref = traj[0].atom_slice(np.unique(one_lipid_indices)) if lipid_atoms != None: lipid_haystack = get_atom_index_for_lipid(lipid, traj, part=lipid_atoms) selected_atom_indices = np.hstack([traj.top.select("protein"), lipid_haystack]) new_xyz = [frame[selected_atom_indices] for frame in traj.xyz] reduced_frame = traj[0].atom_slice(selected_atom_indices) reduced_top = reduced_frame.top new_traj = md.Trajectory(new_xyz, reduced_top, time=traj.time, unitcell_lengths=traj.unitcell_lengths, \ unitcell_angles=traj.unitcell_angles) lipid_resi_indices = [new_traj.top.atom(new_traj.top.select("protein")[-1]).residue.index+1+idx \ for idx in np.arange(num_of_lipids)] else: new_traj = traj all_protein_atom_indices = new_traj.top.select("protein") natoms_per_protein = int(len(all_protein_atom_indices)/self.nprot) prot_atom_indices = all_protein_atom_indices[:natoms_per_protein] nresi_per_protein = new_traj.top.atom(prot_atom_indices[-1]).residue.index - \ new_traj.top.atom(prot_atom_indices[0]).residue.index + 1 selected_protein_resi_set = [] if len(self.resi_list) == 0: residue_set = ["{}{}".format(new_traj.top.residue(resi).resSeq+self.resi_offset, new_traj.top.residue(resi).name) \ for resi in np.arange(new_traj.top.atom(prot_atom_indices[0]).residue.index, \ new_traj.top.atom(prot_atom_indices[-1]).residue.index + 1)] residue_set = np.array(residue_set, dtype=str) # residue id in structure instead of builtin index in mdtraj for protein_idx in range(self.nprot): selected_protein_resi_set.append(np.unique([new_traj.top.atom(atom_idx).residue.index \ for atom_idx in \ all_protein_atom_indices[protein_idx*natoms_per_protein:(protein_idx+1)*natoms_per_protein]])) elif len(self.resi_list) > 0: resi_list = np.sort(np.array(np.hstack(self.resi_list), dtype=int)) for protein_idx in range(self.nprot): selected_protein_resi_set.append(np.unique([new_traj.top.atom(atom_idx).residue.index \ for atom_idx in \ 
all_protein_atom_indices[protein_idx*natoms_per_protein:(protein_idx+1)*natoms_per_protein] \ if new_traj.top.atom(atom_idx).residue.resSeq in resi_list])) residue_set = ["{}{}".format(new_traj.top.residue(resi).resSeq+self.resi_offset, new_traj.top.residue(resi).name) \ for resi in selected_protein_resi_set[0]] residue_set = np.array(residue_set, dtype=str) if self._protein_ref == None: self._protein_ref = new_traj[0].atom_slice(prot_atom_indices) self._selected_residue_indices = selected_protein_resi_set[0] return new_traj, {"natoms_per_protein": natoms_per_protein, "nresi_per_protein": nresi_per_protein, "selected_protein_resi_set": selected_protein_resi_set, "residue_set": residue_set, "num_of_lipids": num_of_lipids, "lipid_resi_indices": lipid_resi_indices, "lipid_resi_indices_original": lipid_resi_indices_original} def cal_interactions(self, save_dir=None, save_dataset=True, nbootstrap=10): if save_dir == None: self.save_dir = check_dir(self.save_dir, "Interaction_{}".format(self.lipid)) else: self.save_dir = check_dir(save_dir, "Interaction_{}".format(self.lipid)) with open("{}/calculation_log_{}.txt".format(self.save_dir, self.lipid), "w") as f: f.write("###### Lipid: {}\n".format(self.lipid)) f.write("###### Lipid Atoms: {}\n".format(self.lipid_atoms)) f.write("###### Cutoffs: {}\n".format(self.cutoff)) f.write("###### nprot: {}\n".format(self.nprot)) f.write("###### Trajectories:\n") for traj_fn in self.trajfile_list: f.write(" {}\n".format(traj_fn)) f.write("###### Coordinates:\n") for gro_fn in self.grofile_list: f.write(" {}\n".format(gro_fn)) f.write("\n") row = [] col = [] data = [] self.num_of_lipids = [] self.lipid_resi_set = [] self.T_total = [] self.timesteps = [] self.nresi_per_protein = [] ncol_start = 0 for traj_idx, trajfile in enumerate(self.trajfile_list): print("\n########## Start calculation of {} interaction in \n########## {} \n".format(self.lipid, self.trajfile_list[traj_idx])) f.write("\n###### Start calculation of {} interaction in \n###### {} \n".format(self.lipid, self.trajfile_list[traj_idx])) traj = md.load(trajfile, top=self.grofile_list[traj_idx], stride=self.stride) if self.dt == None: timestep = traj.timestep/1000000.0 if self.timeunit == "us" else traj.timestep/1000.0 else: timestep = float(self.dt * self.stride) self.T_total.append((traj.n_frames - 1) * timestep) self.timesteps.append(timestep) new_traj, traj_stats = self._get_traj_stats(traj, self.lipid, self.lipid_atoms) self.num_of_lipids.append(traj_stats["num_of_lipids"]) self.lipid_resi_set.append(traj_stats["lipid_resi_indices_original"]) self.nresi_per_protein.append(len(traj_stats["residue_set"])) self.residue_set = traj_stats["residue_set"] if len(traj_stats["residue_set"]) > len(self.residue_set) else self.residue_set ncol_per_protein = traj_stats["num_of_lipids"] * new_traj.n_frames for idx_protein in np.arange(self.nprot): for resid, (residue_index, residue) in enumerate(zip(traj_stats["selected_protein_resi_set"][idx_protein], traj_stats["residue_set"])): pairs = list(product([residue_index], traj_stats["lipid_resi_indices"])) dist_matrix_resi, _ = md.compute_contacts(new_traj, pairs, scheme="closest", periodic=True) contact_residues_low = [[] for dummy in np.arange(new_traj.n_frames)] contact_residues_high = [[] for dummy in np.arange(new_traj.n_frames)] frame_id_set_low, lipid_id_set_low = np.where(dist_matrix_resi <= self.cutoff[0]) frame_id_set_high, lipid_id_set_high = np.where(dist_matrix_resi <= self.cutoff[1]) for frame_id, lipid_id in zip(frame_id_set_low, lipid_id_set_low): 
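                    # Collect, frame by frame, the lipid indices found within the lower cutoff; the
                    # analogous loop just below does the same for the upper cutoff, and the two lists
                    # are fed to Durations() to measure continuous contacts under the dual-cutoff scheme.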
contact_residues_low[frame_id].append(int(lipid_id)) for frame_id, lipid_id in zip(frame_id_set_high, lipid_id_set_high): contact_residues_high[frame_id].append(int(lipid_id)) col.append([ncol_start + ncol_per_protein*idx_protein + lipid_id*new_traj.n_frames + \ frame_id for frame_id, lipid_id in zip(frame_id_set_low, lipid_id_set_low)]) contact_low = [np.array(contact, dtype=int) for contact in contact_residues_low] contact_high = [np.array(contact, dtype=int) for contact in contact_residues_high] row.append([resid for dummy in np.arange(len(frame_id_set_low))]) data.append(dist_matrix_resi[frame_id_set_low, lipid_id_set_low]) self.contact_residues_high[resid].append(contact_high) self.contact_residues_low[resid].append(contact_low) self.interaction_duration[residue].append(Durations(contact_low, contact_high, timestep).cal_duration()) occupancy, lipidcount = cal_interaction_intensity(contact_high) self.interaction_occupancy[residue].append(occupancy) self.lipid_count[residue].append(lipidcount) ncol_start += ncol_per_protein * self.nprot ############################################### ###### get some statistics for this traj ###### ############################################### durations = np.array([np.concatenate(self.interaction_duration[residue][-self.nprot:]).mean() for residue in traj_stats["residue_set"]]) duration_arg_idx = np.argsort(durations)[::-1] occupancies = np.array([np.mean(self.interaction_occupancy[residue][-self.nprot:]) for residue in traj_stats["residue_set"]]) occupancy_arg_idx = np.argsort(occupancies)[::-1] lipidcounts = np.array([np.mean(self.lipid_count[residue][-self.nprot:]) for residue in traj_stats["residue_set"]]) lipidcount_arg_idx = np.argsort(lipidcounts)[::-1] log_text = "10 residues that showed longest average interaction durations ({}):\n".format(self.timeunit) for residue, duration in zip(traj_stats["residue_set"][duration_arg_idx][:10], durations[duration_arg_idx][:10]): log_text += "{:^8s} -- {:^8.3f}\n".format(residue, duration) log_text += "10 residues that showed highest lipid occupancy (100%):\n" for residue, occupancy in zip(traj_stats["residue_set"][occupancy_arg_idx][:10], occupancies[occupancy_arg_idx][:10]): log_text += "{:^8s} -- {:^8.2f}\n".format(residue, occupancy) log_text += "10 residues that have the largest number of surrounding lipids (count):\n" for residue, lipidcount in zip(traj_stats["residue_set"][lipidcount_arg_idx][:10], lipidcounts[lipidcount_arg_idx][:10]): log_text += "{:^8s} -- {:^8.2f}\n".format(residue, lipidcount) print(log_text) f.write(log_text) row = np.concatenate(row) col = np.concatenate(col) data = np.concatenate(data) contact_info = coo_matrix((data, (row, col)), shape=(max(self.nresi_per_protein), ncol_start)) self.interaction_covariance = sparse_corrcoef(contact_info) ################################################### ############ calculate and plot koffs ############# ################################################### koff_dir = check_dir(self.save_dir, "Koffs_{}".format(self.lipid)) for residue in self.residue_set: duration_raw = np.concatenate(self.interaction_duration[residue]) if np.sum(duration_raw) > 0: bootstrap_results = self.bootstrap(duration_raw, residue, "{}/{}_{}.pdf".format(koff_dir, self.lipid, residue), \ nbootstrap=nbootstrap) self.sigmas[residue] = bootstrap_results["sigma"] self.koff[residue] = bootstrap_results["koff"] self.res_time[residue] = bootstrap_results["restime"] self.params[residue] = bootstrap_results["params"] self.r_squared[residue] = bootstrap_results["r_squared"] 
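                # The per-residue quantities stored in this block come from the normalised survival
                # function sigma(delta_t) built by cal_sigma() and fitted with the bi-exponential
                # A*exp(-k1*t) + B*exp(-k2*t) (bi_expo); koff is taken as min(|k1|, |k2|) and the
                # residence time as 1/koff (cal_restime_koff), with bootstrapped counterparts stored below.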
self.koff_b[residue] = bootstrap_results["koff_b_avg"] self.koff_b_cv[residue] = bootstrap_results["koff_b_cv"] self.res_time_b[residue] = bootstrap_results["res_time_b_avg"] self.res_time_b_cv[residue] = bootstrap_results["res_time_b_cv"] self.r_squared_b[residue] = bootstrap_results["r_squared_b_avg"] else: delta_t_range = np.arange(0, self.T_total[traj_idx], np.min(self.timesteps)) self.sigmas[residue] = {key:value for key, value in zip(delta_t_range, np.zeros(len(delta_t_range)))} self.koff[residue] = 0 self.res_time[residue] = 0 self.params[residue] = [0, 0, 0, 0] self.r_squared[residue] = 0.0 self.koff_b[residue] = 0 self.koff_b_cv[residue] = 0 self.res_time_b[residue] = 0 self.res_time_b_cv[residue] = 0 self.r_squared_b[residue] = 0.0 ############################################## ########## wrapping up dataset ############### ############################################## T_max = np.max(self.T_total) Res_Time = np.array([self.res_time[residue] for residue in self.residue_set]) Capped = Res_Time > T_max Res_Time[Capped] = T_max Res_Time_B = np.array([self.res_time_b[residue] for residue in self.residue_set]) Capped = Res_Time_B > T_max Res_Time_B[Capped] = T_max dataset = pd.DataFrame({"Residue": [residue for residue in self.residue_set], "Residue idx": self._selected_residue_indices, "Occupancy": np.array([np.mean(self.interaction_occupancy[residue]) \ for residue in self.residue_set]), "Occupancy_std": np.array([np.std(self.interaction_occupancy[residue]) \ for residue in self.residue_set]), "Duration": np.array([np.mean(np.concatenate(self.interaction_duration[residue])) \ for residue in self.residue_set]), "Duration_std": np.array([np.std(np.concatenate(self.interaction_duration[residue])) \ for residue in self.residue_set]), "Residence Time": Res_Time, "Capped": Capped, "R squared": np.array([self.r_squared[residue] for residue in self.residue_set]), "Koff": np.array([self.koff[residue] for residue in self.residue_set]), "Residence Time_boot": Res_Time_B, "Residence Time_boot_cv": np.array([self.res_time_b_cv[residue] for residue in self.residue_set]), "Koff_boot": np.array([self.koff_b[residue] for residue in self.residue_set]), "Koff_boot_cv": np.array([self.koff_b_cv[residue] for residue in self.residue_set]), "R squared_boot": np.array([self.r_squared_b[residue] for residue in self.residue_set]), "LipidCount": np.array([np.mean(self.lipid_count[residue]) \ for residue in self.residue_set]), "LipidCount_std": np.array([np.std(self.lipid_count[residue]) \ for residue in self.residue_set])}) dataset.to_csv("{}/Interactions_{}.csv".format(self.save_dir, self.lipid), index=False) self.dataset = dataset reminder = """ NOTE: Occupancy: percentage of frames where lipid is in contact with the given residue (0-100%); Duration: Average length of a continuous interaction of lipid with the given residue (in unit of {timeunit}); LipidCount: Average number of lipid surrounding the given residue within the longer cutoff; Koff: Koff of lipid with the given residue (in unit of ({timeunit})^(-1)); """.format(**{"timeunit": self.timeunit}) print(reminder) print() if save_dataset: dataset_dir = check_dir(self.save_dir, "Dataset") with open("{}/interaction_durations_{}.pickle".format(dataset_dir, self.lipid), "wb") as f: pickle.dump(self.interaction_duration, f, 2) with open("{}/sigmas_{}.pickle".format(dataset_dir, self.lipid), "wb") as f: pickle.dump(self.sigmas, f, 2) with open("{}/curve_fitting_params_{}.pickle".format(dataset_dir, self.lipid), "wb") as f: pickle.dump(self.params, f, 2) with 
open("{}/interaction_covariance_matrix_{}.pickle".format(dataset_dir, self.lipid), "wb") as f: pickle.dump(self.interaction_covariance, f, 2) return def bootstrap(self, durations, label, fig_fn, nbootstrap=10): """ bootstrap durations to calculate koffs, return bootstrapped values """ initial_guess = (1., 1., 1., 1.) ##### prep for plotting ###### plt.rcParams["font.size"] = 10 plt.rcParams["font.weight"] = "bold" if self.timeunit == "ns": xlabel = "Duration (ns)" elif self.timeunit == "us": xlabel = r"Duration ($\mu s$)" fig = plt.figure(1, figsize=(8.2, 3.5)) left, width = 0.0975, 0.23 bottom, height = 0.17, 0.75 left_h = left + width + 0.0375 rect_scatter = [left, bottom, width, height] rect_histy = [left_h, bottom, width, height] axScatter = fig.add_axes(rect_scatter) axHisty = fig.add_axes(rect_histy) ######## start bootstrapping ###### delta_t_range = np.arange(0, np.min(self.T_total), np.min(self.timesteps)) duration_sampled_set = [np.random.choice(durations, size=len(durations)) for dummy in range(nbootstrap)] koff1_sampled_set = [] koff2_sampled_set = [] restime_sampled_set = [] r_squared_sampled_set = [] for duration_sampled in duration_sampled_set: sigma_sampled = cal_sigma(duration_sampled, len(duration_sampled), np.max(self.T_total), delta_t_range) hist_values_sampled = np.array([sigma_sampled[delta_t] for delta_t in delta_t_range]) axHisty.plot(delta_t_range, hist_values_sampled, color="gray", alpha=0.5) restime_sampled, koff_sampled, r_squared_sampled, params_sampled = cal_restime_koff(sigma_sampled, initial_guess) n_fitted = bi_expo(np.array(delta_t_range), *params_sampled) r_squared_sampled = 1 - np.sum((np.nan_to_num(n_fitted) - np.nan_to_num(hist_values_sampled))**2)/np.sum((hist_values_sampled - np.mean(hist_values_sampled))**2) ks_sampled = [abs(k) for k in params_sampled[:2]] ks_sampled.sort() koff1_sampled_set.append(ks_sampled[0]) koff2_sampled_set.append(ks_sampled[1]) restime_sampled_set.append(restime_sampled) r_squared_sampled_set.append(r_squared_sampled) ######## plot original data ######### sigma = cal_sigma(durations, len(durations), np.max(self.T_total), delta_t_range) x = np.sort(durations) y = np.arange(len(x)) + 1 axScatter.scatter(x[::-1], y, label=label, s=10) axScatter.set_xlim(0, x[-1] * 1.1) axScatter.legend(loc="upper right", prop={"size": 10}, frameon=False) axScatter.set_ylabel("Sorted Index", fontsize=10, weight="bold") axScatter.set_xlabel(xlabel, fontsize=10, weight="bold") hist_values = np.array([sigma[delta_t] for delta_t in delta_t_range]) axHisty.scatter(delta_t_range, hist_values, zorder=8, s=3, label="sigma func.") axHisty.yaxis.set_label_position("right") axHisty.yaxis.tick_right() axHisty.set_xlabel(r"$\Delta t$", fontsize=10, weight="bold") axHisty.set_ylabel("Probability", fontsize=10, weight="bold") axHisty.set_yticks([0, 0.25, 0.5, 0.75, 1.0]) axHisty.set_ylim(-0.1, 1.1) restime, koff, r_squared, params = cal_restime_koff(sigma, initial_guess) n_fitted = bi_expo(np.array(delta_t_range), *params) r_squared = 1 - np.sum((np.nan_to_num(n_fitted) - np.nan_to_num(hist_values))**2)/np.sum((hist_values - np.mean(hist_values))**2) ks = [abs(k) for k in params[:2]] ks.sort() axHisty.plot(delta_t_range, n_fitted, 'r--', linewidth=3, zorder=10, label="Fitted biexpo.") axHisty.legend(loc="upper right", prop={"size": 10}, frameon=False) ######### labels ############ if self.timeunit == "ns": text = "{:18s} = {:.3f} ns$^{{-1}} $\n".format("$k_{{off1}}$", ks[0]) text += "{:18s} = {:.3f} ns$^{{-1}} $\n".format("$k_{{off2}}$", ks[1]) text += 
"{:14s} = {:.4f}\n".format("$R^2$", r_squared) text += "{:18s} = {:.3f} ns$^{{-1}}$ ({:3.1f}%)\n".format("$k_{{off1, boot}}$", np.mean(koff1_sampled_set), 100*np.std(koff1_sampled_set)/np.mean(koff1_sampled_set)) text += "{:18s} = {:.3f} ns$^{{-1}}$ ({:3.1f}%)\n".format("$k_{{off2, boot}}$", np.mean(koff2_sampled_set), 100*np.std(koff2_sampled_set)/np.mean(koff2_sampled_set)) text += "{:18s} = {:.4f}\n".format("$R^2$$_{{boot, avg}}$", np.mean(r_squared_sampled_set)) elif self.timeunit == "us": text = "{:18s} = {:.3f} $\mu s^{{-1}} $\n".format("$k_{{off1}}$", ks[0]) text += "{:18s} = {:.3f} $\mu s^{{-1}} $\n".format("$k_{{off2}}$", ks[1]) text += "{:14s} = {:.4f}\n".format("$R^2$", r_squared) text += "{:18s} = {:.3f} $\mu s^{{-1}}$ ({:3.1f}%)\n".format("$k_{{off1, boot}}$", np.mean(koff1_sampled_set), 100*np.std(koff1_sampled_set)/np.mean(koff1_sampled_set)) text += "{:18s} = {:.3f} $\mu s^{{-1}}$ ({:3.1f}%)\n".format("$k_{{off2, boot}}$", np.mean(koff2_sampled_set), 100*np.std(koff2_sampled_set)/np.mean(koff2_sampled_set)) text += "{:18s} = {:.4f}\n".format("$R^2$$_{{boot, avg}}$", np.mean(r_squared_sampled_set)) axHisty.text(1.4, 1.0, text, verticalalignment='top', horizontalalignment='left', transform=axHisty.transAxes, \ fontdict={"size": 8, "weight": "bold"}) plt.savefig(fig_fn, dpi=300) plt.close() return {"koff": koff, "restime": restime, "sigma": sigma, "params": params, "r_squared": r_squared, "koff_b_avg": np.mean(koff1_sampled_set), "koff_b_cv": np.std(koff1_sampled_set)/np.mean(koff1_sampled_set), "res_time_b_avg": np.mean(restime_sampled_set), "res_time_b_cv": np.std(restime_sampled_set)/np.mean(restime_sampled_set), "r_squared_b_avg": np.mean(r_squared_sampled_set)} def cal_interaction_network(self, save_dir=None, pdb=None, pymol_gui=True, save_dataset=True, nbootstrap=10, \ radii=None, gen_binding_poses=5, score_weights=None, save_pose_format="pdb", kde_bw=0.15): Residue_property_book = {"ARG": "Pos. Charge", "HIS": "Pos. Charge", "LYS": "Pos. Charge", "ASP": "Neg. Charge", "GLU": "Neg. Charge", "SER": "Polar", "THR": "Polar", "ASN": "Polar", "GLN": "Polar", "CYS": "Special", "SEC": "Special", "GLY": "Special", "PRO": "Special", "ALA": "Hydrophobic", "VAL": "Hydrophobic", "ILE": "Hydrophobic", "LEU": "Hydrophobic", "MET": "Hydrophobic", "PHE": "Hydrophobic", "TYR": "Hydrophobic", "TRP": "Hydrophobic"} MARTINI_CG_radii = {"BB": 0.26, "SC1": 0.23, "SC2": 0.23, "SC3": 0.23} if radii == None: radii_book = MARTINI_CG_radii else: radii_book = {**MARTINI_CG_radii, **radii} if save_dir == None: save_dir = check_dir(self.save_dir, "Binding_Sites_{}".format(self.lipid)) else: save_dir = check_dir(save_dir, "Binding_Sites_{}".format(self.lipid)) interaction_covariance = np.nan_to_num(self.interaction_covariance) f = open("{}/BindingSites_Info_{}.txt".format(save_dir, self.lipid), "w") ##### write out info ###### reminder = """ # Occupancy: percentage of frames where lipid is in contact with the given residue (0-100%); # Duration/Residence Time: average length of a continuous interaction of lipid with the given residue (in unit of {timeunit}); # Koff: Koff of lipid with the given residue/binding site (in unit of ({timeunit})^(-1)); # Pos. Charge: ARG, HIS, LYS; # Neg. Charge: ASP, GLU; # Polar: SER, THR, ASN, GLN; # Hydrophobic: ALA, VAL, ILE, LEU, MET, PHE, TYR, TRP; # Special: CYS, SEC, GLY, PRO. 
""".format(**{"timeunit": self.timeunit}) f.write(reminder) f.write("\n") binding_site_id = 0 covariance_network = np.copy(interaction_covariance) covariance_network[covariance_network < 0.0] = 0.0 residue_network_raw = nx.Graph(covariance_network) part = community.best_partition(residue_network_raw, weight='weight') values = [part.get(node) for node in residue_network_raw.nodes()] binding_site_identifiers = np.ones(len(self.residue_set), dtype=int) * 999 self.interaction_duration_BS = defaultdict(list) self.interaction_occupancy_BS = defaultdict(list) self.lipid_count_BS = defaultdict(list) self.sigmas_BS = {} self.params_BS = {} BS_restime = np.zeros(len(self.residue_set)) BS_koff = np.zeros(len(self.residue_set)) BS_rsquared = np.zeros(len(self.residue_set)) BS_duration = np.zeros(len(self.residue_set)) BS_lipidcount = np.zeros(len(self.residue_set)) BS_occupancy = np.zeros(len(self.residue_set)) BS_koff_b = np.zeros(len(self.residue_set)) BS_koff_b_cv = np.zeros(len(self.residue_set)) BS_restime_b = np.zeros(len(self.residue_set)) BS_restime_b_cv = np.zeros(len(self.residue_set)) BS_rsquared_b = np.zeros(len(self.residue_set)) BS_surface_area = np.zeros(len(self.residue_set)) t_total_max = np.max(self.T_total) node_list_set = [] for value in range(max(values)): node_list = [k for k,v in part.items() if v == value] if len(node_list) >= 3: binding_site_identifiers[node_list] = binding_site_id node_list_set.append(node_list) binding_site_id += 1 ########### cal site koff and surface area ############ if len(node_list_set) > 0: surface_area_all = defaultdict(list) self._coordinate_pool = [[] for dummy in np.arange(len(node_list_set))] for traj_idx, trajfile in enumerate(self.trajfile_list): traj = md.load(trajfile, top=self.grofile_list[traj_idx], stride=self.stride) if self.dt == None: timestep = traj.timestep/1000000.0 if self.timeunit == "us" else traj.timestep/1000.0 else: timestep = float(self.dt) protein_indices_all = traj.top.select("protein") natoms_per_protein = int(len(protein_indices_all)/self.nprot) for idx_protein in np.arange(self.nprot): protein_indices = protein_indices_all[idx_protein*natoms_per_protein:(idx_protein+1)*natoms_per_protein] for binding_site_id, node_list in enumerate(node_list_set): contact_BS_low = [] contact_BS_high = [] list_to_take = traj_idx*self.nprot+idx_protein for frame_idx in range(len(self.contact_residues_high[node_list[0]][list_to_take])): contact_high_frame = np.unique(np.concatenate([self.contact_residues_high[node][list_to_take][frame_idx] for node in node_list])) contact_low_frame = np.unique(np.concatenate([self.contact_residues_low[node][list_to_take][frame_idx] for node in node_list])) contact_BS_high.append(contact_high_frame) contact_BS_low.append(contact_low_frame) self.interaction_duration_BS[binding_site_id].append(Durations(contact_BS_low, contact_BS_high, timestep).cal_duration()) occupancy, lipidcount = cal_interaction_intensity(contact_BS_high) self.interaction_occupancy_BS[binding_site_id].append(occupancy) self.lipid_count_BS[binding_site_id].append(lipidcount) ########### store lipid binding poses ############ for frame_id in range(len(contact_BS_low)): for lipid_id in contact_BS_low[frame_id]: lipid_index = self.lipid_resi_set[traj_idx][lipid_id] lipid_indices = np.sort([atom.index for atom in traj.top.residue(lipid_index).atoms]) self._coordinate_pool[binding_site_id].append([np.copy(traj.xyz[frame_id, np.hstack([protein_indices, lipid_indices])]), \ np.copy(traj.unitcell_angles[frame_id]), \ 
np.copy(traj.unitcell_lengths[frame_id])]) ### calculate area ### new_xyz = [] for frame in traj.xyz: new_frame = frame[protein_indices] new_xyz.append(new_frame) reduced_frame = traj[0].atom_slice(protein_indices) reduced_top = reduced_frame.top if reduced_top.residue(0).index != 0: starting_index = reduced_top.residue(0).index for residue in reduced_top.residues: residue.index -= starting_index new_traj = md.Trajectory(new_xyz, reduced_top, time=traj.time, unitcell_lengths=traj.unitcell_lengths, unitcell_angles=traj.unitcell_angles) areas = md.shrake_rupley(new_traj, mode='residue', change_radii=radii_book) for binding_site_id, node_list in enumerate(node_list_set): surface_area_all[binding_site_id].append(areas[:, node_list].sum(axis=1)) ########### write and plot results ########### for binding_site_id in np.arange(len(node_list_set)): duration_raw = np.concatenate(self.interaction_duration_BS[binding_site_id]) mask = (binding_site_identifiers == binding_site_id) bootstrap_results = self.bootstrap(duration_raw, "BS id: {}".format(binding_site_id), "{}/BS_koff_id{}.pdf".format(save_dir, binding_site_id), nbootstrap=nbootstrap) self.sigmas_BS[binding_site_id] = bootstrap_results["sigma"] self.params_BS[binding_site_id] = bootstrap_results["params"] BS_restime[mask] = bootstrap_results["restime"] BS_koff[mask] = bootstrap_results["koff"] BS_rsquared[mask] = bootstrap_results["r_squared"] BS_koff_b[mask] = bootstrap_results["koff_b_avg"] BS_koff_b_cv[mask] = bootstrap_results["koff_b_cv"] BS_restime_b[mask] = bootstrap_results["res_time_b_avg"] BS_restime_b_cv[mask] = bootstrap_results["res_time_b_cv"] BS_rsquared_b[mask] = bootstrap_results["r_squared_b_avg"] bs_area = np.concatenate(surface_area_all[binding_site_id]).mean() BS_surface_area[mask] = bs_area ############# write results ############### f.write("# Binding site {}\n".format(binding_site_id)) BS_restime[mask] = bootstrap_results["restime"] if bootstrap_results["restime"] <= t_total_max else t_total_max if bootstrap_results["restime"] <= t_total_max: f.write("{:20s} {:10.3f} {:5s} R squared: {:7.4f}\n".format(" BS Residence Time:", bootstrap_results["restime"], self.timeunit, bootstrap_results["r_squared"])) else: f.write("{:20s} {:10.3f} {:5s}** R squared: {:7.4f}\n".format(" BS Residence Time:", t_total_max, self.timeunit, bootstrap_results["r_squared"])) f.write("{:20s} {:10.3f}\n".format(" BS koff:", bootstrap_results["koff"])) f.write("{:20s} {:10.3f} +- {:10.3f}\n".format(" BS koff Bootstrap:", bootstrap_results["koff_b_avg"], bootstrap_results["koff_b_cv"])) duration = np.mean(np.concatenate(self.interaction_duration_BS[binding_site_id])) BS_duration[mask] = duration f.write("{:20s} {:10.3f} {:5s}\n".format(" BS Duration:", duration, self.timeunit)) occupancy = np.mean(self.interaction_occupancy_BS[binding_site_id]) BS_occupancy[mask] = occupancy f.write("{:20s} {:10.3f} %\n".format(" BS Lipid Occupancy:", occupancy)) lipidcount = np.mean(self.lipid_count_BS[binding_site_id]) BS_lipidcount[mask] = lipidcount f.write("{:20s} {:10.3f}\n".format(" BS Lipid Count:", lipidcount)) f.write("{:20s} {:10.3f} nm^2 +- {:10.3f}\n".format(" BS Surface Area:", bs_area, np.concatenate(surface_area_all[binding_site_id]).std())) res_stats = {"Pos. Charge": 0, "Neg. Charge": 0, "Polar": 0, "Special": 0, "Hydrophobic": 0} for residue in self.residue_set[mask]: res_stats[Residue_property_book[re.findall("[a-zA-Z]+$", residue)[0]]] += 1 BS_num_resi = np.sum(mask) f.write("{:20s} {:10s}\n".format(" Pos. Charge:", "/".join([str(res_stats["Pos. 
Charge"]), str(BS_num_resi)]))) f.write("{:20s} {:10s}\n".format(" Neg. Charge:", "/".join([str(res_stats["Neg. Charge"]), str(BS_num_resi)]))) f.write("{:20s} {:10s}\n".format(" Polar:", "/".join([str(res_stats["Polar"]), str(BS_num_resi)]))) f.write("{:20s} {:10s}\n".format(" Hydrophobic:", "/".join([str(res_stats["Hydrophobic"]), str(BS_num_resi)]))) f.write("{:20s} {:10s}\n".format(" Special:", "/".join([str(res_stats["Special"]), str(BS_num_resi)]))) f.write("{:^9s}{:^9s}{:^13s}{:^11s}{:^10s}{:^10s}{:^10s}{:^13s}{:^10s}{:^10s}\n".format("Residue", "Duration", "Duration std", \ "Res. Time", "R squared", "Occupancy", "Occu. std", "Lipid Count", "L. C. std", "Koff")) for residue in self.residue_set[mask]: f.write("{Residue:^9s}{Duration:^9.3f}{Duration_std:^13.3f}{Residence Time:^11.3f}{R squared:^10.4f}{Occupancy:^10.3f}{Occupancy_std:^10.3f}{LipidCount:^13.3f}{LipidCount_std:^10.3f}{Koff:^10.4f}\n".format(\ **self.dataset[self.dataset["Residue"]==residue].to_dict("records")[0] )) f.write("\n") f.write("\n") f.close() ######################## plot area stats ########################## bs_id_set = [] bs_area_set = [] for binding_site_id in surface_area_all.keys(): bs_area_set.append(np.concatenate(surface_area_all[binding_site_id])) bs_id_set.append([binding_site_id for dummy in np.arange(len(np.concatenate(surface_area_all[binding_site_id])))]) d_area = pd.DataFrame({"BS id": np.concatenate(bs_id_set), "Area (nm^2)": np.concatenate(bs_area_set)}) plt.rcParams["font.size"] = 8 plt.rcParams["font.weight"] = "bold" if len(surface_area_all.keys()) <= 8: fig, ax = plt.subplots(figsize=(4.5, 2.8)) elif len(surface_area_all.keys()) > 8 and len(surface_area_all.keys()) <= 15: fig, ax = plt.subplots(figsize=(6.5, 2.8)) else: fig, ax = plt.subplots(figsize=(9.5, 3)) sns.violinplot(x="BS id", y="Area (nm^2)", data=d_area, palette="Set3", bw=.2, cut=1, linewidth=1, ax=ax) ax.set_xlabel("BS id", fontsize=8, weight="bold") ax.set_ylabel(r"Surface Area (nm$^2$)", fontsize=8, weight="bold") ax.set_title("{} Binding Site Surface Area".format(self.lipid), fontsize=8, weight="bold") plt.tight_layout() plt.savefig("{}/BS_surface_area.pdf".format(save_dir), dpi=300) plt.close() ################ update dataset ######################## self.dataset["Binding site"] = binding_site_identifiers self.dataset["BS Residence Time"] = BS_restime self.dataset["BS koff"] = BS_koff self.dataset["BS Duration"] = BS_duration self.dataset["BS Occupancy"] = BS_occupancy self.dataset["BS LipidCount"] = BS_lipidcount self.dataset["BS R squared"] = BS_rsquared self.dataset["BS Residence Time_boot"] = BS_restime_b self.dataset["BS Residence Time_boot_cv"] = BS_restime_b_cv self.dataset["BS koff_boot"] = BS_koff_b self.dataset["BS koff_boot_cv"] = BS_koff_b_cv self.dataset["BS R squared_boot"] = BS_rsquared_b self.dataset["BS Surface Area"] = BS_surface_area self.dataset.to_csv("{}/Interactions_{}.csv".format(self.save_dir, self.lipid), index=False) ################ save dataset ################### if save_dataset: dataset_dir = check_dir(self.save_dir, "Dataset") with open("{}/BS_interaction_duration_{}.pickle".format(dataset_dir, self.lipid), "wb") as f: pickle.dump(self.interaction_duration_BS, f, 2) with open("{}/BS_sigmas_{}.pickle".format(dataset_dir, self.lipid), "wb") as f: pickle.dump(self.sigmas_BS, f, 2) with open("{}/BS_curve_fitting_params_{}.pickle".format(dataset_dir, self.lipid), "wb") as f: pickle.dump(self.params_BS, f, 2) with open("{}/BS_surface_area_{}.pickle".format(dataset_dir, self.lipid), "wb") as f: 
pickle.dump(surface_area_all, f, 2) ################## generate binding poses ################ if gen_binding_poses > 0 and len(node_list_set) > 0: coords_save_dir = check_dir(save_dir, "Binding_Poses") lipid_atom_map = {atom.index:atom.name for atom in self._lipid_ref.top.atoms} weights = {name:1 for index, name in lipid_atom_map.items()} if score_weights != None: weights.update(score_weights) binding_site_id_set = np.arange(len(self._coordinate_pool)) if len(self.resi_list) == 0: selected_protein_atoms = [[atom.index for atom in residue.atoms] for residue in self._protein_ref.top.residues] else: selected_protein_atoms = [[atom.index for atom in residue.atoms] for residue in self._protein_ref.top.residues \ if residue.resSeq in self.resi_list] lipid_atoms = [self._protein_ref.n_atoms + atom_idx for atom_idx in np.arange(self._lipid_ref.n_atoms)] joined_top = self._protein_ref.top.join(self._lipid_ref.top) for binding_site_id in binding_site_id_set: num_of_poses = gen_binding_poses if gen_binding_poses <= len(self._coordinate_pool[binding_site_id]) \ else len(self._coordinate_pool[binding_site_id]) node_list = node_list_set[binding_site_id] new_traj = md.Trajectory([frame[0] for frame in self._coordinate_pool[binding_site_id]], joined_top, \ time=np.arange(len(self._coordinate_pool[binding_site_id])), \ unitcell_angles=[frame[1] for frame in self._coordinate_pool[binding_site_id]], \ unitcell_lengths=[frame[2] for frame in self._coordinate_pool[binding_site_id]]) dist_per_atom = [[md.compute_distances(new_traj, list(product([lipid_atoms[idx]], selected_protein_atoms[resi])), periodic=True).min(axis=1) \ for resi in node_list] for idx in np.arange(self._lipid_ref.n_atoms)] kde_funcs = {} var_type = "" bw = [] for idx in range(len(dist_per_atom[0])): var_type += "c" bw.append(kde_bw) try: for atom_idx in np.arange(self._lipid_ref.n_atoms): kde_funcs[atom_idx] = KDEMultivariate(data=np.array(dist_per_atom[atom_idx]).T, \ var_type=var_type, bw=bw) ### evaluate binding poses ### scores = np.sum([weights[lipid_atom_map[idx]] * kde_funcs[idx].pdf() \ for idx in np.arange(self._lipid_ref.n_atoms)], axis=0) selected_indices = np.argsort(scores)[::-1][:num_of_poses] ############################### for pose_id in np.arange(num_of_poses, dtype=int): new_traj[selected_indices[pose_id]].save("{}/BSid{}_No{}.{}".format(coords_save_dir, \ binding_site_id, pose_id, save_pose_format)) except ValueError: with open("{}/Error.txt".format(coords_save_dir), "a+") as error_file: error_file.write("BSid {}: Pose generation error -- possibly due to insufficient number of binding event.\n".format(binding_site_id)) ###################################################################### ###### show binding site residues with scaled spheres in pymol ####### ###################################################################### if pdb != None: ############ check if pdb has a path to it ########## pdb_new_loc = os.path.join(self.save_dir, os.path.basename(pdb)) copyfile(pdb, pdb_new_loc) struct_ref = md.load(pdb_new_loc) ########### write out a pymol pml file ############### binding_site_id += 1 text = """ import pandas as pd import numpy as np import mdtraj as md import pymol from pymol import cmd pymol.finish_launching() dataset = pd.read_csv("{HOME_DIR}/Interactions_{LIPID}.csv") residue_set = np.array(dataset["Residue"].tolist()) binding_site_id = {BINDING_SITE_ID} binding_site_identifiers = np.array(dataset["Binding site"].tolist()) struct_ref = md.load("{PDB}") ######### calculate scale ############### 
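# sphere scale for each binding-site residue is derived from its Residence Time relative to the median (MID)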
residue_idx_set = dataset["Residue idx"] interactions = np.zeros(residue_idx_set.max()+1) values_to_check = dataset["Residence Time"] interactions[residue_idx_set] = values_to_check MID = values_to_check.quantile(0.5) SCALES = 1.5 / 5 + np.exp(-30 * (interactions - MID)) ###################################### ######## some pymol settings ######### cmd.set("retain_order", 1) cmd.set("cartoon_oval_length", 1.0) cmd.set("cartoon_oval_width", 0.3) cmd.set("cartoon_color", "white") cmd.set("stick_radius", 0.35) ################################## cmd.load("{PDB}", "Prot_{LIPID}") prefix = "Prot_{LIPID}" cmd.hide("everything") cmd.show("cartoon", prefix) cmd.center(prefix) cmd.orient(prefix) colors = np.array([np.random.choice(np.arange(256, dtype=float), size=3) for dummy in range(binding_site_id)]) colors /= 255.0 """.format(**{"HOME_DIR": self.save_dir, "LIPID": self.lipid, "BINDING_SITE_ID": binding_site_id, "PDB": pdb_new_loc}) text += r""" for bs_id in np.arange(binding_site_id): cmd.set_color("tmp_{}".format(bs_id), list(colors[bs_id])) for selected_residue in np.where(binding_site_identifiers == bs_id)[0]: selected_residue_index = residue_idx_set[selected_residue] selected_atom_indices = np.array([atom.index for atom in struct_ref.top.residue(selected_residue_index).atoms], dtype=str) selected_resid = struct_ref.top.residue(selected_residue_index).resSeq selected_resn = struct_ref.top.residue(selected_residue_index).name cmd.select("BS{}_{}{}".format(bs_id, selected_resid, selected_resn), "rank {} and (not name C+O+N)".format("+".join(selected_atom_indices))) cmd.show("spheres", "BS{}_{}{}".format(bs_id, selected_resid, selected_resn)) cmd.set("sphere_scale", SCALES[selected_residue_index], selection="BS{}_{}{}".format(bs_id, selected_resid, selected_resn)) cmd.color("tmp_{}".format(bs_id), "BS{}_{}{}".format(bs_id, selected_resid, selected_resn)) cmd.group("BS{}".format(bs_id), "BS{}_*".format(bs_id)) """ with open("{}/show_binding_sites_info.py".format(self.save_dir), "w") as f: f.write(text) ################## Launch a pymol session ####################### if pymol_gui: import pymol from pymol import cmd pymol.finish_launching(['pymol', '-q']) ##### do some pymol settings ##### residue_idx_set = self.dataset["Residue idx"] interactions = np.zeros(residue_idx_set.max()+1) values_to_check = self.dataset["Residence Time"] interactions[residue_idx_set] = values_to_check MID = values_to_check.quantile(0.5) SCALES = 1.5 / 5 + np.exp(-30 * (interactions - MID)) ##### do some pymol settings ##### cmd.set("retain_order", 1) cmd.set("cartoon_oval_length", 1.0) cmd.set("cartoon_oval_width", 0.3) cmd.set("cartoon_color", "white") cmd.set("stick_radius", 0.35) ################################## cmd.load(pdb_new_loc, "Prot_{}".format(self.lipid)) prefix = "Prot_{}".format(self.lipid) cmd.hide("everything") cmd.show("cartoon", prefix) cmd.center(prefix) cmd.orient(prefix) colors = np.array([np.random.choice(np.arange(256, dtype=float), size=3) for dummy in range(binding_site_id)]) colors /= 255.0 for bs_id in np.arange(binding_site_id): cmd.set_color("tmp_{}".format(bs_id), list(colors[bs_id])) for selected_residue in np.where(binding_site_identifiers == bs_id)[0]: selected_residue_index = residue_idx_set[selected_residue] selected_atom_indices = np.array([atom.index for atom in struct_ref.top.residue(selected_residue_index).atoms], dtype=str) selected_resid = struct_ref.top.residue(selected_residue_index).resSeq selected_resn = struct_ref.top.residue(selected_residue_index).name 
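                        # Each binding-site residue is selected without its backbone C/O/N atoms, shown as
                        # spheres whose size follows SCALES (derived from the residue's Residence Time)
                        # and coloured by the binding site it belongs to.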
cmd.select("{}_BS{}_{}{}".format(self.lipid, bs_id, selected_resid, selected_resn), \ "rank {} and (not name C+O+N)".format("+".join(selected_atom_indices))) cmd.show("spheres", "{}_BS{}_{}{}".format(self.lipid, bs_id, selected_resid, selected_resn)) cmd.set("sphere_scale", SCALES[selected_residue_index], selection="{}_BS{}_{}{}".format(self.lipid, bs_id, selected_resid, selected_resn)) cmd.color("tmp_{}".format(bs_id), "{}_BS{}_{}{}".format(self.lipid, bs_id, selected_resid, selected_resn)) cmd.group("{}_BS{}".format(self.lipid, bs_id), "{}_BS{}_*".format(self.lipid, bs_id)) return def plot_interactions(self, item="Duration", save_dir=None, letter_map=None, chain_breaks=[]): if save_dir == None: save_dir = check_dir(self.save_dir, "Figures_{}".format(self.lipid)) else: save_dir = check_dir(save_dir, "Figures_{}".format(self.lipid)) ### single-letter dictionary ### single_letter = {'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K', 'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N', 'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W', 'ALA': 'A', 'VAL':'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M'} if letter_map != None: single_letter.update(letter_map) if len(chain_breaks) == 0: chain_break_points = [0, len(self.dataset)] no_break = True else: chain_break_points = [0] for points in chain_breaks: chain_break_points.append(points) chain_break_points.append(len(self.dataset)) no_break = False plt.rcParams["font.size"] = 8 plt.rcParams["font.weight"] = "bold" for point_idx in np.arange(1, len(chain_break_points), dtype=int): dataset = self.dataset[chain_break_points[point_idx-1]:chain_break_points[point_idx]] data = dataset[item] if len(data) == 0: continue resi = np.array([int(re.findall("^[0-9]+", residue)[0]) for residue in self.residue_set])[chain_break_points[point_idx-1]:chain_break_points[point_idx]] SL_resn = [single_letter[re.findall("[a-zA-Z]+$", residue)[0]] for residue in self.residue_set][chain_break_points[point_idx-1]:chain_break_points[point_idx]] width = 1 sns.set_style("ticks", {'xtick.major.size': 5.0, 'ytick.major.size': 5.0}) if item == "Residence Time": if len(data) <= 500: fig = plt.figure(figsize=(5.5, 5)) elif len(data) > 500 and len(data) <= 1500: fig = plt.figure(figsize=(7.5, 5)) else: fig = plt.figure(figsize=(9, 6)) ax_R2 = fig.add_axes([0.18, 0.79, 0.75, 0.10]) ax_capped = fig.add_axes([0.18, 0.71, 0.75, 0.05]) ax_data = fig.add_axes([0.18, 0.50, 0.75, 0.18]) ax_boot = fig.add_axes([0.18, 0.22, 0.75, 0.18]) ax_boot_cv = fig.add_axes([0.18, 0.08, 0.75, 0.10]) ax_boot.xaxis.tick_top() ax_boot.invert_yaxis() ax_boot_cv.invert_yaxis() for ax in [ax_data, ax_capped, ax_R2, ax_boot, ax_boot_cv]: ax.yaxis.set_ticks_position('left') ax.spines['right'].set_visible(False) for ax in [ax_capped, ax_R2, ax_boot_cv]: ax.xaxis.set_ticks_position('none') ax.spines['top'].set_visible(False) ax.spines['bottom'].set_visible(False) ax.set_xticklabels([]) ax_data.spines['top'].set_visible(False) ax_boot.spines['bottom'].set_visible(False) if len(data) > 1000: ax_data.xaxis.set_major_locator(MultipleLocator(200)) ax_data.xaxis.set_minor_locator(MultipleLocator(50)) ax_boot.xaxis.set_major_locator(MultipleLocator(200)) ax_boot.xaxis.set_minor_locator(MultipleLocator(50)) elif len(data) <= 1000 and len(data) > 100: ax_data.xaxis.set_major_locator(MultipleLocator(100)) ax_data.xaxis.set_minor_locator(MultipleLocator(10)) ax_boot.xaxis.set_major_locator(MultipleLocator(100)) ax_boot.xaxis.set_minor_locator(MultipleLocator(10)) elif len(data) <= 100: 
ax_data.xaxis.set_major_locator(MultipleLocator(10)) ax_data.xaxis.set_minor_locator(MultipleLocator(1)) ax_boot.xaxis.set_major_locator(MultipleLocator(10)) ax_boot.xaxis.set_minor_locator(MultipleLocator(1)) if self.timeunit == "ns": timeunit = " (ns) " elif self.timeunit == "us": timeunit = r" ($\mu s$)" ax_data.bar(resi, data, width, linewidth=0, color="#F75C03") ax_data.set_ylabel("Res. Time {}".format(timeunit), fontsize=8, weight="bold", va="center") ax_data.set_xlabel("Residue Index", fontsize=8, weight="bold") ax_capped.plot(resi, dataset["Capped"]*1, linewidth=0, marker="+", markerfacecolor="#38040E", \ markeredgecolor="#38040E", markersize=2) ax_capped.set_ylim(0.9, 1.1) ax_capped.set_yticks([1.0]) ax_capped.set_yticklabels(["Capped"], fontsize=8, weight="bold") ax_capped.set_xlim(ax_data.get_xlim()) mask = dataset["R squared"] > 0 ax_R2.plot(resi[mask], dataset["R squared"][mask], linewidth=0, marker="+", markerfacecolor="#0FA3B1", markeredgecolor="#0FA3B1", \ markersize=2) ax_R2.set_xlim(ax_data.get_xlim()) ax_R2.set_ylabel(r"$R^2$", fontsize=8, weight="bold", va="center") ax_R2.set_title("{} {}".format(self.lipid, item), fontsize=8, weight="bold") ax_boot.bar(resi, dataset["Residence Time_boot"], width, linewidth=0, color="#F75C03") ax_boot.set_xlim(ax_data.get_xlim()) ax_boot.set_ylabel("Res. Time \n Boot. {}".format(timeunit), fontsize=8, weight="bold", va="center") ax_boot.set_xticklabels([]) mask = dataset["R squared_boot"] > 0 mask = dataset["Residence Time_boot_cv"] > 0 ax_boot_cv.plot(resi[mask], dataset["Residence Time_boot_cv"][mask], linewidth=0, marker="+", markerfacecolor="#0FA3B1", markeredgecolor="#F7B538", markersize=2) ax_boot_cv.set_ylabel("Coef. Var.", fontsize=8, weight="bold", va="center") ax_boot_cv.set_xlim(ax_data.get_xlim()) for ax in [ax_data, ax_capped, ax_R2, ax_boot, ax_boot_cv]: ax.yaxis.set_label_coords(-0.15, 0.5, transform=ax.transAxes) if no_break: plt.savefig("{}/{}_{}.pdf".format(save_dir, "_".join(item.split()), self.lipid), dpi=300) else: plt.savefig("{}/{}_{}_{}.pdf".format(save_dir, "_".join(item.split()), self.lipid, str(point_idx)), dpi=300) plt.close() ###### logomater ##### df = pd.DataFrame({"Resid": resi, "Resn": SL_resn, "Data": data}) matrix = df.pivot(index="Resid", columns='Resn', values="Data").fillna(0) n_rows = 1 + resi[-1]//100 - resi[0]//100 start = (resi[0]//100)*100 length = start + 100 - resi[0] fig, axes = plt.subplots(n_rows, 1, figsize=(4.5, 1.3*n_rows), sharey=True) plt.subplots_adjust(hspace=0.5) for idx, ax in enumerate(np.atleast_1d(axes)): if idx == (n_rows - 1): logomaker.Logo(matrix[(idx-1)*100 + length:], color_scheme="chemistry", ax=ax) ax.set_xlabel("Residue Index", fontsize=8, weight="bold") elif idx == 0: logomaker.Logo(matrix[:length], color_scheme="chemistry", ax=ax) else: logomaker.Logo(matrix[(idx-1)*100+length:idx*100+length], color_scheme="chemistry", ax=ax) ax.xaxis.set_major_locator(MultipleLocator(20)) ax.xaxis.set_minor_locator(MultipleLocator(1)) ax.set_xlim(idx*100+start, (idx+1)*100+start) ax.set_ylim(0, data.max()*1.05) ax.set_ylabel("Res. 
Time {}".format(timeunit), fontsize=8, weight="bold", va="center") for label in ax.xaxis.get_ticklabels() + ax.yaxis.get_ticklabels(): plt.setp(label, fontsize=8, weight="bold") plt.tight_layout() if no_break: plt.savefig("{}/{}_logo_{}.pdf".format(save_dir, "_".join(item.split()), self.lipid), dpi=300) else: plt.savefig("{}/{}_logo_{}_{}.pdf".format(save_dir, "_".join(item.split()), self.lipid, str(point_idx)), dpi=300) plt.close() else: fig, ax = plt.subplots(1, 1, figsize=(4.5,2.8)) ax.bar(resi, data, width, linewidth=0, color=sns.xkcd_rgb["red"]) sns.despine(fig, top=True, right=True, trim=False) if len(data) > 1000: ax.xaxis.set_major_locator(MultipleLocator(200)) ax.xaxis.set_minor_locator(MultipleLocator(50)) elif len(data) <= 1000: ax.xaxis.set_major_locator(MultipleLocator(100)) ax.xaxis.set_minor_locator(MultipleLocator(10)) ax.set_xlabel("Residue Index", fontsize=8, weight="bold") if self.timeunit == "ns": timeunit = " (ns) " elif self.timeunit == "us": timeunit = r" ($\mu s$)" if item == "Duration": ylabel = item + timeunit elif item == "Occupancy": ylabel = item + " 100% " elif item == "LipidCount": ylabel = "Num. of Lipids" ax.set_ylabel(ylabel, fontsize=8, weight="bold") for label in ax.xaxis.get_ticklabels() + ax.yaxis.get_ticklabels(): plt.setp(label, fontsize=8, weight="bold") ax.set_title("{} {}".format(self.lipid, item), fontsize=8, weight="bold") plt.tight_layout() if no_break: plt.savefig("{}/{}_{}.pdf".format(save_dir, "_".join(item.split()), self.lipid), dpi=300) else: plt.savefig("{}/{}_{}_{}.pdf".format(save_dir, "_".join(item.split()), self.lipid, str(point_idx)), dpi=300) plt.close() ###### logomater ##### df = pd.DataFrame({"Resid": resi, "Resn": SL_resn, "Data": data}) matrix = df.pivot(index="Resid", columns='Resn', values="Data").fillna(0) n_rows = 1 + resi[-1]//100 - resi[0]//100 start = (resi[0]//100)*100 length = start + 100 - resi[0] fig, axes = plt.subplots(n_rows, 1, figsize=(4.5, 1.3*n_rows), sharey=True) plt.subplots_adjust(hspace=0.5) for idx, ax in enumerate(np.atleast_1d(axes)): if idx == (n_rows - 1): logomaker.Logo(matrix[(idx-1)*100 + length:], color_scheme="chemistry", ax=ax) ax.set_xlabel("Residue Index", fontsize=8, weight="bold") elif idx == 0: logomaker.Logo(matrix[:length], color_scheme="chemistry", ax=ax) else: logomaker.Logo(matrix[(idx-1)*100+length:idx*100+length], color_scheme="chemistry", ax=ax) ax.xaxis.set_major_locator(MultipleLocator(20)) ax.xaxis.set_minor_locator(MultipleLocator(1)) ax.set_xlim(idx*100+start, (idx+1)*100+start) ax.set_ylim(0, data.max()*1.05) ax.set_ylabel(ylabel, fontsize=8, weight="bold", va="center") for label in ax.xaxis.get_ticklabels() + ax.yaxis.get_ticklabels(): plt.setp(label, fontsize=8, weight="bold") plt.tight_layout() if no_break: plt.savefig("{}/{}_logo_{}.pdf".format(save_dir, "_".join(item.split()), self.lipid), dpi=300) else: plt.savefig("{}/{}_logo_{}_{}.pdf".format(save_dir, "_".join(item.split()), self.lipid, str(point_idx)), dpi=300) plt.close() return def write_to_pdb(self, item, save_dir=None): if save_dir == None: save_dir = check_dir(self.save_dir, "Coordinates_{}".format(self.lipid)) else: save_dir = check_dir(save_dir, "Coordinates_{}".format(self.lipid)) ##### load coords ###### data = self.dataset[item] coords = self._protein_ref.xyz[0] table, _ = self._protein_ref.top.to_dataframe() atom_idx_set = table.serial resid_set = table.resSeq + self.resi_offset atom_name_set = table.name resn_set = table.resName chainID = [chr(65+int(idx)) for idx in table.chainID] data_expanded = 
np.zeros(len(table)) residue_indices = np.array([atom.residue.index for atom in self._protein_ref.top.atoms]) for value, selected_residue_index in zip(data, self._selected_residue_indices): locations = np.where(residue_indices == selected_residue_index)[0] data_expanded[locations] = value ######## write out coords ########### fn = "{}/Coords_{}.pdb".format(save_dir, "_".join(item.split())) with open(fn, "w") as f: for idx in np.arange(self._protein_ref.n_atoms): coords_dictionary = {"HEADER": "ATOM", "ATOM_ID": atom_idx_set[idx], "ATOM_NAME": atom_name_set[idx], "SPARE": "", "RESN": resn_set[idx], "CHAIN_ID": chainID[idx], "RESI": resid_set[idx], "COORDX": coords[idx, 0] * 10, "COORDY": coords[idx, 1] * 10, "COORDZ": coords[idx, 2] * 10, "OCCUP": 1.0, "BFACTOR": data_expanded[idx]} row = "{HEADER:6s}{ATOM_ID:5d} ".format(**coords_dictionary) +\ "{ATOM_NAME:^4s}{SPARE:1s}{RESN:3s} ".format(**coords_dictionary) +\ "{CHAIN_ID:1s}{RESI:4d}{SPARE:1s} ".format(**coords_dictionary) +\ "{COORDX:8.3f}{COORDY:8.3f}{COORDZ:8.3f}{OCCUP:6.2f}{BFACTOR:6.2f}\n".format(**coords_dictionary) f.write(row) f.write("TER") return ###################################################### ########### Load params and do calculation ########### ###################################################### if __name__ == '__main__': trajfile_list = args.f grofile_list = args.c lipid_set = args.lipids cutoff = [float(data) for data in args.cutoffs] save_dir = check_dir(args.save_dir) ####################################################################### ######## write a backup file of params for reproducibility ############ fn = os.path.join(save_dir, "pylipid_backup_{}.txt".format(datetime.datetime.now().strftime("%Y_%m_%d_%H%M"))) with open(fn, "w") as f: f.write("##### Record params for reproducibility #####\n") f.write("python {}\n".format(" ".join(sys.argv))) ###################################################################### ######################### process resi_list ########################## resi_list = [] if len(args.resi_list) > 0: for item in args.resi_list: if "-" in item: item_list = item.split("-") resi_list.append(np.arange(int(item_list[0]), int(item_list[-1])+1)) else: resi_list.append(int(item)) resi_list = np.hstack(resi_list) ####################################################################### ############################ change of radii ########################## ##### mdtraj default radii: ##### https://github.com/mdtraj/mdtraj/blob/b28df2cd6e5c35fa006fe3c24728857880793abb/mdtraj/geometry/sasa.py#L56 if args.radii == None: radii_book = None else: radii_book = {} for item in args.radii: radius = item.split(":") radii_book[radius[0]] = float(radius[1]) ####################################################################### ################# score weight for kde calculation #################### if args.score_weights == None: score_weights = None else: score_weights = {} for item in args.score_weights: weight = item.split(":") score_weights[weight[0]] = float(weight[1]) ####################################################################### ################# map three letter to single letter ################### letter_map = None if args.letter_map != None: letter_map = {} for item in args.letter_map: letter_map[item.split(":")[0]] = item.split(":")[1] ####################################################################### ################# process chain breaks ################################ chain_breaks = [] if len(args.chain_breaks) == 0 else [int(num)-1 for num in args.chain_breaks] 
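    # Illustration of the option parsing above (hypothetical values): "-resi_list 1-10 40 45"
    # expands to np.hstack([np.arange(1, 11), 40, 45]); "-radii BB:0.26 SC1:0.23" becomes
    # {"BB": 0.26, "SC1": 0.23}; "-chain_breaks 100 281" is converted to the 0-based [99, 280].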
####################################################################### for lipid in lipid_set: li = LipidInteraction(trajfile_list, grofile_list, stride=int(args.stride), dt=args.dt, cutoff=cutoff, lipid=lipid, \ lipid_atoms=args.lipid_atoms, nprot=args.nprot, timeunit=args.tu, resi_offset=int(args.resi_offset), \ resi_list=resi_list, save_dir=args.save_dir) li.cal_interactions(save_dataset=args.save_dataset, nbootstrap=int(args.nbootstrap)) li.plot_interactions(item="Duration", letter_map=letter_map, chain_breaks=chain_breaks) li.plot_interactions(item="Residence Time", letter_map=letter_map, chain_breaks=chain_breaks) li.plot_interactions(item="Occupancy", letter_map=letter_map, chain_breaks=chain_breaks) li.plot_interactions(item="LipidCount", letter_map=letter_map, chain_breaks=chain_breaks) li.write_to_pdb(item="Duration") li.write_to_pdb(item="Residence Time") li.write_to_pdb(item="Occupancy") li.write_to_pdb(item="LipidCount") li.cal_interaction_network(pdb=args.pdb, save_dataset=args.save_dataset, \ pymol_gui=args.pymol_gui, radii=radii_book, gen_binding_poses=int(args.gen_binding_poses), \ score_weights=score_weights, save_pose_format=args.save_pose_format)
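# A hypothetical invocation (trajectory/coordinate file names are placeholders; the flags shown
# are the ones read from `args` in the block above):
#   python this_script.py -f run1.xtc run2.xtc -c run1.gro run2.gro -lipids POPC \
#       -cutoffs 0.55 1.0 -nprot 1 -tu us -save_dir results -nbootstrap 10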
[((279, 300), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (293, 300), False, 'import matplotlib\n'), ((724, 786), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (745, 786), False, 'import warnings\n'), ((787, 820), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (810, 820), False, 'import warnings\n'), ((821, 844), 'numpy.seterr', 'np.seterr', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (830, 844), True, 'import numpy as np\n'), ((964, 989), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (987, 989), False, 'import argparse\n'), ((12832, 12892), 'numpy.nan_to_num', 'np.nan_to_num', (['[sigma[delta_t] for delta_t in delta_t_range]'], {}), '([sigma[delta_t] for delta_t in delta_t_range])\n', (12845, 12892), True, 'import numpy as np\n'), ((14383, 14393), 'numpy.diag', 'np.diag', (['C'], {}), '(C)\n', (14390, 14393), True, 'import numpy as np\n'), ((11827, 11851), 'numpy.array', 'np.array', (['contact_counts'], {}), '(contact_counts)\n', (11835, 11851), True, 'import numpy as np\n'), ((11885, 11909), 'numpy.array', 'np.array', (['contact_counts'], {}), '(contact_counts)\n', (11893, 11909), True, 'import numpy as np\n'), ((13336, 13346), 'numpy.min', 'np.min', (['ks'], {}), '(ks)\n', (13342, 13346), True, 'import numpy as np\n'), ((13686, 13697), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (13695, 13697), False, 'import os\n'), ((13727, 13752), 'os.path.abspath', 'os.path.abspath', (['save_dir'], {}), '(save_dir)\n', (13742, 13752), False, 'import os\n'), ((13795, 13825), 'os.path.join', 'os.path.join', (['save_dir', 'suffix'], {}), '(save_dir, suffix)\n', (13807, 13825), False, 'import os\n'), ((13837, 13860), 'os.path.isdir', 'os.path.isdir', (['save_dir'], {}), '(save_dir)\n', (13850, 13860), False, 'import os\n'), ((13930, 13951), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (13941, 13951), False, 'import os\n'), ((14041, 14076), 'scipy.sparse.vstack', 'sparse.vstack', (['(A, B)'], {'format': '"""csr"""'}), "((A, B), format='csr')\n", (14054, 14076), False, 'from scipy import sparse\n'), ((15705, 15722), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (15716, 15722), False, 'from collections import defaultdict\n'), ((15760, 15777), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (15771, 15777), False, 'from collections import defaultdict\n'), ((15805, 15822), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (15816, 15822), False, 'from collections import defaultdict\n'), ((15860, 15877), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (15871, 15877), False, 'from collections import defaultdict\n'), ((15914, 15931), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (15925, 15931), False, 'from collections import defaultdict\n'), ((30056, 30076), 'numpy.max', 'np.max', (['self.T_total'], {}), '(self.T_total)\n', (30062, 30076), True, 'import numpy as np\n'), ((30096, 30162), 'numpy.array', 'np.array', (['[self.res_time[residue] for residue in self.residue_set]'], {}), '([self.res_time[residue] for residue in self.residue_set])\n', (30104, 30162), True, 'import numpy as np\n'), ((30251, 30319), 'numpy.array', 'np.array', (['[self.res_time_b[residue] for residue in self.residue_set]'], {}), '([self.res_time_b[residue] for residue in self.residue_set])\n', 
(30259, 30319), True, 'import numpy as np\n'), ((34516, 34549), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(8.2, 3.5)'}), '(1, figsize=(8.2, 3.5))\n', (34526, 34549), True, 'import matplotlib.pyplot as plt\n'), ((36413, 36431), 'numpy.sort', 'np.sort', (['durations'], {}), '(durations)\n', (36420, 36431), True, 'import numpy as np\n'), ((36804, 36859), 'numpy.array', 'np.array', (['[sigma[delta_t] for delta_t in delta_t_range]'], {}), '([sigma[delta_t] for delta_t in delta_t_range])\n', (36812, 36859), True, 'import numpy as np\n'), ((39475, 39503), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig_fn'], {'dpi': '(300)'}), '(fig_fn, dpi=300)\n', (39486, 39503), True, 'import matplotlib.pyplot as plt\n'), ((39512, 39523), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (39521, 39523), True, 'import matplotlib.pyplot as plt\n'), ((41290, 41332), 'numpy.nan_to_num', 'np.nan_to_num', (['self.interaction_covariance'], {}), '(self.interaction_covariance)\n', (41303, 41332), True, 'import numpy as np\n'), ((42109, 42140), 'numpy.copy', 'np.copy', (['interaction_covariance'], {}), '(interaction_covariance)\n', (42116, 42140), True, 'import numpy as np\n'), ((42230, 42258), 'networkx.Graph', 'nx.Graph', (['covariance_network'], {}), '(covariance_network)\n', (42238, 42258), True, 'import networkx as nx\n'), ((42274, 42336), 'community.best_partition', 'community.best_partition', (['residue_network_raw'], {'weight': '"""weight"""'}), "(residue_network_raw, weight='weight')\n", (42298, 42336), False, 'import community\n'), ((42533, 42550), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (42544, 42550), False, 'from collections import defaultdict\n'), ((42591, 42608), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (42602, 42608), False, 'from collections import defaultdict\n'), ((42639, 42656), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (42650, 42656), False, 'from collections import defaultdict\n'), ((43391, 43411), 'numpy.max', 'np.max', (['self.T_total'], {}), '(self.T_total)\n', (43397, 43411), True, 'import numpy as np\n'), ((78345, 78415), 'numpy.array', 'np.array', (['[atom.residue.index for atom in self._protein_ref.top.atoms]'], {}), '([atom.residue.index for atom in self._protein_ref.top.atoms])\n', (78353, 78415), True, 'import numpy as np\n'), ((81297, 81317), 'numpy.hstack', 'np.hstack', (['resi_list'], {}), '(resi_list)\n', (81306, 81317), True, 'import numpy as np\n'), ((10496, 10547), 'numpy.zeros_like', 'np.zeros_like', (['self.contact_high[idx]'], {'dtype': 'np.int'}), '(self.contact_high[idx], dtype=np.int)\n', (10509, 10547), True, 'import numpy as np\n'), ((12946, 12988), 'numpy.array', 'np.array', (['delta_t_range'], {'dtype': 'np.float128'}), '(delta_t_range, dtype=np.float128)\n', (12954, 12988), True, 'import numpy as np\n'), ((12990, 13030), 'numpy.array', 'np.array', (['hist_values'], {'dtype': 'np.float128'}), '(hist_values, dtype=np.float128)\n', (12998, 13030), True, 'import numpy as np\n'), ((13092, 13134), 'numpy.array', 'np.array', (['delta_t_range'], {'dtype': 'np.float128'}), '(delta_t_range, dtype=np.float128)\n', (13100, 13134), True, 'import numpy as np\n'), ((13570, 13585), 'numpy.exp', 'np.exp', (['(-k1 * x)'], {}), '(-k1 * x)\n', (13576, 13585), True, 'import numpy as np\n'), ((13588, 13603), 'numpy.exp', 'np.exp', (['(-k2 * x)'], {}), '(-k2 * x)\n', (13594, 13603), True, 'import numpy as np\n'), ((14419, 14433), 'numpy.outer', 'np.outer', (['d', 'd'], {}), 
'(d, d)\n', (14427, 14433), True, 'import numpy as np\n'), ((15235, 15264), 'numpy.array', 'np.array', (['cutoff'], {'dtype': 'float'}), '(cutoff, dtype=float)\n', (15243, 15264), True, 'import numpy as np\n'), ((17600, 17734), 'mdtraj.Trajectory', 'md.Trajectory', (['new_xyz', 'reduced_top'], {'time': 'traj.time', 'unitcell_lengths': 'traj.unitcell_lengths', 'unitcell_angles': 'traj.unitcell_angles'}), '(new_xyz, reduced_top, time=traj.time, unitcell_lengths=traj.\n unitcell_lengths, unitcell_angles=traj.unitcell_angles)\n', (17613, 17734), True, 'import mdtraj as md\n'), ((18831, 18863), 'numpy.array', 'np.array', (['residue_set'], {'dtype': 'str'}), '(residue_set, dtype=str)\n', (18839, 18863), True, 'import numpy as np\n'), ((27618, 27637), 'numpy.concatenate', 'np.concatenate', (['row'], {}), '(row)\n', (27632, 27637), True, 'import numpy as np\n'), ((27656, 27675), 'numpy.concatenate', 'np.concatenate', (['col'], {}), '(col)\n', (27670, 27675), True, 'import numpy as np\n'), ((27695, 27715), 'numpy.concatenate', 'np.concatenate', (['data'], {}), '(data)\n', (27709, 27715), True, 'import numpy as np\n'), ((28219, 28269), 'numpy.concatenate', 'np.concatenate', (['self.interaction_duration[residue]'], {}), '(self.interaction_duration[residue])\n', (28233, 28269), True, 'import numpy as np\n'), ((34937, 34957), 'numpy.min', 'np.min', (['self.T_total'], {}), '(self.T_total)\n', (34943, 34957), True, 'import numpy as np\n'), ((34959, 34981), 'numpy.min', 'np.min', (['self.timesteps'], {}), '(self.timesteps)\n', (34965, 34981), True, 'import numpy as np\n'), ((35430, 35493), 'numpy.array', 'np.array', (['[sigma_sampled[delta_t] for delta_t in delta_t_range]'], {}), '([sigma_sampled[delta_t] for delta_t in delta_t_range])\n', (35438, 35493), True, 'import numpy as np\n'), ((36364, 36384), 'numpy.max', 'np.max', (['self.T_total'], {}), '(self.T_total)\n', (36370, 36384), True, 'import numpy as np\n'), ((37372, 37395), 'numpy.array', 'np.array', (['delta_t_range'], {}), '(delta_t_range)\n', (37380, 37395), True, 'import numpy as np\n'), ((39663, 39689), 'numpy.mean', 'np.mean', (['koff1_sampled_set'], {}), '(koff1_sampled_set)\n', (39670, 39689), True, 'import numpy as np\n'), ((39792, 39820), 'numpy.mean', 'np.mean', (['restime_sampled_set'], {}), '(restime_sampled_set)\n', (39799, 39820), True, 'import numpy as np\n'), ((39932, 39962), 'numpy.mean', 'np.mean', (['r_squared_sampled_set'], {}), '(r_squared_sampled_set)\n', (39939, 39962), True, 'import numpy as np\n'), ((43867, 43884), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (43878, 43884), False, 'from collections import defaultdict\n'), ((53409, 53520), 'seaborn.violinplot', 'sns.violinplot', ([], {'x': '"""BS id"""', 'y': '"""Area (nm^2)"""', 'data': 'd_area', 'palette': '"""Set3"""', 'bw': '(0.2)', 'cut': '(1)', 'linewidth': '(1)', 'ax': 'ax'}), "(x='BS id', y='Area (nm^2)', data=d_area, palette='Set3', bw=\n 0.2, cut=1, linewidth=1, ax=ax)\n", (53423, 53520), True, 'import seaborn as sns\n'), ((53771, 53789), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (53787, 53789), True, 'import matplotlib.pyplot as plt\n'), ((53878, 53889), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (53887, 53889), True, 'import matplotlib.pyplot as plt\n'), ((59579, 59605), 'shutil.copyfile', 'copyfile', (['pdb', 'pdb_new_loc'], {}), '(pdb, pdb_new_loc)\n', (59587, 59605), False, 'from shutil import copyfile\n'), ((59631, 59651), 'mdtraj.load', 'md.load', (['pdb_new_loc'], {}), 
'(pdb_new_loc)\n', (59638, 59651), True, 'import mdtraj as md\n'), ((67012, 67086), 'seaborn.set_style', 'sns.set_style', (['"""ticks"""', "{'xtick.major.size': 5.0, 'ytick.major.size': 5.0}"], {}), "('ticks', {'xtick.major.size': 5.0, 'ytick.major.size': 5.0})\n", (67025, 67086), True, 'import seaborn as sns\n'), ((78803, 78839), 'numpy.arange', 'np.arange', (['self._protein_ref.n_atoms'], {}), '(self._protein_ref.n_atoms)\n', (78812, 78839), True, 'import numpy as np\n'), ((11218, 11267), 'numpy.where', 'np.where', (['(self.contact_high[k] == lipid_to_search)'], {}), '(self.contact_high[k] == lipid_to_search)\n', (11226, 11267), True, 'import numpy as np\n'), ((17148, 17176), 'numpy.unique', 'np.unique', (['one_lipid_indices'], {}), '(one_lipid_indices)\n', (17157, 17176), True, 'import numpy as np\n'), ((20186, 20218), 'numpy.array', 'np.array', (['residue_set'], {'dtype': 'str'}), '(residue_set, dtype=str)\n', (20194, 20218), True, 'import numpy as np\n'), ((22415, 22485), 'mdtraj.load', 'md.load', (['trajfile'], {'top': 'self.grofile_list[traj_idx]', 'stride': 'self.stride'}), '(trajfile, top=self.grofile_list[traj_idx], stride=self.stride)\n', (22422, 22485), True, 'import mdtraj as md\n'), ((23416, 23437), 'numpy.arange', 'np.arange', (['self.nprot'], {}), '(self.nprot)\n', (23425, 23437), True, 'import numpy as np\n'), ((28285, 28305), 'numpy.sum', 'np.sum', (['duration_raw'], {}), '(duration_raw)\n', (28291, 28305), True, 'import numpy as np\n'), ((31519, 31586), 'numpy.array', 'np.array', (['[self.r_squared[residue] for residue in self.residue_set]'], {}), '([self.r_squared[residue] for residue in self.residue_set])\n', (31527, 31586), True, 'import numpy as np\n'), ((31628, 31690), 'numpy.array', 'np.array', (['[self.koff[residue] for residue in self.residue_set]'], {}), '([self.koff[residue] for residue in self.residue_set])\n', (31636, 31690), True, 'import numpy as np\n'), ((31817, 31888), 'numpy.array', 'np.array', (['[self.res_time_b_cv[residue] for residue in self.residue_set]'], {}), '([self.res_time_b_cv[residue] for residue in self.residue_set])\n', (31825, 31888), True, 'import numpy as np\n'), ((31935, 31999), 'numpy.array', 'np.array', (['[self.koff_b[residue] for residue in self.residue_set]'], {}), '([self.koff_b[residue] for residue in self.residue_set])\n', (31943, 31999), True, 'import numpy as np\n'), ((32049, 32116), 'numpy.array', 'np.array', (['[self.koff_b_cv[residue] for residue in self.residue_set]'], {}), '([self.koff_b_cv[residue] for residue in self.residue_set])\n', (32057, 32116), True, 'import numpy as np\n'), ((32168, 32237), 'numpy.array', 'np.array', (['[self.r_squared_b[residue] for residue in self.residue_set]'], {}), '([self.r_squared_b[residue] for residue in self.residue_set])\n', (32176, 32237), True, 'import numpy as np\n'), ((33500, 33544), 'pickle.dump', 'pickle.dump', (['self.interaction_duration', 'f', '(2)'], {}), '(self.interaction_duration, f, 2)\n', (33511, 33544), False, 'import pickle\n'), ((33650, 33680), 'pickle.dump', 'pickle.dump', (['self.sigmas', 'f', '(2)'], {}), '(self.sigmas, f, 2)\n', (33661, 33680), False, 'import pickle\n'), ((33800, 33830), 'pickle.dump', 'pickle.dump', (['self.params', 'f', '(2)'], {}), '(self.params, f, 2)\n', (33811, 33830), False, 'import pickle\n'), ((33959, 34005), 'pickle.dump', 'pickle.dump', (['self.interaction_covariance', 'f', '(2)'], {}), '(self.interaction_covariance, f, 2)\n', (33970, 34005), False, 'import pickle\n'), ((35359, 35379), 'numpy.max', 'np.max', (['self.T_total'], {}), 
'(self.T_total)\n', (35365, 35379), True, 'import numpy as np\n'), ((35737, 35760), 'numpy.array', 'np.array', (['delta_t_range'], {}), '(delta_t_range)\n', (35745, 35760), True, 'import numpy as np\n'), ((38177, 38203), 'numpy.mean', 'np.mean', (['koff1_sampled_set'], {}), '(koff1_sampled_set)\n', (38184, 38203), True, 'import numpy as np\n'), ((38355, 38381), 'numpy.mean', 'np.mean', (['koff2_sampled_set'], {}), '(koff2_sampled_set)\n', (38362, 38381), True, 'import numpy as np\n'), ((38513, 38543), 'numpy.mean', 'np.mean', (['r_squared_sampled_set'], {}), '(r_squared_sampled_set)\n', (38520, 38543), True, 'import numpy as np\n'), ((39704, 39729), 'numpy.std', 'np.std', (['koff1_sampled_set'], {}), '(koff1_sampled_set)\n', (39710, 39729), True, 'import numpy as np\n'), ((39730, 39756), 'numpy.mean', 'np.mean', (['koff1_sampled_set'], {}), '(koff1_sampled_set)\n', (39737, 39756), True, 'import numpy as np\n'), ((39839, 39866), 'numpy.std', 'np.std', (['restime_sampled_set'], {}), '(restime_sampled_set)\n', (39845, 39866), True, 'import numpy as np\n'), ((39867, 39895), 'numpy.mean', 'np.mean', (['restime_sampled_set'], {}), '(restime_sampled_set)\n', (39874, 39895), True, 'import numpy as np\n'), ((44061, 44131), 'mdtraj.load', 'md.load', (['trajfile'], {'top': 'self.grofile_list[traj_idx]', 'stride': 'self.stride'}), '(trajfile, top=self.grofile_list[traj_idx], stride=self.stride)\n', (44068, 44131), True, 'import mdtraj as md\n'), ((44520, 44541), 'numpy.arange', 'np.arange', (['self.nprot'], {}), '(self.nprot)\n', (44529, 44541), True, 'import numpy as np\n'), ((47885, 47946), 'numpy.concatenate', 'np.concatenate', (['self.interaction_duration_BS[binding_site_id]'], {}), '(self.interaction_duration_BS[binding_site_id])\n', (47899, 47946), True, 'import numpy as np\n'), ((50239, 50294), 'numpy.mean', 'np.mean', (['self.interaction_occupancy_BS[binding_site_id]'], {}), '(self.interaction_occupancy_BS[binding_site_id])\n', (50246, 50294), True, 'import numpy as np\n'), ((50460, 50505), 'numpy.mean', 'np.mean', (['self.lipid_count_BS[binding_site_id]'], {}), '(self.lipid_count_BS[binding_site_id])\n', (50467, 50505), True, 'import numpy as np\n'), ((51085, 51097), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (51091, 51097), True, 'import numpy as np\n'), ((53140, 53172), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(4.5, 2.8)'}), '(figsize=(4.5, 2.8))\n', (53152, 53172), True, 'import matplotlib.pyplot as plt\n'), ((55043, 55090), 'pickle.dump', 'pickle.dump', (['self.interaction_duration_BS', 'f', '(2)'], {}), '(self.interaction_duration_BS, f, 2)\n', (55054, 55090), False, 'import pickle\n'), ((55199, 55232), 'pickle.dump', 'pickle.dump', (['self.sigmas_BS', 'f', '(2)'], {}), '(self.sigmas_BS, f, 2)\n', (55210, 55232), False, 'import pickle\n'), ((55355, 55388), 'pickle.dump', 'pickle.dump', (['self.params_BS', 'f', '(2)'], {}), '(self.params_BS, f, 2)\n', (55366, 55388), False, 'import pickle\n'), ((55503, 55538), 'pickle.dump', 'pickle.dump', (['surface_area_all', 'f', '(2)'], {}), '(surface_area_all, f, 2)\n', (55514, 55538), False, 'import pickle\n'), ((59544, 59565), 'os.path.basename', 'os.path.basename', (['pdb'], {}), '(pdb)\n', (59560, 59565), False, 'import os\n'), ((62549, 62588), 'pymol.finish_launching', 'pymol.finish_launching', (["['pymol', '-q']"], {}), "(['pymol', '-q'])\n", (62571, 62588), False, 'import pymol\n'), ((63085, 63111), 'pymol.cmd.set', 'cmd.set', (['"""retain_order"""', '(1)'], {}), "('retain_order', 1)\n", (63092, 63111), False, 
'from pymol import cmd\n'), ((63128, 63163), 'pymol.cmd.set', 'cmd.set', (['"""cartoon_oval_length"""', '(1.0)'], {}), "('cartoon_oval_length', 1.0)\n", (63135, 63163), False, 'from pymol import cmd\n'), ((63180, 63214), 'pymol.cmd.set', 'cmd.set', (['"""cartoon_oval_width"""', '(0.3)'], {}), "('cartoon_oval_width', 0.3)\n", (63187, 63214), False, 'from pymol import cmd\n'), ((63231, 63264), 'pymol.cmd.set', 'cmd.set', (['"""cartoon_color"""', '"""white"""'], {}), "('cartoon_color', 'white')\n", (63238, 63264), False, 'from pymol import cmd\n'), ((63281, 63310), 'pymol.cmd.set', 'cmd.set', (['"""stick_radius"""', '(0.35)'], {}), "('stick_radius', 0.35)\n", (63288, 63310), False, 'from pymol import cmd\n'), ((63500, 63522), 'pymol.cmd.hide', 'cmd.hide', (['"""everything"""'], {}), "('everything')\n", (63508, 63522), False, 'from pymol import cmd\n'), ((63539, 63566), 'pymol.cmd.show', 'cmd.show', (['"""cartoon"""', 'prefix'], {}), "('cartoon', prefix)\n", (63547, 63566), False, 'from pymol import cmd\n'), ((63583, 63601), 'pymol.cmd.center', 'cmd.center', (['prefix'], {}), '(prefix)\n', (63593, 63601), False, 'from pymol import cmd\n'), ((63618, 63636), 'pymol.cmd.orient', 'cmd.orient', (['prefix'], {}), '(prefix)\n', (63628, 63636), False, 'from pymol import cmd\n'), ((63825, 63851), 'numpy.arange', 'np.arange', (['binding_site_id'], {}), '(binding_site_id)\n', (63834, 63851), True, 'import numpy as np\n'), ((72015, 72026), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (72024, 72026), True, 'import matplotlib.pyplot as plt\n'), ((72088, 72148), 'pandas.DataFrame', 'pd.DataFrame', (["{'Resid': resi, 'Resn': SL_resn, 'Data': data}"], {}), "({'Resid': resi, 'Resn': SL_resn, 'Data': data})\n", (72100, 72148), True, 'import pandas as pd\n'), ((72415, 72480), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n_rows', '(1)'], {'figsize': '(4.5, 1.3 * n_rows)', 'sharey': '(True)'}), '(n_rows, 1, figsize=(4.5, 1.3 * n_rows), sharey=True)\n', (72427, 72480), True, 'import matplotlib.pyplot as plt\n'), ((72495, 72526), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.5)'}), '(hspace=0.5)\n', (72514, 72526), True, 'import matplotlib.pyplot as plt\n'), ((73615, 73633), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (73631, 73633), True, 'import matplotlib.pyplot as plt\n'), ((73950, 73961), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (73959, 73961), True, 'import matplotlib.pyplot as plt\n'), ((74007, 74045), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(4.5, 2.8)'}), '(1, 1, figsize=(4.5, 2.8))\n', (74019, 74045), True, 'import matplotlib.pyplot as plt\n'), ((74143, 74193), 'seaborn.despine', 'sns.despine', (['fig'], {'top': '(True)', 'right': '(True)', 'trim': '(False)'}), '(fig, top=True, right=True, trim=False)\n', (74154, 74193), True, 'import seaborn as sns\n'), ((75368, 75386), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (75384, 75386), True, 'import matplotlib.pyplot as plt\n'), ((75693, 75704), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (75702, 75704), True, 'import matplotlib.pyplot as plt\n'), ((75766, 75826), 'pandas.DataFrame', 'pd.DataFrame', (["{'Resid': resi, 'Resn': SL_resn, 'Data': data}"], {}), "({'Resid': resi, 'Resn': SL_resn, 'Data': data})\n", (75778, 75826), True, 'import pandas as pd\n'), ((76093, 76158), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n_rows', '(1)'], {'figsize': '(4.5, 1.3 * n_rows)', 'sharey': '(True)'}), 
'(n_rows, 1, figsize=(4.5, 1.3 * n_rows), sharey=True)\n', (76105, 76158), True, 'import matplotlib.pyplot as plt\n'), ((76173, 76204), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.5)'}), '(hspace=0.5)\n', (76192, 76204), True, 'import matplotlib.pyplot as plt\n'), ((77268, 77286), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (77284, 77286), True, 'import matplotlib.pyplot as plt\n'), ((77603, 77614), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (77612, 77614), True, 'import matplotlib.pyplot as plt\n'), ((78528, 78579), 'numpy.where', 'np.where', (['(residue_indices == selected_residue_index)'], {}), '(residue_indices == selected_residue_index)\n', (78536, 78579), True, 'import numpy as np\n'), ((17924, 17948), 'numpy.arange', 'np.arange', (['num_of_lipids'], {}), '(num_of_lipids)\n', (17933, 17948), True, 'import numpy as np\n'), ((26135, 26156), 'numpy.argsort', 'np.argsort', (['durations'], {}), '(durations)\n', (26145, 26156), True, 'import numpy as np\n'), ((26341, 26364), 'numpy.argsort', 'np.argsort', (['occupancies'], {}), '(occupancies)\n', (26351, 26364), True, 'import numpy as np\n'), ((26541, 26564), 'numpy.argsort', 'np.argsort', (['lipidcounts'], {}), '(lipidcounts)\n', (26551, 26564), True, 'import numpy as np\n'), ((29326, 29348), 'numpy.min', 'np.min', (['self.timesteps'], {}), '(self.timesteps)\n', (29332, 29348), True, 'import numpy as np\n'), ((38235, 38261), 'numpy.mean', 'np.mean', (['koff1_sampled_set'], {}), '(koff1_sampled_set)\n', (38242, 38261), True, 'import numpy as np\n'), ((38413, 38439), 'numpy.mean', 'np.mean', (['koff2_sampled_set'], {}), '(koff2_sampled_set)\n', (38420, 38439), True, 'import numpy as np\n'), ((38914, 38940), 'numpy.mean', 'np.mean', (['koff1_sampled_set'], {}), '(koff1_sampled_set)\n', (38921, 38940), True, 'import numpy as np\n'), ((39095, 39121), 'numpy.mean', 'np.mean', (['koff2_sampled_set'], {}), '(koff2_sampled_set)\n', (39102, 39121), True, 'import numpy as np\n'), ((39253, 39283), 'numpy.mean', 'np.mean', (['r_squared_sampled_set'], {}), '(r_squared_sampled_set)\n', (39260, 39283), True, 'import numpy as np\n'), ((47325, 47459), 'mdtraj.Trajectory', 'md.Trajectory', (['new_xyz', 'reduced_top'], {'time': 'traj.time', 'unitcell_lengths': 'traj.unitcell_lengths', 'unitcell_angles': 'traj.unitcell_angles'}), '(new_xyz, reduced_top, time=traj.time, unitcell_lengths=traj.\n unitcell_lengths, unitcell_angles=traj.unitcell_angles)\n', (47338, 47459), True, 'import mdtraj as md\n'), ((47483, 47550), 'mdtraj.shrake_rupley', 'md.shrake_rupley', (['new_traj'], {'mode': '"""residue"""', 'change_radii': 'radii_book'}), "(new_traj, mode='residue', change_radii=radii_book)\n", (47499, 47550), True, 'import mdtraj as md\n'), ((50003, 50064), 'numpy.concatenate', 'np.concatenate', (['self.interaction_duration_BS[binding_site_id]'], {}), '(self.interaction_duration_BS[binding_site_id])\n', (50017, 50064), True, 'import numpy as np\n'), ((52675, 52724), 'numpy.concatenate', 'np.concatenate', (['surface_area_all[binding_site_id]'], {}), '(surface_area_all[binding_site_id])\n', (52689, 52724), True, 'import numpy as np\n'), ((52901, 52926), 'numpy.concatenate', 'np.concatenate', (['bs_id_set'], {}), '(bs_id_set)\n', (52915, 52926), True, 'import numpy as np\n'), ((52943, 52970), 'numpy.concatenate', 'np.concatenate', (['bs_area_set'], {}), '(bs_area_set)\n', (52957, 52970), True, 'import numpy as np\n'), ((53289, 53321), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], 
{'figsize': '(6.5, 2.8)'}), '(figsize=(6.5, 2.8))\n', (53301, 53321), True, 'import matplotlib.pyplot as plt\n'), ((53366, 53396), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(9.5, 3)'}), '(figsize=(9.5, 3))\n', (53378, 53396), True, 'import matplotlib.pyplot as plt\n'), ((56537, 56571), 'numpy.arange', 'np.arange', (['self._lipid_ref.n_atoms'], {}), '(self._lipid_ref.n_atoms)\n', (56546, 56571), True, 'import numpy as np\n'), ((57958, 57992), 'numpy.arange', 'np.arange', (['self._lipid_ref.n_atoms'], {}), '(self._lipid_ref.n_atoms)\n', (57967, 57992), True, 'import numpy as np\n'), ((58594, 58628), 'numpy.arange', 'np.arange', (['num_of_poses'], {'dtype': 'int'}), '(num_of_poses, dtype=int)\n', (58603, 58628), True, 'import numpy as np\n'), ((62983, 63017), 'numpy.exp', 'np.exp', (['(-30 * (interactions - MID))'], {}), '(-30 * (interactions - MID))\n', (62989, 63017), True, 'import numpy as np\n'), ((67191, 67219), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5.5, 5)'}), '(figsize=(5.5, 5))\n', (67201, 67219), True, 'import matplotlib.pyplot as plt\n'), ((72568, 72587), 'numpy.atleast_1d', 'np.atleast_1d', (['axes'], {}), '(axes)\n', (72581, 72587), True, 'import numpy as np\n'), ((75219, 75261), 'matplotlib.pyplot.setp', 'plt.setp', (['label'], {'fontsize': '(8)', 'weight': '"""bold"""'}), "(label, fontsize=8, weight='bold')\n", (75227, 75261), True, 'import matplotlib.pyplot as plt\n'), ((76246, 76265), 'numpy.atleast_1d', 'np.atleast_1d', (['axes'], {}), '(axes)\n', (76259, 76265), True, 'import numpy as np\n'), ((80615, 80638), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (80636, 80638), False, 'import datetime\n'), ((10736, 10792), 'numpy.where', 'np.where', (['(self.contact_high[i] == self.contact_low[i][j])'], {}), '(self.contact_high[i] == self.contact_low[i][j])\n', (10744, 10792), True, 'import numpy as np\n'), ((19395, 19420), 'numpy.hstack', 'np.hstack', (['self.resi_list'], {}), '(self.resi_list)\n', (19404, 19420), True, 'import numpy as np\n'), ((23738, 23807), 'mdtraj.compute_contacts', 'md.compute_contacts', (['new_traj', 'pairs'], {'scheme': '"""closest"""', 'periodic': '(True)'}), "(new_traj, pairs, scheme='closest', periodic=True)\n", (23757, 23807), True, 'import mdtraj as md\n'), ((24058, 24102), 'numpy.where', 'np.where', (['(dist_matrix_resi <= self.cutoff[0])'], {}), '(dist_matrix_resi <= self.cutoff[0])\n', (24066, 24102), True, 'import numpy as np\n'), ((24166, 24210), 'numpy.where', 'np.where', (['(dist_matrix_resi <= self.cutoff[1])'], {}), '(dist_matrix_resi <= self.cutoff[1])\n', (24174, 24210), True, 'import numpy as np\n'), ((26203, 26261), 'numpy.mean', 'np.mean', (['self.interaction_occupancy[residue][-self.nprot:]'], {}), '(self.interaction_occupancy[residue][-self.nprot:])\n', (26210, 26261), True, 'import numpy as np\n'), ((26412, 26460), 'numpy.mean', 'np.mean', (['self.lipid_count[residue][-self.nprot:]'], {}), '(self.lipid_count[residue][-self.nprot:])\n', (26419, 26460), True, 'import numpy as np\n'), ((30611, 30655), 'numpy.mean', 'np.mean', (['self.interaction_occupancy[residue]'], {}), '(self.interaction_occupancy[residue])\n', (30618, 30655), True, 'import numpy as np\n'), ((30807, 30850), 'numpy.std', 'np.std', (['self.interaction_occupancy[residue]'], {}), '(self.interaction_occupancy[residue])\n', (30813, 30850), True, 'import numpy as np\n'), ((32295, 32329), 'numpy.mean', 'np.mean', (['self.lipid_count[residue]'], {}), '(self.lipid_count[residue])\n', (32302, 32329), True, 'import 
numpy as np\n'), ((32484, 32517), 'numpy.std', 'np.std', (['self.lipid_count[residue]'], {}), '(self.lipid_count[residue])\n', (32490, 32517), True, 'import numpy as np\n'), ((38209, 38234), 'numpy.std', 'np.std', (['koff1_sampled_set'], {}), '(koff1_sampled_set)\n', (38215, 38234), True, 'import numpy as np\n'), ((38387, 38412), 'numpy.std', 'np.std', (['koff2_sampled_set'], {}), '(koff2_sampled_set)\n', (38393, 38412), True, 'import numpy as np\n'), ((38972, 38998), 'numpy.mean', 'np.mean', (['koff1_sampled_set'], {}), '(koff1_sampled_set)\n', (38979, 38998), True, 'import numpy as np\n'), ((39153, 39179), 'numpy.mean', 'np.mean', (['koff2_sampled_set'], {}), '(koff2_sampled_set)\n', (39160, 39179), True, 'import numpy as np\n'), ((48925, 48974), 'numpy.concatenate', 'np.concatenate', (['surface_area_all[binding_site_id]'], {}), '(surface_area_all[binding_site_id])\n', (48939, 48974), True, 'import numpy as np\n'), ((57648, 57682), 'numpy.arange', 'np.arange', (['self._lipid_ref.n_atoms'], {}), '(self._lipid_ref.n_atoms)\n', (57657, 57682), True, 'import numpy as np\n'), ((63976, 64019), 'numpy.where', 'np.where', (['(binding_site_identifiers == bs_id)'], {}), '(binding_site_identifiers == bs_id)\n', (63984, 64019), True, 'import numpy as np\n'), ((67306, 67334), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7.5, 5)'}), '(figsize=(7.5, 5))\n', (67316, 67334), True, 'import matplotlib.pyplot as plt\n'), ((67383, 67409), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 6)'}), '(figsize=(9, 6))\n', (67393, 67409), True, 'import matplotlib.pyplot as plt\n'), ((68528, 68548), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(200)'], {}), '(200)\n', (68543, 68548), False, 'from matplotlib.ticker import MultipleLocator\n'), ((68602, 68621), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(50)'], {}), '(50)\n', (68617, 68621), False, 'from matplotlib.ticker import MultipleLocator\n'), ((68675, 68695), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(200)'], {}), '(200)\n', (68690, 68695), False, 'from matplotlib.ticker import MultipleLocator\n'), ((68749, 68768), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(50)'], {}), '(50)\n', (68764, 68768), False, 'from matplotlib.ticker import MultipleLocator\n'), ((72658, 72744), 'logomaker.Logo', 'logomaker.Logo', (['matrix[(idx - 1) * 100 + length:]'], {'color_scheme': '"""chemistry"""', 'ax': 'ax'}), "(matrix[(idx - 1) * 100 + length:], color_scheme='chemistry',\n ax=ax)\n", (72672, 72744), False, 'import logomaker\n'), ((73131, 73150), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(20)'], {}), '(20)\n', (73146, 73150), False, 'from matplotlib.ticker import MultipleLocator\n'), ((73199, 73217), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(1)'], {}), '(1)\n', (73214, 73217), False, 'from matplotlib.ticker import MultipleLocator\n'), ((73556, 73598), 'matplotlib.pyplot.setp', 'plt.setp', (['label'], {'fontsize': '(8)', 'weight': '"""bold"""'}), "(label, fontsize=8, weight='bold')\n", (73564, 73598), True, 'import matplotlib.pyplot as plt\n'), ((74278, 74298), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(200)'], {}), '(200)\n', (74293, 74298), False, 'from matplotlib.ticker import MultipleLocator\n'), ((74347, 74366), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(50)'], {}), '(50)\n', (74362, 74366), False, 'from matplotlib.ticker import MultipleLocator\n'), ((76336, 76422), 'logomaker.Logo', 'logomaker.Logo', (['matrix[(idx 
- 1) * 100 + length:]'], {'color_scheme': '"""chemistry"""', 'ax': 'ax'}), "(matrix[(idx - 1) * 100 + length:], color_scheme='chemistry',\n ax=ax)\n", (76350, 76422), False, 'import logomaker\n'), ((76809, 76828), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(20)'], {}), '(20)\n', (76824, 76828), False, 'from matplotlib.ticker import MultipleLocator\n'), ((76877, 76895), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(1)'], {}), '(1)\n', (76892, 76895), False, 'from matplotlib.ticker import MultipleLocator\n'), ((77209, 77251), 'matplotlib.pyplot.setp', 'plt.setp', (['label'], {'fontsize': '(8)', 'weight': '"""bold"""'}), "(label, fontsize=8, weight='bold')\n", (77217, 77251), True, 'import matplotlib.pyplot as plt\n'), ((13175, 13198), 'numpy.nan_to_num', 'np.nan_to_num', (['n_fitted'], {}), '(n_fitted)\n', (13188, 13198), True, 'import numpy as np\n'), ((13201, 13227), 'numpy.nan_to_num', 'np.nan_to_num', (['hist_values'], {}), '(hist_values)\n', (13214, 13227), True, 'import numpy as np\n'), ((13255, 13275), 'numpy.mean', 'np.mean', (['hist_values'], {}), '(hist_values)\n', (13262, 13275), True, 'import numpy as np\n'), ((23632, 23690), 'itertools.product', 'product', (['[residue_index]', "traj_stats['lipid_resi_indices']"], {}), "([residue_index], traj_stats['lipid_resi_indices'])\n", (23639, 23690), False, 'from itertools import product\n'), ((24821, 24849), 'numpy.array', 'np.array', (['contact'], {'dtype': 'int'}), '(contact, dtype=int)\n', (24829, 24849), True, 'import numpy as np\n'), ((24927, 24955), 'numpy.array', 'np.array', (['contact'], {'dtype': 'int'}), '(contact, dtype=int)\n', (24935, 24955), True, 'import numpy as np\n'), ((31009, 31059), 'numpy.concatenate', 'np.concatenate', (['self.interaction_duration[residue]'], {}), '(self.interaction_duration[residue])\n', (31023, 31059), True, 'import numpy as np\n'), ((31217, 31267), 'numpy.concatenate', 'np.concatenate', (['self.interaction_duration[residue]'], {}), '(self.interaction_duration[residue])\n', (31231, 31267), True, 'import numpy as np\n'), ((37438, 37461), 'numpy.nan_to_num', 'np.nan_to_num', (['n_fitted'], {}), '(n_fitted)\n', (37451, 37461), True, 'import numpy as np\n'), ((37464, 37490), 'numpy.nan_to_num', 'np.nan_to_num', (['hist_values'], {}), '(hist_values)\n', (37477, 37490), True, 'import numpy as np\n'), ((37518, 37538), 'numpy.mean', 'np.mean', (['hist_values'], {}), '(hist_values)\n', (37525, 37538), True, 'import numpy as np\n'), ((38946, 38971), 'numpy.std', 'np.std', (['koff1_sampled_set'], {}), '(koff1_sampled_set)\n', (38952, 38971), True, 'import numpy as np\n'), ((39127, 39152), 'numpy.std', 'np.std', (['koff2_sampled_set'], {}), '(koff2_sampled_set)\n', (39133, 39152), True, 'import numpy as np\n'), ((58467, 58485), 'numpy.argsort', 'np.argsort', (['scores'], {}), '(scores)\n', (58477, 58485), True, 'import numpy as np\n'), ((63689, 63716), 'numpy.arange', 'np.arange', (['(256)'], {'dtype': 'float'}), '(256, dtype=float)\n', (63698, 63716), True, 'import numpy as np\n'), ((68882, 68902), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(100)'], {}), '(100)\n', (68897, 68902), False, 'from matplotlib.ticker import MultipleLocator\n'), ((68956, 68975), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(10)'], {}), '(10)\n', (68971, 68975), False, 'from matplotlib.ticker import MultipleLocator\n'), ((69029, 69049), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(100)'], {}), '(100)\n', (69044, 69049), False, 'from matplotlib.ticker import 
MultipleLocator\n'), ((69103, 69122), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(10)'], {}), '(10)\n', (69118, 69122), False, 'from matplotlib.ticker import MultipleLocator\n'), ((72878, 72942), 'logomaker.Logo', 'logomaker.Logo', (['matrix[:length]'], {'color_scheme': '"""chemistry"""', 'ax': 'ax'}), "(matrix[:length], color_scheme='chemistry', ax=ax)\n", (72892, 72942), False, 'import logomaker\n'), ((72993, 73097), 'logomaker.Logo', 'logomaker.Logo', (['matrix[(idx - 1) * 100 + length:idx * 100 + length]'], {'color_scheme': '"""chemistry"""', 'ax': 'ax'}), "(matrix[(idx - 1) * 100 + length:idx * 100 + length],\n color_scheme='chemistry', ax=ax)\n", (73007, 73097), False, 'import logomaker\n'), ((74455, 74475), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(100)'], {}), '(100)\n', (74470, 74475), False, 'from matplotlib.ticker import MultipleLocator\n'), ((74524, 74543), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(10)'], {}), '(10)\n', (74539, 74543), False, 'from matplotlib.ticker import MultipleLocator\n'), ((76556, 76620), 'logomaker.Logo', 'logomaker.Logo', (['matrix[:length]'], {'color_scheme': '"""chemistry"""', 'ax': 'ax'}), "(matrix[:length], color_scheme='chemistry', ax=ax)\n", (76570, 76620), False, 'import logomaker\n'), ((76671, 76775), 'logomaker.Logo', 'logomaker.Logo', (['matrix[(idx - 1) * 100 + length:idx * 100 + length]'], {'color_scheme': '"""chemistry"""', 'ax': 'ax'}), "(matrix[(idx - 1) * 100 + length:idx * 100 + length],\n color_scheme='chemistry', ax=ax)\n", (76685, 76775), False, 'import logomaker\n'), ((23872, 23900), 'numpy.arange', 'np.arange', (['new_traj.n_frames'], {}), '(new_traj.n_frames)\n', (23881, 23900), True, 'import numpy as np\n'), ((23967, 23995), 'numpy.arange', 'np.arange', (['new_traj.n_frames'], {}), '(new_traj.n_frames)\n', (23976, 23995), True, 'import numpy as np\n'), ((25985, 26049), 'numpy.concatenate', 'np.concatenate', (['self.interaction_duration[residue][-self.nprot:]'], {}), '(self.interaction_duration[residue][-self.nprot:])\n', (25999, 26049), True, 'import numpy as np\n'), ((35823, 35846), 'numpy.nan_to_num', 'np.nan_to_num', (['n_fitted'], {}), '(n_fitted)\n', (35836, 35846), True, 'import numpy as np\n'), ((35849, 35883), 'numpy.nan_to_num', 'np.nan_to_num', (['hist_values_sampled'], {}), '(hist_values_sampled)\n', (35862, 35883), True, 'import numpy as np\n'), ((35919, 35947), 'numpy.mean', 'np.mean', (['hist_values_sampled'], {}), '(hist_values_sampled)\n', (35926, 35947), True, 'import numpy as np\n'), ((45076, 45177), 'numpy.concatenate', 'np.concatenate', (['[self.contact_residues_high[node][list_to_take][frame_idx] for node in\n node_list]'], {}), '([self.contact_residues_high[node][list_to_take][frame_idx] for\n node in node_list])\n', (45090, 45177), True, 'import numpy as np\n'), ((45233, 45333), 'numpy.concatenate', 'np.concatenate', (['[self.contact_residues_low[node][list_to_take][frame_idx] for node in node_list\n ]'], {}), '([self.contact_residues_low[node][list_to_take][frame_idx] for\n node in node_list])\n', (45247, 45333), True, 'import numpy as np\n'), ((50737, 50786), 'numpy.concatenate', 'np.concatenate', (['surface_area_all[binding_site_id]'], {}), '(surface_area_all[binding_site_id])\n', (50751, 50786), True, 'import numpy as np\n'), ((58383, 58417), 'numpy.arange', 'np.arange', (['self._lipid_ref.n_atoms'], {}), '(self._lipid_ref.n_atoms)\n', (58392, 58417), True, 'import numpy as np\n'), ((66675, 66705), 're.findall', 're.findall', (['"""^[0-9]+"""', 
'residue'], {}), "('^[0-9]+', residue)\n", (66685, 66705), False, 'import re\n'), ((66844, 66877), 're.findall', 're.findall', (['"""[a-zA-Z]+$"""', 'residue'], {}), "('[a-zA-Z]+$', residue)\n", (66854, 66877), False, 'import re\n'), ((69215, 69234), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(10)'], {}), '(10)\n', (69230, 69234), False, 'from matplotlib.ticker import MultipleLocator\n'), ((69288, 69306), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(1)'], {}), '(1)\n', (69303, 69306), False, 'from matplotlib.ticker import MultipleLocator\n'), ((69360, 69379), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(10)'], {}), '(10)\n', (69375, 69379), False, 'from matplotlib.ticker import MultipleLocator\n'), ((69433, 69451), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(1)'], {}), '(1)\n', (69448, 69451), False, 'from matplotlib.ticker import MultipleLocator\n'), ((52803, 52852), 'numpy.concatenate', 'np.concatenate', (['surface_area_all[binding_site_id]'], {}), '(surface_area_all[binding_site_id])\n', (52817, 52852), True, 'import numpy as np\n'), ((58061, 58094), 'numpy.array', 'np.array', (['dist_per_atom[atom_idx]'], {}), '(dist_per_atom[atom_idx])\n', (58069, 58094), True, 'import numpy as np\n'), ((46528, 46567), 'numpy.copy', 'np.copy', (['traj.unitcell_angles[frame_id]'], {}), '(traj.unitcell_angles[frame_id])\n', (46535, 46567), True, 'import numpy as np\n'), ((46650, 46690), 'numpy.copy', 'np.copy', (['traj.unitcell_lengths[frame_id]'], {}), '(traj.unitcell_lengths[frame_id])\n', (46657, 46690), True, 'import numpy as np\n'), ((51011, 51044), 're.findall', 're.findall', (['"""[a-zA-Z]+$"""', 'residue'], {}), "('[a-zA-Z]+$', residue)\n", (51021, 51044), False, 'import re\n'), ((57491, 57548), 'itertools.product', 'product', (['[lipid_atoms[idx]]', 'selected_protein_atoms[resi]'], {}), '([lipid_atoms[idx]], selected_protein_atoms[resi])\n', (57498, 57548), False, 'from itertools import product\n'), ((46400, 46443), 'numpy.hstack', 'np.hstack', (['[protein_indices, lipid_indices]'], {}), '([protein_indices, lipid_indices])\n', (46409, 46443), True, 'import numpy as np\n')]
yaakiyu/rt-bot
main.py
f68bca95c516e08c31ecc846524dcea4c8ba1503
# RT by Rext

from asyncio import run

from discord import Intents, Status, Game, AllowedMentions

from core.bot import RT
from data import SECRET

try:
    from uvloop import install
except ModuleNotFoundError:
    ...
else:
    install()

intents = Intents.default()
intents.message_content = True
intents.members = True

bot = RT(
    allowed_mentions=AllowedMentions(everyone=False), intents=intents,
    status=Status.dnd, activity=Game("起動")
)
bot.print("Now loading...")

try:
    run(bot.start(SECRET["token"]))
except KeyboardInterrupt:
    bot.print("Bye")
[((240, 257), 'discord.Intents.default', 'Intents.default', ([], {}), '()\n', (255, 257), False, 'from discord import Intents, Status, Game, AllowedMentions\n'), ((218, 227), 'uvloop.install', 'install', ([], {}), '()\n', (225, 227), False, 'from uvloop import install\n'), ((343, 374), 'discord.AllowedMentions', 'AllowedMentions', ([], {'everyone': '(False)'}), '(everyone=False)\n', (358, 374), False, 'from discord import Intents, Status, Game, AllowedMentions\n'), ((425, 435), 'discord.Game', 'Game', (['"""起動"""'], {}), "('起動')\n", (429, 435), False, 'from discord import Intents, Status, Game, AllowedMentions\n')]
dmitryvinn/hiplot
hiplot/fetchers_demo.py
52fe8b195a4e254240eb1a0847953fa3c1957a43
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import uuid
import random
import math
import time
import typing as t
from . import experiment as hip

# Demos from the README. If one of those is modified, please modify the readme as well


def demo_change_column_properties() -> hip.Experiment:
    data = [{"param": 1, "loss": 10, "hidden_field": "value1", "c": "red"},
            {"param": 2, "loss": 5, "hidden_field": "value2", "c": "black"}]
    exp = hip.Experiment.from_iterable(data)
    exp.parameters_definition["c"].colors = {"red": "rgb(255, 0, 0)", "black": "rgb(0, 0, 0)"}
    exp.parameters_definition["loss"].type = hip.ValueType.NUMERIC_LOG
    exp.display_data(hip.Displays.PARALLEL_PLOT).update({
        'hide': ['hidden_field'],  # This column won't appear in the parallel plot
        'order': ['c']  # Column `c` will be displayed first the in parallel plot
    })
    return exp


def demo_basic_usage() -> hip.Experiment:
    data = [{'dropout': 0.1, 'lr': 0.001, 'loss': 10.0, 'optimizer': 'SGD'},
            {'dropout': 0.15, 'lr': 0.01, 'loss': 3.5, 'optimizer': 'Adam'},
            {'dropout': 0.3, 'lr': 0.1, 'loss': 4.5, 'optimizer': 'Adam'}]
    return hip.Experiment.from_iterable(data)


def demo_line_xy() -> hip.Experiment:
    # DEMO_LINE_XY_BEGIN
    exp = hip.Experiment()
    exp.display_data(hip.Displays.XY).update({
        'axis_x': 'generation',
        'axis_y': 'loss',
    })
    for i in range(200):
        dp = hip.Datapoint(
            uid=str(i),
            values={
                'generation': i,
                'param': 10 ** random.uniform(-1, 1),
                'loss': random.uniform(-5, 5),
            })
        if i > 10:
            from_parent = random.choice(exp.datapoints[-10:])
            dp.from_uid = from_parent.uid  # <-- Connect the parent to the child
            dp.values['loss'] += from_parent.values['loss']  # type: ignore
            dp.values['param'] *= from_parent.values['param']  # type: ignore
        exp.datapoints.append(dp)
    # DEMO_LINE_XY_END
    return exp


def demo_bug_uid() -> hip.Experiment:
    return hip.Experiment.from_iterable([{'a': 1, 'b': 2, 'uid': 50.0}, {'a': 2, 'b': 3, 'uid': 49.33}])


def demo(n: int = 100) -> hip.Experiment:
    xp = hip.Experiment()
    xp.display_data(hip.Displays.XY).update({
        'axis_x': 'time',
        'axis_y': 'exp_metric',
    })

    # Some fake PBT-ish data
    def fake_params() -> t.Dict[str, hip.DisplayableType]:
        r = random.random()
        p: t.Dict[str, hip.DisplayableType] = {
            "lr": 10 ** random.uniform(-5, 0),
            "seed": random.uniform(0, 10),
            "name": uuid.uuid4().hex[:6],
            "optimizer": random.choice(["sgd", "adam", "adamw"]),
            "r": r,
            "c": random.choice(["red", "green", "black"]),
        }
        if r < 0.1:
            del p['optimizer']
        if r > 0.3:
            p["optionA"] = random.uniform(1, 5)
        else:
            p["optionB"] = random.uniform(1, 5)
        if r < 0.2:
            p["pctile"] = -1.0
        elif r < 0.5:
            p["pctile"] = random.uniform(-1.0, 10.0)
        elif r < 0.8:
            p["pctile"] = 10 ** random.uniform(1, 2)
        else:
            p["pctile"] = random.uniform(100, 101)
        if random.random() > 0.3:
            p["special_values"] = random.uniform(1, 5)
        else:
            p["special_values"] = random.choice([math.inf, -math.inf, math.nan])
        return p

    def fake_metrics(tm: float) -> t.Dict[str, hip.DisplayableType]:
        return {
            "exp_metric": 10 ** random.uniform(-5, 0),
            "pct_success": random.uniform(10, 90),
            "chkpt": uuid.uuid4().hex[:6],
            "time": tm + random.uniform(-0.2, 0.2),
            "force_numericlog": random.uniform(1, 100),
            'timestamp': int(time.time() + (task_idx * 2000)),
        }

    current_pop: t.List[t.Dict[str, t.Any]] = [dict(uid=f"init{i}", params=fake_params(), last_ckpt_uid=None) for i in range(10)]
    continue_num = 0
    for task_idx in range(n):
        # All drop checkpoints
        for p in current_pop:
            ckpt_uid = f"{p['uid']}_{uuid.uuid4().hex[:6]}"
            xp.datapoints.append(hip.Datapoint(uid=ckpt_uid, from_uid=p['last_ckpt_uid'], values={**p['params'], **fake_metrics(task_idx)}))
            p['last_ckpt_uid'] = ckpt_uid
        # Randomly drop some
        current_pop = [p for p in current_pop if random.random() > 0.3]
        # Respawn as needed
        for _ in range(10 - len(current_pop)):
            continue_num += 1
            parent = random.choice(xp.datapoints[-10:])
            current_pop.append(dict(uid=f"continue{continue_num}", params=fake_params(), last_ckpt_uid=parent.uid))
    xp.parameters_definition["c"].colors = {"red": "rgb(255, 0, 0)", "green": "rgb(0, 255, 0)", "black": "rgb(0, 0, 0)"}
    xp.parameters_definition["force_numericlog"].type = hip.ValueType.NUMERIC_LOG
    xp.parameters_definition["pctile"].type = hip.ValueType.NUMERIC_PERCENTILE
    xp.parameters_definition["timestamp"].type = hip.ValueType.TIMESTAMP
    return xp


def demo_customize() -> hip.Experiment:
    exp = demo()
    # EXPERIMENT_SETTINGS_SNIPPET2_BEGIN
    # Provide configuration for the parallel plot
    exp.display_data(hip.Displays.PARALLEL_PLOT).update({
        # Hide some columns in the parallel plot
        'hide': ['optionB'],
        # Specify the order for others
        'order': ['time'],  # Put column time first on the left
    })
    # Provide configuration for the table with all the rows
    exp.display_data(hip.Displays.TABLE).update({
        # Don't display `uid` and `from_uid` columns to the user
        'hide': ['uid', 'from_uid'],
        # In the table, order rows by default
        'order_by': [['pct_success', 'desc']],
        # Specify the order for columns
        'order': ['time'],  # Put column time first on the left
    })
    # Provide configuration for the XY graph
    exp.display_data(hip.Displays.XY).update({
        # Default X axis for the XY plot
        'axis_x': 'time',
        # Default Y axis
        'axis_y': 'lr',
        # Configure lines
        'lines_thickness': 1.0,
        'lines_opacity': 0.1,
        # Configure dots
        'dots_thickness': 2.0,
        'dots_opacity': 0.3,
    })
    # EXPERIMENT_SETTINGS_SNIPPET2_END
    return exp


def demo_force_scale() -> hip.Experiment:
    xp = hip.Experiment()
    for _ in range(100):
        values = [abs(random.gauss(0.0, 1.0)) for _ in range(4)]
        xp.datapoints.append(hip.Datapoint({
            f"value{i}": v / sum(values)
            for i, v in enumerate(values)
        }))
    for i in range(4):
        xp.parameters_definition[f"value{i}"].force_range(0.0, 1.0)
    return xp


def demo_distribution(**kwargs: t.Any) -> hip.Experiment:
    xp = hip.Experiment.from_iterable([{
        'cat': random.choice(["a", "b", "c", "d", "e", "f", "g", "h"]),
        'numeric': random.uniform(0.0, 1.0),
    } for i in range(1000)])
    xp.display_data(hip.Displays.DISTRIBUTION).update(kwargs)
    return xp


def demo_bool() -> hip.Experiment:
    return hip.Experiment.from_iterable([
        {"bool": True},
        {"bool": False}
    ])


def demo_color_interpolate() -> hip.Experiment:
    exp = demo()
    exp.parameters_definition["exp_metric"].colormap = "interpolateSinebow"
    return exp


def demo_color_scheme_ylrd() -> hip.Experiment:
    exp = demo()
    exp.parameters_definition["exp_metric"].colormap = "schemeYlOrRd"
    return exp


def demo_color_scheme_accent() -> hip.Experiment:
    exp = demo()
    exp.parameters_definition["exp_metric"].colormap = "schemeAccent"
    return exp


def demo_color_interpolate_inverse() -> hip.Experiment:
    exp = demo_color_interpolate()
    assert exp.parameters_definition["exp_metric"].colormap is not None
    exp.parameters_definition["exp_metric"].colormap += "#inverse"
    return exp


def demo_axis_style() -> hip.Experiment:
    data: t.List[t.Dict[str, t.Any]] = []
    for _ in range(100):
        data.append({
            **{
                f'param{i}': random.uniform(0, 1)
                for i in range(6)
            },
            'loss': random.uniform(0, 100),
            'metric': 10 ** random.uniform(0, 10)
        })
    xp = hip.Experiment.from_iterable(data)
    for i in range(6):
        xp.parameters_definition[f"param{i}"].label_css = "badge badge-pill badge-secondary"
    xp.parameters_definition["loss"].label_css = "badge badge-pill badge-primary"
    xp.parameters_definition["metric"].label_css = "badge badge-pill badge-info"
    return xp


def demo_categorical() -> hip.Experiment:
    data: t.List[t.Dict[str, t.Any]] = []
    for _ in range(100):
        data.append({
            'cat_num_05': random.randint(0, 5),
            'cat_num_15': random.randint(0, 10),
            'cat_num_25': random.randint(0, 25),
            'cat_str_05': f's{random.randint(0, 5)}',
            'cat_str_15': f's{random.randint(0, 15)}',
            'cat_str_25': f's{random.randint(0, 25)}',
        })
    xp = hip.Experiment.from_iterable(data)
    for param in ["cat_num_05", "cat_num_15", "cat_num_25"]:
        xp.parameters_definition[param].type = hip.ValueType.CATEGORICAL
    xp.colorby = 'cat_num_25'
    return xp


def demo_long_names() -> hip.Experiment:
    return hip.Experiment.from_iterable([
        {
            'some very very long name for a field': random.randint(0, 5),
            'this one is also very long': random.randint(0, 10),
            'another.long.one.but.with.dots': random.randint(0, 25),
        }
        for _ in range(100)
    ])


def demo_force_constant_pplot() -> hip.Experiment:
    exp = hip.Experiment.from_iterable([
        {'uid': 123, 'a': 1, 'b': 3},
        {'uid': 345, 'a': 2, 'b': 3}
    ])
    exp.parameters_definition["b"].force_range(0, 100)
    return exp


def demo_first_value_nan() -> hip.Experiment:
    return hip.Experiment.from_iterable([
        {},
        {'a': None},
        {'a': 2},
        {'a': 2.1},
        {'a': 2.2},
        {'a': 5.5},
        {'a': math.nan},
    ])


def demo_weighted_rows() -> hip.Experiment:
    experiment = hip.Experiment.from_iterable([
        {'w': 1.0, 'a': 1, 'b': 1},
        {'w': 2.0, 'a': 2, 'b': 1},
        {'w': -2.0, 'a': 2, 'b': 1},
        {'w': math.inf, 'a': 2, 'b': 2},
        {'w': 'not_a_number', 'a': 2, 'b': 3},
        {'w': None, 'a': 3, 'b': 3},
        {'a': 4, 'b': 3},
    ])
    experiment.weightcolumn = "w"
    return experiment


def demo_3xcols() -> hip.Experiment:
    xp = demo()
    for i in range(2):
        new_xp = demo()
        for dp, new_dp in zip(xp.datapoints, new_xp.datapoints):
            dp.values.update({
                f"{k}{i}": v
                for k, v in new_dp.values.items()
            })
    return xp


def demo_col_html() -> hip.Experiment:
    COL1 = "<h1>col1</h1>"
    COL2 = "col_2"
    experiment = hip.Experiment.from_iterable([
        {COL1: 1.0, COL2: 1},
        {COL1: 2.0, COL2: 2},
        {COL1: 3.0, COL2: 3},
    ])
    experiment.parameters_definition[COL2].label_html = "col<sub>2</sub>"
    return experiment


def demo_disable_table() -> hip.Experiment:
    experiment = demo()
    experiment.enabledDisplays.remove(hip.Displays.TABLE)
    return experiment


def demo_big_floats() -> hip.Experiment:
    return hip.Experiment.from_iterable(
        {
            'bigfloat': math.nan if i < 10 else 10 ** random.uniform(15, 32),
        }
        for i in range(100)
    )


README_DEMOS: t.Dict[str, t.Callable[[], hip.Experiment]] = {
    "demo": demo,
    "demo_3xcols": demo_3xcols,
    "demo_big": lambda: demo(1000),
    "demo_change_column_properties": demo_change_column_properties,
    "demo_basic_usage": demo_basic_usage,
    "demo_line_xy": demo_line_xy,
    "demo_bug_uid": demo_bug_uid,
    "demo_force_scale": demo_force_scale,
    "demo_distribution_cat": lambda: demo_distribution(axis="cat"),
    "demo_distribution_num": lambda: demo_distribution(axis="numeric"),
    "demo_distribution_num_100bins": lambda: demo_distribution(axis="numeric", nbins=100),
    "demo_bool": demo_bool,
    "demo_color_interpolate": demo_color_interpolate,
    "demo_color_scheme_ylrd": demo_color_scheme_ylrd,
    "demo_color_scheme_accent": demo_color_scheme_accent,
    "demo_axis_style": demo_axis_style,
    "demo_categorical": demo_categorical,
    "demo_customize": demo_customize,
    "demo_long_names": demo_long_names,
    "demo_force_constant_pplot": demo_force_constant_pplot,
    "demo_color_interpolate_inverse": demo_color_interpolate_inverse,
    "demo_first_value_nan": demo_first_value_nan,
    "demo_weighted_rows": demo_weighted_rows,
    "demo_col_html": demo_col_html,
    "demo_disable_table": demo_disable_table,
    "demo_big_floats": demo_big_floats,
}
[((2618, 2633), 'random.random', 'random.random', ([], {}), '()\n', (2631, 2633), False, 'import random\n'), ((1848, 1883), 'random.choice', 'random.choice', (['exp.datapoints[-10:]'], {}), '(exp.datapoints[-10:])\n', (1861, 1883), False, 'import random\n'), ((2749, 2770), 'random.uniform', 'random.uniform', (['(0)', '(10)'], {}), '(0, 10)\n', (2763, 2770), False, 'import random\n'), ((2839, 2878), 'random.choice', 'random.choice', (["['sgd', 'adam', 'adamw']"], {}), "(['sgd', 'adam', 'adamw'])\n", (2852, 2878), False, 'import random\n'), ((2917, 2957), 'random.choice', 'random.choice', (["['red', 'green', 'black']"], {}), "(['red', 'green', 'black'])\n", (2930, 2957), False, 'import random\n'), ((3067, 3087), 'random.uniform', 'random.uniform', (['(1)', '(5)'], {}), '(1, 5)\n', (3081, 3087), False, 'import random\n'), ((3129, 3149), 'random.uniform', 'random.uniform', (['(1)', '(5)'], {}), '(1, 5)\n', (3143, 3149), False, 'import random\n'), ((3429, 3444), 'random.random', 'random.random', ([], {}), '()\n', (3442, 3444), False, 'import random\n'), ((3486, 3506), 'random.uniform', 'random.uniform', (['(1)', '(5)'], {}), '(1, 5)\n', (3500, 3506), False, 'import random\n'), ((3555, 3601), 'random.choice', 'random.choice', (['[math.inf, -math.inf, math.nan]'], {}), '([math.inf, -math.inf, math.nan])\n', (3568, 3601), False, 'import random\n'), ((3788, 3810), 'random.uniform', 'random.uniform', (['(10)', '(90)'], {}), '(10, 90)\n', (3802, 3810), False, 'import random\n'), ((3939, 3961), 'random.uniform', 'random.uniform', (['(1)', '(100)'], {}), '(1, 100)\n', (3953, 3961), False, 'import random\n'), ((4751, 4785), 'random.choice', 'random.choice', (['xp.datapoints[-10:]'], {}), '(xp.datapoints[-10:])\n', (4764, 4785), False, 'import random\n'), ((2706, 2727), 'random.uniform', 'random.uniform', (['(-5)', '(0)'], {}), '(-5, 0)\n', (2720, 2727), False, 'import random\n'), ((3250, 3276), 'random.uniform', 'random.uniform', (['(-1.0)', '(10.0)'], {}), '(-1.0, 10.0)\n', (3264, 3276), False, 'import random\n'), ((3738, 3759), 'random.uniform', 'random.uniform', (['(-5)', '(0)'], {}), '(-5, 0)\n', (3752, 3759), False, 'import random\n'), ((3880, 3905), 'random.uniform', 'random.uniform', (['(-0.2)', '(0.2)'], {}), '(-0.2, 0.2)\n', (3894, 3905), False, 'import random\n'), ((6644, 6666), 'random.gauss', 'random.gauss', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (6656, 6666), False, 'import random\n'), ((7048, 7103), 'random.choice', 'random.choice', (["['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']"], {}), "(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'])\n", (7061, 7103), False, 'import random\n'), ((7124, 7148), 'random.uniform', 'random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (7138, 7148), False, 'import random\n'), ((8367, 8389), 'random.uniform', 'random.uniform', (['(0)', '(100)'], {}), '(0, 100)\n', (8381, 8389), False, 'import random\n'), ((8948, 8968), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (8962, 8968), False, 'import random\n'), ((8996, 9017), 'random.randint', 'random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (9010, 9017), False, 'import random\n'), ((9045, 9066), 'random.randint', 'random.randint', (['(0)', '(25)'], {}), '(0, 25)\n', (9059, 9066), False, 'import random\n'), ((9612, 9632), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (9626, 9632), False, 'import random\n'), ((9676, 9697), 'random.randint', 'random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (9690, 9697), False, 'import random\n'), ((9745, 9766), 'random.randint', 
'random.randint', (['(0)', '(25)'], {}), '(0, 25)\n', (9759, 9766), False, 'import random\n'), ((1765, 1786), 'random.uniform', 'random.uniform', (['(-5)', '(5)'], {}), '(-5, 5)\n', (1779, 1786), False, 'import random\n'), ((2792, 2804), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2802, 2804), False, 'import uuid\n'), ((3392, 3416), 'random.uniform', 'random.uniform', (['(100)', '(101)'], {}), '(100, 101)\n', (3406, 3416), False, 'import random\n'), ((3833, 3845), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3843, 3845), False, 'import uuid\n'), ((3992, 4003), 'time.time', 'time.time', ([], {}), '()\n', (4001, 4003), False, 'import time\n'), ((4601, 4616), 'random.random', 'random.random', ([], {}), '()\n', (4614, 4616), False, 'import random\n'), ((8277, 8297), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (8291, 8297), False, 'import random\n'), ((8419, 8440), 'random.uniform', 'random.uniform', (['(0)', '(10)'], {}), '(0, 10)\n', (8433, 8440), False, 'import random\n'), ((1718, 1739), 'random.uniform', 'random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (1732, 1739), False, 'import random\n'), ((3331, 3351), 'random.uniform', 'random.uniform', (['(1)', '(2)'], {}), '(1, 2)\n', (3345, 3351), False, 'import random\n'), ((9098, 9118), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (9112, 9118), False, 'import random\n'), ((9152, 9173), 'random.randint', 'random.randint', (['(0)', '(15)'], {}), '(0, 15)\n', (9166, 9173), False, 'import random\n'), ((9207, 9228), 'random.randint', 'random.randint', (['(0)', '(25)'], {}), '(0, 25)\n', (9221, 9228), False, 'import random\n'), ((11641, 11663), 'random.uniform', 'random.uniform', (['(15)', '(32)'], {}), '(15, 32)\n', (11655, 11663), False, 'import random\n'), ((4316, 4328), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4326, 4328), False, 'import uuid\n')]
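Each fetcher above returns a hip.Experiment. As a quick orientation for readers unfamiliar with HiPlot, here is a minimal sketch of rendering such an experiment from outside the package, assuming the published hiplot package is installed; the two-row data is illustrative and not part of the file above.

import hiplot as hip

# Build a small experiment the same way demo_basic_usage() does,
# then write it out as a self-contained HTML page.
exp = hip.Experiment.from_iterable([
    {'dropout': 0.1, 'lr': 0.001, 'loss': 10.0, 'optimizer': 'SGD'},
    {'dropout': 0.15, 'lr': 0.01, 'loss': 3.5, 'optimizer': 'Adam'},
])
exp.to_html('hiplot_demo.html')  # or exp.display() inside a Jupyter notebook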
brauls/ingredients-service
app/endpoints/common/dtos/ingredient.py
67c1408f96f4b407d7e7b3e5e62406a6931de1c1
"""Ingredient dto.
"""


class Ingredient():
    """Class to represent an ingredient.
    """

    def __init__(self, name, availability_per_month):
        self.name = name
        self.availability_per_month = availability_per_month

    def __repr__(self):
        return """{} is the name.""".format(self.name)
[]
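A usage sketch of the Ingredient dto above. The import path follows the repo_path shown, and the twelve-entry month list is an assumed shape for illustration; neither is documented in the file itself.

from app.endpoints.common.dtos.ingredient import Ingredient

# Hypothetical instantiation: one availability flag per calendar month (assumption).
strawberry = Ingredient(
    name="strawberry",
    availability_per_month=[False, False, False, False, True, True,
                            True, True, False, False, False, False],
)
print(repr(strawberry))  # prints: strawberry is the name.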
kallangerard/grocery-barcode-scanner
barcode.py
0a866c5b20c43355b642c0b78ba09d5cf4b0383c
import logging

import groceries.api as groceries
import barcodescanner.scan as barcode


def main():
    grocy = groceries.GrocyAPIClient()
    while True:
        scanner = barcode.Scan()
        line = scanner.PollScanner()
        if line != None:
            response = grocy.consume_barcode(line)
            logging.debug(response)


if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    main()
[((113, 139), 'groceries.api.GrocyAPIClient', 'groceries.GrocyAPIClient', ([], {}), '()\n', (137, 139), True, 'import groceries.api as groceries\n'), ((371, 411), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (390, 411), False, 'import logging\n'), ((174, 188), 'barcodescanner.scan.Scan', 'barcode.Scan', ([], {}), '()\n', (186, 188), True, 'import barcodescanner.scan as barcode\n'), ((314, 337), 'logging.debug', 'logging.debug', (['response'], {}), '(response)\n', (327, 337), False, 'import logging\n')]
mr-sk/easy-icm-runner
setup.py
01cf9d7d8e4ef13afc18dbdda2862035121f3624
import setuptools

with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="easy-icm-runner",
    version="1.0.6",
    author="Bachir El Koussa",
    author_email="[email protected]",
    description="A wrapper for IBM ICMs Scheduler API Calls",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/equinoxfitness/easy-icm-runner/",
    #packages=setuptools.find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
    py_modules = ['icm_runner'],
    install_requires=[
        'requests',
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
[((88, 634), 'setuptools.setup', 'setuptools.setup', ([], {'name': '"""easy-icm-runner"""', 'version': '"""1.0.6"""', 'author': '"""Bachir El Koussa"""', 'author_email': '"""[email protected]"""', 'description': '"""A wrapper for IBM ICMs Scheduler API Calls"""', 'long_description': 'long_description', 'long_description_content_type': '"""text/markdown"""', 'url': '"""https://github.com/equinoxfitness/easy-icm-runner/"""', 'py_modules': "['icm_runner']", 'install_requires': "['requests']", 'classifiers': "['Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent']"}), "(name='easy-icm-runner', version='1.0.6', author=\n 'Bachir El Koussa', author_email='[email protected]', description=\n 'A wrapper for IBM ICMs Scheduler API Calls', long_description=\n long_description, long_description_content_type='text/markdown', url=\n 'https://github.com/equinoxfitness/easy-icm-runner/', py_modules=[\n 'icm_runner'], install_requires=['requests'], classifiers=[\n 'Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent'])\n", (104, 634), False, 'import setuptools\n')]
zee93/molecule_parser
test_molecule.py
42f5a3722d733ef9f7243bfa2b0b9a08c7bc5d23
import unittest

from molecule import onize_formula, update_equation_with_multiplier, flaten_formula, parse_molecule


class MoleculeParserTestCases(unittest.TestCase):
    def test_onizing_formulas(self):
        self.assertEqual(onize_formula('H'), 'H1')
        self.assertEqual(onize_formula('H2O'), 'H2O1')
        self.assertEqual(onize_formula('Mg(OH)2'), 'Mg1(O1H1)2')
        self.assertEqual(onize_formula('K4[ON(SO3)2]2'), 'K4[O1N1(S1O3)2]2')

    def test_updating_formula_with_multipler(self):
        self.assertEqual(update_equation_with_multiplier('H1', '2'), 'H2')
        self.assertEqual(update_equation_with_multiplier('K4[O1N1(SO3)2]2', '2'), 'K8[O2N2(SO6)4]4')

    def test_flatting_formula(self):
        self.assertEqual(flaten_formula('H2O'), 'H2O')
        self.assertEqual(flaten_formula('[H1]2O'), 'H2O')
        self.assertEqual(flaten_formula('M1g1(O1H1)2'), 'M1g1O2H2')
        self.assertEqual(flaten_formula('K4[O1N1(S1O3)2]2'), 'K4O2N2S4O12')

    def test_full_parsing(self):
        parsed_mole = parse_molecule('H2O')
        self.assertEqual(len(parsed_mole.keys()), 2)
        self.assertEqual(parsed_mole['H'], 2)
        self.assertEqual(parsed_mole['O'], 1)

        parsed_mole = parse_molecule('Mg(OH)2')
        self.assertEqual(len(parsed_mole.keys()), 3)
        self.assertEqual(parsed_mole['H'], 2)
        self.assertEqual(parsed_mole['O'], 2)
        self.assertEqual(parsed_mole['Mg'], 1)

        parsed_mole = parse_molecule('K4[ON(SO3)2]2')
        self.assertEqual(len(parsed_mole.keys()), 4)
        self.assertEqual(parsed_mole['K'], 4)
        self.assertEqual(parsed_mole['O'], 14)
        self.assertEqual(parsed_mole['N'], 2)
        self.assertEqual(parsed_mole['S'], 4)


if __name__ == '__main__':
    unittest.main()
[((1768, 1783), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1781, 1783), False, 'import unittest\n'), ((1034, 1055), 'molecule.parse_molecule', 'parse_molecule', (['"""H2O"""'], {}), "('H2O')\n", (1048, 1055), False, 'from molecule import onize_formula, update_equation_with_multiplier, flaten_formula, parse_molecule\n'), ((1224, 1249), 'molecule.parse_molecule', 'parse_molecule', (['"""Mg(OH)2"""'], {}), "('Mg(OH)2')\n", (1238, 1249), False, 'from molecule import onize_formula, update_equation_with_multiplier, flaten_formula, parse_molecule\n'), ((1465, 1496), 'molecule.parse_molecule', 'parse_molecule', (['"""K4[ON(SO3)2]2"""'], {}), "('K4[ON(SO3)2]2')\n", (1479, 1496), False, 'from molecule import onize_formula, update_equation_with_multiplier, flaten_formula, parse_molecule\n'), ((231, 249), 'molecule.onize_formula', 'onize_formula', (['"""H"""'], {}), "('H')\n", (244, 249), False, 'from molecule import onize_formula, update_equation_with_multiplier, flaten_formula, parse_molecule\n'), ((282, 302), 'molecule.onize_formula', 'onize_formula', (['"""H2O"""'], {}), "('H2O')\n", (295, 302), False, 'from molecule import onize_formula, update_equation_with_multiplier, flaten_formula, parse_molecule\n'), ((337, 361), 'molecule.onize_formula', 'onize_formula', (['"""Mg(OH)2"""'], {}), "('Mg(OH)2')\n", (350, 361), False, 'from molecule import onize_formula, update_equation_with_multiplier, flaten_formula, parse_molecule\n'), ((402, 432), 'molecule.onize_formula', 'onize_formula', (['"""K4[ON(SO3)2]2"""'], {}), "('K4[ON(SO3)2]2')\n", (415, 432), False, 'from molecule import onize_formula, update_equation_with_multiplier, flaten_formula, parse_molecule\n'), ((532, 574), 'molecule.update_equation_with_multiplier', 'update_equation_with_multiplier', (['"""H1"""', '"""2"""'], {}), "('H1', '2')\n", (563, 574), False, 'from molecule import onize_formula, update_equation_with_multiplier, flaten_formula, parse_molecule\n'), ((607, 662), 'molecule.update_equation_with_multiplier', 'update_equation_with_multiplier', (['"""K4[O1N1(SO3)2]2"""', '"""2"""'], {}), "('K4[O1N1(SO3)2]2', '2')\n", (638, 662), False, 'from molecule import onize_formula, update_equation_with_multiplier, flaten_formula, parse_molecule\n'), ((746, 767), 'molecule.flaten_formula', 'flaten_formula', (['"""H2O"""'], {}), "('H2O')\n", (760, 767), False, 'from molecule import onize_formula, update_equation_with_multiplier, flaten_formula, parse_molecule\n'), ((801, 825), 'molecule.flaten_formula', 'flaten_formula', (['"""[H1]2O"""'], {}), "('[H1]2O')\n", (815, 825), False, 'from molecule import onize_formula, update_equation_with_multiplier, flaten_formula, parse_molecule\n'), ((859, 888), 'molecule.flaten_formula', 'flaten_formula', (['"""M1g1(O1H1)2"""'], {}), "('M1g1(O1H1)2')\n", (873, 888), False, 'from molecule import onize_formula, update_equation_with_multiplier, flaten_formula, parse_molecule\n'), ((927, 961), 'molecule.flaten_formula', 'flaten_formula', (['"""K4[O1N1(S1O3)2]2"""'], {}), "('K4[O1N1(S1O3)2]2')\n", (941, 961), False, 'from molecule import onize_formula, update_equation_with_multiplier, flaten_formula, parse_molecule\n')]
lyth031/ptb_lm
config.py
71f687fdf41c6b981a306269c1341ea8a8347bb6
# -*- coding: utf-8 -*-


class Config(object):
    def __init__(self):
        self.init_scale = 0.1
        self.learning_rate = 1.0
        self.max_grad_norm = 5
        self.num_layers = 2
        self.slice_size = 30
        self.hidden_size = 200
        self.max_epoch = 13
        self.keep_prob = 0.8
        self.lr_const_epoch = 4
        self.lr_decay = 0.7
        self.batch_size = 30
        self.vocab_size = 10000
        self.rnn_model = "gru"
        self.data_path = "./data/"
        self.save_path = "../out/cudnn/gru/"
[]
HupuInc/node-mysql-listener
binding.gyp
d23e55910acd1559d8339f36b1549f21aee8adaa
{
  'targets': [
    {
      # have to specify 'liblib' here since gyp will remove the first one :\
      'target_name': 'mysql_bindings',
      'sources': [
        'src/mysql_bindings.cc',
        'src/mysql_bindings_connection.cc',
        'src/mysql_bindings_result.cc',
        'src/mysql_bindings_statement.cc',
      ],
      'conditions': [
        ['OS=="win"', {
          # no Windows support yet...
        }, {
          'libraries': [
            '<!@(mysql_config --libs_r)'
          ],
        }],
        ['OS=="mac"', {
          # cflags on OS X are stupid and have to be defined like this
          'xcode_settings': {
            'OTHER_CFLAGS': [
              '<!@(mysql_config --cflags)'
            ]
          }
        }, {
          'cflags': [
            '<!@(mysql_config --cflags)'
          ],
        }]
      ]
    }
  ]
}
[]
GlenWalker/pymel
pymel/__init__.py
8b69b72e1bb726a66792707af39626a987bf5c21
# copyright Chad Dombrova [email protected]
# created at luma pictures www.luma-pictures.com

"""
*******************************
PyMEL
*******************************

PyMEL makes python scripting in Maya work the way it should. Maya's command
module is a direct translation of MEL commands into python functions. The
result is a very awkward and unpythonic syntax which does not take advantage
of python's strengths -- particularly, a flexible, object-oriented design.
PyMEL builds on the cmds module by organizing many of its commands into a
class hierarchy, and by customizing them to operate in a more succinct and
intuitive way.

=======================================
Special Thanks
=======================================

Special thanks to those studios with the foresight to support an open-source
project of this nature: Luma Pictures, Attitude Studio, and ImageMovers Digital.
"""

__versiontuple__ = (1, 2, 0)
__version_suffix__ = 'a1'
__version__ = '.'.join(str(x) for x in __versiontuple__) + __version_suffix__

__authors__ = ['Chad Dombrova', 'Paul Molodowitch', 'Olivier Renouard', 'Ofer Koren']

import sys

assert sys.version_info > (2, 7), ("pymel version %s is compatible with Maya2016/python2.7 or later" % __version__)
[]
rlbellaire/ActT
setup.py
b6e936e5037c5f92ad1c281e2bf3700bf91aea42
from setuptools import find_packages, setup
setup(name='ActT',
      version='0.6',
      description='Active Testing',
      url='',
      author='',
      author_email='none',
      license='BSD',
      packages=find_packages(),
      install_requires=[
          'numpy', 'pandas', 'matplotlib','scipy','scikit-learn','opencv-python',
          'statswag','tensorflow'
      ],
      zip_safe=True)
[((215, 230), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (228, 230), False, 'from setuptools import find_packages, setup\n')]
coylen/pySG
Fusion/deltat.py
6af1b8387c256f8898e2198c635c8e4b72ec3942
# deltat.py time difference calculation for sensor fusion
# Released under the MIT License (MIT)
# Copyright (c) 2018 Peter Hinch

# Provides TimeDiff function and DeltaT class.

# The following notes cover special cases. Where the device performing fusion
# is linked to the IMU and is running MicroPython no special treatment is
# needed.
# The special cases are:
# 1. Device connected to the IMU is linked to a separate platform doing fusion.
# 2. Either or both are not running MicroPython.

# If the device providing the vectors is not running on MicroPython the user
# must supply timestamps and a function capable of differencing these. The
# function is passed to the Fusion constructor and the timestamp is provided
# along with the vector, being the time when the vector was acquired.

# If the device providing the vectors is running MicroPython but fusion is
# being performed on a device which is not, the user must provide their own
# implementation of ticks_diff which accounts for MicroPython rollover and
# must supply the returned ticks_us() values as a timestamp.

# Under MicroPython TimeDiff(start, end) uses time.ticks_diff.

# A DeltaT instance, called with function call syntax, returns a time
# difference from the previous call as a float value. Units seconds.

# If running under MicroPython and no time differencing function is supplied
# to the Fusion constructor it uses time.ticks_us as its time source and a
# default timediff function using time.ticks_diff() with a division by 1e6.
# If time differencing function is supplied a timestamp must be passsed as an
# arg to instance calls of Fusion.update() or Fusion.update_nomag(). In the
# async version the user supplied read_coro() must return a timestamp with the
# vector.

# On 1st pass dt evidently can't be computed. A notional value of 100μs is
# returned. The Madgwick algorithm takes seconds to stabilise.

try:
    import utime as time
except ImportError:
    import time

is_micropython = hasattr(time, 'ticks_diff')


class DeltaT():
    def __init__(self, timediff):
        if timediff is None:
            self.expect_ts = False
            if is_micropython:
                self.timediff = lambda start, end : time.ticks_diff(start, end)/1000000
            else:
                raise ValueError('You must define a timediff function')
        else:
            self.expect_ts = True
            self.timediff = timediff
        self.start_time = None

    def __call__(self, ts):
        if self.expect_ts:
            if ts is None:
                raise ValueError('Timestamp expected but not supplied.')
        else:
            if is_micropython:
                ts = time.ticks_us()
            else:
                raise RuntimeError('Not MicroPython: provide timestamps and a timediff function')
        # ts is now valid
        if self.start_time is None:  # 1st call: self.start_time is invalid
            self.start_time = ts
            return 0.0001  # 100μs notional delay. 1st reading is invalid in any case
        dt = self.timediff(ts, self.start_time)
        self.start_time = ts
        return dt
[((2672, 2687), 'time.ticks_us', 'time.ticks_us', ([], {}), '()\n', (2685, 2687), False, 'import time\n'), ((2208, 2235), 'time.ticks_diff', 'time.ticks_diff', (['start', 'end'], {}), '(start, end)\n', (2223, 2235), False, 'import time\n')]
wenh06/colour
colour/models/rgb/datasets/sony.py
445fdad2711ae39c95b4375166905568d24a95f4
# -*- coding: utf-8 -*- """ Sony Colourspaces ================= Defines the *Sony* colourspaces: - :attr:`colour.models.RGB_COLOURSPACE_S_GAMUT`. - :attr:`colour.models.RGB_COLOURSPACE_S_GAMUT3`. - :attr:`colour.models.RGB_COLOURSPACE_S_GAMUT3_CINE`. - :attr:`colour.models.RGB_COLOURSPACE_VENICE_S_GAMUT3`. - :attr:`colour.models.RGB_COLOURSPACE_VENICE_S_GAMUT3_CINE`. Notes ----- - The *Venice S-Gamut3* and *Venice S-Gamut3.Cine* primaries and whitepoint were derived with the following `Google Colab Notebook \ <https://colab.research.google.com/drive/1ZGTij7jT8eZRMPUkyWlv_x5ix5Q5twMB>`__. References ---------- - :cite:`Gaggioni` : Gaggioni, H., Dhanendra, P., Yamashita, J., Kawada, N., Endo, K., & Clark, C. (n.d.). S-Log: A new LUT for digital production mastering and interchange applications (Vol. 709, pp. 1-13). http://pro.sony.com/bbsccms/assets/files/mkt/cinema/solutions/slog_manual.pdf - :cite:`SonyCorporation` : Sony Corporation. (n.d.). S-Log Whitepaper (pp. 1-17). http://www.theodoropoulos.info/attachments/076_on%20S-Log.pdf - :cite:`SonyCorporationd` : Sony Corporation. (n.d.). Technical Summary for S-Gamut3.Cine/S-Log3 and S-Gamut3/S-Log3 (pp. 1-7). http://community.sony.com/sony/attachments/sony/\ large-sensor-camera-F5-F55/12359/2/\ TechnicalSummary_for_S-Gamut3Cine_S-Gamut3_S-Log3_V1_00.pdf - :cite:`SonyCorporatione` : Sony Corporation. (n.d.). S-Gamut3_S-Gamut3Cine_Matrix.xlsx. https://community.sony.com/sony/attachments/sony/\ large-sensor-camera-F5-F55/12359/3/S-Gamut3_S-Gamut3Cine_Matrix.xlsx - :cite:`SonyElectronicsCorporation2020` : Sony Electronics Corporation. (2020). IDT.Sony.Venice_SLog3_SGamut3.ctl. https://github.com/ampas/\ aces-dev/blob/710ecbe52c87ce9f4a1e02c8ddf7ea0d6b611cc8/transforms/ctl/idt/\ vendorSupplied/sony/IDT.Sony.Venice_SLog3_SGamut3.ctl - :cite:`SonyElectronicsCorporation2020a` : Sony Electronics Corporation. (2020). IDT.Sony.Venice_SLog3_SGamut3Cine.ctl. 
https://github.com/ampas/\ aces-dev/blob/710ecbe52c87ce9f4a1e02c8ddf7ea0d6b611cc8/transforms/ctl/idt/\ vendorSupplied/sony/IDT.Sony.Venice_SLog3_SGamut3Cine.ctl """ from __future__ import division, unicode_literals import numpy as np from colour.colorimetry import CCS_ILLUMINANTS from colour.models.rgb import (RGB_Colourspace, log_encoding_SLog2, log_decoding_SLog2, log_encoding_SLog3, log_decoding_SLog3, normalised_primary_matrix) __author__ = 'Colour Developers' __copyright__ = 'Copyright (C) 2013-2020 - Colour Developers' __license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause' __maintainer__ = 'Colour Developers' __email__ = '[email protected]' __status__ = 'Production' __all__ = [ 'PRIMARIES_S_GAMUT', 'WHITEPOINT_NAME_S_GAMUT', 'CCS_WHITEPOINT_S_GAMUT', 'MATRIX_S_GAMUT_TO_XYZ', 'MATRIX_XYZ_TO_S_GAMUT', 'RGB_COLOURSPACE_S_GAMUT', 'PRIMARIES_S_GAMUT3', 'WHITEPOINT_NAME_S_GAMUT3', 'CCS_WHITEPOINT_S_GAMUT3', 'MATRIX_S_GAMUT3_TO_XYZ', 'MATRIX_XYZ_TO_S_GAMUT3', 'RGB_COLOURSPACE_S_GAMUT3', 'PRIMARIES_S_GAMUT3_CINE', 'WHITEPOINT_NAME_S_GAMUT3_CINE', 'CCS_WHITEPOINT_S_GAMUT3_CINE', 'MATRIX_S_GAMUT3_CINE_TO_XYZ', 'MATRIX_XYZ_TO_S_GAMUT3_CINE', 'RGB_COLOURSPACE_S_GAMUT3_CINE', 'PRIMARIES_VENICE_S_GAMUT3', 'WHITEPOINT_NAME_VENICE_S_GAMUT3', 'CCS_WHITEPOINT_VENICE_S_GAMUT3', 'MATRIX_VENICE_S_GAMUT3_TO_XYZ', 'MATRIX_XYZ_TO_VENICE_S_GAMUT3', 'RGB_COLOURSPACE_VENICE_S_GAMUT3', 'PRIMARIES_VENICE_S_GAMUT3_CINE', 'WHITEPOINT_NAME_VENICE_S_GAMUT3_CINE', 'CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE', 'MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ', 'MATRIX_XYZ_TO_VENICE_S_GAMUT3_CINE', 'RGB_COLOURSPACE_VENICE_S_GAMUT3_CINE' ] PRIMARIES_S_GAMUT = np.array([ [0.7300, 0.2800], [0.1400, 0.8550], [0.1000, -0.0500], ]) """ *S-Gamut* colourspace primaries. PRIMARIES_S_GAMUT : ndarray, (3, 2) """ WHITEPOINT_NAME_S_GAMUT = 'D65' """ *S-Gamut* colourspace whitepoint name. WHITEPOINT_NAME_S_GAMUT : unicode """ CCS_WHITEPOINT_S_GAMUT = (CCS_ILLUMINANTS[ 'CIE 1931 2 Degree Standard Observer'][WHITEPOINT_NAME_S_GAMUT]) """ *S-Gamut* colourspace whitepoint chromaticity coordinates. CCS_WHITEPOINT_S_GAMUT : ndarray """ MATRIX_S_GAMUT_TO_XYZ = np.array([ [0.7064827132, 0.1288010498, 0.1151721641], [0.2709796708, 0.7866064112, -0.0575860820], [-0.0096778454, 0.0046000375, 1.0941355587], ]) """ *S-Gamut* colourspace to *CIE XYZ* tristimulus values matrix. MATRIX_S_GAMUT_TO_XYZ : array_like, (3, 3) """ MATRIX_XYZ_TO_S_GAMUT = np.array([ [1.5073998991, -0.2458221374, -0.1716116808], [-0.5181517271, 1.3553912409, 0.1258786682], [0.0155116982, -0.0078727714, 0.9119163656], ]) """ *CIE XYZ* tristimulus values to *S-Gamut* colourspace matrix. MATRIX_XYZ_TO_S_GAMUT : array_like, (3, 3) """ RGB_COLOURSPACE_S_GAMUT = RGB_Colourspace( 'S-Gamut', PRIMARIES_S_GAMUT, CCS_WHITEPOINT_S_GAMUT, WHITEPOINT_NAME_S_GAMUT, MATRIX_S_GAMUT_TO_XYZ, MATRIX_XYZ_TO_S_GAMUT, log_encoding_SLog2, log_decoding_SLog2, ) RGB_COLOURSPACE_S_GAMUT.__doc__ = """ *S-Gamut* colourspace. References ---------- :cite:`Gaggioni`, :cite:`SonyCorporation` RGB_COLOURSPACE_S_GAMUT : RGB_Colourspace """ PRIMARIES_S_GAMUT3 = PRIMARIES_S_GAMUT """ *S-Gamut3* colourspace primaries. PRIMARIES_S_GAMUT3 : ndarray, (3, 2) """ WHITEPOINT_NAME_S_GAMUT3 = WHITEPOINT_NAME_S_GAMUT """ *S-Gamut3* colourspace whitepoint name. WHITEPOINT_NAME_S_GAMUT3 : unicode """ CCS_WHITEPOINT_S_GAMUT3 = CCS_WHITEPOINT_S_GAMUT """ *S-Gamut3* colourspace whitepoint chromaticity coordinates. 
CCS_WHITEPOINT_S_GAMUT3 : ndarray """ MATRIX_S_GAMUT3_TO_XYZ = MATRIX_S_GAMUT_TO_XYZ """ *S-Gamut3* colourspace to *CIE XYZ* tristimulus values matrix. MATRIX_S_GAMUT3_TO_XYZ : array_like, (3, 3) """ MATRIX_XYZ_TO_S_GAMUT3 = MATRIX_XYZ_TO_S_GAMUT """ *CIE XYZ* tristimulus values to *S-Gamut3* colourspace matrix. MATRIX_XYZ_TO_S_GAMUT3 : array_like, (3, 3) """ RGB_COLOURSPACE_S_GAMUT3 = RGB_Colourspace( 'S-Gamut3', PRIMARIES_S_GAMUT3, CCS_WHITEPOINT_S_GAMUT3, WHITEPOINT_NAME_S_GAMUT3, MATRIX_S_GAMUT3_TO_XYZ, MATRIX_XYZ_TO_S_GAMUT3, log_encoding_SLog3, log_decoding_SLog3, ) RGB_COLOURSPACE_S_GAMUT3.__doc__ = """ *S-Gamut3* colourspace. References ---------- :cite:`SonyCorporationd` RGB_COLOURSPACE_S_GAMUT3 : RGB_Colourspace """ PRIMARIES_S_GAMUT3_CINE = np.array([ [0.76600, 0.27500], [0.22500, 0.80000], [0.08900, -0.08700], ]) """ *S-Gamut3.Cine* colourspace primaries. PRIMARIES_S_GAMUT3_CINE : ndarray, (3, 2) """ WHITEPOINT_NAME_S_GAMUT3_CINE = WHITEPOINT_NAME_S_GAMUT """ *S-Gamut3.Cine* colourspace whitepoint name. WHITEPOINT_NAME_S_GAMUT3_CINE : unicode """ CCS_WHITEPOINT_S_GAMUT3_CINE = CCS_WHITEPOINT_S_GAMUT """ *S-Gamut3.Cine* colourspace whitepoint chromaticity coordinates. CCS_WHITEPOINT_S_GAMUT3_CINE : ndarray """ MATRIX_S_GAMUT3_CINE_TO_XYZ = np.array([ [0.5990839208, 0.2489255161, 0.1024464902], [0.2150758201, 0.8850685017, -0.1001443219], [-0.0320658495, -0.0276583907, 1.1487819910], ]) """ *S-Gamut3.Cine* colourspace to *CIE XYZ* tristimulus values matrix. MATRIX_S_GAMUT3_CINE_TO_XYZ : array_like, (3, 3) """ MATRIX_XYZ_TO_S_GAMUT3_CINE = np.array([ [1.8467789693, -0.5259861230, -0.2105452114], [-0.4441532629, 1.2594429028, 0.1493999729], [0.0408554212, 0.0156408893, 0.8682072487], ]) """ *CIE XYZ* tristimulus values to *S-Gamut3.Cine* colourspace matrix. MATRIX_XYZ_TO_S_GAMUT3_CINE : array_like, (3, 3) """ RGB_COLOURSPACE_S_GAMUT3_CINE = RGB_Colourspace( 'S-Gamut3.Cine', PRIMARIES_S_GAMUT3_CINE, CCS_WHITEPOINT_S_GAMUT3_CINE, WHITEPOINT_NAME_S_GAMUT3_CINE, MATRIX_S_GAMUT3_CINE_TO_XYZ, MATRIX_XYZ_TO_S_GAMUT3_CINE, log_encoding_SLog3, log_decoding_SLog3, ) RGB_COLOURSPACE_S_GAMUT3_CINE.__doc__ = """ *S-Gamut3.Cine* colourspace. References ---------- :cite:`SonyCorporatione` RGB_COLOURSPACE_S_GAMUT3_CINE : RGB_Colourspace """ PRIMARIES_VENICE_S_GAMUT3 = np.array([ [0.740464264304292, 0.279364374750660], [0.089241145423286, 0.893809528608105], [0.110488236673827, -0.052579333080476], ]) """ *Venice S-Gamut3* colourspace primaries. PRIMARIES_VENICE_S_GAMUT3 : ndarray, (3, 2) """ WHITEPOINT_NAME_VENICE_S_GAMUT3 = WHITEPOINT_NAME_S_GAMUT """ *Venice S-Gamut3* colourspace whitepoint name. WHITEPOINT_NAME_VENICE_S_GAMUT3 : unicode """ CCS_WHITEPOINT_VENICE_S_GAMUT3 = CCS_WHITEPOINT_S_GAMUT """ *Venice S-Gamut3* colourspace whitepoint chromaticity coordinates. CCS_WHITEPOINT_VENICE_S_GAMUT3 : ndarray """ MATRIX_VENICE_S_GAMUT3_TO_XYZ = normalised_primary_matrix( PRIMARIES_VENICE_S_GAMUT3, CCS_WHITEPOINT_VENICE_S_GAMUT3) """ *Venice S-Gamut3* colourspace to *CIE XYZ* tristimulus values matrix. MATRIX_VENICE_S_GAMUT3_TO_XYZ : array_like, (3, 3) """ MATRIX_XYZ_TO_VENICE_S_GAMUT3 = np.linalg.inv(MATRIX_VENICE_S_GAMUT3_TO_XYZ) """ *CIE XYZ* tristimulus values to *Venice S-Gamut3* colourspace matrix. 
MATRIX_XYZ_TO_VENICE_S_GAMUT3 : array_like, (3, 3) """ RGB_COLOURSPACE_VENICE_S_GAMUT3 = RGB_Colourspace( 'Venice S-Gamut3', PRIMARIES_VENICE_S_GAMUT3, CCS_WHITEPOINT_VENICE_S_GAMUT3, WHITEPOINT_NAME_VENICE_S_GAMUT3, MATRIX_VENICE_S_GAMUT3_TO_XYZ, MATRIX_XYZ_TO_VENICE_S_GAMUT3, log_encoding_SLog3, log_decoding_SLog3, ) RGB_COLOURSPACE_VENICE_S_GAMUT3.__doc__ = """ *Venice S-Gamut3* colourspace. References ---------- :cite:`SonyElectronicsCorporation2020` RGB_COLOURSPACE_VENICE_S_GAMUT3 : RGB_Colourspace """ PRIMARIES_VENICE_S_GAMUT3_CINE = np.array([ [0.775901871567345, 0.274502392854799], [0.188682902773355, 0.828684937020288], [0.101337382499301, -0.089187517306263], ]) """ *Venice S-Gamut3.Cine* colourspace primaries. PRIMARIES_VENICE_S_GAMUT3_CINE : ndarray, (3, 2) """ WHITEPOINT_NAME_VENICE_S_GAMUT3_CINE = WHITEPOINT_NAME_S_GAMUT """ *Venice S-Gamut3.Cine* colourspace whitepoint name. WHITEPOINT_NAME_VENICE_S_GAMUT3_CINE : unicode """ CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE = CCS_WHITEPOINT_S_GAMUT """ *Venice S-Gamut3.Cine* colourspace whitepoint chromaticity coordinates. CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE : ndarray """ MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ = normalised_primary_matrix( PRIMARIES_VENICE_S_GAMUT3_CINE, CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE) """ *Venice S-Gamut3.Cine* colourspace to *CIE XYZ* tristimulus values matrix. MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ : array_like, (3, 3) """ MATRIX_XYZ_TO_VENICE_S_GAMUT3_CINE = np.linalg.inv( MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ) """ *CIE XYZ* tristimulus values to *Venice S-Gamut3.Cine* colourspace matrix. MATRIX_XYZ_TO_VENICE_S_GAMUT3_CINE : array_like, (3, 3) """ RGB_COLOURSPACE_VENICE_S_GAMUT3_CINE = RGB_Colourspace( 'Venice S-Gamut3.Cine', PRIMARIES_VENICE_S_GAMUT3_CINE, CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE, WHITEPOINT_NAME_VENICE_S_GAMUT3_CINE, MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ, MATRIX_XYZ_TO_VENICE_S_GAMUT3_CINE, log_encoding_SLog3, log_decoding_SLog3, ) RGB_COLOURSPACE_VENICE_S_GAMUT3_CINE.__doc__ = """ *Venice S-Gamut3.Cine* colourspace. References ---------- :cite:`SonyElectronicsCorporation2020a` RGB_COLOURSPACE_VENICE_S_GAMUT3_CINE : RGB_Colourspace """
[((3812, 3865), 'numpy.array', 'np.array', (['[[0.73, 0.28], [0.14, 0.855], [0.1, -0.05]]'], {}), '([[0.73, 0.28], [0.14, 0.855], [0.1, -0.05]])\n', (3820, 3865), True, 'import numpy as np\n'), ((4325, 4473), 'numpy.array', 'np.array', (['[[0.7064827132, 0.1288010498, 0.1151721641], [0.2709796708, 0.7866064112, -\n 0.057586082], [-0.0096778454, 0.0046000375, 1.0941355587]]'], {}), '([[0.7064827132, 0.1288010498, 0.1151721641], [0.2709796708, \n 0.7866064112, -0.057586082], [-0.0096778454, 0.0046000375, 1.0941355587]])\n', (4333, 4473), True, 'import numpy as np\n'), ((4624, 4775), 'numpy.array', 'np.array', (['[[1.5073998991, -0.2458221374, -0.1716116808], [-0.5181517271, 1.3553912409,\n 0.1258786682], [0.0155116982, -0.0078727714, 0.9119163656]]'], {}), '([[1.5073998991, -0.2458221374, -0.1716116808], [-0.5181517271, \n 1.3553912409, 0.1258786682], [0.0155116982, -0.0078727714, 0.9119163656]])\n', (4632, 4775), True, 'import numpy as np\n'), ((4927, 5115), 'colour.models.rgb.RGB_Colourspace', 'RGB_Colourspace', (['"""S-Gamut"""', 'PRIMARIES_S_GAMUT', 'CCS_WHITEPOINT_S_GAMUT', 'WHITEPOINT_NAME_S_GAMUT', 'MATRIX_S_GAMUT_TO_XYZ', 'MATRIX_XYZ_TO_S_GAMUT', 'log_encoding_SLog2', 'log_decoding_SLog2'], {}), "('S-Gamut', PRIMARIES_S_GAMUT, CCS_WHITEPOINT_S_GAMUT,\n WHITEPOINT_NAME_S_GAMUT, MATRIX_S_GAMUT_TO_XYZ, MATRIX_XYZ_TO_S_GAMUT,\n log_encoding_SLog2, log_decoding_SLog2)\n", (4942, 5115), False, 'from colour.models.rgb import RGB_Colourspace, log_encoding_SLog2, log_decoding_SLog2, log_encoding_SLog3, log_decoding_SLog3, normalised_primary_matrix\n'), ((6081, 6275), 'colour.models.rgb.RGB_Colourspace', 'RGB_Colourspace', (['"""S-Gamut3"""', 'PRIMARIES_S_GAMUT3', 'CCS_WHITEPOINT_S_GAMUT3', 'WHITEPOINT_NAME_S_GAMUT3', 'MATRIX_S_GAMUT3_TO_XYZ', 'MATRIX_XYZ_TO_S_GAMUT3', 'log_encoding_SLog3', 'log_decoding_SLog3'], {}), "('S-Gamut3', PRIMARIES_S_GAMUT3, CCS_WHITEPOINT_S_GAMUT3,\n WHITEPOINT_NAME_S_GAMUT3, MATRIX_S_GAMUT3_TO_XYZ,\n MATRIX_XYZ_TO_S_GAMUT3, log_encoding_SLog3, log_decoding_SLog3)\n", (6096, 6275), False, 'from colour.models.rgb import RGB_Colourspace, log_encoding_SLog2, log_decoding_SLog2, log_encoding_SLog3, log_decoding_SLog3, normalised_primary_matrix\n'), ((6489, 6546), 'numpy.array', 'np.array', (['[[0.766, 0.275], [0.225, 0.8], [0.089, -0.087]]'], {}), '([[0.766, 0.275], [0.225, 0.8], [0.089, -0.087]])\n', (6497, 6546), True, 'import numpy as np\n'), ((7016, 7165), 'numpy.array', 'np.array', (['[[0.5990839208, 0.2489255161, 0.1024464902], [0.2150758201, 0.8850685017, -\n 0.1001443219], [-0.0320658495, -0.0276583907, 1.148781991]]'], {}), '([[0.5990839208, 0.2489255161, 0.1024464902], [0.2150758201, \n 0.8850685017, -0.1001443219], [-0.0320658495, -0.0276583907, 1.148781991]])\n', (7024, 7165), True, 'import numpy as np\n'), ((7334, 7483), 'numpy.array', 'np.array', (['[[1.8467789693, -0.525986123, -0.2105452114], [-0.4441532629, 1.2594429028,\n 0.1493999729], [0.0408554212, 0.0156408893, 0.8682072487]]'], {}), '([[1.8467789693, -0.525986123, -0.2105452114], [-0.4441532629, \n 1.2594429028, 0.1493999729], [0.0408554212, 0.0156408893, 0.8682072487]])\n', (7342, 7483), True, 'import numpy as np\n'), ((7654, 7882), 'colour.models.rgb.RGB_Colourspace', 'RGB_Colourspace', (['"""S-Gamut3.Cine"""', 'PRIMARIES_S_GAMUT3_CINE', 'CCS_WHITEPOINT_S_GAMUT3_CINE', 'WHITEPOINT_NAME_S_GAMUT3_CINE', 'MATRIX_S_GAMUT3_CINE_TO_XYZ', 'MATRIX_XYZ_TO_S_GAMUT3_CINE', 'log_encoding_SLog3', 'log_decoding_SLog3'], {}), "('S-Gamut3.Cine', PRIMARIES_S_GAMUT3_CINE,\n CCS_WHITEPOINT_S_GAMUT3_CINE, 
WHITEPOINT_NAME_S_GAMUT3_CINE,\n MATRIX_S_GAMUT3_CINE_TO_XYZ, MATRIX_XYZ_TO_S_GAMUT3_CINE,\n log_encoding_SLog3, log_decoding_SLog3)\n", (7669, 7882), False, 'from colour.models.rgb import RGB_Colourspace, log_encoding_SLog2, log_decoding_SLog2, log_encoding_SLog3, log_decoding_SLog3, normalised_primary_matrix\n'), ((8109, 8244), 'numpy.array', 'np.array', (['[[0.740464264304292, 0.27936437475066], [0.089241145423286, \n 0.893809528608105], [0.110488236673827, -0.052579333080476]]'], {}), '([[0.740464264304292, 0.27936437475066], [0.089241145423286, \n 0.893809528608105], [0.110488236673827, -0.052579333080476]])\n', (8117, 8244), True, 'import numpy as np\n'), ((8714, 8802), 'colour.models.rgb.normalised_primary_matrix', 'normalised_primary_matrix', (['PRIMARIES_VENICE_S_GAMUT3', 'CCS_WHITEPOINT_VENICE_S_GAMUT3'], {}), '(PRIMARIES_VENICE_S_GAMUT3,\n CCS_WHITEPOINT_VENICE_S_GAMUT3)\n', (8739, 8802), False, 'from colour.models.rgb import RGB_Colourspace, log_encoding_SLog2, log_decoding_SLog2, log_encoding_SLog3, log_decoding_SLog3, normalised_primary_matrix\n'), ((8967, 9011), 'numpy.linalg.inv', 'np.linalg.inv', (['MATRIX_VENICE_S_GAMUT3_TO_XYZ'], {}), '(MATRIX_VENICE_S_GAMUT3_TO_XYZ)\n', (8980, 9011), True, 'import numpy as np\n'), ((9177, 9417), 'colour.models.rgb.RGB_Colourspace', 'RGB_Colourspace', (['"""Venice S-Gamut3"""', 'PRIMARIES_VENICE_S_GAMUT3', 'CCS_WHITEPOINT_VENICE_S_GAMUT3', 'WHITEPOINT_NAME_VENICE_S_GAMUT3', 'MATRIX_VENICE_S_GAMUT3_TO_XYZ', 'MATRIX_XYZ_TO_VENICE_S_GAMUT3', 'log_encoding_SLog3', 'log_decoding_SLog3'], {}), "('Venice S-Gamut3', PRIMARIES_VENICE_S_GAMUT3,\n CCS_WHITEPOINT_VENICE_S_GAMUT3, WHITEPOINT_NAME_VENICE_S_GAMUT3,\n MATRIX_VENICE_S_GAMUT3_TO_XYZ, MATRIX_XYZ_TO_VENICE_S_GAMUT3,\n log_encoding_SLog3, log_decoding_SLog3)\n", (9192, 9417), False, 'from colour.models.rgb import RGB_Colourspace, log_encoding_SLog2, log_decoding_SLog2, log_encoding_SLog3, log_decoding_SLog3, normalised_primary_matrix\n'), ((9669, 9805), 'numpy.array', 'np.array', (['[[0.775901871567345, 0.274502392854799], [0.188682902773355, \n 0.828684937020288], [0.101337382499301, -0.089187517306263]]'], {}), '([[0.775901871567345, 0.274502392854799], [0.188682902773355, \n 0.828684937020288], [0.101337382499301, -0.089187517306263]])\n', (9677, 9805), True, 'import numpy as np\n'), ((10319, 10417), 'colour.models.rgb.normalised_primary_matrix', 'normalised_primary_matrix', (['PRIMARIES_VENICE_S_GAMUT3_CINE', 'CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE'], {}), '(PRIMARIES_VENICE_S_GAMUT3_CINE,\n CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE)\n', (10344, 10417), False, 'from colour.models.rgb import RGB_Colourspace, log_encoding_SLog2, log_decoding_SLog2, log_encoding_SLog3, log_decoding_SLog3, normalised_primary_matrix\n'), ((10597, 10646), 'numpy.linalg.inv', 'np.linalg.inv', (['MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ'], {}), '(MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ)\n', (10610, 10646), True, 'import numpy as np\n'), ((10832, 11106), 'colour.models.rgb.RGB_Colourspace', 'RGB_Colourspace', (['"""Venice S-Gamut3.Cine"""', 'PRIMARIES_VENICE_S_GAMUT3_CINE', 'CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE', 'WHITEPOINT_NAME_VENICE_S_GAMUT3_CINE', 'MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ', 'MATRIX_XYZ_TO_VENICE_S_GAMUT3_CINE', 'log_encoding_SLog3', 'log_decoding_SLog3'], {}), "('Venice S-Gamut3.Cine', PRIMARIES_VENICE_S_GAMUT3_CINE,\n CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE,\n WHITEPOINT_NAME_VENICE_S_GAMUT3_CINE,\n MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ, MATRIX_XYZ_TO_VENICE_S_GAMUT3_CINE,\n log_encoding_SLog3, log_decoding_SLog3)\n", 
(10847, 11106), False, 'from colour.models.rgb import RGB_Colourspace, log_encoding_SLog2, log_decoding_SLog2, log_encoding_SLog3, log_decoding_SLog3, normalised_primary_matrix\n')]
QiaoZhongzheng/EWC-sample-PMNIST
network.py
cd5e10b401582ab7f0dcd7a1e38aed6552192484
# -*- coding: UTF-8 -*-
'''=================================================
@Project -> File :EWC -> network
@IDE :PyCharm
@Author :Qiao Zhongzheng
@Date :2021/6/23 20:28
@Desc :
=================================================='''
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense, Conv2D,LeakyReLU,MaxPool2D,Flatten,Input


def fcnn():
    input = Input(shape=784,dtype='float32',name='input')
    # x = Dense(128,activation='relu')(input)
    # x = Dense(64,activation='relu')(x)
    # x = Dense(32,activation='relu')(x)
    x = Dense(256,activation='relu')(input)
    x = Dense(256,activation='relu')(x)
    output = Dense(10,activation='softmax')(x)

    return Model(input, output)
[((387, 434), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(784)', 'dtype': '"""float32"""', 'name': '"""input"""'}), "(shape=784, dtype='float32', name='input')\n", (392, 434), False, 'from tensorflow.keras.layers import Dense, Conv2D, LeakyReLU, MaxPool2D, Flatten, Input\n'), ((703, 723), 'tensorflow.keras.Model', 'Model', (['input', 'output'], {}), '(input, output)\n', (708, 723), False, 'from tensorflow.keras import Model\n'), ((569, 598), 'tensorflow.keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (574, 598), False, 'from tensorflow.keras.layers import Dense, Conv2D, LeakyReLU, MaxPool2D, Flatten, Input\n'), ((613, 642), 'tensorflow.keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (618, 642), False, 'from tensorflow.keras.layers import Dense, Conv2D, LeakyReLU, MaxPool2D, Flatten, Input\n'), ((658, 689), 'tensorflow.keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (663, 689), False, 'from tensorflow.keras.layers import Dense, Conv2D, LeakyReLU, MaxPool2D, Flatten, Input\n')]
Qfabiolous/QuanGuru
src/quanguru/classes/exceptions.py
285ca44ae857cc61337f73ea2eb600f485a09e32
# TODO turn prints into actual error raise, they are print for testing
def qSystemInitErrors(init):
    def newFunction(obj, **kwargs):
        init(obj, **kwargs)
        if obj._genericQSys__dimension is None:
            className = obj.__class__.__name__
            print(className + ' requires a dimension')
        elif obj.frequency is None:
            className = obj.__class__.__name__
            print(className + ' requires a frequency')
    return newFunction


def qCouplingInitErrors(init):
    def newFunction(obj, *args, **kwargs):
        init(obj, *args, **kwargs)
        if obj.couplingOperators is None:  # pylint: disable=protected-access
            className = obj.__class__.__name__
            print(className + ' requires a coupling functions')
        elif obj.coupledSystems is None:  # pylint: disable=protected-access
            className = obj.__class__.__name__
            print(className + ' requires a coupling systems')
        #for ind in range(len(obj._qCoupling__qSys)):
        #    if len(obj._qCoupling__cFncs) != len(obj._qCoupling__qSys):
        #        className = obj.__class__.__name__
        #        print(className + ' requires same number of systems as coupling functions')
    return newFunction


def sweepInitError(init):
    def newFunction(obj, **kwargs):
        init(obj, **kwargs)
        if obj.sweepList is None:
            className = obj.__class__.__name__
            print(className + ' requires either a list or relevant info, here are givens' + '\n' +  # noqa: W503, W504
                  'sweepList: ', obj.sweepList, '\n' +  # noqa: W504
                  'sweepMax: ', obj.sweepMax, '\n' +  # noqa: W504
                  'sweepMin: ', obj.sweepMin, '\n' +  # noqa: W504
                  'sweepPert: ', obj.sweepPert, '\n' +  # noqa: W504
                  'logSweep: ', obj.logSweep)
    return newFunction
[]
alexnikulkov/ReAgent
reagent/gym/tests/test_gym.py
e404c5772ea4118105c2eb136ca96ad5ca8e01db
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import logging import os import pprint import unittest import numpy as np # pyre-fixme[21]: Could not find module `pytest`. import pytest import torch from parameterized import parameterized from reagent.core.types import RewardOptions from reagent.gym.agents.agent import Agent from reagent.gym.agents.post_step import train_with_replay_buffer_post_step from reagent.gym.envs.union import Env__Union from reagent.gym.runners.gymrunner import evaluate_for_n_episodes, run_episode from reagent.gym.utils import build_normalizer, fill_replay_buffer from reagent.model_managers.model_manager import ModelManager from reagent.model_managers.union import ModelManager__Union from reagent.replay_memory.circular_replay_buffer import ReplayBuffer from reagent.tensorboardX import summary_writer_context from reagent.test.base.horizon_test_base import HorizonTestBase from torch.utils.tensorboard import SummaryWriter try: # Use internal runner or OSS otherwise from reagent.runners.fb.fb_batch_runner import FbBatchRunner as BatchRunner except ImportError: from reagent.runners.oss_batch_runner import OssBatchRunner as BatchRunner # for seeding the environment SEED = 0 logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) """ Put on-policy gym tests here in the format (test name, path to yaml config). Format path to be: "configs/<env_name>/<model_name>_<env_name>_online.yaml." NOTE: These tests should ideally finish quickly (within 10 minutes) since they are unit tests which are run many times. """ GYM_TESTS = [ ("Discrete DQN Cartpole", "configs/cartpole/discrete_dqn_cartpole_online.yaml"), ("Discrete C51 Cartpole", "configs/cartpole/discrete_c51_cartpole_online.yaml"), ("Discrete QR Cartpole", "configs/cartpole/discrete_qr_cartpole_online.yaml"), ( "Discrete DQN Open Gridworld", "configs/open_gridworld/discrete_dqn_open_gridworld.yaml", ), ("SAC Pendulum", "configs/pendulum/sac_pendulum_online.yaml"), ("TD3 Pendulum", "configs/pendulum/td3_pendulum_online.yaml"), ("Parametric DQN Cartpole", "configs/cartpole/parametric_dqn_cartpole_online.yaml"), ( "Parametric SARSA Cartpole", "configs/cartpole/parametric_sarsa_cartpole_online.yaml", ), ( "Sparse DQN Changing Arms", "configs/sparse/discrete_dqn_changing_arms_online.yaml", ), ("SlateQ RecSim", "configs/recsim/slate_q_recsim_online.yaml"), ("PossibleActionsMask DQN", "configs/functionality/dqn_possible_actions_mask.yaml"), ] curr_dir = os.path.dirname(__file__) class TestGym(HorizonTestBase): # pyre-fixme[16]: Module `parameterized` has no attribute `expand`. @parameterized.expand(GYM_TESTS) def test_gym_cpu(self, name: str, config_path: str): logger.info(f"Starting {name} on CPU") self.run_from_config( run_test=run_test, config_path=os.path.join(curr_dir, config_path), use_gpu=False, ) logger.info(f"{name} passes!") # pyre-fixme[16]: Module `parameterized` has no attribute `expand`. @parameterized.expand(GYM_TESTS) @pytest.mark.serial # pyre-fixme[56]: Argument `not torch.cuda.is_available()` to decorator factory # `unittest.skipIf` could not be resolved in a global scope. 
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") def test_gym_gpu(self, name: str, config_path: str): logger.info(f"Starting {name} on GPU") self.run_from_config( run_test=run_test, config_path=os.path.join(curr_dir, config_path), use_gpu=True, ) logger.info(f"{name} passes!") def run_test( env: Env__Union, model: ModelManager__Union, replay_memory_size: int, train_every_ts: int, train_after_ts: int, num_train_episodes: int, passing_score_bar: float, num_eval_episodes: int, use_gpu: bool, ): env = env.value env.seed(SEED) env.action_space.seed(SEED) normalization = build_normalizer(env) logger.info(f"Normalization is: \n{pprint.pformat(normalization)}") manager: ModelManager = model.value runner = BatchRunner(use_gpu, manager, RewardOptions(), normalization) trainer = runner.initialize_trainer() reporter = manager.get_reporter() trainer.reporter = reporter training_policy = manager.create_policy(trainer) replay_buffer = ReplayBuffer( replay_capacity=replay_memory_size, batch_size=trainer.minibatch_size ) device = torch.device("cuda") if use_gpu else torch.device("cpu") # first fill the replay buffer to burn_in train_after_ts = max(train_after_ts, trainer.minibatch_size) fill_replay_buffer( env=env, replay_buffer=replay_buffer, desired_size=train_after_ts ) post_step = train_with_replay_buffer_post_step( replay_buffer=replay_buffer, env=env, trainer=trainer, training_freq=train_every_ts, batch_size=trainer.minibatch_size, device=device, ) agent = Agent.create_for_env( env, policy=training_policy, post_transition_callback=post_step, device=device ) writer = SummaryWriter() with summary_writer_context(writer): train_rewards = [] for i in range(num_train_episodes): trajectory = run_episode( env=env, agent=agent, mdp_id=i, max_steps=env.max_steps ) ep_reward = trajectory.calculate_cumulative_reward() train_rewards.append(ep_reward) logger.info( f"Finished training episode {i} (len {len(trajectory)})" f" with reward {ep_reward}." ) logger.info("============Train rewards=============") logger.info(train_rewards) logger.info(f"average: {np.mean(train_rewards)};\tmax: {np.max(train_rewards)}") # Check whether the max score passed the score bar; we explore during training # the return could be bad (leading to flakiness in C51 and QRDQN). assert np.max(train_rewards) >= passing_score_bar, ( f"max reward ({np.max(train_rewards)})after training for " f"{len(train_rewards)} episodes is less than < {passing_score_bar}.\n" ) serving_policy = manager.create_serving_policy(normalization, trainer) agent = Agent.create_for_env_with_serving_policy(env, serving_policy) eval_rewards = evaluate_for_n_episodes( n=num_eval_episodes, env=env, agent=agent, max_steps=env.max_steps ).squeeze(1) logger.info("============Eval rewards==============") logger.info(eval_rewards) mean_eval = np.mean(eval_rewards) logger.info(f"average: {mean_eval};\tmax: {np.max(eval_rewards)}") assert ( mean_eval >= passing_score_bar ), f"Eval reward is {mean_eval}, less than < {passing_score_bar}.\n" if __name__ == "__main__": unittest.main()
[((1286, 1313), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1303, 1313), False, 'import logging\n'), ((2639, 2664), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2654, 2664), False, 'import os\n'), ((2776, 2807), 'parameterized.parameterized.expand', 'parameterized.expand', (['GYM_TESTS'], {}), '(GYM_TESTS)\n', (2796, 2807), False, 'from parameterized import parameterized\n'), ((3188, 3219), 'parameterized.parameterized.expand', 'parameterized.expand', (['GYM_TESTS'], {}), '(GYM_TESTS)\n', (3208, 3219), False, 'from parameterized import parameterized\n'), ((4117, 4138), 'reagent.gym.utils.build_normalizer', 'build_normalizer', (['env'], {}), '(env)\n', (4133, 4138), False, 'from reagent.gym.utils import build_normalizer, fill_replay_buffer\n'), ((4513, 4601), 'reagent.replay_memory.circular_replay_buffer.ReplayBuffer', 'ReplayBuffer', ([], {'replay_capacity': 'replay_memory_size', 'batch_size': 'trainer.minibatch_size'}), '(replay_capacity=replay_memory_size, batch_size=trainer.\n minibatch_size)\n', (4525, 4601), False, 'from reagent.replay_memory.circular_replay_buffer import ReplayBuffer\n'), ((4797, 4887), 'reagent.gym.utils.fill_replay_buffer', 'fill_replay_buffer', ([], {'env': 'env', 'replay_buffer': 'replay_buffer', 'desired_size': 'train_after_ts'}), '(env=env, replay_buffer=replay_buffer, desired_size=\n train_after_ts)\n', (4815, 4887), False, 'from reagent.gym.utils import build_normalizer, fill_replay_buffer\n'), ((4914, 5092), 'reagent.gym.agents.post_step.train_with_replay_buffer_post_step', 'train_with_replay_buffer_post_step', ([], {'replay_buffer': 'replay_buffer', 'env': 'env', 'trainer': 'trainer', 'training_freq': 'train_every_ts', 'batch_size': 'trainer.minibatch_size', 'device': 'device'}), '(replay_buffer=replay_buffer, env=env,\n trainer=trainer, training_freq=train_every_ts, batch_size=trainer.\n minibatch_size, device=device)\n', (4948, 5092), False, 'from reagent.gym.agents.post_step import train_with_replay_buffer_post_step\n'), ((5152, 5257), 'reagent.gym.agents.agent.Agent.create_for_env', 'Agent.create_for_env', (['env'], {'policy': 'training_policy', 'post_transition_callback': 'post_step', 'device': 'device'}), '(env, policy=training_policy, post_transition_callback=\n post_step, device=device)\n', (5172, 5257), False, 'from reagent.gym.agents.agent import Agent\n'), ((5281, 5296), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (5294, 5296), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((6426, 6487), 'reagent.gym.agents.agent.Agent.create_for_env_with_serving_policy', 'Agent.create_for_env_with_serving_policy', (['env', 'serving_policy'], {}), '(env, serving_policy)\n', (6466, 6487), False, 'from reagent.gym.agents.agent import Agent\n'), ((6730, 6751), 'numpy.mean', 'np.mean', (['eval_rewards'], {}), '(eval_rewards)\n', (6737, 6751), True, 'import numpy as np\n'), ((6981, 6996), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6994, 6996), False, 'import unittest\n'), ((4295, 4310), 'reagent.core.types.RewardOptions', 'RewardOptions', ([], {}), '()\n', (4308, 4310), False, 'from reagent.core.types import RewardOptions\n'), ((4625, 4645), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (4637, 4645), False, 'import torch\n'), ((4662, 4681), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (4674, 4681), False, 'import torch\n'), ((5306, 5336), 'reagent.tensorboardX.summary_writer_context', 
'summary_writer_context', (['writer'], {}), '(writer)\n', (5328, 5336), False, 'from reagent.tensorboardX import summary_writer_context\n'), ((6140, 6161), 'numpy.max', 'np.max', (['train_rewards'], {}), '(train_rewards)\n', (6146, 6161), True, 'import numpy as np\n'), ((3419, 3444), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3442, 3444), False, 'import torch\n'), ((5434, 5502), 'reagent.gym.runners.gymrunner.run_episode', 'run_episode', ([], {'env': 'env', 'agent': 'agent', 'mdp_id': 'i', 'max_steps': 'env.max_steps'}), '(env=env, agent=agent, mdp_id=i, max_steps=env.max_steps)\n', (5445, 5502), False, 'from reagent.gym.runners.gymrunner import evaluate_for_n_episodes, run_episode\n'), ((6209, 6230), 'numpy.max', 'np.max', (['train_rewards'], {}), '(train_rewards)\n', (6215, 6230), True, 'import numpy as np\n'), ((6508, 6603), 'reagent.gym.runners.gymrunner.evaluate_for_n_episodes', 'evaluate_for_n_episodes', ([], {'n': 'num_eval_episodes', 'env': 'env', 'agent': 'agent', 'max_steps': 'env.max_steps'}), '(n=num_eval_episodes, env=env, agent=agent,\n max_steps=env.max_steps)\n', (6531, 6603), False, 'from reagent.gym.runners.gymrunner import evaluate_for_n_episodes, run_episode\n'), ((2997, 3032), 'os.path.join', 'os.path.join', (['curr_dir', 'config_path'], {}), '(curr_dir, config_path)\n', (3009, 3032), False, 'import os\n'), ((3657, 3692), 'os.path.join', 'os.path.join', (['curr_dir', 'config_path'], {}), '(curr_dir, config_path)\n', (3669, 3692), False, 'import os\n'), ((4178, 4207), 'pprint.pformat', 'pprint.pformat', (['normalization'], {}), '(normalization)\n', (4192, 4207), False, 'import pprint\n'), ((5917, 5939), 'numpy.mean', 'np.mean', (['train_rewards'], {}), '(train_rewards)\n', (5924, 5939), True, 'import numpy as np\n'), ((5949, 5970), 'numpy.max', 'np.max', (['train_rewards'], {}), '(train_rewards)\n', (5955, 5970), True, 'import numpy as np\n'), ((6799, 6819), 'numpy.max', 'np.max', (['eval_rewards'], {}), '(eval_rewards)\n', (6805, 6819), True, 'import numpy as np\n')]
CommissarSilver/TC-Bot
src/deep_dialog/usersims/usersim.py
4579706a18028b5da9b8a7807fb2e2d4043dcaf8
""" Created on June 7, 2016 a rule-based user simulator @author: xiul, t-zalipt """ import random class UserSimulator: """ Parent class for all user sims to inherit from """ def __init__(self, movie_dict=None, act_set=None, slot_set=None, start_set=None, params=None): """ Constructor shared by all user simulators """ self.movie_dict = movie_dict self.act_set = act_set self.slot_set = slot_set self.start_set = start_set self.max_turn = params['max_turn'] self.slot_err_probability = params['slot_err_probability'] self.slot_err_mode = params['slot_err_mode'] self.intent_err_probability = params['intent_err_probability'] def initialize_episode(self): """ Initialize a new episode (dialog)""" print ("initialize episode called, generating goal") self.goal = random.choice(self.start_set) self.goal['request_slots']['ticket'] = 'UNK' episode_over, user_action = self._sample_action() assert (episode_over != 1),' but we just started' return user_action def next(self, system_action): pass def set_nlg_model(self, nlg_model): self.nlg_model = nlg_model def set_nlu_model(self, nlu_model): self.nlu_model = nlu_model def add_nl_to_action(self, user_action): """ Add NL to User Dia_Act """ user_nlg_sentence = self.nlg_model.convert_diaact_to_nl(user_action, 'usr') user_action['nl'] = user_nlg_sentence if self.simulator_act_level == 1: user_nlu_res = self.nlu_model.generate_dia_act(user_action['nl']) # NLU if user_nlu_res != None: #user_nlu_res['diaact'] = user_action['diaact'] # or not? user_action.update(user_nlu_res)
[((939, 968), 'random.choice', 'random.choice', (['self.start_set'], {}), '(self.start_set)\n', (952, 968), False, 'import random\n')]
ugolbck/KendallW
kendall_w/__init__.py
ace7c68d6c3c2dfcf6b3ee5fb3817240ed050c9b
from .kendall_w import compute_w

__version__ = (1, 0, 0)
[]
nielsrolf/django-error-logs
admin.py
4e516e021d34e255f1282c98bffa53a265c48bab
from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(ErrorGroup)
admin.site.register(Error)
[((84, 115), 'django.contrib.admin.site.register', 'admin.site.register', (['ErrorGroup'], {}), '(ErrorGroup)\n', (103, 115), False, 'from django.contrib import admin\n'), ((116, 142), 'django.contrib.admin.site.register', 'admin.site.register', (['Error'], {}), '(Error)\n', (135, 142), False, 'from django.contrib import admin\n')]
mcpython4-coding/core
mcpython/common/block/ISlab.py
e4c4f59dab68c90e2028db3add2e5065116bf4a6
""" mcpython - a minecraft clone written in python licenced under the MIT-licence (https://github.com/mcpython4-coding/core) Contributors: uuk, xkcdjerry (inactive) Based on the game of fogleman (https://github.com/fogleman/Minecraft), licenced under the MIT-licence Original game "minecraft" by Mojang Studios (www.minecraft.net), licenced under the EULA (https://account.mojang.com/documents/minecraft_eula) Mod loader inspired by "Minecraft Forge" (https://github.com/MinecraftForge/MinecraftForge) and similar This project is not official by mojang and does not relate to it. """ import mcpython.common.block.AbstractBlock import mcpython.engine.physics.AxisAlignedBoundingBox import mcpython.util.enums from mcpython.util.enums import SlabModes BBOX_DICT = { SlabModes.TOP: mcpython.engine.physics.AxisAlignedBoundingBox.AxisAlignedBoundingBox( (1, 0.5, 1), (0, 0.5, 0) ), SlabModes.BOTTOM: mcpython.engine.physics.AxisAlignedBoundingBox.AxisAlignedBoundingBox( (1, 0.5, 1) ), SlabModes.DOUBLE: mcpython.engine.physics.AxisAlignedBoundingBox.FULL_BLOCK_BOUNDING_BOX, } class ISlab(mcpython.common.block.AbstractBlock.AbstractBlock): """ Base class for slabs """ IS_SOLID = False DEFAULT_FACE_SOLID = 0 def __init__(self): super().__init__() self.type = SlabModes.TOP async def on_block_added(self): if self.real_hit and self.real_hit[1] - self.position[1] > 0: self.type = SlabModes.TOP else: self.type = SlabModes.BOTTOM await self.schedule_network_update() def get_model_state(self): return {"type": self.type.name.lower()} def set_model_state(self, state: dict): if "type" in state: self.type = SlabModes[state["type"].upper()] DEBUG_WORLD_BLOCK_STATES = [{"type": x.name.upper()} for x in SlabModes] async def on_player_interact( self, player, itemstack, button, modifiers, exact_hit ) -> bool: # todo: add half -> double convert return False def get_view_bbox(self): return BBOX_DICT[self.type]
[]
stancsz/web-development-project-ensf-607
CourseOutlineBackend/courseoutline/serializers.py
03b11df4971afd4f27fee54a1800a40d4cc10240
from rest_framework import serializers from .models import * class CoordinatorSerializer(serializers.ModelSerializer): # ModelID = serializers.CharField(max_length=100, required=True) CourseID = serializers.CharField(max_length=100, required=True) FName = serializers.CharField(max_length=100, required=False) LName = serializers.CharField(max_length=100, required=False) Phone = serializers.CharField(max_length=100, required=False) Office = serializers.CharField(max_length=100, required=False) Email = serializers.CharField(max_length=100, required=False) def create(self, validated_data): # Once the request data has been validated, we can create a todo item instance in the database return Coordinator.objects.create( ModelID=validated_data.get('ModelID'), CourseID=validated_data.get('CourseID'), FName=validated_data.get('FName'), LName=validated_data.get('LName'), Phone=validated_data.get('Phone'), Office=validated_data.get('Office'), Email=validated_data.get('Email') ) def update(self, instance, validated_data): # Once the request data has been validated, we can update the todo item instance in the database instance.ModelID = validated_data.get('ModelID', instance.ModelID) instance.CourseID = validated_data.get('CourseID', instance.CourseID) instance.FName = validated_data.get('FName', instance.FName) instance.LName = validated_data.get('LName', instance.LName) instance.Phone = validated_data.get('Phone', instance.Phone) instance.Office = validated_data.get('Office', instance.Office) instance.Email = validated_data.get('Email', instance.Email) instance.save() return instance class Meta: model = Coordinator fields = ( 'ModelID', 'CourseID', 'FName', 'LName', 'Phone', 'Office', 'Email' ) class InfoSerializer(serializers.ModelSerializer): # ModelID = serializers.CharField(max_length=100, required=True) CourseID = serializers.CharField(max_length=100, required=True) GradeNotes = serializers.CharField(max_length=5000, required=False) Examination = serializers.CharField(max_length=5000, required=False) CourseDescription = serializers.CharField(max_length=5000, required=False) UseCalc = serializers.CharField(max_length=100, required=False) def create(self, validated_data): return Info.objects.create( ModelID=validated_data.get('ModelID'), CourseID=validated_data.get('CourseID'), GradeNotes=validated_data.get('GradeNotes'), Examination=validated_data.get('Examination'), CourseDescription=validated_data.get('CourseDescription'), UseCalc=validated_data.get('UseCalc') ) def update(self, instance, validated_data): instance.ModelID = validated_data.get('ModelID', instance.ModelID) instance.CourseID = validated_data.get('CourseID', instance.CourseID) instance.GradeNotes = validated_data.get('GradeNotes', instance.GradeNotes) instance.Examination = validated_data.get('Examination', instance.Examination) instance.CourseDescription = validated_data.get('CourseDescription', instance.CourseDescription) instance.UseCalc = validated_data.get('UseCalc', instance.UseCalc) instance.save() return instance class Meta: model = Info fields = ( 'ModelID', 'CourseID', 'GradeNotes', 'Examination', 'CourseDescription', 'UseCalc' ) class GradeDeterminationSerializer(serializers.ModelSerializer): # ModelID = serializers.CharField(max_length=100, required=True) CourseID = serializers.CharField(max_length=100, required=True) Component = serializers.CharField(max_length=100, required=False) OutcomeEvaluated = serializers.CharField(max_length=100, required=False) Weight = serializers.IntegerField(required=False) def create(self, 
validated_data): # Once the request data has been validated, we can create a todo item instance in the database return GradeDetermination.objects.create( ModelID=validated_data.get('ModelID'), CourseID=validated_data.get('CourseID'), Component=validated_data.get('Component'), OutcomeEvaluated=validated_data.get('OutcomeEvaluated'), Weight=validated_data.get('Weight'), ) def update(self, instance, validated_data): # Once the request data has been validated, we can update the todo item instance in the database instance.ModelID = validated_data.get('ModelID', instance.ModelID) instance.CourseID = validated_data.get('CourseID', instance.CourseID) instance.Component = validated_data.get('Component', instance.Component) instance.OutcomeEvaluated = validated_data.get('OutcomeEvaluated', instance.OutcomeEvaluated) instance.Weight = validated_data.get('Weight', instance.Weight) instance.save() return instance class Meta: model = GradeDetermination fields = ( 'ModelID', 'CourseID', 'Component', 'OutcomeEvaluated', 'Weight' ) class OutcomeSerializer(serializers.ModelSerializer): # ModelID = serializers.CharField(max_length=100, required=True) CourseID = serializers.CharField(max_length=100, required=True) OutcomeNum = serializers.IntegerField(required=False) # removed max_length=100 Description = serializers.CharField(max_length=500, required=False) # Changed max_length to 500 GraduateAttribute = serializers.CharField(max_length=100, required=False) InstructionLvl = serializers.CharField(max_length=100, required=False) def create(self, validated_data): return Outcome.objects.create( ModelID=validated_data.get('ModelID'), CourseID=validated_data.get('CourseID'), OutcomeNum=validated_data.get('OutcomeNum'), Description=validated_data.get('Description'), GraduateAttribute=validated_data.get('GraduateAttribute'), InstructionLvl=validated_data.get('InstructionLvl'), ) def update(self, instance, validated_data): instance.ModelID = validated_data.get('ModelID', instance.ModelID) instance.CourseID = validated_data.get('CourseID', instance.CourseID) instance.OutcomeNum = validated_data.get('OutcomeNum', instance.OutcomeNum) instance.Description = validated_data.get('Description', instance.Description) instance.GraduateAttribute = validated_data.get('GraduateAttribute', instance.GraduateAttribute) instance.InstructionLvl = validated_data.get('InstructionLvl', instance.InstructionLvl) instance.save() return instance class Meta: model = Outcome fields = ( 'ModelID', 'CourseID', 'OutcomeNum', 'Description', 'GraduateAttribute', 'InstructionLvl' ) class TimetableSerializer(serializers.ModelSerializer): # ModelID = serializers.CharField(max_length=100, required=True) CourseID = serializers.CharField(max_length=100, required=True) SectionNum = serializers.CharField(max_length=100, required=False) Days = serializers.CharField(max_length=100, required=False) Time = serializers.CharField(max_length=100, required=False) Location = serializers.CharField(max_length=100, required=False) def create(self, validated_data): return Timetable.objects.create( ModelID=validated_data.get('ModelID'), CourseID=validated_data.get('CourseID'), SectionNum=validated_data.get('SectionNum'), Days=validated_data.get('Days'), Time=validated_data.get('Time'), Location=validated_data.get('Location'), ) def update(self, instance, validated_data): instance.ModelID = validated_data.get('ModelID', instance.ModelID) instance.CourseID = validated_data.get('CourseID', instance.CourseID) instance.SectionNum = validated_data.get('SectionNum', 
instance.SectionNum) instance.Days = validated_data.get('Days', instance.Days) instance.Time = validated_data.get('Time', instance.Time) instance.Location = validated_data.get('Location', instance.Location) instance.save() return instance class Meta: model = Timetable fields = ( 'ModelID', 'CourseID', 'SectionNum', 'Days', 'Time', 'Location' ) class GradeDistributionSerializer(serializers.ModelSerializer): # ModelID = serializers.CharField(max_length=100, required=True) CourseID = serializers.CharField(max_length=100, required=True) LowerLimit = serializers.IntegerField(required=False) # removed max_length = 100 UpperLimit = serializers.IntegerField(required=False) # removed max_length = 100 LetterGrade = serializers.CharField(max_length=100, required=False) def create(self, validated_data): return GradeDistribution.objects.create( ModelID=validated_data.get('ModelID'), CourseID=validated_data.get('CourseID'), LowerLimit=validated_data.get('LowerLimit'), UpperLimit=validated_data.get('UpperLimit'), LetterGrade=validated_data.get('LetterGrade'), ) def update(self, instance, validated_data): instance.ModelID = validated_data.get('ModelID', instance.ModelID) instance.CourseID = validated_data.get('CourseID', instance.CourseID) instance.LowerLimit = validated_data.get('LowerLimit', instance.LowerLimit) instance.UpperLimit = validated_data.get('UpperLimit', instance.UpperLimit) instance.LetterGrade = validated_data.get('LetterGrade', instance.LetterGrade) instance.save() return instance class Meta: model = GradeDistribution fields = ( 'ModelID', 'CourseID', 'LowerLimit', 'UpperLimit', 'LetterGrade' ) class LectureSerializer(serializers.ModelSerializer): # ModelID = serializers.CharField(max_length=100, required=True) CourseID = serializers.CharField(max_length=100, required=True) LectureNum = serializers.CharField(max_length=100, required=False) FName = serializers.CharField(max_length=100, required=False) LName = serializers.CharField(max_length=100, required=False) Phone = serializers.CharField(max_length=100, required=False) Office = serializers.CharField(max_length=100, required=False) Email = serializers.CharField(max_length=100, required=False) def create(self, validated_data): return Lecture.objects.create( ModelID=validated_data.get('ModelID'), CourseID=validated_data.get('CourseID'), LectureNum=validated_data.get('LectureNum'), FName=validated_data.get('FName'), LName=validated_data.get('LName'), Phone=validated_data.get('Phone'), Office=validated_data.get('Office'), Email=validated_data.get('Email'), ) def update(self, instance, validated_data): instance.ModelID = validated_data.get('ModelID', instance.ModelID) instance.CourseID = validated_data.get('CourseID', instance.CourseID) instance.LectureNum = validated_data.get('LectureNum', instance.LectureNum) instance.FName = validated_data.get('FName', instance.FName) instance.LName = validated_data.get('LName', instance.LName) instance.Phone = validated_data.get('Phone', instance.Phone) instance.Office = validated_data.get('Office', instance.Office) instance.Email = validated_data.get('Email', instance.Email) instance.save() return instance class Meta: model = Lecture fields = ( 'ModelID', 'CourseID', 'LectureNum', 'FName', 'LName', 'Phone', 'Office', 'Email' ) class TutorialSerializer(serializers.ModelSerializer): # ModelID = serializers.CharField(max_length=100, required=True) CourseID = serializers.CharField(max_length=100, required=True) TutorialNum = serializers.CharField(max_length=100, required=False) # Changed Tutorial Num to 
CharField FName = serializers.CharField(max_length=100, required=False) # Changed FName to CharField LName = serializers.CharField(max_length=100, required=False) Phone = serializers.CharField(max_length=100, required=False) Office = serializers.CharField(max_length=100, required=False) Email = serializers.CharField(max_length=100, required=False) def create(self, validated_data): return Tutorial.objects.create( ModelID=validated_data.get('ModelID'), CourseID=validated_data.get('CourseID'), TutorialNum=validated_data.get('TutorialNum'), FName=validated_data.get('FName'), LName=validated_data.get('LName'), Phone=validated_data.get('Phone'), Office=validated_data.get('Office'), Email=validated_data.get('Email'), ) def update(self, instance, validated_data): instance.ModelID = validated_data.get('ModelID', instance.ModelID) instance.CourseID = validated_data.get('CourseID', instance.CourseID) instance.TutorialNum = validated_data.get('TutorialNum', instance.TutorialNum) instance.FName = validated_data.get('FName', instance.FName) instance.LName = validated_data.get('LName', instance.LName) instance.Phone = validated_data.get('Phone', instance.Phone) instance.Office = validated_data.get('Office', instance.Office) instance.Email = validated_data.get('Email', instance.Email) instance.save() return instance class Meta: model = Tutorial fields = ( 'ModelID', 'CourseID', 'TutorialNum', 'FName', 'LName', 'Phone', 'Office', 'Email' ) class CourseSerializer(serializers.ModelSerializer): # ModelID = serializers.CharField(max_length=100, required=True) CourseID = serializers.CharField(max_length=100, required=True) CourseHours = serializers.CharField(max_length=100, required=False) # Changed CourseHours to CharField CourseName = serializers.CharField(max_length=100, required=False) # Changed CourseName to CharField CalenderRefrence = serializers.CharField(max_length=100, required=False) AcademicCredit = serializers.IntegerField(required=False) # Changed AcademicCredit to IntegerField DateCreated = serializers.CharField(max_length=100, required=False) def create(self, validated_data): return Course.objects.create( ModelID=validated_data.get('ModelID'), CourseID=validated_data.get('CourseID'), CourseHours=validated_data.get('CourseHours'), CourseName=validated_data.get('CourseName'), CalenderRefrence=validated_data.get('CalenderRefrence'), AcademicCredit=validated_data.get('AcademicCredit'), DateCreated=validated_data.get('DateCreated'), ) def update(self, instance, validated_data): instance.ModelID = validated_data.get('ModelID', instance.ModelID) instance.CourseID = validated_data.get('CourseID', instance.CourseID) instance.CourseHours = validated_data.get('CourseHours', instance.CourseHours) instance.CourseName = validated_data.get('CourseName', instance.CourseName) instance.CalenderRefrence = validated_data.get('CalenderRefrence', instance.CalenderRefrence) instance.AcademicCredit = validated_data.get('AcademicCredit', instance.AcademicCredit) instance.DateCreated = validated_data.get('DateCreated', instance.DateCreated) instance.save() return instance class Meta: model = Course fields = ( 'ModelID', 'CourseID', 'CourseHours', 'CourseName', 'CalenderRefrence', 'AcademicCredit', 'DateCreated' ) class TextbookSerializer(serializers.ModelSerializer): # ModelID = serializers.CharField(max_length=100, required=True) CourseID = serializers.CharField(max_length=100, required=True) TITLE = serializers.CharField(max_length=100, required=False) Publisher = serializers.CharField(max_length=100, required=False) 
Author = serializers.CharField(max_length=100, required=False) Edition = serializers.CharField(max_length=100, required=False) type = serializers.CharField(max_length=100, required=False) def create(self, validated_data): return Textbook.objects.create( ModelID=validated_data.get('ModelID'), CourseID=validated_data.get('CourseID'), TITLE=validated_data.get('TITLE'), Publisher=validated_data.get('Publisher'), Author=validated_data.get('Author'), Edition=validated_data.get('Edition'), type=validated_data.get('type'), ) def update(self, instance, validated_data): instance.ModelID = validated_data.get('ModelID', instance.ModelID) instance.CourseID = validated_data.get('CourseID', instance.CourseID) instance.TITLE = validated_data.get('TITLE', instance.TITLE) instance.Publisher = validated_data.get('Publisher', instance.Publisher) instance.Author = validated_data.get('Author', instance.Author) instance.Edition = validated_data.get('Edition', instance.Edition) instance.type = validated_data.get('type', instance.type) instance.save() return instance class Meta: model = Textbook fields = ( 'ModelID', 'CourseID', 'TITLE', 'Publisher', 'Author', 'Edition', 'type' ) class AuWeightSerializer(serializers.ModelSerializer): # ModelID = serializers.CharField(max_length=100, required=True) CourseID = serializers.CharField(max_length=100, required=True) Category = serializers.CharField(max_length=100, required=True) AU = serializers.IntegerField(required=False) def create(self, validated_data): return AuWeight.objects.create( ModelID=validated_data.get('ModelID'), CourseID=validated_data.get('CourseID'), Category=validated_data.get('Category'), AU=validated_data.get('AU'), ) def update(self, instance, validated_data): instance.ModelID = validated_data.get('ModelID', instance.ModelID) instance.CourseID = validated_data.get('CourseID', instance.CourseID) instance.Category = validated_data.get('Category', instance.Category) instance.AU = validated_data.get('AU', instance.AU) instance.save() return instance class Meta: model = AuWeight fields = ( 'ModelID', 'CourseID', 'Category', 'AU' ) class ContentCategorySerializer(serializers.ModelSerializer): # ModelID = serializers.CharField(max_length=100, required=True) CourseID = serializers.CharField(max_length=100, required=True) CategoryType = serializers.CharField(max_length=100, required=True) Element = serializers.CharField(max_length=100, required=True) def create(self, validated_data): return ContentCategory.objects.create( ModelID=validated_data.get('ModelID'), CourseID=validated_data.get('CourseID'), CategoryType=validated_data.get('CategoryType'), Element=validated_data.get('Element'), ) def update(self, instance, validated_data): instance.ModelID = validated_data.get('ModelID', instance.ModelID) instance.CourseID = validated_data.get('CourseID', instance.CourseID) instance.CategoryType = validated_data.get('CategoryType', instance.CategoryType) instance.Element = validated_data.get('Element', instance.Element) instance.save() return instance class Meta: model = ContentCategory fields = ( 'ModelID', 'CourseID', 'CategoryType', 'Element' ) class LabSerializer(serializers.ModelSerializer): # ModelID = serializers.CharField(max_length=100, required=True) CourseID = serializers.CharField(max_length=100, required=True) LabNum = serializers.CharField(max_length=100, required=True) NumberOfLabs = serializers.IntegerField(required=False) LabType = serializers.CharField(max_length=100, required=True) SafetyExamined = serializers.CharField(max_length=100, 
required=True) SafetyTaught = serializers.CharField(max_length=100, required=True) FName = serializers.CharField(max_length=100, required=True) LName = serializers.CharField(max_length=100, required=True) Phone = serializers.CharField(max_length=100, required=True) Office = serializers.CharField(max_length=100, required=True) Email = serializers.CharField(max_length=100, required=True) def create(self, validated_data): return Lab.objects.create( ModelID=validated_data.get('ModelID'), CourseID=validated_data.get('CourseID'), LabNum=validated_data.get('LabNum'), NumberOfLabs=validated_data.get('NumberOfLabs'), LabType=validated_data.get('LabType'), SafetyExamined=validated_data.get('SafetyExamined'), SafetyTaught=validated_data.get('SafetyTaught'), FName=validated_data.get('FName'), LName=validated_data.get('LName'), Phone=validated_data.get('Phone'), Office=validated_data.get('Office'), Email=validated_data.get('Email'), ) def update(self, instance, validated_data): instance.ModelID = validated_data.get('ModelID', instance.ModelID) instance.CourseID = validated_data.get('CourseID', instance.CourseID) instance.LabNum = validated_data.get('LabNum', instance.LabNum) instance.NumberOfLabs = validated_data.get('NumberOfLabs', instance.NumberOfLabs) instance.LabType = validated_data.get('LabType', instance.LabType) instance.SafetyExamined = validated_data.get('SafetyExamined', instance.SafetyExamined) instance.SafetyTaught = validated_data.get('SafetyTaught', instance.SafetyTaught) instance.FName = validated_data.get('FName', instance.FName) instance.LName = validated_data.get('LName', instance.LName) instance.Phone = validated_data.get('Phone', instance.Phone) instance.Office = validated_data.get('Office', instance.Office) instance.Email = validated_data.get('Email', instance.Email) instance.save() return instance class Meta: model = Lab fields = ( 'ModelID', 'CourseID', 'LabNum', 'NumberOfLabs', 'LabType', 'SafetyExamined', 'SafetyTaught', 'FName', 'LName', 'Phone', 'Office', 'Email' ) class SectionSerializer(serializers.ModelSerializer): # ModelID = serializers.CharField(max_length=100, required=True) CourseID = serializers.CharField(max_length=100, required=True) SectionNumber = serializers.CharField(max_length=100, required=False) Students = serializers.IntegerField(required=False) Hours = serializers.IntegerField(required=False) type = serializers.CharField(max_length=100, required=True) def create(self, validated_data): return Section.objects.create( ModelID=validated_data.get('ModelID'), CourseID=validated_data.get('CourseID'), SectionNumber=validated_data.get('SectionNumber'), Students=validated_data.get('Students'), Hours=validated_data.get('Hours'), type=validated_data.get('type'), ) def update(self, instance, validated_data): instance.ModelID = validated_data.get('ModelID', instance.ModelID) instance.CourseID = validated_data.get('CourseID', instance.CourseID) instance.SectionNumber = validated_data.get('SectionNumber', instance.SectionNumber) instance.Students = validated_data.get('Students', instance.Students) instance.Hours = validated_data.get('Hours', instance.Hours) instance.type = validated_data.get('type', instance.type) instance.save() return instance class Meta: model = Section fields = ( 'ModelID', 'CourseID', 'SectionNumber', 'Students', 'Hours', 'type' )
[((206, 258), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (227, 258), False, 'from rest_framework import serializers\n'), ((271, 324), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (292, 324), False, 'from rest_framework import serializers\n'), ((337, 390), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (358, 390), False, 'from rest_framework import serializers\n'), ((403, 456), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (424, 456), False, 'from rest_framework import serializers\n'), ((470, 523), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (491, 523), False, 'from rest_framework import serializers\n'), ((536, 589), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (557, 589), False, 'from rest_framework import serializers\n'), ((2191, 2243), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (2212, 2243), False, 'from rest_framework import serializers\n'), ((2261, 2315), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(5000)', 'required': '(False)'}), '(max_length=5000, required=False)\n', (2282, 2315), False, 'from rest_framework import serializers\n'), ((2334, 2388), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(5000)', 'required': '(False)'}), '(max_length=5000, required=False)\n', (2355, 2388), False, 'from rest_framework import serializers\n'), ((2413, 2467), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(5000)', 'required': '(False)'}), '(max_length=5000, required=False)\n', (2434, 2467), False, 'from rest_framework import serializers\n'), ((2482, 2535), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (2503, 2535), False, 'from rest_framework import serializers\n'), ((3936, 3988), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (3957, 3988), False, 'from rest_framework import serializers\n'), ((4005, 4058), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (4026, 4058), False, 'from rest_framework import serializers\n'), ((4082, 4135), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (4103, 4135), False, 'from rest_framework import serializers\n'), ((4149, 4189), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'required': '(False)'}), '(required=False)\n', (4173, 4189), False, 'from rest_framework import serializers\n'), ((5625, 5677), 
'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (5646, 5677), False, 'from rest_framework import serializers\n'), ((5695, 5735), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'required': '(False)'}), '(required=False)\n', (5719, 5735), False, 'from rest_framework import serializers\n'), ((5780, 5833), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(500)', 'required': '(False)'}), '(max_length=500, required=False)\n', (5801, 5833), False, 'from rest_framework import serializers\n'), ((5887, 5940), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (5908, 5940), False, 'from rest_framework import serializers\n'), ((5962, 6015), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (5983, 6015), False, 'from rest_framework import serializers\n'), ((7456, 7508), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (7477, 7508), False, 'from rest_framework import serializers\n'), ((7526, 7579), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (7547, 7579), False, 'from rest_framework import serializers\n'), ((7591, 7644), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (7612, 7644), False, 'from rest_framework import serializers\n'), ((7656, 7709), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (7677, 7709), False, 'from rest_framework import serializers\n'), ((7725, 7778), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (7746, 7778), False, 'from rest_framework import serializers\n'), ((9075, 9127), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (9096, 9127), False, 'from rest_framework import serializers\n'), ((9145, 9185), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'required': '(False)'}), '(required=False)\n', (9169, 9185), False, 'from rest_framework import serializers\n'), ((9231, 9271), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'required': '(False)'}), '(required=False)\n', (9255, 9271), False, 'from rest_framework import serializers\n'), ((9318, 9371), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (9339, 9371), False, 'from rest_framework import serializers\n'), ((10597, 10649), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (10618, 10649), False, 'from rest_framework import serializers\n'), ((10667, 10720), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': 
'(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (10688, 10720), False, 'from rest_framework import serializers\n'), ((10733, 10786), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (10754, 10786), False, 'from rest_framework import serializers\n'), ((10799, 10852), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (10820, 10852), False, 'from rest_framework import serializers\n'), ((10865, 10918), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (10886, 10918), False, 'from rest_framework import serializers\n'), ((10932, 10985), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (10953, 10985), False, 'from rest_framework import serializers\n'), ((10998, 11051), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (11019, 11051), False, 'from rest_framework import serializers\n'), ((12609, 12661), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (12630, 12661), False, 'from rest_framework import serializers\n'), ((12680, 12733), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (12701, 12733), False, 'from rest_framework import serializers\n'), ((12783, 12836), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (12804, 12836), False, 'from rest_framework import serializers\n'), ((12879, 12932), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (12900, 12932), False, 'from rest_framework import serializers\n'), ((12945, 12998), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (12966, 12998), False, 'from rest_framework import serializers\n'), ((13012, 13065), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (13033, 13065), False, 'from rest_framework import serializers\n'), ((13078, 13131), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (13099, 13131), False, 'from rest_framework import serializers\n'), ((14695, 14747), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (14716, 14747), False, 'from rest_framework import serializers\n'), ((14766, 14819), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (14787, 14819), False, 'from rest_framework import serializers\n'), ((14873, 14926), 
'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (14894, 14926), False, 'from rest_framework import serializers\n'), ((14985, 15038), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (15006, 15038), False, 'from rest_framework import serializers\n'), ((15060, 15100), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'required': '(False)'}), '(required=False)\n', (15084, 15100), False, 'from rest_framework import serializers\n'), ((15161, 15214), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (15182, 15214), False, 'from rest_framework import serializers\n'), ((16819, 16871), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (16840, 16871), False, 'from rest_framework import serializers\n'), ((16884, 16937), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (16905, 16937), False, 'from rest_framework import serializers\n'), ((16954, 17007), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (16975, 17007), False, 'from rest_framework import serializers\n'), ((17021, 17074), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (17042, 17074), False, 'from rest_framework import serializers\n'), ((17089, 17142), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (17110, 17142), False, 'from rest_framework import serializers\n'), ((17154, 17207), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (17175, 17207), False, 'from rest_framework import serializers\n'), ((18630, 18682), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (18651, 18682), False, 'from rest_framework import serializers\n'), ((18698, 18750), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (18719, 18750), False, 'from rest_framework import serializers\n'), ((18760, 18800), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'required': '(False)'}), '(required=False)\n', (18784, 18800), False, 'from rest_framework import serializers\n'), ((19783, 19835), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (19804, 19835), False, 'from rest_framework import serializers\n'), ((19855, 19907), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (19876, 19907), False, 'from rest_framework import serializers\n'), ((19922, 19974), 
'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (19943, 19974), False, 'from rest_framework import serializers\n'), ((21013, 21065), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (21034, 21065), False, 'from rest_framework import serializers\n'), ((21079, 21131), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (21100, 21131), False, 'from rest_framework import serializers\n'), ((21151, 21191), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'required': '(False)'}), '(required=False)\n', (21175, 21191), False, 'from rest_framework import serializers\n'), ((21206, 21258), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (21227, 21258), False, 'from rest_framework import serializers\n'), ((21280, 21332), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (21301, 21332), False, 'from rest_framework import serializers\n'), ((21352, 21404), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (21373, 21404), False, 'from rest_framework import serializers\n'), ((21417, 21469), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (21438, 21469), False, 'from rest_framework import serializers\n'), ((21482, 21534), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (21503, 21534), False, 'from rest_framework import serializers\n'), ((21547, 21599), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (21568, 21599), False, 'from rest_framework import serializers\n'), ((21613, 21665), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (21634, 21665), False, 'from rest_framework import serializers\n'), ((21678, 21730), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (21699, 21730), False, 'from rest_framework import serializers\n'), ((23953, 24005), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (23974, 24005), False, 'from rest_framework import serializers\n'), ((24026, 24079), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (24047, 24079), False, 'from rest_framework import serializers\n'), ((24095, 24135), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'required': '(False)'}), '(required=False)\n', (24119, 24135), False, 'from rest_framework import serializers\n'), ((24148, 24188), 
'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'required': '(False)'}), '(required=False)\n', (24172, 24188), False, 'from rest_framework import serializers\n'), ((24200, 24252), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(100)', 'required': '(True)'}), '(max_length=100, required=True)\n', (24221, 24252), False, 'from rest_framework import serializers\n')]
oliverscheer/python-pytest-ci
tools/math_tools.py
d67c379440d1873a753c47e7031bb9564d96de21
""" some math tools """


class MathTools:
    """ some math tools """

    # the methods use no instance state, so expose them as static methods
    @staticmethod
    def add(a, b):
        """ add two values """
        return a + b

    @staticmethod
    def sub(a, b):
        """ subtract two values """
        return a - b
[]
ATrain951/01.python-com_Qproject
hackerrank/Algorithms/Correctness and the Loop Invariant/solution.py
c164dd093954d006538020bdf2e59e716b24d67c
def insertion_sort(l):
    for i in range(1, len(l)):
        j = i - 1
        key = l[i]
        while (j >= 0) and (l[j] > key):
            l[j + 1] = l[j]
            j -= 1
        l[j + 1] = key


m = int(input().strip())
ar = [int(i) for i in input().strip().split()]
insertion_sort(ar)
print(" ".join(map(str, ar)))
[]
kakkotetsu/CVP-Scripts
cvp_rest_api_examples/cvpLabelAdd.py
4075eaf9987be6220a7bed188dcee11f56a7bf35
#!/usr/bin/env python
# Copyright (c) 2019, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# - Redistributions of source code must retain the above copyright notice,
#   this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
#   notice, this list of conditions and the following disclaimer in the
#   documentation and/or other materials provided with the distribution.
# - Neither the name of Arista Networks nor the names of its
#   contributors may be used to endorse or promote products derived from
#   this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF NOT ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import requests
import json
import argparse
import urllib3


def parseArgs():
    parser = argparse.ArgumentParser()
    parser.add_argument( '-c', '--cvpName', required=True, help='cvp name' )
    parser.add_argument( '-u', '--userId', help='username', default='cvpadmin')
    parser.add_argument( '-p', '--password', help='password', default='arista')
    args = vars( parser.parse_args() )
    return args.pop( 'cvpName' ), args


def getCvpInfo( cvpName ):
    api = 'cvpInfo/getCvpInfo.do'
    url = 'https://%s:443/web/%s' % ( cvpName, api )
    print 'calling url: ', url
    return requests.get( url, cookies=cookies, verify=False )


def addDeviceToLabel( cvpName, label, deviceMac ):
    api = 'label/labelAssignToDevice.do'
    url = 'https://%s:443/web/%s' % ( cvpName, api )
    body = {'label': label, 'device': deviceMac}
    print 'calling url: ', url
    return requests.post( url, cookies=cookies, data=json.dumps(body), verify=False )


def authenticate( cvpName, loginInfo ):
    url = 'https://%s:443/web/login/authenticate.do' % ( cvpName, )
    return requests.post( url, json.dumps( loginInfo ), verify=False )


if __name__ == '__main__':
    urllib3.disable_warnings()
    cvpName, loginInfo = parseArgs()
    cookies = authenticate( cvpName, loginInfo ).cookies
    #print json.loads(getCvpInfo( cvpName ).text)
    #print getCvpInfo( cvpName ).json()
    print 'getCvpInfo:'
    print json.dumps(getCvpInfo( cvpName ).json(), indent=2)

    # ADD DEVICE TO LABEL
    # label = "{ tagType: tagValue }"
    label = "mlag:mlagNY"
    device = "de:ad:be:ef:ca:fe"
    print 'addDeviceToLabel:', label, device
    print json.dumps(addDeviceToLabel( cvpName, label, device ).json(), indent=2)
[]
raikonenfnu/mlir-npcomp
frontends/pytorch/python/torch_mlir_torchscript_e2e_test_configs/torchscript.py
29e1b2fe89848d58c9bc07e7df7ce651850a5244
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

import copy
from typing import Any

import torch

from torch_mlir_torchscript.e2e_test.framework import TestConfig, Trace, TraceItem


class TorchScriptTestConfig(TestConfig):
    """TestConfig that runs the torch.nn.Module through TorchScript"""

    def __init__(self):
        super().__init__()

    def compile(self, program: torch.nn.Module) -> torch.jit.ScriptModule:
        return torch.jit.script(program)

    def run(self, artifact: torch.jit.ScriptModule, trace: Trace) -> Trace:
        # TODO: Deepcopy the torch.jit.ScriptModule, so that if the program is
        # stateful then it does not mutate the original compiled program.
        result: Trace = []
        for item in trace:
            attr = artifact
            for part in item.symbol.split('.'):
                attr = getattr(attr, part)
            output = attr(*item.inputs)
            result.append(
                TraceItem(symbol=item.symbol, inputs=item.inputs, output=output))
        return result
[((591, 616), 'torch.jit.script', 'torch.jit.script', (['program'], {}), '(program)\n', (607, 616), False, 'import torch\n'), ((1104, 1168), 'torch_mlir_torchscript.e2e_test.framework.TraceItem', 'TraceItem', ([], {'symbol': 'item.symbol', 'inputs': 'item.inputs', 'output': 'output'}), '(symbol=item.symbol, inputs=item.inputs, output=output)\n', (1113, 1168), False, 'from torch_mlir_torchscript.e2e_test.framework import TestConfig, Trace, TraceItem\n')]
gdmgent-1718-wot/interactive-wall
app/balltracking/pubnubpython/pnconfiguration.py
af7ecff126b1ee9c85c270fe13d1338aa790c34b
from .enums import PNHeartbeatNotificationOptions, PNReconnectionPolicy
from . import utils


class PNConfiguration(object):
    DEFAULT_PRESENCE_TIMEOUT = 300
    DEFAULT_HEARTBEAT_INTERVAL = 280

    def __init__(self):
        # TODO: add validation
        self.uuid = None
        self.origin = "ps.pndsn.com"
        self.ssl = False
        self.non_subscribe_request_timeout = 10
        self.subscribe_request_timeout = 310
        self.connect_timeout = 5
        self.subscribe_key = None
        self.publish_key = None
        self.secret_key = None
        self.cipher_key = None
        self.auth_key = None
        self.filter_expression = None
        self.enable_subscribe = True
        self.crypto_instance = None
        self.log_verbosity = False
        self.heartbeat_notification_options = PNHeartbeatNotificationOptions.FAILURES
        self.reconnect_policy = PNReconnectionPolicy.NONE
        self.daemon = False
        self.heartbeat_default_values = True
        self._presence_timeout = PNConfiguration.DEFAULT_PRESENCE_TIMEOUT
        self._heartbeat_interval = PNConfiguration.DEFAULT_HEARTBEAT_INTERVAL

    def validate(self):
        assert self.uuid is None or isinstance(self.uuid, str)

        if self.uuid is None:
            self.uuid = utils.uuid()

    def scheme(self):
        if self.ssl:
            return "https"
        else:
            return "http"

    def scheme_extended(self):
        return self.scheme() + "://"

    def scheme_and_host(self):
        return self.scheme_extended() + self.origin

    def set_presence_timeout_with_custom_interval(self, timeout, interval):
        self.heartbeat_default_values = False
        self._presence_timeout = timeout
        self._heartbeat_interval = interval

    def set_presence_timeout(self, timeout):
        self.set_presence_timeout_with_custom_interval(timeout, (timeout / 2) - 1)

    @property
    def crypto(self):
        if self.crypto_instance is None:
            self._init_cryptodome()

        return self.crypto_instance

    def _init_cryptodome(self):
        from .crypto import PubNubCryptodome
        self.crypto_instance = PubNubCryptodome()

    @property
    def port(self):
        # self.ssl is stored as a boolean, so use its truthiness to pick the port
        return 443 if self.ssl else 80

    @property
    def presence_timeout(self):
        return self._presence_timeout

    @property
    def heartbeat_interval(self):
        return self._heartbeat_interval

    # TODO: set log level
[]
lmaciejonczyk/openthread
tests/scripts/thread-cert/thread_cert.py
9ca79ddd9af3d4e3f78cb6e611a3117a71b2198c
#!/usr/bin/env python3 # # Copyright (c) 2019, The OpenThread Authors. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # import json import logging import os import signal import subprocess import sys import time import traceback import unittest from typing import Optional, Callable import config import debug from node import Node, OtbrNode, HostNode from pktverify import utils as pvutils PACKET_VERIFICATION = int(os.getenv('PACKET_VERIFICATION', 0)) if PACKET_VERIFICATION: from pktverify.addrs import ExtAddr, EthAddr from pktverify.packet_verifier import PacketVerifier PORT_OFFSET = int(os.getenv('PORT_OFFSET', "0")) ENV_THREAD_VERSION = os.getenv('THREAD_VERSION', '1.1') DEFAULT_PARAMS = { 'is_mtd': False, 'is_bbr': False, 'is_otbr': False, 'is_host': False, 'mode': 'rdn', 'panid': 0xface, 'allowlist': None, 'version': ENV_THREAD_VERSION, } """Default configurations when creating nodes.""" EXTENDED_ADDRESS_BASE = 0x166e0a0000000000 """Extended address base to keep U/L bit 1. The value is borrowed from Thread Test Harness.""" class NcpSupportMixin(): """ The mixin to check whether a test case supports NCP. """ SUPPORT_NCP = True def __init__(self, *args, **kwargs): if os.getenv('NODE_TYPE', 'sim') == 'ncp-sim' and not self.SUPPORT_NCP: # 77 means skip this test case in automake tests sys.exit(77) super().__init__(*args, **kwargs) class TestCase(NcpSupportMixin, unittest.TestCase): """The base class for all thread certification test cases. The `topology` member of sub-class is used to create test topology. 
""" USE_MESSAGE_FACTORY = True TOPOLOGY = None CASE_WIRESHARK_PREFS = None def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s') self._start_time = None self._do_packet_verification = PACKET_VERIFICATION and hasattr(self, 'verify') def setUp(self): try: self._setUp() except: traceback.print_exc() for node in list(self.nodes.values()): try: node.destroy() except Exception: traceback.print_exc() raise def _setUp(self): """Create simulator, nodes and apply configurations. """ self._clean_up_tmp() self.simulator = config.create_default_simulator(use_message_factory=self.USE_MESSAGE_FACTORY) self.nodes = {} os.environ['LD_LIBRARY_PATH'] = '/tmp/thread-wireshark' if self._has_backbone_traffic(): self._prepare_backbone_network() self._start_backbone_sniffer() self._initial_topology = initial_topology = {} for i, params in self.TOPOLOGY.items(): params = self._parse_params(params) initial_topology[i] = params logging.info("Creating node %d: %r", i, params) if params['is_otbr']: nodeclass = OtbrNode elif params['is_host']: nodeclass = HostNode else: nodeclass = Node node = nodeclass( i, is_mtd=params['is_mtd'], simulator=self.simulator, name=params.get('name'), version=params['version'], is_bbr=params['is_bbr'], ) self.nodes[i] = node if node.is_host: continue self.nodes[i].set_panid(params['panid']) self.nodes[i].set_mode(params['mode']) if 'partition_id' in params: self.nodes[i].set_preferred_partition_id(params['partition_id']) if 'channel' in params: self.nodes[i].set_channel(params['channel']) if 'masterkey' in params: self.nodes[i].set_masterkey(params['masterkey']) if 'network_name' in params: self.nodes[i].set_network_name(params['network_name']) if 'router_selection_jitter' in params: self.nodes[i].set_router_selection_jitter(params['router_selection_jitter']) if 'router_upgrade_threshold' in params: self.nodes[i].set_router_upgrade_threshold(params['router_upgrade_threshold']) if 'router_downgrade_threshold' in params: self.nodes[i].set_router_downgrade_threshold(params['router_downgrade_threshold']) if 'router_eligible' in params: self.nodes[i].set_router_eligible(params['router_eligible']) if 'prefer_router_id' in params: self.nodes[i].prefer_router_id(params['prefer_router_id']) if 'timeout' in params: self.nodes[i].set_timeout(params['timeout']) if 'active_dataset' in params: self.nodes[i].set_active_dataset(params['active_dataset']['timestamp'], panid=params['active_dataset'].get('panid'), channel=params['active_dataset'].get('channel'), channel_mask=params['active_dataset'].get('channel_mask'), master_key=params['active_dataset'].get('master_key'), security_policy=params['active_dataset'].get('security_policy')) if 'pending_dataset' in params: self.nodes[i].set_pending_dataset(params['pending_dataset']['pendingtimestamp'], params['pending_dataset']['activetimestamp'], panid=params['pending_dataset'].get('panid'), channel=params['pending_dataset'].get('channel'), delay=params['pending_dataset'].get('delay')) if 'key_switch_guardtime' in params: self.nodes[i].set_key_switch_guardtime(params['key_switch_guardtime']) if 'key_sequence_counter' in params: self.nodes[i].set_key_sequence_counter(params['key_sequence_counter']) if 'network_id_timeout' in params: self.nodes[i].set_network_id_timeout(params['network_id_timeout']) if 'context_reuse_delay' in params: self.nodes[i].set_context_reuse_delay(params['context_reuse_delay']) if 'max_children' in params: 
self.nodes[i].set_max_children(params['max_children']) if 'bbr_registration_jitter' in params: self.nodes[i].set_bbr_registration_jitter(params['bbr_registration_jitter']) # we have to add allowlist after nodes are all created for i, params in initial_topology.items(): allowlist = params['allowlist'] if not allowlist: continue for j in allowlist: rssi = None if isinstance(j, tuple): j, rssi = j self.nodes[i].add_allowlist(self.nodes[j].get_addr64(), rssi=rssi) self.nodes[i].enable_allowlist() self._inspector = debug.Inspector(self) self._collect_test_info_after_setup() def inspect(self): self._inspector.inspect() def tearDown(self): """Destroy nodes and simulator. """ if self._do_packet_verification and os.uname().sysname != "Linux": raise NotImplementedError( f'{self.test_name}: Packet Verification not available on {os.uname().sysname} (Linux only).') if self._do_packet_verification: time.sleep(3) if self._has_backbone_traffic(): # Stop Backbone sniffer before stopping nodes so that we don't capture Codecov Uploading traffic self._stop_backbone_sniffer() for node in list(self.nodes.values()): node.stop() node.destroy() self.simulator.stop() if self._has_backbone_traffic(): self._remove_backbone_network() pcap_filename = self._merge_thread_backbone_pcaps() else: pcap_filename = self._get_thread_pcap_filename() if self._do_packet_verification: self._test_info['pcap'] = pcap_filename test_info_path = self._output_test_info() self._verify_packets(test_info_path) def flush_all(self): """Flush away all captured messages of all nodes. """ for i in list(self.nodes.keys()): self.simulator.get_messages_sent_by(i) def flush_nodes(self, nodes): """Flush away all captured messages of specified nodes. Args: nodes (list): nodes whose messages to flush. """ for i in nodes: if i in list(self.nodes.keys()): self.simulator.get_messages_sent_by(i) def _clean_up_tmp(self): """ Clean up node files in tmp directory """ os.system(f"rm -f tmp/{PORT_OFFSET}_*.flash tmp/{PORT_OFFSET}_*.data tmp/{PORT_OFFSET}_*.swap") def _verify_packets(self, test_info_path: str): pv = PacketVerifier(test_info_path, self.CASE_WIRESHARK_PREFS) pv.add_common_vars() self.verify(pv) print("Packet verification passed: %s" % test_info_path, file=sys.stderr) @property def test_name(self): return os.getenv('TEST_NAME', 'current') def collect_ipaddrs(self): if not self._do_packet_verification: return test_info = self._test_info for i, node in self.nodes.items(): ipaddrs = node.get_addrs() test_info['ipaddrs'][i] = ipaddrs if not node.is_host: mleid = node.get_mleid() test_info['mleids'][i] = mleid def collect_rloc16s(self): if not self._do_packet_verification: return test_info = self._test_info test_info['rloc16s'] = {} for i, node in self.nodes.items(): if not node.is_host: test_info['rloc16s'][i] = '0x%04x' % node.get_addr16() def collect_rlocs(self): if not self._do_packet_verification: return test_info = self._test_info test_info['rlocs'] = {} for i, node in self.nodes.items(): if node.is_host: continue test_info['rlocs'][i] = node.get_rloc() def collect_leader_aloc(self, node): if not self._do_packet_verification: return test_info = self._test_info test_info['leader_aloc'] = self.nodes[node].get_addr_leader_aloc() def collect_extra_vars(self, **vars): if not self._do_packet_verification: return for k in vars.keys(): assert isinstance(k, str), k test_vars = self._test_info.setdefault("extra_vars", {}) test_vars.update(vars) def _collect_test_info_after_setup(self): """ Collect test info after setUp """ if not 
self._do_packet_verification: return test_info = self._test_info = { 'script': os.path.abspath(sys.argv[0]), 'testcase': self.test_name, 'start_time': time.ctime(self._start_time), 'pcap': '', 'extaddrs': {}, 'ethaddrs': {}, 'ipaddrs': {}, 'mleids': {}, 'topology': self._initial_topology, 'backbone': { 'interface': config.BACKBONE_DOCKER_NETWORK_NAME, 'prefix': config.BACKBONE_PREFIX, }, 'domain_prefix': config.DOMAIN_PREFIX, 'env': { 'PORT_OFFSET': config.PORT_OFFSET, }, } for i, node in self.nodes.items(): if not node.is_host: extaddr = node.get_addr64() test_info['extaddrs'][i] = ExtAddr(extaddr).format_octets() if node.is_host or node.is_otbr: ethaddr = node.get_ether_mac() test_info['ethaddrs'][i] = EthAddr(ethaddr).format_octets() def _output_test_info(self): """ Output test info to json file after tearDown """ filename = f'{self.test_name}.json' with open(filename, 'wt') as ofd: ofd.write(json.dumps(self._test_info, indent=1, sort_keys=True)) return filename def _get_thread_pcap_filename(self): current_pcap = self.test_name + '.pcap' return os.path.abspath(current_pcap) def assure_run_ok(self, cmd, shell=False): if not shell and isinstance(cmd, str): cmd = cmd.split() proc = subprocess.run(cmd, stdout=sys.stdout, stderr=sys.stderr, shell=shell) print(">>> %s => %d" % (cmd, proc.returncode), file=sys.stderr) proc.check_returncode() def _parse_params(self, params: Optional[dict]) -> dict: params = params or {} if params.get('is_bbr') or params.get('is_otbr'): # BBRs must use thread version 1.2 assert params.get('version', '1.2') == '1.2', params params['version'] = '1.2' elif params.get('is_host'): # Hosts must not specify thread version assert params.get('version', '') == '', params params['version'] = '' if params: params = dict(DEFAULT_PARAMS, **params) else: params = DEFAULT_PARAMS.copy() return params def _has_backbone_traffic(self): for param in self.TOPOLOGY.values(): if param and (param.get('is_otbr') or param.get('is_host')): return True return False def _prepare_backbone_network(self): network_name = config.BACKBONE_DOCKER_NETWORK_NAME self.assure_run_ok( f'docker network create --driver bridge --ipv6 --subnet {config.BACKBONE_PREFIX} -o "com.docker.network.bridge.name"="{network_name}" {network_name} || true', shell=True) def _remove_backbone_network(self): network_name = config.BACKBONE_DOCKER_NETWORK_NAME self.assure_run_ok(f'docker network rm {network_name}', shell=True) def _start_backbone_sniffer(self): # don't know why but I have to create the empty bbr.pcap first, otherwise tshark won't work # self.assure_run_ok("truncate --size 0 bbr.pcap && chmod 664 bbr.pcap", shell=True) pcap_file = self._get_backbone_pcap_filename() try: os.remove(pcap_file) except FileNotFoundError: pass dumpcap = pvutils.which_dumpcap() self._dumpcap_proc = subprocess.Popen([dumpcap, '-i', config.BACKBONE_DOCKER_NETWORK_NAME, '-w', pcap_file], stdout=sys.stdout, stderr=sys.stderr) time.sleep(0.2) assert self._dumpcap_proc.poll() is None, 'tshark terminated unexpectedly' logging.info('Backbone sniffer launched successfully: pid=%s', self._dumpcap_proc.pid) def _get_backbone_pcap_filename(self): backbone_pcap = self.test_name + '_backbone.pcap' return os.path.abspath(backbone_pcap) def _get_merged_pcap_filename(self): backbone_pcap = self.test_name + '_merged.pcap' return os.path.abspath(backbone_pcap) def _stop_backbone_sniffer(self): self._dumpcap_proc.send_signal(signal.SIGTERM) self._dumpcap_proc.__exit__(None, None, None) logging.info('Backbone sniffer terminated successfully: pid=%s' % 
self._dumpcap_proc.pid) def _merge_thread_backbone_pcaps(self): thread_pcap = self._get_thread_pcap_filename() backbone_pcap = self._get_backbone_pcap_filename() merged_pcap = self._get_merged_pcap_filename() mergecap = pvutils.which_mergecap() self.assure_run_ok(f'{mergecap} -w {merged_pcap} {thread_pcap} {backbone_pcap}', shell=True) return merged_pcap def wait_until(self, cond: Callable[[], bool], timeout: int, go_interval: int = 1): while True: self.simulator.go(go_interval) if cond(): break timeout -= go_interval if timeout <= 0: raise RuntimeError(f'wait failed after {timeout} seconds') def wait_node_state(self, nodeid: int, state: str, timeout: int): self.wait_until(lambda: self.nodes[nodeid].get_state() == state, timeout)
[((2144, 2178), 'os.getenv', 'os.getenv', (['"""THREAD_VERSION"""', '"""1.1"""'], {}), "('THREAD_VERSION', '1.1')\n", (2153, 2178), False, 'import os\n'), ((1904, 1939), 'os.getenv', 'os.getenv', (['"""PACKET_VERIFICATION"""', '(0)'], {}), "('PACKET_VERIFICATION', 0)\n", (1913, 1939), False, 'import os\n'), ((2091, 2120), 'os.getenv', 'os.getenv', (['"""PORT_OFFSET"""', '"""0"""'], {}), "('PORT_OFFSET', '0')\n", (2100, 2120), False, 'import os\n'), ((3320, 3417), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(asctime)s - %(levelname)s - %(message)s"""'}), "(level=logging.DEBUG, format=\n '%(asctime)s - %(levelname)s - %(message)s')\n", (3339, 3417), False, 'import logging\n'), ((3997, 4074), 'config.create_default_simulator', 'config.create_default_simulator', ([], {'use_message_factory': 'self.USE_MESSAGE_FACTORY'}), '(use_message_factory=self.USE_MESSAGE_FACTORY)\n', (4028, 4074), False, 'import config\n'), ((8913, 8934), 'debug.Inspector', 'debug.Inspector', (['self'], {}), '(self)\n', (8928, 8934), False, 'import debug\n'), ((10758, 10863), 'os.system', 'os.system', (['f"""rm -f tmp/{PORT_OFFSET}_*.flash tmp/{PORT_OFFSET}_*.data tmp/{PORT_OFFSET}_*.swap"""'], {}), "(\n f'rm -f tmp/{PORT_OFFSET}_*.flash tmp/{PORT_OFFSET}_*.data tmp/{PORT_OFFSET}_*.swap'\n )\n", (10767, 10863), False, 'import os\n'), ((10920, 10977), 'pktverify.packet_verifier.PacketVerifier', 'PacketVerifier', (['test_info_path', 'self.CASE_WIRESHARK_PREFS'], {}), '(test_info_path, self.CASE_WIRESHARK_PREFS)\n', (10934, 10977), False, 'from pktverify.packet_verifier import PacketVerifier\n'), ((11168, 11201), 'os.getenv', 'os.getenv', (['"""TEST_NAME"""', '"""current"""'], {}), "('TEST_NAME', 'current')\n", (11177, 11201), False, 'import os\n'), ((14326, 14355), 'os.path.abspath', 'os.path.abspath', (['current_pcap'], {}), '(current_pcap)\n', (14341, 14355), False, 'import os\n'), ((14496, 14566), 'subprocess.run', 'subprocess.run', (['cmd'], {'stdout': 'sys.stdout', 'stderr': 'sys.stderr', 'shell': 'shell'}), '(cmd, stdout=sys.stdout, stderr=sys.stderr, shell=shell)\n', (14510, 14566), False, 'import subprocess\n'), ((16416, 16439), 'pktverify.utils.which_dumpcap', 'pvutils.which_dumpcap', ([], {}), '()\n', (16437, 16439), True, 'from pktverify import utils as pvutils\n'), ((16469, 16598), 'subprocess.Popen', 'subprocess.Popen', (["[dumpcap, '-i', config.BACKBONE_DOCKER_NETWORK_NAME, '-w', pcap_file]"], {'stdout': 'sys.stdout', 'stderr': 'sys.stderr'}), "([dumpcap, '-i', config.BACKBONE_DOCKER_NETWORK_NAME, '-w',\n pcap_file], stdout=sys.stdout, stderr=sys.stderr)\n", (16485, 16598), False, 'import subprocess\n'), ((16695, 16710), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (16705, 16710), False, 'import time\n'), ((16802, 16893), 'logging.info', 'logging.info', (['"""Backbone sniffer launched successfully: pid=%s"""', 'self._dumpcap_proc.pid'], {}), "('Backbone sniffer launched successfully: pid=%s', self.\n _dumpcap_proc.pid)\n", (16814, 16893), False, 'import logging\n'), ((17006, 17036), 'os.path.abspath', 'os.path.abspath', (['backbone_pcap'], {}), '(backbone_pcap)\n', (17021, 17036), False, 'import os\n'), ((17150, 17180), 'os.path.abspath', 'os.path.abspath', (['backbone_pcap'], {}), '(backbone_pcap)\n', (17165, 17180), False, 'import os\n'), ((17337, 17431), 'logging.info', 'logging.info', (["('Backbone sniffer terminated successfully: pid=%s' % self._dumpcap_proc.pid)"], {}), "('Backbone sniffer terminated successfully: pid=%s' % self.\n 
_dumpcap_proc.pid)\n", (17349, 17431), False, 'import logging\n'), ((17661, 17685), 'pktverify.utils.which_mergecap', 'pvutils.which_mergecap', ([], {}), '()\n', (17683, 17685), True, 'from pktverify import utils as pvutils\n'), ((2889, 2901), 'sys.exit', 'sys.exit', (['(77)'], {}), '(77)\n', (2897, 2901), False, 'import sys\n'), ((4501, 4548), 'logging.info', 'logging.info', (['"""Creating node %d: %r"""', 'i', 'params'], {}), "('Creating node %d: %r', i, params)\n", (4513, 4548), False, 'import logging\n'), ((9394, 9407), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (9404, 9407), False, 'import time\n'), ((12944, 12972), 'os.path.abspath', 'os.path.abspath', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (12959, 12972), False, 'import os\n'), ((13040, 13068), 'time.ctime', 'time.ctime', (['self._start_time'], {}), '(self._start_time)\n', (13050, 13068), False, 'import time\n'), ((16325, 16345), 'os.remove', 'os.remove', (['pcap_file'], {}), '(pcap_file)\n', (16334, 16345), False, 'import os\n'), ((2747, 2776), 'os.getenv', 'os.getenv', (['"""NODE_TYPE"""', '"""sim"""'], {}), "('NODE_TYPE', 'sim')\n", (2756, 2776), False, 'import os\n'), ((3622, 3643), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3641, 3643), False, 'import traceback\n'), ((14141, 14194), 'json.dumps', 'json.dumps', (['self._test_info'], {'indent': '(1)', 'sort_keys': '(True)'}), '(self._test_info, indent=1, sort_keys=True)\n', (14151, 14194), False, 'import json\n'), ((9160, 9170), 'os.uname', 'os.uname', ([], {}), '()\n', (9168, 9170), False, 'import os\n'), ((13720, 13736), 'pktverify.addrs.ExtAddr', 'ExtAddr', (['extaddr'], {}), '(extaddr)\n', (13727, 13736), False, 'from pktverify.addrs import ExtAddr, EthAddr\n'), ((13889, 13905), 'pktverify.addrs.EthAddr', 'EthAddr', (['ethaddr'], {}), '(ethaddr)\n', (13896, 13905), False, 'from pktverify.addrs import ExtAddr, EthAddr\n'), ((3805, 3826), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3824, 3826), False, 'import traceback\n'), ((9304, 9314), 'os.uname', 'os.uname', ([], {}), '()\n', (9312, 9314), False, 'import os\n')]
Anurag-Varma/facemask-detection
FaceMaskDetection with webcam.py
9ac681261e246e6ab1837c576d933dc7324e3a92
import cv2
import numpy as np
from keras.models import model_from_json
from keras.preprocessing.image import img_to_array

#load model
model = model_from_json(open("fer.json", "r").read())  #change the path accoring to files
#load weights
model.load_weights('fer.h5')  #change the path accoring to files

detection_model_path="C:/Users/panur/.spyder-py3/FaceMaskDetection/cascadeH5.xml"  #change the path accoring to files
face_detection = cv2.CascadeClassifier(detection_model_path)

ret=1
flag=True
cap = cv2.VideoCapture(0)  #default 0 for webcam
frameRate = cap.get(30)

while(cap.isOpened()):
    ret, fm=cap.read()
    fm = cv2.resize(fm, (224, 224))
    file = cv2.cvtColor(fm, cv2.COLOR_BGR2RGB)
    orig_frame = file
    frame = file
    faces = face_detection.detectMultiScale(frame,scaleFactor=1.1,minNeighbors=5,minSize=(30,30),flags=cv2.CASCADE_SCALE_IMAGE)

    if len(faces) :
        faces = sorted(faces, reverse=True,key=lambda x: (x[2] - x[0]) * (x[3] - x[1]))[0]
        (fX, fY, fW, fH) = faces
        roi = frame[fY:fY + fH, fX:fX + fW]
        roi = cv2.resize(roi, (48, 48),3)
        roi = frame.astype("float") / 255.0
        roi = img_to_array(roi)
        roi = np.expand_dims(roi, axis=0)

        preds=model.predict_classes(roi)[0]
        if preds==0:
            print("Mask worn")
            test='Mask worn'
        elif preds==1:
            print("Danger: No Mask")
            test='Danger: No Mask'

        cv2.putText(fm,test, (fX-15, fY - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
        cv2.rectangle(fm, (fX, fY), (fX + fW, fY + fH),(0, 0, 255), 2)

    cv2.imshow("Live Video", fm)
    k=cv2.waitKey(25)  #Press ESC to stop/exit
    if k == 27:
        ret=0
        break

print("closed")
cap.release()
cv2.destroyAllWindows()
[((472, 515), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['detection_model_path'], {}), '(detection_model_path)\n', (493, 515), False, 'import cv2\n'), ((564, 583), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (580, 583), False, 'import cv2\n'), ((1747, 1770), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1768, 1770), False, 'import cv2\n'), ((685, 711), 'cv2.resize', 'cv2.resize', (['fm', '(224, 224)'], {}), '(fm, (224, 224))\n', (695, 711), False, 'import cv2\n'), ((720, 755), 'cv2.cvtColor', 'cv2.cvtColor', (['fm', 'cv2.COLOR_BGR2RGB'], {}), '(fm, cv2.COLOR_BGR2RGB)\n', (732, 755), False, 'import cv2\n'), ((1596, 1624), 'cv2.imshow', 'cv2.imshow', (['"""Live Video"""', 'fm'], {}), "('Live Video', fm)\n", (1606, 1624), False, 'import cv2\n'), ((1629, 1644), 'cv2.waitKey', 'cv2.waitKey', (['(25)'], {}), '(25)\n', (1640, 1644), False, 'import cv2\n'), ((1101, 1129), 'cv2.resize', 'cv2.resize', (['roi', '(48, 48)', '(3)'], {}), '(roi, (48, 48), 3)\n', (1111, 1129), False, 'import cv2\n'), ((1184, 1201), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['roi'], {}), '(roi)\n', (1196, 1201), False, 'from keras.preprocessing.image import img_to_array\n'), ((1212, 1239), 'numpy.expand_dims', 'np.expand_dims', (['roi'], {'axis': '(0)'}), '(roi, axis=0)\n', (1226, 1239), True, 'import numpy as np\n'), ((1435, 1529), 'cv2.putText', 'cv2.putText', (['fm', 'test', '(fX - 15, fY - 10)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.45)', '(0, 0, 255)', '(2)'], {}), '(fm, test, (fX - 15, fY - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (\n 0, 0, 255), 2)\n', (1446, 1529), False, 'import cv2\n'), ((1526, 1589), 'cv2.rectangle', 'cv2.rectangle', (['fm', '(fX, fY)', '(fX + fW, fY + fH)', '(0, 0, 255)', '(2)'], {}), '(fm, (fX, fY), (fX + fW, fY + fH), (0, 0, 255), 2)\n', (1539, 1589), False, 'import cv2\n')]
Binary-bug/Python
language/Basics/stringformatting.py
233425ded6abc26c889599a82a181487789e3bab
age = 24
print("My age is " + str(age) + " years ")
# the above procedure is tedious since we dont really want to include str for every number we encounter

#Method1 Replacement Fields
print("My age is {0} years ".format(age))  # {0} is the actual replacement field, number important for multiple replacement fields

print("There are {0} days in {1}, {2}, {3}, {4}, {5}, {6} and {7} ".format(31,"January","March","May","july","August","october","december"))
#each of the arguments of .format are matched to their respective replacement fields

print("""January:{2}
February:{0}
March:{2}
April:{1}
""".format(28,30,31))

#Method2 Formatting operator not recommended though style from python 2
print("My age is %d years" % age)
print("My age is %d %s, %d %s" % (age,"years",6,"months"))
#^ old format and it was elegant -__-

#
# for i in range(1,12):
#     print("No, %2d squared is %4d and cubed is %4d" %(i,i**2,i**3))  # ** operator raises power %xd x allocates spaces
#
#
#
#
#
#for comparison
# print()
# for i in range(1,12):
#     print("No, %d squared is %d and cubed is %d" % (i,i**2,i**3))
#
#
#
#adding more precision
#
# print("Pi is approximately %12.50f" % (22/7))  # 50 decimal precsion and 12 for spaces default is 6 spaces
#
#
#
#
#Replacement field syntax variant of above Python 2 tricks
for i in range(1,12):
    print("No. {0:2} squared is {1:4} and cubed is {2:4}".format(i,i**2,i**3))

print()

#for left alignment
for i in range(1,12):
    print("NO. {0:<2} squared is {1:<4} and cubed is {2:<4}".format(i,i**2,i**3))

#floating point precision
print("Pi is approximately {0:.50}".format(22/7))

#use of numbers in replacement fields is optional when the default order is implied
for i in range(1,12):
    print("No. {:2} squared is {:4} and cubed is {:4}".format(i,i**2,i**3))

days = "Mon, Tue, Wed, Thu, Fri, Sat, Sun"
print(days[::5])
[]
ulrikpedersen/toggl-gnome-applet
toggl.py
ae48358414d14d44ef5731c59f1813bac97e3257
#!/usr/bin/env python
import logging
from datetime import datetime
logging.basicConfig(level=logging.WARNING)

import os
import urllib2, base64, json
import dateutil.parser

def from_ISO8601( str_iso8601 ):
    return dateutil.parser.parse(str_iso8601)

def to_ISO8601( timestamp ):
    return timestamp.isoformat()

def convert_time_strings(toggl_dicts):
    timestamp_fields = ['at', 'created_at', 'start', 'stop']
    result = []
    for tdict in toggl_dicts:
        d = tdict
        for tsf in timestamp_fields:
            if tdict.has_key(tsf):
                d[tsf] = from_ISO8601(tdict[tsf])
        result.append(d)
    return result

class Toggl:
    def __init__(self, api_token=None):
        self.log = logging.getLogger("Toggl")
        self.log.setLevel(logging.DEBUG)
        self.toggl_domain = "www.toggl.com"
        self.toggl_api = "https://%s/api/v8/" % self.toggl_domain
        self.report_api = "https://%s/reports/api/v2" % self.toggl_domain
        self._api_token = api_token

        # Search for an Toggl API token in a list of files
        # No validation of the collected token
        # TODO: encryption of tokenfiles could be nice
        tokenfiles = [os.path.expanduser(f) for f in ['.toggltoken', '~/.toggltoken', '~/.togglapplet/.toggltoken']]
        for tf in tokenfiles:
            if os.path.exists( tf ):
                try:
                    f = open(tf)
                    self._api_token = f.read().strip()
                    f.close()
                except:
                    self.log.exception("Could not read token from " + tf)
                    self._api_token = None
            if self._api_token:
                break

    def send_request( self, api_call_url ):
        ''' Send a request or command to Toggl, retrieve and parse the json response.
        returns a list of dictionary objects.
        Throws an exception if the http response is not OK (200)
        or if no JSON can be decoded from the response.
        '''
        request = urllib2.Request( api_call_url )
        self.log.debug("http request url = \'%s\'", request.get_full_url())

        # username:password
        # Use base64.standard_b64encode instead of replace...
        user_pass = base64.encodestring('%s:%s' % (self._api_token, 'api_token')).replace('\n', '')
        request.add_header("Authorization", "Basic %s" % user_pass)
        opener = urllib2.build_opener( urllib2.HTTPHandler(),
                                      urllib2.HTTPSHandler(),
                                      urllib2.ProxyHandler({'https': 'http://wwwcache.rl.ac.uk:8080'}))
        urllib2.install_opener(opener)
        result = urllib2.urlopen(request, timeout = 3.0)  # with no data, this is a http GET.
        self.log.debug("http request result: code=%s url=\'%s\'", result.getcode(), result.geturl())
        js = json.load(result)
        #self.log.debug("JSON raw result: %s" % json.dumps(js,sort_keys=True, indent=4, separators=(',', ': ')))
        return js

    def get_workspaces(self):
        self.log.debug("get_workspaces()")
        js = self.send_request(self.toggl_api + "workspaces")
        js = convert_time_strings(js)
        return js

    def get_default_workspace(self):
        self.log.debug("get_default_workspace()")
        wid = self.get_user()['default_wid']
        js = self.send_request(self.toggl_api + "workspaces/%s"%str(wid))
        js = convert_time_strings([js['data']])
        return js[0]

    def get_default_workspace_id(self):
        self.log.debug("get_default_workspace_id()")
        ws = self.get_default_workspace()
        self.log.debug(ws)
        return ws['id']

    def get_projects(self, wid=None):
        self.log.debug("get_projects(wid=%s)"%str(wid))
        if wid:
            js = self.send_request(self.toggl_api + "workspaces/%s/projects"%str(wid))
        else:
            js = []
            for w in self.get_workspaces():
                js += self.send_request(self.toggl_api + "workspaces/%s/projects"%str(w['id']))
        js = convert_time_strings(js)
        return js

    def get_current_entry(self):
        '''get the currently active time entry'''
        self.log.debug("get_current_entry()")
        js = self.send_request(self.toggl_api + "time_entries/current")
        self.log.debug( js )
        js = convert_time_strings(js['data'])
        return js

    def get_range_entries(self, start_end=None):
        '''Get a list of entries in a range (max 1000 entries).
        If no start-end range is defined, the default is to return all entries
        from the last 9 days.
        start_end: tuple with start and end date'''
        self.log.debug("get_range_entries()")
        query = "time_entries"
        if start_end:
            start, end = start_end
            if type(start) == datetime.datetime:
                start = to_ISO8601(start)
            if type(end) == datetime.datetime:
                end = to_ISO8601(end)
            query += "?start_date=%s&end_date=%s"%(start, end)
        js = self.send_request(self.toggl_api + query)
        js = convert_time_strings(js)
        return js

    def get_user(self):
        self.log.debug("get_user()")
        js = self.send_request(self.toggl_api + "me")
        return js['data']
[((68, 110), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.WARNING'}), '(level=logging.WARNING)\n', (87, 110), False, 'import logging\n'), ((792, 818), 'logging.getLogger', 'logging.getLogger', (['"""Toggl"""'], {}), "('Toggl')\n", (809, 818), False, 'import logging\n'), ((2098, 2127), 'urllib2.Request', 'urllib2.Request', (['api_call_url'], {}), '(api_call_url)\n', (2113, 2127), False, 'import urllib2, base64, json\n'), ((2676, 2706), 'urllib2.install_opener', 'urllib2.install_opener', (['opener'], {}), '(opener)\n', (2698, 2706), False, 'import urllib2, base64, json\n'), ((2724, 2761), 'urllib2.urlopen', 'urllib2.urlopen', (['request'], {'timeout': '(3.0)'}), '(request, timeout=3.0)\n', (2739, 2761), False, 'import urllib2, base64, json\n'), ((2914, 2931), 'json.load', 'json.load', (['result'], {}), '(result)\n', (2923, 2931), False, 'import urllib2, base64, json\n'), ((1273, 1294), 'os.path.expanduser', 'os.path.expanduser', (['f'], {}), '(f)\n', (1291, 1294), False, 'import os\n'), ((1413, 1431), 'os.path.exists', 'os.path.exists', (['tf'], {}), '(tf)\n', (1427, 1431), False, 'import os\n'), ((2522, 2543), 'urllib2.HTTPHandler', 'urllib2.HTTPHandler', ([], {}), '()\n', (2541, 2543), False, 'import urllib2, base64, json\n'), ((2561, 2583), 'urllib2.HTTPSHandler', 'urllib2.HTTPSHandler', ([], {}), '()\n', (2581, 2583), False, 'import urllib2, base64, json\n'), ((2601, 2665), 'urllib2.ProxyHandler', 'urllib2.ProxyHandler', (["{'https': 'http://wwwcache.rl.ac.uk:8080'}"], {}), "({'https': 'http://wwwcache.rl.ac.uk:8080'})\n", (2621, 2665), False, 'import urllib2, base64, json\n'), ((2317, 2378), 'base64.encodestring', 'base64.encodestring', (["('%s:%s' % (self._api_token, 'api_token'))"], {}), "('%s:%s' % (self._api_token, 'api_token'))\n", (2336, 2378), False, 'import urllib2, base64, json\n')]
ISMHinoLab/geodesical_skew_divergence
gs_divergence/symmetrized_geodesical_skew_divergence.py
293648a30e86bdd14749af5b107f1d3687d67700
from typing import Optional

import torch

from gs_divergence import gs_div


def symmetrized_gs_div(
    input: torch.Tensor,
    target: torch.Tensor,
    alpha: float = -1,
    lmd: float = 0.5,
    reduction: Optional[str] = 'sum',
) -> torch.Tensor:
    lhs = gs_div(input, target, alpha=alpha, lmd=lmd, reduction=reduction)
    rhs = gs_div(target, input, alpha=alpha, lmd=lmd, reduction=reduction)

    return (lhs + rhs) / 2
[((264, 328), 'gs_divergence.gs_div', 'gs_div', (['input', 'target'], {'alpha': 'alpha', 'lmd': 'lmd', 'reduction': 'reduction'}), '(input, target, alpha=alpha, lmd=lmd, reduction=reduction)\n', (270, 328), False, 'from gs_divergence import gs_div\n'), ((339, 403), 'gs_divergence.gs_div', 'gs_div', (['target', 'input'], {'alpha': 'alpha', 'lmd': 'lmd', 'reduction': 'reduction'}), '(target, input, alpha=alpha, lmd=lmd, reduction=reduction)\n', (345, 403), False, 'from gs_divergence import gs_div\n')]
kushaliitm/deep-learning
train.py
ab8e23d1414d3b79bbe4a3acd57a475f6def7277
import argparse
import helper as hp
import torch
import os
import json

parser = argparse.ArgumentParser(description = 'train.py')

parser.add_argument('--data-dir', nargs = '*', action = "store", default = "./flowers/", help = "folder path for data")
parser.add_argument('--save-dir', action = "store", required=True, help = "filepath for saving checkpoint")
parser.add_argument('--learning-rate', action = "store", default = 0.001, help = "learning rate for the optimizer")
parser.add_argument('--epoch-num', action = "store", type = int, default = 3, help = "epoch value")
parser.add_argument('--architecture', action = "store", default = "vgg16", type = str, help = "specify the neural network structure: vgg16 or densenet121")
parser.add_argument('--hidden-size', type = int, action = "store", default = 1000, help = "state the units for fc2")
parser.add_argument('--optimizer', action='store', default='adam', help='Optimizer to optimize')

pa = parser.parse_args()
pa = vars(pa)
print(pa)

data_path = pa['data_dir']
save_dir = pa["save_dir"]
learning_rate = pa['learning_rate']
architecture = pa['architecture']
hidden_size = pa['hidden_size']
epoch_number = pa['epoch_num']

if (not os.path.exists(f'experiments/{save_dir}')):
    os.makedirs(f'experiments/{save_dir}')

file_path = f'experiments/{save_dir}/checkpoint.pt'

# saving parameters
with open(f'experiments/{save_dir}/parameters.json', 'w') as f:
    json.dump(pa, f)

# load the data - data_load() from help.py
print('Loading data')
train_loader, validation_loader, test_loader = hp.load_data(data_path)

criterion = torch.nn.NLLLoss()

# build model
print(f'Loading weights from {architecture}')
model, optimizer = hp.get_model_and_optimizer(pa)

# train model
print('Training model')
hp.train_model(model, optimizer, learning_rate,train_loader,validation_loader,criterion,epoch_number, file_path)

# checkpoint the model
print("model has been successfully trained")
[((81, 128), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""train.py"""'}), "(description='train.py')\n", (104, 128), False, 'import argparse\n'), ((1558, 1581), 'helper.load_data', 'hp.load_data', (['data_path'], {}), '(data_path)\n', (1570, 1581), True, 'import helper as hp\n'), ((1594, 1612), 'torch.nn.NLLLoss', 'torch.nn.NLLLoss', ([], {}), '()\n', (1610, 1612), False, 'import torch\n'), ((1693, 1723), 'helper.get_model_and_optimizer', 'hp.get_model_and_optimizer', (['pa'], {}), '(pa)\n', (1719, 1723), True, 'import helper as hp\n'), ((1763, 1883), 'helper.train_model', 'hp.train_model', (['model', 'optimizer', 'learning_rate', 'train_loader', 'validation_loader', 'criterion', 'epoch_number', 'file_path'], {}), '(model, optimizer, learning_rate, train_loader,\n validation_loader, criterion, epoch_number, file_path)\n', (1777, 1883), True, 'import helper as hp\n'), ((1191, 1232), 'os.path.exists', 'os.path.exists', (['f"""experiments/{save_dir}"""'], {}), "(f'experiments/{save_dir}')\n", (1205, 1232), False, 'import os\n'), ((1239, 1277), 'os.makedirs', 'os.makedirs', (['f"""experiments/{save_dir}"""'], {}), "(f'experiments/{save_dir}')\n", (1250, 1277), False, 'import os\n'), ((1424, 1440), 'json.dump', 'json.dump', (['pa', 'f'], {}), '(pa, f)\n', (1433, 1440), False, 'import json\n')]
rbarillec/project_euler
Euler0001.py
db812f9ae53090b34716452d0cb9ec14bf218290
def Euler0001():
    max = 1000
    sum = 0
    for i in range(1, max):
        if i%3 == 0 or i%5 == 0:
            sum += i
    print(sum)

Euler0001()
[]
lmathia2/BLIP
models/blip.py
8ca42256e83654858856d40886509be8fbca51a7
''' * Copyright (c) 2022, salesforce.com, inc. * All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause * By Junnan Li ''' import warnings warnings.filterwarnings("ignore") from models.vit import VisionTransformer, interpolate_pos_embed from models.med import BertConfig, BertModel, BertLMHeadModel from transformers import BertTokenizer import torch from torch import nn import torch.nn.functional as F import os from urllib.parse import urlparse from timm.models.hub import download_cached_file class BLIP_Base(nn.Module): def __init__(self, med_config = 'configs/med_config.json', image_size = 224, vit = 'base', vit_grad_ckpt = False, vit_ckpt_layer = 0, ): """ Args: med_config (str): path for the mixture of encoder-decoder model's configuration file image_size (int): input image size vit (str): model size of vision transformer """ super().__init__() self.visual_encoder, vision_width = create_vit(vit,image_size, vit_grad_ckpt, vit_ckpt_layer) self.tokenizer = init_tokenizer() med_config = BertConfig.from_json_file(med_config) med_config.encoder_width = vision_width self.text_encoder = BertModel(config=med_config, add_pooling_layer=False) def forward(self, image, caption, mode): assert mode in ['image', 'text', 'multimodal'], "mode parameter must be image, text, or multimodal" text = self.tokenizer(caption, return_tensors="pt").to(image.device) if mode=='image': # return image features image_embeds = self.visual_encoder(image) return image_embeds elif mode=='text': # return text features text_output = self.text_encoder(text.input_ids, attention_mask = text.attention_mask, return_dict = True, mode = 'text') return text_output.last_hidden_state elif mode=='multimodal': # return multimodel features image_embeds = self.visual_encoder(image) image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device) text.input_ids[:,0] = self.tokenizer.enc_token_id output = self.text_encoder(text.input_ids, attention_mask = text.attention_mask, encoder_hidden_states = image_embeds, encoder_attention_mask = image_atts, return_dict = True, ) return output.last_hidden_state class BLIP_Decoder(nn.Module): def __init__(self, med_config = 'configs/med_config.json', image_size = 384, vit = 'base', vit_grad_ckpt = False, vit_ckpt_layer = 0, prompt = 'a picture of ', ): """ Args: med_config (str): path for the mixture of encoder-decoder model's configuration file image_size (int): input image size vit (str): model size of vision transformer """ super().__init__() self.visual_encoder, vision_width = create_vit(vit,image_size, vit_grad_ckpt, vit_ckpt_layer) self.tokenizer = init_tokenizer() med_config = BertConfig.from_json_file(med_config) med_config.encoder_width = vision_width self.text_decoder = BertLMHeadModel(config=med_config) self.prompt = prompt self.prompt_length = len(self.tokenizer(self.prompt).input_ids)-1 def forward(self, image, caption): image_embeds = self.visual_encoder(image) image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device) text = self.tokenizer(caption, padding='longest', truncation=True, max_length=40, return_tensors="pt").to(image.device) text.input_ids[:,0] = self.tokenizer.bos_token_id decoder_targets = text.input_ids.masked_fill(text.input_ids == self.tokenizer.pad_token_id, -100) decoder_targets[:,:self.prompt_length] = -100 decoder_output = self.text_decoder(text.input_ids, attention_mask = text.attention_mask, 
encoder_hidden_states = image_embeds, encoder_attention_mask = image_atts, labels = decoder_targets, return_dict = True, ) loss_lm = decoder_output.loss return loss_lm def generate(self, image, sample=False, num_beams=5, max_length=30, min_length=10, top_p=0.9, repetition_penalty=1.0): image_embeds = self.visual_encoder(image) if not sample: image_embeds = image_embeds.repeat_interleave(num_beams,dim=0) image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device) model_kwargs = {"encoder_hidden_states": image_embeds, "encoder_attention_mask":image_atts} prompt = [self.prompt] * image.size(0) input_ids = self.tokenizer(prompt, return_tensors="pt").input_ids.to(image.device) input_ids[:,0] = self.tokenizer.bos_token_id input_ids = input_ids[:, :-1] if sample: #nucleus sampling outputs = self.text_decoder.generate(input_ids=input_ids, max_length=max_length, min_length=min_length, do_sample=True, top_p=top_p, num_return_sequences=3, eos_token_id=self.tokenizer.sep_token_id, pad_token_id=self.tokenizer.pad_token_id, repetition_penalty=1.1, **model_kwargs) else: #beam search outputs = self.text_decoder.generate(input_ids=input_ids, max_length=max_length, min_length=min_length, num_beams=num_beams, num_return_sequences=3, eos_token_id=self.tokenizer.sep_token_id, pad_token_id=self.tokenizer.pad_token_id, repetition_penalty=repetition_penalty, **model_kwargs) captions = [] for output in outputs: caption = self.tokenizer.decode(output, skip_special_tokens=True) captions.append(caption[len(self.prompt):]) return captions def blip_decoder(pretrained='',**kwargs): model = BLIP_Decoder(**kwargs) if pretrained: model,msg = load_checkpoint(model,pretrained) assert(len(msg.missing_keys)==0) return model def blip_feature_extractor(pretrained='',**kwargs): model = BLIP_Base(**kwargs) if pretrained: model,msg = load_checkpoint(model,pretrained) assert(len(msg.missing_keys)==0) return model def init_tokenizer(): tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') tokenizer.add_special_tokens({'bos_token':'[DEC]'}) tokenizer.add_special_tokens({'additional_special_tokens':['[ENC]']}) tokenizer.enc_token_id = tokenizer.additional_special_tokens_ids[0] return tokenizer def create_vit(vit, image_size, use_grad_checkpointing=False, ckpt_layer=0, drop_path_rate=0): assert vit in ['base', 'large'], "vit parameter must be base or large" if vit=='base': vision_width = 768 visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=12, num_heads=12, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer, drop_path_rate=0 or drop_path_rate ) elif vit=='large': vision_width = 1024 visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=24, num_heads=16, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer, drop_path_rate=0.1 or drop_path_rate ) return visual_encoder, vision_width def is_url(url_or_filename): parsed = urlparse(url_or_filename) return parsed.scheme in ("http", "https") def load_checkpoint(model,url_or_filename): if is_url(url_or_filename): cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True) checkpoint = torch.load(cached_file, map_location='cpu') elif os.path.isfile(url_or_filename): checkpoint = torch.load(url_or_filename, map_location='cpu') else: raise RuntimeError('checkpoint url or path is invalid') state_dict = checkpoint['model'] state_dict['visual_encoder.pos_embed'] = 
interpolate_pos_embed(state_dict['visual_encoder.pos_embed'],model.visual_encoder) if 'visual_encoder_m.pos_embed' in model.state_dict().keys(): state_dict['visual_encoder_m.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder_m.pos_embed'], model.visual_encoder_m) for key in model.state_dict().keys(): if key in state_dict.keys(): if state_dict[key].shape!=model.state_dict()[key].shape: del state_dict[key] msg = model.load_state_dict(state_dict,strict=False) print('load checkpoint from %s'%url_or_filename) return model,msg
[((261, 294), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (284, 294), False, 'import warnings\n'), ((8350, 8400), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['"""bert-base-uncased"""'], {}), "('bert-base-uncased')\n", (8379, 8400), False, 'from transformers import BertTokenizer\n'), ((9729, 9754), 'urllib.parse.urlparse', 'urlparse', (['url_or_filename'], {}), '(url_or_filename)\n', (9737, 9754), False, 'from urllib.parse import urlparse\n'), ((10327, 10415), 'models.vit.interpolate_pos_embed', 'interpolate_pos_embed', (["state_dict['visual_encoder.pos_embed']", 'model.visual_encoder'], {}), "(state_dict['visual_encoder.pos_embed'], model.\n visual_encoder)\n", (10348, 10415), False, 'from models.vit import VisionTransformer, interpolate_pos_embed\n'), ((1387, 1424), 'models.med.BertConfig.from_json_file', 'BertConfig.from_json_file', (['med_config'], {}), '(med_config)\n', (1412, 1424), False, 'from models.med import BertConfig, BertModel, BertLMHeadModel\n'), ((1501, 1554), 'models.med.BertModel', 'BertModel', ([], {'config': 'med_config', 'add_pooling_layer': '(False)'}), '(config=med_config, add_pooling_layer=False)\n', (1510, 1554), False, 'from models.med import BertConfig, BertModel, BertLMHeadModel\n'), ((3885, 3922), 'models.med.BertConfig.from_json_file', 'BertConfig.from_json_file', (['med_config'], {}), '(med_config)\n', (3910, 3922), False, 'from models.med import BertConfig, BertModel, BertLMHeadModel\n'), ((3999, 4033), 'models.med.BertLMHeadModel', 'BertLMHeadModel', ([], {'config': 'med_config'}), '(config=med_config)\n', (4014, 4033), False, 'from models.med import BertConfig, BertModel, BertLMHeadModel\n'), ((8886, 9107), 'models.vit.VisionTransformer', 'VisionTransformer', ([], {'img_size': 'image_size', 'patch_size': '(16)', 'embed_dim': 'vision_width', 'depth': '(12)', 'num_heads': '(12)', 'use_grad_checkpointing': 'use_grad_checkpointing', 'ckpt_layer': 'ckpt_layer', 'drop_path_rate': '(0 or drop_path_rate)'}), '(img_size=image_size, patch_size=16, embed_dim=\n vision_width, depth=12, num_heads=12, use_grad_checkpointing=\n use_grad_checkpointing, ckpt_layer=ckpt_layer, drop_path_rate=0 or\n drop_path_rate)\n', (8903, 9107), False, 'from models.vit import VisionTransformer, interpolate_pos_embed\n'), ((9900, 9970), 'timm.models.hub.download_cached_file', 'download_cached_file', (['url_or_filename'], {'check_hash': '(False)', 'progress': '(True)'}), '(url_or_filename, check_hash=False, progress=True)\n', (9920, 9970), False, 'from timm.models.hub import download_cached_file\n'), ((9992, 10035), 'torch.load', 'torch.load', (['cached_file'], {'map_location': '"""cpu"""'}), "(cached_file, map_location='cpu')\n", (10002, 10035), False, 'import torch\n'), ((10046, 10077), 'os.path.isfile', 'os.path.isfile', (['url_or_filename'], {}), '(url_or_filename)\n', (10060, 10077), False, 'import os\n'), ((10528, 10620), 'models.vit.interpolate_pos_embed', 'interpolate_pos_embed', (["state_dict['visual_encoder_m.pos_embed']", 'model.visual_encoder_m'], {}), "(state_dict['visual_encoder_m.pos_embed'], model.\n visual_encoder_m)\n", (10549, 10620), False, 'from models.vit import VisionTransformer, interpolate_pos_embed\n'), ((9303, 9526), 'models.vit.VisionTransformer', 'VisionTransformer', ([], {'img_size': 'image_size', 'patch_size': '(16)', 'embed_dim': 'vision_width', 'depth': '(24)', 'num_heads': '(16)', 'use_grad_checkpointing': 'use_grad_checkpointing', 'ckpt_layer': 'ckpt_layer', 
'drop_path_rate': '(0.1 or drop_path_rate)'}), '(img_size=image_size, patch_size=16, embed_dim=\n vision_width, depth=24, num_heads=16, use_grad_checkpointing=\n use_grad_checkpointing, ckpt_layer=ckpt_layer, drop_path_rate=0.1 or\n drop_path_rate)\n', (9320, 9526), False, 'from models.vit import VisionTransformer, interpolate_pos_embed\n'), ((10108, 10155), 'torch.load', 'torch.load', (['url_or_filename'], {'map_location': '"""cpu"""'}), "(url_or_filename, map_location='cpu')\n", (10118, 10155), False, 'import torch\n')]
OliviaNabbosa89/Disaster_Responses
venv/Lib/site-packages/pandas/tests/reshape/merge/test_multi.py
1e66d77c303cec685dfc2ca94f4fca4cc9400570
import numpy as np from numpy.random import randn import pytest import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series import pandas._testing as tm from pandas.core.reshape.concat import concat from pandas.core.reshape.merge import merge @pytest.fixture def left(): """left dataframe (not multi-indexed) for multi-index join tests""" # a little relevant example with NAs key1 = ["bar", "bar", "bar", "foo", "foo", "baz", "baz", "qux", "qux", "snap"] key2 = ["two", "one", "three", "one", "two", "one", "two", "two", "three", "one"] data = np.random.randn(len(key1)) return DataFrame({"key1": key1, "key2": key2, "data": data}) @pytest.fixture def right(): """right dataframe (multi-indexed) for multi-index join tests""" index = MultiIndex( levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]], codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], names=["key1", "key2"], ) return DataFrame( np.random.randn(10, 3), index=index, columns=["j_one", "j_two", "j_three"] ) @pytest.fixture def left_multi(): return DataFrame( dict( Origin=["A", "A", "B", "B", "C"], Destination=["A", "B", "A", "C", "A"], Period=["AM", "AM", "IP", "AM", "OP"], TripPurp=["hbw", "nhb", "hbo", "nhb", "hbw"], Trips=[1987, 3647, 2470, 4296, 4444], ), columns=["Origin", "Destination", "Period", "TripPurp", "Trips"], ).set_index(["Origin", "Destination", "Period", "TripPurp"]) @pytest.fixture def right_multi(): return DataFrame( dict( Origin=["A", "A", "B", "B", "C", "C", "E"], Destination=["A", "B", "A", "B", "A", "B", "F"], Period=["AM", "AM", "IP", "AM", "OP", "IP", "AM"], LinkType=["a", "b", "c", "b", "a", "b", "a"], Distance=[100, 80, 90, 80, 75, 35, 55], ), columns=["Origin", "Destination", "Period", "LinkType", "Distance"], ).set_index(["Origin", "Destination", "Period", "LinkType"]) @pytest.fixture def on_cols_multi(): return ["Origin", "Destination", "Period"] @pytest.fixture def idx_cols_multi(): return ["Origin", "Destination", "Period", "TripPurp", "LinkType"] class TestMergeMulti: def setup_method(self): self.index = MultiIndex( levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]], codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], names=["first", "second"], ) self.to_join = DataFrame( np.random.randn(10, 3), index=self.index, columns=["j_one", "j_two", "j_three"], ) # a little relevant example with NAs key1 = ["bar", "bar", "bar", "foo", "foo", "baz", "baz", "qux", "qux", "snap"] key2 = [ "two", "one", "three", "one", "two", "one", "two", "two", "three", "one", ] data = np.random.randn(len(key1)) self.data = DataFrame({"key1": key1, "key2": key2, "data": data}) def test_merge_on_multikey(self, left, right, join_type): on_cols = ["key1", "key2"] result = left.join(right, on=on_cols, how=join_type).reset_index(drop=True) expected = pd.merge(left, right.reset_index(), on=on_cols, how=join_type) tm.assert_frame_equal(result, expected) result = left.join(right, on=on_cols, how=join_type, sort=True).reset_index( drop=True ) expected = pd.merge( left, right.reset_index(), on=on_cols, how=join_type, sort=True ) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("sort", [False, True]) def test_left_join_multi_index(self, left, right, sort): icols = ["1st", "2nd", "3rd"] def bind_cols(df): iord = lambda a: 0 if a != a else ord(a) f = lambda ts: ts.map(iord) - ord("a") return f(df["1st"]) + f(df["3rd"]) * 1e2 + df["2nd"].fillna(0) * 1e4 def run_asserts(left, right, sort): res = left.join(right, on=icols, how="left", sort=sort) assert 
len(left) < len(res) + 1 assert not res["4th"].isna().any() assert not res["5th"].isna().any() tm.assert_series_equal(res["4th"], -res["5th"], check_names=False) result = bind_cols(res.iloc[:, :-2]) tm.assert_series_equal(res["4th"], result, check_names=False) assert result.name is None if sort: tm.assert_frame_equal(res, res.sort_values(icols, kind="mergesort")) out = merge(left, right.reset_index(), on=icols, sort=sort, how="left") res.index = np.arange(len(res)) tm.assert_frame_equal(out, res) lc = list(map(chr, np.arange(ord("a"), ord("z") + 1))) left = DataFrame(np.random.choice(lc, (5000, 2)), columns=["1st", "3rd"]) left.insert(1, "2nd", np.random.randint(0, 1000, len(left))) i = np.random.permutation(len(left)) right = left.iloc[i].copy() left["4th"] = bind_cols(left) right["5th"] = -bind_cols(right) right.set_index(icols, inplace=True) run_asserts(left, right, sort) # inject some nulls left.loc[1::23, "1st"] = np.nan left.loc[2::37, "2nd"] = np.nan left.loc[3::43, "3rd"] = np.nan left["4th"] = bind_cols(left) i = np.random.permutation(len(left)) right = left.iloc[i, :-1] right["5th"] = -bind_cols(right) right.set_index(icols, inplace=True) run_asserts(left, right, sort) @pytest.mark.parametrize("sort", [False, True]) def test_merge_right_vs_left(self, left, right, sort): # compare left vs right merge with multikey on_cols = ["key1", "key2"] merged_left_right = left.merge( right, left_on=on_cols, right_index=True, how="left", sort=sort ) merge_right_left = right.merge( left, right_on=on_cols, left_index=True, how="right", sort=sort ) # Reorder columns merge_right_left = merge_right_left[merged_left_right.columns] tm.assert_frame_equal(merged_left_right, merge_right_left) def test_merge_multiple_cols_with_mixed_cols_index(self): # GH29522 s = pd.Series( range(6), pd.MultiIndex.from_product([["A", "B"], [1, 2, 3]], names=["lev1", "lev2"]), name="Amount", ) df = pd.DataFrame( {"lev1": list("AAABBB"), "lev2": [1, 2, 3, 1, 2, 3], "col": 0} ) result = pd.merge(df, s.reset_index(), on=["lev1", "lev2"]) expected = pd.DataFrame( { "lev1": list("AAABBB"), "lev2": [1, 2, 3, 1, 2, 3], "col": [0] * 6, "Amount": range(6), } ) tm.assert_frame_equal(result, expected) def test_compress_group_combinations(self): # ~ 40000000 possible unique groups key1 = tm.rands_array(10, 10000) key1 = np.tile(key1, 2) key2 = key1[::-1] df = DataFrame({"key1": key1, "key2": key2, "value1": np.random.randn(20000)}) df2 = DataFrame( {"key1": key1[::2], "key2": key2[::2], "value2": np.random.randn(10000)} ) # just to hit the label compression code path merge(df, df2, how="outer") def test_left_join_index_preserve_order(self): on_cols = ["k1", "k2"] left = DataFrame( { "k1": [0, 1, 2] * 8, "k2": ["foo", "bar"] * 12, "v": np.array(np.arange(24), dtype=np.int64), } ) index = MultiIndex.from_tuples([(2, "bar"), (1, "foo")]) right = DataFrame({"v2": [5, 7]}, index=index) result = left.join(right, on=on_cols) expected = left.copy() expected["v2"] = np.nan expected.loc[(expected.k1 == 2) & (expected.k2 == "bar"), "v2"] = 5 expected.loc[(expected.k1 == 1) & (expected.k2 == "foo"), "v2"] = 7 tm.assert_frame_equal(result, expected) result.sort_values(on_cols, kind="mergesort", inplace=True) expected = left.join(right, on=on_cols, sort=True) tm.assert_frame_equal(result, expected) # test join with multi dtypes blocks left = DataFrame( { "k1": [0, 1, 2] * 8, "k2": ["foo", "bar"] * 12, "k3": np.array([0, 1, 2] * 8, dtype=np.float32), "v": np.array(np.arange(24), dtype=np.int32), } ) index = MultiIndex.from_tuples([(2, "bar"), (1, 
"foo")]) right = DataFrame({"v2": [5, 7]}, index=index) result = left.join(right, on=on_cols) expected = left.copy() expected["v2"] = np.nan expected.loc[(expected.k1 == 2) & (expected.k2 == "bar"), "v2"] = 5 expected.loc[(expected.k1 == 1) & (expected.k2 == "foo"), "v2"] = 7 tm.assert_frame_equal(result, expected) result = result.sort_values(on_cols, kind="mergesort") expected = left.join(right, on=on_cols, sort=True) tm.assert_frame_equal(result, expected) def test_left_join_index_multi_match_multiindex(self): left = DataFrame( [ ["X", "Y", "C", "a"], ["W", "Y", "C", "e"], ["V", "Q", "A", "h"], ["V", "R", "D", "i"], ["X", "Y", "D", "b"], ["X", "Y", "A", "c"], ["W", "Q", "B", "f"], ["W", "R", "C", "g"], ["V", "Y", "C", "j"], ["X", "Y", "B", "d"], ], columns=["cola", "colb", "colc", "tag"], index=[3, 2, 0, 1, 7, 6, 4, 5, 9, 8], ) right = DataFrame( [ ["W", "R", "C", 0], ["W", "Q", "B", 3], ["W", "Q", "B", 8], ["X", "Y", "A", 1], ["X", "Y", "A", 4], ["X", "Y", "B", 5], ["X", "Y", "C", 6], ["X", "Y", "C", 9], ["X", "Q", "C", -6], ["X", "R", "C", -9], ["V", "Y", "C", 7], ["V", "R", "D", 2], ["V", "R", "D", -1], ["V", "Q", "A", -3], ], columns=["col1", "col2", "col3", "val"], ).set_index(["col1", "col2", "col3"]) result = left.join(right, on=["cola", "colb", "colc"], how="left") expected = DataFrame( [ ["X", "Y", "C", "a", 6], ["X", "Y", "C", "a", 9], ["W", "Y", "C", "e", np.nan], ["V", "Q", "A", "h", -3], ["V", "R", "D", "i", 2], ["V", "R", "D", "i", -1], ["X", "Y", "D", "b", np.nan], ["X", "Y", "A", "c", 1], ["X", "Y", "A", "c", 4], ["W", "Q", "B", "f", 3], ["W", "Q", "B", "f", 8], ["W", "R", "C", "g", 0], ["V", "Y", "C", "j", 7], ["X", "Y", "B", "d", 5], ], columns=["cola", "colb", "colc", "tag", "val"], index=[3, 3, 2, 0, 1, 1, 7, 6, 6, 4, 4, 5, 9, 8], ) tm.assert_frame_equal(result, expected) result = left.join(right, on=["cola", "colb", "colc"], how="left", sort=True) expected = expected.sort_values(["cola", "colb", "colc"], kind="mergesort") tm.assert_frame_equal(result, expected) def test_left_join_index_multi_match(self): left = DataFrame( [["c", 0], ["b", 1], ["a", 2], ["b", 3]], columns=["tag", "val"], index=[2, 0, 1, 3], ) right = DataFrame( [ ["a", "v"], ["c", "w"], ["c", "x"], ["d", "y"], ["a", "z"], ["c", "r"], ["e", "q"], ["c", "s"], ], columns=["tag", "char"], ).set_index("tag") result = left.join(right, on="tag", how="left") expected = DataFrame( [ ["c", 0, "w"], ["c", 0, "x"], ["c", 0, "r"], ["c", 0, "s"], ["b", 1, np.nan], ["a", 2, "v"], ["a", 2, "z"], ["b", 3, np.nan], ], columns=["tag", "val", "char"], index=[2, 2, 2, 2, 0, 1, 1, 3], ) tm.assert_frame_equal(result, expected) result = left.join(right, on="tag", how="left", sort=True) expected2 = expected.sort_values("tag", kind="mergesort") tm.assert_frame_equal(result, expected2) # GH7331 - maintain left frame order in left merge result = merge(left, right.reset_index(), how="left", on="tag") expected.index = np.arange(len(expected)) tm.assert_frame_equal(result, expected) def test_left_merge_na_buglet(self): left = DataFrame( { "id": list("abcde"), "v1": randn(5), "v2": randn(5), "dummy": list("abcde"), "v3": randn(5), }, columns=["id", "v1", "v2", "dummy", "v3"], ) right = DataFrame( { "id": ["a", "b", np.nan, np.nan, np.nan], "sv3": [1.234, 5.678, np.nan, np.nan, np.nan], } ) result = merge(left, right, on="id", how="left") rdf = right.drop(["id"], axis=1) expected = left.join(rdf) tm.assert_frame_equal(result, expected) def test_merge_na_keys(self): data = [ [1950, "A", 1.5], [1950, "B", 1.5], [1955, "B", 1.5], [1960, 
"B", np.nan], [1970, "B", 4.0], [1950, "C", 4.0], [1960, "C", np.nan], [1965, "C", 3.0], [1970, "C", 4.0], ] frame = DataFrame(data, columns=["year", "panel", "data"]) other_data = [ [1960, "A", np.nan], [1970, "A", np.nan], [1955, "A", np.nan], [1965, "A", np.nan], [1965, "B", np.nan], [1955, "C", np.nan], ] other = DataFrame(other_data, columns=["year", "panel", "data"]) result = frame.merge(other, how="outer") expected = frame.fillna(-999).merge(other.fillna(-999), how="outer") expected = expected.replace(-999, np.nan) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("klass", [None, np.asarray, Series, Index]) def test_merge_datetime_index(self, klass): # see gh-19038 df = DataFrame( [1, 2, 3], ["2016-01-01", "2017-01-01", "2018-01-01"], columns=["a"] ) df.index = pd.to_datetime(df.index) on_vector = df.index.year if klass is not None: on_vector = klass(on_vector) expected = DataFrame({"a": [1, 2, 3], "key_1": [2016, 2017, 2018]}) result = df.merge(df, on=["a", on_vector], how="inner") tm.assert_frame_equal(result, expected) expected = DataFrame( {"key_0": [2016, 2017, 2018], "a_x": [1, 2, 3], "a_y": [1, 2, 3]} ) result = df.merge(df, on=[df.index.year], how="inner") tm.assert_frame_equal(result, expected) def test_join_multi_levels(self): # GH 3662 # merge multi-levels household = DataFrame( dict( household_id=[1, 2, 3], male=[0, 1, 0], wealth=[196087.3, 316478.7, 294750], ), columns=["household_id", "male", "wealth"], ).set_index("household_id") portfolio = DataFrame( dict( household_id=[1, 2, 2, 3, 3, 3, 4], asset_id=[ "nl0000301109", "nl0000289783", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "nl0000289965", np.nan, ], name=[ "ABN Amro", "Robeco", "Royal Dutch Shell", "Royal Dutch Shell", "AAB Eastern Europe Equity Fund", "Postbank BioTech Fonds", np.nan, ], share=[1.0, 0.4, 0.6, 0.15, 0.6, 0.25, 1.0], ), columns=["household_id", "asset_id", "name", "share"], ).set_index(["household_id", "asset_id"]) result = household.join(portfolio, how="inner") expected = ( DataFrame( dict( male=[0, 1, 1, 0, 0, 0], wealth=[196087.3, 316478.7, 316478.7, 294750.0, 294750.0, 294750.0], name=[ "ABN Amro", "Robeco", "Royal Dutch Shell", "Royal Dutch Shell", "AAB Eastern Europe Equity Fund", "Postbank BioTech Fonds", ], share=[1.00, 0.40, 0.60, 0.15, 0.60, 0.25], household_id=[1, 2, 2, 3, 3, 3], asset_id=[ "nl0000301109", "nl0000289783", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "nl0000289965", ], ) ) .set_index(["household_id", "asset_id"]) .reindex(columns=["male", "wealth", "name", "share"]) ) tm.assert_frame_equal(result, expected) # equivalency result = merge( household.reset_index(), portfolio.reset_index(), on=["household_id"], how="inner", ).set_index(["household_id", "asset_id"]) tm.assert_frame_equal(result, expected) result = household.join(portfolio, how="outer") expected = concat( [ expected, ( DataFrame( dict(share=[1.00]), index=MultiIndex.from_tuples( [(4, np.nan)], names=["household_id", "asset_id"] ), ) ), ], axis=0, sort=True, ).reindex(columns=expected.columns) tm.assert_frame_equal(result, expected) # invalid cases household.index.name = "foo" with pytest.raises( ValueError, match="cannot join with no overlapping index names" ): household.join(portfolio, how="inner") portfolio2 = portfolio.copy() portfolio2.index.set_names(["household_id", "foo"]) with pytest.raises(ValueError, match="columns overlap but no suffix specified"): portfolio2.join(portfolio, how="inner") def test_join_multi_levels2(self): # some more advanced merges # GH6360 household = 
DataFrame( dict( household_id=[1, 2, 2, 3, 3, 3, 4], asset_id=[ "nl0000301109", "nl0000301109", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "nl0000289965", np.nan, ], share=[1.0, 0.4, 0.6, 0.15, 0.6, 0.25, 1.0], ), columns=["household_id", "asset_id", "share"], ).set_index(["household_id", "asset_id"]) log_return = DataFrame( dict( asset_id=[ "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "lu0197800237", ], t=[233, 234, 235, 180, 181], log_return=[0.09604978, -0.06524096, 0.03532373, 0.03025441, 0.036997], ) ).set_index(["asset_id", "t"]) expected = ( DataFrame( dict( household_id=[2, 2, 2, 3, 3, 3, 3, 3], asset_id=[ "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "lu0197800237", ], t=[233, 234, 235, 233, 234, 235, 180, 181], share=[0.6, 0.6, 0.6, 0.15, 0.15, 0.15, 0.6, 0.6], log_return=[ 0.09604978, -0.06524096, 0.03532373, 0.09604978, -0.06524096, 0.03532373, 0.03025441, 0.036997, ], ) ) .set_index(["household_id", "asset_id", "t"]) .reindex(columns=["share", "log_return"]) ) # this is the equivalency result = merge( household.reset_index(), log_return.reset_index(), on=["asset_id"], how="inner", ).set_index(["household_id", "asset_id", "t"]) tm.assert_frame_equal(result, expected) expected = ( DataFrame( dict( household_id=[1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4], asset_id=[ "nl0000301109", "nl0000301109", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29", "lu0197800237", "lu0197800237", "nl0000289965", None, ], t=[None, None, 233, 234, 235, 233, 234, 235, 180, 181, None, None], share=[ 1.0, 0.4, 0.6, 0.6, 0.6, 0.15, 0.15, 0.15, 0.6, 0.6, 0.25, 1.0, ], log_return=[ None, None, 0.09604978, -0.06524096, 0.03532373, 0.09604978, -0.06524096, 0.03532373, 0.03025441, 0.036997, None, None, ], ) ) .set_index(["household_id", "asset_id", "t"]) .reindex(columns=["share", "log_return"]) ) result = merge( household.reset_index(), log_return.reset_index(), on=["asset_id"], how="outer", ).set_index(["household_id", "asset_id", "t"]) tm.assert_frame_equal(result, expected) class TestJoinMultiMulti: def test_join_multi_multi( self, left_multi, right_multi, join_type, on_cols_multi, idx_cols_multi ): # Multi-index join tests expected = ( pd.merge( left_multi.reset_index(), right_multi.reset_index(), how=join_type, on=on_cols_multi, ) .set_index(idx_cols_multi) .sort_index() ) result = left_multi.join(right_multi, how=join_type).sort_index() tm.assert_frame_equal(result, expected) def test_join_multi_empty_frames( self, left_multi, right_multi, join_type, on_cols_multi, idx_cols_multi ): left_multi = left_multi.drop(columns=left_multi.columns) right_multi = right_multi.drop(columns=right_multi.columns) expected = ( pd.merge( left_multi.reset_index(), right_multi.reset_index(), how=join_type, on=on_cols_multi, ) .set_index(idx_cols_multi) .sort_index() ) result = left_multi.join(right_multi, how=join_type).sort_index() tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("box", [None, np.asarray, Series, Index]) def test_merge_datetime_index(self, box): # see gh-19038 df = DataFrame( [1, 2, 3], ["2016-01-01", "2017-01-01", "2018-01-01"], columns=["a"] ) df.index = pd.to_datetime(df.index) on_vector = df.index.year if box is not None: on_vector = box(on_vector) expected = DataFrame({"a": [1, 2, 3], "key_1": [2016, 2017, 2018]}) result = df.merge(df, on=["a", on_vector], how="inner") tm.assert_frame_equal(result, expected) expected = DataFrame( {"key_0": [2016, 2017, 
2018], "a_x": [1, 2, 3], "a_y": [1, 2, 3]} ) result = df.merge(df, on=[df.index.year], how="inner") tm.assert_frame_equal(result, expected) def test_single_common_level(self): index_left = pd.MultiIndex.from_tuples( [("K0", "X0"), ("K0", "X1"), ("K1", "X2")], names=["key", "X"] ) left = pd.DataFrame( {"A": ["A0", "A1", "A2"], "B": ["B0", "B1", "B2"]}, index=index_left ) index_right = pd.MultiIndex.from_tuples( [("K0", "Y0"), ("K1", "Y1"), ("K2", "Y2"), ("K2", "Y3")], names=["key", "Y"] ) right = pd.DataFrame( {"C": ["C0", "C1", "C2", "C3"], "D": ["D0", "D1", "D2", "D3"]}, index=index_right, ) result = left.join(right) expected = pd.merge( left.reset_index(), right.reset_index(), on=["key"], how="inner" ).set_index(["key", "X", "Y"]) tm.assert_frame_equal(result, expected) def test_join_multi_wrong_order(self): # GH 25760 # GH 28956 midx1 = pd.MultiIndex.from_product([[1, 2], [3, 4]], names=["a", "b"]) midx3 = pd.MultiIndex.from_tuples([(4, 1), (3, 2), (3, 1)], names=["b", "a"]) left = pd.DataFrame(index=midx1, data={"x": [10, 20, 30, 40]}) right = pd.DataFrame(index=midx3, data={"y": ["foo", "bar", "fing"]}) result = left.join(right) expected = pd.DataFrame( index=midx1, data={"x": [10, 20, 30, 40], "y": ["fing", "foo", "bar", np.nan]}, ) tm.assert_frame_equal(result, expected)
[((641, 694), 'pandas.DataFrame', 'DataFrame', (["{'key1': key1, 'key2': key2, 'data': data}"], {}), "({'key1': key1, 'key2': key2, 'data': data})\n", (650, 694), False, 'from pandas import DataFrame, Index, MultiIndex, Series\n'), ((813, 991), 'pandas.MultiIndex', 'MultiIndex', ([], {'levels': "[['foo', 'bar', 'baz', 'qux'], ['one', 'two', 'three']]", 'codes': '[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]]', 'names': "['key1', 'key2']"}), "(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two', 'three']],\n codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['key1', 'key2'])\n", (823, 991), False, 'from pandas import DataFrame, Index, MultiIndex, Series\n'), ((3945, 3991), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sort"""', '[False, True]'], {}), "('sort', [False, True])\n", (3968, 3991), False, 'import pytest\n'), ((5995, 6041), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sort"""', '[False, True]'], {}), "('sort', [False, True])\n", (6018, 6041), False, 'import pytest\n'), ((15436, 15503), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""klass"""', '[None, np.asarray, Series, Index]'], {}), "('klass', [None, np.asarray, Series, Index])\n", (15459, 15503), False, 'import pytest\n'), ((26189, 26254), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""box"""', '[None, np.asarray, Series, Index]'], {}), "('box', [None, np.asarray, Series, Index])\n", (26212, 26254), False, 'import pytest\n'), ((1053, 1075), 'numpy.random.randn', 'np.random.randn', (['(10)', '(3)'], {}), '(10, 3)\n', (1068, 1075), True, 'import numpy as np\n'), ((2442, 2623), 'pandas.MultiIndex', 'MultiIndex', ([], {'levels': "[['foo', 'bar', 'baz', 'qux'], ['one', 'two', 'three']]", 'codes': '[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]]', 'names': "['first', 'second']"}), "(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two', 'three']],\n codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['first', 'second'])\n", (2452, 2623), False, 'from pandas import DataFrame, Index, MultiIndex, Series\n'), ((3268, 3321), 'pandas.DataFrame', 'DataFrame', (["{'key1': key1, 'key2': key2, 'data': data}"], {}), "({'key1': key1, 'key2': key2, 'data': data})\n", (3277, 3321), False, 'from pandas import DataFrame, Index, MultiIndex, Series\n'), ((3604, 3643), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (3625, 3643), True, 'import pandas._testing as tm\n'), ((3897, 3936), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (3918, 3936), True, 'import pandas._testing as tm\n'), ((6563, 6621), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['merged_left_right', 'merge_right_left'], {}), '(merged_left_right, merge_right_left)\n', (6584, 6621), True, 'import pandas._testing as tm\n'), ((7306, 7345), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (7327, 7345), True, 'import pandas._testing as tm\n'), ((7460, 7485), 'pandas._testing.rands_array', 'tm.rands_array', (['(10)', '(10000)'], {}), '(10, 10000)\n', (7474, 7485), True, 'import pandas._testing as tm\n'), ((7502, 7518), 'numpy.tile', 'np.tile', (['key1', '(2)'], {}), '(key1, 2)\n', (7509, 7518), True, 'import numpy as np\n'), ((7827, 7854), 'pandas.core.reshape.merge.merge', 'merge', (['df', 'df2'], {'how': '"""outer"""'}), 
"(df, df2, how='outer')\n", (7832, 7854), False, 'from pandas.core.reshape.merge import merge\n'), ((8175, 8223), 'pandas.MultiIndex.from_tuples', 'MultiIndex.from_tuples', (["[(2, 'bar'), (1, 'foo')]"], {}), "([(2, 'bar'), (1, 'foo')])\n", (8197, 8223), False, 'from pandas import DataFrame, Index, MultiIndex, Series\n'), ((8241, 8279), 'pandas.DataFrame', 'DataFrame', (["{'v2': [5, 7]}"], {'index': 'index'}), "({'v2': [5, 7]}, index=index)\n", (8250, 8279), False, 'from pandas import DataFrame, Index, MultiIndex, Series\n'), ((8561, 8600), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (8582, 8600), True, 'import pandas._testing as tm\n'), ((8743, 8782), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (8764, 8782), True, 'import pandas._testing as tm\n'), ((9129, 9177), 'pandas.MultiIndex.from_tuples', 'MultiIndex.from_tuples', (["[(2, 'bar'), (1, 'foo')]"], {}), "([(2, 'bar'), (1, 'foo')])\n", (9151, 9177), False, 'from pandas import DataFrame, Index, MultiIndex, Series\n'), ((9195, 9233), 'pandas.DataFrame', 'DataFrame', (["{'v2': [5, 7]}"], {'index': 'index'}), "({'v2': [5, 7]}, index=index)\n", (9204, 9233), False, 'from pandas import DataFrame, Index, MultiIndex, Series\n'), ((9515, 9554), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (9536, 9554), True, 'import pandas._testing as tm\n'), ((9692, 9731), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (9713, 9731), True, 'import pandas._testing as tm\n'), ((9810, 10136), 'pandas.DataFrame', 'DataFrame', (["[['X', 'Y', 'C', 'a'], ['W', 'Y', 'C', 'e'], ['V', 'Q', 'A', 'h'], ['V',\n 'R', 'D', 'i'], ['X', 'Y', 'D', 'b'], ['X', 'Y', 'A', 'c'], ['W', 'Q',\n 'B', 'f'], ['W', 'R', 'C', 'g'], ['V', 'Y', 'C', 'j'], ['X', 'Y', 'B', 'd']\n ]"], {'columns': "['cola', 'colb', 'colc', 'tag']", 'index': '[3, 2, 0, 1, 7, 6, 4, 5, 9, 8]'}), "([['X', 'Y', 'C', 'a'], ['W', 'Y', 'C', 'e'], ['V', 'Q', 'A', 'h'],\n ['V', 'R', 'D', 'i'], ['X', 'Y', 'D', 'b'], ['X', 'Y', 'A', 'c'], ['W',\n 'Q', 'B', 'f'], ['W', 'R', 'C', 'g'], ['V', 'Y', 'C', 'j'], ['X', 'Y',\n 'B', 'd']], columns=['cola', 'colb', 'colc', 'tag'], index=[3, 2, 0, 1,\n 7, 6, 4, 5, 9, 8])\n", (9819, 10136), False, 'from pandas import DataFrame, Index, MultiIndex, Series\n'), ((11142, 11638), 'pandas.DataFrame', 'DataFrame', (["[['X', 'Y', 'C', 'a', 6], ['X', 'Y', 'C', 'a', 9], ['W', 'Y', 'C', 'e', np.\n nan], ['V', 'Q', 'A', 'h', -3], ['V', 'R', 'D', 'i', 2], ['V', 'R', 'D',\n 'i', -1], ['X', 'Y', 'D', 'b', np.nan], ['X', 'Y', 'A', 'c', 1], ['X',\n 'Y', 'A', 'c', 4], ['W', 'Q', 'B', 'f', 3], ['W', 'Q', 'B', 'f', 8], [\n 'W', 'R', 'C', 'g', 0], ['V', 'Y', 'C', 'j', 7], ['X', 'Y', 'B', 'd', 5]]"], {'columns': "['cola', 'colb', 'colc', 'tag', 'val']", 'index': '[3, 3, 2, 0, 1, 1, 7, 6, 6, 4, 4, 5, 9, 8]'}), "([['X', 'Y', 'C', 'a', 6], ['X', 'Y', 'C', 'a', 9], ['W', 'Y', 'C',\n 'e', np.nan], ['V', 'Q', 'A', 'h', -3], ['V', 'R', 'D', 'i', 2], ['V',\n 'R', 'D', 'i', -1], ['X', 'Y', 'D', 'b', np.nan], ['X', 'Y', 'A', 'c', \n 1], ['X', 'Y', 'A', 'c', 4], ['W', 'Q', 'B', 'f', 3], ['W', 'Q', 'B',\n 'f', 8], ['W', 'R', 'C', 'g', 0], ['V', 'Y', 'C', 'j', 7], ['X', 'Y',\n 'B', 'd', 5]], columns=['cola', 'colb', 'colc', 'tag', 'val'], index=[3,\n 3, 2, 0, 1, 1, 7, 6, 6, 4, 4, 5, 9, 8])\n", (11151, 11638), False, 'from pandas import 
DataFrame, Index, MultiIndex, Series\n'), ((11930, 11969), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (11951, 11969), True, 'import pandas._testing as tm\n'), ((12157, 12196), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (12178, 12196), True, 'import pandas._testing as tm\n'), ((12264, 12363), 'pandas.DataFrame', 'DataFrame', (["[['c', 0], ['b', 1], ['a', 2], ['b', 3]]"], {'columns': "['tag', 'val']", 'index': '[2, 0, 1, 3]'}), "([['c', 0], ['b', 1], ['a', 2], ['b', 3]], columns=['tag', 'val'],\n index=[2, 0, 1, 3])\n", (12273, 12363), False, 'from pandas import DataFrame, Index, MultiIndex, Series\n'), ((12851, 13061), 'pandas.DataFrame', 'DataFrame', (["[['c', 0, 'w'], ['c', 0, 'x'], ['c', 0, 'r'], ['c', 0, 's'], ['b', 1, np.\n nan], ['a', 2, 'v'], ['a', 2, 'z'], ['b', 3, np.nan]]"], {'columns': "['tag', 'val', 'char']", 'index': '[2, 2, 2, 2, 0, 1, 1, 3]'}), "([['c', 0, 'w'], ['c', 0, 'x'], ['c', 0, 'r'], ['c', 0, 's'], ['b',\n 1, np.nan], ['a', 2, 'v'], ['a', 2, 'z'], ['b', 3, np.nan]], columns=[\n 'tag', 'val', 'char'], index=[2, 2, 2, 2, 0, 1, 1, 3])\n", (12860, 13061), False, 'from pandas import DataFrame, Index, MultiIndex, Series\n'), ((13267, 13306), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (13288, 13306), True, 'import pandas._testing as tm\n'), ((13455, 13495), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected2'], {}), '(result, expected2)\n', (13476, 13495), True, 'import pandas._testing as tm\n'), ((13691, 13730), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (13712, 13730), True, 'import pandas._testing as tm\n'), ((14095, 14199), 'pandas.DataFrame', 'DataFrame', (["{'id': ['a', 'b', np.nan, np.nan, np.nan], 'sv3': [1.234, 5.678, np.nan, np\n .nan, np.nan]}"], {}), "({'id': ['a', 'b', np.nan, np.nan, np.nan], 'sv3': [1.234, 5.678,\n np.nan, np.nan, np.nan]})\n", (14104, 14199), False, 'from pandas import DataFrame, Index, MultiIndex, Series\n'), ((14290, 14329), 'pandas.core.reshape.merge.merge', 'merge', (['left', 'right'], {'on': '"""id"""', 'how': '"""left"""'}), "(left, right, on='id', how='left')\n", (14295, 14329), False, 'from pandas.core.reshape.merge import merge\n'), ((14418, 14457), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (14439, 14457), True, 'import pandas._testing as tm\n'), ((14828, 14878), 'pandas.DataFrame', 'DataFrame', (['data'], {'columns': "['year', 'panel', 'data']"}), "(data, columns=['year', 'panel', 'data'])\n", (14837, 14878), False, 'from pandas import DataFrame, Index, MultiIndex, Series\n'), ((15137, 15193), 'pandas.DataFrame', 'DataFrame', (['other_data'], {'columns': "['year', 'panel', 'data']"}), "(other_data, columns=['year', 'panel', 'data'])\n", (15146, 15193), False, 'from pandas import DataFrame, Index, MultiIndex, Series\n'), ((15388, 15427), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (15409, 15427), True, 'import pandas._testing as tm\n'), ((15591, 15670), 'pandas.DataFrame', 'DataFrame', (['[1, 2, 3]', "['2016-01-01', '2017-01-01', '2018-01-01']"], {'columns': "['a']"}), "([1, 2, 3], ['2016-01-01', '2017-01-01', '2018-01-01'], columns=['a'])\n", (15600, 15670), 
False, 'from pandas import DataFrame, Index, MultiIndex, Series\n'), ((15715, 15739), 'pandas.to_datetime', 'pd.to_datetime', (['df.index'], {}), '(df.index)\n', (15729, 15739), True, 'import pandas as pd\n'), ((15872, 15928), 'pandas.DataFrame', 'DataFrame', (["{'a': [1, 2, 3], 'key_1': [2016, 2017, 2018]}"], {}), "({'a': [1, 2, 3], 'key_1': [2016, 2017, 2018]})\n", (15881, 15928), False, 'from pandas import DataFrame, Index, MultiIndex, Series\n'), ((16005, 16044), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (16026, 16044), True, 'import pandas._testing as tm\n'), ((16067, 16143), 'pandas.DataFrame', 'DataFrame', (["{'key_0': [2016, 2017, 2018], 'a_x': [1, 2, 3], 'a_y': [1, 2, 3]}"], {}), "({'key_0': [2016, 2017, 2018], 'a_x': [1, 2, 3], 'a_y': [1, 2, 3]})\n", (16076, 16143), False, 'from pandas import DataFrame, Index, MultiIndex, Series\n'), ((16243, 16282), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (16264, 16282), True, 'import pandas._testing as tm\n'), ((18771, 18810), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (18792, 18810), True, 'import pandas._testing as tm\n'), ((19057, 19096), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (19078, 19096), True, 'import pandas._testing as tm\n'), ((19642, 19681), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (19663, 19681), True, 'import pandas._testing as tm\n'), ((22759, 22798), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (22780, 22798), True, 'import pandas._testing as tm\n'), ((24850, 24889), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (24871, 24889), True, 'import pandas._testing as tm\n'), ((25454, 25493), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (25475, 25493), True, 'import pandas._testing as tm\n'), ((26141, 26180), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (26162, 26180), True, 'import pandas._testing as tm\n'), ((26340, 26419), 'pandas.DataFrame', 'DataFrame', (['[1, 2, 3]', "['2016-01-01', '2017-01-01', '2018-01-01']"], {'columns': "['a']"}), "([1, 2, 3], ['2016-01-01', '2017-01-01', '2018-01-01'], columns=['a'])\n", (26349, 26419), False, 'from pandas import DataFrame, Index, MultiIndex, Series\n'), ((26464, 26488), 'pandas.to_datetime', 'pd.to_datetime', (['df.index'], {}), '(df.index)\n', (26478, 26488), True, 'import pandas as pd\n'), ((26617, 26673), 'pandas.DataFrame', 'DataFrame', (["{'a': [1, 2, 3], 'key_1': [2016, 2017, 2018]}"], {}), "({'a': [1, 2, 3], 'key_1': [2016, 2017, 2018]})\n", (26626, 26673), False, 'from pandas import DataFrame, Index, MultiIndex, Series\n'), ((26750, 26789), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (26771, 26789), True, 'import pandas._testing as tm\n'), ((26812, 26888), 'pandas.DataFrame', 'DataFrame', (["{'key_0': [2016, 2017, 2018], 'a_x': [1, 2, 3], 'a_y': [1, 2, 3]}"], {}), "({'key_0': [2016, 2017, 2018], 'a_x': [1, 2, 3], 'a_y': [1, 2, 3]})\n", (26821, 26888), False, 
'from pandas import DataFrame, Index, MultiIndex, Series\n'), ((26988, 27027), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (27009, 27027), True, 'import pandas._testing as tm\n'), ((27093, 27187), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (["[('K0', 'X0'), ('K0', 'X1'), ('K1', 'X2')]"], {'names': "['key', 'X']"}), "([('K0', 'X0'), ('K0', 'X1'), ('K1', 'X2')], names\n =['key', 'X'])\n", (27118, 27187), True, 'import pandas as pd\n'), ((27225, 27312), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': ['A0', 'A1', 'A2'], 'B': ['B0', 'B1', 'B2']}"], {'index': 'index_left'}), "({'A': ['A0', 'A1', 'A2'], 'B': ['B0', 'B1', 'B2']}, index=\n index_left)\n", (27237, 27312), True, 'import pandas as pd\n'), ((27357, 27464), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (["[('K0', 'Y0'), ('K1', 'Y1'), ('K2', 'Y2'), ('K2', 'Y3')]"], {'names': "['key', 'Y']"}), "([('K0', 'Y0'), ('K1', 'Y1'), ('K2', 'Y2'), ('K2',\n 'Y3')], names=['key', 'Y'])\n", (27382, 27464), True, 'import pandas as pd\n'), ((27504, 27603), 'pandas.DataFrame', 'pd.DataFrame', (["{'C': ['C0', 'C1', 'C2', 'C3'], 'D': ['D0', 'D1', 'D2', 'D3']}"], {'index': 'index_right'}), "({'C': ['C0', 'C1', 'C2', 'C3'], 'D': ['D0', 'D1', 'D2', 'D3']},\n index=index_right)\n", (27516, 27603), True, 'import pandas as pd\n'), ((27834, 27873), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (27855, 27873), True, 'import pandas._testing as tm\n'), ((27979, 28041), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (['[[1, 2], [3, 4]]'], {'names': "['a', 'b']"}), "([[1, 2], [3, 4]], names=['a', 'b'])\n", (28005, 28041), True, 'import pandas as pd\n'), ((28059, 28128), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (['[(4, 1), (3, 2), (3, 1)]'], {'names': "['b', 'a']"}), "([(4, 1), (3, 2), (3, 1)], names=['b', 'a'])\n", (28084, 28128), True, 'import pandas as pd\n'), ((28147, 28202), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'midx1', 'data': "{'x': [10, 20, 30, 40]}"}), "(index=midx1, data={'x': [10, 20, 30, 40]})\n", (28159, 28202), True, 'import pandas as pd\n'), ((28220, 28281), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'midx3', 'data': "{'y': ['foo', 'bar', 'fing']}"}), "(index=midx3, data={'y': ['foo', 'bar', 'fing']})\n", (28232, 28281), True, 'import pandas as pd\n'), ((28341, 28437), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'midx1', 'data': "{'x': [10, 20, 30, 40], 'y': ['fing', 'foo', 'bar', np.nan]}"}), "(index=midx1, data={'x': [10, 20, 30, 40], 'y': ['fing', 'foo',\n 'bar', np.nan]})\n", (28353, 28437), True, 'import pandas as pd\n'), ((28483, 28522), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (28504, 28522), True, 'import pandas._testing as tm\n'), ((2715, 2737), 'numpy.random.randn', 'np.random.randn', (['(10)', '(3)'], {}), '(10, 3)\n', (2730, 2737), True, 'import numpy as np\n'), ((4585, 4651), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (["res['4th']", "(-res['5th'])"], {'check_names': '(False)'}), "(res['4th'], -res['5th'], check_names=False)\n", (4607, 4651), True, 'import pandas._testing as tm\n'), ((4715, 4776), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (["res['4th']", 'result'], {'check_names': '(False)'}), "(res['4th'], result, check_names=False)\n", (4737, 4776), True, 'import pandas._testing as 
tm\n'), ((5074, 5105), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['out', 'res'], {}), '(out, res)\n', (5095, 5105), True, 'import pandas._testing as tm\n'), ((5198, 5229), 'numpy.random.choice', 'np.random.choice', (['lc', '(5000, 2)'], {}), '(lc, (5000, 2))\n', (5214, 5229), True, 'import numpy as np\n'), ((6766, 6841), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (["[['A', 'B'], [1, 2, 3]]"], {'names': "['lev1', 'lev2']"}), "([['A', 'B'], [1, 2, 3]], names=['lev1', 'lev2'])\n", (6792, 6841), True, 'import pandas as pd\n'), ((19763, 19841), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""cannot join with no overlapping index names"""'}), "(ValueError, match='cannot join with no overlapping index names')\n", (19776, 19841), False, 'import pytest\n'), ((20037, 20111), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""columns overlap but no suffix specified"""'}), "(ValueError, match='columns overlap but no suffix specified')\n", (20050, 20111), False, 'import pytest\n'), ((7611, 7633), 'numpy.random.randn', 'np.random.randn', (['(20000)'], {}), '(20000)\n', (7626, 7633), True, 'import numpy as np\n'), ((7726, 7748), 'numpy.random.randn', 'np.random.randn', (['(10000)'], {}), '(10000)\n', (7741, 7748), True, 'import numpy as np\n'), ((8978, 9019), 'numpy.array', 'np.array', (['([0, 1, 2] * 8)'], {'dtype': 'np.float32'}), '([0, 1, 2] * 8, dtype=np.float32)\n', (8986, 9019), True, 'import numpy as np\n'), ((10377, 10731), 'pandas.DataFrame', 'DataFrame', (["[['W', 'R', 'C', 0], ['W', 'Q', 'B', 3], ['W', 'Q', 'B', 8], ['X', 'Y', 'A',\n 1], ['X', 'Y', 'A', 4], ['X', 'Y', 'B', 5], ['X', 'Y', 'C', 6], ['X',\n 'Y', 'C', 9], ['X', 'Q', 'C', -6], ['X', 'R', 'C', -9], ['V', 'Y', 'C',\n 7], ['V', 'R', 'D', 2], ['V', 'R', 'D', -1], ['V', 'Q', 'A', -3]]"], {'columns': "['col1', 'col2', 'col3', 'val']"}), "([['W', 'R', 'C', 0], ['W', 'Q', 'B', 3], ['W', 'Q', 'B', 8], ['X',\n 'Y', 'A', 1], ['X', 'Y', 'A', 4], ['X', 'Y', 'B', 5], ['X', 'Y', 'C', 6\n ], ['X', 'Y', 'C', 9], ['X', 'Q', 'C', -6], ['X', 'R', 'C', -9], ['V',\n 'Y', 'C', 7], ['V', 'R', 'D', 2], ['V', 'R', 'D', -1], ['V', 'Q', 'A', \n -3]], columns=['col1', 'col2', 'col3', 'val'])\n", (10386, 10731), False, 'from pandas import DataFrame, Index, MultiIndex, Series\n'), ((12430, 12566), 'pandas.DataFrame', 'DataFrame', (["[['a', 'v'], ['c', 'w'], ['c', 'x'], ['d', 'y'], ['a', 'z'], ['c', 'r'], [\n 'e', 'q'], ['c', 's']]"], {'columns': "['tag', 'char']"}), "([['a', 'v'], ['c', 'w'], ['c', 'x'], ['d', 'y'], ['a', 'z'], ['c',\n 'r'], ['e', 'q'], ['c', 's']], columns=['tag', 'char'])\n", (12439, 12566), False, 'from pandas import DataFrame, Index, MultiIndex, Series\n'), ((13878, 13886), 'numpy.random.randn', 'randn', (['(5)'], {}), '(5)\n', (13883, 13886), False, 'from numpy.random import randn\n'), ((13911, 13919), 'numpy.random.randn', 'randn', (['(5)'], {}), '(5)\n', (13916, 13919), False, 'from numpy.random import randn\n'), ((13985, 13993), 'numpy.random.randn', 'randn', (['(5)'], {}), '(5)\n', (13990, 13993), False, 'from numpy.random import randn\n'), ((8098, 8111), 'numpy.arange', 'np.arange', (['(24)'], {}), '(24)\n', (8107, 8111), True, 'import numpy as np\n'), ((9052, 9065), 'numpy.arange', 'np.arange', (['(24)'], {}), '(24)\n', (9061, 9065), True, 'import numpy as np\n'), ((19353, 19426), 'pandas.MultiIndex.from_tuples', 'MultiIndex.from_tuples', (['[(4, np.nan)]'], {'names': "['household_id', 'asset_id']"}), "([(4, np.nan)], names=['household_id', 'asset_id'])\n", 
(19375, 19426), False, 'from pandas import DataFrame, Index, MultiIndex, Series\n')]
mmiladi/galaxy
test/api/test_histories.py
7857b152cd10d9490ac2433ff2905ca1a47ee32c
# -*- coding: utf-8 -*- from requests import ( get, post, put ) from base import api # noqa: I100 from base.populators import ( # noqa: I100 DatasetCollectionPopulator, DatasetPopulator, wait_on ) class HistoriesApiTestCase(api.ApiTestCase): def setUp(self): super(HistoriesApiTestCase, self).setUp() self.dataset_populator = DatasetPopulator(self.galaxy_interactor) self.dataset_collection_populator = DatasetCollectionPopulator(self.galaxy_interactor) def test_create_history(self): # Create a history. create_response = self._create_history("TestHistory1") created_id = create_response["id"] # Make sure new history appears in index of user's histories. index_response = self._get("histories").json() indexed_history = [h for h in index_response if h["id"] == created_id][0] self.assertEquals(indexed_history["name"], "TestHistory1") def test_show_history(self): history_id = self._create_history("TestHistoryForShow")["id"] show_response = self._show(history_id) self._assert_has_key( show_response, 'id', 'name', 'annotation', 'size', 'contents_url', 'state', 'state_details', 'state_ids' ) state_details = show_response["state_details"] state_ids = show_response["state_ids"] states = [ 'discarded', 'empty', 'error', 'failed_metadata', 'new', 'ok', 'paused', 'queued', 'running', 'setting_metadata', 'upload' ] assert isinstance(state_details, dict) assert isinstance(state_ids, dict) self._assert_has_keys(state_details, *states) self._assert_has_keys(state_ids, *states) def test_show_most_recently_used(self): history_id = self._create_history("TestHistoryRecent")["id"] show_response = self._get("histories/most_recently_used").json() assert show_response["id"] == history_id def test_index_order(self): slightly_older_history_id = self._create_history("TestHistorySlightlyOlder")["id"] newer_history_id = self._create_history("TestHistoryNewer")["id"] index_response = self._get("histories").json() assert index_response[0]["id"] == newer_history_id assert index_response[1]["id"] == slightly_older_history_id def test_delete(self): # Setup a history and ensure it is in the index history_id = self._create_history("TestHistoryForDelete")["id"] index_response = self._get("histories").json() assert index_response[0]["id"] == history_id show_response = self._show(history_id) assert not show_response["deleted"] # Delete the history self._delete("histories/%s" % history_id) # Check can view it - but it is deleted show_response = self._show(history_id) assert show_response["deleted"] # Verify it is dropped from history index index_response = self._get("histories").json() assert len(index_response) == 0 or index_response[0]["id"] != history_id # Add deleted filter to index to view it index_response = self._get("histories", {"deleted": "true"}).json() assert index_response[0]["id"] == history_id def test_purge(self): history_id = self._create_history("TestHistoryForPurge")["id"] data = {'purge': True} self._delete("histories/%s" % history_id, data=data) show_response = self._show(history_id) assert show_response["deleted"] assert show_response["purged"] def test_undelete(self): history_id = self._create_history("TestHistoryForDeleteAndUndelete")["id"] self._delete("histories/%s" % history_id) self._post("histories/deleted/%s/undelete" % history_id) show_response = self._show(history_id) assert not show_response["deleted"] def test_update(self): history_id = self._create_history("TestHistoryForUpdating")["id"] self._update(history_id, {"name": "New Name"}) show_response = self._show(history_id) assert show_response["name"] 
== "New Name" unicode_name = u'桜ゲノム' self._update(history_id, {"name": unicode_name}) show_response = self._show(history_id) assert show_response["name"] == unicode_name, show_response quoted_name = "'MooCow'" self._update(history_id, {"name": quoted_name}) show_response = self._show(history_id) assert show_response["name"] == quoted_name self._update(history_id, {"deleted": True}) show_response = self._show(history_id) assert show_response["deleted"], show_response self._update(history_id, {"deleted": False}) show_response = self._show(history_id) assert not show_response["deleted"] self._update(history_id, {"published": True}) show_response = self._show(history_id) assert show_response["published"] self._update(history_id, {"genome_build": "hg18"}) show_response = self._show(history_id) assert show_response["genome_build"] == "hg18" self._update(history_id, {"annotation": "The annotation is cool"}) show_response = self._show(history_id) assert show_response["annotation"] == "The annotation is cool" self._update(history_id, {"annotation": unicode_name}) show_response = self._show(history_id) assert show_response["annotation"] == unicode_name, show_response self._update(history_id, {"annotation": quoted_name}) show_response = self._show(history_id) assert show_response["annotation"] == quoted_name def test_update_invalid_attribute(self): history_id = self._create_history("TestHistoryForInvalidUpdating")["id"] put_response = self._update(history_id, {"invalidkey": "moo"}) assert "invalidkey" not in put_response.json() def test_update_invalid_types(self): history_id = self._create_history("TestHistoryForUpdatingInvalidTypes")["id"] for str_key in ["name", "annotation"]: assert self._update(history_id, {str_key: False}).status_code == 400 for bool_key in ['deleted', 'importable', 'published']: assert self._update(history_id, {bool_key: "a string"}).status_code == 400 assert self._update(history_id, {"tags": "a simple string"}).status_code == 400 assert self._update(history_id, {"tags": [True]}).status_code == 400 def test_invalid_keys(self): invalid_history_id = "1234123412341234" assert self._get("histories/%s" % invalid_history_id).status_code == 400 assert self._update(invalid_history_id, {"name": "new name"}).status_code == 400 assert self._delete("histories/%s" % invalid_history_id).status_code == 400 assert self._post("histories/deleted/%s/undelete" % invalid_history_id).status_code == 400 def test_create_anonymous_fails(self): post_data = dict(name="CannotCreate") # Using lower-level _api_url will cause key to not be injected. 
histories_url = self._api_url("histories") create_response = post(url=histories_url, data=post_data) self._assert_status_code_is(create_response, 403) def test_import_export(self): history_name = "for_export" history_id = self.dataset_populator.new_history(name=history_name) self.dataset_populator.new_dataset(history_id, content="1 2 3") imported_history_id = self._reimport_history(history_id, history_name) contents_response = self._get("histories/%s/contents" % imported_history_id) self._assert_status_code_is(contents_response, 200) contents = contents_response.json() assert len(contents) == 1 imported_content = self.dataset_populator.get_history_dataset_content( history_id=imported_history_id, dataset_id=contents[0]["id"] ) assert imported_content == "1 2 3\n" def test_import_export_collection(self): from nose.plugins.skip import SkipTest raise SkipTest("Collection import/export not yet implemented") history_name = "for_export_with_collections" history_id = self.dataset_populator.new_history(name=history_name) self.dataset_collection_populator.create_list_in_history(history_id, contents=["Hello", "World"]) imported_history_id = self._reimport_history(history_id, history_name) contents_response = self._get("histories/%s/contents" % imported_history_id) self._assert_status_code_is(contents_response, 200) contents = contents_response.json() assert len(contents) == 3 def _reimport_history(self, history_id, history_name): # Ensure the history is ready to go... self.dataset_populator.wait_for_history(history_id, assert_ok=True) # Export the history. download_path = self._export(history_id) # Create download for history full_download_url = "%s%s?key=%s" % (self.url, download_path, self.galaxy_interactor.api_key) download_response = get(full_download_url) self._assert_status_code_is(download_response, 200) def history_names(): history_index = self._get("histories") return dict((h["name"], h) for h in history_index.json()) import_name = "imported from archive: %s" % history_name assert import_name not in history_names() import_data = dict(archive_source=full_download_url, archive_type="url") import_response = self._post("histories", data=import_data) self._assert_status_code_is(import_response, 200) def has_history_with_name(): histories = history_names() return histories.get(import_name, None) imported_history = wait_on(has_history_with_name, desc="import history") imported_history_id = imported_history["id"] self.dataset_populator.wait_for_history(imported_history_id) return imported_history_id def test_create_tag(self): post_data = dict(name="TestHistoryForTag") history_id = self._post("histories", data=post_data).json()["id"] tag_data = dict(value="awesometagvalue") tag_url = "histories/%s/tags/awesometagname" % history_id tag_create_response = self._post(tag_url, data=tag_data) self._assert_status_code_is(tag_create_response, 200) def _export(self, history_id): export_url = self._api_url("histories/%s/exports" % history_id, use_key=True) put_response = put(export_url) self._assert_status_code_is(put_response, 202) def export_ready_response(): put_response = put(export_url) if put_response.status_code == 202: return None return put_response put_response = wait_on(export_ready_response, desc="export ready") self._assert_status_code_is(put_response, 200) response = put_response.json() self._assert_has_keys(response, "download_url") download_path = response["download_url"] return download_path def _show(self, history_id): return self._get("histories/%s" % history_id).json() def _update(self, 
history_id, data): update_url = self._api_url("histories/%s" % history_id, use_key=True) put_response = put(update_url, json=data) return put_response def _create_history(self, name): post_data = dict(name=name) create_response = self._post("histories", data=post_data).json() self._assert_has_keys(create_response, "name", "id") self.assertEquals(create_response["name"], name) return create_response # TODO: (CE) test_create_from_copy
[((376, 416), 'base.populators.DatasetPopulator', 'DatasetPopulator', (['self.galaxy_interactor'], {}), '(self.galaxy_interactor)\n', (392, 416), False, 'from base.populators import DatasetCollectionPopulator, DatasetPopulator, wait_on\n'), ((461, 511), 'base.populators.DatasetCollectionPopulator', 'DatasetCollectionPopulator', (['self.galaxy_interactor'], {}), '(self.galaxy_interactor)\n', (487, 511), False, 'from base.populators import DatasetCollectionPopulator, DatasetPopulator, wait_on\n'), ((7255, 7294), 'requests.post', 'post', ([], {'url': 'histories_url', 'data': 'post_data'}), '(url=histories_url, data=post_data)\n', (7259, 7294), False, 'from requests import get, post, put\n'), ((8200, 8256), 'nose.plugins.skip.SkipTest', 'SkipTest', (['"""Collection import/export not yet implemented"""'], {}), "('Collection import/export not yet implemented')\n", (8208, 8256), False, 'from nose.plugins.skip import SkipTest\n'), ((9228, 9250), 'requests.get', 'get', (['full_download_url'], {}), '(full_download_url)\n', (9231, 9250), False, 'from requests import get, post, put\n'), ((9945, 9998), 'base.populators.wait_on', 'wait_on', (['has_history_with_name'], {'desc': '"""import history"""'}), "(has_history_with_name, desc='import history')\n", (9952, 9998), False, 'from base.populators import DatasetCollectionPopulator, DatasetPopulator, wait_on\n'), ((10701, 10716), 'requests.put', 'put', (['export_url'], {}), '(export_url)\n', (10704, 10716), False, 'from requests import get, post, put\n'), ((10985, 11036), 'base.populators.wait_on', 'wait_on', (['export_ready_response'], {'desc': '"""export ready"""'}), "(export_ready_response, desc='export ready')\n", (10992, 11036), False, 'from base.populators import DatasetCollectionPopulator, DatasetPopulator, wait_on\n'), ((11503, 11529), 'requests.put', 'put', (['update_url'], {'json': 'data'}), '(update_url, json=data)\n', (11506, 11529), False, 'from requests import get, post, put\n'), ((10837, 10852), 'requests.put', 'put', (['export_url'], {}), '(export_url)\n', (10840, 10852), False, 'from requests import get, post, put\n')]
VinceW0/Leetcode_Python_solutions
Algorithms_easy/0461. Hamming Distance.py
09e9720afce21632372431606ebec4129eb79734
""" 0461. Hamming Distance The Hamming distance between two integers is the number of positions at which the corresponding bits are different. Given two integers x and y, calculate the Hamming distance. Note: 0 ≤ x, y < 231. Example: Input: x = 1, y = 4 Output: 2 Explanation: 1 (0 0 0 1) 4 (0 1 0 0) ↑ ↑ The above arrows point to positions where the corresponding bits are different. """ class Solution: def hammingDistance(self, x: int, y: int) : z = x^y res = 0 while z: res += z&1 z = z>>1 return res class Solution: def hammingDistance(self, x: int, y: int) : return bin(x^y).count('1')
[]
CoderXAndZ/PycharmProjects
AllProjects/Projects/Spider/DownloadImage.py
94b3cc68d39614a4291bd63d4811dab61eb2e64a
#! /usr/local/bin/python3
# -*- coding: UTF-8 -*-
# Scrape images from the site and save them

import urllib.request
import os
import random


def open_url(url):
    request = urllib.request.Request(url)
    request.add_header('User-Agent',
                       'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:60.0) Gecko/20100101 Firefox/60.0')

    # Add a proxy to change the IP
    iplist = ['182.90.94.113:53281', '119.28.152.208:80', '116.226.219.94:9797', ]
    proxy_support = urllib.request.ProxyHandler({'http': random.choice(iplist)})
    opener = urllib.request.build_opener(proxy_support)  # create
    urllib.request.install_opener(opener)  # install

    # Open the web page
    response = urllib.request.urlopen(request)
    html = response.read()
    return html


# Get the current page id
def get_page(url):
    html = open_url(url).decode('utf-8')
    a = html.find('current-comment-page') + 23
    b = html.find(']', a)
    print("Page id is:", html[a:b])
    return html[a:b]


# Collect the image addresses found on the page into a list and return it
def find_imgs(url):
    html = open_url(url).decode('utf-8')
    print("html content:", html)
    imgs_addrs = []

    a = html.find('img src=')
    while a != -1:
        # substring found
        print("found string a")
        b = html.find('.gif', a, a + 255)
        if b != -1:
            print("found string b")
            imgs_addrs.append(html[a + 9:b + 4])
        else:
            print("string b not found")
            b = a + 9
        a = html.find('img src=', b)

    return imgs_addrs


# Save the images
def save_imgs(folder, imgs_addrs):
    print("folder", folder, "imgs_addrs", imgs_addrs)
    for each in imgs_addrs:
        filename = each.split('/')[-1]
        with open(filename, 'wb') as f:
            img = open_url(each)
            f.write(img)


# Download the images
def download_img(folder='Image', pages=10):
    if os.path.exists(folder) == False:
        os.mkdir(folder)
        os.chdir(folder)

    url = 'http://jandan.net/ooxx/'
    page_num = int(get_page(url))

    for i in range(pages):
        page_num -= i
        page_url = url + 'page-' + str(page_num) + '#comments'
        print("Page url is:", page_url)
        # image list
        imgs_addrs = find_imgs(page_url)
        save_imgs(folder, imgs_addrs)


if __name__ == '__main__':
    download_img()
[((1697, 1719), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (1711, 1719), False, 'import os\n'), ((1738, 1754), 'os.mkdir', 'os.mkdir', (['folder'], {}), '(folder)\n', (1746, 1754), False, 'import os\n'), ((1763, 1779), 'os.chdir', 'os.chdir', (['folder'], {}), '(folder)\n', (1771, 1779), False, 'import os\n'), ((476, 497), 'random.choice', 'random.choice', (['iplist'], {}), '(iplist)\n', (489, 497), False, 'import random\n')]
BA-HanseML/NF_Prj_MIMII_Dataset
utility/extractor_batch.py
c9dd130a48c5ee28491a3f9369ace8f7217753d6
print('load extractor_batch')

# Utility to run multiple feature extraction
# diagrams over many files with multiple threads

import pandas as pd
import numpy as np
import os
import sys
import glob
from tqdm.auto import tqdm
from queue import Queue
from threading import Thread
from datetime import datetime
import time
import logging


# thread class
class ExtractorDiagramThread(Thread):
    def __init__(self, queue, extdia, wn):
        Thread.__init__(self)
        self.queue = queue
        self.wn = wn
        self.extdia = extdia
        self.stop = False

    def run(self):
        while not self.stop:
            # Get the work from the queue and expand the tuple
            file_path, target_class = self.queue.get()
            # execute diagram
            self.extdia.execute_diagram(file_path, target_class)
            self.queue.task_done()


def IfStrReturnList(s):
    if type(s) == str:
        return [s]
    else:
        return s


def time_stemp_str():
    now = datetime.now()
    return (now.strftime("%Y-%m-%d %H:%M:%S"))


class LoggerWrap():
    def __init__(self):
        self.logger = logging.getLogger('feature_extraction_batch')
        if (self.logger.hasHandlers()):
            self.logger.handlers.clear()
        self.logger.setLevel(logging.DEBUG)
        # create file handler which logs even debug messages
        self.fh = logging.FileHandler('feature_extraction_batch.log')
        self.fh.setLevel(logging.DEBUG)
        self.logger.addHandler(self.fh)

    def close(self):
        print('close log file')
        #print(self.fh)
        self.fh.close()
        logging.shutdown()

    def log(self, s):
        m = time_stemp_str() + ': ' + s
        self.logger.info(m)
        print(m)


def get_file_list(machine, snr, id, target_class_map,
                  FileCountLimit, datset_folder_from_base, base_folder):
    flist = []
    tlsit = []
    tn = {}
    fn = {}
    for tc in target_class_map:
        fn[tc] = sorted(
            glob.glob(
                os.path.abspath(
                    "{base}/{SNR}/{machine}/id_{ID}/{n}/*.{ext}".format(
                        base=base_folder + datset_folder_from_base,
                        SNR=snr, machine=machine, ID=id, n=tc, ext='wav'))))
        if FileCountLimit:
            if FileCountLimit < len(fn[tc]):
                fn[tc] = fn[tc][:FileCountLimit]
        tn[tc] = np.ones(len(fn[tc]), dtype='int') * target_class_map[tc]
    for tc in target_class_map:
        flist += fn[tc]
        tlsit += (list((tn[tc])))
    return flist, tlsit


def multithreadpolltracker(queue, total):
    last = total
    done_l = 0
    pbar = tqdm(total=total)
    while not queue.empty():
        time.sleep(0.05)
        if last > queue.qsize():
            done = total - int(queue.qsize())
            #print(done, end ="--")
            pbar.update(done - done_l)
            done_l = done
            last = queue.qsize()
    queue.join()
    done = total
    pbar.update(done)


# Main Function
def extractor_batch(base_folder, target_folder, extdia,
                    FileFindDict={'SNR': '6dB', 'machine': 'pump', 'ID': ['00']},
                    n_jobs=1,
                    target_class_map={'abnormal': 1, 'normal': 0},
                    FileCountLimit=None,
                    datset_folder_from_base='dataset',
                    augment=False,  # create one augmentation for a given target class i.e. 'normal'
                    DeviceType=0,  # 0 continuous or 1 sporadic
                    fHP=None,  # simple FIR HP to cut off very low freq to not overload MEL
                    main_channel=0):  # assuming a DOA was able to get main direction (pseudo DOA ...)

    lw = LoggerWrap()
    base_folder_full = os.path.abspath(base_folder)
    target_folder_full = os.path.abspath(base_folder + target_folder)
    os.makedirs(target_folder_full, exist_ok=True)
    lw.log('Target folder will be: ' + target_folder_full)
    lw.log('Extractor diagram is of type: ' + str(extdia))

    for m in IfStrReturnList(FileFindDict['machine']):
        for snr in IfStrReturnList(FileFindDict['SNR']):
            for id in IfStrReturnList(FileFindDict['ID']):
                lw.log('-' * 44)
                lw.log('Working on machinepart:' + m + ' SNR:' + snr + ' ID:' + id)
                ts = time.time()
                # create file list for ID batch
                filelist, targetlist = get_file_list(m, snr, id,
                                                      target_class_map,
                                                      FileCountLimit,
                                                      datset_folder_from_base,
                                                      base_folder)
                lw.log('Files to process: ' + str(len(filelist)))
                # start processing
                if n_jobs == 1:  # in the notebook
                    ed = extdia(base_folder, 0, main_channel, augment, DeviceType, fHP)
                    pbar = tqdm(total=len(filelist))
                    for f, tc in (zip(filelist, targetlist)):
                        ed.execute_diagram(f, tc)
                        pbar.update()
                    outport_akkulist_tofile(base_folder, target_folder, ed, m, snr, id)
                    lw.log('list for the id pickled')
                else:  # to threads
                    # create the threads and akku diagram
                    edl = []
                    wl = []
                    queue = Queue()
                    for w in range(n_jobs):
                        edl.append(extdia(base_folder, w, main_channel, augment, DeviceType, fHP))
                        worker = ExtractorDiagramThread(queue, edl[w], w)
                        worker.daemon = True
                        worker.start()
                        wl.append(worker)
                    # fill the Queue
                    lw.log('multithread mode filling the queue')
                    for f, tc in (zip(filelist, targetlist)):
                        queue.put((f, tc))
                    multithreadpolltracker(queue, len(filelist))
                    for w in wl:
                        w.stop = True
                    lw.log('multithread mode all threads done')
                    joinlist = outport_akkulist_join(exdia_list=edl)
                    outport_akkulist_tofile(base_folder, target_folder, joinlist, m, snr, id)
                    lw.log('multithread mode list joined and pickled for the id')
                    del edl  # trying to fight the memory leak
                    del joinlist
                tneeded_sec = np.round(time.time() - ts, 2)
                tneeded_min = np.round(tneeded_sec / 60, 2)
                lw.log('total time needed for the ID: ' + str(tneeded_sec) + 'sec' + ' = ' + str(tneeded_min) + 'min')
    lw.close()
[((1020, 1034), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1032, 1034), False, 'from datetime import datetime\n'), ((2848, 2865), 'tqdm.auto.tqdm', 'tqdm', ([], {'total': 'total'}), '(total=total)\n', (2852, 2865), False, 'from tqdm.auto import tqdm\n'), ((4071, 4099), 'os.path.abspath', 'os.path.abspath', (['base_folder'], {}), '(base_folder)\n', (4086, 4099), False, 'import os\n'), ((4126, 4170), 'os.path.abspath', 'os.path.abspath', (['(base_folder + target_folder)'], {}), '(base_folder + target_folder)\n', (4141, 4170), False, 'import os\n'), ((4174, 4220), 'os.makedirs', 'os.makedirs', (['target_folder_full'], {'exist_ok': '(True)'}), '(target_folder_full, exist_ok=True)\n', (4185, 4220), False, 'import os\n'), ((446, 467), 'threading.Thread.__init__', 'Thread.__init__', (['self'], {}), '(self)\n', (461, 467), False, 'from threading import Thread\n'), ((1158, 1203), 'logging.getLogger', 'logging.getLogger', (['"""feature_extraction_batch"""'], {}), "('feature_extraction_batch')\n", (1175, 1203), False, 'import logging\n'), ((1413, 1464), 'logging.FileHandler', 'logging.FileHandler', (['"""feature_extraction_batch.log"""'], {}), "('feature_extraction_batch.log')\n", (1432, 1464), False, 'import logging\n'), ((1661, 1679), 'logging.shutdown', 'logging.shutdown', ([], {}), '()\n', (1677, 1679), False, 'import logging\n'), ((2905, 2921), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (2915, 2921), False, 'import time\n'), ((4663, 4674), 'time.time', 'time.time', ([], {}), '()\n', (4672, 4674), False, 'import time\n'), ((5821, 5828), 'queue.Queue', 'Queue', ([], {}), '()\n', (5826, 5828), False, 'from queue import Queue\n'), ((7063, 7074), 'time.time', 'time.time', ([], {}), '()\n', (7072, 7074), False, 'import time\n')]
cmalek/django-site-multitenancy
multitenancy/context_processors.py
1b943f63c0d6247529805e05dcced68ceffa2a69
from .models import Tenant


def tenant(request):
    """
    Return context variables required by apps that use django-site-multitenancy.

    If there is no 'tenant' attribute in the request, extract one from the
    request.
    """
    if hasattr(request, 'tenant'):
        tenant = request.tenant
    else:
        tenant = Tenant.objects.get_current(request)
    return {'tenant': tenant}
[]
odhiambocuttice/mypersonalapp
hello/forms.py
b2fb12046302104569aa5c4e4869aeb669e51b1b
from django import forms

from .models import Project


class ProjectForm(forms.ModelForm):
    class Meta:
        model = Project
        fields = ["title", "describe", "technology"]
[]
mottaquikarim/pydev-psets
pset_classes/class_basics/solutions/p1.py
9749e0d216ee0a5c586d0d3013ef481cc21dee27
""" Person class """ # Create a Person class with the following properties # 1. name # 2. age # 3. social security number class Person: def __init__(self, name, age, social_number): self.name = name self.age = age self.social = social_number p1 = Person("John", 36, "111-11-1111") print(p1.name) print(p1.age) print(p1.social)
[]
huangxu96/Paddle
python/paddle/fluid/contrib/slim/tests/test_imperative_out_scale.py
372ac08a171d76c745deaab0feed2d587798f734
# copyright (c) 2018 paddlepaddle authors. all rights reserved. # # licensed under the apache license, version 2.0 (the "license"); # you may not use this file except in compliance with the license. # you may obtain a copy of the license at # # http://www.apache.org/licenses/license-2.0 # # unless required by applicable law or agreed to in writing, software # distributed under the license is distributed on an "as is" basis, # without warranties or conditions of any kind, either express or implied. # see the license for the specific language governing permissions and # limitations under the license. from __future__ import print_function import os import numpy as np import random import unittest import logging import warnings import paddle import paddle.fluid as fluid import paddle.fluid.layers as layers from paddle.fluid import core from paddle.fluid.optimizer import AdamOptimizer from paddle.fluid.framework import IrGraph from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware from paddle.fluid.contrib.slim.quantization import OutScaleForTrainingPass, OutScaleForInferencePass, QuantizationTransformPass from paddle.fluid.dygraph.container import Sequential from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX from paddle.nn.layer import ReLU, LeakyReLU, Sigmoid, Softmax, PReLU from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D from paddle.fluid.dygraph.nn import Pool2D from paddle.fluid.log_helper import get_logger from paddle.fluid.dygraph import nn paddle.enable_static() os.environ["CPU_NUM"] = "1" if core.is_compiled_with_cuda(): fluid.set_flags({"FLAGS_cudnn_deterministic": True}) _logger = get_logger( __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s') def get_vaild_warning_num(warning, w): num = 0 for i in range(len(w)): if warning in str(w[i].message): num += 1 return num def StaticLenet(data, num_classes=10, classifier_activation='softmax'): conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1") conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2") fc_w1_attr = fluid.ParamAttr(name="fc_w_1") fc_w2_attr = fluid.ParamAttr(name="fc_w_2") fc_w3_attr = fluid.ParamAttr(name="fc_w_3") conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2") fc_b1_attr = fluid.ParamAttr(name="fc_b_1") fc_b2_attr = fluid.ParamAttr(name="fc_b_2") fc_b3_attr = fluid.ParamAttr(name="fc_b_3") conv1 = fluid.layers.conv2d( data, num_filters=6, filter_size=3, stride=1, padding=1, param_attr=conv2d_w1_attr, bias_attr=False) batch_norm1 = layers.batch_norm(conv1) relu1 = layers.relu(batch_norm1) pool1 = fluid.layers.pool2d( relu1, pool_size=2, pool_type='max', pool_stride=2) conv2 = fluid.layers.conv2d( pool1, num_filters=16, filter_size=5, stride=1, padding=0, param_attr=conv2d_w2_attr, bias_attr=conv2d_b2_attr) batch_norm2 = layers.batch_norm(conv2) prelu1 = layers.prelu(batch_norm2, mode='all') pool2 = fluid.layers.pool2d( prelu1, pool_size=2, pool_type='max', pool_stride=2) fc1 = fluid.layers.fc(input=pool2, size=120, param_attr=fc_w1_attr, bias_attr=fc_b1_attr) leaky_relu1 = layers.leaky_relu(fc1, alpha=0.01) fc2 = fluid.layers.fc(input=leaky_relu1, size=84, param_attr=fc_w2_attr, bias_attr=fc_b2_attr) sigmoid1 = layers.sigmoid(fc2) fc3 = fluid.layers.fc(input=sigmoid1, size=num_classes, param_attr=fc_w3_attr, bias_attr=fc_b3_attr) softmax1 = layers.softmax(fc3, use_cudnn=True) return softmax1 class ImperativeLenet(fluid.dygraph.Layer): def __init__(self, num_classes=10): super(ImperativeLenet, self).__init__() conv2d_w1_attr = 
fluid.ParamAttr(name="conv2d_w_1") conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2") fc_w1_attr = fluid.ParamAttr(name="fc_w_1") fc_w2_attr = fluid.ParamAttr(name="fc_w_2") fc_w3_attr = fluid.ParamAttr(name="fc_w_3") conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2") fc_b1_attr = fluid.ParamAttr(name="fc_b_1") fc_b2_attr = fluid.ParamAttr(name="fc_b_2") fc_b3_attr = fluid.ParamAttr(name="fc_b_3") self.features = Sequential( Conv2D( in_channels=1, out_channels=6, kernel_size=3, stride=1, padding=1, weight_attr=conv2d_w1_attr, bias_attr=False), BatchNorm2D(6), ReLU(), Pool2D( pool_size=2, pool_type='max', pool_stride=2), Conv2D( in_channels=6, out_channels=16, kernel_size=5, stride=1, padding=0, weight_attr=conv2d_w2_attr, bias_attr=conv2d_b2_attr), BatchNorm2D(16), PReLU(), MaxPool2D( kernel_size=2, stride=2)) self.fc = Sequential( Linear( in_features=400, out_features=120, weight_attr=fc_w1_attr, bias_attr=fc_b1_attr), LeakyReLU(), Linear( in_features=120, out_features=84, weight_attr=fc_w2_attr, bias_attr=fc_b2_attr), Sigmoid(), Linear( in_features=84, out_features=num_classes, weight_attr=fc_w3_attr, bias_attr=fc_b3_attr), Softmax()) def forward(self, inputs): x = self.features(inputs) x = fluid.layers.flatten(x, 1) x = self.fc(x) return x class TestImperativeOutSclae(unittest.TestCase): def test_out_scale_acc(self): def _build_static_lenet(main, startup, is_test=False, seed=1000): with fluid.unique_name.guard(): with fluid.program_guard(main, startup): main.random_seed = seed startup.random_seed = seed img = fluid.layers.data( name='image', shape=[1, 28, 28], dtype='float32') label = fluid.layers.data( name='label', shape=[1], dtype='int64') prediction = StaticLenet(img) if not is_test: loss = fluid.layers.cross_entropy( input=prediction, label=label) avg_loss = fluid.layers.mean(loss) else: avg_loss = prediction return img, label, avg_loss reader = paddle.batch( paddle.dataset.mnist.test(), batch_size=32, drop_last=True) weight_quantize_type = 'abs_max' activation_quantize_type = 'moving_average_abs_max' param_init_map = {} seed = 1000 lr = 0.001 dynamic_out_scale_list = [] static_out_scale_list = [] # imperative train _logger.info( "--------------------------dynamic graph qat--------------------------" ) imperative_out_scale = ImperativeQuantAware( weight_quantize_type=weight_quantize_type, activation_quantize_type=activation_quantize_type) with fluid.dygraph.guard(): np.random.seed(seed) fluid.default_main_program().random_seed = seed fluid.default_startup_program().random_seed = seed lenet = ImperativeLenet() fixed_state = {} for name, param in lenet.named_parameters(): p_shape = param.numpy().shape p_value = param.numpy() if name.endswith("bias"): value = np.zeros_like(p_value).astype('float32') else: value = np.random.normal( loc=0.0, scale=0.01, size=np.product(p_shape)).reshape( p_shape).astype('float32') fixed_state[name] = value param_init_map[param.name] = value lenet.set_dict(fixed_state) imperative_out_scale.quantize(lenet) adam = AdamOptimizer( learning_rate=lr, parameter_list=lenet.parameters()) dynamic_loss_rec = [] lenet.train() for batch_id, data in enumerate(reader()): x_data = np.array([x[0].reshape(1, 28, 28) for x in data]).astype('float32') y_data = np.array( [x[1] for x in data]).astype('int64').reshape(-1, 1) img = fluid.dygraph.to_variable(x_data) label = fluid.dygraph.to_variable(y_data) out = lenet(img) loss = fluid.layers.cross_entropy(out, label) avg_loss = fluid.layers.mean(loss) avg_loss.backward() adam.minimize(avg_loss) lenet.clear_gradients() 
dynamic_loss_rec.append(avg_loss.numpy()[0]) if batch_id % 100 == 0: _logger.info('{}: {}'.format('loss', avg_loss.numpy())) lenet.eval() param_save_path = "test_save_quantized_model/lenet.pdparams" save_dict = lenet.state_dict() paddle.save(save_dict, param_save_path) path = "./dynamic_outscale_infer_model/lenet" dynamic_save_dir = "./dynamic_outscale_infer_model" imperative_out_scale.save_quantized_model( layer=lenet, path=path, input_spec=[ paddle.static.InputSpec( shape=[None, 1, 28, 28], dtype='float32') ]) _logger.info( "--------------------------static graph qat--------------------------" ) static_loss_rec = [] if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) else: place = core.CPUPlace() exe = fluid.Executor(place) main = fluid.Program() infer = fluid.Program() startup = fluid.Program() static_img, static_label, static_loss = _build_static_lenet( main, startup, False, seed) infer_img, _, infer_pre = _build_static_lenet(infer, startup, True, seed) with fluid.unique_name.guard(): with fluid.program_guard(main, startup): opt = AdamOptimizer(learning_rate=lr) opt.minimize(static_loss) scope = core.Scope() with fluid.scope_guard(scope): exe.run(startup) for param in main.all_parameters(): if "batch_norm" in param.name: param_name = param.name.replace("norm", "norm2d") elif 'prelu' in param.name: param_name = param.name.replace("prelu", 'p_re_lu') else: param_name = param.name param_tensor = scope.var(param.name).get_tensor() param_tensor.set(param_init_map[param_name], place) main_graph = IrGraph(core.Graph(main.desc), for_test=False) infer_graph = IrGraph(core.Graph(infer.desc), for_test=True) transform_pass = QuantizationTransformPass( scope=scope, place=place, activation_quantize_type=activation_quantize_type, weight_quantize_type=weight_quantize_type, quantizable_op_type=['conv2d', 'depthwise_conv2d', 'mul']) transform_pass.apply(main_graph) transform_pass.apply(infer_graph) outscale_pass = OutScaleForTrainingPass(scope=scope, place=place) outscale_pass.apply(main_graph) build_strategy = fluid.BuildStrategy() build_strategy.fuse_all_reduce_ops = False binary = fluid.CompiledProgram(main_graph.graph).with_data_parallel( loss_name=static_loss.name, build_strategy=build_strategy) feeder = fluid.DataFeeder( feed_list=[static_img, static_label], place=place) with fluid.scope_guard(scope): for batch_id, data in enumerate(reader()): loss_v, = exe.run(binary, feed=feeder.feed(data), fetch_list=[static_loss]) static_loss_rec.append(loss_v[0]) if batch_id % 100 == 0: _logger.info('{}: {}'.format('loss', loss_v)) scale_inference_pass = OutScaleForInferencePass(scope=scope) scale_inference_pass.apply(infer_graph) save_program = infer_graph.to_program() static_save_dir = "./static_outscale_infer_model" with fluid.scope_guard(scope): fluid.io.save_inference_model( dirname=static_save_dir, feeded_var_names=[infer_img.name], target_vars=[infer_pre], executor=exe, main_program=save_program, model_filename="lenet" + INFER_MODEL_SUFFIX, params_filename="lenet" + INFER_PARAMS_SUFFIX) rtol = 1e-05 atol = 1e-08 for i, (loss_d, loss_s) in enumerate(zip(dynamic_loss_rec, static_loss_rec)): diff = np.abs(loss_d - loss_s) if diff > (atol + rtol * np.abs(loss_s)): _logger.info( "diff({}) at {}, dynamic loss = {}, static loss = {}". 
format(diff, i, loss_d, loss_s)) break self.assertTrue( np.allclose( np.array(dynamic_loss_rec), np.array(static_loss_rec), rtol=rtol, atol=atol, equal_nan=True), msg='Failed to do the imperative qat.') # load dynamic model [dynamic_inference_program, feed_target_names, fetch_targets] = ( fluid.io.load_inference_model( dirname=dynamic_save_dir, executor=exe, model_filename="lenet" + INFER_MODEL_SUFFIX, params_filename="lenet" + INFER_PARAMS_SUFFIX)) # load static model [static_inference_program, feed_target_names, fetch_targets] = ( fluid.io.load_inference_model( dirname=static_save_dir, executor=exe, model_filename="lenet" + INFER_MODEL_SUFFIX, params_filename="lenet" + INFER_PARAMS_SUFFIX)) dynamic_ops = dynamic_inference_program.global_block().ops static_ops = static_inference_program.global_block().ops for op in dynamic_ops[:]: if op.type == "flatten2" or 'fake' in op.type: dynamic_ops.remove(op) for op in static_ops[:]: if 'fake' in op.type: static_ops.remove(op) op_count = 0 for i in range(len(dynamic_ops)): if dynamic_ops[i].has_attr("out_threshold"): op_count += 1 self.assertTrue(dynamic_ops[i].type == static_ops[i].type) self.assertTrue(dynamic_ops[i].attr("out_threshold") == static_ops[i].attr("out_threshold")) self.assertTrue(op_count == 13) class TestSaveQuanztizedModelFromCheckPoint(unittest.TestCase): def test_save_quantized_model(self): weight_quantize_type = 'abs_max' activation_quantize_type = 'moving_average_abs_max' load_param_path = "test_save_quantized_model/lenet.pdparams" path = "./dynamic_outscale_infer_model_from_checkpoint/lenet" dynamic_model_save_dir = "./dynamic_outscale_infer_model_from_checkpoint" static_model_save_dir = "./static_outscale_infer_model" imperative_out_scale = ImperativeQuantAware( weight_quantize_type=weight_quantize_type, activation_quantize_type=activation_quantize_type) with fluid.dygraph.guard(): lenet = ImperativeLenet() load_dict = paddle.load(load_param_path) imperative_out_scale.quantize(lenet) lenet.set_dict(load_dict) imperative_out_scale.save_quantized_model( layer=lenet, path=path, input_spec=[ paddle.static.InputSpec( shape=[None, 1, 28, 28], dtype='float32') ]) if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) else: place = core.CPUPlace() exe = fluid.Executor(place) # load dynamic model [dynamic_inference_program, feed_target_names, fetch_targets] = ( fluid.io.load_inference_model( dirname=dynamic_model_save_dir, executor=exe, model_filename="lenet" + INFER_MODEL_SUFFIX, params_filename="lenet" + INFER_PARAMS_SUFFIX)) # load static model [static_inference_program, feed_target_names, fetch_targets] = ( fluid.io.load_inference_model( dirname=static_model_save_dir, executor=exe, model_filename="lenet" + INFER_MODEL_SUFFIX, params_filename="lenet" + INFER_PARAMS_SUFFIX)) dynamic_ops = dynamic_inference_program.global_block().ops static_ops = static_inference_program.global_block().ops for op in dynamic_ops[:]: if op.type == "flatten2" or 'fake' in op.type: dynamic_ops.remove(op) for op in static_ops[:]: if 'fake' in op.type: static_ops.remove(op) op_count = 0 for i in range(len(dynamic_ops)): if dynamic_ops[i].has_attr("out_threshold"): op_count += 1 self.assertTrue(dynamic_ops[i].type == static_ops[i].type) self.assertTrue(dynamic_ops[i].attr("out_threshold") == static_ops[i].attr("out_threshold")) self.assertTrue(op_count == 13) class TestSaveQuantizedModel_Warning(unittest.TestCase): def test_warning(self): path = "./dynamic_outscale_infer_model_with_warnings/lenet" imperative_out_scale = ImperativeQuantAware() with 
fluid.dygraph.guard(): lenet = ImperativeLenet() with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") imperative_out_scale.save_quantized_model( layer=lenet, path=path, input_spec=[ paddle.static.InputSpec( shape=[None, 1, 28, 28], dtype='float32') ]) warning_message = "Warning: No Layer of the model while to be saved contains the out_threshold attribute, " \ "so the generated inference model would not contain the out_threshold." num = get_vaild_warning_num(warning_message, w) assert num == 1 if __name__ == '__main__': unittest.main()
[((1541, 1563), 'paddle.enable_static', 'paddle.enable_static', ([], {}), '()\n', (1561, 1563), False, 'import paddle\n'), ((1596, 1624), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (1622, 1624), False, 'from paddle.fluid import core\n'), ((1694, 1779), 'paddle.fluid.log_helper.get_logger', 'get_logger', (['__name__', 'logging.INFO'], {'fmt': '"""%(asctime)s-%(levelname)s: %(message)s"""'}), "(__name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s'\n )\n", (1704, 1779), False, 'from paddle.fluid.log_helper import get_logger\n'), ((1630, 1682), 'paddle.fluid.set_flags', 'fluid.set_flags', (["{'FLAGS_cudnn_deterministic': True}"], {}), "({'FLAGS_cudnn_deterministic': True})\n", (1645, 1682), True, 'import paddle.fluid as fluid\n'), ((2033, 2067), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""conv2d_w_1"""'}), "(name='conv2d_w_1')\n", (2048, 2067), True, 'import paddle.fluid as fluid\n'), ((2089, 2123), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""conv2d_w_2"""'}), "(name='conv2d_w_2')\n", (2104, 2123), True, 'import paddle.fluid as fluid\n'), ((2141, 2171), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_w_1"""'}), "(name='fc_w_1')\n", (2156, 2171), True, 'import paddle.fluid as fluid\n'), ((2189, 2219), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_w_2"""'}), "(name='fc_w_2')\n", (2204, 2219), True, 'import paddle.fluid as fluid\n'), ((2237, 2267), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_w_3"""'}), "(name='fc_w_3')\n", (2252, 2267), True, 'import paddle.fluid as fluid\n'), ((2289, 2323), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""conv2d_b_2"""'}), "(name='conv2d_b_2')\n", (2304, 2323), True, 'import paddle.fluid as fluid\n'), ((2341, 2371), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_b_1"""'}), "(name='fc_b_1')\n", (2356, 2371), True, 'import paddle.fluid as fluid\n'), ((2389, 2419), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_b_2"""'}), "(name='fc_b_2')\n", (2404, 2419), True, 'import paddle.fluid as fluid\n'), ((2437, 2467), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_b_3"""'}), "(name='fc_b_3')\n", (2452, 2467), True, 'import paddle.fluid as fluid\n'), ((2480, 2604), 'paddle.fluid.layers.conv2d', 'fluid.layers.conv2d', (['data'], {'num_filters': '(6)', 'filter_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'param_attr': 'conv2d_w1_attr', 'bias_attr': '(False)'}), '(data, num_filters=6, filter_size=3, stride=1, padding=1,\n param_attr=conv2d_w1_attr, bias_attr=False)\n', (2499, 2604), True, 'import paddle.fluid as fluid\n'), ((2676, 2700), 'paddle.fluid.layers.batch_norm', 'layers.batch_norm', (['conv1'], {}), '(conv1)\n', (2693, 2700), True, 'import paddle.fluid.layers as layers\n'), ((2713, 2737), 'paddle.fluid.layers.relu', 'layers.relu', (['batch_norm1'], {}), '(batch_norm1)\n', (2724, 2737), True, 'import paddle.fluid.layers as layers\n'), ((2750, 2821), 'paddle.fluid.layers.pool2d', 'fluid.layers.pool2d', (['relu1'], {'pool_size': '(2)', 'pool_type': '"""max"""', 'pool_stride': '(2)'}), "(relu1, pool_size=2, pool_type='max', pool_stride=2)\n", (2769, 2821), True, 'import paddle.fluid as fluid\n'), ((2843, 2979), 'paddle.fluid.layers.conv2d', 'fluid.layers.conv2d', (['pool1'], {'num_filters': '(16)', 'filter_size': '(5)', 'stride': '(1)', 'padding': '(0)', 'param_attr': 'conv2d_w2_attr', 'bias_attr': 'conv2d_b2_attr'}), '(pool1, 
num_filters=16, filter_size=5, stride=1, padding\n =0, param_attr=conv2d_w2_attr, bias_attr=conv2d_b2_attr)\n', (2862, 2979), True, 'import paddle.fluid as fluid\n'), ((3050, 3074), 'paddle.fluid.layers.batch_norm', 'layers.batch_norm', (['conv2'], {}), '(conv2)\n', (3067, 3074), True, 'import paddle.fluid.layers as layers\n'), ((3088, 3125), 'paddle.fluid.layers.prelu', 'layers.prelu', (['batch_norm2'], {'mode': '"""all"""'}), "(batch_norm2, mode='all')\n", (3100, 3125), True, 'import paddle.fluid.layers as layers\n'), ((3138, 3210), 'paddle.fluid.layers.pool2d', 'fluid.layers.pool2d', (['prelu1'], {'pool_size': '(2)', 'pool_type': '"""max"""', 'pool_stride': '(2)'}), "(prelu1, pool_size=2, pool_type='max', pool_stride=2)\n", (3157, 3210), True, 'import paddle.fluid as fluid\n'), ((3231, 3319), 'paddle.fluid.layers.fc', 'fluid.layers.fc', ([], {'input': 'pool2', 'size': '(120)', 'param_attr': 'fc_w1_attr', 'bias_attr': 'fc_b1_attr'}), '(input=pool2, size=120, param_attr=fc_w1_attr, bias_attr=\n fc_b1_attr)\n', (3246, 3319), True, 'import paddle.fluid as fluid\n'), ((3411, 3445), 'paddle.fluid.layers.leaky_relu', 'layers.leaky_relu', (['fc1'], {'alpha': '(0.01)'}), '(fc1, alpha=0.01)\n', (3428, 3445), True, 'import paddle.fluid.layers as layers\n'), ((3456, 3548), 'paddle.fluid.layers.fc', 'fluid.layers.fc', ([], {'input': 'leaky_relu1', 'size': '(84)', 'param_attr': 'fc_w2_attr', 'bias_attr': 'fc_b2_attr'}), '(input=leaky_relu1, size=84, param_attr=fc_w2_attr,\n bias_attr=fc_b2_attr)\n', (3471, 3548), True, 'import paddle.fluid as fluid\n'), ((3638, 3657), 'paddle.fluid.layers.sigmoid', 'layers.sigmoid', (['fc2'], {}), '(fc2)\n', (3652, 3657), True, 'import paddle.fluid.layers as layers\n'), ((3668, 3766), 'paddle.fluid.layers.fc', 'fluid.layers.fc', ([], {'input': 'sigmoid1', 'size': 'num_classes', 'param_attr': 'fc_w3_attr', 'bias_attr': 'fc_b3_attr'}), '(input=sigmoid1, size=num_classes, param_attr=fc_w3_attr,\n bias_attr=fc_b3_attr)\n', (3683, 3766), True, 'import paddle.fluid as fluid\n'), ((3856, 3891), 'paddle.fluid.layers.softmax', 'layers.softmax', (['fc3'], {'use_cudnn': '(True)'}), '(fc3, use_cudnn=True)\n', (3870, 3891), True, 'import paddle.fluid.layers as layers\n'), ((19436, 19451), 'unittest.main', 'unittest.main', ([], {}), '()\n', (19449, 19451), False, 'import unittest\n'), ((4071, 4105), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""conv2d_w_1"""'}), "(name='conv2d_w_1')\n", (4086, 4105), True, 'import paddle.fluid as fluid\n'), ((4131, 4165), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""conv2d_w_2"""'}), "(name='conv2d_w_2')\n", (4146, 4165), True, 'import paddle.fluid as fluid\n'), ((4187, 4217), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_w_1"""'}), "(name='fc_w_1')\n", (4202, 4217), True, 'import paddle.fluid as fluid\n'), ((4239, 4269), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_w_2"""'}), "(name='fc_w_2')\n", (4254, 4269), True, 'import paddle.fluid as fluid\n'), ((4291, 4321), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_w_3"""'}), "(name='fc_w_3')\n", (4306, 4321), True, 'import paddle.fluid as fluid\n'), ((4347, 4381), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""conv2d_b_2"""'}), "(name='conv2d_b_2')\n", (4362, 4381), True, 'import paddle.fluid as fluid\n'), ((4403, 4433), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_b_1"""'}), "(name='fc_b_1')\n", (4418, 4433), True, 'import paddle.fluid as fluid\n'), ((4455, 4485), 
'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_b_2"""'}), "(name='fc_b_2')\n", (4470, 4485), True, 'import paddle.fluid as fluid\n'), ((4507, 4537), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""fc_b_3"""'}), "(name='fc_b_3')\n", (4522, 4537), True, 'import paddle.fluid as fluid\n'), ((6004, 6030), 'paddle.fluid.layers.flatten', 'fluid.layers.flatten', (['x', '(1)'], {}), '(x, 1)\n', (6024, 6030), True, 'import paddle.fluid as fluid\n'), ((7545, 7663), 'paddle.fluid.contrib.slim.quantization.ImperativeQuantAware', 'ImperativeQuantAware', ([], {'weight_quantize_type': 'weight_quantize_type', 'activation_quantize_type': 'activation_quantize_type'}), '(weight_quantize_type=weight_quantize_type,\n activation_quantize_type=activation_quantize_type)\n', (7565, 7663), False, 'from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware\n'), ((9736, 9775), 'paddle.save', 'paddle.save', (['save_dict', 'param_save_path'], {}), '(save_dict, param_save_path)\n', (9747, 9775), False, 'import paddle\n'), ((10290, 10318), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (10316, 10318), False, 'from paddle.fluid import core\n'), ((10422, 10443), 'paddle.fluid.Executor', 'fluid.Executor', (['place'], {}), '(place)\n', (10436, 10443), True, 'import paddle.fluid as fluid\n'), ((10460, 10475), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (10473, 10475), True, 'import paddle.fluid as fluid\n'), ((10492, 10507), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (10505, 10507), True, 'import paddle.fluid as fluid\n'), ((10526, 10541), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (10539, 10541), True, 'import paddle.fluid as fluid\n'), ((10993, 11005), 'paddle.fluid.core.Scope', 'core.Scope', ([], {}), '()\n', (11003, 11005), False, 'from paddle.fluid import core\n'), ((11681, 11898), 'paddle.fluid.contrib.slim.quantization.QuantizationTransformPass', 'QuantizationTransformPass', ([], {'scope': 'scope', 'place': 'place', 'activation_quantize_type': 'activation_quantize_type', 'weight_quantize_type': 'weight_quantize_type', 'quantizable_op_type': "['conv2d', 'depthwise_conv2d', 'mul']"}), "(scope=scope, place=place,\n activation_quantize_type=activation_quantize_type, weight_quantize_type\n =weight_quantize_type, quantizable_op_type=['conv2d',\n 'depthwise_conv2d', 'mul'])\n", (11706, 11898), False, 'from paddle.fluid.contrib.slim.quantization import OutScaleForTrainingPass, OutScaleForInferencePass, QuantizationTransformPass\n'), ((12054, 12103), 'paddle.fluid.contrib.slim.quantization.OutScaleForTrainingPass', 'OutScaleForTrainingPass', ([], {'scope': 'scope', 'place': 'place'}), '(scope=scope, place=place)\n', (12077, 12103), False, 'from paddle.fluid.contrib.slim.quantization import OutScaleForTrainingPass, OutScaleForInferencePass, QuantizationTransformPass\n'), ((12169, 12190), 'paddle.fluid.BuildStrategy', 'fluid.BuildStrategy', ([], {}), '()\n', (12188, 12190), True, 'import paddle.fluid as fluid\n'), ((12408, 12475), 'paddle.fluid.DataFeeder', 'fluid.DataFeeder', ([], {'feed_list': '[static_img, static_label]', 'place': 'place'}), '(feed_list=[static_img, static_label], place=place)\n', (12424, 12475), True, 'import paddle.fluid as fluid\n'), ((12930, 12967), 'paddle.fluid.contrib.slim.quantization.OutScaleForInferencePass', 'OutScaleForInferencePass', ([], {'scope': 'scope'}), '(scope=scope)\n', (12954, 12967), False, 'from paddle.fluid.contrib.slim.quantization import 
OutScaleForTrainingPass, OutScaleForInferencePass, QuantizationTransformPass\n'), ((14350, 14519), 'paddle.fluid.io.load_inference_model', 'fluid.io.load_inference_model', ([], {'dirname': 'dynamic_save_dir', 'executor': 'exe', 'model_filename': "('lenet' + INFER_MODEL_SUFFIX)", 'params_filename': "('lenet' + INFER_PARAMS_SUFFIX)"}), "(dirname=dynamic_save_dir, executor=exe,\n model_filename='lenet' + INFER_MODEL_SUFFIX, params_filename='lenet' +\n INFER_PARAMS_SUFFIX)\n", (14379, 14519), True, 'import paddle.fluid as fluid\n'), ((14691, 14859), 'paddle.fluid.io.load_inference_model', 'fluid.io.load_inference_model', ([], {'dirname': 'static_save_dir', 'executor': 'exe', 'model_filename': "('lenet' + INFER_MODEL_SUFFIX)", 'params_filename': "('lenet' + INFER_PARAMS_SUFFIX)"}), "(dirname=static_save_dir, executor=exe,\n model_filename='lenet' + INFER_MODEL_SUFFIX, params_filename='lenet' +\n INFER_PARAMS_SUFFIX)\n", (14720, 14859), True, 'import paddle.fluid as fluid\n'), ((16223, 16341), 'paddle.fluid.contrib.slim.quantization.ImperativeQuantAware', 'ImperativeQuantAware', ([], {'weight_quantize_type': 'weight_quantize_type', 'activation_quantize_type': 'activation_quantize_type'}), '(weight_quantize_type=weight_quantize_type,\n activation_quantize_type=activation_quantize_type)\n', (16243, 16341), False, 'from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware\n'), ((16833, 16861), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (16859, 16861), False, 'from paddle.fluid import core\n'), ((16965, 16986), 'paddle.fluid.Executor', 'fluid.Executor', (['place'], {}), '(place)\n', (16979, 16986), True, 'import paddle.fluid as fluid\n'), ((17103, 17278), 'paddle.fluid.io.load_inference_model', 'fluid.io.load_inference_model', ([], {'dirname': 'dynamic_model_save_dir', 'executor': 'exe', 'model_filename': "('lenet' + INFER_MODEL_SUFFIX)", 'params_filename': "('lenet' + INFER_PARAMS_SUFFIX)"}), "(dirname=dynamic_model_save_dir, executor=exe,\n model_filename='lenet' + INFER_MODEL_SUFFIX, params_filename='lenet' +\n INFER_PARAMS_SUFFIX)\n", (17132, 17278), True, 'import paddle.fluid as fluid\n'), ((17450, 17624), 'paddle.fluid.io.load_inference_model', 'fluid.io.load_inference_model', ([], {'dirname': 'static_model_save_dir', 'executor': 'exe', 'model_filename': "('lenet' + INFER_MODEL_SUFFIX)", 'params_filename': "('lenet' + INFER_PARAMS_SUFFIX)"}), "(dirname=static_model_save_dir, executor=exe,\n model_filename='lenet' + INFER_MODEL_SUFFIX, params_filename='lenet' +\n INFER_PARAMS_SUFFIX)\n", (17479, 17624), True, 'import paddle.fluid as fluid\n'), ((18648, 18670), 'paddle.fluid.contrib.slim.quantization.ImperativeQuantAware', 'ImperativeQuantAware', ([], {}), '()\n', (18668, 18670), False, 'from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware\n'), ((4586, 4708), 'paddle.nn.Conv2D', 'Conv2D', ([], {'in_channels': '(1)', 'out_channels': '(6)', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'weight_attr': 'conv2d_w1_attr', 'bias_attr': '(False)'}), '(in_channels=1, out_channels=6, kernel_size=3, stride=1, padding=1,\n weight_attr=conv2d_w1_attr, bias_attr=False)\n', (4592, 4708), False, 'from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D\n'), ((4831, 4845), 'paddle.nn.BatchNorm2D', 'BatchNorm2D', (['(6)'], {}), '(6)\n', (4842, 4845), False, 'from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D\n'), ((4859, 4865), 'paddle.nn.layer.ReLU', 'ReLU', ([], {}), '()\n', (4863, 4865), 
False, 'from paddle.nn.layer import ReLU, LeakyReLU, Sigmoid, Softmax, PReLU\n'), ((4879, 4930), 'paddle.fluid.dygraph.nn.Pool2D', 'Pool2D', ([], {'pool_size': '(2)', 'pool_type': '"""max"""', 'pool_stride': '(2)'}), "(pool_size=2, pool_type='max', pool_stride=2)\n", (4885, 4930), False, 'from paddle.fluid.dygraph.nn import Pool2D\n'), ((4961, 5093), 'paddle.nn.Conv2D', 'Conv2D', ([], {'in_channels': '(6)', 'out_channels': '(16)', 'kernel_size': '(5)', 'stride': '(1)', 'padding': '(0)', 'weight_attr': 'conv2d_w2_attr', 'bias_attr': 'conv2d_b2_attr'}), '(in_channels=6, out_channels=16, kernel_size=5, stride=1, padding=0,\n weight_attr=conv2d_w2_attr, bias_attr=conv2d_b2_attr)\n', (4967, 5093), False, 'from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D\n'), ((5216, 5231), 'paddle.nn.BatchNorm2D', 'BatchNorm2D', (['(16)'], {}), '(16)\n', (5227, 5231), False, 'from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D\n'), ((5245, 5252), 'paddle.nn.layer.PReLU', 'PReLU', ([], {}), '()\n', (5250, 5252), False, 'from paddle.nn.layer import ReLU, LeakyReLU, Sigmoid, Softmax, PReLU\n'), ((5266, 5300), 'paddle.nn.MaxPool2D', 'MaxPool2D', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (5275, 5300), False, 'from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D\n'), ((5362, 5454), 'paddle.nn.Linear', 'Linear', ([], {'in_features': '(400)', 'out_features': '(120)', 'weight_attr': 'fc_w1_attr', 'bias_attr': 'fc_b1_attr'}), '(in_features=400, out_features=120, weight_attr=fc_w1_attr, bias_attr\n =fc_b1_attr)\n', (5368, 5454), False, 'from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D\n'), ((5528, 5539), 'paddle.nn.layer.LeakyReLU', 'LeakyReLU', ([], {}), '()\n', (5537, 5539), False, 'from paddle.nn.layer import ReLU, LeakyReLU, Sigmoid, Softmax, PReLU\n'), ((5553, 5644), 'paddle.nn.Linear', 'Linear', ([], {'in_features': '(120)', 'out_features': '(84)', 'weight_attr': 'fc_w2_attr', 'bias_attr': 'fc_b2_attr'}), '(in_features=120, out_features=84, weight_attr=fc_w2_attr, bias_attr=\n fc_b2_attr)\n', (5559, 5644), False, 'from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D\n'), ((5718, 5727), 'paddle.nn.layer.Sigmoid', 'Sigmoid', ([], {}), '()\n', (5725, 5727), False, 'from paddle.nn.layer import ReLU, LeakyReLU, Sigmoid, Softmax, PReLU\n'), ((5741, 5839), 'paddle.nn.Linear', 'Linear', ([], {'in_features': '(84)', 'out_features': 'num_classes', 'weight_attr': 'fc_w3_attr', 'bias_attr': 'fc_b3_attr'}), '(in_features=84, out_features=num_classes, weight_attr=fc_w3_attr,\n bias_attr=fc_b3_attr)\n', (5747, 5839), False, 'from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D\n'), ((5914, 5923), 'paddle.nn.Softmax', 'Softmax', ([], {}), '()\n', (5921, 5923), False, 'from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D\n'), ((7071, 7098), 'paddle.dataset.mnist.test', 'paddle.dataset.mnist.test', ([], {}), '()\n', (7096, 7098), False, 'import paddle\n'), ((7699, 7720), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (7718, 7720), True, 'import paddle.fluid as fluid\n'), ((7734, 7754), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (7748, 7754), True, 'import numpy as np\n'), ((10340, 10357), 'paddle.fluid.core.CUDAPlace', 'core.CUDAPlace', (['(0)'], {}), '(0)\n', (10354, 10357), False, 'from paddle.fluid import core\n'), ((10392, 10407), 'paddle.fluid.core.CPUPlace', 'core.CPUPlace', ([], {}), '()\n', (10405, 10407), False, 
'from paddle.fluid import core\n'), ((10800, 10825), 'paddle.fluid.unique_name.guard', 'fluid.unique_name.guard', ([], {}), '()\n', (10823, 10825), True, 'import paddle.fluid as fluid\n'), ((11019, 11043), 'paddle.fluid.scope_guard', 'fluid.scope_guard', (['scope'], {}), '(scope)\n', (11036, 11043), True, 'import paddle.fluid as fluid\n'), ((11548, 11569), 'paddle.fluid.core.Graph', 'core.Graph', (['main.desc'], {}), '(main.desc)\n', (11558, 11569), False, 'from paddle.fluid import core\n'), ((11617, 11639), 'paddle.fluid.core.Graph', 'core.Graph', (['infer.desc'], {}), '(infer.desc)\n', (11627, 11639), False, 'from paddle.fluid import core\n'), ((12502, 12526), 'paddle.fluid.scope_guard', 'fluid.scope_guard', (['scope'], {}), '(scope)\n', (12519, 12526), True, 'import paddle.fluid as fluid\n'), ((13136, 13160), 'paddle.fluid.scope_guard', 'fluid.scope_guard', (['scope'], {}), '(scope)\n', (13153, 13160), True, 'import paddle.fluid as fluid\n'), ((13174, 13435), 'paddle.fluid.io.save_inference_model', 'fluid.io.save_inference_model', ([], {'dirname': 'static_save_dir', 'feeded_var_names': '[infer_img.name]', 'target_vars': '[infer_pre]', 'executor': 'exe', 'main_program': 'save_program', 'model_filename': "('lenet' + INFER_MODEL_SUFFIX)", 'params_filename': "('lenet' + INFER_PARAMS_SUFFIX)"}), "(dirname=static_save_dir, feeded_var_names=[\n infer_img.name], target_vars=[infer_pre], executor=exe, main_program=\n save_program, model_filename='lenet' + INFER_MODEL_SUFFIX,\n params_filename='lenet' + INFER_PARAMS_SUFFIX)\n", (13203, 13435), True, 'import paddle.fluid as fluid\n'), ((13699, 13722), 'numpy.abs', 'np.abs', (['(loss_d - loss_s)'], {}), '(loss_d - loss_s)\n', (13705, 13722), True, 'import numpy as np\n'), ((16377, 16398), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (16396, 16398), True, 'import paddle.fluid as fluid\n'), ((16462, 16490), 'paddle.load', 'paddle.load', (['load_param_path'], {}), '(load_param_path)\n', (16473, 16490), False, 'import paddle\n'), ((16883, 16900), 'paddle.fluid.core.CUDAPlace', 'core.CUDAPlace', (['(0)'], {}), '(0)\n', (16897, 16900), False, 'from paddle.fluid import core\n'), ((16935, 16950), 'paddle.fluid.core.CPUPlace', 'core.CPUPlace', ([], {}), '()\n', (16948, 16950), False, 'from paddle.fluid import core\n'), ((18684, 18705), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (18703, 18705), True, 'import paddle.fluid as fluid\n'), ((18759, 18795), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (18782, 18795), False, 'import warnings\n'), ((18814, 18845), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (18835, 18845), False, 'import warnings\n'), ((6247, 6272), 'paddle.fluid.unique_name.guard', 'fluid.unique_name.guard', ([], {}), '()\n', (6270, 6272), True, 'import paddle.fluid as fluid\n'), ((7767, 7795), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (7793, 7795), True, 'import paddle.fluid as fluid\n'), ((7827, 7858), 'paddle.fluid.default_startup_program', 'fluid.default_startup_program', ([], {}), '()\n', (7856, 7858), True, 'import paddle.fluid as fluid\n'), ((9061, 9094), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['x_data'], {}), '(x_data)\n', (9086, 9094), True, 'import paddle.fluid as fluid\n'), ((9119, 9152), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['y_data'], {}), '(y_data)\n', (9144, 9152), True, 'import 
paddle.fluid as fluid\n'), ((9210, 9248), 'paddle.fluid.layers.cross_entropy', 'fluid.layers.cross_entropy', (['out', 'label'], {}), '(out, label)\n', (9236, 9248), True, 'import paddle.fluid as fluid\n'), ((9276, 9299), 'paddle.fluid.layers.mean', 'fluid.layers.mean', (['loss'], {}), '(loss)\n', (9293, 9299), True, 'import paddle.fluid as fluid\n'), ((10844, 10878), 'paddle.fluid.program_guard', 'fluid.program_guard', (['main', 'startup'], {}), '(main, startup)\n', (10863, 10878), True, 'import paddle.fluid as fluid\n'), ((10902, 10933), 'paddle.fluid.optimizer.AdamOptimizer', 'AdamOptimizer', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (10915, 10933), False, 'from paddle.fluid.optimizer import AdamOptimizer\n'), ((12259, 12298), 'paddle.fluid.CompiledProgram', 'fluid.CompiledProgram', (['main_graph.graph'], {}), '(main_graph.graph)\n', (12280, 12298), True, 'import paddle.fluid as fluid\n'), ((14024, 14050), 'numpy.array', 'np.array', (['dynamic_loss_rec'], {}), '(dynamic_loss_rec)\n', (14032, 14050), True, 'import numpy as np\n'), ((14068, 14093), 'numpy.array', 'np.array', (['static_loss_rec'], {}), '(static_loss_rec)\n', (14076, 14093), True, 'import numpy as np\n'), ((6295, 6329), 'paddle.fluid.program_guard', 'fluid.program_guard', (['main', 'startup'], {}), '(main, startup)\n', (6314, 6329), True, 'import paddle.fluid as fluid\n'), ((6448, 6515), 'paddle.fluid.layers.data', 'fluid.layers.data', ([], {'name': '"""image"""', 'shape': '[1, 28, 28]', 'dtype': '"""float32"""'}), "(name='image', shape=[1, 28, 28], dtype='float32')\n", (6465, 6515), True, 'import paddle.fluid as fluid\n'), ((6569, 6626), 'paddle.fluid.layers.data', 'fluid.layers.data', ([], {'name': '"""label"""', 'shape': '[1]', 'dtype': '"""int64"""'}), "(name='label', shape=[1], dtype='int64')\n", (6586, 6626), True, 'import paddle.fluid as fluid\n'), ((10032, 10097), 'paddle.static.InputSpec', 'paddle.static.InputSpec', ([], {'shape': '[None, 1, 28, 28]', 'dtype': '"""float32"""'}), "(shape=[None, 1, 28, 28], dtype='float32')\n", (10055, 10097), False, 'import paddle\n'), ((16719, 16784), 'paddle.static.InputSpec', 'paddle.static.InputSpec', ([], {'shape': '[None, 1, 28, 28]', 'dtype': '"""float32"""'}), "(shape=[None, 1, 28, 28], dtype='float32')\n", (16742, 16784), False, 'import paddle\n'), ((6769, 6826), 'paddle.fluid.layers.cross_entropy', 'fluid.layers.cross_entropy', ([], {'input': 'prediction', 'label': 'label'}), '(input=prediction, label=label)\n', (6795, 6826), True, 'import paddle.fluid as fluid\n'), ((6891, 6914), 'paddle.fluid.layers.mean', 'fluid.layers.mean', (['loss'], {}), '(loss)\n', (6908, 6914), True, 'import paddle.fluid as fluid\n'), ((13760, 13774), 'numpy.abs', 'np.abs', (['loss_s'], {}), '(loss_s)\n', (13766, 13774), True, 'import numpy as np\n'), ((19006, 19071), 'paddle.static.InputSpec', 'paddle.static.InputSpec', ([], {'shape': '[None, 1, 28, 28]', 'dtype': '"""float32"""'}), "(shape=[None, 1, 28, 28], dtype='float32')\n", (19029, 19071), False, 'import paddle\n'), ((8158, 8180), 'numpy.zeros_like', 'np.zeros_like', (['p_value'], {}), '(p_value)\n', (8171, 8180), True, 'import numpy as np\n'), ((8955, 8985), 'numpy.array', 'np.array', (['[x[1] for x in data]'], {}), '([x[1] for x in data])\n', (8963, 8985), True, 'import numpy as np\n'), ((8317, 8336), 'numpy.product', 'np.product', (['p_shape'], {}), '(p_shape)\n', (8327, 8336), True, 'import numpy as np\n')]
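The tuples above were extracted from a PaddlePaddle quantization-aware-training test; for orientation, here is a minimal sketch of how the ImperativeQuantAware construction they record is typically wired up. Only the keyword names come from the extraction; the concrete quantize-type strings below are assumptions.

# Sketch only: mirrors the ImperativeQuantAware call recorded in the tuples above.
# The two quantize-type values are assumed, not taken from the extraction.
from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware

weight_quantize_type = 'abs_max'                      # assumption
activation_quantize_type = 'moving_average_abs_max'   # assumption

imperative_qat = ImperativeQuantAware(
    weight_quantize_type=weight_quantize_type,
    activation_quantize_type=activation_quantize_type)
# imperative_qat.quantize(model) would then rewrite a dygraph model in place
# before training and export, which is the flow the extracted calls exercise.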
nickamon/grr
grr/server/hunts/results.py
ad1936c74728de00db90f6fafa47892b54cfc92d
#!/usr/bin/env python
"""Classes to store and manage hunt results.
"""

from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib.rdfvalues import structs as rdf_structs
from grr_response_proto import jobs_pb2
from grr.server import access_control
from grr.server import aff4
from grr.server import data_store
from grr.server import sequential_collection
from grr.server.aff4_objects import aff4_queue


class HuntResultNotification(rdf_structs.RDFProtoStruct):
  protobuf = jobs_pb2.HuntResultNotification
  rdf_deps = [
      rdfvalue.RDFDatetime,
      rdfvalue.RDFURN,
  ]

  def ResultRecord(self):
    # TODO(amoser): The subpath could be part of the notification.
    return data_store.Record(
        queue_id=self.result_collection_urn,
        timestamp=self.timestamp,
        suffix=self.suffix,
        subpath="Results",
        value=None)


RESULT_NOTIFICATION_QUEUE = rdfvalue.RDFURN("aff4:/hunt_results_queue")


class HuntResultQueue(aff4_queue.Queue):
  """A global queue of hunt results which need to be processed."""
  rdf_type = HuntResultNotification

  @classmethod
  def ClaimNotificationsForCollection(cls,
                                      token=None,
                                      start_time=None,
                                      lease_time=200,
                                      collection=None):
    """Return unclaimed hunt result notifications for collection.

    Args:
      token: The security token to perform database operations with.
      start_time: If set, an RDFDateTime indicating at what point to start
        claiming notifications. Only notifications with a timestamp after this
        point will be claimed.
      lease_time: How long to claim the notifications for.
      collection: The urn of the collection to find notifications for. If
        unset, the earliest (unclaimed) notification will determine the
        collection.

    Returns:
      A pair (collection, results) where collection is the collection that
      notifications were retrieved for and results is a list of Record objects
      which identify GrrMessage within the result collection.
    """

    class CollectionFilter(object):

      def __init__(self, collection):
        self.collection = collection

      def FilterRecord(self, notification):
        if self.collection is None:
          self.collection = notification.result_collection_urn
        return self.collection != notification.result_collection_urn

    f = CollectionFilter(collection)
    results = []
    with aff4.FACTORY.OpenWithLock(
        RESULT_NOTIFICATION_QUEUE,
        aff4_type=HuntResultQueue,
        lease_time=300,
        blocking=True,
        blocking_sleep_interval=15,
        blocking_lock_timeout=600,
        token=token) as queue:
      for record in queue.ClaimRecords(
          record_filter=f.FilterRecord,
          start_time=start_time,
          timeout=lease_time,
          limit=100000):
        results.append(record)
    return (f.collection, results)

  @classmethod
  def DeleteNotifications(cls, records, token=None):
    """Delete hunt notifications."""
    cls.DeleteRecords(records, token=token)


class HuntResultCollection(sequential_collection.GrrMessageCollection):
  """Sequential HuntResultCollection."""

  @classmethod
  def StaticAdd(cls,
                collection_urn,
                rdf_value,
                mutation_pool=None,
                timestamp=None,
                suffix=None,
                **kwargs):
    ts = super(HuntResultCollection, cls).StaticAdd(
        collection_urn,
        rdf_value,
        mutation_pool=mutation_pool,
        timestamp=timestamp,
        suffix=suffix,
        **kwargs)
    HuntResultQueue.StaticAdd(
        RESULT_NOTIFICATION_QUEUE,
        HuntResultNotification(
            result_collection_urn=collection_urn,
            timestamp=ts[0],
            suffix=ts[1]),
        mutation_pool=mutation_pool)
    return ts


class ResultQueueInitHook(registry.InitHook):
  pre = [aff4.AFF4InitHook]

  def Run(self):
    try:
      with aff4.FACTORY.Create(
          RESULT_NOTIFICATION_QUEUE,
          HuntResultQueue,
          mode="w",
          token=aff4.FACTORY.root_token):
        pass
    except access_control.UnauthorizedAccess:
      pass
[((898, 941), 'grr.lib.rdfvalue.RDFURN', 'rdfvalue.RDFURN', (['"""aff4:/hunt_results_queue"""'], {}), "('aff4:/hunt_results_queue')\n", (913, 941), False, 'from grr.lib import rdfvalue\n'), ((695, 831), 'grr.server.data_store.Record', 'data_store.Record', ([], {'queue_id': 'self.result_collection_urn', 'timestamp': 'self.timestamp', 'suffix': 'self.suffix', 'subpath': '"""Results"""', 'value': 'None'}), "(queue_id=self.result_collection_urn, timestamp=self.\n timestamp, suffix=self.suffix, subpath='Results', value=None)\n", (712, 831), False, 'from grr.server import data_store\n'), ((2547, 2735), 'grr.server.aff4.FACTORY.OpenWithLock', 'aff4.FACTORY.OpenWithLock', (['RESULT_NOTIFICATION_QUEUE'], {'aff4_type': 'HuntResultQueue', 'lease_time': '(300)', 'blocking': '(True)', 'blocking_sleep_interval': '(15)', 'blocking_lock_timeout': '(600)', 'token': 'token'}), '(RESULT_NOTIFICATION_QUEUE, aff4_type=\n HuntResultQueue, lease_time=300, blocking=True, blocking_sleep_interval\n =15, blocking_lock_timeout=600, token=token)\n', (2572, 2735), False, 'from grr.server import aff4\n'), ((4072, 4180), 'grr.server.aff4.FACTORY.Create', 'aff4.FACTORY.Create', (['RESULT_NOTIFICATION_QUEUE', 'HuntResultQueue'], {'mode': '"""w"""', 'token': 'aff4.FACTORY.root_token'}), "(RESULT_NOTIFICATION_QUEUE, HuntResultQueue, mode='w',\n token=aff4.FACTORY.root_token)\n", (4091, 4180), False, 'from grr.server import aff4\n')]
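For orientation, a minimal sketch of how the two classes in results.py above are intended to interact: StaticAdd appends a result and enqueues a HuntResultNotification, which a worker later claims and then deletes. The collection URN, value, token and mutation pool below are hypothetical placeholders, not values taken from the module.

# Illustrative only; collection_urn, rdf_value, pool and token are placeholders.
ts = HuntResultCollection.StaticAdd(collection_urn, rdf_value, mutation_pool=pool)

# A worker later claims pending notifications (optionally for one collection)
# and deletes them once the referenced results have been processed.
collection, records = HuntResultQueue.ClaimNotificationsForCollection(token=token)
HuntResultQueue.DeleteNotifications(records, token=token)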
NodeJSmith/py-simple-rest-sharepoint
src/simple_sharepoint/site.py
77ee5f76364e7b6096228945ed7e3bd637214a66
""" Module for higher level SharePoint REST api actions - utilize methods in the api.py module """ class Site(): def __init__(self, sp): self.sp = sp @property def info(self): endpoint = "_api/site" value = self.sp.get(endpoint).json() return value @property def web(self): endpoint = "_api/web" value = self.sp.get(endpoint).json() return value @property def contextinfo(self): return self.sp.contextinfo @property def contenttypes(self): endpoint = "_api/web/contenttypes" value = self.sp.get(endpoint).json().get('value') return value @property def eventreceivers(self): endpoint = "_api/web/eventreceivers" value = self.sp.get(endpoint).json().get('value') return value @property def features(self): endpoint = "_api/web/features" value = self.sp.get(endpoint).json().get('value') return value @property def fields(self): endpoint = "_api/web/fields" value = self.sp.get(endpoint).json().get('value') return value @property def lists(self): endpoint = "_api/web/lists" value = self.sp.get(endpoint).json().get('value') return value @property def siteusers(self): endpoint = "_api/web/siteusers" value = self.sp.get(endpoint).json().get('value') return value @property def groups(self): endpoint = "_api/web/sitegroups" value = self.sp.get(endpoint).json().get('value') return value @property def roleassignments(self): endpoint = "_api/web/roleassignments" value = self.sp.get(endpoint).json().get('value') return value # def set_title_field_to_optional(self, list_title): # """Sets the Title field in the given list to optional # :param list_title: str: title of SharePoint list # """ # # TODO - this likely is not necessary anymore, since we are not creating new lists # field_rec = [x for x in self.get_field(list_title) # if x['InternalName'] == "Title"][0] # if field_rec and field_rec.get('Required'): # body = {'Required': False} # self.update_list_field(field_rec, list_title, body) # def check_field_exists(self, list_title, field_title): # """Check that a field exists to avoid error from attempting to access non-existent field # :param list_title: str: title of SharePoint list # :param field_title: str: title of field in SharePoint list # :returns: bool # """ # field_rec = self._get_first_or_none( # "InternalName", field_title, list_data=self.get_list_fields(list_title)) # return field_rec is not None # def update_list_field(self, field_rec, list_title, body): # """Given a field record, a list title, and the json body to update with, updates the SharePoint list field # :param field_rec: dict: field record from SharePoint field query # :param list_title: str: title of SharePoint list # :param body: dict: dictionary structured for SharePoint REST api fields endpoint # """ # field_id = field_rec.get('Id') # update_field_url = "_api/web/lists/GetByTitle('{0}')/fields('{1}')".format( # list_title, field_id) # response = self.sp.post(url=update_field_url, json=body) # response.raise_for_status() # def get_email_from_sharepoint_id(self, sharepoint_id: int): # """Returns email address from a SharePoint integer user id value # :param sp_user_id: int: SharePoint user id # :returns: str # """ # return self._get_first_or_none("Id", sharepoint_id, list_data=self.siteusers).get("Email") # def get_sharepoint_id_from_email(self, email): # """Returns SharePoint integer user ID from an email address # :param username: str: email address # :returns: int # """ # return self._get_first_or_none("Email", email, list_data=self.siteusers).get("Id") def _get_first_or_none(self, compare_column, compare_value, list_data=None, url=None): 
        if not list_data and not url:
            raise ValueError("either list_data or url must be provided")
        if not list_data:
            list_data = self.sp.get(url).json().get('value')
        try:
            return [x for x in list_data if x[compare_column] == compare_value][0]
        except IndexError:
            return None

    # TODO Add large file upload with chunking
    # https://github.com/JonathanHolvey/sharepy/issues/23
[]
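A short usage sketch of the Site wrapper above. The sp object stands in for the lower-level REST client from the api.py module mentioned in the docstring, so its construction is assumed rather than shown.

# sp is a placeholder for the low-level REST client that Site expects.
site = Site(sp)

print(site.info)              # raw _api/site payload
for sp_list in site.lists:    # each entry is a dict from _api/web/lists
    print(sp_list.get('Title'))

# Look up the first matching record, e.g. a site user by email address.
user = site._get_first_or_none('Email', 'someone@example.com',
                               list_data=site.siteusers)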
rcooke-ast/PYPIT
pypeit/tests/test_metadata.py
0cb9c4cb422736b855065a35aefc2bdba6d51dd0
import os import glob import shutil import yaml from IPython import embed import pytest import numpy as np from pypeit.par.util import parse_pypeit_file from pypeit.pypeitsetup import PypeItSetup from pypeit.tests.tstutils import dev_suite_required, data_path from pypeit.metadata import PypeItMetaData from pypeit.spectrographs.util import load_spectrograph from pypeit.scripts.setup import Setup def test_read_combid(): # ------------------------------------------------------------------ # In case of failed tests setup_dir = data_path('setup_files') if os.path.isdir(setup_dir): shutil.rmtree(setup_dir) config_dir = data_path('shane_kast_blue_A') if os.path.isdir(config_dir): shutil.rmtree(config_dir) # ------------------------------------------------------------------ # Generate the pypeit file with the comb_id droot = data_path('b') pargs = Setup.parse_args(['-r', droot, '-s', 'shane_kast_blue', '-c=all', '-b', '--extension=fits.gz', '--output_path={:s}'.format(data_path(''))]) Setup.main(pargs) shutil.rmtree(setup_dir) pypeit_file = os.path.join(config_dir, 'shane_kast_blue_A.pypeit') cfg_lines, data_files, frametype, usrdata, setups, _ = parse_pypeit_file(pypeit_file) # Get the spectrograph spectrograph = None for l in cfg_lines: if 'spectrograph' in l: spectrograph = load_spectrograph(l.split(' ')[-1]) break assert spectrograph is not None, 'Did not appropriately read spectrograph' # Set the metadata pmd = PypeItMetaData(spectrograph, spectrograph.default_pypeit_par(), files=data_files, usrdata=usrdata, strict=False) indx = pmd['filename'] == 'b27.fits.gz' assert pmd['comb_id'][indx] == [1], 'Incorrect combination group ID' assert pmd['comb_id'][np.where(~indx)[0]][0] == -1, 'Incorrect combination group ID' shutil.rmtree(config_dir) @dev_suite_required def test_lris_red_multi_400(): file_list = glob.glob(os.path.join(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_lris_red', 'multi_400_8500_d560', '*.fits.gz')) cfg_lines = ['[rdx]', 'spectrograph = keck_lris_red'] ps = PypeItSetup(file_list, cfg_lines=cfg_lines) ps.build_fitstbl() ps.get_frame_types(flag_unknown=True) cfgs = ps.fitstbl.unique_configurations() ps.fitstbl.set_configurations(cfgs) ps.fitstbl.set_calibration_groups() #global_frames=['bias', 'dark']) # Test assert np.all(ps.fitstbl['setup'] == 'A') @dev_suite_required def test_lris_red_multi(): file_list = glob.glob(os.path.join(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_lris_red', 'multi*', '*.fits*')) cfg_lines = ['[rdx]', 'spectrograph = keck_lris_red'] ps = PypeItSetup(file_list, cfg_lines=cfg_lines) ps.build_fitstbl() ps.get_frame_types(flag_unknown=True) cfgs = ps.fitstbl.unique_configurations() ps.fitstbl.set_configurations(cfgs) ps.fitstbl.set_calibration_groups() #global_frames=['bias', 'dark']) @dev_suite_required def test_lris_red_multi_calib(): file_list = glob.glob(os.path.join(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_lris_red', 'multi_400_8500_d560', '*.fits.gz')) cfg_lines = ['[rdx]', 'spectrograph = keck_lris_red'] ps = PypeItSetup(file_list, cfg_lines=cfg_lines) ps.build_fitstbl() ps.get_frame_types(flag_unknown=True) cfgs = ps.fitstbl.unique_configurations() ps.fitstbl.set_configurations(cfgs) ps.fitstbl.set_calibration_groups() #global_frames=['bias', 'dark']) cfile = data_path('test.calib') ps.fitstbl.write_calib(cfile) with open(cfile, 'r') as f: calib = yaml.load(f, Loader=yaml.FullLoader) assert np.array_equal(list(calib['A'].keys()), ['--', 1]), \ 'Calibrations dictionary read incorrectly.' 
os.remove(cfile) @dev_suite_required def test_lris_red_multi_run(): # Perform the setup file_list = glob.glob(os.path.join(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_lris_red', 'multi*', '*.fits*')) cfg_lines = ['[rdx]', 'spectrograph = keck_lris_red'] ps = PypeItSetup(file_list, cfg_lines=cfg_lines) ps.run(setup_only=True) # Test #assert len(ps.setup_dict) == 2, 'Should find two setups' assert len(ps.fitstbl) >= 40, 'Should find 40+ files' arcs = ps.fitstbl['filename'][ps.fitstbl.find_frames('arc')] assert len(arcs) >= 2, 'Should find two or more arcs' assert 'r170320_2017.fits.gz' in arcs, \ 'Should have identified r170320_2017.fits.gz as an arc' assert 'r170816_0057.fits' in ps.fitstbl['filename'][ps.fitstbl.find_frames('science')], \ 'Should have identified r170816_0057.fits as a science frame' # Clean-up #os.remove('keck_lris_red.lst') #os.remove('keck_lris_red.setups') os.remove('keck_lris_red.sorted') @dev_suite_required def test_lris_blue_pypeit_overwrite(): f = os.path.join(os.environ['PYPEIT_DEV'], 'pypeit_files/keck_lris_blue_long_400_3400_d560.pypeit') assert os.path.isfile(f), 'Could not find pypeit file.' cfg_lines, data_files, frametype, usrdata, setups, _ = parse_pypeit_file(f, file_check=False) # Change the dev path for i in range(len(data_files)): path_list = data_files[i].split('/') for j,p in enumerate(path_list): if p == 'RAW_DATA': break data_files[i] = os.path.join(os.environ['PYPEIT_DEV'], '/'.join(path_list[j:])) # Read the fits table with and without the user data spectrograph = load_spectrograph('keck_lris_blue') par = spectrograph.default_pypeit_par() fitstbl = PypeItMetaData(spectrograph, par, files=data_files) fitstbl_usr = PypeItMetaData(spectrograph, par, files=data_files, usrdata=usrdata) assert fitstbl['target'][0] == 'unknown', 'Grating name changed in file header' assert fitstbl_usr['target'][0] == 'test', 'Grating name changed in pypeit file' assert fitstbl['target'][0] != fitstbl_usr['target'][0], \ 'Fits header value and input pypeit file value expected to be different.'
[((547, 571), 'pypeit.tests.tstutils.data_path', 'data_path', (['"""setup_files"""'], {}), "('setup_files')\n", (556, 571), False, 'from pypeit.tests.tstutils import dev_suite_required, data_path\n'), ((579, 603), 'os.path.isdir', 'os.path.isdir', (['setup_dir'], {}), '(setup_dir)\n', (592, 603), False, 'import os\n'), ((655, 685), 'pypeit.tests.tstutils.data_path', 'data_path', (['"""shane_kast_blue_A"""'], {}), "('shane_kast_blue_A')\n", (664, 685), False, 'from pypeit.tests.tstutils import dev_suite_required, data_path\n'), ((693, 718), 'os.path.isdir', 'os.path.isdir', (['config_dir'], {}), '(config_dir)\n', (706, 718), False, 'import os\n'), ((888, 902), 'pypeit.tests.tstutils.data_path', 'data_path', (['"""b"""'], {}), "('b')\n", (897, 902), False, 'from pypeit.tests.tstutils import dev_suite_required, data_path\n'), ((1088, 1105), 'pypeit.scripts.setup.Setup.main', 'Setup.main', (['pargs'], {}), '(pargs)\n', (1098, 1105), False, 'from pypeit.scripts.setup import Setup\n'), ((1110, 1134), 'shutil.rmtree', 'shutil.rmtree', (['setup_dir'], {}), '(setup_dir)\n', (1123, 1134), False, 'import shutil\n'), ((1154, 1206), 'os.path.join', 'os.path.join', (['config_dir', '"""shane_kast_blue_A.pypeit"""'], {}), "(config_dir, 'shane_kast_blue_A.pypeit')\n", (1166, 1206), False, 'import os\n'), ((1266, 1296), 'pypeit.par.util.parse_pypeit_file', 'parse_pypeit_file', (['pypeit_file'], {}), '(pypeit_file)\n', (1283, 1296), False, 'from pypeit.par.util import parse_pypeit_file\n'), ((1949, 1974), 'shutil.rmtree', 'shutil.rmtree', (['config_dir'], {}), '(config_dir)\n', (1962, 1974), False, 'import shutil\n'), ((2268, 2311), 'pypeit.pypeitsetup.PypeItSetup', 'PypeItSetup', (['file_list'], {'cfg_lines': 'cfg_lines'}), '(file_list, cfg_lines=cfg_lines)\n', (2279, 2311), False, 'from pypeit.pypeitsetup import PypeItSetup\n'), ((2558, 2592), 'numpy.all', 'np.all', (["(ps.fitstbl['setup'] == 'A')"], {}), "(ps.fitstbl['setup'] == 'A')\n", (2564, 2592), True, 'import numpy as np\n'), ((2868, 2911), 'pypeit.pypeitsetup.PypeItSetup', 'PypeItSetup', (['file_list'], {'cfg_lines': 'cfg_lines'}), '(file_list, cfg_lines=cfg_lines)\n', (2879, 2911), False, 'from pypeit.pypeitsetup import PypeItSetup\n'), ((3432, 3475), 'pypeit.pypeitsetup.PypeItSetup', 'PypeItSetup', (['file_list'], {'cfg_lines': 'cfg_lines'}), '(file_list, cfg_lines=cfg_lines)\n', (3443, 3475), False, 'from pypeit.pypeitsetup import PypeItSetup\n'), ((3713, 3736), 'pypeit.tests.tstutils.data_path', 'data_path', (['"""test.calib"""'], {}), "('test.calib')\n", (3722, 3736), False, 'from pypeit.tests.tstutils import dev_suite_required, data_path\n'), ((3984, 4000), 'os.remove', 'os.remove', (['cfile'], {}), '(cfile)\n', (3993, 4000), False, 'import os\n'), ((4304, 4347), 'pypeit.pypeitsetup.PypeItSetup', 'PypeItSetup', (['file_list'], {'cfg_lines': 'cfg_lines'}), '(file_list, cfg_lines=cfg_lines)\n', (4315, 4347), False, 'from pypeit.pypeitsetup import PypeItSetup\n'), ((5008, 5041), 'os.remove', 'os.remove', (['"""keck_lris_red.sorted"""'], {}), "('keck_lris_red.sorted')\n", (5017, 5041), False, 'import os\n'), ((5111, 5210), 'os.path.join', 'os.path.join', (["os.environ['PYPEIT_DEV']", '"""pypeit_files/keck_lris_blue_long_400_3400_d560.pypeit"""'], {}), "(os.environ['PYPEIT_DEV'],\n 'pypeit_files/keck_lris_blue_long_400_3400_d560.pypeit')\n", (5123, 5210), False, 'import os\n'), ((5239, 5256), 'os.path.isfile', 'os.path.isfile', (['f'], {}), '(f)\n', (5253, 5256), False, 'import os\n'), ((5356, 5394), 'pypeit.par.util.parse_pypeit_file', 
'parse_pypeit_file', (['f'], {'file_check': '(False)'}), '(f, file_check=False)\n', (5373, 5394), False, 'from pypeit.par.util import parse_pypeit_file\n'), ((5764, 5799), 'pypeit.spectrographs.util.load_spectrograph', 'load_spectrograph', (['"""keck_lris_blue"""'], {}), "('keck_lris_blue')\n", (5781, 5799), False, 'from pypeit.spectrographs.util import load_spectrograph\n'), ((5858, 5909), 'pypeit.metadata.PypeItMetaData', 'PypeItMetaData', (['spectrograph', 'par'], {'files': 'data_files'}), '(spectrograph, par, files=data_files)\n', (5872, 5909), False, 'from pypeit.metadata import PypeItMetaData\n'), ((5928, 5996), 'pypeit.metadata.PypeItMetaData', 'PypeItMetaData', (['spectrograph', 'par'], {'files': 'data_files', 'usrdata': 'usrdata'}), '(spectrograph, par, files=data_files, usrdata=usrdata)\n', (5942, 5996), False, 'from pypeit.metadata import PypeItMetaData\n'), ((613, 637), 'shutil.rmtree', 'shutil.rmtree', (['setup_dir'], {}), '(setup_dir)\n', (626, 637), False, 'import shutil\n'), ((728, 753), 'shutil.rmtree', 'shutil.rmtree', (['config_dir'], {}), '(config_dir)\n', (741, 753), False, 'import shutil\n'), ((2053, 2160), 'os.path.join', 'os.path.join', (["os.environ['PYPEIT_DEV']", '"""RAW_DATA"""', '"""keck_lris_red"""', '"""multi_400_8500_d560"""', '"""*.fits.gz"""'], {}), "(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_lris_red',\n 'multi_400_8500_d560', '*.fits.gz')\n", (2065, 2160), False, 'import os\n'), ((2668, 2760), 'os.path.join', 'os.path.join', (["os.environ['PYPEIT_DEV']", '"""RAW_DATA"""', '"""keck_lris_red"""', '"""multi*"""', '"""*.fits*"""'], {}), "(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_lris_red',\n 'multi*', '*.fits*')\n", (2680, 2760), False, 'import os\n'), ((3217, 3324), 'os.path.join', 'os.path.join', (["os.environ['PYPEIT_DEV']", '"""RAW_DATA"""', '"""keck_lris_red"""', '"""multi_400_8500_d560"""', '"""*.fits.gz"""'], {}), "(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_lris_red',\n 'multi_400_8500_d560', '*.fits.gz')\n", (3229, 3324), False, 'import os\n'), ((3820, 3856), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (3829, 3856), False, 'import yaml\n'), ((4104, 4196), 'os.path.join', 'os.path.join', (["os.environ['PYPEIT_DEV']", '"""RAW_DATA"""', '"""keck_lris_red"""', '"""multi*"""', '"""*.fits*"""'], {}), "(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_lris_red',\n 'multi*', '*.fits*')\n", (4116, 4196), False, 'import os\n'), ((1067, 1080), 'pypeit.tests.tstutils.data_path', 'data_path', (['""""""'], {}), "('')\n", (1076, 1080), False, 'from pypeit.tests.tstutils import dev_suite_required, data_path\n'), ((1881, 1896), 'numpy.where', 'np.where', (['(~indx)'], {}), '(~indx)\n', (1889, 1896), True, 'import numpy as np\n')]
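The tests above repeat one setup pattern; condensed into a single sketch below. The raw-data glob is a placeholder path, while the calls themselves mirror the test bodies.

# Condensed from the test bodies above; the glob pattern is a placeholder.
import glob
from pypeit.pypeitsetup import PypeItSetup

file_list = glob.glob('/path/to/RAW_DATA/keck_lris_red/multi*/*.fits*')  # placeholder
ps = PypeItSetup(file_list, cfg_lines=['[rdx]', 'spectrograph = keck_lris_red'])
ps.build_fitstbl()
ps.get_frame_types(flag_unknown=True)
ps.fitstbl.set_configurations(ps.fitstbl.unique_configurations())
ps.fitstbl.set_calibration_groups()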
DDevine/tortoise-orm
tortoise/query_utils.py
414737a78e98ffd247174590720f5c90aeac4dde
from copy import copy from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Type, cast from pypika import Table from pypika.terms import Criterion from tortoise.exceptions import FieldError, OperationalError from tortoise.fields.relational import BackwardFKRelation, ManyToManyFieldInstance, RelationalField if TYPE_CHECKING: # pragma: nocoverage from tortoise.models import Model from tortoise.queryset import QuerySet def _process_filter_kwarg( model: "Type[Model]", key: str, value: Any, table: Table ) -> Tuple[Criterion, Optional[Tuple[Table, Criterion]]]: join = None if value is None and f"{key}__isnull" in model._meta.filters: param = model._meta.get_filter(f"{key}__isnull") value = True else: param = model._meta.get_filter(key) pk_db_field = model._meta.db_pk_column if param.get("table"): join = ( param["table"], table[pk_db_field] == param["table"][param["backward_key"]], ) if param.get("value_encoder"): value = param["value_encoder"](value, model) criterion = param["operator"](param["table"][param["field"]], value) else: field_object = model._meta.fields_map[param["field"]] encoded_value = ( param["value_encoder"](value, model, field_object) if param.get("value_encoder") else model._meta.db.executor_class._field_to_db(field_object, value, model) ) criterion = param["operator"](table[param["source_field"]], encoded_value) return criterion, join def _get_joins_for_related_field( table: Table, related_field: RelationalField, related_field_name: str ) -> List[Tuple[Table, Criterion]]: required_joins = [] related_table: Table = related_field.related_model._meta.basetable if isinstance(related_field, ManyToManyFieldInstance): through_table = Table(related_field.through) required_joins.append( ( through_table, table[related_field.model._meta.db_pk_column] == through_table[related_field.backward_key], ) ) required_joins.append( ( related_table, through_table[related_field.forward_key] == related_table[related_field.related_model._meta.db_pk_column], ) ) elif isinstance(related_field, BackwardFKRelation): to_field_source_field = ( related_field.to_field_instance.source_field or related_field.to_field_instance.model_field_name ) if table == related_table: related_table = related_table.as_(f"{table.get_table_name()}__{related_field_name}") required_joins.append( ( related_table, table[to_field_source_field] == related_table[related_field.relation_source_field], ) ) else: to_field_source_field = ( related_field.to_field_instance.source_field or related_field.to_field_instance.model_field_name ) from_field = related_field.model._meta.fields_map[related_field.source_field] # type: ignore from_field_source_field = from_field.source_field or from_field.model_field_name related_table = related_table.as_(f"{table.get_table_name()}__{related_field_name}") required_joins.append( (related_table, related_table[to_field_source_field] == table[from_field_source_field],) ) return required_joins class EmptyCriterion(Criterion): # type: ignore def __or__(self, other: Criterion) -> Criterion: return other def __and__(self, other: Criterion) -> Criterion: return other def __bool__(self) -> bool: return False def _and(left: Criterion, right: Criterion) -> Criterion: if left and not right: return left return left & right def _or(left: Criterion, right: Criterion) -> Criterion: if left and not right: return left return left | right class QueryModifier: """ Internal structure used to generate SQL Queries. 
""" def __init__( self, where_criterion: Optional[Criterion] = None, joins: Optional[List[Tuple[Table, Criterion]]] = None, having_criterion: Optional[Criterion] = None, ) -> None: self.where_criterion: Criterion = where_criterion or EmptyCriterion() self.joins = joins if joins else [] self.having_criterion: Criterion = having_criterion or EmptyCriterion() def __and__(self, other: "QueryModifier") -> "QueryModifier": return QueryModifier( where_criterion=_and(self.where_criterion, other.where_criterion), joins=self.joins + other.joins, having_criterion=_and(self.having_criterion, other.having_criterion), ) def __or__(self, other: "QueryModifier") -> "QueryModifier": if self.having_criterion or other.having_criterion: # TODO: This could be optimized? result_having_criterion = _or( _and(self.where_criterion, self.having_criterion), _and(other.where_criterion, other.having_criterion), ) return QueryModifier( joins=self.joins + other.joins, having_criterion=result_having_criterion ) if self.where_criterion and other.where_criterion: return QueryModifier( where_criterion=self.where_criterion | other.where_criterion, joins=self.joins + other.joins, ) return QueryModifier( where_criterion=self.where_criterion or other.where_criterion, joins=self.joins + other.joins, ) def __invert__(self) -> "QueryModifier": if not self.where_criterion and not self.having_criterion: return QueryModifier(joins=self.joins) if self.having_criterion: # TODO: This could be optimized? return QueryModifier( joins=self.joins, having_criterion=_and(self.where_criterion, self.having_criterion).negate(), ) return QueryModifier(where_criterion=self.where_criterion.negate(), joins=self.joins) def get_query_modifiers(self) -> Tuple[Criterion, List[Tuple[Table, Criterion]], Criterion]: """ Returns a tuple of the query criterion. """ return self.where_criterion, self.joins, self.having_criterion class Q: """ Q Expression container. Q Expressions are a useful tool to compose a query from many small parts. :param join_type: Is the join an AND or OR join type? :param args: Inner ``Q`` expressions that you want to wrap. :param kwargs: Filter statements that this Q object should encapsulate. """ __slots__ = ( "children", "filters", "join_type", "_is_negated", "_annotations", "_custom_filters", ) AND = "AND" OR = "OR" def __init__(self, *args: "Q", join_type: str = AND, **kwargs: Any) -> None: if args and kwargs: newarg = Q(join_type=join_type, **kwargs) args = (newarg,) + args kwargs = {} if not all(isinstance(node, Q) for node in args): raise OperationalError("All ordered arguments must be Q nodes") #: Contains the sub-Q's that this Q is made up of self.children: Tuple[Q, ...] = args #: Contains the filters applied to this Q self.filters: Dict[str, Any] = kwargs if join_type not in {self.AND, self.OR}: raise OperationalError("join_type must be AND or OR") #: Specifies if this Q does an AND or OR on its children self.join_type = join_type self._is_negated = False self._annotations: Dict[str, Any] = {} self._custom_filters: Dict[str, Dict[str, Any]] = {} def __and__(self, other: "Q") -> "Q": """ Returns a binary AND of Q objects, use ``AND`` operator. :raises OperationalError: AND operation requires a Q node """ if not isinstance(other, Q): raise OperationalError("AND operation requires a Q node") return Q(self, other, join_type=self.AND) def __or__(self, other: "Q") -> "Q": """ Returns a binary OR of Q objects, use ``OR`` operator. 
:raises OperationalError: OR operation requires a Q node """ if not isinstance(other, Q): raise OperationalError("OR operation requires a Q node") return Q(self, other, join_type=self.OR) def __invert__(self) -> "Q": """ Returns a negated instance of the Q object, use ``~`` operator. """ q = Q(*self.children, join_type=self.join_type, **self.filters) q.negate() return q def negate(self) -> None: """ Negates the curent Q object. (mutation) """ self._is_negated = not self._is_negated def _resolve_nested_filter( self, model: "Type[Model]", key: str, value: Any, table: Table ) -> QueryModifier: related_field_name = key.split("__")[0] related_field = cast(RelationalField, model._meta.fields_map[related_field_name]) required_joins = _get_joins_for_related_field(table, related_field, related_field_name) modifier = Q(**{"__".join(key.split("__")[1:]): value}).resolve( model=related_field.related_model, annotations=self._annotations, custom_filters=self._custom_filters, table=required_joins[-1][0], ) return QueryModifier(joins=required_joins) & modifier def _resolve_custom_kwarg( self, model: "Type[Model]", key: str, value: Any, table: Table ) -> QueryModifier: having_info = self._custom_filters[key] annotation = self._annotations[having_info["field"]] annotation_info = annotation.resolve(model, table) operator = having_info["operator"] overridden_operator = model._meta.db.executor_class.get_overridden_filter_func( filter_func=operator ) if overridden_operator: operator = overridden_operator if annotation_info["field"].is_aggregate: modifier = QueryModifier(having_criterion=operator(annotation_info["field"], value)) else: modifier = QueryModifier(where_criterion=operator(annotation_info["field"], value)) return modifier def _resolve_regular_kwarg( self, model: "Type[Model]", key: str, value: Any, table: Table ) -> QueryModifier: if key not in model._meta.filters and key.split("__")[0] in model._meta.fetch_fields: modifier = self._resolve_nested_filter(model, key, value, table) else: criterion, join = _process_filter_kwarg(model, key, value, table) joins = [join] if join else [] modifier = QueryModifier(where_criterion=criterion, joins=joins) return modifier def _get_actual_filter_params( self, model: "Type[Model]", key: str, value: Table ) -> Tuple[str, Any]: filter_key = key if key in model._meta.fk_fields or key in model._meta.o2o_fields: field_object = model._meta.fields_map[key] if hasattr(value, "pk"): filter_value = value.pk else: filter_value = value filter_key = cast(str, field_object.source_field) elif key in model._meta.m2m_fields: if hasattr(value, "pk"): filter_value = value.pk else: filter_value = value elif ( key.split("__")[0] in model._meta.fetch_fields or key in self._custom_filters or key in model._meta.filters ): filter_value = value else: allowed = sorted( model._meta.fields | model._meta.fetch_fields | set(self._custom_filters) ) raise FieldError(f"Unknown filter param '{key}'. 
Allowed base values are {allowed}") return filter_key, filter_value def _resolve_kwargs(self, model: "Type[Model]", table: Table) -> QueryModifier: modifier = QueryModifier() for raw_key, raw_value in self.filters.items(): key, value = self._get_actual_filter_params(model, raw_key, raw_value) if key in self._custom_filters: filter_modifier = self._resolve_custom_kwarg(model, key, value, table) else: filter_modifier = self._resolve_regular_kwarg(model, key, value, table) if self.join_type == self.AND: modifier &= filter_modifier else: modifier |= filter_modifier if self._is_negated: modifier = ~modifier return modifier def _resolve_children(self, model: "Type[Model]", table: Table) -> QueryModifier: modifier = QueryModifier() for node in self.children: node_modifier = node.resolve(model, self._annotations, self._custom_filters, table) if self.join_type == self.AND: modifier &= node_modifier else: modifier |= node_modifier if self._is_negated: modifier = ~modifier return modifier def resolve( self, model: "Type[Model]", annotations: Dict[str, Any], custom_filters: Dict[str, Dict[str, Any]], table: Table, ) -> QueryModifier: """ Resolves the logical Q chain into the parts of a SQL statement. :param model: The Model this Q Expression should be resolved on. :param annotations: Extra annotations one wants to inject into the resultset. :param custom_filters: Pre-resolved filters to be passed though. :param table: ``pypika.Table`` to keep track of the virtual SQL table (to allow self referential joins) """ self._annotations = annotations self._custom_filters = custom_filters if self.filters: return self._resolve_kwargs(model, table) return self._resolve_children(model, table) class Prefetch: """ Prefetcher container. One would directly use this when wanting to attach a custom QuerySet for specialised prefetching. :param relation: Related field name. :param queryset: Custom QuerySet to use for prefetching. """ __slots__ = ("relation", "queryset") def __init__(self, relation: str, queryset: "QuerySet") -> None: self.relation = relation self.queryset = queryset self.queryset.query = copy(self.queryset.model._meta.basequery) def resolve_for_queryset(self, queryset: "QuerySet") -> None: """ Called internally to generate prefetching query. :param queryset: Custom QuerySet to use for prefetching. :raises OperationalError: If field does not exist in model. """ relation_split = self.relation.split("__") first_level_field = relation_split[0] if first_level_field not in queryset.model._meta.fetch_fields: raise OperationalError( f"relation {first_level_field} for {queryset.model._meta.db_table} not found" ) forwarded_prefetch = "__".join(relation_split[1:]) if forwarded_prefetch: if first_level_field not in queryset._prefetch_map.keys(): queryset._prefetch_map[first_level_field] = set() queryset._prefetch_map[first_level_field].add( Prefetch(forwarded_prefetch, self.queryset) ) else: queryset._prefetch_queries[first_level_field] = self.queryset
[((1917, 1945), 'pypika.Table', 'Table', (['related_field.through'], {}), '(related_field.through)\n', (1922, 1945), False, 'from pypika import Table\n'), ((9309, 9374), 'typing.cast', 'cast', (['RelationalField', 'model._meta.fields_map[related_field_name]'], {}), '(RelationalField, model._meta.fields_map[related_field_name])\n', (9313, 9374), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Type, cast\n'), ((14846, 14887), 'copy.copy', 'copy', (['self.queryset.model._meta.basequery'], {}), '(self.queryset.model._meta.basequery)\n', (14850, 14887), False, 'from copy import copy\n'), ((7402, 7459), 'tortoise.exceptions.OperationalError', 'OperationalError', (['"""All ordered arguments must be Q nodes"""'], {}), "('All ordered arguments must be Q nodes')\n", (7418, 7459), False, 'from tortoise.exceptions import FieldError, OperationalError\n'), ((7725, 7772), 'tortoise.exceptions.OperationalError', 'OperationalError', (['"""join_type must be AND or OR"""'], {}), "('join_type must be AND or OR')\n", (7741, 7772), False, 'from tortoise.exceptions import FieldError, OperationalError\n'), ((8268, 8319), 'tortoise.exceptions.OperationalError', 'OperationalError', (['"""AND operation requires a Q node"""'], {}), "('AND operation requires a Q node')\n", (8284, 8319), False, 'from tortoise.exceptions import FieldError, OperationalError\n'), ((8620, 8670), 'tortoise.exceptions.OperationalError', 'OperationalError', (['"""OR operation requires a Q node"""'], {}), "('OR operation requires a Q node')\n", (8636, 8670), False, 'from tortoise.exceptions import FieldError, OperationalError\n'), ((11589, 11625), 'typing.cast', 'cast', (['str', 'field_object.source_field'], {}), '(str, field_object.source_field)\n', (11593, 11625), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Type, cast\n'), ((15356, 15461), 'tortoise.exceptions.OperationalError', 'OperationalError', (['f"""relation {first_level_field} for {queryset.model._meta.db_table} not found"""'], {}), "(\n f'relation {first_level_field} for {queryset.model._meta.db_table} not found'\n )\n", (15372, 15461), False, 'from tortoise.exceptions import FieldError, OperationalError\n'), ((12171, 12249), 'tortoise.exceptions.FieldError', 'FieldError', (['f"""Unknown filter param \'{key}\'. Allowed base values are {allowed}"""'], {}), '(f"Unknown filter param \'{key}\'. Allowed base values are {allowed}")\n', (12181, 12249), False, 'from tortoise.exceptions import FieldError, OperationalError\n')]
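A brief usage sketch of the Q and Prefetch containers defined in query_utils.py above, written against hypothetical Tournament/Event models; the model names and field names are assumptions, only the Q and Prefetch mechanics come from the module.

# Hypothetical models and fields; only the Q / Prefetch usage is the point.
events = await Event.filter(Q(name="championship") | Q(prize__gte=1000))
not_cancelled = await Event.filter(~Q(state="cancelled"))

tournaments = await Tournament.all().prefetch_related(
    Prefetch("events", queryset=Event.filter(state="open")))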
angelmtenor/IDAFC
L3_numpy_pandas_2D/B_NumPy_Axis.py
9d23746fd02e4eda2569d75b3c7a1383277e6e78
import numpy as np

# Change False to True for this block of code to see what it does

# NumPy axis argument
if True:
    a = np.array([
        [1, 2, 3],
        [4, 5, 6],
        [7, 8, 9]
    ])

    print(a.sum())
    print(a.sum(axis=0))
    print(a.sum(axis=1))

# Subway ridership for 5 stations on 10 different days
ridership = np.array([
    [0, 0, 2, 5, 0],
    [1478, 3877, 3674, 2328, 2539],
    [1613, 4088, 3991, 6461, 2691],
    [1560, 3392, 3826, 4787, 2613],
    [1608, 4802, 3932, 4477, 2705],
    [1576, 3933, 3909, 4979, 2685],
    [95, 229, 255, 496, 201],
    [2, 0, 1, 27, 0],
    [1438, 3785, 3589, 4174, 2215],
    [1342, 4043, 4009, 4665, 3033]
])


def min_and_max_riders_per_day(ridership):
    """
    Fill in this function. First, for each subway station, calculate the
    mean ridership per day. Then, out of all the subway stations, return the
    maximum and minimum of these values. That is, find the maximum
    mean-ridership-per-day and the minimum mean-ridership-per-day for any
    subway station.
    """
    mean_ridership_per_day = ridership.mean(axis=0)
    max_daily_ridership = mean_ridership_per_day.max()  # Replace this with your code
    min_daily_ridership = mean_ridership_per_day.min()  # Replace this with your code

    return max_daily_ridership, min_daily_ridership


print(min_and_max_riders_per_day(ridership))
[((338, 652), 'numpy.array', 'np.array', (['[[0, 0, 2, 5, 0], [1478, 3877, 3674, 2328, 2539], [1613, 4088, 3991, 6461, \n 2691], [1560, 3392, 3826, 4787, 2613], [1608, 4802, 3932, 4477, 2705],\n [1576, 3933, 3909, 4979, 2685], [95, 229, 255, 496, 201], [2, 0, 1, 27,\n 0], [1438, 3785, 3589, 4174, 2215], [1342, 4043, 4009, 4665, 3033]]'], {}), '([[0, 0, 2, 5, 0], [1478, 3877, 3674, 2328, 2539], [1613, 4088, \n 3991, 6461, 2691], [1560, 3392, 3826, 4787, 2613], [1608, 4802, 3932, \n 4477, 2705], [1576, 3933, 3909, 4979, 2685], [95, 229, 255, 496, 201],\n [2, 0, 1, 27, 0], [1438, 3785, 3589, 4174, 2215], [1342, 4043, 4009, \n 4665, 3033]])\n', (346, 652), True, 'import numpy as np\n'), ((126, 169), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]'], {}), '([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n', (134, 169), True, 'import numpy as np\n')]
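For completeness, a quick illustration of the axis behaviour the lesson file above exercises: axis=0 collapses rows (one value per column), axis=1 collapses columns (one value per row).

import numpy as np

a = np.array([[1, 2, 3],
              [4, 5, 6]])
print(a.sum())         # 21            -> every element
print(a.sum(axis=0))   # [5 7 9]       -> one sum per column
print(a.sum(axis=1))   # [ 6 15]       -> one sum per row
print(a.mean(axis=0))  # [2.5 3.5 4.5] -> column means, as used by min_and_max_riders_per_day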
WeilabMSU/TopologyNet
Protein-ligand-binding/TopBio/Feature/LigandFeature.py
4f4d13cec7e50624b43990c863dd84b8bbf359d8
import numpy as np
import pickle
import os

def GenerateFeature_alpha(ligand_name, working_dir):
    Cut = 12.0
    LIGELE = ['C','N','O','S','CN','CO','CS','NO','NS','OS','CCl','CBr','CP','CF','CNO','CNS','COS','NOS','CNOS','CNOSPFClBrI','H','CH','NH','OH','SH','CNH','COH','CSH','NOH','NSH','OSH','CNOH','CNSH','COSH','NOSH','CNOSH','CNOSPFClBrIH','CClH','CBrH','CPH','CFH']
    Feature_i = []
    pdb = ligand_name
    InFile = open(working_dir+'/'+ligand_name+'_alpha.pkl')
    BarCollection = pickle.load(InFile)
    for el in LIGELE:
        if 'lig_'+el in BarCollection.keys():
            Bars = BarCollection['lig_'+el]
            Bar0Birth = []; Bar0Death = []; Bar1Birth = []; Bar1Death = []; Bar2Birth = []; Bar2Death = [];
            for Bar in Bars:
                if Bar[2] < Bar[1]: continue
                if Bar[2] > 12.0 and Bar[0] == 0: continue
                if Bar[2] > 12.0 and Bar[0] > 0: Bar[2] = 12.0
                if Bar[0] == 0:
                    Bar0Birth.append(Bar[1])
                    Bar0Death.append(Bar[2])
                if Bar[0] == 1:
                    Bar1Birth.append(Bar[1])
                    Bar1Death.append(Bar[2])
                if Bar[0] == 2:
                    Bar2Birth.append(Bar[1])
                    Bar2Death.append(Bar[2])
            if len(Bar0Birth) > 0:
                Bar0Birth = np.asarray(Bar0Birth, float)
                Bar0Death = np.asarray(Bar0Death, float)
            if len(Bar1Birth) > 0:
                Bar1Birth = np.asarray(Bar1Birth, float)
                Bar1Death = np.asarray(Bar1Death, float)
            if len(Bar2Birth) > 0:
                Bar2Birth = np.asarray(Bar2Birth, float)
                Bar2Death = np.asarray(Bar2Death, float)
            if len(Bar0Death) > 0:
                Feature_i.append(np.mean(Bar0Death[:]))
                Feature_i.append(np.std(Bar0Death[:]))
                Feature_i.append(np.max(Bar0Death[:]))
                Feature_i.append(np.min(Bar0Death[:]))
                Feature_i.append(np.sum(Bar0Death[:]))
                Feature_i.append(len(Bar0Death))
            else:
                Feature_i.extend([0.]*6)
            if len(Bar1Death) > 0:
                Feature_i.append(np.mean(Bar1Death[:] - Bar1Birth[:]))
                Feature_i.append(np.std(Bar1Death[:] - Bar1Birth[:]))
                Feature_i.append(np.max(Bar1Death[:] - Bar1Birth[:]))
                Feature_i.append(np.min(Bar1Death[:] - Bar1Birth[:]))
                Feature_i.append(np.sum(Bar1Death[:] - Bar1Birth[:]))
                Feature_i.append(Bar1Birth[np.argmax(Bar1Death[:] - Bar1Birth[:])])
                Feature_i.append(Bar1Death[np.argmax(Bar1Death[:] - Bar1Birth[:])])
                Feature_i.append(np.mean(Bar1Birth[:]))
                Feature_i.append(np.std(Bar1Birth[:]))
                Feature_i.append(np.max(Bar1Birth[:]))
                Feature_i.append(np.min(Bar1Birth[:]))
                Feature_i.append(np.sum(Bar1Birth[:]))
                Feature_i.append(np.mean(Bar1Death[:]))
                Feature_i.append(np.std(Bar1Death[:]))
                Feature_i.append(np.max(Bar1Death[:]))
                Feature_i.append(np.min(Bar1Death[:]))
                Feature_i.append(np.sum(Bar1Death[:]))
                Feature_i.append(len(Bar1Death))
            else:
                Feature_i.extend([0.]*18)
            if len(Bar2Death) > 0:
                Feature_i.append(np.mean(Bar2Death[:] - Bar2Birth[:]))
                Feature_i.append(np.std(Bar2Death[:] - Bar2Birth[:]))
                Feature_i.append(np.max(Bar2Death[:] - Bar2Birth[:]))
                Feature_i.append(np.min(Bar2Death[:] - Bar2Birth[:]))
                Feature_i.append(np.sum(Bar2Death[:] - Bar2Birth[:]))
                Feature_i.append(Bar2Birth[np.argmax(Bar2Death[:] - Bar2Birth[:])])
                Feature_i.append(Bar2Death[np.argmax(Bar2Death[:] - Bar2Birth[:])])
                Feature_i.append(np.mean(Bar2Birth[:]))
                Feature_i.append(np.std(Bar2Birth[:]))
                Feature_i.append(np.max(Bar2Birth[:]))
                Feature_i.append(np.min(Bar2Birth[:]))
                Feature_i.append(np.sum(Bar2Birth[:]))
                Feature_i.append(np.mean(Bar2Death[:]))
                Feature_i.append(np.std(Bar2Death[:]))
                Feature_i.append(np.max(Bar2Death[:]))
                Feature_i.append(np.min(Bar2Death[:]))
                Feature_i.append(np.sum(Bar2Death[:]))
                Feature_i.append(len(Bar2Death))
            else:
                Feature_i.extend([0.]*18)
        else:
            Feature_i.extend([0.]*42)
    Feature_i = np.asarray(Feature_i, float)
    outfile = open(working_dir+'/'+ligand_name+'_feature_alpha_handcrafted.npy', 'w')
    np.save(outfile, Feature_i)
    outfile.close()

def GenerateFeature_level1(ligand_name, working_dir):
    small = 0.01
    Feature_i = []
    Cut = 12.0
    LIGELE = ['C','N','O','S','CN','CO','CS','NO','NS','OS','CCl','CBr','CP','CF','CNO','CNS','COS','NOS','CNOS','CNOSPFClBrI','H','CH','NH','OH','SH','CNH','COH','CSH','NOH','NSH','OSH','CNOH','CNSH','COSH','NOSH','CNOSH','CNOSPFClBrIH','CClH','CBrH','CPH','CFH']
    pdb = ligand_name
    for el in LIGELE:
        if os.path.exists(working_dir+'/'+ligand_name+'_'+el+'_level1.PH'):
            InFile = open(working_dir+'/'+ligand_name+'_'+el+'_level1.PH')
            lines = InFile.read().splitlines()
            Bars = []
            for line in lines:
                a,b,c = line.split()
                Bars.append([int(a), float(b), float(c)])
            InFile.close()
            Bar0Birth = []; Bar0Death = []; Bar1Birth = []; Bar1Death = []; Bar2Birth = []; Bar2Death = [];
            for Bar in Bars:
                if Bar[2] < Bar[1]: continue
                if Bar[2] > 12.0 and Bar[0] == 0: continue
                if Bar[2] > 12.0 and Bar[0] > 0: Bar[2] = 12.0
                if Bar[0] == 0 and Bar[2]-Bar[1] >= small:
                    Bar0Birth.append(Bar[1])
                    Bar0Death.append(Bar[2])
                if Bar[0] == 1 and Bar[2]-Bar[1] >= small:
                    Bar1Birth.append(Bar[1])
                    Bar1Death.append(Bar[2])
                if Bar[0] == 2 and Bar[2]-Bar[1] >= small:
                    Bar2Birth.append(Bar[1])
                    Bar2Death.append(Bar[2])
            if len(Bar0Birth) > 0:
                Bar0Birth = np.asarray(Bar0Birth, float)
                Bar0Death = np.asarray(Bar0Death, float)
            if len(Bar1Birth) > 0:
                Bar1Birth = np.asarray(Bar1Birth, float)
                Bar1Death = np.asarray(Bar1Death, float)
            if len(Bar2Birth) > 0:
                Bar2Birth = np.asarray(Bar2Birth, float)
                Bar2Death = np.asarray(Bar2Death, float)
            if len(Bar0Death) > 0:
                Feature_i.append(np.mean(Bar0Death[:]))
                Feature_i.append(np.std(Bar0Death[:]))
                Feature_i.append(np.max(Bar0Death[:]))
                Feature_i.append(np.min(Bar0Death[:]))
                Feature_i.append(np.sum(Bar0Death[:]))
                Feature_i.append(len(Bar0Death))
            else:
                Feature_i.extend([0.]*6)
            if len(Bar1Death) > 0:
                Feature_i.append(np.mean(Bar1Death[:] - Bar1Birth[:]))
                Feature_i.append(np.std(Bar1Death[:] - Bar1Birth[:]))
                Feature_i.append(np.max(Bar1Death[:] - Bar1Birth[:]))
                Feature_i.append(np.min(Bar1Death[:] - Bar1Birth[:]))
                Feature_i.append(np.sum(Bar1Death[:] - Bar1Birth[:]))
                Feature_i.append(Bar1Birth[np.argmax(Bar1Death[:] - Bar1Birth[:])])
                Feature_i.append(Bar1Death[np.argmax(Bar1Death[:] - Bar1Birth[:])])
                Feature_i.append(np.mean(Bar1Birth[:]))
                Feature_i.append(np.std(Bar1Birth[:]))
                Feature_i.append(np.max(Bar1Birth[:]))
                Feature_i.append(np.min(Bar1Birth[:]))
                Feature_i.append(np.sum(Bar1Birth[:]))
                Feature_i.append(np.mean(Bar1Death[:]))
                Feature_i.append(np.std(Bar1Death[:]))
                Feature_i.append(np.max(Bar1Death[:]))
                Feature_i.append(np.min(Bar1Death[:]))
                Feature_i.append(np.sum(Bar1Death[:]))
                Feature_i.append(len(Bar1Death))
            else:
                Feature_i.extend([0.]*18)
            if len(Bar2Death) > 0:
                Feature_i.append(np.mean(Bar2Death[:] - Bar2Birth[:]))
                Feature_i.append(np.std(Bar2Death[:] - Bar2Birth[:]))
                Feature_i.append(np.max(Bar2Death[:] - Bar2Birth[:]))
                Feature_i.append(np.min(Bar2Death[:] - Bar2Birth[:]))
                Feature_i.append(np.sum(Bar2Death[:] - Bar2Birth[:]))
                Feature_i.append(Bar2Birth[np.argmax(Bar2Death[:] - Bar2Birth[:])])
                Feature_i.append(Bar2Death[np.argmax(Bar2Death[:] - Bar2Birth[:])])
                Feature_i.append(np.mean(Bar2Birth[:]))
                Feature_i.append(np.std(Bar2Birth[:]))
                Feature_i.append(np.max(Bar2Birth[:]))
                Feature_i.append(np.min(Bar2Birth[:]))
                Feature_i.append(np.sum(Bar2Birth[:]))
                Feature_i.append(np.mean(Bar2Death[:]))
                Feature_i.append(np.std(Bar2Death[:]))
                Feature_i.append(np.max(Bar2Death[:]))
                Feature_i.append(np.min(Bar2Death[:]))
                Feature_i.append(np.sum(Bar2Death[:]))
                Feature_i.append(len(Bar2Death))
            else:
                Feature_i.extend([0.]*18)
        else:
            Feature_i.extend([0.]*42)
    Feature_i = np.asarray(Feature_i, float)
    outfile = open(working_dir+'/'+ligand_name+'_feature_ligand_level1_handcrafted.npy', 'w')
    np.save(outfile, Feature_i)
    outfile.close()
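# A minimal, illustrative driver for the two generators above (not part of the
# original repository file). It assumes the persistence output the functions
# expect -- '<ligand>_alpha.pkl' and the per-element '<ligand>_<el>_level1.PH'
# files -- already exists under working_dir; the ligand id '1a30' and the
# './features' directory are placeholder values. The text-mode open() handles
# passed to pickle.load and np.save suggest the script targets Python 2.
if __name__ == '__main__':
    GenerateFeature_alpha('1a30', './features')
    GenerateFeature_level1('1a30', './features')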
[((500, 519), 'pickle.load', 'pickle.load', (['InFile'], {}), '(InFile)\n', (511, 519), False, 'import pickle\n'), ((4686, 4714), 'numpy.asarray', 'np.asarray', (['Feature_i', 'float'], {}), '(Feature_i, float)\n', (4696, 4714), True, 'import numpy as np\n'), ((4806, 4833), 'numpy.save', 'np.save', (['outfile', 'Feature_i'], {}), '(outfile, Feature_i)\n', (4813, 4833), True, 'import numpy as np\n'), ((9780, 9808), 'numpy.asarray', 'np.asarray', (['Feature_i', 'float'], {}), '(Feature_i, float)\n', (9790, 9808), True, 'import numpy as np\n'), ((9908, 9935), 'numpy.save', 'np.save', (['outfile', 'Feature_i'], {}), '(outfile, Feature_i)\n', (9915, 9935), True, 'import numpy as np\n'), ((5283, 5356), 'os.path.exists', 'os.path.exists', (["(working_dir + '/' + ligand_name + '_' + el + '_level1.PH')"], {}), "(working_dir + '/' + ligand_name + '_' + el + '_level1.PH')\n", (5297, 5356), False, 'import os\n'), ((1385, 1413), 'numpy.asarray', 'np.asarray', (['Bar0Birth', 'float'], {}), '(Bar0Birth, float)\n', (1395, 1413), True, 'import numpy as np\n'), ((1442, 1470), 'numpy.asarray', 'np.asarray', (['Bar0Death', 'float'], {}), '(Bar0Death, float)\n', (1452, 1470), True, 'import numpy as np\n'), ((1534, 1562), 'numpy.asarray', 'np.asarray', (['Bar1Birth', 'float'], {}), '(Bar1Birth, float)\n', (1544, 1562), True, 'import numpy as np\n'), ((1591, 1619), 'numpy.asarray', 'np.asarray', (['Bar1Death', 'float'], {}), '(Bar1Death, float)\n', (1601, 1619), True, 'import numpy as np\n'), ((1683, 1711), 'numpy.asarray', 'np.asarray', (['Bar2Birth', 'float'], {}), '(Bar2Birth, float)\n', (1693, 1711), True, 'import numpy as np\n'), ((1740, 1768), 'numpy.asarray', 'np.asarray', (['Bar2Death', 'float'], {}), '(Bar2Death, float)\n', (1750, 1768), True, 'import numpy as np\n'), ((6479, 6507), 'numpy.asarray', 'np.asarray', (['Bar0Birth', 'float'], {}), '(Bar0Birth, float)\n', (6489, 6507), True, 'import numpy as np\n'), ((6536, 6564), 'numpy.asarray', 'np.asarray', (['Bar0Death', 'float'], {}), '(Bar0Death, float)\n', (6546, 6564), True, 'import numpy as np\n'), ((6628, 6656), 'numpy.asarray', 'np.asarray', (['Bar1Birth', 'float'], {}), '(Bar1Birth, float)\n', (6638, 6656), True, 'import numpy as np\n'), ((6685, 6713), 'numpy.asarray', 'np.asarray', (['Bar1Death', 'float'], {}), '(Bar1Death, float)\n', (6695, 6713), True, 'import numpy as np\n'), ((6777, 6805), 'numpy.asarray', 'np.asarray', (['Bar2Birth', 'float'], {}), '(Bar2Birth, float)\n', (6787, 6805), True, 'import numpy as np\n'), ((6834, 6862), 'numpy.asarray', 'np.asarray', (['Bar2Death', 'float'], {}), '(Bar2Death, float)\n', (6844, 6862), True, 'import numpy as np\n'), ((1837, 1858), 'numpy.mean', 'np.mean', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (1844, 1858), True, 'import numpy as np\n'), ((1893, 1913), 'numpy.std', 'np.std', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (1899, 1913), True, 'import numpy as np\n'), ((1948, 1968), 'numpy.max', 'np.max', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (1954, 1968), True, 'import numpy as np\n'), ((2003, 2023), 'numpy.min', 'np.min', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (2009, 2023), True, 'import numpy as np\n'), ((2058, 2078), 'numpy.sum', 'np.sum', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (2064, 2078), True, 'import numpy as np\n'), ((2256, 2292), 'numpy.mean', 'np.mean', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (2263, 2292), True, 'import numpy as np\n'), ((2327, 2362), 'numpy.std', 'np.std', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - 
Bar1Birth[:])\n', (2333, 2362), True, 'import numpy as np\n'), ((2397, 2432), 'numpy.max', 'np.max', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (2403, 2432), True, 'import numpy as np\n'), ((2467, 2502), 'numpy.min', 'np.min', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (2473, 2502), True, 'import numpy as np\n'), ((2537, 2572), 'numpy.sum', 'np.sum', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (2543, 2572), True, 'import numpy as np\n'), ((2775, 2796), 'numpy.mean', 'np.mean', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (2782, 2796), True, 'import numpy as np\n'), ((2831, 2851), 'numpy.std', 'np.std', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (2837, 2851), True, 'import numpy as np\n'), ((2886, 2906), 'numpy.max', 'np.max', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (2892, 2906), True, 'import numpy as np\n'), ((2941, 2961), 'numpy.min', 'np.min', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (2947, 2961), True, 'import numpy as np\n'), ((2996, 3016), 'numpy.sum', 'np.sum', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (3002, 3016), True, 'import numpy as np\n'), ((3051, 3072), 'numpy.mean', 'np.mean', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (3058, 3072), True, 'import numpy as np\n'), ((3107, 3127), 'numpy.std', 'np.std', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (3113, 3127), True, 'import numpy as np\n'), ((3162, 3182), 'numpy.max', 'np.max', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (3168, 3182), True, 'import numpy as np\n'), ((3217, 3237), 'numpy.min', 'np.min', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (3223, 3237), True, 'import numpy as np\n'), ((3272, 3292), 'numpy.sum', 'np.sum', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (3278, 3292), True, 'import numpy as np\n'), ((3471, 3507), 'numpy.mean', 'np.mean', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (3478, 3507), True, 'import numpy as np\n'), ((3542, 3577), 'numpy.std', 'np.std', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (3548, 3577), True, 'import numpy as np\n'), ((3612, 3647), 'numpy.max', 'np.max', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (3618, 3647), True, 'import numpy as np\n'), ((3682, 3717), 'numpy.min', 'np.min', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (3688, 3717), True, 'import numpy as np\n'), ((3752, 3787), 'numpy.sum', 'np.sum', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (3758, 3787), True, 'import numpy as np\n'), ((3990, 4011), 'numpy.mean', 'np.mean', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (3997, 4011), True, 'import numpy as np\n'), ((4046, 4066), 'numpy.std', 'np.std', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (4052, 4066), True, 'import numpy as np\n'), ((4101, 4121), 'numpy.max', 'np.max', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (4107, 4121), True, 'import numpy as np\n'), ((4156, 4176), 'numpy.min', 'np.min', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (4162, 4176), True, 'import numpy as np\n'), ((4211, 4231), 'numpy.sum', 'np.sum', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (4217, 4231), True, 'import numpy as np\n'), ((4266, 4287), 'numpy.mean', 'np.mean', (['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (4273, 4287), True, 'import numpy as np\n'), ((4322, 4342), 'numpy.std', 'np.std', (['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (4328, 4342), True, 'import numpy as np\n'), ((4377, 4397), 'numpy.max', 'np.max', 
(['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (4383, 4397), True, 'import numpy as np\n'), ((4432, 4452), 'numpy.min', 'np.min', (['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (4438, 4452), True, 'import numpy as np\n'), ((4487, 4507), 'numpy.sum', 'np.sum', (['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (4493, 4507), True, 'import numpy as np\n'), ((6931, 6952), 'numpy.mean', 'np.mean', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (6938, 6952), True, 'import numpy as np\n'), ((6987, 7007), 'numpy.std', 'np.std', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (6993, 7007), True, 'import numpy as np\n'), ((7042, 7062), 'numpy.max', 'np.max', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (7048, 7062), True, 'import numpy as np\n'), ((7097, 7117), 'numpy.min', 'np.min', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (7103, 7117), True, 'import numpy as np\n'), ((7152, 7172), 'numpy.sum', 'np.sum', (['Bar0Death[:]'], {}), '(Bar0Death[:])\n', (7158, 7172), True, 'import numpy as np\n'), ((7350, 7386), 'numpy.mean', 'np.mean', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (7357, 7386), True, 'import numpy as np\n'), ((7421, 7456), 'numpy.std', 'np.std', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (7427, 7456), True, 'import numpy as np\n'), ((7491, 7526), 'numpy.max', 'np.max', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (7497, 7526), True, 'import numpy as np\n'), ((7561, 7596), 'numpy.min', 'np.min', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (7567, 7596), True, 'import numpy as np\n'), ((7631, 7666), 'numpy.sum', 'np.sum', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (7637, 7666), True, 'import numpy as np\n'), ((7869, 7890), 'numpy.mean', 'np.mean', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (7876, 7890), True, 'import numpy as np\n'), ((7925, 7945), 'numpy.std', 'np.std', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (7931, 7945), True, 'import numpy as np\n'), ((7980, 8000), 'numpy.max', 'np.max', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (7986, 8000), True, 'import numpy as np\n'), ((8035, 8055), 'numpy.min', 'np.min', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (8041, 8055), True, 'import numpy as np\n'), ((8090, 8110), 'numpy.sum', 'np.sum', (['Bar1Birth[:]'], {}), '(Bar1Birth[:])\n', (8096, 8110), True, 'import numpy as np\n'), ((8145, 8166), 'numpy.mean', 'np.mean', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (8152, 8166), True, 'import numpy as np\n'), ((8201, 8221), 'numpy.std', 'np.std', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (8207, 8221), True, 'import numpy as np\n'), ((8256, 8276), 'numpy.max', 'np.max', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (8262, 8276), True, 'import numpy as np\n'), ((8311, 8331), 'numpy.min', 'np.min', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (8317, 8331), True, 'import numpy as np\n'), ((8366, 8386), 'numpy.sum', 'np.sum', (['Bar1Death[:]'], {}), '(Bar1Death[:])\n', (8372, 8386), True, 'import numpy as np\n'), ((8565, 8601), 'numpy.mean', 'np.mean', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (8572, 8601), True, 'import numpy as np\n'), ((8636, 8671), 'numpy.std', 'np.std', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (8642, 8671), True, 'import numpy as np\n'), ((8706, 8741), 'numpy.max', 'np.max', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (8712, 8741), True, 'import numpy as np\n'), ((8776, 8811), 
'numpy.min', 'np.min', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (8782, 8811), True, 'import numpy as np\n'), ((8846, 8881), 'numpy.sum', 'np.sum', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (8852, 8881), True, 'import numpy as np\n'), ((9084, 9105), 'numpy.mean', 'np.mean', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (9091, 9105), True, 'import numpy as np\n'), ((9140, 9160), 'numpy.std', 'np.std', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (9146, 9160), True, 'import numpy as np\n'), ((9195, 9215), 'numpy.max', 'np.max', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (9201, 9215), True, 'import numpy as np\n'), ((9250, 9270), 'numpy.min', 'np.min', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (9256, 9270), True, 'import numpy as np\n'), ((9305, 9325), 'numpy.sum', 'np.sum', (['Bar2Birth[:]'], {}), '(Bar2Birth[:])\n', (9311, 9325), True, 'import numpy as np\n'), ((9360, 9381), 'numpy.mean', 'np.mean', (['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (9367, 9381), True, 'import numpy as np\n'), ((9416, 9436), 'numpy.std', 'np.std', (['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (9422, 9436), True, 'import numpy as np\n'), ((9471, 9491), 'numpy.max', 'np.max', (['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (9477, 9491), True, 'import numpy as np\n'), ((9526, 9546), 'numpy.min', 'np.min', (['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (9532, 9546), True, 'import numpy as np\n'), ((9581, 9601), 'numpy.sum', 'np.sum', (['Bar2Death[:]'], {}), '(Bar2Death[:])\n', (9587, 9601), True, 'import numpy as np\n'), ((2617, 2655), 'numpy.argmax', 'np.argmax', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (2626, 2655), True, 'import numpy as np\n'), ((2701, 2739), 'numpy.argmax', 'np.argmax', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (2710, 2739), True, 'import numpy as np\n'), ((3832, 3870), 'numpy.argmax', 'np.argmax', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (3841, 3870), True, 'import numpy as np\n'), ((3916, 3954), 'numpy.argmax', 'np.argmax', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (3925, 3954), True, 'import numpy as np\n'), ((7711, 7749), 'numpy.argmax', 'np.argmax', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (7720, 7749), True, 'import numpy as np\n'), ((7795, 7833), 'numpy.argmax', 'np.argmax', (['(Bar1Death[:] - Bar1Birth[:])'], {}), '(Bar1Death[:] - Bar1Birth[:])\n', (7804, 7833), True, 'import numpy as np\n'), ((8926, 8964), 'numpy.argmax', 'np.argmax', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (8935, 8964), True, 'import numpy as np\n'), ((9010, 9048), 'numpy.argmax', 'np.argmax', (['(Bar2Death[:] - Bar2Birth[:])'], {}), '(Bar2Death[:] - Bar2Birth[:])\n', (9019, 9048), True, 'import numpy as np\n')]
emersion/python-emailthreads
emailthreads/util.py
99f1a04fa0dd2ce8a9c870016b067bf56f3d3bfd
import re
import sys

from email.message import EmailMessage

def get_message_by_id(msgs, msg_id):
    # TODO: handle weird brackets stuff
    for msg in msgs:
        if msg["message-id"] == msg_id:
            return msg
    return None

def strip_prefix(s, prefix):
    if s.startswith(prefix):
        s = s[len(prefix):]
    return s

def flatten_header_field(value):
    value = value.strip()
    # TODO: more of these
    while value.startswith("Re:"):
        value = strip_prefix(value, "Re:").strip()
    lines = value.splitlines()
    lines = [l.strip() for l in lines]
    return " ".join(lines)

def get_text_part(msg):
    for part in msg.walk():
        if part.get_content_type() == "text/plain":
            return part
    return None

def normalize_whitespace(text):
    # TODO: more of these
    # No-break space
    return text.replace('\xa0', ' ')

def get_text(msg):
    text_part = get_text_part(msg)
    text = text_part.get_payload(decode=True).decode('utf-8', 'replace')
    text = normalize_whitespace(text)
    return text

def lines_as_list(lines):
    if isinstance(lines, list):
        return lines
    elif isinstance(lines, str):
        return lines.split("\n")
    else:
        return list(lines)
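# Illustrative usage sketch (not part of the original module): builds a small
# EmailMessage in memory and exercises the helpers above. The message id,
# subject and body are made-up example values.
if __name__ == "__main__":
    example = EmailMessage()
    example["Message-ID"] = "<1@example.org>"
    example["Subject"] = "Re: Re: patch review"
    example.set_content("Looks good\xa0to me")
    print(flatten_header_field(example["Subject"]))   # -> "patch review"
    print(get_text(example))                          # no-break space normalized
    print(get_message_by_id([example], "<1@example.org>") is example)  # -> True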
[]
JanAlexanderPersonal/covid19_weak_supervision
trainval.py
5599e48c9945f1e08a2731740bc8f6e44a031703
from haven import haven_chk as hc
from haven import haven_results as hr
from haven import haven_utils as hu

import torch
import torchvision
import tqdm
import pandas as pd
import pprint
import itertools
import os
import pylab as plt
import exp_configs
import time
import numpy as np

from src import models
from src import datasets
from src import utils as ut
from pprint import pformat

import argparse

from torch.utils.data import sampler
from torch.utils.data.sampler import RandomSampler
from torch.backends import cudnn
from torch.nn import functional as F
from torch.utils.data import DataLoader

cudnn.benchmark = True

import logging

def setupLogging():
    """Setup the logger for this module
    """
    # Create the Logger
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)
    handler = logging.StreamHandler()
    logger_formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
    handler.setFormatter(logger_formatter)
    root_logger.addHandler(handler)

def trainval(exp_dict, savedir_base, datadir, reset=False, num_workers=0):
    # bookkeepting stuff
    # ==================
    pprint.pprint(exp_dict)
    exp_id = hu.hash_dict(exp_dict)
    savedir = os.path.join(savedir_base, exp_id)
    if reset:
        hc.delete_and_backup_experiment(savedir)

    os.makedirs(savedir, exist_ok=True)
    hu.save_json(os.path.join(savedir, "exp_dict.json"), exp_dict)
    print("Experiment saved in %s" % savedir)

    logger.info(f'start trainval with experiment dict {pformat(exp_dict)}')
    input('press enter')

    # set seed
    # ==================
    seed = 42
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Dataset
    # ==================
    # train set
    train_set = datasets.get_dataset(dataset_dict=exp_dict["dataset"],
                                     split="train",
                                     datadir=datadir,
                                     exp_dict=exp_dict,
                                     dataset_size=exp_dict['dataset_size'])
    # val set
    val_set = datasets.get_dataset(dataset_dict=exp_dict["dataset"],
                                   split="val",
                                   datadir=datadir,
                                   exp_dict=exp_dict,
                                   dataset_size=exp_dict['dataset_size'])
    # test set
    test_set = datasets.get_dataset(dataset_dict=exp_dict["dataset"],
                                    split="test",
                                    datadir=datadir,
                                    exp_dict=exp_dict,
                                    dataset_size=exp_dict['dataset_size'])

    # val_sampler = torch.utils.data.SequentialSampler(val_set)
    val_loader = DataLoader(val_set,
                            # sampler=val_sampler,
                            batch_size=1,
                            collate_fn=ut.collate_fn,
                            num_workers=num_workers)
    test_loader = DataLoader(test_set,
                             # sampler=val_sampler,
                             batch_size=1,
                             collate_fn=ut.collate_fn,
                             num_workers=num_workers)

    # Model
    # ==================
    print('get model')
    model = models.get_model(model_dict=exp_dict['model'],
                             exp_dict=exp_dict,
                             train_set=train_set).cuda()

    # model.opt = optimizers.get_optim(exp_dict['opt'], model)
    model_path = os.path.join(savedir, "model.pth")
    score_list_path = os.path.join(savedir, "score_list.pkl")

    print(model)

    if os.path.exists(score_list_path):
        # resume experiment
        model.load_state_dict(hu.torch_load(model_path))
        score_list = hu.load_pkl(score_list_path)
        s_epoch = score_list[-1]['epoch'] + 1
    else:
        # restart experiment
        score_list = []
        s_epoch = 0

    # Train & Val
    # ==================
    print("Starting experiment at epoch %d" % (s_epoch))

    model.waiting = 0
    model.val_score_best = -np.inf

    train_sampler = torch.utils.data.RandomSampler(
        train_set, replacement=True, num_samples=2*len(test_set))

    train_loader = DataLoader(train_set,
                              sampler=train_sampler,
                              collate_fn=ut.collate_fn,
                              batch_size=exp_dict["batch_size"],
                              drop_last=True,
                              num_workers=num_workers)

    for e in range(s_epoch, exp_dict['max_epoch']):
        # Validate only at the start of each cycle
        score_dict = {}
        test_dict = model.val_on_loader(test_loader,
                                        savedir_images=os.path.join(savedir, "images"),
                                        n_images=3)
        # Train the model
        train_dict = model.train_on_loader(train_loader)

        # Validate the model
        val_dict = model.val_on_loader(val_loader)
        score_dict["val_score"] = val_dict["val_score"]

        # Get new score_dict
        score_dict.update(train_dict)
        score_dict["epoch"] = e
        score_dict["waiting"] = model.waiting

        model.waiting += 1

        # Add to score_list and save checkpoint
        score_list += [score_dict]

        # Save Best Checkpoint
        score_df = pd.DataFrame(score_list)
        if score_dict["val_score"] >= model.val_score_best:
            test_dict = model.val_on_loader(test_loader,
                                            savedir_images=os.path.join(savedir, "images"),
                                            n_images=3)
            score_dict.update(test_dict)
            hu.save_pkl(os.path.join(savedir, "score_list_best.pkl"), score_list)
            # score_df.to_csv(os.path.join(savedir, "score_best_df.csv"))
            hu.torch_save(os.path.join(savedir, "model_best.pth"), model.get_state_dict())
            model.waiting = 0
            model.val_score_best = score_dict["val_score"]
            print("Saved Best: %s" % savedir)

        # Report & Save
        score_df = pd.DataFrame(score_list)
        # score_df.to_csv(os.path.join(savedir, "score_df.csv"))
        print("\n", score_df.tail(), "\n")
        hu.torch_save(model_path, model.get_state_dict())
        hu.save_pkl(score_list_path, score_list)
        print("Checkpoint Saved: %s" % savedir)

        if model.waiting > 100:
            break

    print('Experiment completed et epoch %d' % e)

if __name__ == "__main__":
    setupLogging()
    parser = argparse.ArgumentParser()
    logger = logging.getLogger(__name__)

    parser.add_argument('-e', '--exp_group_list', nargs="+")
    parser.add_argument('-sb', '--savedir_base', required=True)
    parser.add_argument('-d', '--datadir', default=None)
    parser.add_argument("-r", "--reset", default=0, type=int)
    parser.add_argument("-ei", "--exp_id", default=None)
    parser.add_argument("-j", "--run_jobs", default=0, type=int)
    parser.add_argument("-nw", "--num_workers", type=int, default=0)

    args = parser.parse_args()

    # Collect experiments
    # ===================
    if args.exp_id is not None:
        # select one experiment
        savedir = os.path.join(args.savedir_base, args.exp_id)
        exp_dict = hu.load_json(os.path.join(savedir, "exp_dict.json"))

        exp_list = [exp_dict]
    else:
        # select exp group
        exp_list = []
        for exp_group_name in args.exp_group_list:
            exp_list += exp_configs.EXP_GROUPS[exp_group_name]

    for exp_dict in exp_list:
        # do trainval
        trainval(exp_dict=exp_dict,
                 savedir_base=args.savedir_base,
                 datadir=args.datadir,
                 reset=args.reset,
                 num_workers=args.num_workers)
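# Illustrative post-hoc inspection sketch (not part of the original script):
# after a run, the per-epoch metrics that hu.save_pkl writes above can be
# reloaded and tabulated with the same helpers the script already imports.
# 'inspect_scores' is a hypothetical helper; pass it a real
# '<savedir_base>/<exp_id>' directory produced by trainval.
def inspect_scores(example_experiment_dir):
    score_list = hu.load_pkl(os.path.join(example_experiment_dir, "score_list.pkl"))
    score_df = pd.DataFrame(score_list)
    print(score_df.tail())   # columns include epoch, val_score, waiting
    return score_df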
[((761, 780), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (778, 780), False, 'import logging\n'), ((836, 859), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (857, 859), False, 'import logging\n'), ((883, 942), 'logging.Formatter', 'logging.Formatter', (['"""%(name)s - %(levelname)s - %(message)s"""'], {}), "('%(name)s - %(levelname)s - %(message)s')\n", (900, 942), False, 'import logging\n'), ((1153, 1176), 'pprint.pprint', 'pprint.pprint', (['exp_dict'], {}), '(exp_dict)\n', (1166, 1176), False, 'import pprint\n'), ((1190, 1212), 'haven.haven_utils.hash_dict', 'hu.hash_dict', (['exp_dict'], {}), '(exp_dict)\n', (1202, 1212), True, 'from haven import haven_utils as hu\n'), ((1227, 1261), 'os.path.join', 'os.path.join', (['savedir_base', 'exp_id'], {}), '(savedir_base, exp_id)\n', (1239, 1261), False, 'import os\n'), ((1330, 1365), 'os.makedirs', 'os.makedirs', (['savedir'], {'exist_ok': '(True)'}), '(savedir, exist_ok=True)\n', (1341, 1365), False, 'import os\n'), ((1641, 1661), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1655, 1661), True, 'import numpy as np\n'), ((1666, 1689), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1683, 1689), False, 'import torch\n'), ((1694, 1726), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (1720, 1726), False, 'import torch\n'), ((1800, 1948), 'src.datasets.get_dataset', 'datasets.get_dataset', ([], {'dataset_dict': "exp_dict['dataset']", 'split': '"""train"""', 'datadir': 'datadir', 'exp_dict': 'exp_dict', 'dataset_size': "exp_dict['dataset_size']"}), "(dataset_dict=exp_dict['dataset'], split='train',\n datadir=datadir, exp_dict=exp_dict, dataset_size=exp_dict['dataset_size'])\n", (1820, 1948), False, 'from src import datasets\n'), ((2121, 2268), 'src.datasets.get_dataset', 'datasets.get_dataset', ([], {'dataset_dict': "exp_dict['dataset']", 'split': '"""val"""', 'datadir': 'datadir', 'exp_dict': 'exp_dict', 'dataset_size': "exp_dict['dataset_size']"}), "(dataset_dict=exp_dict['dataset'], split='val', datadir\n =datadir, exp_dict=exp_dict, dataset_size=exp_dict['dataset_size'])\n", (2141, 2268), False, 'from src import datasets\n'), ((2435, 2582), 'src.datasets.get_dataset', 'datasets.get_dataset', ([], {'dataset_dict': "exp_dict['dataset']", 'split': '"""test"""', 'datadir': 'datadir', 'exp_dict': 'exp_dict', 'dataset_size': "exp_dict['dataset_size']"}), "(dataset_dict=exp_dict['dataset'], split='test',\n datadir=datadir, exp_dict=exp_dict, dataset_size=exp_dict['dataset_size'])\n", (2455, 2582), False, 'from src import datasets\n'), ((2801, 2890), 'torch.utils.data.DataLoader', 'DataLoader', (['val_set'], {'batch_size': '(1)', 'collate_fn': 'ut.collate_fn', 'num_workers': 'num_workers'}), '(val_set, batch_size=1, collate_fn=ut.collate_fn, num_workers=\n num_workers)\n', (2811, 2890), False, 'from torch.utils.data import DataLoader\n'), ((3039, 3129), 'torch.utils.data.DataLoader', 'DataLoader', (['test_set'], {'batch_size': '(1)', 'collate_fn': 'ut.collate_fn', 'num_workers': 'num_workers'}), '(test_set, batch_size=1, collate_fn=ut.collate_fn, num_workers=\n num_workers)\n', (3049, 3129), False, 'from torch.utils.data import DataLoader\n'), ((3566, 3600), 'os.path.join', 'os.path.join', (['savedir', '"""model.pth"""'], {}), "(savedir, 'model.pth')\n", (3578, 3600), False, 'import os\n'), ((3623, 3662), 'os.path.join', 'os.path.join', (['savedir', '"""score_list.pkl"""'], {}), "(savedir, 'score_list.pkl')\n", (3635, 3662), 
False, 'import os\n'), ((3689, 3720), 'os.path.exists', 'os.path.exists', (['score_list_path'], {}), '(score_list_path)\n', (3703, 3720), False, 'import os\n'), ((4344, 4494), 'torch.utils.data.DataLoader', 'DataLoader', (['train_set'], {'sampler': 'train_sampler', 'collate_fn': 'ut.collate_fn', 'batch_size': "exp_dict['batch_size']", 'drop_last': '(True)', 'num_workers': 'num_workers'}), "(train_set, sampler=train_sampler, collate_fn=ut.collate_fn,\n batch_size=exp_dict['batch_size'], drop_last=True, num_workers=num_workers)\n", (4354, 4494), False, 'from torch.utils.data import DataLoader\n'), ((6682, 6707), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6705, 6707), False, 'import argparse\n'), ((6721, 6748), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (6738, 6748), False, 'import logging\n'), ((1284, 1324), 'haven.haven_chk.delete_and_backup_experiment', 'hc.delete_and_backup_experiment', (['savedir'], {}), '(savedir)\n', (1315, 1324), True, 'from haven import haven_chk as hc\n'), ((1383, 1421), 'os.path.join', 'os.path.join', (['savedir', '"""exp_dict.json"""'], {}), "(savedir, 'exp_dict.json')\n", (1395, 1421), False, 'import os\n'), ((3828, 3856), 'haven.haven_utils.load_pkl', 'hu.load_pkl', (['score_list_path'], {}), '(score_list_path)\n', (3839, 3856), True, 'from haven import haven_utils as hu\n'), ((5472, 5496), 'pandas.DataFrame', 'pd.DataFrame', (['score_list'], {}), '(score_list)\n', (5484, 5496), True, 'import pandas as pd\n'), ((6231, 6255), 'pandas.DataFrame', 'pd.DataFrame', (['score_list'], {}), '(score_list)\n', (6243, 6255), True, 'import pandas as pd\n'), ((6430, 6470), 'haven.haven_utils.save_pkl', 'hu.save_pkl', (['score_list_path', 'score_list'], {}), '(score_list_path, score_list)\n', (6441, 6470), True, 'from haven import haven_utils as hu\n'), ((7353, 7397), 'os.path.join', 'os.path.join', (['args.savedir_base', 'args.exp_id'], {}), '(args.savedir_base, args.exp_id)\n', (7365, 7397), False, 'import os\n'), ((3333, 3424), 'src.models.get_model', 'models.get_model', ([], {'model_dict': "exp_dict['model']", 'exp_dict': 'exp_dict', 'train_set': 'train_set'}), "(model_dict=exp_dict['model'], exp_dict=exp_dict, train_set\n =train_set)\n", (3349, 3424), False, 'from src import models\n'), ((3780, 3805), 'haven.haven_utils.torch_load', 'hu.torch_load', (['model_path'], {}), '(model_path)\n', (3793, 3805), True, 'from haven import haven_utils as hu\n'), ((7430, 7468), 'os.path.join', 'os.path.join', (['savedir', '"""exp_dict.json"""'], {}), "(savedir, 'exp_dict.json')\n", (7442, 7468), False, 'import os\n'), ((1535, 1552), 'pprint.pformat', 'pformat', (['exp_dict'], {}), '(exp_dict)\n', (1542, 1552), False, 'from pprint import pformat\n'), ((4865, 4896), 'os.path.join', 'os.path.join', (['savedir', '"""images"""'], {}), "(savedir, 'images')\n", (4877, 4896), False, 'import os\n'), ((5805, 5849), 'os.path.join', 'os.path.join', (['savedir', '"""score_list_best.pkl"""'], {}), "(savedir, 'score_list_best.pkl')\n", (5817, 5849), False, 'import os\n'), ((5963, 6002), 'os.path.join', 'os.path.join', (['savedir', '"""model_best.pth"""'], {}), "(savedir, 'model_best.pth')\n", (5975, 6002), False, 'import os\n'), ((5661, 5692), 'os.path.join', 'os.path.join', (['savedir', '"""images"""'], {}), "(savedir, 'images')\n", (5673, 5692), False, 'import os\n')]
unimauro/eden
languages/pt-br.py
b739d334e6828d0db14b3790f2f5e2666fc83576
# coding: utf8 { '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" é uma expressão opcional como "field1=\'newvalue\'". Não é possível atualizar ou excluir os resultados de uma junção', '# of International Staff': '# De equipe internacional', '# of National Staff': '# De equipe nacional', '# of Vehicles': '# De Veículos', '%(msg)s\nIf the request type is "%(type)s", please enter the %(type)s on the next screen.': '%(msg)s\nSe o tipo de pedido é "%(type)s", digite a %(type)s na próxima tela.', '%(system_name)s - Verify Email': '%(system_name)s - Verificar E-Mail', '%.1f km': '%.1f km', '%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S', '%m-%d-%Y': '%m-%d-%Y', '%m-%d-%Y %H:%M:%S': '%m-%d-%Y %H:%M:%S', '%s Create a new site or ensure that you have permissions for an existing site.': '%s Cria um novo site ou garante que você tenha permissões para um site existente.', '%s rows deleted': '%s linhas excluídas', '%s rows updated': '%s linhas atualizadas', '& then click on the map below to adjust the Lat/Lon fields': 'Em seguida selecione o mapa abaixo para ajustar os campos Lat/Lon', "'Cancel' will indicate an asset log entry did not occur": "'cancelar' irá indicar que a entrada de log de ativo não ocorreu", '* Required Fields': '* campos obrigatórios', '0-15 minutes': '0-15 minutos', '1 Assessment': '1 Avaliação', '1 location, shorter time, can contain multiple Tasks': '1 Local, menos tempo, pode conter várias Tarefas', '1-3 days': '1 a 3 dias', '15-30 minutes': '15 a 30 minutos', '2 different options are provided here currently:': '2 opções diferentes são fornecidos aqui atualmente:', '2x4 Car': 'Carro 2x4', '30-60 minutes': '30-60 minutos', '4-7 days': '4-7 Dias', '4x4 Car': 'Carro 4x4', '8-14 days': '8-14 Dias', 'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': 'Um marcador assinalado para um local individual é configurado se há a necessidade de substituir um marcador assinalado para o Recurso Classe.', 'A Reference Document such as a file, URL or contact person to verify this data.': 'A Reference Document such as a file, URL or contact person to verify this data.', 'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': 'Um documento de referência como um arquivo, URL ou contacto pessoal para verificar esses dados. 
Pode inserir as primeiras letras do nome dum documento para chegar a esse documento.', 'A brief description of the group (optional)': 'Uma descrição breve do grupo (opcional)', 'A file downloaded from a GPS containing a series of geographic points in XML format.': 'Um ficheiro descarregado de um GPS contendo uma série de pontos geográficos em formato XML.', 'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'Um ficheiro em formato GPX retirado de um GPS cujas datas e horas podem ser correlacionadas com as de fotografias para localização num mapa.', 'A file in GPX format taken from a GPS.': 'A file in GPX format taken from a GPS.', 'A library of digital resources, such as photos, documents and reports': 'Uma biblioteca de recursos digitais, como fotos, documentos e relatórios', 'A location group can be used to define the extent of an affected area, if it does not fall within one administrative region.': 'Um grupo local pode ser usado para definir a extensão de uma área afetada, se não cair dentro de uma região administrativa.', 'A location group is a set of locations (often, a set of administrative regions representing a combined area).': 'Um grupo de localização é um conjunto de locais (muitas vezes, um conjunto de regiões administrativas que representam uma área Combinada).', 'A location group is a set of locations (often, a set of administrative regions representing a combined area). Member locations are added to a location group here. Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group. A location group can be used to define the extent of an affected area, if it does not fall within one administrative region. Location groups can be used in the Regions menu.': 'Um grupo de localização é um conjunto de locais (muitas vezes, um conjunto de regiões administrativas que representam uma área Combinada). Membros locais são adicionados em grupos locais aqui. Grupos locais podem ser utilizados para filtrar o que é mostrado no mapa e nos resultados da procura apenas as entidades locais abrangidas no grupo. Um grupo local pode ser usado para definir a extensão de uma área afetada, se não cair dentro de uma região administrativa. Grupos local pode ser utilizado no menu Regiões.', 'A location group must have at least one member.': 'Um grupo de localização deve ter, pelo menos, um membro.', "A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": 'Um local que especifica a área geográfica dessa região. Este pode ser um local a partir da hierarquia local, ou um "grupo local", ou um local que tem um limite para a área.', 'A survey series with id %s does not exist. Please go back and create one.': 'Id% não foi encontrado na pesquisa. 
Por favor voltar e crie um.', 'A task is a piece of work that an individual or team can do in 1-2 days': 'A task is a piece of work that an individual or team can do in 1-2 days', 'ABOUT THIS MODULE': 'SOBRE ESTE MÓDULO', 'ACCESS DATA': 'Dados de Acesso', 'ANY': 'Todos', 'API Key': 'API Key', 'API is documented here': 'API está documentado aqui', 'ATC-20 Rapid Evaluation modified for New Zealand': 'ATC-20 Rápida Avaliação modificado para a Nova Zelândia', 'Abbreviation': 'Abreviatura', 'Ability to Fill Out Surveys': 'Capacidade para preencher Inquéritos', 'Ability to customize the list of details tracked at a Shelter': 'Capacidade de Customizar a lista de detalhes rastreados em um Abrigo', 'Ability to customize the list of human resource tracked at a Shelter': 'Capacidade de Customizar a lista de recursos humanos Rastreados em um Abrigo', 'Ability to customize the list of important facilities needed at a Shelter': 'Capacidade de Customizar a lista das instalações importante necessária em um Abrigo', 'Ability to view Results of Completed and/or partially filled out Surveys': 'Capacidade para visualizar resultados de Concluída e/ou parcialmente preenchido Pesquisas', 'About': 'sobre', 'About Sahana': 'Sobre Sahana', 'Access denied': 'Acesso negado', 'Access to Shelter': 'Acesso a Abrigo', 'Access to education services': 'Acesso a serviços de educação', 'Accessibility of Affected Location': 'Acessibilidade do Local Afectado', 'Accompanying Relative': 'Accompanying Relative', 'Account Registered - Please Check Your Email': 'Conta registrada - verifique seu e-mail', 'Account registered, however registration is still pending approval - please wait until confirmation received.': 'Conta registrada, mas registro pende aprovação - por favor aguarde até confirmação ser recebida.', 'Acronym': 'Iniciais', "Acronym of the organization's name, eg. 
IFRC.": 'Acrônimo do nome da organização, por exemplo, FICV.', 'Actionable by all targeted recipients': 'Acionáveis por todos os destinatários de destino', 'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>': 'Acionáveis apenas pelos participantes exercício designado; Identificação do excercício deve aparecer em', 'Actioned?': 'Acionado?', 'Actions': 'Ações', 'Actions taken as a result of this request.': 'Ações tomadas como resultado desse pedido.', 'Activate Events from Scenario templates for allocation of appropriate Resources (Human, Assets & Facilities).': 'Ativar eventos dos templates de cenário para alocação adequada de recursos (humanos, ativos e equipamentos)', 'Active': 'ativo', 'Active Problems': 'Problemas ativos', 'Activities': 'atividades', 'Activities matching Assessments:': 'Atividades correspondentes a Avaliações:', 'Activities of boys 13-17yrs before disaster': 'Atividades de garotos 13-17 anos antes do desastre', 'Activities of boys 13-17yrs now': 'Atividades de garotos 13-17yrs agora', 'Activities of boys <12yrs before disaster': 'Atividades de garotos <12 anos antes do desastre', 'Activities of boys <12yrs now': 'Atividades de garotos <12 anos agora', 'Activities of children': 'Atividades de crianças', 'Activities of girls 13-17yrs before disaster': 'Atividades de meninas 13-17yrs antes de desastres', 'Activities of girls 13-17yrs now': 'Atividades de meninas 13-17yrs agora', 'Activities of girls <12yrs before disaster': 'Atividades de meninas <12yrs antes de desastres', 'Activities of girls <12yrs now': 'Agora atividades de meninas de menos de 12 anos', 'Activities:': 'Atividades:', 'Activity': 'atividade', 'Activity Added': 'Atividade Incluída', 'Activity Deleted': 'Atividade Apagada', 'Activity Details': 'Detalhes da Atividade', 'Activity Report': 'Relatório de atividades', 'Activity Reports': 'Relatório de Atividades', 'Activity Type': 'Tipo de atividade', 'Activity Updated': 'Atividade Atualizada', 'Activity added': 'Activity added', 'Activity removed': 'Activity removed', 'Activity updated': 'Activity updated', 'Add': 'incluir', 'Add Activity': 'Incluir Atividade', 'Add Activity Report': 'Incluir Relatório de atividade', 'Add Activity Type': 'Incluir tipo de atividade', 'Add Address': 'Incluir Endereço', 'Add Alternative Item': 'Adicionar item alternativo', 'Add Assessment': 'Incluir Avaliação', 'Add Assessment Summary': 'Incluir Avaliação De Resumo', 'Add Asset': 'Incluir ativo', 'Add Asset Log Entry - Change Label': 'Incluir recurso de entrada de entrada - trocar a Etiqueta', 'Add Availability': 'Incluir Disponibilidade', 'Add Baseline': 'Incluir Linha', 'Add Baseline Type': 'Incluir Linha De Tipo', 'Add Bed Type': 'Incluir Tipo De Cama', 'Add Brand': 'Incluir Marca', 'Add Budget': 'Incluir Orçamento', 'Add Bundle': 'Incluir Pacote Configurável', 'Add Camp': 'Incluir acampamento', 'Add Camp Service': 'Incluir acampamento de serviço', 'Add Camp Type': 'Incluir tipo de acampamento', 'Add Catalog': 'Incluir Catálogo', 'Add Catalog Item': 'Incluir Item de Catálogo', 'Add Certificate': 'Incluir certificado', 'Add Certification': 'Adicionar Certificação', 'Add Cholera Treatment Capability Information': 'Incluir Informação sobre capacidade para tratamento de cólera', 'Add Cluster': 'Incluir cluster', 'Add Cluster Subsector': 'Incluir Subsetor de Cluster', 'Add Competency': 'incluir competência', 'Add Competency Rating': 'Incluir Classificação da Competência', 'Add Contact': 'Incluir contato', 'Add Contact Information': 
'Incluir informações de contato', 'Add Course': 'Incluir curso', 'Add Course Certificate': 'Incluir Certificado de Curso', 'Add Credential': 'Incluir referência', 'Add Credentials': 'Incluir Referências', 'Add Dead Body Report': 'Incluir Relatório de Cadáver', 'Add Disaster Victims': 'Incluir Vítimas de Desastre', 'Add Distribution.': 'Incluir distribuição.', 'Add Document': 'Add Document', 'Add Donor': 'Incluir doador', 'Add Facility': 'Incluir Recurso', 'Add Feature Class': 'Incluir classe de recurso', 'Add Feature Layer': 'Incluir camada de recurso', 'Add Flood Report': 'Incluir Relatório Enchente', 'Add GPS data': 'Add GPS data', 'Add Group': 'Incluir Grupo', 'Add Group Member': 'Incluir Membro do Grupo', 'Add Hospital': 'Incluir Hospital', 'Add Human Resource': 'Incluir Recurso Humano', 'Add Identification Report': 'Incluir Identificação Relatório', 'Add Identity': 'Incluir Identidade', 'Add Image': 'Incluir Imagem', 'Add Impact': 'Adicionar Impacto', 'Add Impact Type': 'Incluir Tipo De Impacto', 'Add Incident': 'Adicionar Incidente', 'Add Incident Report': 'Incluir relatório de incidente', 'Add Inventory Item': 'Inclúir item de inventário', 'Add Item': 'Incluir item', 'Add Item Category': 'Incluir categoria de item', 'Add Item Pack': 'Incluir pacote de itens', 'Add Item to Catalog': 'Incluir Item no Catálogo', 'Add Item to Commitment': 'Incluir Item no Compromisso', 'Add Item to Inventory': 'Incluir Item de Inventário', 'Add Item to Request': 'Incluir Item para pedido', 'Add Item to Shipment': 'Adicionar Item para Embarque', 'Add Job Role': 'Incluir tarefa Função', 'Add Key': 'Incluir Chave', 'Add Kit': 'Adicionar Kit', 'Add Layer': 'Incluir Camada', 'Add Level 1 Assessment': 'Incluir nível de Avaliação 1', 'Add Level 2 Assessment': 'Incluir nível de Avaliação 2', 'Add Location': 'Incluir Local', 'Add Log Entry': 'Adicionar Entrada de Log', 'Add Map Configuration': 'Incluir Mapa de configuração', 'Add Marker': 'Incluir Marcador', 'Add Member': 'Incluir membro', 'Add Membership': 'Incluir Associação', 'Add Message': 'Incluir Mensagem', 'Add Mission': 'Incluir Missão', 'Add Need': 'Incluir o necessário', 'Add Need Type': 'Adicionar o tipo Necessário', 'Add New': 'Incluir novo', 'Add New Activity': 'Incluir Nova Atividade', 'Add New Address': 'Incluir Novo Endereço', 'Add New Alternative Item': 'Incluir novo Item Alternativo', 'Add New Assessment': 'Adicionar Nova Avaliação', 'Add New Assessment Summary': 'Incluir novo Resumo de Avaliação', 'Add New Asset': 'Incluir Novo Ativo', 'Add New Baseline': 'Incluir nova linha de base', 'Add New Baseline Type': 'Incluir novo tipo de linha de base', 'Add New Brand': 'Adicionar Nova Marca', 'Add New Budget': 'Adicionar Novo Orçamento', 'Add New Bundle': 'Incluir Novo Pacote', 'Add New Camp': 'Incluir novo Campo', 'Add New Camp Service': 'Inlcuir Novo Campo de Serviço', 'Add New Camp Type': 'Incluir Novo Campo de Tipo', 'Add New Catalog': 'Incluir Novo Catálogo', 'Add New Catalog Item': 'Incluir novo Item de catálogo', 'Add New Cluster': 'Adicionar novo grupo', 'Add New Cluster Subsector': 'Adicionar novo subgrupo', 'Add New Commitment Item': 'Incluir novo item de compromisso', 'Add New Contact': 'Incluir novo contato', 'Add New Credential': 'Incluir nova credencial', 'Add New Document': 'Incluir Novo Documento', 'Add New Donor': 'Adicionar novo doador', 'Add New Entry': 'Incluir Nova Entrada', 'Add New Event': 'Adicionar novo evento', 'Add New Facility': 'Incluir novo Recurso', 'Add New Feature Class': 'Incluir nova classe do recurso', 'Add New 
Feature Layer': 'Adicionar nova camada de características', 'Add New Flood Report': 'Adicionar novo relatório de cheias', 'Add New Group': 'Adicionar novo grupo', 'Add New Home': 'Add New Home', 'Add New Hospital': 'Adicionar novo hospital', 'Add New Human Resource': 'Incluir novos recursos humanos', 'Add New Identity': 'Adicionar nova identidade', 'Add New Image': 'Adicionar nova imagem', 'Add New Impact': 'Adicionar novo impacto', 'Add New Impact Type': 'Incluir novo Tipo De Impacto', 'Add New Incident Report': 'Adicionar novo relatório de incidentes', 'Add New Inventory Item': 'Incluir novo Item De Inventário', 'Add New Item': 'Incluir novo item', 'Add New Item Category': 'Incluir nova categoria de itens', 'Add New Item Pack': 'Incluir novo pacote de itens', 'Add New Item to Kit': 'Incluir novo Item de Kit', 'Add New Key': 'Adicionar Nova Chave', 'Add New Kit': 'Incluir novo Kit', 'Add New Layer': 'Adicionar Nova Camada', 'Add New Level 1 Assessment': 'Incluir novo nível 1 avaliação', 'Add New Level 2 Assessment': 'Incluir novo nível 2 avaliação', 'Add New Location': 'Incluir Novo Local', 'Add New Log Entry': 'Incluir nova entrada de Log', 'Add New Map Configuration': 'Incluir Novo Mapa de Configuração', 'Add New Marker': 'Incluir novo Marcador', 'Add New Member': 'Incluir Novo Membro', 'Add New Membership': 'Incluir novo membro', 'Add New Need': 'Adicionar novas necessidades', 'Add New Need Type': 'Incluir novo Tipo Necessário', 'Add New Note': 'Adicionar NOVA NOTA', 'Add New Office': 'Adicionar novo escritório', 'Add New Organization': 'Incluir nova Organização', 'Add New Patient': 'Add New Patient', 'Add New Person to Commitment': 'Add New Person to Commitment', 'Add New Photo': 'Adicionar Nova Foto', 'Add New Population Statistic': 'Incluir nova População De Estatística', 'Add New Problem': 'Incluir novo Problema', 'Add New Project': 'Incluir novo projeto', 'Add New Projection': 'Adicionar Nova Projecção', 'Add New Rapid Assessment': 'Incluir nova Avaliação Rápida', 'Add New Received Item': 'Incluir novo Item Recebido', 'Add New Record': 'Incluir Novo Registro', 'Add New Relative': 'Add New Relative', 'Add New Report': 'Incluir Novo Relatório', 'Add New Request': 'Incluir novo pedido', 'Add New Request Item': 'Incluir novo Item de Pedido', 'Add New Resource': 'Incluir Novo Recurso', 'Add New River': 'Incluir novo Rio', 'Add New Role': 'INCLUIR NOVA FUNÇÃO', 'Add New Role to User': 'Incluir nova função para o usuário', 'Add New Room': 'Adicionar nova sala', 'Add New Scenario': 'Adicionar Novo cenário', 'Add New Sector': 'Incluir novo Sector', 'Add New Sent Item': 'Incluir novo Item Enviado', 'Add New Setting': 'Adicionar Nova Configuração', 'Add New Shelter': 'Incluir Novo Abrigo', 'Add New Shelter Service': 'Incluir Novo Serviço de Abrigo', 'Add New Shelter Type': 'Incluir Novo Tipo de Abrigo', 'Add New Skill': 'Adicionar nova habilidade', 'Add New Solution': 'Adicionar nova solução', 'Add New Staff': 'Adicionar Nova Equipe', 'Add New Staff Member': 'Incluir novo equipe do membro', 'Add New Staff Type': 'Incluir novo tipo de equipe.', 'Add New Subsector': 'Incluir novo Subsector', 'Add New Survey Answer': 'Incluir nova resposta na pesquisa.', 'Add New Survey Question': 'Incluir nova pergunta na pesquisa.', 'Add New Survey Section': 'Incluir nova seção na pesquisa.', 'Add New Survey Series': 'Incluir nova série na pesquisa.', 'Add New Survey Template': 'Incluir novo Modelo De Pesquisa', 'Add New Task': 'Incluir Nova Tarefa', 'Add New Team': 'Adicionar nova equipe', 'Add New Theme': 
'Incluir novo tema', 'Add New Ticket': 'Incluir nova permissão', 'Add New Track': 'Adicionar Nova Pista', 'Add New User': 'Incluir Novo Usuário', 'Add New User to Role': 'Adicionar Novo usuário para Função', 'Add New Vehicle': 'Add New Vehicle', 'Add New Volunteer': 'Incluir novo Voluntário', 'Add New Warehouse': 'Adicionar novo armazém', 'Add Note': 'Incluir nota', 'Add Office': 'Adicionar Office', 'Add Organization': 'Incluir Organização', 'Add Peer': 'Incluír Par', 'Add Person': 'incluir pessoa', 'Add Person to Commitment': 'Add Person to Commitment', 'Add Personal Effects': 'Incluir efeitos pessoais', 'Add Photo': 'Incluir Foto', 'Add Population Statistic': 'Incluir População Estatística', 'Add Position': 'Adicionar Posição', 'Add Problem': 'Adicionar Problema', 'Add Project': 'Adicionar Projeto', 'Add Projection': 'Adicionar Projeção', 'Add Question': 'Adicionar Pergunta', 'Add Rapid Assessment': 'Adicionar Avaliação Rápida', 'Add Record': 'Incluir Registro', 'Add Reference Document': 'Incluir documento de referência', 'Add Report': 'Incluir Relatório', 'Add Request': 'Incluir Pedido', 'Add Resource': 'Incluir Recurso', 'Add River': 'Incluir Rio', 'Add Role': 'Incluir Função', 'Add Room': 'Incluir Sala', 'Add Section': 'Incluir Secção', 'Add Sector': 'Incluir Sector', 'Add Service Profile': 'Incluir Perfil de Serviço', 'Add Setting': 'Adicionar Definição', 'Add Shelter': 'Incluir Abrigo', 'Add Shelter Service': 'Incluir Serviço de Abrigo', 'Add Shelter Type': 'Incluir Tipo de Abrigo', 'Add Skill': 'Incluir Habilidade', 'Add Skill Equivalence': 'Incluir equivalência de habilidades', 'Add Skill Provision': 'Incluir provisão de habilidades', 'Add Skill Type': 'Incluir Tipo de Habilidade', 'Add Skill to Request': 'Add Skill to Request', 'Add Solution': 'Incluir Solução', 'Add Staff': 'Incluir equipe', 'Add Staff Member': 'Adicionar membro da equipe', 'Add Staff Type': 'Incluir tipo de equipe', 'Add Status': 'Incluir Status', 'Add Subscription': 'Incluir Assinatura', 'Add Subsector': 'Incluir Subsetor', 'Add Survey Answer': 'Incluir resposta de pesquisa', 'Add Survey Question': 'Adicionar pergunta da pesquisa', 'Add Survey Section': 'Incluir seção da pesquisa', 'Add Survey Series': 'Incluir série da pesquisa', 'Add Survey Template': 'Incluir Modelo De Pesquisa', 'Add Task': 'Incluir Tarefa', 'Add Team': 'Incluir equipe', 'Add Theme': 'Incluir Tema', 'Add Ticket': 'Adicionar Bilhete', 'Add Training': 'Incluir Treinamento', 'Add Unit': 'Incluir Unidade', 'Add User': 'Incluir Usuário', 'Add Vehicle': 'Add Vehicle', 'Add Vehicle Detail': 'Add Vehicle Detail', 'Add Vehicle Details': 'Add Vehicle Details', 'Add Volunteer': 'Incluir Voluntário', 'Add Volunteer Availability': 'Incluir disponibilidade do voluntário', 'Add Warehouse': 'Adicionar Data Warehouse', 'Add a Person': 'Incluir uma pessoa', 'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.': 'Adicionar um documento de referência como um arquivo, URL ou contacto pessoal para verificar esses dados. 
Se você não inserir um documento de referência, seu e-mail será exibido no lugar.', 'Add a Volunteer': 'Incluir um Voluntário', 'Add a new certificate to the catalog.': 'Incluir um novo certificado no catálogo.', 'Add a new competency rating to the catalog.': 'Adicionar uma classificação nova competência para o catálogo.', 'Add a new course to the catalog.': 'Adicionar um novo rumo para o catálogo.', 'Add a new job role to the catalog.': 'Incluir uma função nova tarefa para o catálogo.', 'Add a new skill provision to the catalog.': 'Incluir uma disposição nova habilidade para o catálogo.', 'Add a new skill to the catalog.': 'Incluir uma nova habilidade para o catálogo.', 'Add a new skill type to the catalog.': 'Incluir um tipo novo de hailidade para o catálogo.', 'Add new Group': 'Adicionar novo grupo', 'Add new Individual': 'Incluir novo indivíduo', 'Add new Patient': 'Add new Patient', 'Add new project.': 'Adicionar novo projeto.', 'Add new staff role.': 'Incluir função de novos funcionários.', 'Add staff members': 'Incluir membros da equipe', 'Add to Bundle': 'Incluir no Pacote Configurável', 'Add to budget': 'Incluir no orçamento', 'Add volunteers': 'Incluir voluntários', 'Add/Edit/Remove Layers': 'Incluir/editar/remover camadas', 'Additional Beds / 24hrs': 'Camas adicionais / 24 horas', 'Address': 'endereços', 'Address Details': 'Detalhes do Endereço', 'Address Type': 'Tipo de Endereço', 'Address added': 'Endereço incluído', 'Address deleted': 'Endereço excluído', 'Address updated': 'Endereço actualizado', 'Addresses': 'Endereços', 'Adequate': 'adequar', 'Adequate food and water available': 'Comida e água adequado disponível', 'Admin Email': 'email do administrador', 'Admin Name': 'nome do administrador', 'Admin Tel': 'Telefone do administrador', 'Administration': 'administração', 'Admissions/24hrs': 'admissões/24 horas', 'Adolescent (12-20)': 'adolescente (12-20)', 'Adolescent participating in coping activities': 'Adolescente participando em actividades de superação', 'Adult (21-50)': 'Adulto (21-50)', 'Adult ICU': 'UTI para adultos', 'Adult Psychiatric': 'Psiquiátrico para adultos', 'Adult female': 'Mulher adulta', 'Adult male': 'Homem adulto', 'Adults in prisons': 'Adultos em prisões', 'Advanced:': 'Avançado:', 'Advisory': 'Aconselhamento', 'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.': 'Depois de pressionar o botão será mostrado um conjunto de dois elementos, um de cada vez. Por favor selecione a uma solução de cada par de sua preferência sobre o outro.', 'Age Group': 'Grupo etário', 'Age group': 'Grupo etário', 'Age group does not match actual age.': 'Grupo etário não corresponde à idade real.', 'Aggravating factors': 'Fatores agravantes', 'Agriculture': 'Agricultura', 'Air Transport Service': 'Serviço de Transporte Aéreo', 'Air tajin': 'Tajin AR', 'Aircraft Crash': 'Despenho de Avião', 'Aircraft Hijacking': 'Sequestro de Avião', 'Airport Closure': 'Encerramento de Aeroporto', 'Airspace Closure': 'Encerramento de Espaço Aéreo', 'Alcohol': 'álcool', 'Alert': 'Alertar', 'All': 'Tudo', 'All Inbound & Outbound Messages are stored here': 'Todas as mensagens enviadas e recebidas são armazenados aqui', 'All Resources': 'Todos os recursos', 'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. 
Please consult the source field of each entry.': 'Todos os dados fornecidos pelos Sahana Software Foundation a partir deste site é licenciado sob uma Licença Atribuição Comuns criativos. No entanto, nem todos os dados se origina aqui. Por favor consulte o campo de origem de cada entrada.', 'Allowed to push': 'Permissão para pressionar', 'Allows a Budget to be drawn up': 'Permite que um orçamento seja estabelecido', 'Allows authorized users to control which layers are available to the situation map.': 'Permite usuários autorizados a controlar quais camadas estão disponíveis no mapa de situação.', 'Alternative Item': 'Item Alternativo', 'Alternative Item Details': 'Detalhes do Item alternativo', 'Alternative Item added': 'Item alternativo incluído', 'Alternative Item deleted': 'Item alternativo excluído', 'Alternative Item updated': 'Item Alternativo atualizado', 'Alternative Items': 'Itens alternativos', 'Alternative places for studying': 'Locais alternativos para estudo', 'Ambulance Service': 'Serviço de Ambulância', 'An asset must be assigned to a person, site OR location.': 'Um ATIVO deve ser designado a uma pessoa, local ou site.', 'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.': 'Um sistema de admissão, um sistema de gestão de depósitos, tracking and commodity, gestão da cadeia de fornecimentos, aquisições de ativos e outros e os recursos de gerenciamento de recurso.', 'An item which can be used in place of another item': 'Um item que pode ser utilizado no lugar de outro item', 'Analysis of Completed Surveys': 'Análise das Pesquisas Concluídas', 'Analysis of assessments': 'Analysis of assessments', 'Animal Die Off': 'Morte Animal', 'Animal Feed': 'Alimentação Animal', 'Answer Choices (One Per Line)': 'Resposta opções (Um por linha)', 'Anthropolgy': 'Anthropolgy', 'Antibiotics available': 'Antibióticos disponíveis', 'Antibiotics needed per 24h': 'Antibióticos necessário por H', 'Apparent Age': 'Idade aparente', 'Apparent Gender': 'Género aparente', 'Application Deadline': 'Prazo Final da aplicação', 'Applications': 'Requisições', 'Approve': 'Aprovar', 'Approved': 'aprovado', 'Approver': 'Aprovador', 'Arabic': 'Arabic', 'Arctic Outflow': 'Árctico Exfluxo', 'Area': 'Área', 'Areas inspected': 'Inspeccionados áreas', 'As of yet, no sections have been added to this template.': 'As of yet, no sections have been added to this template.', 'Assessment': 'Avaliação', 'Assessment Details': 'Detalhes da Avaliação', 'Assessment Reported': 'Avaliação Relatada', 'Assessment Summaries': 'Sumário de Avaliações', 'Assessment Summary Details': 'Detalhes do sumário de avaliação', 'Assessment Summary added': 'Anexado sumário de avaliações', 'Assessment Summary deleted': 'Avaliação de resumo apagado', 'Assessment Summary updated': 'Sumário de avaliação atualizado', 'Assessment added': 'Avaliação incluída', 'Assessment admin level': 'Avaliação de nível administrativo', 'Assessment deleted': 'Avaliação excluída', 'Assessment timeline': 'sequência temporal de avaliação', 'Assessment updated': 'Avaliação atualizada', 'Assessments': 'avaliações', 'Assessments Needs vs. Activities': 'Necessidades de Avaliações vs. 
Atividades', 'Assessments and Activities': 'Avaliações e Atividades', 'Assessments:': 'Avaliações', 'Assessor': 'Avaliador', 'Asset': 'Recurso', 'Asset Assigned': 'Ativo Designado', 'Asset Assignment Details': 'Detalhes da Designação de Recursos', 'Asset Assignment deleted': 'Designação De ativo excluído', 'Asset Assignment updated': 'Atribuição de Ativo atualizada', 'Asset Assignments': 'Designações de Ativo', 'Asset Details': 'Detalhes do Ativo', 'Asset Log': 'Log de ATIVOS', 'Asset Log Details': 'Detalhes do Log de ativos', 'Asset Log Empty': 'Log de Ativos vazio', 'Asset Log Entry Added - Change Label': 'Adicionada uma entrada no Log de ativos -Alterar Etiqueta', 'Asset Log Entry deleted': 'Apagada uma entrada no Log de ativos', 'Asset Log Entry updated': 'Atualizada uma entrada no Log de Ativos', 'Asset Management': 'gerenciamento de recursos', 'Asset Number': 'número do recurso', 'Asset added': 'Ativo Incluído', 'Asset deleted': 'ativo excluído', 'Asset removed': 'Ativo Removido', 'Asset updated': 'recurso atualizado', 'Assets': 'recursos', 'Assets are resources which are not consumable but are expected back, so they need tracking.': 'Os ativos são recursos que não são consumíveis e serão devolvidos, portanto precisam de rastreamento.', 'Assign': 'Designar', 'Assign Asset': 'designar recurso', 'Assign Group': 'Designar Grupo', 'Assign Staff': 'Atribuir Equipe', 'Assign to Org.': 'Designar para Org.', 'Assign to Organisation': 'Atribuir para Organização', 'Assign to Organization': 'Atribuir para Organização', 'Assign to Person': 'Atribuir uma Pessoa', 'Assign to Site': 'Atribuir um Site', 'Assigned': 'Designado', 'Assigned By': 'Designado por', 'Assigned To': 'Designado Para', 'Assigned to': 'Designado para', 'Assigned to Organisation': 'Designado para Organização', 'Assigned to Person': 'Designado para a Pessoa', 'Assigned to Site': 'Designado para o Site', 'Assignments': 'Designações', 'At/Visited Location (not virtual)': 'Em/Visitou Local (não virtual)', 'Attend to information sources as described in <instruction>': 'Participar de fontes de informação, conforme descrito em<instruction>', 'Attribution': 'Atribuição', "Authenticate system's Twitter account": 'Sistema de Autenticação para conta de Twitter', 'Author': 'autor', 'Availability': 'Disponibilidade', 'Available Alternative Inventories': 'Alternativas de Inventário disponíveis', 'Available Alternative Inventory Items': 'Itens alternativos de Inventário disponíveis', 'Available Beds': 'camas disponíveis', 'Available Forms': 'Available Forms', 'Available Inventories': 'Inventários disponíveis', 'Available Inventory Items': 'Itens de inventário disponíveis', 'Available Messages': 'Mensagens disponíveis', 'Available Records': 'Registros disponíveis', 'Available databases and tables': 'Banco de Dados e Tabelas disponíveis', 'Available for Location': 'Disponível para locação', 'Available from': 'disponível de', 'Available in Viewer?': 'Disponível no visualizador?', 'Available until': 'Disponível até', 'Avalanche': 'Avalanche', 'Avoid the subject event as per the <instruction>': 'Evitar o assunto do evento de acordo com a', 'Background Color': 'Background Color', 'Background Color': 'Cor de Plano de Fundo', 'Background Color for Text blocks': 'Cor de segundo plano para blocos de texto', 'Bahai': 'Bahai', 'Baldness': 'Calvície', 'Banana': 'Banana', 'Bank/micro finance': 'banco/micro finanças', 'Barricades are needed': 'Barricadas são necessárias', 'Base Layer?': 'Camada De Base?', 'Base Location': 'Local da Base', 'Base Site Set': 
'Conjunto de Site básico', 'Baseline Data': 'Dados básicos', 'Baseline Number of Beds': 'Numero de camadas base de camas', 'Baseline Type': 'Tipo de Linha Base', 'Baseline Type Details': 'Detalhes de Tipo de Linha Base', 'Baseline Type added': 'Tipo de Linha Base adicionado', 'Baseline Type deleted': 'Tipo de Linha Base removido', 'Baseline Type updated': 'Tipo de Linha Base actualizado', 'Baseline Types': 'Tipos de Linha Base', 'Baseline added': 'Camada Base incluída', 'Baseline deleted': 'Camada Base Excluída', 'Baseline number of beds of that type in this unit.': 'Numero de camadas base de camas desse tipo nesta unidade.', 'Baseline updated': 'Linha Base actulizada', 'Baselines': 'Camadas Base', 'Baselines Details': 'Detalhes de Camadas Base', 'Basic Assessment': 'Avaliação Básica', 'Basic Assessment Reported': 'Avaliação Básica Relatada', 'Basic Details': 'Detalhes Básicos', 'Basic reports on the Shelter and drill-down by region': 'Relatórios básicos sobre o Abrigo e abertura por região', 'Baud': 'Transmissão', 'Baud rate to use for your modem - The default is safe for most cases': 'Taxa de transmissão para ser usada pelo seu modem - O padrão é seguro para a maioria dos casos', 'Beam': 'feixe', 'Bed Capacity': 'Capacidade de leitos', 'Bed Capacity per Unit': 'Capacidade cama por Unidade', 'Bed Type': 'Tipo de cama', 'Bed type already registered': 'Tipo de cama já registrado', 'Below ground level': 'Abaixo do nível do solo', 'Beneficiary Type': 'Tipo de beneficiário', "Bing Layers cannot be displayed if there isn't a valid API Key": "Bing Layers cannot be displayed if there isn't a valid API Key", 'Biological Hazard': 'Risco Biológico', 'Biscuits': 'Biscoitos', 'Blizzard': 'Nevasca', 'Blood Type (AB0)': 'Tipo sanguíneo (AB0)', 'Blowing Snow': 'Soprando neve', 'Boat': 'Barco', 'Bodies': 'Bodies', 'Bodies found': 'Corpos encontrados', 'Bodies recovered': 'corpos recuperados', 'Body': 'corpo', 'Body Recovery': 'Body Recovery', 'Body Recovery Request': 'Pedido de recuperação de corpos', 'Body Recovery Requests': 'Pedidos de recuperação de corpos', 'Bomb': 'Bomba', 'Bomb Explosion': 'Explosão de bomba', 'Bomb Threat': 'Ameaça de bomba', 'Border Color for Text blocks': 'Cor da borda para blocos de texto', 'Bounding Box Insets': 'Delimitadora Inserções Caixa', 'Bounding Box Size': 'CAIXA delimitadora Tamanho', 'Brand': 'Marca', 'Brand Details': 'Detalhes da Marca', 'Brand added': 'Marca incluída', 'Brand deleted': 'Marca excluída', 'Brand updated': 'marca atualizada', 'Brands': 'marcas', 'Bricks': 'Tijolos', 'Bridge Closed': 'PONTE FECHADA', 'Bucket': 'Balde', 'Buddhist': 'Budista', 'Budget': 'Orçamento', 'Budget Details': 'Detalhes de Orçamento', 'Budget Updated': 'Orçamento Atualizado', 'Budget added': 'Orçamento incluído', 'Budget deleted': 'Orçamento excluído', 'Budget updated': 'Orçamento atualizado', 'Budgeting Module': 'Módulo de Orçamento', 'Budgets': 'Orçamentos', 'Buffer': 'buffer', 'Bug': 'erro', 'Building Assessments': 'Avaliações de construção', 'Building Collapsed': 'Construção Fechada', 'Building Name': 'Nome do edifício', 'Building Safety Assessments': 'Regras de Segurança do Edifício', 'Building Short Name/Business Name': 'Nome curto/Nome completo do Edifício', 'Building or storey leaning': 'Edifício ou andar em inclinação', 'Built using the Template agreed by a group of NGOs working together as the': 'Construído de acordo com o formulário acordado por um grupo de ONGs', 'Bulk Uploader': 'Carregador em massa', 'Bundle': 'Pacote', 'Bundle Contents': 'Conteúdo do Pacote', 
'Bundle Details': 'Detalhes do Pacote', 'Bundle Updated': 'Pacote configurável ATUALIZADO', 'Bundle added': 'Pacote incluído', 'Bundle deleted': 'Pacote Excluído', 'Bundle updated': 'Pacote atualizado', 'Bundles': 'Pacotes', 'Burn': 'Gravar', 'Burn ICU': 'Queimar ICU', 'Burned/charred': 'Queimados/carbonizados', 'By Facility': 'Por Facilidade', 'By Inventory': 'Por Inventário', 'By Person': 'Por pessoa', 'By Site': 'Por Site', 'CBA Women': 'CBA Mulheres', 'CLOSED': 'CLOSED', 'CN': 'CN', 'CSS file %s not writable - unable to apply theme!': 'Arquivo CSS %s não é gravável - Impossível aplicar o tema!', 'Calculate': 'calcular', 'Camp': 'Acampamento', 'Camp Coordination/Management': 'Campo Coordenação/gestão', 'Camp Details': 'Detalhes do Alojamento', 'Camp Service': 'Serviço de Alojamento', 'Camp Service Details': 'Detalhe do Serviço de Campo', 'Camp Service added': 'Serviço de Alojamento incluído', 'Camp Service deleted': 'Serviço de Alojamento excluído', 'Camp Service updated': 'Serviço de campo atualizado', 'Camp Services': 'Serviço de campo', 'Camp Type': 'Tipo de Campo', 'Camp Type Details': 'Detalhes do tipo de campo', 'Camp Type added': 'Tipo de Campo incluso.', 'Camp Type deleted': 'Tipo de campo excluído.', 'Camp Type updated': 'Tipo De acampamento atualizado', 'Camp Types': 'TIPOS DE acampamento', 'Camp Types and Services': 'Tipos e serviços de acampamentos', 'Camp added': 'Alojamento incluído', 'Camp deleted': 'Alojamento excluído', 'Camp updated': 'Acampamento atualizado', 'Camps': 'Alojamentos', 'Can only disable 1 record at a time!': 'Pode desativar apenas 1 registro por vez!', 'Can only enable 1 record at a time!': 'Can only enable 1 record at a time!', "Can't import tweepy": 'Não pode importar tweepy', 'Cancel': 'Cancelar', 'Cancel Log Entry': 'Cancelar Registro De Entrada', 'Cancel Shipment': 'Cancelar Embarque', 'Canceled': 'cancelado', 'Candidate Matches for Body %s': 'Candidato Corresponde ao Corpo %s', 'Canned Fish': 'Conservas de Peixe', 'Cannot be empty': 'Não pode ser vazio', 'Cannot disable your own account!': 'Voce não pode desativar sua própria conta!', 'Capacity (Max Persons)': 'Capacidade (Máximo De pessoas)', 'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': 'CAPTURA informações sobre grupos Desastre Vítima (Turistas, passageiros, Famílias, etc. 
).', 'Capture Information on each disaster victim': 'Informações de captura em cada vítima Desastre', 'Capturing organizational information of a relief organization and all the projects they have in the region': 'Capturando informações organizacionais de uma organização de ajuda e todos os projetos têm na região', 'Capturing the projects each organization is providing and where': 'Capturando os projetos que cada organização está fornecendo e onde', 'Cardiology': 'Cardiologia', 'Cassava': 'Mandioca', 'Casual Labor': 'Trabalho Casual', 'Casualties': 'Acidentes', 'Catalog': 'catálogo', 'Catalog Details': 'Detalhes do Catálogo', 'Catalog Item': 'Item do catálogo de', 'Catalog Item added': 'Item incluído no catálogo', 'Catalog Item deleted': 'Catálogo de Item excluído', 'Catalog Item updated': 'Item do catálogo de atualização', 'Catalog Items': 'Itens do Catálogo', 'Catalog added': 'Catálogo Incluído', 'Catalog deleted': 'Catálogo excluído', 'Catalog updated': 'Catálogo Atualizado', 'Catalogs': 'Catálogos', 'Categories': 'Categorias', 'Category': 'category', "Caution: doesn't respect the framework rules!": 'Cuidado: não respeitar as regras de enquadramento!', 'Ceilings, light fixtures': 'Tetos, luminarias', 'Cell Phone': 'Cell Phone', 'Central point to record details on People': 'Ponto Central para registrar detalhes sobre pessoas', 'Certificate': 'Certificate', 'Certificate Catalog': 'Catálogo de Certificados', 'Certificate Details': 'Detalhes do Certificado', 'Certificate Status': 'Status do Certificado', 'Certificate added': 'Certificado incluído', 'Certificate deleted': 'Certificado Removido', 'Certificate updated': 'Certificado Actualizado', 'Certificates': 'Certificados', 'Certification': 'Certificação', 'Certification Details': 'Detalhes da Certificação', 'Certification added': 'Certificação incluída', 'Certification deleted': 'Certificação excluída', 'Certification updated': 'Certificação atualizada', 'Certifications': 'Certificações', 'Certifying Organization': 'Certificação da Organização', 'Change Password': 'Alterar Senha', 'Check': 'Verifique', 'Check Request': 'Verificar Pedido', 'Check for errors in the URL, maybe the address was mistyped.': 'Verifique se há erros na URL, talvez o endereço foi digitado incorretamente.', 'Check if the URL is pointing to a directory instead of a webpage.': 'Verifique se a URL está apontando para um diretório em vez de uma página da Web.', 'Check outbox for the message status': 'Outbox para verificar o status da mensagem', 'Check to delete': 'Verificar para Excluir', 'Check-in': 'Registrar Entrada', 'Check-in at Facility': 'Check-in at Facility', 'Check-out': 'Registrar Saída', 'Checked': 'verificado', 'Checklist': 'lista de verificação', 'Checklist created': 'Lista de verificação criada', 'Checklist deleted': 'Lista de verificação excluída', 'Checklist of Operations': 'Lista de Verificação das Operações', 'Checklist updated': 'Lista de verificação atualizado', 'Chemical Hazard': 'Risco Químico', 'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': 'Ameaça ou ataque Químico, Biológico, Radiológico, Nuclear ou de alto concentração Explosiva', 'Chicken': 'Frango', 'Child': 'Criança', 'Child (2-11)': 'Criança (2-11)', 'Child (< 18 yrs)': 'Criança (< 18 anos)', 'Child Abduction Emergency': 'Emergência de Rapto De Criança', 'Child headed households (<18 yrs)': 'Famílias chefiadas por Filho (<18 anos)', 'Children (2-5 years)': 'Crianças (2 a 5 anos)', 'Children (5-15 years)': 'Crianças (5 a 15 anos)', 'Children (< 2 
years)': 'Crianças (< 2 anos)', 'Children in adult prisons': 'Crianças nas prisões para adultos', 'Children in boarding schools': 'Crianças em internatos', 'Children in homes for disabled children': 'Crianças em lares para crianças deficientes', 'Children in juvenile detention': 'Crianças em detenção juvenil', 'Children in orphanages': 'Crianças nos orfanatos', 'Children living on their own (without adults)': 'Crianças vivendo por conta própria (sem adultos)', 'Children not enrolled in new school': 'Crianças não matriculadas em Nova Escola', 'Children orphaned by the disaster': 'Crianças órfãs pela catástrofe', 'Children separated from their parents/caregivers': 'Crianças SEPARADAS de seus pais/responsáveis', 'Children that have been sent to safe places': 'Crianças que foram enviadas para locais seguros', 'Children who have disappeared since the disaster': 'Crianças que desapareceram desde o desastre', 'Chinese (Simplified)': 'Chinese (Simplified)', 'Chinese (Taiwan)': 'Chinês (Taiwan)', 'Cholera Treatment': 'Tratamento da cólera', 'Cholera Treatment Capability': 'Capacidade de Tratamento da Cólera', 'Cholera Treatment Center': 'Centro de Tratamento de Cólera', 'Cholera-Treatment-Center': 'Centro de tratamento de cólera', 'Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance.': 'Escolha uma nova alocação baseada na nova avaliação e julgamento do time. Condições severas que afetem o prédio inteiro são base para uma colocação INSEGURA. Grave localizada e no geral condições moderadas podem exigir um USO RESTRITO. Local INSPECCIONADO cartaz na entrada principal. 
Coloque todos os outros cartazes em cada entrada importante.', 'Christian': 'Cristão', 'Church': 'Igreja', 'Circumstances of disappearance, other victims/witnesses who last saw the missing person alive.': 'Circunstâncias do desaparecimento, outras vítimas/testemunhas que viram pela última vez a pessoa desaparecida viva.', 'City': 'CIDADE', 'Civil Emergency': 'Emergência Civil', 'Cladding, glazing': 'Revestimentos, vidros', 'Click on the link': 'Clique no link', 'Client IP': 'Client IP', 'Climate': 'Climate', 'Clinical Laboratory': 'Laboratório clínico', 'Clinical Operations': 'operações clínicas', 'Clinical Status': 'estado clínico', 'Close map': 'Close map', 'Closed': 'fechado', 'Clothing': 'vestuário', 'Cluster': 'agrupamento', 'Cluster Details': 'Detalhes do Grupo', 'Cluster Distance': 'Distância entre Grupos', 'Cluster Subsector': 'Subsector de Grupos', 'Cluster Subsector Details': 'Detalhes do sub-setor do cluster', 'Cluster Subsector added': 'Subsector de Grupos incluído', 'Cluster Subsector deleted': 'Subsector de Grupos removido', 'Cluster Subsector updated': 'Sub-setor do cluster atualizado', 'Cluster Subsectors': 'Sub-setores do cluster', 'Cluster Threshold': 'Limite do Cluster', 'Cluster added': 'Agrupamento incluído', 'Cluster deleted': 'Grupo removido', 'Cluster updated': 'Cluster atualizado', 'Cluster(s)': 'Grupo(s)', 'Clusters': 'clusters', 'Code': 'Código', 'Cold Wave': 'Onda fria', 'Collapse, partial collapse, off foundation': 'Colapso, colapso parcial, fora da fundação', 'Collective center': 'Centro coletivo', 'Color for Underline of Subheadings': 'Cor do sublinhado dos subtítulos', 'Color of Buttons when hovering': 'Cor dos botões ao passar o cursor', 'Color of bottom of Buttons when not pressed': 'Cor da parte inferior dos botões quando não forem pressionados', 'Color of bottom of Buttons when pressed': 'Cor da parte de baixo dos botões quando pressionados', 'Color of dropdown menus': 'Cor dos menus suspensos', 'Color of selected Input fields': 'Cor dos campos de entrada selecionados', 'Color of selected menu items': 'Cor dos itens de menu selecionados', 'Column Choices (One Per Line': 'Coluna de opções (uma por linha)', 'Columns, pilasters, corbels': 'Colunas, pilastras, cavaletes', 'Combined Method': 'Método combinado', 'Come back later.': 'Volte mais tarde.', 'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': 'Volte mais tarde. Todos que visitam este site estão, provavelmente, enfrentando o mesmo problema que você.', 'Comments': 'Comentários', 'Commercial/Offices': 'Comercial/Escritórios', 'Commit': 'Consolidar', 'Commit Date': 'Data de Consolidação', 'Commit from %s': 'Consolidação de %s', 'Commit. Status': 'Commit. 
Status', 'Commiting a changed spreadsheet to the database': 'Consolidando uma planilha alterada no banco de dados', 'Commitment': 'Comprometimento', 'Commitment Added': 'Compromisso Incluído', 'Commitment Canceled': 'Compromisso cancelado', 'Commitment Details': 'Detalhes do compromisso', 'Commitment Item': 'Item do compromisso', 'Commitment Item Details': 'Detalhes do item de compromisso', 'Commitment Item added': 'Item de compromisso incluído', 'Commitment Item deleted': 'Item do compromisso excluído', 'Commitment Item updated': 'Compromisso Item atualizado', 'Commitment Items': 'Itens compromisso', 'Commitment Status': 'Empenhamento Status', 'Commitment Updated': 'Compromisso Atualizado', 'Commitments': 'Compromissos', 'Committed': 'Comprometido', 'Committed By': 'Cometido por', 'Committed People': 'Committed People', 'Committed Person Details': 'Committed Person Details', 'Committed Person updated': 'Committed Person updated', 'Committing Inventory': 'Confirmando Inventário', 'Committing Organization': 'Committing Organization', 'Committing Person': 'Committing Person', 'Communication problems': 'Problemas de Comunicação', 'Community Centre': 'Comunidade Centro', 'Community Health Center': 'Centro Comunitário de Saúde', 'Community Member': 'Membro da Comunidade', 'Competencies': 'Competências', 'Competency': 'Competência', 'Competency Details': 'Competência Detalhes', 'Competency Rating Catalog': 'Catálogo de Classificação de Competências', 'Competency Rating Details': 'Detalhes da classificação de competências', 'Competency Rating added': 'Classificação de Habilidades incluída', 'Competency Rating deleted': 'Classificação de competência excluída', 'Competency Rating updated': 'Atualização da classificação de competências', 'Competency Ratings': 'Classificação de competências', 'Competency added': 'Competência incluída', 'Competency deleted': 'Competência excluído', 'Competency updated': 'Competência atualizada', 'Complete': 'Concluir', 'Completed': 'Concluído', 'Complexion': 'Compleição', 'Compose': 'Redigir', 'Compromised': 'Comprometida', 'Concrete frame': 'Quadro concreto', 'Concrete shear wall': 'Muro de corteconcreto', 'Condition': 'Condição', 'Configurations': 'Configurações', 'Configure Run-time Settings': 'Configurar as configurações de tempo de execução', 'Confirm Shipment Received': 'Confirmar Remessa Recebida', 'Confirmed': 'Confirmado', 'Confirming Organization': 'Confirmando Organização', 'Conflict Details': 'Detalhes Do conflito', 'Conflict Resolution': 'Resolução de Conflito', 'Consignment Note': 'NOTA REMESSA', 'Constraints Only': 'Somente restrições', 'Consumable': 'Consumível', 'Contact': 'contato', 'Contact Data': 'Dados contato', 'Contact Details': 'Detalhes do contato', 'Contact Info': 'Informações de Contato', 'Contact Information': 'Informações de Contato', 'Contact Information Added': 'Informação de contato incluída', 'Contact Information Deleted': 'Informação de contato excluída', 'Contact Information Updated': 'Informações de contato atualizadas', 'Contact Method': 'Método de Contato', 'Contact Name': 'Nome do contato', 'Contact Person': 'Pessoa de Contato', 'Contact Phone': 'Telefone para Contato', 'Contact details': 'Detalhes do contato', 'Contact information added': 'Informações de contato incluídas', 'Contact information deleted': 'Informações de contato excluídas', 'Contact information updated': 'Informações de contato atualizadas', 'Contact person(s) in case of news or further questions (if different from reporting person). 
Include telephone number, address and email as available.': 'Pessoa(s) a contactar em caso de notícias ou mais perguntas (se for diferente da pessoa que reportou). Incluir número de telefone, endereço e correio electrónico se disponível.', 'Contact us': 'Fale Conosco', 'Contacts': 'contatos', 'Contents': 'Conteúdo', 'Contributor': 'Contribuidor', 'Conversion Tool': 'Ferramenta de Conversão', 'Cooking NFIs': 'Cozinhando NFIs', 'Cooking Oil': 'Cozinhando Óleo', 'Coordinate Conversion': 'COORDENAR a Conversão', 'Coping Activities': 'Atividades de lida', 'Copy': 'copiar', 'Corn': 'Milho', 'Cost Type': 'Tipo de custo', 'Cost per Megabyte': 'Custo por megabyte', 'Cost per Minute': 'Custo por Minuto', 'Country': 'País', 'Country of Residence': 'País de Residência', 'County': 'Município', 'Course': 'Curso', 'Course Catalog': 'Catálogo de Cursos', 'Course Certificate Details': 'Detalhes do Certificado do Curso', 'Course Certificate added': 'Certificado do Curso adicionado', 'Course Certificate deleted': 'Certificado do Curso excluído', 'Course Certificate updated': 'Certificado do Curso atualizado', 'Course Certificates': 'Certificados de Curso', 'Course Certificates': 'Certificados de Curso', 'Course Details': 'Detalhes do curso', 'Course added': 'Curso incluído', 'Course deleted': 'Curso excluído', 'Course updated': 'Curso atualizado', 'Courses': 'Cursos', 'Create & manage Distribution groups to receive Alerts': 'Criar & GERENCIAR grupos de distribuição de receber alertas', 'Create Checklist': 'Criar Lista de Verificação', 'Create Group Entry': 'Criar Grupo De Entrada', 'Create Impact Assessment': 'Criar Avaliação de Impacto', 'Create Mobile Impact Assessment': 'Criar Avaliação de Impacto Movel', 'Create New Asset': 'Criar Novo Recurso', 'Create New Catalog Item': 'Criar Novo Item de Catálogo', 'Create New Event': 'Criar Novo Evento', 'Create New Item Category': 'Criar Nova Categoria de Item', 'Create New Request': 'Criar Novo Pedido', 'Create New Scenario': 'Criar Novo cenário', 'Create New Vehicle': 'Create New Vehicle', 'Create Rapid Assessment': 'Criar Avaliação Rápida', 'Create Request': 'Criar solicitação', 'Create Task': 'Criar Tarefa', 'Create a group entry in the registry.': 'Criar uma entrada de grupo no registro.', 'Create new Office': 'Criar novo escritório', 'Create new Organization': 'Criar nova organização', 'Create, enter, and manage surveys.': 'Criar, digitar e gerenciar pesquisas.', 'Creation of Surveys': 'Criação de Pesquisas', 'Creation of assessments': 'Creation of assessments', 'Credential Details': 'Detalhes da Credencial', 'Credential added': 'Credencial incluída', 'Credential deleted': 'Credencial Excluída', 'Credential updated': 'Credencial ATUALIZADA', 'Credentialling Organization': 'Organização acreditada', 'Credentials': 'credenciais', 'Credit Card': 'Cartão de crédito', 'Crime': 'crime', 'Criteria': 'Critério', 'Currency': 'moeda', 'Current Entries': 'Entradas Atuais', 'Current Group Members': 'Membros do Grupo atual', 'Current Identities': 'Identidades atuais', 'Current Location': 'Posição Atual', 'Current Location Country': 'Current Location Country', 'Current Location Phone Number': 'Current Location Phone Number', 'Current Location Treating Hospital': 'Current Location Treating Hospital', 'Current Log Entries': 'Entradas de Log atuais', 'Current Memberships': 'Participações atuais', 'Current Mileage': 'Current Mileage', 'Current Notes': 'Notes atual', 'Current Records': 'Registros atuais', 'Current Registrations': 'Registros atuais', 'Current Status': 'Status 
atual', 'Current Team Members': 'Os atuais membros da equipe', 'Current Twitter account': 'Conta atual no Twitter', 'Current community priorities': 'Atuais prioridades da comunidade', 'Current general needs': 'Atuais necessidades gerais', 'Current greatest needs of vulnerable groups': 'Maiores necessidades atuais dos grupos vulneráveis', 'Current health problems': 'Problemas de saúde atuais', 'Current number of patients': 'Número atual de pacientes', 'Current problems, categories': 'Problemas atuais, categorias', 'Current problems, details': 'Problemas atuais, detalhes', 'Current request': 'Pedido atual', 'Current response': 'Resposta atual', 'Current session': 'Sessão atual', 'Currently no Certifications registered': 'Nenhuma certificação registrada atualmente', 'Currently no Competencies registered': 'Nenhuma competência registrada atualmente', 'Currently no Course Certificates registered': 'Nenhum Curso Certificado registrado atualmente', 'Currently no Credentials registered': 'Nenhuma credencial registrada atualmente', 'Currently no Missions registered': 'Nenhuma missão registrada atualmente', 'Currently no Skill Equivalences registered': 'Nenhuma equivelência de habilidade registrada atualmente', 'Currently no Skills registered': 'Currently no Skills registered', 'Currently no Trainings registered': 'Atualmente não há treinamentos registrados', 'Currently no entries in the catalog': 'Nenhuma entrada no catálogo atualmente', 'Custom Database Resource (e.g., anything defined as a resource in Sahana)': 'Bnaco de Dados customizado de Recursos (por exemplo, nada definido como recurso no Sahana)', 'DC': 'DC', 'DNA Profile': 'Perfil de DNA', 'DNA Profiling': 'Perfil de DNA', 'DVI Navigator': 'Navegador DVI', 'Dam Overflow': 'Barragem ESTOURO', 'Damage': 'dano', 'Dangerous Person': 'Pessoa perigosa', 'Dashboard': 'Painel', 'Data': 'Dados', 'Data Type': 'Data Type', 'Data uploaded': 'Dados carregados', 'Database': 'DATABASE', 'Date': 'date', 'Date & Time': 'Date & Time', 'Date Available': 'Data Disponível', 'Date Received': 'Data do recebimento', 'Date Requested': 'Data do pedido', 'Date Required': 'Necessária', 'Date Sent': 'Data de Envio', 'Date Until': 'Data Até', 'Date and Time': 'Data e Hora', 'Date and time this report relates to.': 'Data e hora relacionadas a este relatório.', 'Date of Birth': 'Data de Nascimento', 'Date of Latest Information on Beneficiaries Reached': 'Data da última informação sobre Beneficiários Alcançado', 'Date of Report': 'Data do relatório', 'Date of Treatment': 'Date of Treatment', 'Date/Time': 'data/hora', 'Date/Time of Find': 'Pesquisa de data/hora', 'Date/Time of disappearance': 'Data/hora do desaparecimento', 'Date/Time when found': 'Data/hora quando foi encontrado', 'Date/Time when last seen': 'Data/ hora em que foi visto pela última vez', 'De-duplicator': 'Anti duplicador', 'Dead Bodies': 'Dead Bodies', 'Dead Body': 'Cadáver', 'Dead Body Details': 'Detalhes do Cadáver', 'Dead Body Reports': 'Relatórios de Cadáver', 'Dead body report added': 'Relatório de cadaver incluso.', 'Dead body report deleted': 'Relatório de cadáver excluído.', 'Dead body report updated': 'Relatório de cadáver atualizado', 'Deaths in the past 24h': 'Mortes nas últimas 24 horas', 'Deaths/24hrs': 'Mortes/24hrs', 'Decimal Degrees': 'Graus decimais', 'Decision': 'DECISÃO', 'Decomposed': 'Decomposto', 'Default Height of the map window.': 'Altura Padrão da janela do mapa.', 'Default Location': 'Default Location', 'Default Map': 'Mapa padrão', 'Default Marker': 'Padrão de mercado', 'Default 
Width of the map window.': 'Padrão de largura da janela do mapa.', 'Default synchronization policy': 'Política de sincronização de padrão', 'Defecation area for animals': 'Área de defecação para animais', 'Define Scenarios for allocation of appropriate Resources (Human, Assets & Facilities).': 'Cenários De definir para alocação adequado de recursos (humanos, Ativos & instalações).', 'Defines the icon used for display of features on handheld GPS.': 'Define o ícone utilizado para exibição de recursos no GPS portátil.', 'Defines the icon used for display of features on interactive map & KML exports.': 'Define o ícone utilizado para exibição de recursos no mapa interativo & exportações KML.', 'Defines the marker used for display & the attributes visible in the popup.': 'Define o marcador utilizado para exibir & os atributos visíveis no pop-up.', 'Degrees must be a number between -180 and 180': 'Os graus devem ser um número entre -180 e 180', 'Dehydration': 'Desidratação', 'Delete': 'Excluir', 'Delete Alternative Item': 'EXCLUIR Item Alternativo', 'Delete Assessment': 'Excluir Avaliação', 'Delete Assessment Summary': 'Excluir Resumo da Avaliação', 'Delete Asset': 'Excluir Ativo', 'Delete Asset Assignment': 'Excluir o recurso designado', 'Delete Asset Log Entry': 'EXCLUIR recurso de entrada de Log', 'Delete Baseline': 'apagar linha base', 'Delete Baseline Type': 'apagar tipo de linha base', 'Delete Brand': 'apagar marca', 'Delete Budget': 'apagar orçamento', 'Delete Bundle': 'apagar pacote', 'Delete Catalog': 'Excluir o Catálogo', 'Delete Catalog Item': 'apagar item do catálogo', 'Delete Certificate': 'Excluir Certificado', 'Delete Certification': 'Excluir Certificação', 'Delete Cluster': 'Exclui Cluster', 'Delete Cluster Subsector': 'EXCLUIR Cluster Subsector', 'Delete Commitment': 'Excluir Compromisso', 'Delete Commitment Item': 'Excluir Item de Compromisso', 'Delete Competency': 'Excluir Competência', 'Delete Competency Rating': 'Excluir Classificação da Competência', 'Delete Contact Information': 'Excluir Informações de Contato', 'Delete Course': 'Excluir Curso', 'Delete Course Certificate': 'Excluir Certificado do Curso', 'Delete Credential': 'Excluir Credencial', 'Delete Document': 'Excluir documento', 'Delete Donor': 'EXCLUIR Dador', 'Delete Entry': 'Excluir Entrada', 'Delete Event': 'Excluir Evento', 'Delete Feature Class': 'Excluir Classe de Recurso', 'Delete Feature Layer': 'Excluir Camada de Componentes', 'Delete GPS data': 'Delete GPS data', 'Delete Group': 'Excluir Grupo', 'Delete Home': 'Delete Home', 'Delete Hospital': 'Excluir Hospital', 'Delete Image': 'Excluir Imagem', 'Delete Impact': 'Excluir Impacto', 'Delete Impact Type': 'Excluir Tipo De Impacto', 'Delete Incident Report': 'Excluir Relatório de Incidentes', 'Delete Inventory Item': 'EXCLUIR Item De Inventário', 'Delete Item': 'Excluir Item', 'Delete Item Category': 'EXCLUIR categoria de Itens', 'Delete Item Pack': 'EXCLUIR Pacote de Itens', 'Delete Job Role': 'Excluir Cargo', 'Delete Key': 'Tecla de exclusão', 'Delete Kit': 'EXCLUIR Kit', 'Delete Layer': 'Excluir Camada', 'Delete Level 1 Assessment': 'EXCLUIR Nível 1 Avaliação', 'Delete Level 2 Assessment': 'EXCLUIR Nível 2 Avaliação', 'Delete Location': 'Excluir locação', 'Delete Map Configuration': 'EXCLUIR Mapa de configuração', 'Delete Marker': 'EXCLUIR Marcador', 'Delete Membership': 'Excluir membro', 'Delete Message': 'Excluir mensagem', 'Delete Mission': 'EXCLUIR Missão', 'Delete Need': 'Excluir necessidades', 'Delete Need Type': 'Excluir tipos de necessidades', 
'Delete Office': 'Excluir escritório', 'Delete Organization': 'Excluir organização', 'Delete Patient': 'Delete Patient', 'Delete Peer': 'Excluir par', 'Delete Person': 'excluir pessoa', 'Delete Photo': 'Excluir Foto', 'Delete Population Statistic': 'EXCLUIR População Estatística', 'Delete Position': 'EXCLUIR Posição', 'Delete Project': 'Excluir Projeto', 'Delete Projection': 'Excluir Projeção', 'Delete Rapid Assessment': 'Excluir Avaliação Rápida', 'Delete Received Item': 'Excluir Item Recebido', 'Delete Received Shipment': 'Excluir Embarque Recebido', 'Delete Record': 'Excluir Registro', 'Delete Relative': 'Delete Relative', 'Delete Report': 'Excluir Relatório', 'Delete Request': 'Excluir Solicitação', 'Delete Request Item': 'Excluir item de solicitação', 'Delete Resource': 'Excluir Recurso', 'Delete Room': 'Excluir Sala', 'Delete Scenario': 'Excluir Cenário', 'Delete Section': 'Excluir seção', 'Delete Sector': 'Excluir Setor', 'Delete Sent Item': 'Excluir Item Enviado', 'Delete Sent Shipment': 'Excluir Embarque Enviado', 'Delete Service Profile': 'Excluir perfil de serviço', 'Delete Setting': 'Excluir Definição', 'Delete Skill': 'Excluir habilidade', 'Delete Skill Equivalence': 'Excluir equivalência de habilidade', 'Delete Skill Provision': 'Excluir Provisão de Habilidade', 'Delete Skill Type': 'Excluir Tipo de Habilidade', 'Delete Staff Type': 'Excluir Tipo De Equipe', 'Delete Status': 'Excluir Posição/Estado', 'Delete Subscription': 'Excluir assinatura', 'Delete Subsector': 'Excluir subsetor', 'Delete Survey Answer': 'Excluir reposta da pesquisa', 'Delete Survey Question': 'Excluir pergunta da pesquisa', 'Delete Survey Section': 'Excluir seção da pesquisa', 'Delete Survey Series': 'Excluir série da pesquisa', 'Delete Survey Template': 'Excluir modelo da pesquisa', 'Delete Training': 'Excluir Treinamento', 'Delete Unit': 'Excluir Unidade', 'Delete User': 'Excluir usuário', 'Delete Vehicle': 'Delete Vehicle', 'Delete Vehicle Details': 'Delete Vehicle Details', 'Delete Volunteer': 'EXCLUIR Voluntário', 'Delete Warehouse': 'Excluír Armazém', 'Delete from Server?': 'Excluir do Servidor?', 'Delphi Decision Maker': 'tomador de decisão Delphi', 'Demographic': 'Demográfico', 'Demonstrations': 'Demonstrações', 'Dental Examination': 'Exame Dentário', 'Dental Profile': 'Perfil Dentário', 'Deployment Location': 'Deployment Location', 'Describe the condition of the roads to your hospital.': 'Descreva as condições da estrada até o seu hospital.', 'Describe the procedure which this record relates to (e.g. 
"medical examination")': 'Descreva o procedimento ao qual este registro está relacionado (Ex: "exame médico")', 'Description': 'Descrição', 'Description of Contacts': 'Descrição dos Contatos', 'Description of defecation area': 'Descrição da área de defecação', 'Description of drinking water source': 'Descrição da fonte de água potável', 'Description of sanitary water source': 'Descrição da fonte de água sanitária', 'Description of water source before the disaster': 'Descrição da fonte de água antes do desastre', 'Descriptive Text (e.g., Prose, etc)': 'Texto Descritivo (por exemplo, Prosa, etc.)', 'Desire to remain with family': 'O desejo de permanecer com a família', 'Destination': 'destino', 'Destroyed': 'Destruído', 'Details': 'detalhes', 'Details field is required!': 'Campo de detalhes é obrigatório!', 'Dialysis': 'Diálise', 'Diaphragms, horizontal bracing': 'Diafragmas, interditará horizontal', 'Diarrhea': 'Diarréia', 'Dignitary Visit': 'Visita de Dignatários', 'Direction': 'Endereço', 'Disable': 'Desativar', 'Disabled': 'desativado', 'Disabled participating in coping activities': 'Deficiente participando de enfrentamento', 'Disabled?': 'Desativado?', 'Disaster Victim Identification': 'Identificação de Vítima de Desastre', 'Disaster Victim Registry': 'Registro de Vítima de Desastre', 'Disaster clean-up/repairs': 'Desastre limpeza/reparos', 'Discharge (cusecs)': 'Quitação (cusecs)', 'Discharges/24hrs': 'Descargas/24horas', 'Discussion Forum': 'Fórum de Discussão', 'Discussion Forum on item': 'Fórum de discussão do item', 'Disease vectors': 'Vectores doença', 'Dispensary': 'Dispensário', 'Displaced': 'Deslocadas', 'Displaced Populations': 'Populações deslocadas', 'Display Polygons?': 'exibir Polígonos?', 'Display Routes?': 'Exibir Rotas?', 'Display Tracks?': 'exibir Trilhas?', 'Display Waypoints?': 'Exibir Rota?', 'Distance between defecation area and water source': 'Distância entre área de esgoto e fonte de água', 'Distance from %s:': 'Distância de %s:', 'Distance(Kms)': 'Distância(Kms)', 'Distribution': 'Distribuição de', 'Distribution groups': 'Grupos de distribuição', 'District': 'Distrito', 'Do you really want to delete these records?': 'Você realmente deseja excluir esses registros?', 'Do you want to cancel this received shipment? The items will be removed from the Inventory. This action CANNOT be undone!': 'Você deseja cancelar este carregamento que foi recebido? Os itens serão removidos do inventário. Esta ação não pode ser desfeita!', 'Do you want to cancel this sent shipment? The items will be returned to the Inventory. This action CANNOT be undone!': 'Você deseja cancelar esse carregamento enviado? Os itens serão retornados para o inventário. 
Esta ação não pode ser desfeita!', 'Do you want to receive this shipment?': 'Você deseja receber esse carregamento?', 'Do you want to send these Committed items?': 'Você deseja enviar esses itens Consolidados?', 'Do you want to send this shipment?': 'Você deseja enviar este carregamento?', 'Document': 'documento', 'Document Details': 'Detalhes do Documento', 'Document Scan': 'Scanear Documento', 'Document added': 'Documento incluído', 'Document deleted': 'Documento excluído', 'Document removed': 'Document removed', 'Document updated': 'Documento Atualizado', 'Documents': 'Documentos', 'Documents and Photos': 'Documentos e Fotos', 'Does this facility provide a cholera treatment center?': 'Esta facilidade proporciona um centro de tratamento da cólera?', 'Doing nothing (no structured activity)': 'Fazendo nada (sem atividade estruturada)', 'Dollars': 'dólares', 'Domain': 'domínio', 'Domestic chores': 'Afazeres domésticos', 'Donated': 'Doado', 'Donation Certificate': 'Certificado de doaçao', 'Donation Phone #': 'Número de Telefone de doaçao', 'Donor': 'Dador', 'Donor Details': 'Doador Detalhes', 'Donor added': 'Doador incluído', 'Donor deleted': 'Doador excluído', 'Donor updated': 'Doador ATUALIZADO', 'Donors': 'Doadores', 'Donors Report': 'Relatório de Doadores', 'Door frame': 'Quadro de porta', 'Download PDF': 'Fazer download do PDF', 'Download Template': 'Download Template', 'Draft': 'rascunho', 'Drainage': 'Drenagem', 'Drawing up a Budget for Staff & Equipment across various Locations.': 'Elaborar um orçamento para Equipe & Equipamento de vários locais.', 'Drill Down by Group': 'Detalhar por grupo', 'Drill Down by Incident': 'Detalhar por incidente', 'Drill Down by Shelter': 'Detalhar por abrigo', 'Driving License': 'Carteira de Motorista', 'Drought': 'Seca', 'Drugs': 'Drogas', 'Dug Well': 'Cavaram Bem', 'Duplicate?': 'Duplicado?', 'Duration': 'Duração', 'Dust Storm': 'Tempestade de Poeira', 'Dwelling': 'Habitação', 'Dwellings': 'Habitações', 'E-mail': 'Correio eletrônico', 'EMS Reason': 'Razão EMS', 'EMS Status': 'EMS Status', 'ER Status': 'ER Status', 'ER Status Reason': 'Razão ER Status', 'EXERCISE': 'EXERCISE', 'Early Recovery': 'Início De Recuperação', 'Earth Enabled?': 'Earth Enabled?', 'Earthquake': 'Terremotos', 'Edit': 'Editar', 'Edit Activity': 'Editar Atividade', 'Edit Address': 'Editar Endereço', 'Edit Alternative Item': 'Editar Item Alternativo', 'Edit Application': 'Editar Aplicação', 'Edit Assessment': 'Editar avaliação', 'Edit Assessment Summary': 'Editar resumo da avaliação', 'Edit Asset': 'Editar recurso', 'Edit Asset Assignment': 'Editar designação do recurso', 'Edit Asset Log Entry': 'EDITAR ENTRADA DE Log de ATIVOs', 'Edit Baseline': 'Editar base de avaliação', 'Edit Baseline Type': 'Editar tipo de base de avaliação', 'Edit Brand': 'Editar marca', 'Edit Budget': 'Editar orçamento', 'Edit Bundle': 'Editar Pacote', 'Edit Camp': 'EDITAR acampamento', 'Edit Camp Service': 'EDITAR Serviço de acampamento', 'Edit Camp Type': 'Editar Tipo de Campo', 'Edit Catalog': 'Editar catálogo', 'Edit Catalog Item': 'Editar item do catálogo', 'Edit Certificate': 'Editar Certificado', 'Edit Certification': 'Editar Certificação', 'Edit Cluster': 'Editar grupo', 'Edit Cluster Subsector': 'Editar subgrupo', 'Edit Commitment': 'Editar compromisso', 'Edit Commitment Item': 'Editar Item De Compromisso', 'Edit Committed Person': 'Edit Committed Person', 'Edit Competency': 'Editar Competência', 'Edit Competency Rating': 'Editar Classificação da Competência', 'Edit Contact': 'Editar Contato', 'Edit 
Contact Information': 'Editar Informações de Contato', 'Edit Contents': 'Editar Conteúdo', 'Edit Course': 'Editar Curso', 'Edit Course Certificate': 'Editar Certificado de Curso', 'Edit Credential': 'Editar Credencial', 'Edit Dead Body Details': 'Editar Detalhes do Cadáver', 'Edit Description': 'Editar Descrição', 'Edit Details': 'Editar detalhes', 'Edit Disaster Victims': 'Editar vítimas do desastre', 'Edit Document': 'Editar documento', 'Edit Donor': 'Editar Doador', 'Edit Email Settings': 'Editar As Configurações De E-Mail', 'Edit Entry': 'Editar Entrada', 'Edit Event': 'Editar evento', 'Edit Facility': 'Editar Instalação', 'Edit Feature Class': 'EDITAR CLASSE DE RECURSO', 'Edit Feature Layer': 'Editar Camada de Recursos', 'Edit Flood Report': 'Editar Relatório de Enchente', 'Edit GPS data': 'Edit GPS data', 'Edit Gateway Settings': 'Editar Configurações de Gateway', 'Edit Group': 'Editar Grupo', 'Edit Home': 'Edit Home', 'Edit Hospital': 'Editar Hospital', 'Edit Human Resource': 'Editar Recursos Humanos', 'Edit Identification Report': 'Editar Relatório de identificação', 'Edit Identity': 'Editar Identidade', 'Edit Image': 'Editar Imagem', 'Edit Image Details': 'Editar Detalhes da Imagem', 'Edit Impact': 'Editar Impacto', 'Edit Impact Type': 'Editar Tipo De Impacto', 'Edit Import File': 'Edit Import File', 'Edit Incident Report': 'Editar Relatório de Incidente', 'Edit Inventory Item': 'Editar Item De Inventário', 'Edit Item': 'Editar Item', 'Edit Item Category': 'Editar Categoria de Item', 'Edit Item Pack': 'Editar Pacote de Itens', 'Edit Job Role': 'Editar cargo', 'Edit Key': 'Editar Tecla', 'Edit Kit': 'Editar Kit', 'Edit Layer': 'Editar Camada', 'Edit Level %d Locations?': 'Editar Locais de Nível %d?', 'Edit Level 1 Assessment': 'Editar Avaliação Nível 1', 'Edit Level 2 Assessment': 'Editar Avaliação Nível 2', 'Edit Location': 'Editar Local', 'Edit Log Entry': 'EDITAR ENTRADA DE Log', 'Edit Map Configuration': 'Editar Mapa de configuração', 'Edit Map Services': 'Editar mapa de serviços', 'Edit Marker': 'Editar Marcador', 'Edit Membership': 'Editar inscrição', 'Edit Message': 'Editar mensagem', 'Edit Messaging Settings': 'Editar Configurações De Mensagens', 'Edit Mission': 'Editar Missão', 'Edit Modem Settings': 'Editar Configurações Do Modem', 'Edit Need': 'Editar Necessidade', 'Edit Need Type': 'Editar tipo de necessidade', 'Edit Note': 'Editar nota', 'Edit Office': 'Editar Escritório', 'Edit Options': 'Editar Opções', 'Edit Organization': 'Editar Organização', 'Edit Parameters': 'Editar Parâmetros', 'Edit Patient': 'Edit Patient', 'Edit Peer Details': 'Editar Detalhes do Par', 'Edit Person Details': 'Editar detalhes pessoais', 'Edit Personal Effects Details': 'Editar detalhes de objectos pessoais', 'Edit Photo': 'Editar Foto', 'Edit Population Statistic': 'Editar Estatística da População', 'Edit Position': 'Editar Posição', 'Edit Problem': 'Editar Problema', 'Edit Project': 'Editar Projecto', 'Edit Projection': 'Editar Projeção', 'Edit Rapid Assessment': 'Editar Avaliação Rápida', 'Edit Received Item': 'Editar Item Recebido', 'Edit Received Shipment': 'Editar Embarque Recebido', 'Edit Record': 'Editar Registro', 'Edit Registration': 'Editar Registro', 'Edit Registration Details': 'Editar Detalhes De Registro', 'Edit Relative': 'Edit Relative', 'Edit Report': 'Editar Relatório', 'Edit Request': 'Editar Pedido', 'Edit Request Item': 'Editar Item Pedido', 'Edit Requested Skill': 'Edit Requested Skill', 'Edit Resource': 'Editar Recurso', 'Edit River': 'EDITAR RIO', 'Edit Role': 
'Editar Função', 'Edit Room': 'Editar Sala', 'Edit SMS Settings': 'Edit SMS Settings', 'Edit SMTP to SMS Settings': 'Edit SMTP to SMS Settings', 'Edit Scenario': 'Editar cenário', 'Edit Sector': 'Editar Setor', 'Edit Sent Item': 'Editar Item Enviado', 'Edit Setting': 'Editar Definição', 'Edit Settings': 'Editar Configurações', 'Edit Shelter': 'EDITAR ABRIGO', 'Edit Shelter Service': 'Editar Serviço de Abrigo', 'Edit Shelter Type': 'EDITAR Tipo De Abrigo', 'Edit Skill': 'editar competência', 'Edit Skill Equivalence': 'Editar Equivalência de Habilidade', 'Edit Skill Provision': 'Editar Habilidade de Fornecimento', 'Edit Skill Type': 'editar tipo de competência', 'Edit Solution': 'editar solução', 'Edit Staff': 'editar pessoal', 'Edit Staff Member Details': 'Editar detalhes do membro da equipe', 'Edit Staff Type': 'EDITAR Tipo De Equipe', 'Edit Subscription': 'Editar assinatura', 'Edit Subsector': 'EDITAR Subsector', 'Edit Survey Answer': 'Editar resposta da pesquisa', 'Edit Survey Question': 'Editar pergunta da pesquisa', 'Edit Survey Section': 'EDITAR Seção de Pesquisa', 'Edit Survey Series': 'EDITAR Pesquisa de Série', 'Edit Survey Template': 'EDITAR MODELO DE PESQUISA', 'Edit Task': 'Editar Tarefa', 'Edit Team': 'Editar equipe', 'Edit Theme': 'Editar tema', 'Edit Themes': 'EDITAR TEMAs', 'Edit Ticket': 'EDITAR Bilhete', 'Edit Track': 'EDITAR RASTREAMENTO', 'Edit Training': 'Editar Treinamento', 'Edit Tropo Settings': 'Editar Configurações Tropo', 'Edit User': 'Editar Usuário', 'Edit Vehicle': 'Edit Vehicle', 'Edit Vehicle Details': 'Edit Vehicle Details', 'Edit Volunteer Availability': 'Editar Disponibilidade de Voluntário', 'Edit Volunteer Details': 'Editar Detalhes de Voluntário', 'Edit Warehouse': 'Editar Armazém', 'Edit Web API Settings': 'Edit Web API Settings', 'Edit current record': 'Editar Registro Atual', 'Edit message': 'Editar mensagem', 'Edit the Application': 'Editar a Aplicação', 'Editable?': 'Editável?', 'Education': 'Educação', 'Education materials received': 'Materiais de educação recebido', 'Education materials, source': 'materiais de Educação, origem', 'Effects Inventory': 'Inventário de efeitos', 'Eggs': 'Ovos', 'Either a shelter or a location must be specified': 'Um abrigo ou um local deve ser especificado', 'Either file upload or document URL required.': 'Um arquivo de upload ou URL do documento são necessários.', 'Either file upload or image URL required.': 'Um arquivo de upload ou URL de imagem são necessárias.', 'Elderly person headed households (>60 yrs)': 'Chefes de Familia de idade avançada (>60 anos)', 'Electrical': 'Elétrico', 'Electrical, gas, sewerage, water, hazmats': 'Elétrica, gás, esgotos, água, hazmats', 'Elevated': 'Elevado', 'Elevators': 'Elevadores', 'Email': 'E-MAIL', 'Email Address': 'Endereço de e-mail', 'Email Address to which to send SMS messages. Assumes sending to phonenumber@address': 'Email Address to which to send SMS messages. 
Assumes sending to phonenumber@address', 'Email Settings': 'Configurações de e-mail', 'Email settings updated': 'As configurações de e-mail atualizado', 'Embalming': 'Embalsamento', 'Embassy': 'Embaixada', 'Emergency Capacity Building project': 'Plano de emergência de capacidade dos prédios', 'Emergency Department': 'Departamento de Emergência', 'Emergency Shelter': 'Abrigo de Emergência', 'Emergency Support Facility': 'Recurso De Suporte de emergência', 'Emergency Support Service': 'Suporte do Serviço de Emergência', 'Emergency Telecommunications': 'Emergência De Telecomunicações', 'Enable': 'Enable', 'Enable/Disable Layers': 'Ativar/Desativar Camadas', 'Enabled': 'Habilitado', 'Enabled?': 'Enabled?', 'Enabling MapMaker layers disables the StreetView functionality': 'Enabling MapMaker layers disables the StreetView functionality', 'End Date': 'Data de encerramento', 'End date': 'Data de Término', 'End date should be after start date': 'Data Final deve ser maior do que a data de início', 'End of Period': 'Fim de Período', 'English': 'Inglês', 'Enter Coordinates:': 'Entre as coordenadas:', 'Enter a GPS Coord': 'Digite uma Coordada GPS', 'Enter a name for the spreadsheet you are uploading (mandatory).': 'Digite um nome para a planilha que está fazendo Upload (obrigatório).', 'Enter a name for the spreadsheet you are uploading.': 'Enter a name for the spreadsheet you are uploading.', 'Enter a new support request.': 'Digite um pedido novo de suporte.', 'Enter a unique label!': 'Digite um rótulo exclusivo!', 'Enter a valid date before': 'Digite uma data válida antes de', 'Enter a valid email': 'Insira um email válido', 'Enter a valid future date': 'Digite uma data futura válida', 'Enter a valid past date': 'Enter a valid past date', 'Enter some characters to bring up a list of possible matches': 'Digite alguns caracteres para trazer uma lista de correspondências possíveis', 'Enter some characters to bring up a list of possible matches.': 'Digite alguns caracteres para trazer uma lista de correspondências possíveis.', 'Enter tags separated by commas.': 'Insira as tags separadas por vírgulas.', 'Enter the data for an assessment': 'Enter the data for an assessment', 'Enter the same password as above': 'Digite a mesma senha acima', 'Enter your firstname': 'Enter your firstname', 'Enter your organization': 'Enter your organization', 'Entered': 'Inserido', 'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': 'Digitar um número de telefone é opcional, mas ao fazer isto permite a voçe se registrar para receber mensagens SMS.', 'Entry deleted': 'Entrada removida', 'Environment': 'Ambiente do', 'Equipment': 'Equipamento', 'Error encountered while applying the theme.': 'Erro encontrado ao aplicar o tema.', 'Error in message': 'Erro na mensagem', 'Error logs for "%(app)s"': 'Registro de erros de "%(app)s"', 'Error: no such record': 'Erro: nenhum registro', 'Errors': 'Erros', 'Est. Delivery Date': 'Est. Data de entrega', 'Estimated # of households who are affected by the emergency': '# estimado das famílias que são afetados pela emergência', 'Estimated # of people who are affected by the emergency': '# estimado de pessoas que são afetados pela emergência', 'Estimated Overall Building Damage': 'Dano total de construção estimado', 'Estimated total number of people in institutions': 'Número total estimado de pessoas em instituições', 'Euros': 'Euros', 'Evacuating': 'abandono', 'Evaluate the information in this message. 
(This value SHOULD NOT be used in public warning applications.)': 'Valide as informações desta mensagem. (Este valor não deve ser utilizado em aplicações de aviso público. ).', 'Event': 'Evento', 'Event Details': 'Detalhes do evento', 'Event added': 'Evento incluído', 'Event deleted': 'Evento excluído', 'Event updated': 'Evento atualizado', 'Events': 'eventos', 'Example': 'Exemplo:', 'Exceeded': 'Excedido', 'Excellent': 'Excelente', 'Exclude contents': 'Excluir conteúdo', 'Excreta disposal': 'Eliminação de dejetos', 'Execute a pre-planned activity identified in <instruction>': 'Executar uma atividade pré-planejada identificada no', 'Exercise': 'Excercício', 'Exercise?': 'Exercício ?', 'Exercises mean all screens have a watermark & all notifications have a prefix.': "Exercícios significa que todas as telas têm uma marca d'água & todas as comunicações têm um prefixo.", 'Existing Placard Type': 'Cartaz existente Tipo', 'Existing Sections': 'Existing Sections', 'Existing food stocks': 'Estoques de alimentos existente', 'Existing location cannot be converted into a group.': 'Local Existente não pode ser convertido em um grupo.', 'Exits': 'Saídas', 'Expected Return Home': 'Expected Return Home', 'Experience': 'Experiência', 'Expiry Date': 'Data de expiração', 'Explosive Hazard': 'Perigo explosivo', 'Export': 'Exportar', 'Export Data': 'Exportar dados.', 'Export Database as CSV': 'Exportar o banco de dados como CSV', 'Export in GPX format': 'Exportar no formato GPX', 'Export in KML format': 'Exportar no formato KML', 'Export in OSM format': 'Exportar no formato OSM', 'Export in PDF format': 'Exportar no formato PDF', 'Export in RSS format': 'Exportar no formato RSS', 'Export in XLS format': 'Exportar no formato XLS', 'Exterior Only': 'Exterior Apenas', 'Exterior and Interior': 'Exterior e Interior', 'Eye Color': 'Cor dos Olhos', 'Facebook': 'Facebook', 'Facial hair, color': 'Cabelo Facial, cor', 'Facial hair, type': 'Cabelo Facial, digite', 'Facial hear, length': 'Facial ouvir, COMPRIMENTO', 'Facilities': 'Instalações', 'Facility': 'Instalação', 'Facility Details': 'Detalhes da Instalação', 'Facility Operations': 'Facilidades nas Operações', 'Facility Status': 'Status Facility', 'Facility Type': 'Tipo de Instalação', 'Facility added': 'Instalação incluída', 'Facility or Location': 'Instalação ou Local', 'Facility removed': 'Recurso removido', 'Facility updated': 'Recurso atualizado', 'Fail': 'Falha', 'Failed!': 'Falha!', 'Fair': 'Razoável', 'Falling Object Hazard': 'Queda Objeto Risco', 'Families/HH': 'Famílias/HH', 'Family': 'Familia', 'Family tarpaulins received': 'lonas de familia recebidas', 'Family tarpaulins, source': 'lonas de familia, fuente', 'Family/friends': 'Família/amigos', 'Farmland/fishing material assistance, Rank': 'TERRAS/assistência de material de Pesca, posição', 'Fatalities': 'Fatalidades', 'Fax': 'Número do Fax', 'Feature Class': 'Classe de Recursos', 'Feature Class Details': 'Detalhes da classe de recurso', 'Feature Class added': 'Classe de Recurso incluída', 'Feature Class deleted': 'Classe de recurso excluída', 'Feature Class updated': 'Classe De recurso atualizada', 'Feature Classes': 'Classes de Recursos', 'Feature Classes are collections of Locations (Features) of the same type': 'Classes De recurso são grupos de localidades (recursos) do mesmo tipo', 'Feature Layer Details': 'Recurso Camada Detalhes', 'Feature Layer added': 'Recurso Camada incluída', 'Feature Layer deleted': 'Recurso Camada excluído', 'Feature Layer updated': 'Recurso Camada atualizada', 'Feature 
Layers': 'Camadas de Recursos', 'Feature Namespace': 'Espaço De recurso', 'Feature Request': 'Pedido de Componente', 'Feature Type': 'Tipo de Componente', 'Features Include': 'Componentes Incluídos', 'Female': 'Sexo Feminino', 'Female headed households': 'Famílias chefiadas por mulheres', 'Few': 'Poucos', 'Field': 'Campo', 'Field Hospital': 'Hospital de Campanha', 'File': 'arquivo', 'File Imported': 'File Imported', 'File Importer': 'File Importer', 'File name': 'File name', 'Fill in Latitude': 'Preencher a Latitude', 'Fill in Longitude': 'Preencher a Longitude', 'Filter': 'Filtro', 'Filter Field': 'Filtro de Campo', 'Filter Value': 'Filtro de Valor', 'Find': 'Localizar', 'Find All Matches': 'Localizar todos os equivalentes', 'Find Dead Body Report': 'Localizar Relatório de Cadáver', 'Find Hospital': 'Localizar Hospital', 'Find Person Record': 'Localizar registro de pessoa', 'Find Volunteers': 'Localizar Voluntários', 'Find a Person Record': 'Localizar um Registro de Pessoa', 'Finder': 'Localizador', 'Fingerprint': 'Impressão digital', 'Fingerprinting': 'Impressões digitais', 'Fingerprints': 'Impressões Digitais', 'Finish': 'Terminar', 'Finished Jobs': 'Tarefas Concluídas', 'Fire': 'Fogo', 'Fire suppression and rescue': 'Supressão e salvamento de incêndio', 'First Name': 'Primeiro Nome', 'First name': 'Primeiro Nome', 'Fishing': 'Pesca', 'Flash Flood': 'Enchente Repentina', 'Flash Freeze': 'Congelamento Repentino', 'Flexible Impact Assessments': 'Avaliações de Impacto Flexíveis', 'Flood': 'Enchente', 'Flood Alerts': 'Alertas de Enchente', 'Flood Alerts show water levels in various parts of the country': 'Os alertas de inundação mostram o nível da água em várias partes do país', 'Flood Report': 'Relatório de Inundação', 'Flood Report Details': 'Detalhes do Relatório de Inundação', 'Flood Report added': 'Relatório de Inundação incluído', 'Flood Report deleted': 'Relatório de Inundação removido', 'Flood Report updated': 'Relatório de Inundação atualizado', 'Flood Reports': 'Relatórios de Inundação', 'Flow Status': 'posição de fluxo', 'Focal Point': 'Ponto Central', 'Fog': 'Nevoeiro', 'Food': 'Food', 'Food Supply': 'Alimentação', 'Food assistance': 'Ajuda alimentar', 'Footer': 'Rodapé', 'Footer file %s missing!': 'Arquivo de rodapé %s ausente!', 'For': 'Por', 'For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).': 'For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).', 'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.': 'Para um país este seria o código ISO2, para uma cidade, este seria o código do aeroporto (UNE/Locode).', 'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': 'Para cada parceiro de sincronização, há uma tarefa de sincronização padrão que é executada após um intervalo de tempo especificado. Você também pode configurar mais tarefas de sincronização que podem ser customizadas de acordo com as suas necessidades. 
Clique no link à direita para começar.', 'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners': 'Para segurança reforçada, é recomendável digitar um nome de usuário e senha, e notificar os administradores de outras máquinas em sua organização para incluir esse usuário e senha no UUID em Sincronização -> Parceiros De Sincronização', 'For live help from the Sahana community on using this application, go to': 'Para ajuda ao vivo da comunidade do Sahana sobre como utilizar esse aplicativo, vá para', 'For messages that support alert network internal functions': 'Para mensagens que suportam funções internas de alertas de rede', 'For more details on the Sahana Eden system, see the': 'Para obter mais detalhes sobre o sistema Sahana Eden, consulte o', 'For more information, see': 'Para obter mais informações, consulte o', 'For more information, see ': 'For more information, see ', 'For other types, the next screen will allow you to enter the relevant details...': 'Para outros tipos, a próxima tela permitirá que você digite os detalhes relevantes.', 'Forest Fire': 'Incêndios florestais', 'Formal camp': 'Acampamento formal', 'Format': 'Formato', "Format the list of attribute values & the RGB value to use for these as a JSON object, e.g.: {Red: '#FF0000', Green: '#00FF00', Yellow: '#FFFF00'}": "Formatar a lista de valores de atributos & o valor RGB a ser usado para esses como o objeto JSON, Exemplo: {Red: '#FF0000, Green: '#00FF00', Yellow: '#FFFF00'}", 'Forms': 'formulários', 'Found': 'localizado', 'Foundations': 'Fundações', 'Freezing Drizzle': 'Garoa gelada', 'Freezing Rain': 'Chuva Gelada', 'Freezing Spray': 'Spray Gelado', 'French': 'Francês', 'Friday': 'sexta-feira', 'From': 'from', 'From Facility': 'From Facility', 'From Inventory': 'A partir do Inventário', 'From Location': 'Do Local', 'From Organisation': 'Da organização', 'From Organization': 'Da Organização', 'From Person': 'Da Pessoa', 'Frost': 'Geada', 'Fulfil. Status': 'Encher. 
Status', 'Fulfillment Status': 'Status de preenchimento', 'Full': 'Cheio', 'Full beard': 'Barba completa', 'Fullscreen Map': 'Mapa em tela cheia', 'Functions available': 'Funções disponíveis', 'Funding Organization': 'Financiar a Organização', 'Funeral': 'Funeral', 'Further Action Recommended': 'Mais Acção Recomendada', 'GIS Reports of Shelter': 'Relatórios GIS de abrigos', 'GIS integration to view location details of the Shelter': 'Integration GIS para visualizar detalhes do local do Abrigo', 'GPS': 'GPS', 'GPS Data': 'GPS Data', 'GPS ID': 'GPS ID', 'GPS Marker': 'Marcador De GPS', 'GPS Track': 'Rastrear por GPS', 'GPS Track File': 'Rastrear Arquivo GPS', 'GPS data': 'GPS data', 'GPS data added': 'GPS data added', 'GPS data deleted': 'GPS data deleted', 'GPS data updated': 'GPS data updated', 'GPX Track': 'GPX RASTREAR', 'GRN': 'NRG', 'GRN Status': 'Status GRN', 'Gale Wind': 'Temporal', 'Gap Analysis': 'Análise de Falhas', 'Gap Analysis Map': 'Mapa de Análise de Falhas', 'Gap Analysis Report': 'Relatório de Análise de Falhas', 'Gap Map': 'Mapa de Falhas', 'Gap Report': 'Relatório de Falhas', 'Gateway': 'Portão', 'Gateway Settings': 'Configurações de Gateway', 'Gateway settings updated': 'Configurações de Gateway atualizadas', 'Gender': 'Sexo', 'General': 'geral', 'General Comment': 'Comentário Geral', 'General Medical/Surgical': 'Médico/Cirúrgico Geral', 'General emergency and public safety': 'Geral de emergência e segurança pública', 'General information on demographics': 'Informações gerais sobre demografia', 'Generator': 'Gerador', 'Geocode': 'Geocodificar', 'Geocoder Selection': 'Seleção De geocodificador', 'Geometry Name': 'Nome da geometria', 'Geophysical (inc. landslide)': 'Geofísica (inc. deslizamento)', 'Geotechnical': 'Geotécnica', 'Geotechnical Hazards': 'RISCOS geotécnicos', 'Geraldo module not available within the running Python - this needs installing for PDF output!': 'Geraldo não disponíveis no módulo a execução Python- é necessário instalar para saída PDF!', 'Geraldo not installed': 'Geraldo não instalado', 'German': 'German', 'Get incoming recovery requests as RSS feed': 'Obter pedidos recebidos de recuperação como feed RSS', 'Give a brief description of the image, e.g. 
what can be seen where on the picture (optional).': 'Fornecer uma descrição breve da imagem, por exemplo, o que pode ser visto no local da imagem (opcional).', 'Give information about where and when you have seen them': 'Fornecer informações sobre onde e quando você os viu', 'Global Messaging Settings': 'Configurações Globais de Menssagem', 'Go': 'ir', 'Go to Request': 'Ir para Pedido', 'Goatee': 'Barbicha', 'Good': 'Válido', 'Good Condition': 'Boa Condição', 'Goods Received Note': 'Nota de Recebimento de Mercadorias', "Google Layers cannot be displayed if there isn't a valid API Key": "Google Layers cannot be displayed if there isn't a valid API Key", 'Government': 'Governamental', 'Government UID': 'GOVERNO UID', 'Government building': 'Prédios Públicos', 'Grade': 'Grau', 'Greek': 'grego', 'Green': 'verde', 'Ground movement, fissures': 'Movimento do solo terrestre, fissuras', 'Ground movement, settlement, slips': 'Movimento do solo terrestre, assentamentos, escorregões', 'Group': 'Grupo', 'Group Description': 'Descrição do Grupo', 'Group Details': 'Detalhes do grupo', 'Group ID': 'Group ID', 'Group Member added': 'Membro do grupo incluído', 'Group Members': 'membros do grupo', 'Group Memberships': 'Associados do Grupo', 'Group Name': 'Nome do grupo', 'Group Title': 'Título do grupo', 'Group Type': 'Tipo de grupo', 'Group added': 'Grupo adicionado', 'Group deleted': 'Grupo Excluído', 'Group description': 'Descrição do Grupo', 'Group updated': 'GRUPO ATUALIZADO', 'Groups': 'Grupos do', 'Groups removed': 'Grupos Removido', 'Guest': 'Convidado', 'HR Data': 'Dados de RH', 'HR Manager': 'Responsável de RH', 'Hail': 'granizo', 'Hair Color': 'Cor do Cabelo', 'Hair Length': 'Comprimento do cabelo', 'Hair Style': 'Estilo do Cabelo', 'Has additional rights to modify records relating to this Organization or Site.': 'Tem direitos adicionais para modificar os registros relativos a esta organização ou site.', 'Has data from this Reference Document been entered into Sahana?': 'Os dados deste documento de referência foi digitado no Sahana?', 'Has only read-only access to records relating to this Organization or Site.': 'Tem apenas acesso de leitura para os registros relativos a esta organização ou site.', 'Has the Certificate for receipt of the shipment been given to the sender?': 'O certificado de recepção do carregamento foi dado para o remetente?', 'Has the GRN (Goods Received Note) been completed?': 'O GRN (nota de mercadorias recebidas) foi concluído?', 'Hazard Pay': 'Pagar Risco', 'Hazardous Material': 'Material perigoso', 'Hazardous Road Conditions': 'Estradas em Condições de Risco', 'Header Background': 'Conhecimento de Chefia', 'Header background file %s missing!': 'Arquivo de Cabeçalho de Base %s ausente!', 'Headquarters': 'Matriz', 'Health': 'Saúde', 'Health care assistance, Rank': 'Assistência Saúde, Classificação', 'Health center': 'Centro de Saúde', 'Health center with beds': 'Centro de saúde com camas', 'Health center without beds': 'Centro de saúde sem camas', 'Health services status': 'Situação dos serviços de saúde', 'Healthcare Worker': 'Profissional de Saúde', 'Heat Wave': 'Onda de calor', 'Heat and Humidity': 'Calor e Umidade', 'Height': 'Altura', 'Height (cm)': 'Altura (cm)', 'Height (m)': 'Altura (m)', 'Help': 'Ajuda', 'Helps to monitor status of hospitals': 'Ajuda para monitorar status de hospitais', 'Helps to report and search for Missing Persons': 'Ajuda a reportar e procurar pessoas desaparecidas.', 'Helps to report and search for missing persons': 'Ajuda a reportar e procurar 
pessoas desaparecidas.', 'Here are the solution items related to the problem.': 'Aqui estão as soluções relacionadas ao problema.', 'Heritage Listed': 'Património Listado', 'Hierarchy Level %d Name': 'Hierarquia de Nível% de d Nome', 'Hierarchy Level 0 Name (e.g. Country)': 'Hierarquia Nível 0 Nome (por exemplo, País)', 'Hierarchy Level 0 Name (i.e. Country)': 'Hierarquia Nível 0 nome (por exemplo País)', 'Hierarchy Level 1 Name (e.g. Province)': 'Hierarquia Nível 1 Nome (por exemplo, Província)', 'Hierarchy Level 1 Name (e.g. State or Province)': 'Hierarquia Nível 1 nome (por exemplo, Estado ou Província)', 'Hierarchy Level 2 Name (e.g. District or County)': 'Hierarquia de Nível 2 Nome (por exemplo, Região ou Município)', 'Hierarchy Level 3 Name (e.g. City / Town / Village)': 'Hierarquia Nível 3 Nome (por exemplo, Cidade / Municipio / Vila)', 'Hierarchy Level 4 Name (e.g. Neighbourhood)': 'Hierarquia de Nível 4 Nome (por exemplo, Bairro)', 'Hierarchy Level 5 Name': 'Nome de Nível 5 na Hierarquia', 'High': 'Alta', 'High Water': "d'água alta", 'Hindu': 'Hindu', 'History': 'História', 'Hit the back button on your browser to try again.': 'Clique no ícone de voltar em seu navegador para tentar novamente.', 'Holiday Address': 'Endereço durante Feriado', 'Home': 'Residência', 'Home Address': 'Endereço Residencial', 'Home City': 'Home City', 'Home Country': 'País natal', 'Home Crime': 'Crime Doméstico', 'Home Details': 'Home Details', 'Home Phone Number': 'Home Phone Number', 'Home Relative': 'Home Relative', 'Home added': 'Home added', 'Home deleted': 'Home deleted', 'Home updated': 'Home updated', 'Homes': 'Homes', 'Hospital': 'Hospital', 'Hospital Details': 'Detalhes do Hospital', 'Hospital Status Report': 'Relatório de Status do Hospital', 'Hospital information added': 'Informações do hospital inclusas.', 'Hospital information deleted': 'Informações do hospital excluídas', 'Hospital information updated': 'informações do Hospital atualizadas', 'Hospital status assessment.': 'Avaliação de status do Hospital.', 'Hospitals': 'Hospitais', 'Hot Spot': 'ponto de acesso', 'Hour': 'Hora', 'Hours': 'Horas', 'Household kits received': 'Kits caseiros recebidos', 'Household kits, source': 'Kit de família, origem', 'How does it work?': 'Como funciona?', 'How is this person affected by the disaster? (Select all that apply)': 'Como esta pessoa é afetada pelo desastre? 
(selecione todos que se aplicam)', 'How long will the food last?': 'Quanto tempo irá durar a comida?', 'How many Boys (0-17 yrs) are Dead due to the crisis': 'Quantos rapazes (0-17 anos) estão Mortos devido à crise', 'How many Boys (0-17 yrs) are Injured due to the crisis': 'Quantos rapazes (0-17 anos) estão Feridos devido à crise', 'How many Boys (0-17 yrs) are Missing due to the crisis': 'Quantos rapazes (0-17 anos) estão Desaparecidos devido à crise', 'How many Girls (0-17 yrs) are Dead due to the crisis': 'Quantas garotas (0-17 anos) morreram devido à crise', 'How many Girls (0-17 yrs) are Injured due to the crisis': 'Quantas garotas (0-17 anos) estão feridas devido à crise', 'How many Girls (0-17 yrs) are Missing due to the crisis': 'Quantas garotas (0-17 anos) estão perdidas devido à crise', 'How many Men (18 yrs+) are Dead due to the crisis': 'Quantos homens (18 anos+) estão mortos devido à crise', 'How many Men (18 yrs+) are Injured due to the crisis': 'Quantos homens (18 anos +) são feridos devido à crise', 'How many Men (18 yrs+) are Missing due to the crisis': 'Quantos homens (18 anos +) estão ausentes devido à crise', 'How many Women (18 yrs+) are Dead due to the crisis': 'Quantas mulheres (+18 anos) estão mortas devido à crise', 'How many Women (18 yrs+) are Injured due to the crisis': 'Quantas mulheres (+18 anos) estão feridas devido à crise', 'How many Women (18 yrs+) are Missing due to the crisis': 'Quantas mulheres acima de 18 anos estão ausentes devido à crise', 'How many days will the supplies last?': 'Quantos dias irão durar os abastecimentos?', 'How many new cases have been admitted to this facility in the past 24h?': 'Quantos novos casos tenham sido admitidos a esta facilidade nas últimas 24 horas?', 'How many of the patients with the disease died in the past 24h at this facility?': 'Como muitos dos pacientes com a doença morreram nas últimas 24 horas nesta unidade?', 'How many patients with the disease are currently hospitalized at this facility?': 'Quantos pacientes com a doença estão atualmente internados nesta instalação?', 'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'Quanto detalhe é visto. Um nível alto de Zoom mostra muitos detalhes, mas não uma grande área. Um nível de Zoom baixo significa ver uma grande área, mas não com um alto nível de detalhe.', 'Human Resource': 'Recursos humanos', 'Human Resource Details': 'Detalhes de Recursos Humanos', 'Human Resource Management': 'Gerenciamento de recursos humanos', 'Human Resource added': 'Recurso humano adicionado', 'Human Resource removed': 'Recursos Humanos removido', 'Human Resource updated': 'Recursos Humanos atualizado', 'Human Resources': 'Recursos Humanos', 'Human Resources Management': 'Gerenciamento de Recursos Humanos', 'Humanitarian NGO': 'ONG humanitária', 'Hurricane': 'Furacão', 'Hurricane Force Wind': 'Furacão Força Vento', 'Hybrid Layer': 'Hybrid Layer', 'Hygiene': 'Higiene', 'Hygiene NFIs': 'Higiene NFIs', 'Hygiene kits received': 'Kits de higiene recebido', 'Hygiene kits, source': 'Kits de higiene, origem', 'Hygiene practice': 'Prática de higiene', 'Hygiene problems': 'PROBLEMAS DE HIGIENE', 'I accept. Create my account.': 'I accept. 
Create my account.', 'I am available in the following area(s)': 'Estou disponível na(s) seguinte(s) área(s)', 'ID Tag': 'Etiqueta de Identificação', 'ID Tag Number': 'Número da Etiqueta de Identificação', 'ID type': 'Tipo de ID', 'Ice Pressure': 'Pressão de gelo', 'Iceberg': 'Icebergue', 'Identification': 'Identification', 'Identification Report': 'Identificação Relatório', 'Identification Reports': 'Relatórios de Identificação', 'Identification Status': 'Status da Identificação', 'Identified as': 'Identificado como', 'Identified by': 'Identificado por', 'Identity': 'Identidade', 'Identity Details': 'Detalhes da identidade', 'Identity added': 'Identidade incluída', 'Identity deleted': 'Identidade excluída', 'Identity updated': 'Identidade atualizada', 'If Staff have login accounts then they are given access to edit the details of the': 'Se o pessoal tiver contas de login, então lhes é dado acesso para editar os detalhes do', 'If a ticket was issued then please provide the Ticket ID.': 'Se um bilhete foi emitido então por favor forneça o ID do bilhete.', 'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': 'Se um usuário verifica que eles possuem um endereço de email com este domínio, o campo Aprovador é utilizado para determinar se e por quem aprovação adicional é necessária.', 'If it is a URL leading to HTML, then this will downloaded.': 'Se for uma URL levando a HTML, então este será baixado.', 'If neither are defined, then the Default Marker is used.': 'Se nem são definidos, então o Marcador Padrão é utilizado.', 'If no marker defined then the system default marker is used': 'Se nenhum marcador definido, o marcador padrão do sistema é utilizada', 'If no, specify why': 'Se não, especifique por que', 'If none are selected, then all are searched.': 'Se nenhuma for selecionada, então todos são procurados.', "If selected, then this Asset's Location will be updated whenever the Person's Location is updated.": 'Se selecionado, esta localização do ativo será atualizado sempre que a localização da pessoa é atualizada.', 'If the location is a geographic area, then state at what level here.': 'Se o local é uma área geográfica, então defina em que nível aqui.', 'If the request is for %s, please enter the details on the next screen.': 'If the request is for %s, please enter the details on the next screen.', 'If the request is for type "Other", you should enter a summary of the request here.': 'Se o pedido for para o tipo \ " Outro", você deve digitar um resumo do pedido aqui.', 'If the request type is "Other", please enter request details here.': 'Se o tipo de pedido é "other", por favor, digite aqui detalhes do pedido.', "If this configuration represents a region for the Regions menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": 'Se esta configuração representa uma região para o menu regiões, dê-lhe um nome a ser utilizado no menu. 
O nome de uma configuração pessoal do mapa será configurado para o nome do usuário.', "If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": 'Se esse campo for Preenchido, então, um usuário que especificar esta organização quando se registrar será designado como um agente desta organização a menos que seu domínio não corresponde ao campo de domínio.', 'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': 'Se esse campo for preenchido, o usuário de um específico Domain será automaticamente registrado como funcionário desta organização.', 'If this is set to True then mails will be deleted from the server after downloading.': 'Se isso for ajustado para “True”, as correspondências serão deletadas do servidor depois que o downloading for feito.', "If this is ticked, then this will become the user's Base Location & hence where the user is shown on the Map": 'Se isso for ticado, se tornará a base geográfica do usuário e, consequentemente onde este aparece no mapa.', 'If this record should be restricted then select which role is required to access the record here.': 'Se esse registro deve ser restrito, selecione qual regra é necessária para acessar o record aqui.', 'If this record should be restricted then select which role(s) are permitted to access the record here.': 'Se esse registro deve ser restrito, selectione qual (is) regra (s) serão permitidas para assessá-lo aqui.', 'If yes, specify what and by whom': 'Se SIM, Especifique o quê e por quem', 'If yes, which and how': 'Se sim, quais e como', 'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': 'Se você não inserir um documento de referência, seu e-mail será exibido para permitir que esses dados sejam verificados.', "If you don't see the Hospital in the list, you can add a new one by clicking link 'Add Hospital'.": "Se você não vê o Hospital na lista, você pode incluir um novo clicando no link 'incluir Hospital'.", "If you don't see the Office in the list, you can add a new one by clicking link 'Add Office'.": "Se você não vê o escritório na lista, você pode incluir um novo clicando no link 'incluir escritório'.", "If you don't see the Organization in the list, you can add a new one by clicking link 'Add Organization'.": 'Se voce não vê a Organização na lista, voce poderá adicionar uma nova clicando no link "Incluir Organização"', 'If you know what the Geonames ID of this location is then you can enter it here.': 'Se voce conhecer o Geonames ID desta localização então voce poderá inserí-lo aqui.', 'If you know what the OSM ID of this location is then you can enter it here.': 'Se voce conhecer o OSM ID desta localização, então voce pode inserí-lo aqui.', 'If you need to add a new document then you can click here to attach one.': 'Se houver necessidade de incluir um novo documento então voce poderá clicar aqui para anexá-lo.', 'If you want several values, then separate with': 'Se voce deseja varios valores, separe com', 'If you would like to help, then please': 'Se você gostaria de ajudar, então por favor', 'Illegal Immigrant': 'Imigrante Ilegal', 'Image': 'Imagem', 'Image Details': 'Detalhes da Imagem', 'Image File(s), one image per page': 'Image File(s), one image per page', 'Image Tags': 'Imagem Tags', 'Image Type': 'Tipo de Imagem', 'Image Upload': 'Fazer atualizacao Da 
imagem', 'Image added': 'Imagem Adicionada', 'Image deleted': 'Imagem excluída', 'Image updated': 'Imagem atualizada', 'Imagery': 'Imagens', 'Images': 'Imagens', 'Impact Assessments': 'Avaliações de impacto', 'Impact Details': 'Detalhes de impacto', 'Impact Type': 'Tipo de impacto', 'Impact Type Details': 'Detalhes dos tipos de impacto', 'Impact Type added': 'Tipo de impacto incluído', 'Impact Type deleted': 'Tipo de impacto excluído', 'Impact Type updated': 'Atualização dos tipos de impacto', 'Impact Types': 'Tipos de impactos', 'Impact added': 'Impacto incluído', 'Impact deleted': 'Impacto excluído', 'Impact updated': 'Atualização de impacto', 'Impacts': 'Impactos', 'Import': 'Importação', 'Import & Export Data': 'Importar & Exportar Dados', 'Import Data': 'Importar Dados', 'Import File': 'Import File', 'Import File Details': 'Import File Details', 'Import File deleted': 'Import File deleted', 'Import Files': 'Import Files', 'Import Job Count': 'Import Job Count', 'Import Jobs': 'Importar Tarefas', 'Import New File': 'Import New File', 'Import and Export': 'Importação e Exportação', 'Import from Ushahidi Instance': 'Importação da Instância Ushahidi', 'Import if Master': 'Importar se Mestre', 'Import multiple tables as CSV': 'Importar tabelas multiplas como CSV', 'Import/Export': 'Importar/Exportar', 'Important': 'Importante', 'Importantly where there are no aid services being provided': 'Importante onde não há serviços de apoio a ser prestado', 'Imported': 'Imported', 'Importing data from spreadsheets': 'Importar dados de planilhas', 'Improper decontamination': 'Descontaminação Imprópria', 'Improper handling of dead bodies': 'Manipulação inadequada de cadáveres', 'In Catalogs': 'Em Catálogos', 'In Inventories': 'Em Inventários', 'In Process': 'Em Processo', 'In Progress': 'Em Progresso', 'In Window layout the map maximises to fill the window, so no need to set a large value here.': 'Maximize o ajuste da janela para preenche-la toda, desta forma não será necessário configurar para uso de fonte grande.', 'Inbound Mail Settings': 'Definições de correio de entrada', 'Incident': 'Incidente', 'Incident Categories': 'Categorias Incidente', 'Incident Report': 'Relatório de Incidente', 'Incident Report Details': 'Detalhes do relatório de incidentes', 'Incident Report added': 'Relatório de Incidente incluído', 'Incident Report deleted': 'Relatório de Incidente excluído', 'Incident Report updated': 'Relatório de incidente atualizado', 'Incident Reporting': 'Relatório de incidentes', 'Incident Reporting System': 'Sistema de relatórios de incidentes', 'Incident Reports': 'Relatório de incidentes', 'Incidents': 'incidentes', 'Include any special requirements such as equipment which they need to bring.': 'Include any special requirements such as equipment which they need to bring.', 'Incoming': 'Entrada', 'Incoming Shipment canceled': 'Chegada da encomenda cancelada', 'Incoming Shipment updated': 'Chegada de encomenda actualizada.', 'Incomplete': 'Incompleto', 'Individuals': 'Individuais', 'Industrial': 'Industrial', 'Industrial Crime': 'Crime Industrial', 'Industry Fire': 'Indústria Fogo', 'Infant (0-1)': 'Criança (0-1)', 'Infectious Disease': 'Doença INFECCIOSA', 'Infectious Disease (Hazardous Material)': 'Doenças infecciosas (Material perigoso)', 'Infectious Diseases': 'Doenças infecciosas', 'Infestation': 'Infestação', 'Informal Leader': 'Líder Informal', 'Informal camp': 'Acampamento Informal', 'Information gaps': 'problemas de informação', 'Infusion catheters available': 'Cateteres de infusão 
disponível', 'Infusion catheters need per 24h': 'Cateteres infusão necessário por 24 H', 'Infusion catheters needed per 24h': 'Cateteres infusão necessário por H', 'Infusions available': 'Infusões disponíveis', 'Infusions needed per 24h': 'Infusões necessário por 24H', 'Inspected': 'Inspecionado', 'Inspection Date': 'Data de Inspeção', 'Inspection date and time': 'Data e hora de inspeção', 'Inspection time': 'Hora da inspeção', 'Inspector ID': 'ID do Inspetor', 'Instant Porridge': 'Mingau Instantâneo', "Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.": 'Em vez de sincronizar automaticamente com outros pares pela rede, voce também pode sincronizar com arquivos, o que é necessário quando não há rede. Você pode utilizar esta página para importar dados de sincronização de arquivos e também exportar dados para arquivos de Sincronização. Clique no link à direita para ir para esta página.', 'Institution': 'Instituição', 'Insufficient': 'insuficiente', 'Insufficient privileges': 'Insufficient privileges', 'Insufficient vars: Need module, resource, jresource, instance': 'Variaveis insuficientes: necessario modulo, recurso, jrecurso, instância', 'Insurance Renewal Due': 'Insurance Renewal Due', 'Intake Items': 'Itens de admissão', 'Intergovernmental Organization': 'Organização Intergovernamental', 'Interior walls, partitions': 'Do Interior das paredes, partições', 'Internal State': 'Estado Interno', 'International NGO': 'ONG internacional', 'International Organization': 'Organização Internacional', 'Interview taking place at': 'Entrevista em', 'Invalid': 'Inválido', 'Invalid Query': 'Consulta inválida', 'Invalid email': 'Invalid email', 'Invalid phone number': 'Invalid phone number', 'Invalid phone number!': 'Invalid phone number!', 'Invalid request!': 'Pedido inválido!', 'Invalid ticket': 'Bilhete Inválido', 'Inventories': 'Inventários.', 'Inventory': 'Inventário', 'Inventory Item': 'Item do inventário', 'Inventory Item Details': 'Detalhes do Item de inventário', 'Inventory Item added': 'Item incluído no inventário', 'Inventory Item deleted': 'Item do inventário excluído', 'Inventory Item updated': 'Item de Inventário atualizado', 'Inventory Items': 'Itens do Inventário', 'Inventory Items Available for Request Item': 'Itens de inventário disponíveis para Pedir um Item', 'Inventory Items include both consumable supplies & those which will get turned into Assets at their destination.': 'Itens de invenrário incluem ambos suprimentos consumíveis & aqueles que se transformarão em Ativos no seu destino.', 'Inventory Management': 'Gerenciamento de Inventário', 'Inventory Stock Position': 'Inventory Stock Position', 'Inventory functionality is available for:': 'Inventário de funcionalidades esta disponível para:', 'Inventory of Effects': 'Inventário de Efeitos', 'Is editing level L%d locations allowed?': 'É permitido editar o nível dos locais L%d?', 'Is it safe to collect water?': 'É seguro coletar água?', 'Is this a strict hierarchy?': 'Esta é uma hierarquia rigorosa?', 'Issuing Authority': 'Autoridade emissora', 'It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'Ele captura não apenas os locais onde elas estão ativas, mas também captura informações sobre o conjunto de projetos que 
está fornecendo em cada região.', 'Italian': 'Italian', 'Item': 'Item', 'Item Added to Shipment': 'Item Incluído para Embarque', 'Item Catalog Details': 'Detalhes do item do catálogo', 'Item Categories': 'Categorias do Item', 'Item Category': 'Categoria do Item', 'Item Category Details': 'Detalhes da categoria de item', 'Item Category added': 'Categoria de item incluída', 'Item Category deleted': 'Categoria de item excluída', 'Item Category updated': 'Atualização da categoria de item', 'Item Details': 'Detalhes do item', 'Item Pack Details': 'Detalhes do pacote de itens', 'Item Pack added': 'Pacote de itens incluído', 'Item Pack deleted': 'Pacote de itens excluído', 'Item Pack updated': 'Itens de Pacote atualizados', 'Item Packs': 'Pacotes de itens', 'Item added': 'Item incluído', 'Item added to Inventory': 'Itens adicionados ao Inventário', 'Item added to shipment': 'Item incluído para embarque', 'Item already in Bundle!': 'Item já no pacote configurável!', 'Item already in Kit!': 'Item já no Kit!', 'Item already in budget!': 'Item já no Orçamento!', 'Item deleted': 'Item Excluído', 'Item removed from Inventory': 'Item removido do Inventário', 'Item updated': 'Item atualizado', 'Items': 'Itens', 'Items in Category can be Assets': 'itens na categoria podem ser ativos', 'Japanese': 'japonês', 'Jerry can': 'Jerricã', 'Jew': 'Judeu', 'Job Market': 'Mercado de trabalho', 'Job Role': 'Função de trabalho', 'Job Role Catalog': 'Catálogo de Função de trabalho', 'Job Role Details': 'Detalhes da Função', 'Job Role added': 'Função de trabalho inclusa', 'Job Role deleted': 'Função de trabalho excluída', 'Job Role updated': 'Função actualizada', 'Job Roles': 'Funções', 'Job Title': 'Título do Cargo', 'Jobs': 'Tarefas', 'Journal': 'Diário', 'Journal Entry Details': 'Detalhes da Entrada de Diário', 'Journal entry added': 'Entrada de diário incluída', 'Journal entry deleted': 'Entrada de diário removida', 'Journal entry updated': 'Entrada de diário atualizada', 'Key': 'Chave', 'Key Details': 'Detalhes da Chave', 'Key added': 'Chave adicionada', 'Key deleted': 'Chave removida', 'Key updated': 'Chave actualizada', 'Keys': 'Chaves', 'Kit': 'kit', 'Kit Contents': 'Conteúdo Kit', 'Kit Details': 'Detalhes do Kit', 'Kit Updated': 'Kit Atualizado', 'Kit added': 'Pacote adicionado', 'Kit deleted': 'Kit excluído', 'Kit updated': 'Kit atualizado', 'Kits': 'Kits', 'Known Identities': 'Identidades conhecidas', 'Known incidents of violence against women/girls': 'Incidentes de violência conhecidos contra mulheres/garotas', 'Known incidents of violence since disaster': 'Incidentes de violência conhecidos desde o desastre', 'Korean': 'Korean', 'LICENSE': 'LICENÇA', 'Lack of material': 'Falta de material', 'Lack of school uniform': 'Falta de uniforme escolar', 'Lack of supplies at school': 'Falta de suprimentos na escola', 'Lack of transport to school': 'Falta de transporte escolar', 'Lactating women': 'Mulheres lactantes', 'Lahar': 'Lahar', 'Landslide': 'Deslizamento', 'Language': 'Linguagem', 'Last Name': 'sobrenome', 'Last known location': 'Último local conhecido', 'Last name': 'Last name', 'Last synchronization time': 'Horário da última sincronização', 'Last updated': 'Última atualização', 'Last updated ': 'Last updated ', 'Last updated by': 'Última atualização por', 'Last updated on': 'Última Atualização em', 'Latitude': 'Latitude', 'Latitude & Longitude': 'Latitude & Longitude', 'Latitude is North-South (Up-Down).': 'Latitude é sentido norte-sul (em cima-embaixo).', 'Latitude is zero on the equator and positive in the
northern hemisphere and negative in the southern hemisphere.': 'Latitude é zero na linha do Equador, positiva no hemisfério norte e negativa no hemisfério sul.', 'Latitude of Map Center': 'Latitude DO MAPA Centro', 'Latitude of far northern end of the region of interest.': 'Latitude do extremo Norte longe do Região de interesse.', 'Latitude of far southern end of the region of interest.': 'Latitude da extremidade sul longe do Região de interesse.', 'Latitude should be between': 'Latitude deve estar entre', 'Latrines': 'Privadas', 'Law enforcement, military, homeland and local/private security': 'Execução da lei militar, interna e segurança local/privada', 'Layer': 'Camada', 'Layer Details': 'Detalhes de Camada', 'Layer ID': 'Layer ID', 'Layer Name': 'Layer Name', 'Layer Type': 'Layer Type', 'Layer added': 'Camada incluída', 'Layer deleted': 'Camada excluída', 'Layer has been Disabled': 'Layer has been Disabled', 'Layer has been Enabled': 'Layer has been Enabled', 'Layer updated': 'Camada atualizada', 'Layers': 'Camadas', 'Layers updated': 'Camadas atualizadas', 'Layout': 'Modelo', 'Leader': 'guia', 'Leave blank to request an unskilled person': 'Leave blank to request an unskilled person', 'Legend Format': 'Formato da Legenda', 'Length (m)': 'Comprimento (m)', 'Level': 'Nível', 'Level 1': 'Nível 1', 'Level 1 Assessment Details': 'Detalhes da Avaliação Nível 1', 'Level 1 Assessment added': 'Avaliação Nível 1 incluído', 'Level 1 Assessment deleted': 'Avaliação Nível 1 excluído', 'Level 1 Assessment updated': 'Avaliação Nível 1 atualizada', 'Level 1 Assessments': 'Avaliações Nível 1', 'Level 2': 'nível 2', 'Level 2 Assessment Details': 'Nível 2 de avaliação Detalhado', 'Level 2 Assessment added': 'Nível 2 avaliação incluído', 'Level 2 Assessment deleted': 'Nível 2 de avaliação excluído', 'Level 2 Assessment updated': 'Nível 2 de avaliação atualizada', 'Level 2 Assessments': 'Nível 2 de Avaliações', 'Level 2 or detailed engineering evaluation recommended': 'Nível 2 ou engenharia detalhada de avaliação recomendado', "Level is higher than parent's": 'Nível superior ao dos pais', 'Library support not available for OpenID': 'Apoio de biblioteca não está disponível para OpenID', 'License Number': 'License Number', 'License Plate': 'License Plate', 'LineString': 'cadeia-de-linhas', 'List': 'Listar', 'List / Add Baseline Types': 'Lista / Incluir Linha de Tipos', 'List / Add Impact Types': 'Lista / Incluir Tipos de Impacto', 'List / Add Services': 'Lista / Incluir Serviços', 'List / Add Types': 'Lista / Incluir Tipos', 'List Activities': 'listar atividades', 'List All': 'Mostrar Tudo', 'List All Assets': 'Lista todos os ativos', 'List All Catalog Items': 'Lista todos os Itens Do Catálogo', 'List All Commitments': 'Lista todos os compromissos', 'List All Entries': 'Listar todas as entradas', 'List All Item Categories': 'Lista todos os itens Categorias', 'List All Memberships': 'Listar Todas As Associações', 'List All Received Shipments': 'Lista todas as transferências Recebidas', 'List All Records': 'Lista todos os registros', 'List All Reports': 'Listar todos os Relatórios', 'List All Requested Items': 'Lista Todos Os itens solicitados', 'List All Requested Skills': 'List All Requested Skills', 'List All Requests': 'Lista Todos Os Pedidos', 'List All Sent Shipments': 'Listar todos os embarques enviados', 'List All Vehicles': 'List All Vehicles', 'List Alternative Items': 'Listar Itens Alternativos', 'List Assessment Summaries': 'Listar Resumos das Avaliações', 'List Assessments': 'Listar as 
Avaliações', 'List Asset Assignments': 'Listar Atribuições de Ativos', 'List Assets': 'Listar Ativos', 'List Availability': 'Listar Disponibilidade', 'List Baseline Types': 'Lista de Tipos De Linha', 'List Baselines': 'Lista de Linhas', 'List Brands': 'Lista de Marcas', 'List Budgets': 'Listar Orçamentos', 'List Bundles': 'Listar Pacotes', 'List Camp Services': 'Listar Serviços de Acampamento', 'List Camp Types': 'Listar Tipos de Acampamentos', 'List Camps': 'Listar Acampamentos', 'List Catalog Items': 'Lista de Itens Do Catálogo', 'List Catalogs': 'Listar catálogos', 'List Certificates': 'Listar certificados', 'List Certifications': 'Listar certificações', 'List Checklists': 'Lista Listas de Verificação.', 'List Cluster Subsectors': 'Lista Subsetores de Cluster', 'List Clusters': 'Lista Clusters', 'List Commitment Items': 'Lista Itens de Compromisso', 'List Commitments': 'Lista Compromissos', 'List Committed People': 'List Committed People', 'List Competencies': 'Listar competencias', 'List Competency Ratings': 'Listar classificações de competencias', 'List Conflicts': 'Lista Conflitos', 'List Contact Information': 'Listar informações do contato', 'List Contacts': 'Listar contatos', 'List Course Certificates': 'Listar certificados de cursos', 'List Courses': 'Listar Cursos', 'List Credentials': 'Listar credenciais', 'List Current': 'Lista Atual', 'List Documents': 'Listar documentos', 'List Donors': 'Listar doadores', 'List Events': 'Lista de Eventos', 'List Facilities': 'Lista de Facilidades', 'List Feature Classes': 'Listar Classes De Recursos', 'List Feature Layers': 'LISTAr Camadas DE RECURSOS', 'List Flood Reports': 'Listar Relatórios de Inundações', 'List GPS data': 'List GPS data', 'List Groups': 'Listar grupos', 'List Groups/View Members': 'Listar Grupos/visualizar membros', 'List Homes': 'List Homes', 'List Hospitals': 'Listar de Hospitais', 'List Human Resources': 'Lista de Recursos Humanos', 'List Identities': 'Lista de Identidades', 'List Images': 'Lista de Imagens', 'List Impact Assessments': 'Lista de Avaliações De Impacto', 'List Impact Types': 'Lista de Tipos De Impacto', 'List Impacts': 'Lista de impactos', 'List Import Files': 'List Import Files', 'List Incident Reports': 'Lista de relatórios de incidentes', 'List Inventory Items': 'Listar ítens de inventário', 'List Item Categories': 'Listar categorias de ítens', 'List Item Packs': 'Lista pacotes de itens', 'List Items': 'Listar itens', 'List Items in Inventory': 'Lista de Itens no inventário', 'List Job Roles': 'Listar cargos', 'List Keys': 'Listar Chaves', 'List Kits': 'LISTAR Kits', 'List Layers': 'Listar Camadas', 'List Level 1 Assessments': 'Listar avaliações nível 1', 'List Level 1 assessments': 'Listar avaliação nível 1', 'List Level 2 Assessments': 'Listar avaliações nível 2', 'List Level 2 assessments': 'Listar avaliações nível 2', 'List Locations': 'Listar Localizações', 'List Log Entries': 'Listar as entradas de log', 'List Map Configurations': 'Listar configurações de mapa', 'List Markers': 'Listar marcadores', 'List Members': 'Lista de membros', 'List Memberships': 'Lista de associados', 'List Messages': 'Listar Mensagens', 'List Missing Persons': 'Lista de pessoas desaparecidas', 'List Missions': 'Listar Missões', 'List Need Types': 'Listar tipos de necessidades', 'List Needs': 'Lista de Necessidades', 'List Notes': 'Lista de Notas', 'List Offices': 'Lista de Escritórios', 'List Organizations': 'Listar Organizações', 'List Patients': 'List Patients', 'List Peers': 'LISTA DE PARES', 'List Personal Effects': 
'Lista de objetos pessoais', 'List Persons': 'LISTA DE PESSOAS', 'List Photos': 'Lista de Fotos', 'List Population Statistics': 'Lista das Estatisticas da População', 'List Positions': 'Lista de Posições', 'List Problems': 'Lista de Problemas', 'List Projections': 'Lista de Projeções', 'List Projects': 'Listar Projectos', 'List Rapid Assessments': 'Listar Avaliações Rápidas', 'List Received Items': 'Listar Elementos Recebidos', 'List Received Shipments': 'Listar Carga Recebida', 'List Records': 'Listar Registros', 'List Registrations': 'Listar Registrações', 'List Relatives': 'List Relatives', 'List Reports': 'Relatórios de Listas', 'List Request Items': 'Pedido de Itens de lista', 'List Requested Skills': 'List Requested Skills', 'List Requests': 'LISTA DE PEDIDOS', 'List Resources': 'Listar Recursos', 'List Rivers': 'Lista de Rios', 'List Roles': 'Listar Funções', 'List Rooms': 'Listar Salas', 'List Scenarios': 'Listar cenários', 'List Sections': 'lista de Seções', 'List Sectors': 'Lista de Sectores', 'List Sent Items': 'Os itens da lista Enviada', 'List Sent Shipments': 'Embarques lista Enviada', 'List Service Profiles': 'Lista de serviços Perfis', 'List Settings': 'Lista de configurações', 'List Shelter Services': 'Lista de serviços de abrigo', 'List Shelter Types': 'Lista de Tipos De Abrigo', 'List Shelters': 'Lista de Abrigos', 'List Skill Equivalences': 'LISTA DE HABILIDADE Equivalências', 'List Skill Provisions': 'Listar suprimento de habilidades', 'List Skill Types': 'Lista de Tipos De Habilidade', 'List Skills': 'LISTA DE HABILIDADES', 'List Solutions': 'Listar Soluções', 'List Staff': 'Listar Pessoal', 'List Staff Members': 'Listar funcionários', 'List Staff Types': 'Listar Tipos De Equipe', 'List Status': 'Listar Status', 'List Subscriptions': 'Lista de Assinaturas', 'List Subsectors': 'Listar Subsetores', 'List Support Requests': 'Listar Pedidos de Suporte', 'List Survey Answers': 'Listar Respostas de Pesquisa', 'List Survey Questions': 'Listar Perguntas da Pesquisa', 'List Survey Sections': 'Listar Seções da Pesquisa', 'List Survey Series': 'Listar Séries de Pesquisa', 'List Survey Templates': 'Listar Modelos de Pesquisa', 'List Tasks': 'Lista de Tarefas', 'List Teams': 'Lista de Equipes', 'List Themes': 'Lista de Temas', 'List Tickets': 'lista de Bilhetes', 'List Tracks': 'Rastreia lista', 'List Trainings': 'Listar Treinamentos', 'List Units': 'Lista de Unidades', 'List Users': 'Mostrar usuários', 'List Vehicle Details': 'List Vehicle Details', 'List Vehicles': 'List Vehicles', 'List Volunteers': 'Mostrar Voluntários', 'List Warehouses': 'Mostrar Depósitos', 'List all': 'Mostrar tudo', 'List available Scenarios': 'Listar Cenários Disponíveis', 'List of CSV files': 'List of CSV files', 'List of CSV files uploaded': 'List of CSV files uploaded', 'List of Items': 'Lista de Itens', 'List of Missing Persons': 'Lista de pessoas desaparecidas', 'List of Peers': 'Lista de pares', 'List of Reports': 'Lista de Relatórios', 'List of Requests': 'Lista de Pedidos', 'List of Spreadsheets': 'Lista de Folhas de Cálculo', 'List of Spreadsheets uploaded': 'Lista de Folhas de Cálculo transferidas', 'List of Volunteers': 'Lista de Voluntários', 'List of Volunteers for this skill set': 'Lista de Voluntários para este conjunto de competências', 'List of addresses': 'Lista de endereços', 'List unidentified': 'Lista não identificada', 'List/Add': 'Lista/incluir', 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': 'Lista "quem está fazendo o que & aonde". 
Permite a agências humanitárias coordenar suas atividades', 'Live Help': 'Ajuda ao vivo', 'Livelihood': 'Subsistência', 'Load Cleaned Data into Database': 'Carregue Informações Claras no Banco de Dados', 'Load Raw File into Grid': 'Carregamento de arquivo bruto na Grid', 'Loading': 'Carregando', 'Local Name': 'Nome local', 'Local Names': 'Nomes locais', 'Location': 'Localização', 'Location 1': 'Local 1', 'Location 2': 'Local 2', 'Location Details': 'Detalhes da Localização', 'Location Hierarchy Level 0 Name': 'Nivel Local de hierarquia 0 nome', 'Location Hierarchy Level 1 Name': 'Nivel local de hierarquia 1 nome', 'Location Hierarchy Level 2 Name': 'Nivel local de hierarquia 2 nome', 'Location Hierarchy Level 3 Name': 'Hierarquia local Nível 3 Nome', 'Location Hierarchy Level 4 Name': 'Hierarquia local Nível 4 Nome', 'Location Hierarchy Level 5 Name': 'Hierarquia local Nível 5 Nome', 'Location added': 'Local incluído', 'Location cannot be converted into a group.': 'Local não pode ser convertido em um grupo.', 'Location deleted': 'Localidade excluída', 'Location details': 'Detalhes do Local', 'Location group cannot be a parent.': 'Localização de grupo não pode ser um pai.', 'Location group cannot have a parent.': 'Localização de grupo não tem um pai.', 'Location groups can be used in the Regions menu.': 'Grupos local pode ser utilizado no menu Regiões.', 'Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group.': 'Grupos locais podem ser utilizados para filtrar o que é mostrado no mapa e nos resultados da procura apenas as entidades locais abrangidas no grupo.', 'Location updated': 'Local atualizado', 'Location:': 'Localização:', 'Location: ': 'Location: ', 'Locations': 'Localizações', 'Locations of this level need to have a parent of level': 'Locais de esse nível precisa ter um pai de nível', 'Lockdown': 'BLOQUEIO', 'Log': 'registro', 'Log Entry Details': 'detalhes da entrada de registro', 'Log entry added': 'Entrada de Log incluída', 'Log entry deleted': 'Entrada de Log Excluída', 'Log entry updated': 'Entrada de Log de atualização', 'Login': 'login', 'Logistics': 'Logística', 'Logistics Management System': 'Sistema de Gestão de Logística', 'Logo': 'Logotipo', 'Logo file %s missing!': 'Arquivo de logotipo %s ausente!', 'Logout': 'Deslogar', 'Long Text': 'Texto Longo', 'Longitude': 'Longitude', 'Longitude is West - East (sideways).': 'Longitude é Oeste - Leste (lateral).', 'Longitude is West-East (sideways).': 'Longitude é leste-oeste (direções).', 'Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Longitude é zero no primeiro meridiano (Greenwich Mean Time) e é positivo para o leste, em toda a Europa e Ásia. Longitude é negativo para o Ocidente, no outro lado do Atlântico e nas Américas.', 'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Longitude é zero no primeiro meridiano (por meio de Greenwich, Reino Unido) e é positivo para o leste, em toda a Europa e Ásia. 
Longitude é negativo para o Ocidente, no outro lado do Atlântico e nas Américas.', 'Longitude of Map Center': 'Longitude do Centro do Mapa', 'Longitude of far eastern end of the region of interest.': 'Longitude longe do Oeste no final da região de interesse.', 'Longitude of far western end of the region of interest.': 'Longitude de oeste longínquo no final da Região de interesse.', 'Longitude should be between': 'Longitude deve estar entre', 'Looting': 'Saques', 'Lost': 'Perdido', 'Lost Password': 'Senha Perdida', 'Low': 'Baixo', 'Magnetic Storm': 'Tempestade magnética', 'Major Damage': 'Grandes danos', 'Major expenses': 'Despesas principais', 'Major outward damage': 'Danos exteriores principais', 'Make Commitment': 'Ter obrigação', 'Make New Commitment': 'Fazer Novo Compromisso', 'Make Request': 'Fazer Pedido', 'Make preparations per the <instruction>': 'Fazer Preparações por', 'Male': 'masculino', 'Manage': 'Gerenciar', 'Manage Events': 'Manage Events', 'Manage Relief Item Catalogue': 'Gerenciar Catálogo de Item de Alívio', 'Manage Users & Roles': 'GERENCIAR Usuários & Funções', 'Manage Vehicles': 'Manage Vehicles', 'Manage Warehouses/Sites': 'GERENCIAR Armazéns/Sites', 'Manage Your Facilities': 'Gerenciar suas instalações', 'Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.': 'Gerenciar pedidos de suprimentos, patrimônio, pessoal ou outros recursos. Corresponde aos estoques onde os suprimentos são solicitados.', 'Manage requests of hospitals for assistance.': 'GERENCIAR Pedidos de hospitais para obter assistência.', 'Manage volunteers by capturing their skills, availability and allocation': 'GERENCIAR voluntários por captura sua capacidade, Alocação e disponibilidade', 'Manager': 'Gerente', 'Managing Office': 'Gerenciando Office', 'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'Obrigatório. Em GeoServer, este é o nome Da Camada. No getCapabilities WFS, este é o nome da parte FeatureType após os dois pontos (:).', 'Mandatory. The URL to access the service.': 'Obrigatório. 
A URL para acessar o serviço.', 'Manual': 'Manual', 'Manual Synchronization': 'Sincronização Manual', 'Many': 'Muitos', 'Map': 'Mapa', 'Map Center Latitude': 'Latitude do Centro do Mapa', 'Map Center Longitude': 'Longitude do centro do mapa', 'Map Configuration': 'Configuração de Mapa', 'Map Configuration Details': 'Detalhes de configuração de mapa', 'Map Configuration added': 'Configuração de mapa incluído', 'Map Configuration deleted': 'Configuração de mapa excluído', 'Map Configuration removed': 'Configuração de mapa removido', 'Map Configuration updated': 'Configuração de mapa atualizada', 'Map Configurations': 'Configuracões de mapa', 'Map Height': 'Altura do Mapa', 'Map Service Catalog': 'Catálogo do serviço de mapas', 'Map Settings': 'Configurações do Mapa', 'Map Viewing Client': 'Cliente de visualização do mapa', 'Map Width': 'Largura do mapa', 'Map Zoom': 'Zoom do mapa', 'Map of Hospitals': 'Mapa de Hospitais', 'MapMaker Hybrid Layer': 'MapMaker Hybrid Layer', 'MapMaker Layer': 'MapMaker Layer', 'Maps': 'Maps', 'Marine Security': 'Segurança da marina', 'Marital Status': 'Estado Civil', 'Marker': 'Marcador', 'Marker Details': 'Detalhes do Marcador', 'Marker added': 'Marcador incluído', 'Marker deleted': 'Marcador removido', 'Marker updated': 'Marcador atualizado', 'Markers': 'Marcadores', 'Master': 'Master', 'Master Message Log': 'Mensagem de Log principal', 'Master Message Log to process incoming reports & requests': 'Log de Mensagem Principal para processar relatórios de entrada e pedidos', 'Match Percentage': 'Porcentagem de correspondência', 'Match Requests': 'Corresponder Pedidos', 'Match percentage indicates the % match between these two records': 'Porcentagem idêntica indica a % idêntica entre estes dois registros.', 'Match?': 'Combina?', 'Matching Catalog Items': 'Catálogo de itens correspondentes', 'Matching Items': 'Itens correspondentes', 'Matching Records': 'Registros de correspondência', 'Matrix of Choices (Multiple Answers)': 'Matrix de Opções (Respostas Múltiplas)', 'Matrix of Choices (Only one answer)': 'Matrix de Opções (Apenas uma resposta)', 'Matrix of Text Fields': 'Matriz de campos de texto', 'Max Persons per Dwelling': 'Máx. Pessoas por Habitação', 'Maximum Location Latitude': 'Latitude máxima local', 'Maximum Location Longitude': 'Longitude máxima local', 'Medical and public health': 'Saúde Médica e Pública', 'Medium': 'Médio', 'Megabytes per Month': 'Megabytes por mês', 'Members': 'membros', 'Membership': 'Membresia', 'Membership Details': 'Detalhes de Associação', 'Membership added': 'Associação incluído', 'Membership deleted': 'Associação Excluída', 'Membership updated': 'Associação ATUALIZADO', 'Memberships': 'Parcelas', 'Message': 'message', 'Message Details': 'deatlhes de mesagens', 'Message Variable': 'Mensagem variável', 'Message added': 'Mensagem incluída', 'Message deleted': 'Mensagem Excluída', 'Message field is required!': 'Campo mensagem é obrigatório!', 'Message updated': 'Mensagem atualizada', 'Message variable': 'Mensagem variável', 'Messages': 'mensagens.', 'Messaging': 'sistema de mensagens', 'Messaging settings updated': 'Configurações de mensagens atualizadas', 'Meteorite': 'Meteorito', 'Meteorological (inc. flood)': 'Meteorológico (inc. 
Enchente)', 'Method used': 'Método utilizado', 'Middle Name': 'Nome do meio', 'Migrants or ethnic minorities': 'Imigrantes ou minorias étnicas', 'Mileage': 'Mileage', 'Military': 'Militares', 'Minimum Bounding Box': 'Caixa Delimitadora Mínima', 'Minimum Location Latitude': 'Latitude Mínima de Localização', 'Minimum Location Longitude': 'Longitude de Localização Mínima', 'Minimum shift time is 6 hours': 'tempo mínimo de turno é de 6 horas', 'Minor Damage': 'Dano secundário', 'Minor/None': 'Secundária/Nenhum', 'Minorities participating in coping activities': 'Minorias participando em atividades de enfrentamento', 'Minute': 'Minuto', 'Minutes must be a number between 0 and 60': 'Minutos devem ser um número entre 0 e 60', 'Minutes per Month': 'Minutos por Mês', 'Minutes should be a number greater than 0 and less than 60': 'Minutos devem ser um número maior que 0 e menor que 60', 'Miscellaneous': 'Variados', 'Missing': 'Desaparecido', 'Missing Person': 'Pessoa desaparecida', 'Missing Person Details': 'Detalhes da pessoa desaparecida', 'Missing Person Registry': 'Registro de Pessoa Desaparecida', 'Missing Person Reports': 'Relatórios da pessoa desaparecida', 'Missing Persons': 'Pessoas desaparecidas', 'Missing Persons Registry': 'Registro de pessoas desaparecidas', 'Missing Persons Report': 'Relatório de pessoas desaparecidas', 'Missing Report': 'Relatório de desaparecimento', 'Missing Senior Citizen': 'Cidadão sênior desaparecido', 'Missing Vulnerable Person': 'Pessoa vulnerável desaparecida', 'Mission Details': 'Detalhes da Missão', 'Mission Record': 'Registro da Missão', 'Mission added': 'Missão incluída', 'Mission deleted': 'Missão excluída', 'Mission updated': 'Missão atualizada', 'Missions': 'Missões', 'Mobile': 'telefone celular', 'Mobile Basic Assessment': 'Avaliação básica móvel', 'Mobile Phone': 'Telefone celular', 'Mode': 'modo', 'Model/Type': 'Modelo/Tipo', 'Modem': 'Modem', 'Modem Settings': 'Configurações do Modem', 'Modem settings updated': 'Configurações de modem atualizadas', 'Moderate': 'moderate', 'Moderator': 'moderator', 'Modify Information on groups and individuals': 'Modificar Informações sobre grupos e pessoas', 'Modifying data in spreadsheet before importing it to the database': 'Modificando dados na planilha antes de importá-los para o banco de dados', 'Module': 'Módulo', 'Module disabled!': 'Módulo desativado!', 'Module provides access to information on current Flood Levels.': 'Módulo fornece acesso a informações sobre os níveis atuais de inundação.', 'Monday': 'segunda-feira', 'Monthly Cost': 'Custo mensal', 'Monthly Salary': 'Salário mensal', 'Months': 'meses', 'Morgue': 'Morgue', 'Morgue Details': 'Morgue Details', 'Morgue Status': 'Situação do necrotério', 'Morgue Units Available': 'Unidades disponíveis no necrotério', 'Morgues': 'Morgues', 'Mosque': 'Mesquita', 'Motorcycle': 'Motocicleta', 'Moustache': 'Bigode', 'MultiPolygon': 'multipolygon', 'Multiple': 'Múltiplos', 'Multiple Choice (Multiple Answers)': 'Múltipla escolha (Várias Respostas)', 'Multiple Choice (Only One Answer)': 'Múltipla Escolha (Apenas uma resposta)', 'Multiple Matches': 'Múltiplas Correspondências', 'Multiple Text Fields': 'Vários campos de texto', 'Muslim': 'Muçulmano', 'Must a location have a parent location?': 'Um local deve ter uma posição pai?', 'My Current function': 'Minha função Atual', 'My Details': 'My Details', 'My Tasks': 'Minhas tarefas', 'My Volunteering': 'My Volunteering', 'N/A': 'n/d', 'NO': 'NÃO', 'NZSEE Level 1': 'NZSEE Nível 1', 'NZSEE Level 2': 'NZSEE Nível 2', 'Name': 'nome', 'Name and/or ID': 'Nome E/OU ID', 
'Name of the file (& optional sub-path) located in static which should be used for the background of the header.': 'O nome do arquivo (& sub OPCIONAL-path) localizado no estáticamente que deve ser utilizado para o segundo plano do Cabeçalho.', 'Name of the file (& optional sub-path) located in static which should be used for the top-left image.': 'Nome do arquivo (e sub-caminho opcional) localizado estático que deveria ser utilizado para a imagem superior esquerda.', 'Name of the file (& optional sub-path) located in views which should be used for footer.': 'Nome do arquivo (e sub-caminho opcional) localizado nas visualizações que deve ser utilizado no rodapé.', 'Name of the person in local language and script (optional).': 'Nome da pessoa no idioma local e script local (opcional).', 'Name or Job Title': 'Nome ou cargo', 'Name, Org and/or ID': 'Nome, organização e/ou ID.', 'Name/Model/Type': 'Nome/Modelo/Tipo', 'Names can be added in multiple languages': 'Nomes podem ser adicionados em múltiplos idiomas', 'National': 'Nacional', 'National ID Card': 'Cartão de ID Nacional', 'National NGO': 'Nacional ONG', 'Nationality': 'Nacionalidade', 'Nationality of the person.': 'Nacionalidade da pessoa.', 'Nautical Accident': 'Acidente Náutico', 'Nautical Hijacking': 'Sequestro Náutico', 'Need Type': 'Precisa de Tipo', 'Need Type Details': 'Tipo precisa de Detalhes', 'Need Type added': 'Precisa de tipo incluído', 'Need Type deleted': 'Precisa de Tipo excluído', 'Need Type updated': 'Tipo de necessidade atualizada', 'Need Types': 'Tipos de necessidade', "Need a 'url' argument!": "Precisa de um argumento ' url!", 'Need added': 'Necessidade incluída', 'Need deleted': 'Necessidade excluída', 'Need to be logged-in to be able to submit assessments': 'Precisa estar conectado ao programa para conseguir submeter avaliações', 'Need to configure Twitter Authentication': 'Precisa configurar a autenticação do Twitter', 'Need to specify a Budget!': 'É necessário especificar um orçamento!', 'Need to specify a Kit!': 'É necessário especificar um Kit!', 'Need to specify a Resource!': 'É necessário especificar um recurso!', 'Need to specify a bundle!': 'É necessário especificar um pacote!', 'Need to specify a group!': 'É necessário especificar um grupo!', 'Need to specify a location to search for.': 'É necessário especificar um local para procurar.', 'Need to specify a role!': 'Será necessário especificar um papel!', 'Need to specify a table!': 'Será necessário especificar uma tabela!', 'Need to specify a user!': 'Será necessário especificar um usuário!', 'Need updated': 'Precisa de atualização', 'Needs': 'necessidades', 'Needs Details': 'detalhes necessarios', 'Needs Maintenance': 'Necessita Manutenção', 'Needs to reduce vulnerability to violence': 'Necessidade de reduzir a vulnerabilidade à violência.', 'Negative Flow Isolation': 'NEGATIVO Fluxo ISOLAMENTO', 'Neighborhood': 'Bairro', 'Neighbouring building hazard': 'Risco de construção vizinhos', 'Neonatal ICU': 'Neonatal ICU', 'Neonatology': 'Neonatologia', 'Network': 'rede', 'Neurology': 'Neurologia', 'New': 'Novo(a)', 'New Assessment reported from': 'Nova Avaliação relatada a partir de', 'New Certificate': 'Novo Certificado', 'New Checklist': 'Nova Verificação', 'New Entry': 'Nova Entrada', 'New Event': 'Novo Evento', 'New Home': 'New Home', 'New Item Category': 'Nova Categoria de Ítem', 'New Job Role': 'Novo Papel', 'New Location': 'Novo Local', 'New Location Group': 'Novo Grupo de Locais', 'New Patient': 'New Patient', 'New Peer': 'Novo Par', 'New Record': 'Novo 
Registro', 'New Relative': 'New Relative', 'New Request': 'Nova Requisição', 'New Scenario': 'Novo Cenário', 'New Skill': 'Nova Habilidade', 'New Solution Choice': 'Escolha nova solução', 'New Staff Member': 'Novo membro da equipe', 'New Support Request': 'Novo pedido de suporte', 'New Synchronization Peer': 'Novo par de sincronização', 'New Team': 'Nova equipe', 'New Ticket': 'New Ticket', 'New Training Course': 'Novo Curso de Treinamento', 'New Volunteer': 'Novo Voluntário', 'New cases in the past 24h': 'Novos casos nas últimas 24H', 'News': 'Notícias', 'Next': 'Seguinte', 'No': 'no', 'No Activities Found': 'Não há actividades', 'No Activities currently registered in this event': 'No Activities currently registered in this event', 'No Alternative Items currently registered': 'Nenhum item alternativo atualmente registrado', 'No Assessment Summaries currently registered': 'Nenhum Sumário De Avaliação actualmente registrado', 'No Assessments currently registered': 'Nenhuma Avaliação actualmente registrada', 'No Asset Assignments currently registered': 'Nenhum ativo designado encontra-se atualmente registrado', 'No Assets currently registered': 'Sem Ativos registrados atualmente', 'No Assets currently registered in this event': 'Sem ativos atualmente registrados neste evento', 'No Assets currently registered in this scenario': 'Sem ativos atualmente registrados neste cenário', 'No Baseline Types currently registered': 'Nenhum tipo de base line registrado atualmente', 'No Baselines currently registered': 'Nenhuma linha base registrada atualmente', 'No Brands currently registered': 'Sem Marcas atualmente registrado', 'No Budgets currently registered': 'Nenhum Dos Orçamentos registrados atualmente', 'No Bundles currently registered': 'Nenhum pacote atualmente registrado', 'No Camp Services currently registered': 'Nenhum serviço de acampamento atualmente registrado', 'No Camp Types currently registered': 'Nenhum tipo de acampamento atualmente registrado', 'No Camps currently registered': 'Sem Acampamentos atualmente registrados', 'No Catalog Items currently registered': 'Nenhum itens do catálogo registrado atualmente', 'No Catalogs currently registered': 'Nenhum catálogo atualmente registrado', 'No Checklist available': 'Checklist não disponível', 'No Cluster Subsectors currently registered': 'Nenhum sub-setor de cluster registrado atualmente', 'No Clusters currently registered': 'Nenhum Cluster registrado atualmente', 'No Commitment Items currently registered': 'Nenhum Item de Compromisso registrado atualmente', 'No Commitments': 'Sem Compromissos', 'No Credentials currently set': 'Nenhuma credencial atualmente configurada', 'No Details currently registered': 'Nenhum detalhes registrado atualmente', 'No Documents currently attached to this request': 'No Documents currently attached to this request', 'No Documents found': 'Nenhum Documento encontrado', 'No Donors currently registered': 'Sem doadores registrados atualmente', 'No Events currently registered': 'Não há eventos atualmente registrados', 'No Facilities currently registered in this event': 'Não há Recursos atualmente registrado nesse evento', 'No Facilities currently registered in this scenario': 'Não há recursos atualmente registrados neste cenário', 'No Feature Classes currently defined': 'Nenhuma Classe de Componentes atualmente definidos', 'No Feature Layers currently defined': 'Nenhuma Camada de Componentes atualmente definidos', 'No Flood Reports currently registered': 'Nenhum relatório de Inundação atualmente registrado', 'No GPS 
data currently registered': 'No GPS data currently registered', 'No Groups currently defined': 'Não há Grupos definidos atualmente', 'No Groups currently registered': 'Nenhum Grupo atualmente registrado', 'No Homes currently registered': 'No Homes currently registered', 'No Hospitals currently registered': 'Nenhum hospital atualmente registrado', 'No Human Resources currently registered in this event': 'Nao há recursos humanos atualmente registrados nesse evento', 'No Human Resources currently registered in this scenario': 'Sem recursos humanos atualmente registrados neste cenário', 'No Identification Report Available': 'Nenhum Relatório de Identificação Disponível', 'No Identities currently registered': 'Nenhuma Identidade atualmente registrada', 'No Image': 'Nenhuma Imagem', 'No Images currently registered': 'Nenhuma Imagem atualmente registrada', 'No Impact Types currently registered': 'Nenhum tipo de impacto atualmente registrado', 'No Impacts currently registered': 'Nenhum Impacto atualmente registrado', 'No Import Files currently uploaded': 'No Import Files currently uploaded', 'No Incident Reports currently registered': 'Nenhum relatório de incidente registrado atualmente', 'No Incoming Shipments': 'Nenhum Embarque de Entrada', 'No Inventories currently have suitable alternative items in stock': 'No Inventories currently have suitable alternative items in stock', 'No Inventories currently have this item in stock': 'No Inventories currently have this item in stock', 'No Inventory Items currently registered': 'Nenhum Item de Inventário registrado atualmente', 'No Item Categories currently registered': 'Nenhuma Categoria de Item atualmente registrada', 'No Item Packs currently registered': 'Nenhum Pacote de Itens atualmente registrado', 'No Items currently registered': 'Nenhum item registrado atualmente', 'No Items currently registered in this Inventory': 'Sem itens registrados atualmente neste inventário', 'No Keys currently defined': 'Nenhuma chave definida no momento', 'No Kits currently registered': 'Nenhum kit registrado no momento', 'No Level 1 Assessments currently registered': 'Nenhuma avaliação nível 1 registrada no momento', 'No Level 2 Assessments currently registered': 'Nenhum nível 2 Avaliações atualmente registrado', 'No Locations currently available': 'Locais Não disponíveis atualmente', 'No Locations currently registered': 'Locais Não registrados atualmente', 'No Map Configurations currently defined': 'Nenhuma configuração de Mapa estão atualmente definidos', 'No Map Configurations currently registered in this event': 'Nenhuma configuração de Mapa esta atualmente registrado nesse evento', 'No Map Configurations currently registered in this scenario': 'Nenhuma configuração de Mapa está atualmente registrado neste cenário', 'No Markers currently available': 'Não há marcadores atualmente disponíveis', 'No Match': 'Sem correspondência', 'No Matching Catalog Items': 'Nenhum Item de Catálogo Correspondente', 'No Matching Items': 'Sem itens correspondentes', 'No Matching Records': 'Sem registros correspondentes', 'No Members currently registered': 'Sem membros registrados atualmente', 'No Memberships currently defined': 'Sem Associações definidas atualmente', 'No Memberships currently registered': 'Sem Associações registradas atualmente', 'No Messages currently in Outbox': 'Nenhuma mensagem na Caixa de saída', 'No Need Types currently registered': 'Sem necessidade, Tipos atualmente registrados', 'No Needs currently registered': 'Sem necessidade, atualmente registrado', 'No 
Offices currently registered': 'Nenhum Escritório registrado atualmente', 'No Offices found!': 'Nenhum Escritório localizado!', 'No Organizations currently registered': 'Nenhuma Organização atualmente registrada', 'No Packs for Item': 'No Packs for Item', 'No Patients currently registered': 'No Patients currently registered', 'No People currently committed': 'No People currently committed', 'No People currently registered in this camp': 'Nenhuma pessoa registrada atualmente neste campo', 'No People currently registered in this shelter': 'Nenhuma pessoa registrada atualmente neste abrigo', 'No Persons currently registered': 'Nenhuma pessoa atualmente registrada', 'No Persons currently reported missing': 'nenhuma pessoa reportada atualmente como perdida', 'No Persons found': 'Nenhuma pessoa localizada', 'No Photos found': 'Nenhuma Foto localizada', 'No Picture': 'Nenhuma imagem', 'No Population Statistics currently registered': 'Nenhuma estatística populacional atualmente registrada', 'No Presence Log Entries currently registered': 'Nenhuma entrada no log Presença atualmente registrado', 'No Problems currently defined': 'Nenhum Problema atualmente definido', 'No Projections currently defined': 'Nenhuma projeção atualmente definida', 'No Projects currently registered': 'Nenhum projeto atualmente registrado', 'No Rapid Assessments currently registered': 'Nenhuma Avaliação Rápida atualmente registrada', 'No Ratings for Skill Type': 'No Ratings for Skill Type', 'No Received Items currently registered': 'Nenhum item recebido atualmente registrado', 'No Received Shipments': 'Entregas/Despachos não recebidos', 'No Records currently available': 'Registros atualmente não disponíveis', 'No Relatives currently registered': 'No Relatives currently registered', 'No Request Items currently registered': 'Não há itens de Pedidos registrados', 'No Requests': 'Não há pedidos', 'No Rivers currently registered': 'Nenhum Rio atualmente registrado', 'No Roles currently defined': 'Nenhumas funções atualmente definidas', 'No Rooms currently registered': 'Nenhuma sala atualmente registrada', 'No Scenarios currently registered': 'Nenhum cenário atualmente registrado', 'No Sections currently registered': 'Sem seções atualmente registradas', 'No Sectors currently registered': 'Nenhum setor atualmente registrado', 'No Sent Items currently registered': 'Nenhum item Enviado atualmente registrado', 'No Sent Shipments': 'Nenhum carregamento enviado', 'No Settings currently defined': 'Nenhuma configuração atualmente definida', 'No Shelter Services currently registered': 'nenhum serviço de abrigo atualmente registrado', 'No Shelter Types currently registered': 'Nenhum tipo de abrigo registrado atualmente', 'No Shelters currently registered': 'Nenhum abrigo atualmente registrado', 'No Skills currently requested': 'No Skills currently requested', 'No Solutions currently defined': 'Sem Soluções actualmente definidas', 'No Staff Types currently registered': 'Sem Tipos de Funcionários actualmente registrados', 'No Staff currently registered': 'Sem Funcionários actualmente registrados', 'No Subscription available': 'Nenhuma assinatura disponível', 'No Subsectors currently registered': 'Nenhum sub setor atualmente registrado', 'No Support Requests currently registered': 'Nenhum suporte a pedido atualmente registrado', 'No Survey Answers currently entered.': 'Nenhuma resposta de pesquisa atualmente inscrita.', 'No Survey Answers currently registered': 'Nenhuma resposta a pesquisa atualmente registrada', 'No Survey Questions currently 
registered': 'Nenhuma pergunta de pesquisa atualmente registrada', 'No Survey Sections currently registered': 'Nenhuma seção de pesquisa atualmente registrada', 'No Survey Series currently registered': 'Nenhuma série de pesquisa atualmente registrada', 'No Survey Template currently registered': 'Nenhum Modelo de Pesquisa atualmente registrado', 'No Tasks currently registered in this event': 'No Tasks currently registered in this event', 'No Tasks currently registered in this scenario': 'No Tasks currently registered in this scenario', 'No Tasks with Location Data': 'Nenhuma tarefa com local de dados', 'No Teams currently registered': 'Nenhuma equipe atualmente registrada', 'No Themes currently defined': 'Nenhum Tema atualmente definido', 'No Tickets currently registered': 'Sem ingressos atualmente registrados', 'No Tracks currently available': 'nenhum rastreamento atualmente disponível', 'No Users currently registered': 'Nenhum Usuário actualmente registrado', 'No Vehicle Details currently defined': 'No Vehicle Details currently defined', 'No Vehicles currently registered': 'No Vehicles currently registered', 'No Volunteers currently registered': 'Nenhum Voluntário actualmente registrado', 'No Warehouses currently registered': 'Nenhum Armazém actualmente registrado', 'No access at all': 'Nenhum acesso', 'No access to this record!': 'Não há acesso a esta entrada!', 'No action recommended': 'Nenhuma acção recomendada', 'No conflicts logged': 'Nenhum conflito registrado', 'No contact information available': 'Nenhuma informações de contato disponível', 'No contact method found': 'No contact method found', 'No contacts currently registered': 'Nenhum contato atualmente registrado', 'No data in this table - cannot create PDF!': 'Nenhum dado nesta tabela- PDF não pode ser criado!', 'No databases in this application': 'Nenhum banco de dados neste aplicativo', 'No dead body reports available': 'Nenhum relatório de óbito disponível', 'No entries found': 'Nenhum artigo encontrado', 'No entries matching the query': 'Nenhuma entrada correspondente a consulta', 'No entry available': 'Nenhuma entrada disponível', 'No forms to the corresponding resource have been downloaded yet.': 'No forms to the corresponding resource have been downloaded yet.', 'No location known for this person': 'Nenhum local conhecido para essa pessoa', 'No locations found for members of this team': 'Locais não localizado para membros deste equipe', 'No log entries matching the query': 'Nenhuma entrada de log correspondente a consulta', 'No match': 'No match', 'No matching records found': 'No matching records found', 'No messages in the system': 'Nenhuma mensagem no sistema', 'No notes available': 'Notas não disponíveis', 'No peers currently registered': 'Não há pares registrados atualmente', 'No pending registrations found': 'Não foram encontrados registros pendentes', 'No pending registrations matching the query': 'Não foram encontrados registros pendentes correspondentes à consulta efetuada', 'No person record found for current user.': 'Nenhum registro de pessoa localizado para o usuário atual.', 'No problem group defined yet': 'Nenhum grupo problema definido ainda', 'No records matching the query': 'Sem registros correspondentes a consulta', 'No report available.': 'Nenhum Relatório disponível.', 'No reports available.': 'Não há relatórios disponíveis.', 'No reports currently available': 'Não há relatórios disponíveis actualmente', 'No requests found': 'Não foram foram encontrados pedidos', 'No resources currently reported': 
'Recursos não reportados actualmente', 'No service profile available': 'Nenhum perfil de serviço disponível', 'No skills currently set': 'Não há habilidades atualmente configuradas', 'No staff members currently registered': 'Nenhum membro da equipe atualmente registrado', 'No staff or volunteers currently registered': 'Nenhum funcionário ou voluntário atualmente registrado', 'No status information available': 'Informação não está disponível', 'No synchronization': 'Sem Sincronização', 'No tasks currently assigned': 'No tasks currently assigned', 'No tasks currently registered': 'Nenhuma tarefa atualmente registrada', 'No template found!': 'Nenhum modelo localizado!', 'No units currently registered': 'Nenhuma unidade actualmente registrada', 'No volunteer availability registered': 'Sem disponibilidade de voluntário registrada', 'No volunteers currently registered': 'Nenhum Voluntário actualmente registrado', 'Non-structural Hazards': 'Riscos não-estruturais', 'None': 'Nenhum', 'None (no such record)': 'Nenhum (sem registro )', 'Noodles': 'Macarrão', 'Normal': 'Normal', 'Not Applicable': 'Não se aplica', 'Not Authorised!': 'Não Autorizado!', 'Not Possible': 'Impossível', 'Not Set': 'não configurado', 'Not authorised!': 'Não autorizado!', 'Not installed or incorrectly configured.': 'Não instalado ou Configurado Incorretamente.', 'Note': 'Nota', 'Note Details': 'Detalhes da Nota', 'Note Status': 'Status da Nota', 'Note Type': 'Tipo de nota', 'Note added': 'Nota Incluída', 'Note deleted': 'NOTA Excluída', 'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead': 'Observer que essa lista mostra apenas voluntários ativos. Para ver todas as pessoas registradas no sistema, procure a partir deste ecrã em vez de', 'Note updated': 'Nota atualizada', 'Notes': 'Observações', 'Notice to Airmen': 'Aviso ao piloto', 'Number': 'número', 'Number of Columns': 'Número de colunas', 'Number of Patients': 'Número de Pacientes', 'Number of People Required': 'Number of People Required', 'Number of Rows': 'Número de Linhas', 'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': 'Número de camas adicionais de tipo esperado tornar disponível nesta unidade nas próximas 24 horas.', 'Number of alternative places for studying': 'Número de locais alternativos para estudar', 'Number of available/vacant beds of that type in this unit at the time of reporting.': 'Número de camas disponíveis/livre desse tipo nesta unidade no momento do relatório.', 'Number of bodies found': 'Number of bodies found', 'Number of deaths during the past 24 hours.': 'Número de mortes durante as últimas 24 horas.', 'Number of discharged patients during the past 24 hours.': 'Número de pacientes Descarregados durante as últimas 24 horas.', 'Number of doctors': 'Número de médicos', 'Number of in-patients at the time of reporting.': 'Número de pacientes internos na hora do relatório.', 'Number of newly admitted patients during the past 24 hours.': 'Número de pacientes admitidos durante as últimas 24 horas.', 'Number of non-medical staff': 'Número de funcionários não-médico', 'Number of nurses': 'Número de enfermeiras', 'Number of private schools': 'Número de escolas privadas', 'Number of public schools': 'Número de escolas públicas', 'Number of religious schools': 'Número de escolas religiosas', 'Number of residential units': 'Número de unidades residenciais', 'Number of residential units not habitable': 'Unidades de número 
residencial não habitáveis', 'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': 'Número de leitos vagos/disponíveis nesse hospital. Atualizado automaticamente a partir de relatórios diários.', 'Number of vacant/available units to which victims can be transported immediately.': 'Número de unidades vagas/disponíveis em que vítimas podem ser transportadas imediatamente.', 'Number or Label on the identification tag this person is wearing (if any).': 'Número ou código na etiqueta de identificação que a pessoa está usando (se houver).', 'Number or code used to mark the place of find, e.g. flag code, grid coordinates, site reference number or similar (if available)': 'Número ou código utilizado para marcar o local de localização, por exemplo, código de bandeira, grade de coordenadas, número de referência do site ou similar (se disponível)', 'Number/Percentage of affected population that is Female & Aged 0-5': 'Número/percentagem da população afetada que é uma mulher entre 0 e 5 anos', 'Number/Percentage of affected population that is Female & Aged 13-17': 'Número/percentagem da população afetadas do sexo feminino entre 13 e 17 anos', 'Number/Percentage of affected population that is Female & Aged 18-25': 'Número/percentagem da população afetada que é Mulher com 18-25 anos', 'Number/Percentage of affected population that is Female & Aged 26-60': 'Número/percentagem da população afetada que é Mulher com 26-60 anos', 'Number/Percentage of affected population that is Female & Aged 6-12': 'Número/percentagem da população afetada que é Mulher com 6-12 anos', 'Number/Percentage of affected population that is Female & Aged 61+': 'Número/percentagem da população afetada que é Mulher > 61 anos', 'Number/Percentage of affected population that is Male & Aged 0-5': 'Número/percentagem da população afetada que é Homem com 0-5 anos', 'Number/Percentage of affected population that is Male & Aged 13-17': 'Número/percentagem da população afetada que é Homem com 13-17 anos', 'Number/Percentage of affected population that is Male & Aged 18-25': 'Número/percentagem da população afetada que é Homem com 18-25 anos', 'Number/Percentage of affected population that is Male & Aged 26-60': 'Número/percentagem de população afetada que é do sexo masculino & Idade 26-60', 'Number/Percentage of affected population that is Male & Aged 6-12': 'Número/percentagem de população afectada que é do sexo masculino & Idade 6-12', 'Number/Percentage of affected population that is Male & Aged 61+': 'Número/percentagem da população afetada que é do sexo masculino & Idade 61+', 'Nursery Beds': 'Camas de berçario', 'Nutrition': 'Nutrição', 'Nutrition problems': 'Problemas nutricionais', 'OK': 'OK', 'OR Reason': 'Ou Razão', 'OR Status': 'Ou Status', 'OR Status Reason': 'Ou razão do status', 'OR a site OR a location': 'OU um site OU um local', 'Observer': 'observador', 'Obsolete': 'Obsoleto', 'Obstetrics/Gynecology': 'Obstetrícia/Ginecologia', 'Office': 'escritório', 'Office Address': 'Endereço do escritório', 'Office Details': 'Detalhes do Escritório.', 'Office Phone': 'Telefone do escritório', 'Office added': 'Escritório', 'Office deleted': 'Escritório excluído', 'Office updated': 'Escritório atualizado', 'Offices': 'Escritórios', 'Offices & Warehouses': 'Escritórios & Armazéns', 'Offline Sync': 'Sincronização desconectada.', 'Offline Sync (from USB/File Backup)': 'Off-line (Sync a partir do USB/arquivo de Backup)', 'Older people as primary caregivers of children': 'Pessoas mais velhas como 
responsáveis primárias de crianças', 'Older people in care homes': 'Pessoas mais velhas em casas de cuidados', 'Older people participating in coping activities': 'Pessoas mais antigos participantes em lidar atividades', 'Older person (>60 yrs)': 'Idosos (>60 anos)', 'On by default?': 'Por padrão?', 'On by default? (only applicable to Overlays)': 'Por padrão? (apenas aplicável para Sobreposições)', 'One Time Cost': 'Custo Único', 'One time cost': 'Custo único', 'One-time': 'Único', 'One-time costs': 'Custos únicos', 'Oops! Something went wrong...': 'Oops! Algo deu errado...', 'Oops! something went wrong on our side.': 'Oops! algo deu errado do nosso lado.', 'Opacity (1 for opaque, 0 for fully-transparent)': 'Opacidade (1 para opaco, 0 para totalmente transparente)', 'Open': 'Abrir', 'Open area': 'Abrir área', 'Open recent': 'Abrir recente', 'Operating Rooms': 'Salas operacionais', 'Optional': 'Optional', 'Optional Subject to put into Email - can be used as a Security Password by the service provider': 'Optional Subject to put into Email - can be used as a Security Password by the service provider', 'Optional link to an Incident which this Assessment was triggered by.': 'Link opcional para um incidente que esta avaliação foi desencadeada por.', 'Optional selection of a MapServer map.': 'Optional selection of a MapServer map.', 'Optional selection of a background color.': 'Optional selection of a background color.', 'Optional selection of an alternate style.': 'Optional selection of an alternate style.', 'Optional. If you wish to style the features based on values of an attribute, select the attribute to use here.': 'opcional Se você desejar apresenta o estilo com base nos valores de um atributo, Selecione o atributo a ser utilizado aqui.', 'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'opcional Em GeoServer, esta é a área de trabalho Namespace URI (não o nome!). Dentro do getCapabilities WFS, este é parte do nome FeatureType antes dos dois pontos (:).', 'Optional. In GeoServer, this is the Workspace Namespace URI. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'optional. Em GeoServer, este é o espaço de Nomes URI. No getCapabilities WFS, este é o nome da parte FeatureType antes de os dois pontos (:).', 'Optional. The name of an element whose contents should be a URL of an Image file put into Popups.': 'opcional O nome de um elemento cujo conteúdo deve ser uma URL de um arquivo de imagem para Popups.', 'Optional. The name of an element whose contents should be put into Popups.': 'opcional O nome de um elemento cujo conteúdo deve ser adicionado em Popups.', "Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "opcional O nome da coluna de geometria. Em PostGIS padroniza para 'the_geom'.", 'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.': 'opcional O nome do esquema. 
Em Geoserver isto tem o formato http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.', 'Options': 'opções', 'Organisation': 'Organização', 'Organization': 'Organização', 'Organization Details': 'Detalhes da Organização', 'Organization Registry': 'Registro de Organização', 'Organization added': 'Organização incluída', 'Organization deleted': 'Organização excluída', 'Organization updated': 'Organização atualizada', 'Organizations': 'Organizações', 'Origin': 'Origem', 'Origin of the separated children': 'Origem das crianças separadas', 'Other': 'outro', 'Other (describe)': 'Outros (descreva)', 'Other (specify)': 'Outros motivos (especifique)', 'Other Evidence': 'outras evidencias', 'Other Faucet/Piped Water': 'Outras Torneiras /Agua Encanada', 'Other Isolation': 'Outro Isolamento', 'Other Name': 'outro nome', 'Other activities of boys 13-17yrs': 'Outras atividades de garotos 13-17anos', 'Other activities of boys 13-17yrs before disaster': 'Outras atividades de garotos 17-13anos antes do desastre', 'Other activities of boys <12yrs': 'Outras atividades de garotos <12 anos', 'Other activities of boys <12yrs before disaster': 'Outras atividades de garotos <12anos antes do desastre', 'Other activities of girls 13-17yrs': 'Outras atividades de meninas 13-17anos', 'Other activities of girls 13-17yrs before disaster': 'Outras atividades de meninas 13-17anos antes do desastre', 'Other activities of girls<12yrs': 'Outras atividades de garotas<12anos', 'Other activities of girls<12yrs before disaster': 'Outras atividades de garotas<12anos antes do desastre', 'Other alternative infant nutrition in use': 'Nutrição infantil alternativa em uso', 'Other alternative places for study': 'Outros locais alternativos para estudo', 'Other assistance needed': 'Outra assistência necessária', 'Other assistance, Rank': 'Outra assistência, Número', 'Other current health problems, adults': 'Outros problemas actuais de saúde, adultos', 'Other current health problems, children': 'Outros problemas actuais de saúde, crianças', 'Other events': 'outros eventos', 'Other factors affecting school attendance': 'Outros fatores que afetam a frequencia escolar', 'Other major expenses': 'outras despesas importantes', 'Other non-food items': 'Outros itens não alimentícios', 'Other recommendations': 'Outras recomendações', 'Other residential': 'Outros residentes', 'Other school assistance received': 'Assistência de outra escola recebida', 'Other school assistance, details': 'Assistência de outra escola, detalhes', 'Other school assistance, source': 'Assistência de outra escola, origem', 'Other settings can only be set by editing a file on the server': 'Outras configurações só podem ser definidas editando um arquivo no servidor', 'Other side dishes in stock': 'Pratos outro lado em ações', 'Other types of water storage containers': 'Outros tipos de recipientes de armazenamento de água', 'Other ways to obtain food': 'Outras maneiras de obter alimentos', 'Outbound Mail settings are configured in models/000_config.py.': 'Definições de correio de saída são configurados em modelos/000_config..py', 'Outbox': 'Caixa de Saída', 'Outgoing SMS Handler': 'Saída do Manipulador SMS', 'Outgoing SMS handler': 'Manipulador de SMS de saída', 'Overall Hazards': 'Riscos gerais', 'Overhead falling hazard': 'Risco de queda sobrecarga', 'Overland Flow Flood': 'Por via terrestre Fluxo de Enchente', 'Owned Resources': 'Recursos Próprios', 'PAHO UID': 'OPS UID', 'PDAM': 'PDAM', 'PDF File': 'PDF File', 'PIN': 
'alfinete', 'PIN number': 'Número do pino', 'PIN number ': 'PIN number ', 'PL Women': 'Mulheres PL', 'Pack': 'Pacote', 'Packs': 'Pacotes', 'Page': 'Page', 'Parameters': 'Parâmetros de Monitoramento', 'Parapets, ornamentation': 'Passarelas, ornamentação', 'Parent': 'parent', 'Parent Office': 'Escritório Principal', "Parent level should be higher than this record's level. Parent level is": 'Nível dos pais deve ser maior que o nível do registro. Nível do Pai é', 'Parent needs to be of the correct level': 'Pai precisa ser do nível correto', 'Parent needs to be set': 'Principal precisa ser configurado', 'Parent needs to be set for locations of level': 'Principal precisa ser configurado para locais de nível', 'Parents/Caregivers missing children': 'Pais/cuidadores de crianças desaparecidas', 'Parking Area': 'Parking Area', 'Partial': 'Parcial', 'Participant': 'Participante', 'Pashto': 'Pachto', 'Pass': 'Passou', 'Passport': 'passaporte', 'Password': 'senha', "Password fields don't match": 'Os campos de senha não são iguais.', 'Path': 'Caminho', 'Pathology': 'Patologia', 'Patient': 'Patient', 'Patient Details': 'Patient Details', 'Patient Tracking': 'Patient Tracking', 'Patient added': 'Patient added', 'Patient deleted': 'Patient deleted', 'Patient updated': 'Patient updated', 'Patients': 'Pacientes', 'Pediatric ICU': 'UTI Pediatrica', 'Pediatric Psychiatric': 'Psiquiátrico Pediátra', 'Pediatrics': 'Pediatria', 'Peer': 'Membro', 'Peer Details': 'Detalhes do Membro', 'Peer Registration': 'Registro de par', 'Peer Registration Details': 'Detalhes de Registro do Par', 'Peer Registration Request': 'Requerido Registro do Par', 'Peer Type': 'Por Tipo', 'Peer UID': 'Por UID', 'Peer added': 'Membro adicionado', 'Peer deleted': 'Membro excluído', 'Peer not allowed to push': 'Peer não permitido para envio', 'Peer registration request added': 'Registro Requerido do Par adicionado', 'Peer registration request deleted': 'Registro requerido do par excluído', 'Peer registration request updated': 'Registro requerido do par atualizado', 'Peer updated': 'PAR ATUALIZADO', 'Peers': 'Pares', 'Pending': 'pendente', 'Pending Requests': 'PEDIDOS PENDENTES', 'People': 'pessoas', 'People Needing Food': 'Pessoas precisando de alimento', 'People Needing Shelter': 'Pessoas precisando de abrigo', 'People Needing Water': 'Pessoas precisando de água', 'People Trapped': 'Pessoas presas', 'Performance Rating': 'Classificação da Performance', 'Person': 'pessoa', 'Person 1': 'Pessoa 1', 'Person 1, Person 2 are the potentially duplicate records': 'Pessoa 1, Pessoa 2 são os registros potencialmente duplicados', 'Person 2': 'Pessoa 2', 'Person De-duplicator': 'Anti-duplicador de Pessoas', 'Person Details': 'Detalhes Pessoais', 'Person Finder': 'Buscador de pessoas', 'Person Registry': 'Registro De Pessoa', 'Person added': 'Pessoa Incluída', 'Person added to Commitment': 'Person added to Commitment', 'Person deleted': 'Pessoa removida', 'Person details updated': 'Detalhes pessoais actualizados', 'Person interviewed': 'Pessoa entrevistada', 'Person missing': 'Pessoa perdida', 'Person must be specified!': 'Person must be specified!', 'Person removed from Commitment': 'Person removed from Commitment', 'Person reporting': 'Pessoa relatando', 'Person who has actually seen the person/group.': 'Pessoa que tenha realmente visto a pessoa/Grupo.', 'Person/Group': 'Pessoa/Grupo', 'Personal': 'Pessoal', 'Personal Data': 'Dados pessoais', 'Personal Effects': 'Efeitos pessoal', 'Personal Effects Details': 'Detalhes dos Efeitos Pessoais', 'Personal 
Map': 'Mapa De Pessoal', 'Personal Profile': 'Perfil pessoal', 'Personal impact of disaster': 'Impacto de desastre pessoal', 'Persons': 'Pessoas', 'Persons in institutions': 'Pessoas em instituições', 'Persons with disability (mental)': 'Pessoas com deficiência (mental)', 'Persons with disability (physical)': 'Pessoas com deficiência (física)', 'Phone': 'telefone', 'Phone 1': 'Telefone 1', 'Phone 2': 'Telefone 2', "Phone number to donate to this organization's relief efforts.": 'Número de telefone para doar ao serviço de assistência social desta organização', 'Phone/Business': 'Telefone comercial', 'Phone/Emergency': 'Telefone de emergência', 'Phone/Exchange': 'Telefone/Exchange', 'Phone/Exchange (Switchboard)': 'Telefone/Câmbio (Central)', 'Photo': 'foto', 'Photo Details': 'Foto com detalhes', 'Photo Taken?': 'Foto tomada?', 'Photo added': 'Foto adicionada (ou incluída)', 'Photo deleted': 'Foto deletada (apagada, excluída em definitivo)', 'Photo updated': 'Foto ATUALIZADA', 'Photograph': 'Fotografia ou Arte Fotográfica', 'Photos': 'fotos, imagens fotográficas', 'Physical Description': 'Descrição física', 'Physical Safety': 'Segurança Física', 'Picture': 'Imagem', 'Picture upload and finger print upload facility': 'Fazer upload de imagem e impressão dedo upload facility', 'Place': 'Local', 'Place of Recovery': 'Local de recuperação', 'Places for defecation': 'Locais para a defecação', 'Places the children have been sent to': 'Lugares que as crianças foram enviadas para', 'Planner': 'Planejador', 'Playing': 'Reproduzindo', "Please come back after sometime if that doesn't help.": 'Por favor, volte após algum tempo se isso não ajuda.', 'Please correct all errors.': 'Por favor CORRIJA todos os erros.', 'Please enter a First Name': 'Por favor insira um primeiro nome', 'Please enter a first name': 'Por favor insira um primeiro nome', 'Please enter a number only': 'Please enter a number only', 'Please enter a person': 'Insira uma pessoa', 'Please enter a site OR a location': 'Por favor digite um site ou um local', 'Please enter a valid email address': 'Please enter a valid email address', 'Please enter the first few letters of the Person/Group for the autocomplete.': 'Por favor Digite as primeiras letras do Pessoa/Grupo para o AutoCompletar.', 'Please enter the recipient': 'Por favor Digite o destinatário', 'Please fill this!': 'Por favor preencha isso!', 'Please give an estimated figure about how many bodies have been found.': 'Please give an estimated figure about how many bodies have been found.', 'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened.': 'Por favor Forneça a URL da página que você está fazendo referência à, uma descrição do que você esperava que acontecesse & O que realmente aconteceu.', 'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened. If a ticket was issued then please provide the Ticket ID.': 'Por favor Forneça a URL da página que você está fazendo referência à, uma descrição do que você esperava que acontecesse & O que realmente aconteceu. Se um bilhete foi emitido então por favor forneça o ID do bilhete.', 'Please report here where you are:': 'Por favor informe aqui onde você está:', 'Please select': 'Por favor Selecione', 'Please select another level': 'Por favor selecione outro nível', 'Please sign-up with your Cell Phone as this allows us to send you Text messages. 
Please include full Area code.': 'Por favor inscrever-se com seu celular como isso nos permite lhe enviar mensagens de texto. Por favor inclua código de Área total.', 'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': 'Por favor especifique quaisquer problemas e obstáculos com a manipulação correcta da doença, em detalhes (em números, se for o caso). Pode também dar sugestões - a situação pode ser melhorada.', 'Please use this field to record any additional information, including a history of the record if it is updated.': 'Por favor utilize esse campo para registrar quaisquer informações adicionais, incluindo um histórico do registro se ele estiver sendo atualizado.', 'Please use this field to record any additional information, including any Special Needs.': 'Por favor utilize esse campo para registrar quaisquer informações adicionais, incluindo quaisquer necessidades especiais.', 'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': 'Por favor utilize esse campo para registrar quaisquer informações adicionais, como IDs de instância Ushahidi. Incluir o histórico do registo se este fôr actualizado.', 'Pledge Support': 'Suporte da promessa', 'Point': 'Ponto', 'Poisoning': 'Envenenamento', 'Poisonous Gas': 'Gás venenoso', 'Police': 'Polícia', 'Pollution and other environmental': 'Poluição ambiental e outras', 'Polygon': 'Polígono', 'Polygon reference of the rating unit': 'Polígono de referência da unidade de classificação', 'Poor': 'Pobre', 'Population': 'População', 'Population Statistic Details': 'População Estatística Detalhes', 'Population Statistic added': 'População Estatística incluída', 'Population Statistic deleted': 'População Estatística excluído', 'Population Statistic updated': 'População De Estatística atualizada', 'Population Statistics': 'Estatísticas De população', 'Population and number of households': 'população e número de residentes', 'Popup Fields': 'Pop-up Campos', 'Popup Label': 'Rótulo do pop-up', 'Porridge': 'mingau', 'Port': 'porta', 'Port Closure': 'Porta Encerramento', 'Portuguese': 'Português', 'Portuguese (Brazil)': 'Português (Brasil)', 'Position': 'Posição', 'Position Catalog': 'Catálogo de posições', 'Position Details': 'detalhamento do cargo', 'Position added': 'Cargo inserido', 'Position deleted': 'Cargo excluído', 'Position updated': 'Posição atualizada', 'Positions': 'cargos', 'Postcode': 'Código Postal', 'Poultry': 'Aves', 'Poultry restocking, Rank': 'Reabastecimento de aves domésticas, posição', 'Pounds': 'Libras', 'Power Failure': 'Falha de Energia', 'Powered by Sahana Eden': 'Desenvolvido pela Sahana Eden', 'Pre-cast connections': 'Conexões-cast pré', 'Preferred Name': 'Nome Preferido', 'Pregnant women': 'Mulheres grávidas', 'Preliminary': 'Preliminar', 'Presence': 'Presença', 'Presence Condition': 'Condição de Presença', 'Presence Log': 'Log de Presença', 'Previous': 'Anterior', 'Primary Name': 'Nome Principal', 'Primary Occupancy': 'Principal Ocupação', 'Priority': 'priority', 'Priority from 1 to 9. 1 is most preferred.': 'Prioridade de 1 a 9. 
1 é preferível', 'Private': 'Privado', 'Problem': 'Problema do', 'Problem Administration': 'Gestão de problema', 'Problem Details': 'Detalhes do Problema', 'Problem Group': 'Grupo do Problema', 'Problem Title': 'Título do Problema', 'Problem added': 'Problema incluído', 'Problem connecting to twitter.com - please refresh': 'Problema ao conectar-se ao twitter.com, tente novamente', 'Problem deleted': 'Problema Excluído', 'Problem updated': 'Problema Atualizado', 'Problems': 'Problemas', 'Procedure': 'Procedimento', 'Process Received Shipment': 'Processo recebeu embarque', 'Process Shipment to Send': 'Processar remessa a enviar', 'Profile': 'profile', 'Project': 'projeto', 'Project Details': 'Detalhes do Projeto', 'Project Status': 'Status do Projeto', 'Project Tracking': 'Acompanhamento do Projeto', 'Project added': 'Projeto incluído', 'Project deleted': 'Projeto Excluído', 'Project has no Lat/Lon': 'Projeto não possui Latitude/Longitude', 'Project updated': 'Projeto ATUALIZADO', 'Projection': 'Projeção', 'Projection Details': 'Detalhes da Projeção', 'Projection Type': 'Projection Type', 'Projection added': 'Projeção incluída', 'Projection deleted': 'Projeção excluída', 'Projection updated': 'Projecção atualizada', 'Projections': 'projeções', 'Projects': 'projetos', 'Property reference in the council system': 'Referência de propriedade no sistema do conselho', 'Protected resource': 'Recurso protegido', 'Protection': 'Protecção', 'Provide Metadata for your media files': 'Fornecer Metadados para os seus ficheiros media', 'Provide a password': 'Provide a password', 'Provide an optional sketch of the entire building or damage points. Indicate damage points.': 'Fornecer um retrato opcional de todo o edifício ou áreas danificadas. Pontos danos indicar.', 'Proxy-server': 'Servidor Proxy', 'Psychiatrics/Adult': 'Psiquiatras/Adulto', 'Psychiatrics/Pediatric': 'Psiquiatras/Pediátrica', 'Public': 'Público', 'Public Event': 'Evento público', 'Public and private transportation': 'Transporte Público e Privado', 'Public assembly': 'Assembléia Pública', 'Pull tickets from external feed': 'Pull de bilhetes alimentação externa', 'Punjabi': 'Punjabi', 'Purchase Date': 'Data de aquisição', 'Push tickets to external system': 'BILHETES Push PARA sistema externo', 'Pyroclastic Flow': 'Pyroclastic FLuxo', 'Pyroclastic Surge': 'Pyroclastic Aumento', 'Python Serial module not available within the running Python - this needs installing to activate the Modem': 'Módulo Serial Python não disponíveis no a execução Python-isto tem de instalar para ativar o Modem', 'Quantity': 'Quantidade', 'Quantity Committed': 'Quantidade Comprometida', 'Quantity Fulfilled': 'Quantidade Preenchida', "Quantity in %s's Inventory": 'Quantidade de %s do Inventário', 'Quantity in Transit': 'Quantidade em Trânsito', 'Quarantine': 'Quarentena', 'Queries': 'Buscas', 'Query': 'Busca', 'Queryable?': 'Consultável?', 'RC frame with masonry infill': 'Quadro de RC com aterros de alvenaria', 'RECORD A': 'Registro A', 'RECORD B': 'REGISTRO B', 'Race': 'Corrida', 'Radio': 'Radio', 'Radio Callsign': 'Rádio Chamada', 'Radio Details': 'Radio Details', 'Radiological Hazard': 'Risco Radiológico', 'Radiology': 'Radiologia', 'Railway Accident': 'Acidente Ferroviário', 'Railway Hijacking': 'Sequestro Ferroviário', 'Rain Fall': 'Queda de Chuva', 'Rapid Assessment': 'Avaliação Rápida', 'Rapid Assessment Details': 'Rápida Avaliação Detalhes', 'Rapid Assessment added': 'Rapid Avaliação incluído', 'Rapid Assessment deleted': 'Rápida Avaliação excluído', 'Rapid 
Assessment updated': 'Rapid avaliação atualizada', 'Rapid Assessments': 'Rapid Avaliações', 'Rapid Assessments & Flexible Impact Assessments': 'Rapid Avaliações & Flexível Impacto Avaliações', 'Rapid Close Lead': 'Fechamento Lead rápido', 'Rapid Data Entry': 'Entrada de dados rápida', 'Rating Scale': 'Escala de avaliação', 'Raw Database access': 'Acesso bruto a Base de dados', 'Read-Only': 'somente para leitura', 'Read-only': 'somente para leitura', 'Receive': 'Receber', 'Receive Items': 'Aceitar itens', 'Receive New Shipment': 'Receber Novos Embarques', 'Receive Shipment': 'Receber carregamento', 'Receive this shipment?': 'Receber esse embarque?', 'Received': 'Recebido', 'Received By': 'Recebido Por', 'Received By Person': 'Recebido Por Pessoa', 'Received Item Details': 'Detalhes do item recebido', 'Received Item deleted': 'Recebido item excluído', 'Received Item updated': 'Item recebido atualizado', 'Received Shipment Details': 'Lista de remessa de mercadorias/produtos', 'Received Shipment canceled': 'Remessa de produtos cancelada', 'Received Shipment canceled and items removed from Inventory': 'Recebido carregamento cancelado e itens removidos do inventário', 'Received Shipment updated': 'Carregamento Recebido Atualizado', 'Received Shipments': 'Carregamento de produtos recebido', 'Receiving and Sending Items': 'Receber e enviar Itens', 'Recipient': 'destinatário', 'Recipients': 'destinatários', 'Recommendations for Repair and Reconstruction or Demolition': 'Recomendações para reparo e reconstrução ou demolição', 'Record': 'registro', 'Record Details': 'Detalhes do Registro', 'Record Saved': 'Registro Gravado', 'Record added': 'Registro incluído', 'Record any restriction on use or entry': 'Registro de qualquer restrição à utilização ou entrada', 'Record deleted': 'Registro excluído', 'Record last updated': 'Último registro atualizado', 'Record not found': 'Registro não encontrado', 'Record not found!': 'Registro não encontrado!', 'Record updated': 'registro atualizado', 'Recording and Assigning Assets': 'Ativos de Gravação e Designação', 'Records': 'Registros', 'Recovery': 'recuperação', 'Recovery Request': 'pedido de recuperação', 'Recovery Request added': 'Pedido de recuperação adicionado', 'Recovery Request deleted': 'Pedido de recuperação apagado', 'Recovery Request updated': 'Pedido de recuperação atualizado', 'Recovery Requests': 'Pedidos de recuperação', 'Recruitment': 'Recrutamento', 'Recurring': 'Recorrente', 'Recurring Cost': 'Custo recorrente', 'Recurring cost': 'Custo recorrente', 'Recurring costs': 'Custos recorrentes', 'Red': 'vermelho', 'Red Cross / Red Crescent': 'Cruz Vermelha / Red Crescent', 'Reference Document': 'Documento de referência', 'Refresh Rate (seconds)': 'Taxa de Atualização (Segundos)', 'Region Location': 'Localizaçao da regiao', 'Regional': 'regional', 'Regions': 'Regiões', 'Register': 'registro', 'Register Person': 'REGISTRAR PESSOA', 'Register Person into this Camp': 'Registrar Pessoa neste Acampamento', 'Register Person into this Shelter': 'REGISTRAR PESSOA PARA ESTE Abrigo', 'Register them as a volunteer': 'Registrá-los como voluntários', 'Registered People': 'Pessoas Registradas', 'Registered users can': 'Os usuários registrados podem', 'Registration': 'Inscrição', 'Registration Details': 'Detalhes da Inscrição', 'Registration added': 'Inscrição adicionada', 'Registration entry deleted': 'Inscrição excluída', 'Registration is still pending approval from Approver (%s) - please wait until confirmation received.': 'Registro ainda está pendente de 
aprovação do Aprovador (%s) - Por favor, aguarde até a confirmação recebida.', 'Registration key': 'Registration key', 'Registration updated': 'Inscrição atualizada', 'Rehabilitation/Long Term Care': 'Reabilitação/Cuidados de Longo Termo', 'Reinforced masonry': 'Alvenaria reforçada', 'Rejected': 'rejeitado', 'Relative Details': 'Relative Details', 'Relative added': 'Relative added', 'Relative deleted': 'Relative deleted', 'Relative updated': 'Relative updated', 'Relatives': 'Relatives', 'Relief': 'Alivio', 'Relief Team': 'Equipe de socorro', 'Religion': 'Religião', 'Religious': 'Religiosas', 'Religious Leader': 'Líder religioso', 'Relocate as instructed in the <instruction>': 'Relocalizar conforme instruído no', 'Remove': 'remover', 'Remove Activity from this event': 'Remove Activity from this event', 'Remove Asset from this event': 'Remover ativo deste evento', 'Remove Asset from this scenario': 'Remover ativo deste cenário', 'Remove Document from this request': 'Remove Document from this request', 'Remove Facility from this event': 'Remover recurso deste evento', 'Remove Facility from this scenario': 'Remover recurso deste cenário', 'Remove Human Resource from this event': 'REMOVER RECURSOS HUMANOS A partir deste evento', 'Remove Human Resource from this scenario': 'REMOVER RECURSOS HUMANOS A partir deste cenário', 'Remove Item from Inventory': 'Remover Item do Inventário', 'Remove Map Configuration from this event': 'REMOVER Mapa de configuração a partir deste evento', 'Remove Map Configuration from this scenario': 'REMOVER Mapa de configuração a partir deste cenário', 'Remove Person from Commitment': 'Remove Person from Commitment', 'Remove Skill': 'Remove Skill', 'Remove Skill from Request': 'Remove Skill from Request', 'Remove Task from this event': 'Remove Task from this event', 'Remove Task from this scenario': 'Remove Task from this scenario', 'Remove this asset from this event': 'REMOVER este recurso a partir deste evento', 'Remove this asset from this scenario': 'Remover este recurso deste cenário', 'Remove this facility from this event': 'Remove this facility from this event', 'Remove this facility from this scenario': 'Remove this facility from this scenario', 'Remove this human resource from this event': 'Remove this human resource from this event', 'Remove this human resource from this scenario': 'Remove this human resource from this scenario', 'Remove this task from this event': 'Remove this task from this event', 'Remove this task from this scenario': 'Remove this task from this scenario', 'Repair': 'REPARO', 'Repaired': 'Reparado', 'Repeat your password': 'REPITA sua senha', 'Replace': 'TROCAR', 'Replace if Master': 'Substituir se Principal', 'Replace if Newer': 'Substituir se o Mais Recente', 'Report': 'Relatório', 'Report Another Assessment...': 'Adicionar Outro Relatório De Avaliação....', 'Report Details': 'Detalhes do Relatório', 'Report Resource': 'Reportar Recursos', 'Report Types Include': 'Tipos de relatório incluem', 'Report added': 'Relatório incluído', 'Report deleted': 'Relatório removido', 'Report my location': 'Relate meu local', 'Report the contributing factors for the current EMS status.': 'Reportar os factores que contribuem para a situação EMS actual.', 'Report the contributing factors for the current OR status.': 'Reportar os factores que contribuem para a situação OR actual.', 'Report them as found': 'Reportar como encontrados', 'Report them missing': 'Reportar como perdidos', 'Report updated': 'Relatório atualizado', 'ReportLab module not available 
within the running Python - this needs installing for PDF output!': 'O módulo de ReportLab não disponíveis na execução Python - isto requer a instalação para a entrega em PDF!', 'ReportLab not installed': 'ReportLab não instalado', 'Reporter': 'Relator', 'Reporter Name': 'Nome do Relator', 'Reporting on the projects in the region': 'Relatórios sobre os projetos na região', 'Reports': 'Relatórios', 'Request': 'Pedido', 'Request Added': 'Pedido Incluído', 'Request Canceled': 'Pedido Cancelado', 'Request Details': 'Detalhes do Pedido', 'Request From': 'Pedido De', 'Request Item': 'Item de pedido', 'Request Item Details': 'Detalhes do item de pedido', 'Request Item added': 'Item incluído no pedido', 'Request Item deleted': 'Item de pedido excluído', 'Request Item from Available Inventory': 'PEDIDO DE Item de Inventário Disponível', 'Request Item updated': 'Pedido actualizado', 'Request Items': 'Itens de pedido', 'Request New People': 'Request New People', 'Request Status': 'Status do Pedido', 'Request Type': 'Tipo de Pedido', 'Request Updated': 'Solicitação atualizada', 'Request added': 'Pedido adicionado', 'Request deleted': 'Solicitação excluída', 'Request for Role Upgrade': 'Pedido de upgrade de função', 'Request updated': 'Pedido actualizado', 'Request, Response & Session': 'Pedido, Resposta & Sessão', 'Requested': 'solicitado', 'Requested By': 'Solicitado Por', 'Requested By Facility': 'Solicitado Pela Instalação', 'Requested By Site': 'Solicitado Por Site', 'Requested From': 'Solicitada a Partir de', 'Requested Items': 'Itens solicitados', 'Requested Skill': 'Requested Skill', 'Requested Skill Details': 'Requested Skill Details', 'Requested Skill updated': 'Requested Skill updated', 'Requested Skills': 'Requested Skills', 'Requested by': 'Solicitado Por', 'Requested on': 'Em solicitada', 'Requester': 'Solicitante', 'Requests': 'Pedidos', 'Requests Management': 'Gerenciamento de Pedidos', 'Required Skill': 'Required Skill', 'Requires Login!': 'É necessário fazer login!', 'Rescue and recovery': 'Resgate e recuperação', 'Reset': 'Restaurar', 'Reset Password': 'restabelecer senha', 'Resolve': 'Resolver', 'Resolve Conflict': 'Resolver Conflito', 'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.': 'Resolva link que levará até uma nova tela que ajudará a resolver esses registros duplicados e atualizar o banco de dados.', 'Resource': 'Recurso', 'Resource Details': 'Detalhes do recurso', 'Resource added': 'Recurso incluído', 'Resource deleted': 'Recurso Excluído', 'Resource updated': 'Recurso atualizado', 'Resources': 'Recursos', 'Respiratory Infections': 'Infecções respiratórias', 'Response': 'Resposta', 'Restricted Access': 'Acesso Restrito', 'Restricted Use': 'Uso restrito', 'Results': 'results', 'Retail Crime': 'Crime a varejo', 'Retrieve Password': 'Recuperar Senha', 'Return': 'Retorno', 'Return to Request': 'Retornar ao pedido', 'Returned': 'Retornado', 'Returned From': 'Retornado a partir de', 'Returned Status': 'Retornado Status', 'Review Incoming Shipment to Receive': 'Revisão da Remessa de Entrada para Receber', 'Rice': 'Arroz', 'Riot': 'Motim', 'River': 'Rio', 'River Details': 'Detalhes do Rio', 'River added': 'Rio adicionado', 'River deleted': 'Rio deletado', 'River updated': 'Rio atualizado', 'Rivers': 'Rios', 'Road Accident': 'Acidente na rua/estrada', 'Road Closed': 'Rua/Estrada fechada', 'Road Conditions': 'Condições da Estrada', 'Road Delay': 'Atraso de Estrada', 'Road Hijacking': 'Sequestro de Estrada', 'Road Usage 
Condition': 'Condição de Uso de Estrada', 'Roads Layer': 'Roads Layer', 'Role': 'Função', 'Role Details': 'Detalhes da Função', 'Role Required': 'Função requerida', 'Role Updated': 'Função atualizada', 'Role added': 'Função incluída', 'Role deleted': 'Função excluída', 'Role updated': 'Função atualizada', 'Role-based': 'Baseada em regra', 'Roles': 'Funções', 'Roles Permitted': 'Funções Permitidas', 'Roof tile': 'Telhado lado a lado', 'Roofs, floors (vertical load)': 'Telhados, pisos (carga vertical)', 'Room': 'Sala', 'Room Details': 'Detalhes da sala', 'Room added': 'Sala incluída', 'Room deleted': 'Sala excluída', 'Room updated': 'Sala atualizada', 'Rooms': 'Salas', 'Roster': 'Lista', 'Row Choices (One Per Line)': 'Opções da linha (Um por linha)', 'Rows in table': 'Linhas na tabela', 'Rows selected': 'Linhas Selecionadas', 'Run Functional Tests': 'Executar testes funcionais', 'Run Interval': 'Intervalo de execução', 'Running Cost': 'Custo corrente', 'Russian': 'Russian', 'SMS Modems (Inbound & Outbound)': 'SMS Modems (Inbound & Outbound)', 'SMS Outbound': 'SMS Outbound', 'SMS Settings': 'SMS Settings', 'SMS settings updated': 'SMS settings updated', 'SMTP to SMS settings updated': 'SMTP to SMS settings updated', 'Safe environment for vulnerable groups': 'Ambiente seguro para grupos vulneráveis', 'Safety Assessment Form': 'Formulário de avaliação de segurança', 'Safety of children and women affected by disaster?': 'Segurança das crianças e mulheres afetadas pela catástrofe?', 'Sahana Administrator': 'Sahana Administrador', 'Sahana Agasti': 'Sahana Agasti', 'Sahana Blue': 'Sahana Azul', 'Sahana Community Chat': 'Sahana COMUNIDADE de BATE-PAPO', 'Sahana Eden': 'Sahana Eden', 'Sahana Eden <=> Other': 'Sahana Eden <=> Outros', 'Sahana Eden <=> Sahana Eden': 'Sahana Éden <=> Sahana Éden', 'Sahana Eden Humanitarian Management Platform': 'plataforma de gerenciamento humanitário Sahana Éden', 'Sahana Eden Website': 'SITE Sahana Éden', 'Sahana Green': 'Sahana Verde', 'Sahana Steel': 'Sahana Steel', 'Sahana access granted': 'Acesso Sahana CONCEDIDO', 'Salted Fish': 'Peixe Salgado', 'Sanitation problems': 'Problemas de saneamento', 'Satellite': 'satélite', 'Satellite Layer': 'Satellite Layer', 'Satellite Office': 'Escritório Satélite', 'Saturday': 'Sábado', 'Save': 'armazenar', 'Saved.': 'armazenado.', 'Saving...': 'Guardando...', 'Scale of Results': 'Nível de Resultados', 'Scanned Copy': 'Scanned Copy', 'Scanned Forms Upload': 'Scanned Forms Upload', 'Scenario': 'Cenário', 'Scenario Details': 'Detalhes do Cenário', 'Scenario added': 'Cenário incluído', 'Scenario deleted': 'Cenário excluído', 'Scenario updated': 'Cenário atualizado', 'Scenarios': 'Cenários', 'Schedule': 'Horário', 'Schema': 'Esquema', 'School': 'Escola', 'School Closure': 'Encerramento Escolar', 'School Lockdown': 'Bloqueio escolar', 'School Teacher': 'Professor de escola', 'School activities': 'Actividades escolares', 'School assistance': 'Assistência escolar', 'School attendance': 'Presença escolar', 'School destroyed': 'Escola Destruída', 'School heavily damaged': 'Escola fortemente danificada', 'School tents received': 'Tendas da escola recebidas', 'School tents, source': 'Tendas de escola, origem', 'School used for other purpose': 'Escola utilizada para outros fins', 'School/studying': 'Escola/estudando', 'Schools': 'Escolas', 'Search': 'Pesquisar', 'Search Activities': 'procurar atividades', 'Search Activity Report': 'Relatório de pesquisa de atividades', 'Search Addresses': 'procurar endereços', 'Search Alternative 
Items': 'Procurar itens alternativos', 'Search Assessment Summaries': 'Procura De Avaliação De RESUMOS', 'Search Assessments': 'Avaliações de procura', 'Search Asset Assignments': 'Procurar ATIVO Designações', 'Search Asset Log': 'Procurar log de ativo', 'Search Assets': 'Procurar Recursos', 'Search Baseline Type': 'Procurar Typo de Base', 'Search Baselines': 'Procurar Bases', 'Search Brands': 'Procurar Marcas', 'Search Budgets': 'Procura Orçamentos', 'Search Bundles': 'PACOTES Configuráveis de procura', 'Search Camp Services': 'Procurar Serviços de Acampamento', 'Search Camp Types': 'Procurar Tipos De Acampamento', 'Search Camps': 'Procurar acampamentos', 'Search Catalog Items': 'Itens de procura De Catálogo', 'Search Catalogs': 'Procurar nos Catálogos', 'Search Certificates': 'Procurar Certificados', 'Search Certifications': 'Procurar Certificações', 'Search Checklists': 'Listas De procura', 'Search Cluster Subsectors': 'Procura De Cluster Subsectores', 'Search Clusters': 'Clusters de procura', 'Search Commitment Items': 'Itens de procura Compromisso', 'Search Commitments': 'Compromissos de procura', 'Search Committed People': 'Search Committed People', 'Search Competencies': 'Procurar Competências', 'Search Competency Ratings': 'Procurar Indices de Competência', 'Search Contact Information': 'Procurar informações de contato', 'Search Contacts': 'Buscar contatos', 'Search Course Certificates': 'procura Certificados de Curso', 'Search Courses': 'Procurar Cursos', 'Search Credentials': 'Credenciais de busca', 'Search Documents': 'Pesquisar documentos', 'Search Donors': 'Procura de Doadores', 'Search Entries': 'Pesquisar Entradas', 'Search Events': 'Pesquisar Eventos', 'Search Facilities': 'Pesquisar Instalações', 'Search Feature Class': 'Pesquisar classe de dispositivos', 'Search Feature Layers': 'Pesquisar camadas do dispositivo', 'Search Flood Reports': 'Pesquisar relatórios de inundação', 'Search GPS data': 'Search GPS data', 'Search Groups': 'Buscar Grupos', 'Search Homes': 'Search Homes', 'Search Human Resources': 'Pesquise recursos humanos.', 'Search Identity': 'Buscar Identidade', 'Search Images': 'Procurar Imagens', 'Search Impact Type': 'Procurar Tipo de Impacto', 'Search Impacts': 'Procurar Impactos', 'Search Import Files': 'Search Import Files', 'Search Incident Reports': 'Procurar Relatórios de Incidentes', 'Search Inventory Items': 'Procurar Entradas De Inventário', 'Search Inventory items': 'Procurar Entradas De Inventário', 'Search Item Categories': 'Buscar categorias de Item', 'Search Item Packs': 'Buscar pocotes de itens', 'Search Items': 'Buscar Itens', 'Search Job Roles': 'Pesquise papéis de trabalho', 'Search Keys': 'Procurar chaves', 'Search Kits': 'Procurar kits', 'Search Layers': 'Procurar camadas', 'Search Level': 'Search Level', 'Search Level 1 Assessments': 'Procurar Avaliações Nível 1', 'Search Level 2 Assessments': 'Procurar Avaliações Nível 2', 'Search Locations': 'Procurar Localidades', 'Search Log Entry': 'Procura de entrada de Log', 'Search Map Configurations': 'Pesquise mapa de configurações.', 'Search Markers': 'Marcadores De procura', 'Search Member': 'Procurar Membro', 'Search Membership': 'Procurar filiação', 'Search Memberships': 'Pesquisar Associações', 'Search Missions': 'Procurar Missões', 'Search Need Type': 'Procura Precisa De Tipo', 'Search Needs': 'Procura precisa', 'Search Notes': 'Notes procura', 'Search Offices': 'Escritórios de procura', 'Search Organizations': 'Pesquisar Organizações', 'Search Patients': 'Search Patients', 'Search Peer': 
'PROCURA Par', 'Search Personal Effects': 'Procura objetos pessoais', 'Search Persons': 'Buscar Membros', 'Search Photos': 'Procura Fotos', 'Search Population Statistics': 'Procurar Estatística de População', 'Search Positions': 'Procura de Posições', 'Search Problems': 'Procura de Problemas', 'Search Projections': 'Projeções de procura', 'Search Projects': 'Procura de Projetos', 'Search Rapid Assessments': 'Procura de Avaliações Rápidas', 'Search Received Items': 'Procura de Itens Recebidos', 'Search Received Shipments': 'Embarques de procura Recebidos', 'Search Records': 'registros de procura', 'Search Registations': 'Registations procura', 'Search Registration Request': 'Pedido de registro de procura', 'Search Relatives': 'Search Relatives', 'Search Report': 'Procurar Relatório', 'Search Reports': 'Procurar Relatórios', 'Search Request': 'pedido de pesquisa', 'Search Request Items': 'Pedido de procura de Itens', 'Search Requested Items': 'Procura de itens solicitados', 'Search Requested Skills': 'Search Requested Skills', 'Search Requests': 'Procura de solicitações', 'Search Resources': 'Pesquisa de recursos', 'Search Rivers': 'Rios procura', 'Search Roles': 'Pesquisa de papéis', 'Search Rooms': 'Procurar Salas', 'Search Scenarios': 'Procurar cenários', 'Search Sections': 'As Seções de procura', 'Search Sectors': 'Procurar Setores', 'Search Sent Items': 'Procurar Itens Enviados', 'Search Sent Shipments': 'Procurar Despachos Enviados', 'Search Service Profiles': 'Serviço de procura Perfis', 'Search Settings': 'Definições de Pesquisa', 'Search Shelter Services': 'Procura Abrigo de serviços', 'Search Shelter Types': 'Procura tipos de Abrigo', 'Search Shelters': 'Procurar Abrigos', 'Search Skill Equivalences': 'Procurar equivalencias de habilidades', 'Search Skill Provisions': 'Procurar Disposições de habilidade', 'Search Skill Types': 'Pesquisar Tipos de Habilidades', 'Search Skills': 'Pesquisar Habilidades', 'Search Solutions': 'Pesquisar Soluções', 'Search Staff': 'Busca de pessoal', 'Search Staff Types': 'Busca de tipo de pessoal', 'Search Staff or Volunteer': 'Procurar Funcionário ou Voluntário', 'Search Status': 'Busca de status', 'Search Subscriptions': 'Busca de assinaturas', 'Search Subsectors': 'Buscar subsetores', 'Search Support Requests': 'Pedidos de suporte a pesquisa', 'Search Tasks': 'Tarefa de Pesquisa', 'Search Teams': 'Times de pesquisa', 'Search Themes': 'Temas de pesquisa', 'Search Tickets': 'Buscar Bilhetes', 'Search Tracks': 'Procurar Trilhas', 'Search Trainings': 'Buscar Treinamentos', 'Search Twitter Tags': 'Procurar Twitter Tags', 'Search Units': 'Procura Unidades', 'Search Users': 'Procurar Usuários', 'Search Vehicle Details': 'Search Vehicle Details', 'Search Vehicles': 'Search Vehicles', 'Search Volunteer Availability': 'Buscar Disponibilidade para Voluntáriado', 'Search Volunteers': 'Procura Voluntários', 'Search Warehouses': 'procura Warehouses', 'Search and Edit Group': 'Procurar e editar GRUPO', 'Search and Edit Individual': 'Procurar e Editar Individual', 'Search for Staff or Volunteers': 'Pesquise por funcionários ou voluntários', 'Search for a Location by name, including local names.': 'Pesquisar local por nome, incluindo nomes locais.', 'Search for a Person': 'Procurar Pessoa', 'Search for a Project': 'Procurar Projecto', 'Search for a shipment by looking for text in any field.': 'Procurar carga fazendo uma pesquisa de texto em qualquer campo.', 'Search for a shipment received between these dates': 'Procurar carga recebida entre estas datas', 'Search for 
a vehicle by text.': 'Search for a vehicle by text.', 'Search for an Organization by name or acronym': 'Procurar por uma Organização por nome ou iniciais', 'Search for an Organization by name or acronym.': 'Procurar por uma organização por nome ou iniciais.', 'Search for an asset by text.': 'Pesquisar um recurso por texto.', 'Search for an item by category.': 'Procurar por categoria.', 'Search for an item by Year of Manufacture.': 'Search for an item by Year of Manufacture.', 'Search for an item by brand.': 'Search for an item by brand.', 'Search for an item by catalog.': 'Search for an item by catalog.', 'Search for an item by category.': 'Search for an item by category.', 'Search for an item by its code, name, model and/or comment.': 'Search for an item by its code, name, model and/or comment.', 'Search for an item by text.': 'Procurar por texto.', 'Search for asset by country.': 'Procurar bens por país.', 'Search for asset by location.': 'Search for asset by location.', 'Search for office by country.': 'Procurar escritórios por país.', 'Search for office by location.': 'Search for office by location.', 'Search for office by organization.': 'Procurar escritórios por organização.', 'Search for office by text.': 'Procura por texto do gabinete.', 'Search for vehicle by location.': 'Search for vehicle by location.', 'Search for warehouse by country.': 'Pesquise por depósito por país.', 'Search for warehouse by location.': 'Search for warehouse by location.', 'Search for warehouse by organization.': 'Pesquise por depósito por organização.', 'Search for warehouse by text.': 'Pesquise por depósito via campo-texto.', 'Search here for a person record in order to:': 'Buscar aqui por um registro de pessoa a fim de:', 'Search messages': 'Mensagens de Procura', 'Searching for different groups and individuals': 'Procurar diferentes grupos e indivíduos', 'Secondary Server (Optional)': 'Servidor secundário (opcional)', 'Seconds must be a number between 0 and 60': 'Segundos deve ser um número entre 0 e 60', 'Section': 'Section', 'Section Details': 'Seção Detalhes', 'Section deleted': 'Seção excluído', 'Section updated': 'Seção atualizada', 'Sections': 'Seções', 'Sections that are part of this template': 'Sections that are part of this template', 'Sections that can be selected': 'Sections that can be selected', 'Sector': 'setor', 'Sector Details': 'Detalhes do Setor', 'Sector added': 'Sector incluído', 'Sector deleted': 'Sector apagado', 'Sector updated': 'Setor atualizado', 'Sector(s)': 'Setor(es)', 'Sectors': 'Setores', 'Security Status': 'Status de Segurança', 'Security problems': 'Problemas de Segurança', 'See All Entries': 'Ver todas as entradas', 'See all': 'Ver tudo', 'See unassigned recovery requests': 'Consulte Pedidos de recuperação designado', 'Seen': 'Visto', 'Select': 'select', 'Select Items from the Request': 'Selecionar itens do pedido', 'Select Items from this Inventory': 'Selecionar itens a partir deste Inventário', 'Select Organization': 'Selecionar Organização', 'Select Skills from the Request': 'Select Skills from the Request', "Select a Room from the list or click 'Add Room'": "Escolha uma sala da lista ou clique 'Incluir sala'", 'Select a location': 'Selecionar um local', "Select a manager for status 'assigned'": "Select a manager for status 'assigned'", "Select a person in charge for status 'assigned'": "Selecione uma pessoa responsável para status 'DESIGNADO'", 'Select a question from the list': 'Selecione uma pergunta a partir da lista', 'Select a range for the number of total 
beds': 'Selecione um intervalo para o número de camas total', 'Select all that apply': 'Selecione todas as que se applicam', 'Select an Organization to see a list of offices': 'Selecione uma organização para ver uma lista de escritórios', 'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': 'Selecione as sobreposições de avaliação e actividades relacionadas com cada necessidade para identificar as lacunas.', 'Select the person assigned to this role for this project.': 'Selecione a pessoa designada para essa função neste projeto.', "Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": "Selecione isto se todas as localidades especificas precisarem de um pai no nível mais alto da hierarquia. Por exemplo, se 'distrito' é a menor divisão na hierarquia e, em seguida, todos os locais específicos seriam obrigados a ter um distrito como um pai.", "Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": 'Selecione isto se todos os locais específicos de uma posição pai na hierarquia do local. Isso pode ajudar na configuração de uma "região" representando uma área afetada.', 'Select to show this configuration in the Regions menu.': 'Selecione para mostrar essa configuração no menu regiões.', 'Select to show this configuration in the menu.': 'Select to show this configuration in the menu.', 'Selected Jobs': 'Selected Jobs', 'Selects what type of gateway to use for outbound SMS': 'Selects what type of gateway to use for outbound SMS', 'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS': 'Selecione se vau utilizar um Modem, Tropo ou outro Gateway para enviar SMS', 'Send': 'Envie', 'Send Alerts using Email &/or SMS': 'Envio de alertas usando e-mail e/ou SMS', 'Send Commitment as Shipment': 'Enviar compromisso como carregamento', 'Send New Shipment': 'Enviar nova remessa', 'Send Notification': 'Enviar notificação', 'Send Shipment': 'Enviar Carregamento', 'Send a message to this person': 'Enviar uma mensagem para esta pessoa', 'Send a message to this team': 'Enviar uma mensagem para essa equipe', 'Send from %s': 'Enviar de %s', 'Send message': 'Enviar mensagem', 'Send new message': 'Enviar nova mensagem', 'Sends & Receives Alerts via Email & SMS': 'Envia & Recebe Alertas via E-Mail & SMS', 'Senior (50+)': 'Sênior (50+)', 'Sent': 'Enviadas', 'Sent By': 'Enviado Por', 'Sent By Person': 'Enviado Por Pessoa', 'Sent Item Details': 'Detalhes do Item enviado', 'Sent Item deleted': 'Enviado Item excluído', 'Sent Item updated': 'Enviado Item atualizado', 'Sent Shipment Details': 'Enviado Detalhes de Embarque', 'Sent Shipment canceled': 'Enviado Carregamento cancelado', 'Sent Shipment canceled and items returned to Inventory': 'Enviado Carregamento cancelado e itens retornado ao Inventário', 'Sent Shipment updated': 'Enviado Embarque atualizado', 'Sent Shipments': 'Remessas Enviadas', 'Separated children, caregiving arrangements': 'Crianças separados, disposições caregiving', 'Serial Number': 'Numero de série', 'Series': 'serie', 'Server': 'servidor', 'Service': 'serviço', 'Service Catalog': 'Catálogo de Serviços', 'Service Due': 'Service Due', 'Service or Facility': 'Serviço ou facilidade', 'Service profile added': 'Perfil de serviço adicionado', 
'Service profile deleted': 'Perfil de serviço Excluído', 'Service profile updated': 'Perfil de serviço atualizado', 'Services': 'Serviços', 'Services Available': 'Serviços Disponíveis', 'Set Base Site': 'Definir base de dados do site', 'Set By': 'Definido por', 'Set True to allow editing this level of the location hierarchy by users who are not MapAdmins.': 'Configure como True para permitir que este nível da hierarquia do local possa ser editado por usuários que não sejam administradores.', 'Setting Details': 'Detalhes de ajuste', 'Setting added': 'Configuração adicionada', 'Setting deleted': 'Configuração Excluída', 'Setting updated': 'Configuração atualizada', 'Settings': 'Ajustes', 'Settings updated': 'Ajustes atualizados', 'Settings were reset because authenticating with Twitter failed': 'As configurações foram redefinidas porque a autenticação com Twitter falhou', 'Settings which can be configured through the web interface are available here.': 'As configurações que podem ser definidas através da interface da web estão disponíveis aqui.', 'Severe': 'Severo', 'Severity': 'Gravidade', 'Share a common Marker (unless over-ridden at the Feature level)': 'Compartilhar um marcador comum (a não ser que abaixo-assinado ao nível de Componente)', 'Shelter': 'Abrigo', 'Shelter & Essential NFIs': 'Abrigo & NFIs Essenciais', 'Shelter Details': 'Detalhes de Abrigo', 'Shelter Name': 'Nome de Abrigo', 'Shelter Registry': 'Registro de Abrigo', 'Shelter Service': 'Serviço de Abrigo', 'Shelter Service Details': 'Detalhes do serviço de abrigo', 'Shelter Service added': 'Serviço de Abrigo incluído', 'Shelter Service deleted': 'Serviço de Abrigo excluído', 'Shelter Service updated': 'Atualização de serviços de abrigo', 'Shelter Services': 'Serviços de abrigo', 'Shelter Type': 'Tipo de abrigo', 'Shelter Type Details': 'Detalhes do tiipo de abrigo', 'Shelter Type added': 'Tipo de abrigo incluído', 'Shelter Type deleted': 'Tipo de abrigo excluído', 'Shelter Type updated': 'Abrigos Tipo De atualização', 'Shelter Types': 'Tipos De abrigo', 'Shelter Types and Services': 'Abrigo Tipos e serviços', 'Shelter added': 'Abrigo incluído', 'Shelter deleted': 'Abrigo excluído', 'Shelter updated': 'Abrigo atualizado', 'Shelter/NFI Assistance': 'Abrigo/ Assistência NFI', 'Shelters': 'Abrigos', 'Shipment Created': 'Embarque Criado', 'Shipment Items': 'Itens de Carregamento', 'Shipment Items received by Inventory': 'Itens de Remessa recebidos pelo Inventário', 'Shipment Items sent from Inventory': 'Itens de Remessa enviados pelo Inventário', 'Shipment to Send': 'Carga para Enviar', 'Shipments': 'Remessas', 'Shipments To': 'Remessas Para', 'Shooting': 'Tiroteio', 'Short Assessment': 'Curta Avaliação', 'Short Description': 'Breve Descrição', 'Show Checklist': 'Mostrar Lista De Verificação', 'Show Details': 'Mostrar detalhes', 'Show Map': 'Mostrar Mapa', 'Show Region in Menu?': 'Mostrar Região no Menu?', 'Show in Menu?': 'Show in Menu?', 'Show on Map': 'Mostrar no mapa', 'Show on map': 'Mostrar no mapa', 'Sign-up as a volunteer': 'Inscrever-se como um voluntário', 'Sign-up for Account': 'Inscrever-se para conta', 'Sign-up succesful - you should hear from us soon!': 'Sua inscriçao foi feita com sucesso - aguarde notícias em breve!', 'Sindhi': 'Sindi', 'Single PDF File': 'Single PDF File', 'Site': 'site', 'Site Administration': 'Administração do site', 'Site or Location': 'Sítio ou Local', 'Sites': 'sites', 'Situation': 'Situação', 'Situation Awareness & Geospatial Analysis': 'Situação Reconhecimento & Geoespaciais Análise', 
'Sketch': 'Esboço', 'Skill': 'QUALIFICAÇÃO', 'Skill Catalog': 'Catálogo de Conhecimentos', 'Skill Details': 'Detalhes das habilidades', 'Skill Equivalence': 'Equivalência de Conhecimentos', 'Skill Equivalence Details': 'Detalhes da Equivalência de Habilidade', 'Skill Equivalence added': 'Equivalência de Habilidade incluída', 'Skill Equivalence deleted': 'Equivalência de Habilidade excluída', 'Skill Equivalence updated': 'Equivalência de Habilidade atualizada', 'Skill Equivalences': 'Equivalências de habilidade', 'Skill Provision': 'Provisão de Habilidade', 'Skill Provision Catalog': 'Catálogo de habilidades disponível', 'Skill Provision Details': 'Detalhes de habilidades disponível', 'Skill Provision added': 'Provisão de Habilidade incluída', 'Skill Provision deleted': 'Catalogo de habilidades excluído', 'Skill Provision updated': 'Catálogo de habilidades atualizado', 'Skill Provisions': 'Habilidades disponíveis', 'Skill Status': 'Status da Habilidade', 'Skill TYpe': 'Tipo de habilidade', 'Skill Type': 'Skill Type', 'Skill Type Catalog': 'Catálogo de tipos de habilidades', 'Skill Type Details': 'Detalhes do tipo de habilidade', 'Skill Type added': 'Tipo de habilidade incluído', 'Skill Type deleted': 'Tipo de habilidade excluído', 'Skill Type updated': 'Tipo de habilidade atualizado', 'Skill Types': 'Tipos de habilidade', 'Skill added': 'Habilidade incluída', 'Skill added to Request': 'Skill added to Request', 'Skill deleted': 'Habilidade Excluída', 'Skill removed': 'Skill removed', 'Skill removed from Request': 'Skill removed from Request', 'Skill updated': 'Habilidade ATUALIZADA', 'Skill/Training': 'Habilidades/Treinamento', 'Skills': 'Habilidades', 'Skills Catalog': 'Catálogo de habilidades', 'Skills Management': 'Gerenciamento das Habilidades', 'Skype': 'Skype', 'Skype ID': 'ID DO Skype', 'Slightly Damaged': 'Ligeiramente Danificado', 'Slope failure, debris': 'falha de inclinação, destroços', 'Small Trade': 'Pequeno Comércio', 'Smoke': 'Fumaça', 'Snapshot': 'snapshot', 'Snapshot Report': 'Relatório de snapshot', 'Snow Fall': 'Queda de neve , nevasca', 'Snow Squall': 'Rajada de neve', 'Soil bulging, liquefaction': 'abaulamento do solo, liquefação', 'Solid waste': 'Resíduos sólidos', 'Solution': 'Solução', 'Solution Details': 'Detalhes da Solução', 'Solution Item': 'Item de Solução', 'Solution added': 'Solução adicionada', 'Solution deleted': 'Solução excluída', 'Solution updated': 'Solução atualizada', 'Solutions': 'Soluções', 'Some': 'Algum', 'Sorry - the server has a problem, please try again later.': 'Sorry - the server has a problem, please try again later.', 'Sorry that location appears to be outside the area of the Parent.': 'Desculpe ! Essa localização está fora da área do Pai.', 'Sorry that location appears to be outside the area supported by this deployment.': 'Desculpe ! Essa localização parece estar fora da área suportada por esta implementação.', 'Sorry, I could not understand your request': 'Desculpe, eu não pude entender o seu pedido', 'Sorry, only users with the MapAdmin role are allowed to create location groups.': 'Desculpe, apenas usuários com o perfil MapAdmin tem permissão para criar locais dos grupos.', 'Sorry, only users with the MapAdmin role are allowed to edit these locations': 'Desculpe, apenas usuários com o perfil MapAdmin tem permissão para editar estes locais', 'Sorry, something went wrong.': 'Desculpe, algo deu errado.', 'Sorry, that page is forbidden for some reason.': 'Desculpe ! 
Esta página tem acesso restrito por alguma razão.', 'Sorry, that service is temporary unavailable.': 'Desculpe ! Este serviço está indisponível temporariamente.', 'Sorry, there are no addresses to display': 'Desculpe ! Não há endereços para visualizar.', "Sorry, things didn't get done on time.": 'Desculpe ! As tarefas não foram concluídas em tempo útil.', "Sorry, we couldn't find that page.": 'Desculpe, não foi possível localizar essa página.', 'Source': 'source', 'Source ID': 'ID de origem', 'Source Time': 'Origem do tempo', 'Sources of income': 'Fontes de rendimento', 'Space Debris': 'Destroços Espaciais', 'Spanish': 'espanhol', 'Special Ice': 'Gelo Especial', 'Special Marine': 'Marinha especial', 'Specialized Hospital': 'Hospital especializado.', 'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': 'Área específica (exemplo: edifício/quarto) com a localização de onde essa pessoa/grupo é visto.', 'Specific locations need to have a parent of level': 'Locais específicos precisam ter um nível paterno.', 'Specify a descriptive title for the image.': 'Especifique um título descritivo para a imagem.', 'Specify the bed type of this unit.': 'Especifique o tipo de cama dessa unidade.', 'Specify the number of available sets': 'Especificar o número de conjuntos disponíveis', 'Specify the number of available units (adult doses)': 'Especifique o número de unidades disponíveis (doses para adultos)', 'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions': 'Especificar o número de unidades disponíveis (litros) de Ringer-Lactato ou soluções equivalentes', 'Specify the number of sets needed per 24h': 'Especificar o número de conjuntos necessários por 24h', 'Specify the number of units (adult doses) needed per 24h': 'Especificar o número de unidades (doses para adultos) necessário por 24h', 'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h': 'Especificar o número de unidades (litros) de Ringer-Lactato ou soluções equivalentes necessárias para 24h', 'Speed': 'Speed', 'Spherical Mercator?': 'Mapa Mercator Esférico?', 'Spreadsheet Importer': 'PLANILHA IMPORTADOR', 'Spreadsheet uploaded': 'Planilha transferido por UPLOAD', 'Spring': 'Primavera', 'Squall': 'Rajada', 'Staff': 'Equipe', 'Staff & Volunteers': 'Colaboradores & Voluntários', 'Staff 2': 'Equipe 2', 'Staff Details': 'Equipe Detalhes', 'Staff ID': 'ID da equipe', 'Staff List': 'Lista de pessoal', 'Staff Member Details': 'Detalhes de membro da equipe', 'Staff Members': 'Membros da equipe', 'Staff Record': 'Registro de pessoal', 'Staff Type Details': 'Equipe Tipo Detalhes', 'Staff Type added': 'Equipe tipo incluído', 'Staff Type deleted': 'Tipo De equipe excluído', 'Staff Type updated': 'Equipe Tipo De atualização', 'Staff Types': 'Tipos de equipe', 'Staff added': 'Equipe incluída', 'Staff and Volunteers': 'Funcionários e Voluntários', 'Staff deleted': 'Equipe excluída', 'Staff member added': 'Membro da equipe incluído', 'Staff member updated': 'Membro da equipe atualizado', 'Staff present and caring for residents': 'Equipe presente e cuidando de moradores', 'Staff updated': 'Equipe atualizado', 'Staff2': 'staff2', 'Staffing': 'Equipe', 'Stairs': 'Escadas', 'Start Date': 'Data do início', 'Start date': 'Data Inicial', 'Start of Period': 'Início do Período', 'State': 'Status', 'Stationery': 'Papel de Carta', 'Status': 'Status', 'Status Report': 'Relatório de status', 'Status Updated': 'Status atualizado', 'Status added': 'Estado 
adicionado', 'Status deleted': 'Estado excluído', 'Status of clinical operation of the facility.': 'Estado da operação clínica da instalação.', 'Status of general operation of the facility.': 'Estado da operação geral da instalação.', 'Status of morgue capacity.': 'Estado da capacidade da morgue.', 'Status of operations of the emergency department of this hospital.': 'Estado das operações do Departamento de Emergência deste hospital.', 'Status of security procedures/access restrictions in the hospital.': 'Estado dos procedimentos de segurança/Restrições de Acesso no hospital.', 'Status of the operating rooms of this hospital.': 'Status das salas de operação deste hospital.', 'Status updated': 'Status atualizado', 'Steel frame': 'Estrutura de aço', 'Stolen': 'Roubado', 'Store spreadsheets in the Eden database': 'Arquivar as planilhas no banco de dados Eden', 'Storeys at and above ground level': 'Andares e no nível do solo acima', 'Storm Force Wind': 'Tempestade Força Vento', 'Storm Surge': 'ressaca', 'Stowaway': 'Penetra', 'Street Address': 'Endereço residencial', 'Streetview Enabled?': 'Streetview Enabled?', 'Strong Wind': 'vento forte', 'Structural': 'estrutural', 'Structural Hazards': 'riscos estruturais', 'Style': 'Style', 'Style Field': 'Estilo do Campo', 'Style Values': 'Estilo dos Valores', 'Sub-type': 'Subtipo', 'Subject': 'assunto', 'Submission successful - please wait': 'envio bem sucedido - por favor aguarde', 'Submission successful - please wait...': 'envio bem sucedido - por favor aguarde...', 'Submit New': 'Submeter Novamente', 'Submit New (full form)': 'Submeter Novo (formulário completo)', 'Submit New (triage)': 'Submeter novo (triagem)', 'Submit a request for recovery': 'envie um pedido de recuperação', 'Submit new Level 1 assessment (full form)': 'Submeter novo nível 1 de avaliação (formulário completo)', 'Submit new Level 1 assessment (triage)': 'Submeter novo nível 1 de avaliação (triagem)', 'Submit new Level 2 assessment': 'Submeter novo nível 2 de avaliação', 'Subscription Details': 'Detalhes da Assinatura', 'Subscription added': 'Assinatura Incluída', 'Subscription deleted': 'Assinatura Excluída', 'Subscription updated': 'Assinatura ATUALIZADO', 'Subscriptions': 'assinaturas', 'Subsector': 'Subsetor', 'Subsector Details': 'Detalhes de subsetor', 'Subsector added': 'Subsetor incluído', 'Subsector deleted': 'Subsetor excluído', 'Subsector updated': 'Subsetor atualizado', 'Subsectors': 'Subsetores', 'Subsistence Cost': 'custo de subsistencia', 'Suburb': 'Subúrbio', 'Suggest not changing this field unless you know what you are doing.': 'Sugerimos não alterar esse campo a menos que você saiba o que está fazendo.', 'Summary': 'Sumário', 'Summary by Administration Level': 'Resumo por Nível de Administração', 'Sunday': 'Domingo', 'Supervisor': 'Supervisor', 'Supplies': 'Suprimentos', 'Supply Chain Management': 'Supply Chain Management', 'Supply Item Categories': 'Supply Item Categories', 'Support Request': 'Pedido de Suporte', 'Support Requests': 'Pedidos de Suporte', 'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.': 'Suporta a tomada de decisão de grandes grupos de Especialistas em Gestão de Crises ajudando os grupos a criar listas de classificados.', 'Sure you want to delete this object?': 'Tem certeza que você quer excluir este objeto?', 'Surgery': 'Cirurgia', 'Survey Answer': 'Resposta da Pesquisa', 'Survey Answer Details': 'Detalhes da Resposta da Pesquisa', 'Survey Answer added': 'Incluído Resposta da 
Pesquisa', 'Survey Answer deleted': 'Excluído a Resposta da Pesquisa', 'Survey Answer updated': 'Resposta da Pesquisa atualizada', 'Survey Module': 'Módulo de Pesquisa', 'Survey Name': 'Nome da Pesquisa', 'Survey Question': 'Questão de Pesquisa de Opinião', 'Survey Question Details': 'Detalhes da Pergunta de Pesquisa', 'Survey Question Display Name': 'Nome da pergunta de pesquisa', 'Survey Question added': 'Pergunta de pesquisa incluída', 'Survey Question deleted': 'Pergunta de pesquisa excluída', 'Survey Question updated': 'Pergunta de pesquisa atualizada', 'Survey Section': 'Seção da Pesquisa de Opinião', 'Survey Section Details': 'Detalhes de Seção de Pesquisa', 'Survey Section Display Name': 'Seção de pesquisa do nome de exibição', 'Survey Section added': 'Seção de Pesquisa incluída', 'Survey Section deleted': 'Seção de Pesquisa excluída', 'Survey Section updated': 'Seção de pesquisa atualizada', 'Survey Series': 'Série de Pesquisa', 'Survey Series Details': 'Série de Pesquisa Detalhes', 'Survey Series Name': 'Nome de Série de Pesquisa', 'Survey Series added': 'Série de Pesquisa incluída', 'Survey Series deleted': 'Série de Pesquisa excluída', 'Survey Series updated': 'Série de Pesquisa atualizada', 'Survey Template': 'Modelo de Pesquisa de Opinião', 'Survey Template Details': 'Definir detalhes do formulário', 'Survey Template added': 'Modelo de Pesquisa incluído', 'Survey Template deleted': 'Modelo de Pesquisa excluído', 'Survey Template updated': 'Definição de formulário actualizada', 'Survey Templates': 'Definir formulários', 'Symbology': 'Simbologia', 'Sync Conflicts': 'Conflitos de Sincronização', 'Sync History': 'Histórico de Sincronização', 'Sync Now': 'Sincronizar Agora', 'Sync Partners': 'Sincronizar parceiros', 'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': 'PARCEIROS DE Sincronização são instâncias ou PARES (SahanaEden, SahanaAgasti, Ushahidi, etc. ) que você deseja a informação de sincronização com. Clique no link sobre o direito de ir a página em que você pode incluir parceiros de sincronização, procurar por parceiros de sincronização e Modificá-las.', 'Sync Pools': 'Conjuntos de Sincronização', 'Sync Schedule': 'Planejamento de Sincronização', 'Sync Settings': 'Configurações de Sincronização', 'Sync process already started on': 'Processo de Sincronização já iniciado em', 'Sync process already started on ': 'Sync process already started on ', 'Synchronisation': 'Sincronização', 'Synchronization': 'Sincronização', 'Synchronization Conflicts': 'Conflitos de Sincronização', 'Synchronization Details': 'Detalhes de Sincronização', 'Synchronization History': 'Histórico de Sincronização', 'Synchronization Peers': 'Parceiros de Sincronização', 'Synchronization Settings': 'Configurações de sincronização', 'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden': 'Sincronização permite compartilhar dados que você tenha com outros e Atualizar seu próprio banco de dados com informações recentes de outros parceiros. 
Esta página fornece informações sobre como utilizar os recursos de sincronização de Sahana Éden', 'Synchronization not configured.': 'Sincronização não Configurada.', 'Synchronization settings updated': 'Configurações de sincronização atualizadas', 'Syncronisation History': 'Histórico De Sincronização', "System's Twitter account updated": 'Conta do Twitter do sistema atualizada', 'Tags': 'Tags', 'Take shelter in place or per <instruction>': 'Abrigue-se no local ou conforme <instruction>', 'Task': 'Task', 'Task Details': 'Detalhes da Tarefa', 'Task List': 'Lista de tarefas', 'Task Status': 'Status da tarefa', 'Task added': 'Tarefa incluída', 'Task deleted': 'Tarefa excluída', 'Task removed': 'Task removed', 'Task updated': 'Tarefa atualizada', 'Tasks': 'Tarefas', 'Team': 'Equipe', 'Team Description': 'Descrição da Equipe', 'Team Details': 'Detalhes da Equipe', 'Team ID': 'ID da Equipe', 'Team Id': 'Id da Equipe', 'Team Leader': 'Líder de Equipe', 'Team Member added': 'Membro da equipe incluído', 'Team Members': 'Membros da equipe', 'Team Name': 'Nome da equipe', 'Team Type': 'Tipo de equipe', 'Team added': 'Equipe incluída', 'Team deleted': 'Equipe excluída', 'Team updated': 'Equipa actualizada', 'Teams': 'Equipes', 'Technical testing only, all recipients disregard': 'Apenas teste técnico, todos os recipientes ignorem', 'Telecommunications': 'Telecomunicações', 'Telephone': 'Telefone', 'Telephone Details': 'Telephone Details', 'Telephony': 'Telefonia', 'Tells GeoServer to do MetaTiling which reduces the number of duplicate labels.': 'Tells GeoServer to do MetaTiling which reduces the number of duplicate labels.', 'Temp folder %s not writable - unable to apply theme!': 'Pasta temporária %s não gravável - impossível aplicar o tema!', 'Template Name': 'Template Name', 'Template file %s not readable - unable to apply theme!': 'Arquivo de modelo %s não legível - impossível aplicar o tema!', 'Templates': 'modelos', 'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': 'Termo para o 5º nível de divisão administrativa nacional (por exemplo, uma subdivisão de código postal ou de zona de votação). Este nível não é frequentemente utilizado.', 'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': 'Termo para o 4º nível de divisão administrativa nacional (por exemplo, vila, bairro ou distrito).', 'Term for the primary within-country administrative division (e.g. State or Province).': 'Termo para a principal divisão administrativa dentro do país (i.e. Estado ou Província).', 'Term for the secondary within-country administrative division (e.g. District or County).': 'Termo para a divisão administrativa secundária dentro do país (por exemplo, Distrito ou Município).', 'Term for the secondary within-country administrative division (e.g. District).': 'Termo para a divisão administrativa secundária dentro do país (i.e. Distrito).', 'Term for the third-level within-country administrative division (e.g. City or Town).': 'Termo para o 3º nível de divisão administrativa dentro do país (por exemplo, Cidade ou Município).', 'Term for the top-level administrative division (i.e. 
Country).': 'Prazo para a divisão administrativa de nível superior (por exemplo País).', 'Term for the top-level administrative division (typically Country).': 'Prazo para a divisão administrativa de nível superior (geralmente País).', 'Terms of Service\n\nYou have to be eighteen or over to register as a volunteer.': 'Terms of Service\n\nYou have to be eighteen or over to register as a volunteer.', 'Terms of Service:': 'Terms of Service:', 'Territorial Authority': 'Autoridade territoriais', 'Terrorism': 'Terrorismo', 'Tertiary Server (Optional)': 'Servidor terciário (opcional)', 'Text': 'texto', 'Text Color for Text blocks': 'Cor de texto para os blocos de texto', 'Text before each Text Field (One per line)': 'Texto antes de cada campo de texto (um por linha)', 'Thank you for validating your email. Your user account is still pending for approval by the system administator (%s).You will get a notification by email when your account is activated.': 'Obrigado para validar seu e-mail. Sua conta de usuário ainda está pendente para aprovação pelo administrador do Sistema (%s). você receberá uma notificação por e-mail quando sua conta esteja ativada.', 'Thanks for your assistance': 'Obrigado por sua ajuda', 'The': 'O', 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': 'O "query" é uma condição como "db.table1.field1==\'value\'". Algo como "db.table1.field1 == db.table2.field2" resulta em uma junção SQL.', 'The Area which this Site is located within.': 'A área que este Site está localizado', 'The Assessments module allows field workers to send in assessments.': 'O Modulo Avaliações permite aos trabalhadores de campo que enviem avaliações.', 'The Author of this Document (optional)': 'O autor deste documento (opcional)', 'The Building Asssesments module allows building safety to be assessed, e.g. after an Earthquake.': 'O módulo avaliações De Construção permite a segurança edifício a ser avaliada, por exemplo, depois de um terremoto.', 'The Camp this Request is from': 'O Alojamento neste pedido é de', 'The Camp this person is checking into.': 'O Alojamento que esta pessoa está se registrando.', 'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'O local atual do Usuário/Grupo, que pode ser geral (para relatórios) ou precisa (para exibir em um mapa). Digite alguns caracteres para procurar nos locais disponíveis.', "The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": "O doador(s) para este projeto. Vários valores podem ser selecionados ao manter pressionado a chave 'control'", 'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': 'O endereço de e-mail para onde os pedidos de aprovação são enviados (normalmente seria um correio de Grupo ao invés de um individual). Se o campo estiver em branco, os pedidos são aprovados automaticamente se o domínio corresponder.', 'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': 'O Sistema de Comunicação de Incidentes permite o Público em Geral reportar incidentes & ter esses rastreados.', 'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). 
Enter a few characters to search from available locations.': 'A Localização da Pessoa vem do, que pode ser geral (para relatórios) ou precisa (para exibir em um mapa). Digite alguns caracteres para procurar nos locais disponíveis.', 'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'O local que a pessoa vai, que pode ser genérico (para Relatórios) ou preciso (para exibir em um mapa). Digite alguns caracteres para procurar nos locais disponíveis.', 'The Media Library provides a catalog of digital media.': 'A Biblioteca de mídias fornece um catálogo de mídia digital.', 'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': 'O módulo de mensagens é o hub de comunicação principal do sistema Sahana. É utilizado para enviar alertas e/ou mensagens utilizando o SMS & e-mail para diferentes grupos e indivíduos antes, durante e após um desastre.', 'The Organization Registry keeps track of all the relief organizations working in the area.': 'O registro Da Organização mantém controle de todos as organizações de apoio que trabalham na área.', 'The Organization Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'O registro da Organização mantém controle de todas organizações de ajuda trabalhando numa região de desastre. Ele captura não apenas os locais onde elas estão ativas, mas também captura informações sobre o conjunto de projetos que está fornecendo em cada região.', 'The Patient Tracking system keeps track of all the evacuated patients & their relatives.': 'The Patient Tracking system keeps track of all the evacuated patients & their relatives.', 'The Person currently filling this Role.': 'A pessoa atualmente preenchendo esta função.', 'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': 'O módulo acompanhamento do projeto permite a criação de atividades para preencher Lacunas nas avaliações de necessidades.', 'The Requests Management System is a central online repository where all relief organizations, relief workers, government agents and camp sites for displaced personnel can coordinate the supply of aid with their demand. It allows users to allocate the available resources to fulfill the demands effectively and efficiently.': 'O sistema De Gerenciamento De Pedidos é um repositório online central em todas as organizações de ajuda, trabalhadores de assistência, agentes do governo e sites de acampamento para a equipe de refugiados pode coordenar o fornecimento da ajuda com seu pedido. Ela permite que usuários aloquem os recursos disponíveis para suprir as demandas de forma efetiva e eficiente.', 'The Role this person plays within this hospital.': 'A Função desta pessoa neste hospital.', 'The Role to which this Role reports.': 'A função à qual essa função responde.', 'The Shelter Registry tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': 'O registro do Abrigo rastreia todos os detalhes básicos abrigos e armazena sobre eles. 
Ele colabora com outros módulos para rastrear as pessoas associadas com um abrigo, os serviços disponíveis etc.', 'The Shelter this Request is from': 'O pedido deste abrigo é de', 'The Shelter this Request is from (optional).': 'O pedido este Abrigo é de (opcional).', 'The Shelter this person is checking into.': 'O abrigo esta pessoa está verificando no.', 'The URL for the GetCapabilities of a WMS Service whose layers you want accessible via the Map.': 'A URL para o GetCapabilities de um serviço WMS cujas camadas você deseja acessíveis através do mapa.', 'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.': 'A URL para a página do GetCapabilities de um Web Map Service (WMS), cujas camadas que você deseja disponíveis através do painel do navegador no Mapa.', "The URL of the image file. If you don't upload an image file, then you must specify its location here.": 'A URL do arquivo de imagem. Se voce não fizer o upload de um arquivo de imagem, então voce deverá especificar sua localização aqui.', 'The URL of your web gateway without the post parameters': 'A URL de seu gateway da web sem os parâmetros post', 'The URL to access the service.': 'A URL para acessar o serviço.', 'The Unique Identifier (UUID) as assigned to this facility by the government.': 'O Idenfificador Único (UUID) conforme designado pelo governo para esta filial.', 'The asset must be assigned to a site OR location.': 'O ativo deve ser assinalado para um site ou local.', 'The attribute which is used for the title of popups.': 'O atributo que é usado para o título de popups.', 'The attribute within the KML which is used for the title of popups.': 'O Atributo dentro do KML que é utilizado para o título dos pop-ups.', 'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': 'O Atributo(s) no KML que são utilizados para o corpo dos pop-ups. 
( utilizar um espaço entre atributos )', 'The body height (crown to heel) in cm.': 'A altura do corpo (cabeça até o calcanhar) em cm.', 'The contact person for this organization.': 'A pessoa de contato nessa organização.', 'The country the person usually lives in.': 'O país que a pessoa vive habitualmente', 'The default Facility for which this person is acting.': 'The default Facility for which this person is acting.', 'The default Facility for which you are acting.': 'The default Facility for which you are acting.', 'The default Organization for whom this person is acting.': 'A Organização padrão para quem esta pessoa está atuando.', 'The default Organization for whom you are acting.': 'A Organização padrão para quem você está atuando.', 'The duplicate record will be deleted': 'O registro duplicado será excluído', 'The first or only name of the person (mandatory).': 'O primeiro nome ou único nome da pessoa (obrigatório).', 'The form of the URL is http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.': 'O formulário da URL é http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service que representa o caminho da URL para o WMS.', 'The language you wish the site to be displayed in.': 'O idioma que você deseja que o site seja exibido.', 'The last known location of the missing person before disappearance.': 'A última localização conhecida da pessoa desaparecida antes do desaparecimento.', 'The level at which Searches are filtered.': 'The level at which Searches are filtered.', 'The list of Brands are maintained by the Administrators.': 'A lista de Marcas serão mantidas pelos administradores.', 'The list of Catalogs are maintained by the Administrators.': 'A lista de catálogos é mantida pelos administradores.', 'The list of Item categories are maintained by the Administrators.': 'A lista de categorias dos itens são mantidas pelos administradores.', 'The map will be displayed initially with this latitude at the center.': 'O mapa será exibido inicialmente com esta latitude no centro.', 'The map will be displayed initially with this longitude at the center.': 'O mapa será exibido inicialmente com esta longitude no centro.', 'The minimum number of features to form a cluster.': 'O número mínimo de recursos para formar um cluster.', 'The name to be used when calling for or directly addressing the person (optional).': 'O nome a ser usado ao chamar por ou diretamente endereçar a pessoa (opcional).', 'The next screen will allow you to detail the number of people here & their needs.': 'A próxima tela permitirá que você detalhe o número de pessoas aqui e as suas necessidades.', 'The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item': 'O número de unidades de medida dos Itens alternativos é igual a uma unidade de medida do Item', 'The number of pixels apart that features need to be before they are clustered.': 'O número de separado de pixels de funcionalidades tem que ser antes que eles sejam agrupados.', 'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': 'O número de títulos em torno do mapa visível para fazer download. 
Zero significa que a primeira página carrega mais rápido, números maiores que zero significam que as paginas seguintes são mais rápida.', 'The person at the location who is reporting this incident (optional)': 'A pessoa no local que está relatando este incidenten (opcional)', 'The person reporting the missing person.': 'A pessoa reportando o desaparecimento de alguem', 'The post variable containing the phone number': 'A variavel post contendo o numero de telefone', 'The post variable on the URL used for sending messages': 'A variável post no URL é utilizada para enviar mensagens', 'The post variables other than the ones containing the message and the phone number': 'As variáveis post diferentes das que contém a mensagem e o número de telefone', 'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': 'A porta serial no qual o modem está conectado-/dev/ttyUSB0, etc. No linux e com1, com2, etc. No Windows', 'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': 'O servidor não receber uma resposta oportuna de outro servidor que ele estava acessando para preencher o pedido pelo navegador.', 'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': 'O servidor recebeu uma resposta incorreta a partir de outro servidor que ele estava acessando para preencher o pedido pelo navegador.', 'The site where this position is based.': 'O local onde esta posição se baseia.', 'The staff responsibile for Facilities can make Requests for assistance. Commitments can be made against these Requests however the requests remain open until the requestor confirms that the request is complete.': 'O pessoal responsável pelas Instalações podem fazer pedidos de assistência. 
Compromissos podem ser feitas em relação a esses pedidos no entanto os pedidos permanecem abertas até o SOLICITANTE confirma que o pedido foi concluído.', 'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>': 'O acontecimento já não representa uma ameaça ou preocupação e a ação a ser tomada é descrita em<instruction>', 'The time at which the Event started.': 'O momento em que o evento começou.', 'The time difference between UTC and your timezone, specify as +HHMM for eastern or -HHMM for western timezones.': 'The time difference between UTC and your timezone, specify as +HHMM for eastern or -HHMM for western timezones.', 'The title of the WMS Browser panel in the Tools panel.': 'O título do painel do navegador WMS em ferramentas.', 'The token associated with this application on': 'O token associado a este aplicativo em', 'The unique identifier which identifies this instance to other instances.': 'O indentificador único diferencia esta instância de outras.', 'The way in which an item is normally distributed': 'O modo em que um item é normalmente distribuído', 'The weight in kg.': 'O peso em quilogramas.', 'Theme': 'Tema', 'Theme Details': 'Detalhes do Tema', 'Theme added': 'Tema incluído', 'Theme deleted': 'Tema excluído', 'Theme updated': 'Tema atualizado', 'Themes': 'Temas', 'There are errors': 'Há erros', 'There are insufficient items in the Inventory to send this shipment': 'não há itens suficientes no armazém para o envio desse carregamento', 'There are multiple records at this location': 'Há vários registros neste local', 'There are not sufficient items in the Inventory to send this shipment': 'não há itens suficientes no inventário para enviar esse carregamento', 'There is no address for this person yet. Add new address.': 'Não há endereço para esta pessoa ainda. Adicionar novo endereço.', 'There was a problem, sorry, please try again later.': 'There was a problem, sorry, please try again later.', 'These are settings for Inbound Mail.': 'Estas são as configurações para Correio de entrada.', 'These are the Incident Categories visible to normal End-Users': 'Estes são as Categorias de incidentes visíveis para usuários finais normais.', 'These need to be added in Decimal Degrees.': 'estas precisam ser incluídas em graus decimais.', 'They': 'Eles', 'This appears to be a duplicate of': 'Isto parece ser duplicado de', 'This appears to be a duplicate of ': 'This appears to be a duplicate of ', 'This email address is already in use': 'This email address is already in use', 'This file already exists on the server as': 'Este arquivo já existe como no servidor', 'This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.': 'Isso é apropriado se esse nível estiver em construção. 
Para evitar modificação acidental após esse nível estar concluído, pode ser configurado como False.', 'This is the way to transfer data between machines as it maintains referential integrity.': 'Este é o caminho para a transferência de dados entre máquinas que mantém a integridade referencial.', 'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!': 'Este é o caminho para a transferência de dados entre máquinas que mantém a integridade referencial...duplicado dados devem ser removidos manualmente 1ᵉʳ!', 'This level is not open for editing.': 'Este nível não é aberto para edição.', 'This might be due to a temporary overloading or maintenance of the server.': 'Isso pode ser devido a uma sobrecarga temporária ou manutenção do servidor.', 'This module allows Inventory Items to be Requested & Shipped between the Inventories of Facilities.': 'Este módulo permite que itens de inventário sejam Solicitados & Enviados entre os Inventários das instalações.', 'This module allows you to manage Events - whether pre-planned (e.g. exercises) or Live Incidents. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': 'This module allows you to manage Events - whether pre-planned (e.g. exercises) or Live Incidents. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.', 'This module allows you to plan scenarios for both Exercises & Events. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': 'Este módulo permite que você planeje cenários para os Exercícios & Eventos. Você pode alocar apropriado recursos (humanos, Ativos e Recursos) para que estes possam ser mobilizados facilmente.', 'This page shows you logs of past syncs. Click on the link below to go to this page.': 'Esta página mostra as logs das sincronizações passadas. Clique no link abaixo para ir para essa página.', 'This screen allows you to upload a collection of photos to the server.': 'Esta tela permite que você faça upload de um conjunto de fotografias para o servidor.', 'This setting can only be controlled by the Administrator.': 'Esta definicão só pode ser controlado pelo administrador.', 'This shipment has already been received.': 'Este carregamento já foi recebido.', 'This shipment has already been sent.': 'Este carregamento já foi enviado.', 'This shipment has not been received - it has NOT been canceled because can still be edited.': 'Este carregamento não foi recebido-ele não foi cancelado porque ainda pode ser editado.', 'This shipment has not been sent - it has NOT been canceled because can still be edited.': 'Este carregamento não foi enviado- ele não foi cancelado porque ainda pode ser editado.', 'This shipment will be confirmed as received.': 'Este carregamento será confirmado como recebido.', 'This value adds a small mount of distance outside the points. Without this, the outermost points would be on the bounding box, and might not be visible.': 'Esse valor inclui um pequeno valor de distância fora dos pontos. Sem isto, os pontos mais afastados estariam na caixa delimitadora, e podem não estar visíveis.', 'This value gives a minimum width and height in degrees for the region shown. Without this, a map showing a single point would not show any extent around that point. 
After the map is displayed, it can be zoomed as desired.': 'Este valor fornece uma largura e altura minimas em graus para a região mostrada. Sem isto, um mapa que mostre um ponto único não mostraria nenhuma extensão ao redor desse ponto. Depois que o mapa for exibido, pode ser ampliado, conforme desejado.', 'Thunderstorm': 'Trovoada', 'Thursday': 'Quinta-feira', 'Ticket': 'Bilhete', 'Ticket Details': 'Detalhes do bilhete', 'Ticket ID': 'ID do Bilhete', 'Ticket added': 'Bilhete incluído', 'Ticket deleted': 'Bilhete removido', 'Ticket updated': 'Bilhete atualizado', 'Ticketing Module': 'Módulo de bilhetes', 'Tickets': 'Bilhetes', 'Tiled': 'Tiled', 'Tilt-up concrete': 'Inclinar concreto', 'Timber frame': 'Quadro de madeira', 'Timeline': 'Prazo', 'Timeline Report': 'Relatório de períodos de tempo', 'Timestamp': 'Timestamp', 'Timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'Timestamps can be correlated with the timestamps on the photos to locate them on the map.', 'Title': 'título', 'Title to show for the Web Map Service panel in the Tools panel.': 'Título para mostrar o painel de serviço de Mapa da Web no painel de Ferramentas.', 'To': 'para', 'To Location': 'Localidade de destino', 'To Person': 'Para Pessoa', 'To begin the sync process, click the button on the right =>': 'Para iniciar o processo de Sincronização, clique no botão à direita.', 'To begin the sync process, click the button on the right => ': 'To begin the sync process, click the button on the right => ', 'To begin the sync process, click this button =>': 'Para iniciar o processo de Sincronização, clique neste botão.', 'To begin the sync process, click this button => ': 'To begin the sync process, click this button => ', 'To create a personal map configuration, click': 'Para criar uma configuração do mapa pessoal, clique', 'To create a personal map configuration, click ': 'To create a personal map configuration, click ', 'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': 'Para editar OpenStreetMap, você precisa editar as configurações do OpenStreetMap em models/000_config.py', 'To search by job title, enter any portion of the title. You may use % as wildcard.': 'Para pesquisar por título, digite qualquer parte do título. Pode utilizar o % como um substituto para qualquer caracter.', "To search by person name, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Para pesquisar por nome, digite qualquer do primeiro, meio ou últimos nomes, separados por espaços. Pode utilizar o % como um substituto para qualquer caracter. PRESSIONE ' Search' sem entrada para listar todas as pessoas.", "To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": "Para procurar um corpo, digite o número da ID do corpo. Pode utilizar o % como um substituto para qualquer caracter. PRESSIONE ' Search' sem entrada para listar todos os organismos.", "To search for a hospital, enter any of the names or IDs of the hospital, or the organization name or acronym, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "Para procurar um hospital, digite qualquer um dos nomes ou IDs do hospital, ou o nome da organização ou Acrônimo, separados por espaços. Pode utilizar o % como um substituto para qualquer caracter. 
PRESSIONE ' Search' sem entrada para listar todos os hospitais.", "To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "Para procurar um hospital, digite qualquer um dos nomes ou IDs do hospital, separados por espaços. Pode utilizar o % como um substituto para qualquer caracter. PRESSIONE ' Search' sem entrada para listar todos os hospitais.", "To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "Para procurar um local, digite o nome. Pode utilizar o % como um substituto para qualquer caracter. PRESSIONE ' Search' sem entrada para listar todos os locais.", "To search for a patient, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all patients.": "To search for a patient, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all patients.", "To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Para procurar por uma pessoa, digite qualquer do primeiro, meio ou últimos nomes e/ou um número de ID de uma pessoa, separados por espaços. Pode utilizar o % como um substituto para qualquer caracter. PRESSIONE ' Search' sem entrada para listar todas as pessoas.", "To search for a person, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Para procurar por uma pessoa, digite ou o primeiro nome, ou o nome do meio ou sobrenome, separados por espaços. Pode utilizar o % como um substituto para qualquer caracter. PRESSIONE ' Search' sem entrada para listar todas as pessoas.", "To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.": "Para procurar por uma avaliação, digite qualquer parte o número da permissão da avaliação. Pode utilizar o % como um substituto para qualquer caracter. PRESSIONE ' Search' sem entrada para listar todas as avaliações.", 'To variable': 'Para variável', 'Tools': 'ferramentas', 'Tornado': 'tornado', 'Total': 'Total', 'Total # of Target Beneficiaries': 'Nº Total de Beneficiários De Destino', 'Total # of households of site visited': 'Nº Total de famílias de site Visitado', 'Total Beds': 'Total de Camas', 'Total Beneficiaries': 'Total de Beneficiários', 'Total Cost per Megabyte': 'Custo Total por Megabyte', 'Total Cost per Minute': 'Custo Total por Minuto', 'Total Monthly': 'Total Mensal', 'Total Monthly Cost': 'Custo Total mensal', 'Total Monthly Cost:': 'Custo Total mensal:', 'Total Monthly Cost: ': 'Total Monthly Cost: ', 'Total One-time Costs': 'Total Um tempo de Custos', 'Total Persons': 'Totalizar Pessoas', 'Total Recurring Costs': 'Totalizar Custos Recorrentes', 'Total Unit Cost': 'Total do custo unitário', 'Total Unit Cost:': 'Custo Unitário Total:', 'Total Unit Cost: ': 'Total Unit Cost: ', 'Total Units': 'Total de unidades', 'Total gross floor area (square meters)': 'Total de área bruta (metros quadrados)', 'Total number of beds in this hospital. Automatically updated from daily reports.': 'Número Total de leitos neste hospital. 
Atualizado automaticamente a partir de relatórios diários.', 'Total number of houses in the area': 'Número Total de casas na área', 'Total number of schools in affected area': 'Número Total de escolas em área afetada', 'Total population of site visited': 'População total do local visitado', 'Totals for Budget:': 'Totais para Orçamento:', 'Totals for Bundle:': 'Totais para Pacote:', 'Totals for Kit:': 'Totais para Kit:', 'Tourist Group': 'Grupo turístico', 'Town': 'Cidade', 'Traces internally displaced people (IDPs) and their needs': 'Rastreia pessoas deslocadas internamente (PDI) e suas necessidades', 'Tracing': 'Rastreio', 'Track': 'Rastrear', 'Track Details': 'Detalhes do rastreio', 'Track deleted': 'Rastreio excluído', 'Track updated': 'Rastreamento atualizado', 'Track uploaded': 'Rastreamento enviado', 'Track with this Person?': 'Rastrear com esta pessoa?', 'Tracking of Patients': 'Tracking of Patients', 'Tracking of Projects, Activities and Tasks': 'Rastreamento de projetos, atividades e tarefas', 'Tracking of basic information on the location, facilities and size of the Shelters': 'Rastreamento de informações básicas sobre a localização, instalações e tamanho dos abrigos', 'Tracks': 'Tracks', 'Tracks the location, distibution, capacity and breakdown of victims in Shelters': 'Rastreia o local, distribuição, capacidade e composição das vítimas em Abrigos', 'Traffic Report': 'Relatório de tráfego', 'Training': 'Treinamento', 'Training Course Catalog': 'Catálogo de cursos de treinamento', 'Training Details': 'Detalhes do treinamento', 'Training added': 'Treinamento incluído', 'Training deleted': 'Treinamento excluído', 'Training updated': 'Treinamento atualizado', 'Trainings': 'Treinamentos', 'Transit': 'Trânsito', 'Transit Status': 'Status do Trânsito', 'Transition Effect': 'Efeito de Transição', 'Transparent?': 'Transparente?', 'Transportation assistance, Rank': 'Assistência de transporte, Classificação', 'Trauma Center': 'Centro de traumas', 'Travel Cost': 'Custo da Viagem', 'Tropical Storm': 'Tempestade Tropical', 'Tropo': 'Tropo', 'Tropo Messaging Token': 'Token de Mensagens do Tropo', 'Tropo Settings': 'Configurações do Tropo', 'Tropo Voice Token': 'Token de Voz do Tropo', 'Tropo settings updated': 'Configurações do Tropo atualizadas', 'Truck': 'Caminhão', 'Try checking the URL for errors, maybe it was mistyped.': 'Tente verificar se existem erros na URL, talvez tenha sido um erro de digitação.', 'Try hitting refresh/reload button or trying the URL from the address bar again.': 'Tente apertar o botão atualizar/recarregar ou tente a URL a partir da barra de endereços novamente', 'Try refreshing the page or hitting the back button on your browser.': 'Tente atualizar a página ou apertar o botão voltar em seu navegador.', 'Tsunami': 'Tsunami', 'Tuesday': 'Terça-feira', 'Twitter': 'Twitter', 'Twitter ID or #hashtag': 'ID Twitter ou #hashtag', 'Twitter Settings': 'Configurações do Twitter', 'Type': 'Tipo', 'Type of Construction': 'Tipo de Construção', 'Type of water source before the disaster': 'Tipo de fonte de água antes do desastre', "Type the first few characters of one of the Person's names.": 'Digite os primeiros caracteres de um dos nomes da pessoa.', 'UID': 'UID', 'UN': 'ONU', 'URL': 'URL', 'UTC Offset': 'UTC Offset', 'Un-Repairable': 'Irreparável', 'Unable to parse CSV file!': 'Não é possível analisar o arquivo CSV!', 'Understaffed': 'Com falta de pessoal', 'Unidentified': 'Não identificado', 'Unit Cost': 'Custo por unidade', 'Unit added': 'Unidade 
incluída', 'Unit deleted': 'Unidade Excluída', 'Unit of Measure': 'Unidade de medida', 'Unit updated': 'Unidade Atualizados', 'Units': 'Unidades', 'Unknown': 'unknown', 'Unknown Peer': 'Peer desconhecido', 'Unknown type of facility': 'Tipo desconhecido de instalação', 'Unreinforced masonry': 'Alvenaria obras', 'Unresolved Conflicts': 'Conflitos não resolvidos', 'Unsafe': 'Inseguro', 'Unselect to disable the modem': 'Desmarcar para desativar o modem', 'Unselect to disable this API service': 'Unselect to disable this API service', 'Unselect to disable this SMTP service': 'Unselect to disable this SMTP service', 'Unsent': 'não enviado', 'Unsupported data format!': 'Formato de dados não Suportado!', 'Unsupported method!': 'Método não Suportado!', 'Update': 'atualização', 'Update Activity Report': 'Atualizar Relatório de atividade', 'Update Cholera Treatment Capability Information': 'Atualizar informações de capacidade de tratamento de Cólera', 'Update Request': 'Atualizar Pedido', 'Update Service Profile': 'Atualizar Perfil de Serviço', 'Update Status': 'Status da Atualização', 'Update Task Status': 'Atualizar Status da Tarefa', 'Update Unit': 'Atualizar Unidade', 'Update if Master': 'Atualizar se for o principal', 'Update if Newer': 'Atualizar se Mais Recente', 'Update your current ordered list': 'ATUALIZE a seu atual lista ordenada', 'Updated By': 'Atualizado por', 'Upload Comma Separated Value File': 'Upload Comma Separated Value File', 'Upload Format': 'Upload Format', 'Upload OCR Form': 'Upload OCR Form', 'Upload Photos': 'Fazer Upload de Fotos', 'Upload Spreadsheet': 'Fazer atualizacao de Planilha', 'Upload Track': 'Pista de carregamento', 'Upload a CSV file': 'Upload a CSV file', 'Upload a CSV file formatted according to the Template.': 'Upload a CSV file formatted according to the Template.', 'Upload a Spreadsheet': 'Fazer Upload de uma planilha', 'Upload an image file (bmp, gif, jpeg or png), max. 300x300 pixels!': 'Fazer Upload de um arquivo de imagem (bmp, gif, jpeg ou png), máx. 300x300 pixels!', 'Upload an image file here.': 'Fazer atualizacao de um arquivo de imagem aqui.', "Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": 'Fazer atualizacao de um arquivo de imagem aqui. Se voce não fizer o upload de um arquivo de imagem, então voce deverá especificar sua localização no campo URL', 'Upload an image, such as a photo': 'Fazer Upload de uma imagem, como uma foto', 'Uploaded': 'Uploaded', 'Urban Fire': 'Incêndio urbano', 'Urban area': 'Zona Urbana', 'Urdu': 'Urdu', 'Urgent': 'Urgente', 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Utilize (...)&(...) para e, (...)|(...) ou para, e ~(...) 
para não para construir consultas mais complexas.', 'Use Geocoder for address lookups?': 'Utiliza Geocodificador para consultas de endereços?', 'Use default': 'usar o padrão', 'Use these links to download data that is currently in the database.': 'Use estes links para fazer o download de dados actualmente na base de dados.', 'Use this to set the starting location for the Location Selector.': 'Use this to set the starting location for the Location Selector.', 'Used by IRS & Assess': 'Utilizado pela Receita Federal & Avaliar', 'Used in onHover Tooltip & Cluster Popups to differentiate between types.': 'Utilizado em onHover De Dicas & Cluster Popups para diferenciar entre tipos.', 'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.': 'Utilizado para construir onHover Dicas & primeiro campo também utilizado no Popups Cluster para diferenciar entre os registros.', 'Used to check that latitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Usado para verificar latitude de locais inseridos é razoável. Pode ser utilizado para filtrar listas de recursos que possuem locais.', 'Used to check that longitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Usado para verificar que longitude de locais inserido é razoável. Pode ser utilizado para filtrar listas de recursos que possuem locais.', 'Used to import data from spreadsheets into the database': 'Para importar dados utilizada a partir de planilhas no banco de dados', 'Used within Inventory Management, Request Management and Asset Management': 'Utilizado no gerenciamento de inventário, gerenciamento de Pedido e gerenciamento de ativos', 'User': 'usuário', 'User Account has been Disabled': 'Conta de Usuário foi Desativado', 'User Details': 'Detalhes do Usuário', 'User ID': 'User ID', 'User Management': 'gerenciamento do usuário', 'User Profile': 'Perfil do Utilizador', 'User Requests': 'Pedidos do Utilizador', 'User Updated': 'Utilizador actualizado', 'User added': 'Usuário Incluído', 'User already has this role': 'Usuário já tem essa função', 'User deleted': 'Usuário Excluído', 'User updated': 'Utilizador actualizado', 'Username': 'userName', 'Users': 'usuários', 'Users removed': 'Utilizadores removidos', 'Uses the REST Query Format defined in': 'Utiliza o formato de consulta REST definido em', 'Ushahidi': 'Ushahidi', 'Utilities': 'Serviços Públicos', 'Utility, telecommunication, other non-transport infrastructure': 'Serviços Públicos, telecomunicações, outra infra-estrutura não-transporte', 'Vacancies': 'Vagas', 'Value': 'value', 'Various Reporting functionalities': 'Diversas funcionalidades de relatório', 'Vehicle': 'veículo', 'Vehicle Crime': 'Roubo/Furto de veículo', 'Vehicle Details': 'Vehicle Details', 'Vehicle Details added': 'Vehicle Details added', 'Vehicle Details deleted': 'Vehicle Details deleted', 'Vehicle Details updated': 'Vehicle Details updated', 'Vehicle Management': 'Vehicle Management', 'Vehicle Types': 'Tipos de veículo', 'Vehicle added': 'Vehicle added', 'Vehicle deleted': 'Vehicle deleted', 'Vehicle updated': 'Vehicle updated', 'Vehicles': 'Vehicles', 'Vehicles are assets with some extra details.': 'Vehicles are assets with some extra details.', 'Verification Status': 'Status de verificação', 'Verified?': 'Verificado?', 'Verify password': 'Verificar senha', 'Version': 'Version', 'Very Good': 'Muito bom', 'Very High': 'muito alto', 'View Alerts received using either 
Email or SMS': 'Visualizar alertas utilizando quer o correio electrónico quer SMS.', 'View All': 'Visualizar todos', 'View All Tickets': 'View All Tickets', 'View Error Tickets': 'Ver bilhetes de erro', 'View Fullscreen Map': 'Visualização Inteira Mapa', 'View Image': 'Visualizar imagem', 'View Items': 'Ver itens', 'View On Map': 'Visualizar no mapa', 'View Outbox': 'Visualização Outbox', 'View Picture': 'Visualização de imagem', 'View Results of completed and/or partially completed assessments': 'View Results of completed and/or partially completed assessments', 'View Settings': 'Ver Configurações', 'View Tickets': 'Visualizar Bilhetes', 'View and/or update their details': 'Visualizar e/ou actualizar os seus detalhes', 'View or update the status of a hospital.': 'VISUALIZAR ou atualizar o status de um hospital.', 'View pending requests and pledge support.': 'Visualizar pedidos pendentes e suporte promessa.', 'View the hospitals on a map.': 'Visualizar os hospitais em um mapa.', 'View/Edit the Database directly': 'Visualizar/Editar o banco de dados diretamente', "View/Edit the Database directly (caution: doesn't respect the framework rules!)": 'Visualizar/Alterar a base de dados directamente ( cuidado : não cumpre com as regras da infraestrutura ! ) ).', 'Village': 'Vila', 'Village Leader': 'Líder da Aldeia', 'Visible?': 'Visível?', 'Visual Recognition': 'Reconhecimento visual', 'Volcanic Ash Cloud': 'Nuvem de cinzas vulcânicas', 'Volcanic Event': 'Evento vulcânico', 'Volume (m3)': 'Volume (m3)', 'Volunteer Availability': 'Disponibilidade de Voluntário', 'Volunteer Details': 'Detalhes do voluntário', 'Volunteer Information': 'Voluntário Informações', 'Volunteer Management': 'Gestão de voluntário', 'Volunteer Project': 'Projeto voluntário', 'Volunteer Record': 'Voluntário Registro', 'Volunteer Request': 'Pedido voluntário', 'Volunteer added': 'Voluntário incluído', 'Volunteer availability added': 'Disponibilidade de voluntário incluída', 'Volunteer availability deleted': 'Disponibilidade de voluntário excluída', 'Volunteer availability updated': 'Disponibilidade de voluntário atualizada', 'Volunteer deleted': 'Voluntário excluído', 'Volunteer details updated': 'Atualização dos detalhes de voluntários', 'Volunteer updated': 'Voluntário atualizado', 'Volunteers': 'Voluntários', 'Volunteers List': 'Voluntários Lista', 'Volunteers were notified!': 'Voluntários foram notificados!', 'Vote': 'voto', 'Votes': 'votos', 'WASH': 'LAVAR', 'WMS Browser Name': 'WMS Nome do Navegador', 'WMS Browser URL': 'WMS Navegador URL', 'Walking Only': 'Apenas andando', 'Wall or other structural damage': 'Parede ou outros danos estruturais', 'Warehouse': 'Depósito', 'Warehouse Details': 'Detalhes do Armazém', 'Warehouse added': 'Warehouse incluído', 'Warehouse deleted': 'Deposito apagado', 'Warehouse updated': 'Warehouse ATUALIZADO', 'Warehouses': 'Armazéns', 'WatSan': 'WatSan', 'Water Sanitation Hygiene': 'Saneamento de água', 'Water collection': 'Coleta de água', 'Water gallon': 'Galão de água', 'Water storage containers in households': 'Recipientes de armazenamento de água nos domicílios', 'Water supply': 'Abastecimento de água', 'Waterspout': 'Waterspout', 'We have tried': 'We have tried', 'Web API settings updated': 'Web API settings updated', 'Web Map Service Browser Name': 'Nome do mapa da Web navegador de serviços', 'Web Map Service Browser URL': 'Web Mapa Do navegador de Serviços URL', 'Website': 'WebSite', 'Wednesday': 'Wednesday', 'Weight': 'peso', 'Weight (kg)': 'peso (kg)', 'Welcome to the Sahana Portal 
at': 'Bem-vindo ao Portal Sahana em', 'Well-Known Text': 'Texto bem conhecido', 'What order to be contacted in.': 'What order to be contacted in.', 'Wheat': 'Trigo', 'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points.': 'Quando o mapa é que exibido incide sobre um conjunto de pontos, o mapa é aproximado para mostrar apenas a região delimitadora dos pontos.', 'When reports were entered': 'Quando os relatórios foram Digitados', "When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.": 'Quando Sincronizando dados com outros, os conflitos acontecem em casos onde dois (ou mais) grupos desejam sincronizar informações que os dois tenham modificado, ou seja, informações conflitantes. Módulo de sincronização tenta resolver esses conflitos automaticamente mas em alguns casos isso não consegue. Nesses casos, cabe a si resolver esses conflitos manualmente, clique no link à direita para ir para esta página.', 'Whiskers': 'Bigodes', 'Who is doing what and where': 'Quem está a fazer o quê e onde', 'Who usually collects water for the family?': 'Quem habitualmente colecta água para a família ?', 'Width': 'width', 'Width (m)': 'Largura (m)', 'Wild Fire': 'Fogo Selvagem', 'Wind Chill': 'Vento Frio', 'Window frame': 'Esquadria de janela', 'Winter Storm': 'Tempestade de inverno', 'Women of Child Bearing Age': 'Mulheres da criança Tendo Idade', 'Women participating in coping activities': 'Mulheres que participam em lidar atividades', 'Women who are Pregnant or in Labour': 'Mulheres que esto grávidas ou no trabalho', 'Womens Focus Groups': 'Mulheres de Grupos Foco', 'Wooden plank': 'Tábua de madeira', 'Wooden poles': 'Postes de madeira', 'Working hours end': 'Horas de trabalho final', 'Working hours start': 'Horas de trabalho iniciar', 'Working or other to provide money/food': 'Trabalhando para outros para prover dinheiro / alimentos', 'X-Ray': 'Raio-X', 'XMPP': 'XMPP', 'YES': 'YES', "Yahoo Layers cannot be displayed if there isn't a valid API Key": "Yahoo Layers cannot be displayed if there isn't a valid API Key", 'Year': 'Year', 'Year built': 'Ano de construção', 'Year of Manufacture': 'Ano de fabricação', 'Yellow': 'amarelo', 'Yes': 'YES', 'You are a recovery team?': 'Você é uma equipe de recuperação?', 'You are attempting to delete your own account - are you sure you want to proceed?': 'Você está tentando excluir sua própria conta-Tem certeza de que deseja continuar?', 'You are currently reported missing!': 'Você está atualmente desaparecido!', 'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.': 'Você pode alterar a configuração do Módulo de Sincronização na seção configurações. Essa configuração inclui o seu UUID (número de identificação exclusivo), Planejamentos de Sincronização, serviço Farol e assim por diante. 
Clique no link a seguir para ir para a página Configurações de Sincronização.', 'You can click on the map below to select the Lat/Lon fields': 'Você pode clicar no mapa abaixo para selecionar os campos Lat/Lon', 'You can select the Draw tool': 'Pode selecionar a ferramenta Desenho', 'You can set the modem settings for SMS here.': 'Pode definir a configuração do modem SMS aqui.', 'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': 'Você pode utilizar a ferramenta de conversão para converter coordenadas de GPS ou graus/minutos/segundos.', 'You do no have permission to cancel this received shipment.': 'Você não tem permissão para cancelar o recebimento deste carregamento.', 'You do no have permission to cancel this sent shipment.': 'Você não tem permissão para cancelar o envio desse carregamento.', 'You do no have permission to make this commitment.': 'Você não tem permissão de fazer este compromisso.', 'You do no have permission to receive this shipment.': 'Você não tem permissão para receber este carregamento.', 'You do no have permission to send this shipment.': 'Você não tem permissão para enviar este carregamento.', 'You do not have permission for any facility to make a commitment.': 'Você não tem permissão em qualquer instalação para estabelecer um compromisso.', 'You do not have permission for any facility to make a request.': 'Você não tem permissão em qualquer instalação para fazer um pedido.', 'You do not have permission for any facility to receive a shipment.': 'You do not have permission for any facility to receive a shipment.', 'You do not have permission for any facility to send a shipment.': 'You do not have permission for any facility to send a shipment.', 'You do not have permission for any site to add an inventory item.': 'Você não tem permissão em qualquer site para incluir um item de inventário.', 'You do not have permission for any site to make a commitment.': 'Você não tem permissão em qualquer site para assumir um compromisso.', 'You do not have permission for any site to make a request.': 'Você não tem permissão em qualquer site para fazer um pedido.', 'You do not have permission for any site to perform this action.': 'Você não tem permissão em qualquer site para executar esta ação.', 'You do not have permission for any site to receive a shipment.': 'Você não tem permissão para qualquer site para receber um carregamento.', 'You do not have permission for any site to send a shipment.': 'Você não tem permissão em qualquer site para enviar um carregamento.', 'You do not have permission to cancel this received shipment.': 'Você não tem permissão para cancelar este carregamento recebido.', 'You do not have permission to cancel this sent shipment.': 'Você não tem permissão para cancelar essa remessa enviada.', 'You do not have permission to make this commitment.': 'Você não tem permissão para assumir este compromisso.', 'You do not have permission to receive this shipment.': 'Você não tem permissão para receber esta remessa.', 'You do not have permission to send a shipment from this site.': 'Você não tem permissão para enviar um carregamento a partir deste site.', 'You do not have permission to send this shipment.': 'Você não tem permissão para enviar este carregamento.', 'You have a personal map configuration. To change your personal configuration, click': 'Você tem uma configuração de mapa pessoal. Para alterar a sua configuração pessoal, clique', 'You have a personal map configuration. 
To change your personal configuration, click ': 'You have a personal map configuration. To change your personal configuration, click ', 'You have found a dead body?': 'Descobriu um cadáver ?', "You have unsaved changes. Click Cancel now, then 'Save' to save them. Click OK now to discard them.": "You have unsaved changes. Click Cancel now, then 'Save' to save them. Click OK now to discard them.", "You haven't made any calculations": 'Não fez quaisquer cálculos.', 'You must be logged in to register volunteers.': 'Você deve estar com login efetuado para registrar voluntários.', 'You must be logged in to report persons missing or found.': 'Você deve estar registrado para informar pessoas desaparecidas ou localizadas.', 'You must provide a series id to proceed.': 'Você deve fornecer um número de série para continuar.', 'You should edit Twitter settings in models/000_config.py': 'Você deve editar as definições do Twitter em modelos/000_config.py', 'Your current ordered list of solution items is shown below. You can change it by voting again.': 'Seu lista de itens de solução pedidos aparece abaixo. Você pode alterá-lo ao votar novamente.', 'Your post was added successfully.': 'O post foi incluído com êxito.', 'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.': 'Uma identificação exclusiva (UUID) foi designada para o seu sistema e poderá ser usada por outros computadores ao seu redor para identificá-lo. Para visualizar o seu UUID, você pode ir para Sincronização -> configurações Sync. Você também pode ver outras configurações nesta página.', 'ZIP Code': 'ZIP Code', 'Zero Hour': 'Hora Zero', 'Zinc roof': 'Telhado de Zinco', 'Zoom': 'Zoom', 'Zoom Levels': 'Níveis de Zoom', 'active': 'ativo', 'added': 'incluído', 'all records': 'todos os registros', 'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': 'Permite que um orçamento seja desenvolvido com base em despesas com o pessoal e equipamento, incluindo quaisquer despesas gerais administrativas.', 'allows for creation and management of assessments.': 'allows for creation and management of assessments.', 'allows for creation and management of surveys to assess the damage following a natural disaster.': 'permite a criação e gerenciamento de pesquisas para avaliar os danos após um desastre natural.', 'an individual/team to do in 1-2 days': 'Uma pessoa/Equipe para fazer em 1 Dias-2', 'assigned': 'designado', 'average': 'Na média', 'black': 'Preto', 'blond': 'Loiro', 'blue': 'azul', 'brown': 'Marrom', 'business_damaged': 'business_damaged', 'by': 'por', 'c/o Name': 'c/o Nome', 'can be used to extract data from spreadsheets and put them into database tables.': 'Pode ser utilizado para extrair dados de planilhas e colocá-los em tabelas de dados.', 'cancelled': 'CANCELADO', 'caucasoid': 'Caucasoid', 'check all': 'Verificar Tudo', 'click for more details': 'Clique para mais detalhes', 'click here': 'click here', 'completed': 'Concluído', 'confirmed': 'Confirmado', 'consider': 'considerar', "couldn't be parsed so NetworkLinks not followed.": 'Não pôde ser analisado então o NetworkLinks não seguiu.', 'curly': 'Encaracolado', 'currently registered': 'Atualmente registrados', 'daily': 'Diariamente', 'dark': 'Escuro', 'data uploaded': 'dados carregados', 'database': 'DATABASE', 'database %s select': '% de dados s SELECIONE', 'db': 'dB', 
'deceased': 'Falecido', 'delete all checked': 'excluir todos marcados', 'deleted': 'excluídos', 'design': 'projecto', 'diseased': 'Doentes', 'displaced': 'Deslocadas', 'divorced': 'Divorciado', 'done!': 'Pronto!', 'duplicate': 'duplicar', 'edit': 'Editar', 'eg. gas, electricity, water': 'Exemplo: Gás, eletricidade, água', 'embedded': 'integrado', 'enclosed area': 'Área anexada', 'enter a number between %(min)g and %(max)g': 'enter a number between %(min)g and %(max)g', 'enter an integer between %(min)g and %(max)g': 'enter an integer between %(min)g and %(max)g', 'export as csv file': 'Exportar como arquivo cvs.', 'fat': 'Gordura', 'feedback': 'Retorno', 'female': 'Sexo Feminino', 'flush latrine with septic tank': 'esvaziar latrina com tanque séptico', 'food_sources': 'fuentes de alimento', 'forehead': 'testa', 'form data': 'form data', 'found': 'Localizado', 'from Twitter': 'do Twitter', 'getting': 'getting', 'green': 'verde', 'grey': 'cinza', 'here': 'aqui', 'high': 'Alta', 'hourly': 'Por hora', 'households': 'Membros da família', 'identified': 'identificado', 'ignore': 'Ignore', 'in Deg Min Sec format': 'GRAUS Celsius no formato Mín. Segundo', 'in GPS format': 'GPS no formato', 'in Inv.': 'in Inv.', 'inactive': 'inativo', "includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly.": 'Inclui um GroundOverlay ou ScreenOverlay que não são ainda suportados em OpenLayuers, portanto poderá não funcionar na totalidade.', 'injured': 'Feridos', 'insert new': 'inserir novo', 'insert new %s': 'inserir novo %s', 'invalid': 'inválido', 'invalid request': 'PEDIDO INVÁLIDO', 'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': 'É um repositório central de informações em tempo real onde vítimas de desastres e seus familiares, especialmente casos isolados, refugiados e pessoas deslocadas podem ser abrigados. Informações como nome, idade, Contate o número de Bilhete de Identidade número, localização Deslocadas, e outros detalhes são capturados. Detalhes de impressão Imagem e Dedo de as pessoas possam ser transferidos por upload para o sistema. As pessoas podem também ser capturados pelo grupo por eficiência e conveniência.', 'is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities': 'tem como visão ser composto de vários sub-módulos que interagem juntos a fim de fornecer funcionalidade complexa para o gerenciamento de itens de ajuda e projeto de uma organização. 
Isso inclui um sistema de admissão, um sistema de gestão de depósitos, rastreamento de mercadorias, gestão da cadeia de fornecimentos, de gestão da frota, aquisições, recursos de rastreamento financeiro de ativos e outros e gerenciamento de recursos', 'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': 'Mantém controle de todos os bilhetes de entrada permitindo que sejam classificados & direcionados ao local apropriado para atuação.', 'latrines': 'privadas', 'leave empty to detach account': 'deixar em branco para desconectar a conta', 'legend URL': 'Legenda URL', 'light': 'luz', 'login': 'login', 'long': 'Longo', 'long>12cm': 'comprimento>12cm', 'low': 'baixo', 'male': 'masculino', 'manual': 'Manual', 'married': 'casado', 'medium': 'médio.', 'medium<12cm': 'médio<12cm', 'meters': 'metros', 'missing': 'ausente', 'module allows the site administrator to configure various options.': 'Módulo permite que o administrador do site configure várias opções.', 'module helps monitoring the status of hospitals.': 'Módulo ajuda monitorando o status de hospitais.', 'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': 'Módulo fornece um mecanismo para colaboração fornecem uma visão geral do desastre de desenvolvimento, utilização de mapeamento online (SIG).', 'mongoloid': 'Mongolóide', 'more': 'Mais', 'n/a': 'n/d', 'negroid': 'Negróide', 'never': 'Nunca', 'new': 'Novo(a)', 'new record inserted': 'Novo registro inserido', 'next 100 rows': 'próximas 100 linhas', 'no': 'no', 'none': 'nenhum', 'normal': 'normal', 'not accessible - no cached version available!': 'Não acessível-nenhuma versão em cache disponível!', 'not accessible - using cached version from': 'Não acessível-Utilizando versão em Cache', 'not specified': 'não especificado', 'num Zoom Levels': 'Num níveis de Zoom', 'obsolete': 'Obsoleto', 'on': 'Ligar', 'once': 'uma vez', 'open defecation': 'Abrir evacuação', 'optional': 'Optional', 'or import from csv file': 'ou importar a partir do arquivo csv', 'other': 'outros', 'over one hour': 'Mais de uma hora', 'people': 'pessoas', 'piece': 'parte', 'pit': 'cova', 'pit latrine': 'cova de latrina', 'postponed': 'Adiado', 'preliminary template or draft, not actionable in its current form': 'Modelo ou rascunho preliminar, não acionável em sua forma atual', 'previous 100 rows': '100 linhas anteriores', 'record does not exist': 'Registro não existe', 'record id': 'ID do Registro', 'red': 'vermelho', 'reported': 'relatado', 'reports successfully imported.': 'relatórios importados com êxito.', 'representation of the Polygon/Line.': 'Representação do polígono /Linha.', 'retired': 'Aposentado', 'retry': 'retry', 'river': 'Rio', 'see comment': 'Veja o comentário', 'selected': 'Selecionado', 'separated': 'Separado', 'separated from family': 'Separados da família', 'shaved': 'raspado', 'short': 'pequeno', 'short<6cm': 'pequeno<6cm', 'sides': 'lados', 'sign-up now': 'Inscreva-se agora', 'single': 'único', 'slim': 'estreito', 'specify': 'Especifique.', 'staff': 'equipe', 'staff members': 'Membros da equipe', 'state': 'Estado', 'state location': 'Localização do Estado', 'straight': 'reto', 'suffered financial losses': 'Sofreram perdas financeiras', 'table': 'table', 'tall': 'Altura', 'this': 'isto', 'times and it is still not working. We give in. Sorry.': 'times and it is still not working. We give in. 
Sorry.', 'to access the system': 'Para acessar o sistema', 'to download a OCR Form.': 'to download a OCR Form.', 'to reset your password': 'Para Reconfigurar sua senha', 'to verify your email': 'Para verificar seu e-mail', 'tonsure': 'tonsura', 'total': 'Total', 'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!': 'Módulo tweepy não disponível com a execução Python-isto necessita da instalação para suporte a tropo Twitter!', 'unable to parse csv file': 'Não é possível analisar arquivo csv', 'uncheck all': 'Desmarcar Tudo', 'unidentified': 'IDENTIFICADO', 'unknown': 'unknown', 'unspecified': 'UNSPECIFIED', 'unverified': 'Não Verificado', 'updated': 'Atualizado', 'updates only': 'Apenas atualizações', 'verified': 'Verificado', 'volunteer': 'voluntário', 'volunteers': 'Voluntários', 'wavy': 'Serpentina', 'weekly': 'Semanalmente', 'white': 'branco', 'wider area, longer term, usually contain multiple Activities': 'maior área, maior prazo, contém usualmente múltiplas actividades', 'widowed': 'Viúvo', 'window': 'janela', 'within human habitat': 'Dentro do habitat humano', 'xlwt module not available within the running Python - this needs installing for XLS output!': 'Módulo Xlwt não disponível no módulo Python sendo executado - isto necessita ser instalado para saída XLS!', 'yes': 'YES', }
[]
goodchinas/pyxq
pyxq/app/__init__.py
c7f6ea63084c18178049451f30f32f04080a511c
from .. import ba, cb, actor
from ..service import account


class A0(ba.App):
    center: cb.CallBackManager
    a: account.Account

    def __init__(self, stg: actor.GateWay):
        a = account.Account()
        center = cb.CallBackManager()
        stg.init(a=a, center=center, broker=cb.CallBackManager())
        actor.Broker(
            a=a,
            center=center,
            gateway=stg.broker,
            exchange=actor.Exchange(center=center, broker=cb.CallBackManager()).broker)
        self.center = center
        self.a = a

    def route(self, x: ba.Msg):
        self.center.route(x=x)
[]
AdrianLundell/ldpc-investigation-master-thesis
python-files/Analysis/plot_rber_curve.py
075f5cd10dae498e4fcda2f4aabedd0e27caf122
#%%
#Calculate_capacity
[]
traff/python_completion_benchmark
2_b_builtins_dynamic_recall.py
df25caaabf46f8b6eca34d5618052bff7ea8b0bf
import builtins

builtins.foo = 'bar'
foo  # foo
[]
gva-jhabte/gva-data
gva/data/validator/__init__.py
7a605ff01faa3fd38e91a324341d6166f17544a7
""" Schema Validation Tests a dictionary against a schema to test for conformity. Schema definition is similar to - but not the same as - avro schemas Supported Types: - string - a character sequence - format - numeric - a number - min: - max - date - a datetime.date or an iso format date or time - boolean - a boolean or a binary value (true/false, on/off, yes/no) - symbols - other - not one of the above, but a required field - nullable - Python Falsy (None, 0, Empty String, etc) - enum - - symbols Example Schema: { "name": "Table Name", "fields": [ {"name": "id", "type": "string"}, {"name": "country", "type": ["string", "nullable"]}, {"name": "followers", "type": ["string", "nullable"]} ] } Notes: - type(var).__name__ in (set) is faster than isinstance """ import datetime from typing import List, Any, Union, Callable import os import re from ...utils.json import serialize, parse VALID_BOOLEAN_VALUES = ("true", "false", "on", "off", "yes", "no", "0", "1") DEFAULT_MIN = -9223372036854775808 DEFAULT_MAX = 9223372036854775807 class is_string(): __slots__ = ['pattern', 'regex'] def __init__(self, **kwargs): self.regex = None self.pattern = kwargs.get('format') if self.pattern: self.regex = re.compile(self.pattern) def __call__(self, value: Any) -> bool: if self.pattern is None: return type(value).__name__ == "str" else: return self.regex.match(str(value)) def __str__(self): if self.pattern: return f'string ({self.pattern})' else: return 'string' class is_valid_enum(): __slots__ = ['symbols'] def __init__(self, **kwargs): """ -> "type": "enum", "symbols": ["up", "down"] symbols: list of allowed values (case sensitive) """ self.symbols = kwargs.get('symbols', ()) def __call__(self, value: Any) -> bool: return value and value in self.symbols def __str__(self): return f'enum {self.symbols}' class is_boolean(is_valid_enum): def __init__(self, **kwargs): """ is_boolean is a specific case of is_valid_enum - it defaults to a set of true/false values - the check is case insensitive """ super().__init__() if len(self.symbols) == 0: self.symbols = VALID_BOOLEAN_VALUES def __call__(self, value: Any) -> bool: return super().__call__(str(value).lower()) class is_numeric(): __slots__ = ['min', 'max'] def __init__(self, **kwargs): """ -> "type": "numeric", "min": 0, "max": 100 min: low end of valid range max: high end of valid range """ self.min = kwargs.get('min', DEFAULT_MIN) self.max = kwargs.get('max', DEFAULT_MAX) def __call__(self, value: Any) -> bool: try: n = float(value) except (ValueError, TypeError): return False return self.min <= n <= self.max def __str__(self): if self.min == DEFAULT_MIN and self.max == DEFAULT_MAX: return 'numeric' if not self.min == DEFAULT_MIN and not self.max == DEFAULT_MAX: return f'numeric ({self.min} - {self.max})' if not self.min == DEFAULT_MIN and self.max == DEFAULT_MAX: return f'numeric ({self.min} - infinity)' if self.min == DEFAULT_MIN and not self.max == DEFAULT_MAX: return f'numeric (infinity - {self.max})' def is_date(value: Any) -> bool: try: if type(value).__name__ in ("datetime", "date", "time"): return True datetime.datetime.fromisoformat(value) return True except (ValueError, TypeError): return False def is_null(value: Any) -> bool: return not value def other_validator(value: Any) -> bool: return True def is_list(value: Any) -> bool: return type(value).__name__ == 'list' """ Create a dictionary of the validator functions """ SIMPLE_VALIDATORS = { "date": is_date, "nullable": is_null, "other": other_validator, "list": is_list, "array": is_list, } 
COMPLEX_VALIDATORS = { "enum": is_valid_enum, "numeric": is_numeric, "string": is_string, "boolean": is_boolean } def get_validators( type_descriptor: Union[List[str], str], **kwargs): """ For a given type definition (the ["string", "nullable"] bit), return the matching validator functions (the _is_x ones) as a list. """ if not type(type_descriptor).__name__ == 'list': type_descriptor = [type_descriptor] # type:ignore validators: List[Any] = [] for descriptor in type_descriptor: if descriptor in COMPLEX_VALIDATORS: validators.append(COMPLEX_VALIDATORS[descriptor](**kwargs)) else: validators.append(SIMPLE_VALIDATORS[descriptor]) return validators def field_validator(value, validators: set) -> bool: """ Execute a set of validator functions (the _is_x) against a value. Return True if any of the validators are True. """ return any([True for validator in validators if validator(value)]) class Schema(): def __init__(self, definition: Union[dict, str]): """ Compile a validator for a given schema. paramaters: - definition: a dictionary, text representation of a dictionary (JSON) or a JSON file containing a schema definition """ # if we have a schema as a string, load it into a dictionary if type(definition).__name__ == 'str': if os.path.exists(definition): # type:ignore definition = parse(open(definition, mode='r').read()) # type:ignore else: definition = parse(definition) # type:ignore try: # read the schema and look up the validators self._validators = { item.get('name'): get_validators( item['type'], symbols=item.get('symbols'), min=item.get('min', DEFAULT_MIN), # 64bit signed (not a limit, just a default) max=item.get('max', DEFAULT_MAX), # 64bit signed (not a limit, just a default) format=item.get('format')) for item in definition.get('fields', []) #type:ignore } except KeyError: raise ValueError("Invalid type specified in schema - valid types are: string, numeric, date, boolean, nullable, list, enum") if len(self._validators) == 0: raise ValueError("Invalid schema specification") def validate(self, subject: dict = {}, raise_exception=False) -> bool: result = True self.last_error = '' for key, value in self._validators.items(): if not field_validator(subject.get(key), self._validators.get(key, [other_validator])): result = False for v in value: self.last_error += f"'{key}' ({subject.get(key)}) did not pass validator {str(v)}.\n" if raise_exception and not result: raise ValueError(F"Record does not conform to schema - {self.last_error}. ") return result def __call__(self, subject: dict = {}, raise_exception=False) -> bool: # wrap the validate function return self.validate(subject=subject, raise_exception=raise_exception)
[((3861, 3899), 'datetime.datetime.fromisoformat', 'datetime.datetime.fromisoformat', (['value'], {}), '(value)\n', (3892, 3899), False, 'import datetime\n'), ((1398, 1422), 're.compile', 're.compile', (['self.pattern'], {}), '(self.pattern)\n', (1408, 1422), False, 'import re\n'), ((5909, 5935), 'os.path.exists', 'os.path.exists', (['definition'], {}), '(definition)\n', (5923, 5935), False, 'import os\n')]
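The validator module above documents its schema format only in the docstring, so a short usage sketch may help illustrate how a record is checked against the example schema. This is a hypothetical sketch, not part of the repository: the import path is inferred from the repo layout, and it uses only the Schema constructor, validate(), __call__(), and last_error members shown in the source.

# Usage sketch (hypothetical import path inferred from gva/data/validator/__init__.py).
from gva.data.validator import Schema

# The example schema taken from the module docstring.
schema = Schema({
    "name": "Table Name",
    "fields": [
        {"name": "id", "type": "string"},
        {"name": "country", "type": ["string", "nullable"]},
        {"name": "followers", "type": ["string", "nullable"]}
    ]
})

# A conforming record passes every field validator.
print(schema.validate({"id": "1", "country": "NZ", "followers": "10"}))  # True

# A non-conforming record fails, and last_error records which validator rejected it.
print(schema({"id": 1, "country": None, "followers": "10"}))  # False
print(schema.last_error)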
umarcor/prjtrellis
fuzzers/ECP5/050-pio_routing/fuzzer.py
9b3db7ba9a02e7d2f49c52ce062d5b22e320004c
from fuzzconfig import FuzzConfig
import interconnect
import nets
import pytrellis
import re

jobs = [
    {
        "pos": [(47, 0), (48, 0), (49, 0)],
        "cfg": FuzzConfig(job="PIOROUTEL", family="ECP5", device="LFE5U-45F", ncl="pioroute.ncl",
                          tiles=["MIB_R47C0:PICL0", "MIB_R48C0:PICL1", "MIB_R49C0:PICL2"])
    },
    {
        "pos": [(47, 90), (48, 90), (49, 90)],
        "cfg": FuzzConfig(job="PIOROUTER", family="ECP5", device="LFE5U-45F", ncl="pioroute.ncl",
                          tiles=["MIB_R47C90:PICR0", "MIB_R48C90:PICR1", "MIB_R49C90:PICR2"])
    },
    {
        "pos": [(0, 22), (1, 23), (0, 22), (1, 23)],
        "cfg": FuzzConfig(job="PIOROUTET", family="ECP5", device="LFE5U-45F", ncl="pioroute.ncl",
                          tiles=["MIB_R0C22:PIOT0", "MIB_R0C23:PIOT1", "MIB_R1C22:PICT0", "MIB_R1C23:PICT1"])
    },
    {
        "pos": [(71, 11), (71, 12), (70, 11), (70, 12)],
        "cfg": FuzzConfig(job="PIOROUTET", family="ECP5", device="LFE5U-45F", ncl="pioroute.ncl",
                          tiles=["MIB_R71C11:PICB0", "MIB_R71C12:PICB1"])
    },
    {
        "pos": [(71, 18), (70, 18)],
        "cfg": FuzzConfig(job="PIOROUTESB", family="ECP5", device="LFE5U-45F", ncl="pioroute_spicb.ncl",
                          tiles=["MIB_R71C18:SPICB0"])
    },
]


def main():
    pytrellis.load_database("../../../database")
    for job in jobs:
        cfg = job["cfg"]
        cfg.setup()

        def nn_filter(net, netnames):
            return not nets.is_cib(net)

        orig_tiles = cfg.tiles
        for pos in job["pos"]:
            # Put fixed connections in the most appropriate tile
            target_tile = None
            for tile in orig_tiles:
                if "R{}C{}".format(pos[0], pos[1]) in tile:
                    target_tile = tile
                    break
            if target_tile is not None:
                # Keep the matching tile first, followed by the remaining tiles.
                cfg.tiles = [target_tile] + [_ for _ in orig_tiles if _ != target_tile]
            else:
                cfg.tiles = orig_tiles
            interconnect.fuzz_interconnect(config=cfg, location=pos,
                                           netname_predicate=nn_filter,
                                           netname_filter_union=False,
                                           func_cib=True)


if __name__ == "__main__":
    main()
[((1347, 1391), 'pytrellis.load_database', 'pytrellis.load_database', (['"""../../../database"""'], {}), "('../../../database')\n", (1370, 1391), False, 'import pytrellis\n'), ((168, 324), 'fuzzconfig.FuzzConfig', 'FuzzConfig', ([], {'job': '"""PIOROUTEL"""', 'family': '"""ECP5"""', 'device': '"""LFE5U-45F"""', 'ncl': '"""pioroute.ncl"""', 'tiles': "['MIB_R47C0:PICL0', 'MIB_R48C0:PICL1', 'MIB_R49C0:PICL2']"}), "(job='PIOROUTEL', family='ECP5', device='LFE5U-45F', ncl=\n 'pioroute.ncl', tiles=['MIB_R47C0:PICL0', 'MIB_R48C0:PICL1',\n 'MIB_R49C0:PICL2'])\n", (178, 324), False, 'from fuzzconfig import FuzzConfig\n'), ((417, 576), 'fuzzconfig.FuzzConfig', 'FuzzConfig', ([], {'job': '"""PIOROUTER"""', 'family': '"""ECP5"""', 'device': '"""LFE5U-45F"""', 'ncl': '"""pioroute.ncl"""', 'tiles': "['MIB_R47C90:PICR0', 'MIB_R48C90:PICR1', 'MIB_R49C90:PICR2']"}), "(job='PIOROUTER', family='ECP5', device='LFE5U-45F', ncl=\n 'pioroute.ncl', tiles=['MIB_R47C90:PICR0', 'MIB_R48C90:PICR1',\n 'MIB_R49C90:PICR2'])\n", (427, 576), False, 'from fuzzconfig import FuzzConfig\n'), ((675, 850), 'fuzzconfig.FuzzConfig', 'FuzzConfig', ([], {'job': '"""PIOROUTET"""', 'family': '"""ECP5"""', 'device': '"""LFE5U-45F"""', 'ncl': '"""pioroute.ncl"""', 'tiles': "['MIB_R0C22:PIOT0', 'MIB_R0C23:PIOT1', 'MIB_R1C22:PICT0', 'MIB_R1C23:PICT1']"}), "(job='PIOROUTET', family='ECP5', device='LFE5U-45F', ncl=\n 'pioroute.ncl', tiles=['MIB_R0C22:PIOT0', 'MIB_R0C23:PIOT1',\n 'MIB_R1C22:PICT0', 'MIB_R1C23:PICT1'])\n", (685, 850), False, 'from fuzzconfig import FuzzConfig\n'), ((953, 1088), 'fuzzconfig.FuzzConfig', 'FuzzConfig', ([], {'job': '"""PIOROUTET"""', 'family': '"""ECP5"""', 'device': '"""LFE5U-45F"""', 'ncl': '"""pioroute.ncl"""', 'tiles': "['MIB_R71C11:PICB0', 'MIB_R71C12:PICB1']"}), "(job='PIOROUTET', family='ECP5', device='LFE5U-45F', ncl=\n 'pioroute.ncl', tiles=['MIB_R71C11:PICB0', 'MIB_R71C12:PICB1'])\n", (963, 1088), False, 'from fuzzconfig import FuzzConfig\n'), ((1175, 1298), 'fuzzconfig.FuzzConfig', 'FuzzConfig', ([], {'job': '"""PIOROUTESB"""', 'family': '"""ECP5"""', 'device': '"""LFE5U-45F"""', 'ncl': '"""pioroute_spicb.ncl"""', 'tiles': "['MIB_R71C18:SPICB0']"}), "(job='PIOROUTESB', family='ECP5', device='LFE5U-45F', ncl=\n 'pioroute_spicb.ncl', tiles=['MIB_R71C18:SPICB0'])\n", (1185, 1298), False, 'from fuzzconfig import FuzzConfig\n'), ((2052, 2185), 'interconnect.fuzz_interconnect', 'interconnect.fuzz_interconnect', ([], {'config': 'cfg', 'location': 'pos', 'netname_predicate': 'nn_filter', 'netname_filter_union': '(False)', 'func_cib': '(True)'}), '(config=cfg, location=pos, netname_predicate=\n nn_filter, netname_filter_union=False, func_cib=True)\n', (2082, 2185), False, 'import interconnect\n'), ((1520, 1536), 'nets.is_cib', 'nets.is_cib', (['net'], {}), '(net)\n', (1531, 1536), False, 'import nets\n')]
yecharlie/convnet3d
convnet3d/backend/tensorflow_backend.py
0b2771eec149b196ef59b58d09eef71c9b201d40
import tensorflow as tf def _is_tensor(x): """Returns `True` if `x` is a symbolic tensor-like object. From http://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/image_ops_impl.py Args: x: A python object to check. Returns: `True` if `x` is a `tf.Tensor` or `tf.Variable`, otherwise `False`. """ return isinstance(x, (tf.Tensor, tf.Variable)) def _ImageDimensions(image, rank): """Returns the dimensions of an image tensor. From http://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/image_ops_impl.py Args: image: A rank-D Tensor. For 3-D of shape: `[height, width, channels]`. rank: The expected rank of the image Returns: A list of corresponding to the dimensions of the input image. Dimensions that are statically known are python integers, otherwise they are integer scalar tensors. """ if image.get_shape().is_fully_defined(): return image.get_shape().as_list() else: static_shape = image.get_shape().with_rank(rank).as_list() dynamic_shape = tf.unstack(tf.shape(image), rank) return [ s if s is not None else d for s, d in zip(static_shape, dynamic_shape) ] def _CheckAtLeast4DImage(image, require_static=True): """Assert that we are working with properly shaped image. (modified) From http://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/image_ops_impl.py Args: image: >= 4-D Tensor of size [*, height, width, depth, channels] require_static: If `True`, requires that all dimensions of `image` are known and non-zero. Raises: ValueError: if image.shape is not a [>= 4] vector. Returns: An empty list, if `image` has fully defined dimensions. Otherwise, a list containing an assert op is returned. """ try: if image.get_shape().ndims is None: image_shape = image.get_shape().with_rank(4) else: image_shape = image.get_shape().with_rank_at_least(4) except ValueError: raise ValueError("'image' must be at least four-dimensional.") if require_static and not image_shape.is_fully_defined(): raise ValueError('\'image\' must be fully defined.') if any(x == 0 for x in image_shape): raise ValueError( 'all dims of \'image.shape\' must be > 0: %s' % image_shape) if not image_shape.is_fully_defined(): return [ tf.assert_positive( tf.shape(image), ['all dims of "image.shape " must be > 0.']) ] else: return [] def uniform(*args, **kwargs): return tf.random.uniform(*args, **kwargs) def pad(*args, **kwargs): return tf.pad(*args, **kwargs) def top_k(*args, **kwargs): return tf.math.top_k(*args, **kwargs) def non_max_suppression_overlaps(*args, **kwargs): return tf.image.non_max_suppression_overlaps(*args, **kwargs) def gather_nd(*args, **kwargs): return tf.gather_nd(*args, **kwargs) def clip_by_value(*args, **kwargs): return tf.clip_by_value(*args, **kwargs) def meshgrid(*args, **kwargs): return tf.meshgrid(*args, **kwargs) def map_fn(*args, **kwargs): return tf.map_fn(*args, **kwargs) def where(*args, **kwargs): return tf.where(*args, **kwargs) def crop_to_bounding_box_3d(image, box, target_size): '''Crops an 3d image to a specificed bounding box. When the size of box is smaller than 'target_size', then the surroundings of image is evenly (approximately) padded with zero. The 'box' with size = 0 is allowed. Args: image: 5-D Tensor of shape '[batch, heigh, width, depth, channels]' or 4-D Tensor of shape '[heights, width, depth, channels]' box: 1-D Tensor of shape '[6,]' representing the cropped area. target_size: The ultimate bounding box size. 
Returns: if 'image' was 5-D, a 5-D float Tensor of shape '[batch_size] + target_size + [channels]' if 'image' was 4-D, a 5-D float Tensor of shape 'target_size + [channels]' ''' with tf.name_scope(None, 'crop_to_bounding_box_3d', [image]): image = tf.convert_to_tensor(image, name='image') is_batch = True image_shape = image.get_shape() if image_shape.ndims == 4: is_batch = False image = tf.expand_dims(image, 0) elif image_shape.ndims is None: is_batch = False image = tf.expand_dims(image, 0) image.set_shape([None] * 5) elif image_shape.ndims != 5: raise ValueError('\'image\' must have either 4 or 5 dimensions.') assert_ops = _CheckAtLeast4DImage(image, require_static=False) # Never mind what are the real meaning of height/width/depth. They are mimics from the tensorflow API 's writting convention. batch, height, width, depth, channels = _ImageDimensions(image, rank=5) # print('crop_to_bounding_box_3d height:',height) box_size = box[1::2] - box[::2] assert_ops.append(tf.assert_greater_equal([height, width, depth], box[1::2], ['The remote corner of box must not exceed image boundaries.'])) assert_ops.append(tf.assert_non_negative(box[::2], ['The near corner of box must be non negative.'])) assert_ops.append(tf.assert_non_negative(box_size, ['The box size should be non negative.'])) assert_ops.append(tf.assert_greater_equal(target_size, box_size, ['The target size should be not less than box size. '])) with tf.control_dependencies(assert_ops): image = image # tf.with_dependencies(assert_ops, image) cropped = tf.slice( image, tf.stack([0, box[0], box[2], box[4], 0]), tf.stack([-1, box_size[0], box_size[1], box_size[2] , -1]) ) def _max(x, y): if _is_tensor(x) or _is_tensor(y): return tf.maximum(x, y) else: return max(x, y) padding_offsets = _max((target_size - box_size) // 2, 0) after_padding_size = target_size - padding_offsets - box_size paddings = tf.reshape( tf.stack([ 0, 0, padding_offsets[0], after_padding_size[0], padding_offsets[1], after_padding_size[1], # noqa: E131 padding_offsets[2], after_padding_size[2], 0, 0 # noqa: E131 ]), [5, 2]) padded = tf.pad(cropped, paddings) result_shape = [ None if _is_tensor(i) else i for i in [batch, target_size[0], target_size[1], target_size[2], channels] ] padded.set_shape(result_shape) if not is_batch: padded = tf.squeeze(padded, axis=[0]) return padded
[((2734, 2768), 'tensorflow.random.uniform', 'tf.random.uniform', (['*args'], {}), '(*args, **kwargs)\n', (2751, 2768), True, 'import tensorflow as tf\n'), ((2808, 2831), 'tensorflow.pad', 'tf.pad', (['*args'], {}), '(*args, **kwargs)\n', (2814, 2831), True, 'import tensorflow as tf\n'), ((2873, 2903), 'tensorflow.math.top_k', 'tf.math.top_k', (['*args'], {}), '(*args, **kwargs)\n', (2886, 2903), True, 'import tensorflow as tf\n'), ((2968, 3022), 'tensorflow.image.non_max_suppression_overlaps', 'tf.image.non_max_suppression_overlaps', (['*args'], {}), '(*args, **kwargs)\n', (3005, 3022), True, 'import tensorflow as tf\n'), ((3068, 3097), 'tensorflow.gather_nd', 'tf.gather_nd', (['*args'], {}), '(*args, **kwargs)\n', (3080, 3097), True, 'import tensorflow as tf\n'), ((3147, 3180), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['*args'], {}), '(*args, **kwargs)\n', (3163, 3180), True, 'import tensorflow as tf\n'), ((3225, 3253), 'tensorflow.meshgrid', 'tf.meshgrid', (['*args'], {}), '(*args, **kwargs)\n', (3236, 3253), True, 'import tensorflow as tf\n'), ((3296, 3322), 'tensorflow.map_fn', 'tf.map_fn', (['*args'], {}), '(*args, **kwargs)\n', (3305, 3322), True, 'import tensorflow as tf\n'), ((3364, 3389), 'tensorflow.where', 'tf.where', (['*args'], {}), '(*args, **kwargs)\n', (3372, 3389), True, 'import tensorflow as tf\n'), ((4163, 4218), 'tensorflow.name_scope', 'tf.name_scope', (['None', '"""crop_to_bounding_box_3d"""', '[image]'], {}), "(None, 'crop_to_bounding_box_3d', [image])\n", (4176, 4218), True, 'import tensorflow as tf\n'), ((4236, 4277), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['image'], {'name': '"""image"""'}), "(image, name='image')\n", (4256, 4277), True, 'import tensorflow as tf\n'), ((6513, 6538), 'tensorflow.pad', 'tf.pad', (['cropped', 'paddings'], {}), '(cropped, paddings)\n', (6519, 6538), True, 'import tensorflow as tf\n'), ((1142, 1157), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (1150, 1157), True, 'import tensorflow as tf\n'), ((4427, 4451), 'tensorflow.expand_dims', 'tf.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (4441, 4451), True, 'import tensorflow as tf\n'), ((5132, 5259), 'tensorflow.assert_greater_equal', 'tf.assert_greater_equal', (['[height, width, depth]', 'box[1::2]', "['The remote corner of box must not exceed image boundaries.']"], {}), "([height, width, depth], box[1::2], [\n 'The remote corner of box must not exceed image boundaries.'])\n", (5155, 5259), True, 'import tensorflow as tf\n'), ((5282, 5369), 'tensorflow.assert_non_negative', 'tf.assert_non_negative', (['box[::2]', "['The near corner of box must be non negative.']"], {}), "(box[::2], [\n 'The near corner of box must be non negative.'])\n", (5304, 5369), True, 'import tensorflow as tf\n'), ((5392, 5466), 'tensorflow.assert_non_negative', 'tf.assert_non_negative', (['box_size', "['The box size should be non negative.']"], {}), "(box_size, ['The box size should be non negative.'])\n", (5414, 5466), True, 'import tensorflow as tf\n'), ((5494, 5601), 'tensorflow.assert_greater_equal', 'tf.assert_greater_equal', (['target_size', 'box_size', "['The target size should be not less than box size. ']"], {}), "(target_size, box_size, [\n 'The target size should be not less than box size. 
'])\n", (5517, 5601), True, 'import tensorflow as tf\n'), ((5612, 5647), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['assert_ops'], {}), '(assert_ops)\n', (5635, 5647), True, 'import tensorflow as tf\n'), ((5772, 5812), 'tensorflow.stack', 'tf.stack', (['[0, box[0], box[2], box[4], 0]'], {}), '([0, box[0], box[2], box[4], 0])\n', (5780, 5812), True, 'import tensorflow as tf\n'), ((5826, 5883), 'tensorflow.stack', 'tf.stack', (['[-1, box_size[0], box_size[1], box_size[2], -1]'], {}), '([-1, box_size[0], box_size[1], box_size[2], -1])\n', (5834, 5883), True, 'import tensorflow as tf\n'), ((6237, 6398), 'tensorflow.stack', 'tf.stack', (['[0, 0, padding_offsets[0], after_padding_size[0], padding_offsets[1],\n after_padding_size[1], padding_offsets[2], after_padding_size[2], 0, 0]'], {}), '([0, 0, padding_offsets[0], after_padding_size[0], padding_offsets[\n 1], after_padding_size[1], padding_offsets[2], after_padding_size[2], 0, 0]\n )\n', (6245, 6398), True, 'import tensorflow as tf\n'), ((6789, 6817), 'tensorflow.squeeze', 'tf.squeeze', (['padded'], {'axis': '[0]'}), '(padded, axis=[0])\n', (6799, 6817), True, 'import tensorflow as tf\n'), ((2575, 2590), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (2583, 2590), True, 'import tensorflow as tf\n'), ((4541, 4565), 'tensorflow.expand_dims', 'tf.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (4555, 4565), True, 'import tensorflow as tf\n'), ((5990, 6006), 'tensorflow.maximum', 'tf.maximum', (['x', 'y'], {}), '(x, y)\n', (6000, 6006), True, 'import tensorflow as tf\n')]
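Because crop_to_bounding_box_3d above describes its crop-then-pad behaviour only in its docstring, a small usage sketch may be clearer. This is a hedged sketch, not part of the repository: it assumes a TensorFlow 1.x session-style runtime (matching the tf.assert_* and tf.name_scope calls in the backend) and an import path inferred from the repo layout.

# Hypothetical usage sketch; assumes TF 1.x graph mode and this import path.
import numpy as np
import tensorflow as tf
from convnet3d.backend.tensorflow_backend import crop_to_bounding_box_3d

# A single 4x4x4 volume with one channel: shape [height, width, depth, channels].
image = tf.constant(np.arange(64, dtype=np.float32).reshape(4, 4, 4, 1))

# Box corners are interleaved per axis: [h0, h1, w0, w1, d0, d1].
box = tf.constant([1, 3, 1, 3, 1, 3])

# The 2x2x2 crop is zero-padded (approximately evenly) out to the target size.
target_size = tf.constant([4, 4, 4])

cropped = crop_to_bounding_box_3d(image, box, target_size)
with tf.Session() as sess:
    print(sess.run(cropped).shape)  # (4, 4, 4, 1)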
cobrab11/black1-bot
extensions/everywhere.py
47c1a80029d6183fc990960b422bb3155360702d
# BS mark.1-55
# /* coding: utf-8 */

# BlackSmith plugin
# everywhere_plugin.py

# Coded by: WitcherGeralt ([email protected])
# http://witcher-team.ucoz.ru/

def handler_everywhere(type, source, body):
    if body:
        args = body.split()
        if len(args) >= 2:
            mtype = args[0].strip().lower()
            if mtype == u'чат':
                msgtype = 'public'
            elif mtype == u'приват':
                msgtype = 'private'
            else:
                msgtype = False
            if msgtype:
                command = args[1].strip().lower()
                if len(args) >= 3:
                    Parameters = body[((body.lower()).find(command) + (len(command) + 1)):].strip()
                else:
                    Parameters = ''
                if len(Parameters) <= 96:
                    if COMMANDS.has_key(command):
                        for conf in GROUPCHATS.keys():
                            call_command_handlers(command, msgtype, [source[0], conf, source[2]], Parameters, command)
                    else:
                        reply(type, source, u'Нет такой команды.')
                else:
                    reply(type, source, u'Слишком длинные параметры.')
            else:
                reply(type, source, u'Тип указан некорректно.')
        else:
            reply(type, source, u'инвалид синтакс')
    else:
        reply(type, source, u'я не умею читать мысли')

command_handler(handler_everywhere, 100, "everywhere")
[]
WZMJ/Algorithms
0100.same_tree/solution.py
07f648541d38e24df38bda469665c12df6a50637
from utils import TreeNode


class Solution:
    def is_same_tree(self, p: TreeNode, q: TreeNode) -> bool:
        if p is None and q is None:
            return True
        if not p or not q:
            return False
        return p.val == q.val and self.is_same_tree(p.left, q.left) and self.is_same_tree(p.right, q.right)
[]
The-Academic-Observatory/mag-archiver
tests/test_azure.py
76988020047b4ab9eb2d125f5141dfa7297a6fb3
# Copyright 2020 Curtin University # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Author: James Diprose import os import unittest import uuid from azure.storage.blob import ContainerClient, BlobClient from mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, \ create_blob, delete_table, create_table def generate_names(num_names, prefix): # Create containers names = [] for i in range(num_names): name = make_unique_name(prefix) names.append(name) return names def make_unique_name(prefix: str): return f"{prefix}-{str(uuid.uuid4())}" class TestAzure(unittest.TestCase): account_name: str account_key: str def __init__(self, *args, **kwargs): super(TestAzure, self).__init__(*args, **kwargs) self.account_name = os.getenv('TEST_AZURE_STORAGE_ACCOUNT_NAME') self.account_key = os.getenv('TEST_AZURE_STORAGE_ACCOUNT_KEY') def test_create_table(self): table_name = 'TestCreateTable' try: # Create a table result = create_table(self.account_name, self.account_key, table_name) self.assertTrue(result) finally: # Cleanup delete_table(self.account_name, self.account_key, table_name) def test_delete_table(self): table_name = 'TestDeleteTable' try: # Create a table create_table(self.account_name, self.account_key, table_name) # Delete table result = delete_table(self.account_name, self.account_key, table_name) self.assertTrue(result) finally: # Cleanup delete_table(self.account_name, self.account_key, table_name) def test_create_container(self): container_name = make_unique_name('test-create-container') try: # Create a container result: ContainerClient = create_container(self.account_name, self.account_key, container_name) self.assertIsInstance(result, ContainerClient) self.assertEqual(result.container_name, container_name) finally: # Cleanup delete_container(self.account_name, self.account_key, container_name) def test_create_blob(self): container_name = make_unique_name('test-create-blob') try: # Create a container for the blob create_container(self.account_name, self.account_key, container_name) # Create a blob blob_name = make_unique_name('test-create-blob') + '.txt' blob_data = 'Hello world!' 
result: BlobClient = create_blob(self.account_name, self.account_key, container_name, blob_name, blob_data) self.assertIsInstance(result, BlobClient) self.assertEqual(result.blob_name, blob_name) finally: # Cleanup delete_container(self.account_name, self.account_key, container_name) def test_list_containers(self): num_containers = 3 names = generate_names(num_containers, 'test-list-containers') try: # Create containers for name in names: create_container(self.account_name, self.account_key, name) # List containers containers = list_containers(self.account_name, self.account_key) self.assertEqual(len(containers), num_containers + 1) finally: # Cleanup for name in names: delete_container(self.account_name, self.account_key, name) def test_delete_container(self): container_name = make_unique_name('test-delete-container') try: create_container(self.account_name, self.account_key, container_name) delete_container(self.account_name, self.account_key, container_name) containers = list_containers(self.account_name, self.account_key) container_names = [c.name for c in containers] self.assertNotIn(container_name, container_names) finally: # Cleanup delete_container(self.account_name, self.account_key, container_name) def test_list_blobs(self): container_name = make_unique_name('test-list-blobs') try: # Create container to store the blobs create_container(self.account_name, self.account_key, container_name) # Create the blobs num_blobs = 3 blob_data = 'Hello world!' names = generate_names(num_blobs, 'test-list-blobs') for name in names: file_name = f'{name}.txt' create_blob(self.account_name, self.account_key, container_name, file_name, blob_data) # Check that we can find the blobs blobs = list_blobs(self.account_name, self.account_key, container_name) self.assertEqual(len(blobs), num_blobs) finally: # Cleanup delete_container(self.account_name, self.account_key, container_name) def test_copy_container(self): source_container = make_unique_name('test-copy-container-source') target_container = make_unique_name('test-copy-container-target') target_folder = 'target-folder' try: # Create containers and blobs create_container(self.account_name, self.account_key, source_container) create_container(self.account_name, self.account_key, target_container) # Create blobs in source container num_blobs = 3 blob_data = 'Hello world!' names = generate_names(num_blobs, 'test-copy-container') for name in names: file_name = f'{name}.txt' create_blob(self.account_name, self.account_key, source_container, file_name, blob_data) # Copy blobs from one container to another copy_container(self.account_name, self.account_key, source_container, target_container, target_folder) # Check results blobs = list_blobs(self.account_name, self.account_key, target_container) self.assertEqual(len(blobs), num_blobs) finally: # Delete container delete_container(self.account_name, self.account_key, source_container) delete_container(self.account_name, self.account_key, target_container)
[((1346, 1390), 'os.getenv', 'os.getenv', (['"""TEST_AZURE_STORAGE_ACCOUNT_NAME"""'], {}), "('TEST_AZURE_STORAGE_ACCOUNT_NAME')\n", (1355, 1390), False, 'import os\n'), ((1418, 1461), 'os.getenv', 'os.getenv', (['"""TEST_AZURE_STORAGE_ACCOUNT_KEY"""'], {}), "('TEST_AZURE_STORAGE_ACCOUNT_KEY')\n", (1427, 1461), False, 'import os\n'), ((1598, 1659), 'mag_archiver.azure.create_table', 'create_table', (['self.account_name', 'self.account_key', 'table_name'], {}), '(self.account_name, self.account_key, table_name)\n', (1610, 1659), False, 'from mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, create_blob, delete_table, create_table\n'), ((1747, 1808), 'mag_archiver.azure.delete_table', 'delete_table', (['self.account_name', 'self.account_key', 'table_name'], {}), '(self.account_name, self.account_key, table_name)\n', (1759, 1808), False, 'from mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, create_blob, delete_table, create_table\n'), ((1936, 1997), 'mag_archiver.azure.create_table', 'create_table', (['self.account_name', 'self.account_key', 'table_name'], {}), '(self.account_name, self.account_key, table_name)\n', (1948, 1997), False, 'from mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, create_blob, delete_table, create_table\n'), ((2047, 2108), 'mag_archiver.azure.delete_table', 'delete_table', (['self.account_name', 'self.account_key', 'table_name'], {}), '(self.account_name, self.account_key, table_name)\n', (2059, 2108), False, 'from mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, create_blob, delete_table, create_table\n'), ((2196, 2257), 'mag_archiver.azure.delete_table', 'delete_table', (['self.account_name', 'self.account_key', 'table_name'], {}), '(self.account_name, self.account_key, table_name)\n', (2208, 2257), False, 'from mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, create_blob, delete_table, create_table\n'), ((2447, 2516), 'mag_archiver.azure.create_container', 'create_container', (['self.account_name', 'self.account_key', 'container_name'], {}), '(self.account_name, self.account_key, container_name)\n', (2463, 2516), False, 'from mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, create_blob, delete_table, create_table\n'), ((2695, 2764), 'mag_archiver.azure.delete_container', 'delete_container', (['self.account_name', 'self.account_key', 'container_name'], {}), '(self.account_name, self.account_key, container_name)\n', (2711, 2764), False, 'from mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, create_blob, delete_table, create_table\n'), ((2931, 3000), 'mag_archiver.azure.create_container', 'create_container', (['self.account_name', 'self.account_key', 'container_name'], {}), '(self.account_name, self.account_key, container_name)\n', (2947, 3000), False, 'from mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, create_blob, delete_table, create_table\n'), ((3172, 3262), 'mag_archiver.azure.create_blob', 'create_blob', (['self.account_name', 'self.account_key', 'container_name', 'blob_name', 'blob_data'], {}), '(self.account_name, self.account_key, container_name, blob_name,\n blob_data)\n', (3183, 3262), False, 'from 
mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, create_blob, delete_table, create_table\n'), ((3422, 3491), 'mag_archiver.azure.delete_container', 'delete_container', (['self.account_name', 'self.account_key', 'container_name'], {}), '(self.account_name, self.account_key, container_name)\n', (3438, 3491), False, 'from mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, create_blob, delete_table, create_table\n'), ((3835, 3887), 'mag_archiver.azure.list_containers', 'list_containers', (['self.account_name', 'self.account_key'], {}), '(self.account_name, self.account_key)\n', (3850, 3887), False, 'from mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, create_blob, delete_table, create_table\n'), ((4230, 4299), 'mag_archiver.azure.create_container', 'create_container', (['self.account_name', 'self.account_key', 'container_name'], {}), '(self.account_name, self.account_key, container_name)\n', (4246, 4299), False, 'from mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, create_blob, delete_table, create_table\n'), ((4312, 4381), 'mag_archiver.azure.delete_container', 'delete_container', (['self.account_name', 'self.account_key', 'container_name'], {}), '(self.account_name, self.account_key, container_name)\n', (4328, 4381), False, 'from mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, create_blob, delete_table, create_table\n'), ((4407, 4459), 'mag_archiver.azure.list_containers', 'list_containers', (['self.account_name', 'self.account_key'], {}), '(self.account_name, self.account_key)\n', (4422, 4459), False, 'from mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, create_blob, delete_table, create_table\n'), ((4632, 4701), 'mag_archiver.azure.delete_container', 'delete_container', (['self.account_name', 'self.account_key', 'container_name'], {}), '(self.account_name, self.account_key, container_name)\n', (4648, 4701), False, 'from mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, create_blob, delete_table, create_table\n'), ((4871, 4940), 'mag_archiver.azure.create_container', 'create_container', (['self.account_name', 'self.account_key', 'container_name'], {}), '(self.account_name, self.account_key, container_name)\n', (4887, 4940), False, 'from mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, create_blob, delete_table, create_table\n'), ((5347, 5410), 'mag_archiver.azure.list_blobs', 'list_blobs', (['self.account_name', 'self.account_key', 'container_name'], {}), '(self.account_name, self.account_key, container_name)\n', (5357, 5410), False, 'from mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, create_blob, delete_table, create_table\n'), ((5514, 5583), 'mag_archiver.azure.delete_container', 'delete_container', (['self.account_name', 'self.account_key', 'container_name'], {}), '(self.account_name, self.account_key, container_name)\n', (5530, 5583), False, 'from mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, create_blob, delete_table, create_table\n'), ((5876, 5947), 'mag_archiver.azure.create_container', 'create_container', 
(['self.account_name', 'self.account_key', 'source_container'], {}), '(self.account_name, self.account_key, source_container)\n', (5892, 5947), False, 'from mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, create_blob, delete_table, create_table\n'), ((5960, 6031), 'mag_archiver.azure.create_container', 'create_container', (['self.account_name', 'self.account_key', 'target_container'], {}), '(self.account_name, self.account_key, target_container)\n', (5976, 6031), False, 'from mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, create_blob, delete_table, create_table\n'), ((6460, 6566), 'mag_archiver.azure.copy_container', 'copy_container', (['self.account_name', 'self.account_key', 'source_container', 'target_container', 'target_folder'], {}), '(self.account_name, self.account_key, source_container,\n target_container, target_folder)\n', (6474, 6566), False, 'from mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, create_blob, delete_table, create_table\n'), ((6612, 6677), 'mag_archiver.azure.list_blobs', 'list_blobs', (['self.account_name', 'self.account_key', 'target_container'], {}), '(self.account_name, self.account_key, target_container)\n', (6622, 6677), False, 'from mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, create_blob, delete_table, create_table\n'), ((6790, 6861), 'mag_archiver.azure.delete_container', 'delete_container', (['self.account_name', 'self.account_key', 'source_container'], {}), '(self.account_name, self.account_key, source_container)\n', (6806, 6861), False, 'from mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, create_blob, delete_table, create_table\n'), ((6874, 6945), 'mag_archiver.azure.delete_container', 'delete_container', (['self.account_name', 'self.account_key', 'target_container'], {}), '(self.account_name, self.account_key, target_container)\n', (6890, 6945), False, 'from mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, create_blob, delete_table, create_table\n'), ((1122, 1134), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1132, 1134), False, 'import uuid\n'), ((3719, 3778), 'mag_archiver.azure.create_container', 'create_container', (['self.account_name', 'self.account_key', 'name'], {}), '(self.account_name, self.account_key, name)\n', (3735, 3778), False, 'from mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, create_blob, delete_table, create_table\n'), ((4040, 4099), 'mag_archiver.azure.delete_container', 'delete_container', (['self.account_name', 'self.account_key', 'name'], {}), '(self.account_name, self.account_key, name)\n', (4056, 4099), False, 'from mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, create_blob, delete_table, create_table\n'), ((5192, 5282), 'mag_archiver.azure.create_blob', 'create_blob', (['self.account_name', 'self.account_key', 'container_name', 'file_name', 'blob_data'], {}), '(self.account_name, self.account_key, container_name, file_name,\n blob_data)\n', (5203, 5282), False, 'from mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, create_blob, delete_table, create_table\n'), ((6303, 6395), 
'mag_archiver.azure.create_blob', 'create_blob', (['self.account_name', 'self.account_key', 'source_container', 'file_name', 'blob_data'], {}), '(self.account_name, self.account_key, source_container,\n file_name, blob_data)\n', (6314, 6395), False, 'from mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, create_blob, delete_table, create_table\n')]
uve/tensorflow
tensorflow/contrib/framework/python/framework/tensor_util_test.py
e08079463bf43e5963acc41da1f57e95603f8080
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """tensor_util tests.""" # pylint: disable=unused-import from __future__ import absolute_import from __future__ import division from __future__ import print_function import re import numpy as np from tensorflow.contrib.framework.python.framework import tensor_util from tensorflow.contrib.framework.python.ops import variables as variables_lib2 from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors_impl from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import variables as variables_lib from tensorflow.python.platform import test class LocalVariabletest(test.TestCase): def test_local_variable(self): with self.cached_session() as sess: self.assertEquals([], variables_lib.local_variables()) value0 = 42 variables_lib2.local_variable(value0) value1 = 43 variables_lib2.local_variable(value1) variables = variables_lib.local_variables() self.assertEquals(2, len(variables)) self.assertRaises(errors_impl.OpError, sess.run, variables) variables_lib.variables_initializer(variables).run() self.assertAllEqual(set([value0, value1]), set(sess.run(variables))) class ReduceSumNTest(test.TestCase): def test_reduce_sum_n(self): with self.cached_session(): a = constant_op.constant(1) b = constant_op.constant([2]) c = constant_op.constant([[3, 4], [5, 6]]) self.assertEqual(21, tensor_util.reduce_sum_n([a, b, c]).eval()) class AssertScalarIntTest(test.TestCase): def test_assert_scalar_int(self): tensor_util.assert_scalar_int(constant_op.constant(3, dtype=dtypes.int32)) tensor_util.assert_scalar_int(constant_op.constant(3, dtype=dtypes.int64)) tensor_util.assert_scalar_int(3) with self.assertRaisesRegexp(ValueError, "Expected integer"): tensor_util.assert_scalar_int( constant_op.constant( 3, dtype=dtypes.float32)) with self.assertRaisesRegexp(ValueError, "Expected scalar"): tensor_util.assert_scalar_int( constant_op.constant( [3, 4], dtype=dtypes.int32)) class WithShapeTest(test.TestCase): def _assert_with_shape(self, tensor, expected_value, expected_shape, unexpected_shapes): for unexpected_shape in unexpected_shapes: self.assertRaises(ValueError, tensor_util.with_shape, unexpected_shape, tensor) pattern = ( r"\[Wrong shape for %s \[expected\] \[actual\].\] \[%s\] \[%s\]" % (tensor.name, " ".join([str(dim) for dim in unexpected_shape]), " ".join([str(dim) for dim in expected_shape]))) self.assertRaisesRegexp(errors_impl.OpError, re.compile(pattern), tensor_util.with_shape( constant_op.constant(unexpected_shape), tensor).eval) expected_placeholder = array_ops.placeholder(dtypes.float32) self.assertRaisesRegexp(errors_impl.OpError, re.compile(pattern), 
tensor_util.with_same_shape(expected_placeholder, tensor).eval, {expected_placeholder: np.ones(unexpected_shape)}) self.assertIs(tensor, tensor_util.with_shape(expected_shape, tensor)) self.assertIs( tensor, tensor_util.with_same_shape( constant_op.constant( 1, shape=expected_shape), tensor)) tensor_with_shape = tensor_util.with_shape( constant_op.constant(expected_shape), tensor) np.testing.assert_array_equal(expected_value, tensor_with_shape.eval()) tensor_with_same_shape = tensor_util.with_same_shape(expected_placeholder, tensor) np.testing.assert_array_equal(expected_value, tensor_with_same_shape.eval({ expected_placeholder: np.ones(expected_shape) })) def test_with_shape_invalid_expected_shape(self): with self.cached_session(): self.assertRaisesRegexp(ValueError, "Invalid rank", tensor_util.with_shape, [[1], [2]], constant_op.constant(1.0)) def test_with_shape_invalid_type(self): with self.cached_session(): self.assertRaisesRegexp(ValueError, "Invalid dtype", tensor_util.with_shape, [1.1], constant_op.constant([1.0])) self.assertRaisesRegexp(ValueError, "Invalid dtype", tensor_util.with_shape, np.array([1.1]), constant_op.constant(1.0)) self.assertRaisesRegexp(ValueError, "Invalid dtype", tensor_util.with_shape, constant_op.constant(np.array([1.1])), constant_op.constant(1.0)) def test_with_shape_0(self): with self.cached_session(): value = 42 shape = [0] unexpected_shapes = [[1], [2], [1, 1]] self._assert_with_shape( constant_op.constant( value, shape=shape), value, shape, unexpected_shapes) def test_with_shape_1(self): with self.cached_session(): value = [42] shape = [1] unexpected_shapes = [[0], [2], [1, 1]] self._assert_with_shape( constant_op.constant( value, shape=shape), value, shape, unexpected_shapes) def test_with_shape_2(self): with self.cached_session(): value = [42, 43] shape = [2] unexpected_shapes = [[0], [1], [2, 1]] self._assert_with_shape( constant_op.constant( value, shape=shape), value, shape, unexpected_shapes) def test_with_shape_2x2(self): with self.cached_session(): value = [[42, 43], [44, 45]] shape = [2, 2] unexpected_shapes = [[0], [1], [2, 1]] self._assert_with_shape( constant_op.constant( value, shape=shape), value, shape, unexpected_shapes) def test_with_shape_2x2_with_partial_expected_shape(self): with self.cached_session(): value = [[42, 43], [44, 45]] actual_shape = [2, 2] tensor = constant_op.constant(value, shape=actual_shape) partial_expected_shape = tensor_shape.TensorShape([None, 2]) # Won't raise any exception here: tensor_with_shape = tensor_util.with_shape(partial_expected_shape, tensor) np.testing.assert_array_equal(value, tensor_with_shape.eval()) def test_with_shape_none(self): with self.cached_session(): tensor_no_shape = array_ops.placeholder(dtypes.float32) compatible_shape = [2, 2] with_present_2x2 = tensor_util.with_shape(compatible_shape, tensor_no_shape) self.assertEquals(compatible_shape, with_present_2x2.get_shape().dims) with_future_2x2 = tensor_util.with_shape( constant_op.constant(compatible_shape), tensor_no_shape) array_2x2 = [[42.0, 43.0], [44.0, 45.0]] for tensor_2x2 in [with_present_2x2, with_future_2x2]: np.testing.assert_array_equal(array_2x2, tensor_2x2.eval({ tensor_no_shape: array_2x2 })) self.assertRaisesRegexp(errors_impl.OpError, "Wrong shape", tensor_2x2.eval, {tensor_no_shape: [42.0, 43.0]}) self.assertRaisesRegexp(errors_impl.OpError, "Wrong shape", tensor_2x2.eval, {tensor_no_shape: [42.0]}) def test_with_shape_partial(self): with self.cached_session(): tensor_partial_shape = array_ops.placeholder(dtypes.float32) 
tensor_partial_shape.set_shape([None, 2]) for incompatible_shape in [[0], [1]]: self.assertRaisesRegexp( ValueError, "Shapes must be equal rank, but are 2 and 1", tensor_util.with_shape, incompatible_shape, tensor_partial_shape) for incompatible_shape in [[1, 2, 1]]: self.assertRaisesRegexp(ValueError, "Dimensions must be equal", tensor_util.with_shape, incompatible_shape, tensor_partial_shape) for incompatible_shape in [[2, 1]]: self.assertRaisesRegexp( ValueError, r"Dimension 1 in both shapes must be equal, but are 2 and 1. " r"Shapes are \[\?,2\] and \[2,1\].", tensor_util.with_shape, incompatible_shape, tensor_partial_shape) compatible_shape = [2, 2] with_present_2x2 = tensor_util.with_shape(compatible_shape, tensor_partial_shape) self.assertEquals(compatible_shape, with_present_2x2.get_shape().dims) with_future_2x2 = tensor_util.with_shape( constant_op.constant(compatible_shape), tensor_partial_shape) array_2x2 = [[42.0, 43.0], [44.0, 45.0]] for tensor_2x2 in [with_present_2x2, with_future_2x2]: np.testing.assert_array_equal(array_2x2, tensor_2x2.eval({ tensor_partial_shape: array_2x2 })) self.assertRaises(ValueError, tensor_2x2.eval, {tensor_partial_shape: [42.0, 43.0]}) self.assertRaises(ValueError, tensor_2x2.eval, {tensor_partial_shape: [42.0]}) class RemoveSqueezableDimensionsTest(test.TestCase): def testRemoveSqueezableDimensions(self): self._testRemoveSqueezableDimensions( predictions_have_static_shape=False, predictions_have_extra_dim=False, labels_have_static_shape=False, labels_have_extra_dim=False) def testRemoveSqueezableDimensions_extraLabelDim(self): self._testRemoveSqueezableDimensions( predictions_have_static_shape=False, predictions_have_extra_dim=False, labels_have_static_shape=False, labels_have_extra_dim=True) def testRemoveSqueezableDimensions_staticLabel(self): self._testRemoveSqueezableDimensions( predictions_have_static_shape=False, predictions_have_extra_dim=False, labels_have_static_shape=True, labels_have_extra_dim=False) def testRemoveSqueezableDimensions_staticLabel_extraLabelDim(self): self._testRemoveSqueezableDimensions( predictions_have_static_shape=False, predictions_have_extra_dim=False, labels_have_static_shape=True, labels_have_extra_dim=True) def testRemoveSqueezableDimensions_extraPredictionDim(self): self._testRemoveSqueezableDimensions( predictions_have_static_shape=False, predictions_have_extra_dim=True, labels_have_static_shape=False, labels_have_extra_dim=False) def testRemoveSqueezableDimensions_extraPredictionDim_staticLabel(self): self._testRemoveSqueezableDimensions( predictions_have_static_shape=False, predictions_have_extra_dim=True, labels_have_static_shape=True, labels_have_extra_dim=False) def testRemoveSqueezableDimensions_staticPrediction(self): self._testRemoveSqueezableDimensions( predictions_have_static_shape=True, predictions_have_extra_dim=False, labels_have_static_shape=False, labels_have_extra_dim=False) def testRemoveSqueezableDimensions_staticPrediction_extraLabelDim(self): self._testRemoveSqueezableDimensions( predictions_have_static_shape=True, predictions_have_extra_dim=False, labels_have_static_shape=False, labels_have_extra_dim=True) def testRemoveSqueezableDimensions_static(self): self._testRemoveSqueezableDimensions( predictions_have_static_shape=True, predictions_have_extra_dim=False, labels_have_static_shape=True, labels_have_extra_dim=False) def testRemoveSqueezableDimensions_static_extraLabelDim(self): self._testRemoveSqueezableDimensions( predictions_have_static_shape=True, predictions_have_extra_dim=False, 
labels_have_static_shape=True, labels_have_extra_dim=True) def testRemoveSqueezableDimensions_staticPrediction_extraPredictionDim(self): self._testRemoveSqueezableDimensions( predictions_have_static_shape=True, predictions_have_extra_dim=True, labels_have_static_shape=False, labels_have_extra_dim=False) def testRemoveSqueezableDimensions_static_extraPredictionDim(self): self._testRemoveSqueezableDimensions( predictions_have_static_shape=True, predictions_have_extra_dim=True, labels_have_static_shape=True, labels_have_extra_dim=False) # TODO(ptucker): Replace this with parameterized test. def _testRemoveSqueezableDimensions(self, predictions_have_static_shape, predictions_have_extra_dim, labels_have_static_shape, labels_have_extra_dim): assert not (predictions_have_extra_dim and labels_have_extra_dim) predictions_value = (0, 1, 1, 0, 0, 1, 0) labels_value = (0, 0, 1, 1, 0, 0, 0) input_predictions_value = ([[p] for p in predictions_value] if predictions_have_extra_dim else predictions_value) input_labels_value = ([[l] for l in labels_value] if labels_have_extra_dim else labels_value) with ops.Graph().as_default() as g: feed_dict = {} if predictions_have_static_shape: predictions = constant_op.constant( input_predictions_value, dtype=dtypes.int32) else: predictions = array_ops.placeholder( dtype=dtypes.int32, name="predictions") feed_dict[predictions] = input_predictions_value if labels_have_static_shape: labels = constant_op.constant(input_labels_value, dtype=dtypes.int32) else: labels = array_ops.placeholder(dtype=dtypes.int32, name="labels") feed_dict[labels] = input_labels_value squeezed_predictions, squeezed_labels = ( tensor_util.remove_squeezable_dimensions(predictions, labels)) with self.session(g): variables_lib.local_variables_initializer().run() self.assertAllClose( predictions_value, squeezed_predictions.eval(feed_dict=feed_dict)) self.assertAllClose( labels_value, squeezed_labels.eval(feed_dict=feed_dict)) if __name__ == "__main__": test.main()
[((16270, 16281), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (16279, 16281), False, 'from tensorflow.python.platform import test\n'), ((2684, 2716), 'tensorflow.contrib.framework.python.framework.tensor_util.assert_scalar_int', 'tensor_util.assert_scalar_int', (['(3)'], {}), '(3)\n', (2713, 2716), False, 'from tensorflow.contrib.framework.python.framework import tensor_util\n'), ((4772, 4829), 'tensorflow.contrib.framework.python.framework.tensor_util.with_same_shape', 'tensor_util.with_same_shape', (['expected_placeholder', 'tensor'], {}), '(expected_placeholder, tensor)\n', (4799, 4829), False, 'from tensorflow.contrib.framework.python.framework import tensor_util\n'), ((1730, 1767), 'tensorflow.contrib.framework.python.ops.variables.local_variable', 'variables_lib2.local_variable', (['value0'], {}), '(value0)\n', (1759, 1767), True, 'from tensorflow.contrib.framework.python.ops import variables as variables_lib2\n'), ((1794, 1831), 'tensorflow.contrib.framework.python.ops.variables.local_variable', 'variables_lib2.local_variable', (['value1'], {}), '(value1)\n', (1823, 1831), True, 'from tensorflow.contrib.framework.python.ops import variables as variables_lib2\n'), ((1851, 1882), 'tensorflow.python.ops.variables.local_variables', 'variables_lib.local_variables', ([], {}), '()\n', (1880, 1882), True, 'from tensorflow.python.ops import variables as variables_lib\n'), ((2250, 2273), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1)'], {}), '(1)\n', (2270, 2273), False, 'from tensorflow.python.framework import constant_op\n'), ((2285, 2310), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[2]'], {}), '([2])\n', (2305, 2310), False, 'from tensorflow.python.framework import constant_op\n'), ((2322, 2360), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[[3, 4], [5, 6]]'], {}), '([[3, 4], [5, 6]])\n', (2342, 2360), False, 'from tensorflow.python.framework import constant_op\n'), ((2554, 2597), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(3)'], {'dtype': 'dtypes.int32'}), '(3, dtype=dtypes.int32)\n', (2574, 2597), False, 'from tensorflow.python.framework import constant_op\n'), ((2634, 2677), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(3)'], {'dtype': 'dtypes.int64'}), '(3, dtype=dtypes.int64)\n', (2654, 2677), False, 'from tensorflow.python.framework import constant_op\n'), ((3944, 3981), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.float32'], {}), '(dtypes.float32)\n', (3965, 3981), False, 'from tensorflow.python.ops import array_ops\n'), ((4351, 4397), 'tensorflow.contrib.framework.python.framework.tensor_util.with_shape', 'tensor_util.with_shape', (['expected_shape', 'tensor'], {}), '(expected_shape, tensor)\n', (4373, 4397), False, 'from tensorflow.contrib.framework.python.framework import tensor_util\n'), ((4619, 4655), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['expected_shape'], {}), '(expected_shape)\n', (4639, 4655), False, 'from tensorflow.python.framework import constant_op\n'), ((7613, 7660), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['value'], {'shape': 'actual_shape'}), '(value, shape=actual_shape)\n', (7633, 7660), False, 'from tensorflow.python.framework import constant_op\n'), ((7693, 7728), 'tensorflow.python.framework.tensor_shape.TensorShape', 'tensor_shape.TensorShape', 
(['[None, 2]'], {}), '([None, 2])\n', (7717, 7728), False, 'from tensorflow.python.framework import tensor_shape\n'), ((7797, 7851), 'tensorflow.contrib.framework.python.framework.tensor_util.with_shape', 'tensor_util.with_shape', (['partial_expected_shape', 'tensor'], {}), '(partial_expected_shape, tensor)\n', (7819, 7851), False, 'from tensorflow.contrib.framework.python.framework import tensor_util\n'), ((8017, 8054), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.float32'], {}), '(dtypes.float32)\n', (8038, 8054), False, 'from tensorflow.python.ops import array_ops\n'), ((8116, 8173), 'tensorflow.contrib.framework.python.framework.tensor_util.with_shape', 'tensor_util.with_shape', (['compatible_shape', 'tensor_no_shape'], {}), '(compatible_shape, tensor_no_shape)\n', (8138, 8173), False, 'from tensorflow.contrib.framework.python.framework import tensor_util\n'), ((9184, 9221), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', (['dtypes.float32'], {}), '(dtypes.float32)\n', (9205, 9221), False, 'from tensorflow.python.ops import array_ops\n'), ((10121, 10183), 'tensorflow.contrib.framework.python.framework.tensor_util.with_shape', 'tensor_util.with_shape', (['compatible_shape', 'tensor_partial_shape'], {}), '(compatible_shape, tensor_partial_shape)\n', (10143, 10183), False, 'from tensorflow.contrib.framework.python.framework import tensor_util\n'), ((15874, 15935), 'tensorflow.contrib.framework.python.framework.tensor_util.remove_squeezable_dimensions', 'tensor_util.remove_squeezable_dimensions', (['predictions', 'labels'], {}), '(predictions, labels)\n', (15914, 15935), False, 'from tensorflow.contrib.framework.python.framework import tensor_util\n'), ((1671, 1702), 'tensorflow.python.ops.variables.local_variables', 'variables_lib.local_variables', ([], {}), '()\n', (1700, 1702), True, 'from tensorflow.python.ops import variables as variables_lib\n'), ((2833, 2878), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(3)'], {'dtype': 'dtypes.float32'}), '(3, dtype=dtypes.float32)\n', (2853, 2878), False, 'from tensorflow.python.framework import constant_op\n'), ((3011, 3059), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[3, 4]'], {'dtype': 'dtypes.int32'}), '([3, 4], dtype=dtypes.int32)\n', (3031, 3059), False, 'from tensorflow.python.framework import constant_op\n'), ((3714, 3733), 're.compile', 're.compile', (['pattern'], {}), '(pattern)\n', (3724, 3733), False, 'import re\n'), ((4065, 4084), 're.compile', 're.compile', (['pattern'], {}), '(pattern)\n', (4075, 4084), False, 'import re\n'), ((4487, 4532), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1)'], {'shape': 'expected_shape'}), '(1, shape=expected_shape)\n', (4507, 4532), False, 'from tensorflow.python.framework import constant_op\n'), ((5416, 5441), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {}), '(1.0)\n', (5436, 5441), False, 'from tensorflow.python.framework import constant_op\n'), ((5674, 5701), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[1.0]'], {}), '([1.0])\n', (5694, 5701), False, 'from tensorflow.python.framework import constant_op\n'), ((5849, 5864), 'numpy.array', 'np.array', (['[1.1]'], {}), '([1.1])\n', (5857, 5864), True, 'import numpy as np\n'), ((5866, 5891), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {}), '(1.0)\n', (5886, 5891), 
False, 'from tensorflow.python.framework import constant_op\n'), ((6109, 6134), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {}), '(1.0)\n', (6129, 6134), False, 'from tensorflow.python.framework import constant_op\n'), ((6329, 6369), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['value'], {'shape': 'shape'}), '(value, shape=shape)\n', (6349, 6369), False, 'from tensorflow.python.framework import constant_op\n'), ((6648, 6688), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['value'], {'shape': 'shape'}), '(value, shape=shape)\n', (6668, 6688), False, 'from tensorflow.python.framework import constant_op\n'), ((6971, 7011), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['value'], {'shape': 'shape'}), '(value, shape=shape)\n', (6991, 7011), False, 'from tensorflow.python.framework import constant_op\n'), ((7311, 7351), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['value'], {'shape': 'shape'}), '(value, shape=shape)\n', (7331, 7351), False, 'from tensorflow.python.framework import constant_op\n'), ((8361, 8399), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['compatible_shape'], {}), '(compatible_shape)\n', (8381, 8399), False, 'from tensorflow.python.framework import constant_op\n'), ((10371, 10409), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['compatible_shape'], {}), '(compatible_shape)\n', (10391, 10409), False, 'from tensorflow.python.framework import constant_op\n'), ((15311, 15376), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['input_predictions_value'], {'dtype': 'dtypes.int32'}), '(input_predictions_value, dtype=dtypes.int32)\n', (15331, 15376), False, 'from tensorflow.python.framework import constant_op\n'), ((15427, 15488), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', ([], {'dtype': 'dtypes.int32', 'name': '"""predictions"""'}), "(dtype=dtypes.int32, name='predictions')\n", (15448, 15488), False, 'from tensorflow.python.ops import array_ops\n'), ((15615, 15675), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['input_labels_value'], {'dtype': 'dtypes.int32'}), '(input_labels_value, dtype=dtypes.int32)\n', (15635, 15675), False, 'from tensorflow.python.framework import constant_op\n'), ((15707, 15763), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', ([], {'dtype': 'dtypes.int32', 'name': '"""labels"""'}), "(dtype=dtypes.int32, name='labels')\n", (15728, 15763), False, 'from tensorflow.python.ops import array_ops\n'), ((2001, 2047), 'tensorflow.python.ops.variables.variables_initializer', 'variables_lib.variables_initializer', (['variables'], {}), '(variables)\n', (2036, 2047), True, 'from tensorflow.python.ops import variables as variables_lib\n'), ((4117, 4174), 'tensorflow.contrib.framework.python.framework.tensor_util.with_same_shape', 'tensor_util.with_same_shape', (['expected_placeholder', 'tensor'], {}), '(expected_placeholder, tensor)\n', (4144, 4174), False, 'from tensorflow.contrib.framework.python.framework import tensor_util\n'), ((4294, 4319), 'numpy.ones', 'np.ones', (['unexpected_shape'], {}), '(unexpected_shape)\n', (4301, 4319), True, 'import numpy as np\n'), ((5108, 5131), 'numpy.ones', 'np.ones', (['expected_shape'], {}), '(expected_shape)\n', (5115, 5131), True, 'import numpy as np\n'), ((6060, 6075), 'numpy.array', 
'np.array', (['[1.1]'], {}), '([1.1])\n', (6068, 6075), True, 'import numpy as np\n'), ((15194, 15205), 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), '()\n', (15203, 15205), False, 'from tensorflow.python.framework import ops\n'), ((2389, 2424), 'tensorflow.contrib.framework.python.framework.tensor_util.reduce_sum_n', 'tensor_util.reduce_sum_n', (['[a, b, c]'], {}), '([a, b, c])\n', (2413, 2424), False, 'from tensorflow.contrib.framework.python.framework import tensor_util\n'), ((3825, 3863), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['unexpected_shape'], {}), '(unexpected_shape)\n', (3845, 3863), False, 'from tensorflow.python.framework import constant_op\n'), ((15975, 16018), 'tensorflow.python.ops.variables.local_variables_initializer', 'variables_lib.local_variables_initializer', ([], {}), '()\n', (16016, 16018), True, 'from tensorflow.python.ops import variables as variables_lib\n')]
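The test file in the record above repeatedly checks that an extra trailing dimension of size 1 on either predictions or labels is squeezed away so the two shapes line up. A minimal NumPy sketch of that "squeezable dimension" idea — illustrative only, not the contrib implementation, and the function name is made up:

import numpy as np

def remove_squeezable_dim(predictions, labels):
    # If one input has exactly one more dimension than the other and that
    # extra dimension has size 1, drop it so the shapes match.
    if predictions.ndim == labels.ndim + 1 and predictions.shape[-1] == 1:
        predictions = np.squeeze(predictions, axis=-1)
    elif labels.ndim == predictions.ndim + 1 and labels.shape[-1] == 1:
        labels = np.squeeze(labels, axis=-1)
    return predictions, labels

preds = np.array([[0], [1], [1], [0]])   # shape (4, 1) -- has the extra dim
labs = np.array([0, 0, 1, 1])           # shape (4,)
p, l = remove_squeezable_dim(preds, labs)
assert p.shape == l.shape == (4,)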
vallemrv/tpvB3
tpv/modals/sugerencias.py
9988a528b32692b01bd042cc6486188c4dc2109b
# @Author: Manuel Rodriguez <valle>
# @Date: 10-May-2017
# @Email: [email protected]
# @Last modified by: valle
# @Last modified time: 23-Feb-2018
# @License: Apache license vesion 2.0

from kivy.uix.modalview import ModalView
from kivy.uix.button import Button
from kivy.properties import ObjectProperty, StringProperty, ListProperty
from kivy.lang import Builder

Builder.load_file("view/sugerencias.kv")


class Sugerencias(ModalView):
    onExit = ObjectProperty(None, allownone=True)
    content = ObjectProperty(None, allownone=True)
    texto = StringProperty("")
    des = StringProperty("")
    sug = ListProperty([])
    key = StringProperty("")
    tag = ObjectProperty(None, allownone=True)

    def __init__(self, **kargs):
        super(Sugerencias, self).__init__(**kargs)
        self.auto_dismiss = False

    def on_sug(self, key, value):
        self.lista.rm_all_widgets()
        for item in self.sug:
            btn = Button(text=item)
            btn.tag = item
            btn.bind(on_press=self.onPress)
            self.lista.add_linea(btn)

    def onPress(self, b):
        self.onExit(self.key, self.content, b.tag, self.tag)

    def clear_text(self):
        self.texto = ""

    def exit(self):
        self.texto = self.txtSug.text
        if self.onExit:
            if self.texto != "":
                self.sug.append(self.texto)
            self.onExit(self.key, self.content, self.texto, self.tag)
[((374, 414), 'kivy.lang.Builder.load_file', 'Builder.load_file', (['"""view/sugerencias.kv"""'], {}), "('view/sugerencias.kv')\n", (391, 414), False, 'from kivy.lang import Builder\n'), ((459, 495), 'kivy.properties.ObjectProperty', 'ObjectProperty', (['None'], {'allownone': '(True)'}), '(None, allownone=True)\n', (473, 495), False, 'from kivy.properties import ObjectProperty, StringProperty, ListProperty\n'), ((510, 546), 'kivy.properties.ObjectProperty', 'ObjectProperty', (['None'], {'allownone': '(True)'}), '(None, allownone=True)\n', (524, 546), False, 'from kivy.properties import ObjectProperty, StringProperty, ListProperty\n'), ((559, 577), 'kivy.properties.StringProperty', 'StringProperty', (['""""""'], {}), "('')\n", (573, 577), False, 'from kivy.properties import ObjectProperty, StringProperty, ListProperty\n'), ((588, 606), 'kivy.properties.StringProperty', 'StringProperty', (['""""""'], {}), "('')\n", (602, 606), False, 'from kivy.properties import ObjectProperty, StringProperty, ListProperty\n'), ((617, 633), 'kivy.properties.ListProperty', 'ListProperty', (['[]'], {}), '([])\n', (629, 633), False, 'from kivy.properties import ObjectProperty, StringProperty, ListProperty\n'), ((644, 662), 'kivy.properties.StringProperty', 'StringProperty', (['""""""'], {}), "('')\n", (658, 662), False, 'from kivy.properties import ObjectProperty, StringProperty, ListProperty\n'), ((673, 709), 'kivy.properties.ObjectProperty', 'ObjectProperty', (['None'], {'allownone': '(True)'}), '(None, allownone=True)\n', (687, 709), False, 'from kivy.properties import ObjectProperty, StringProperty, ListProperty\n'), ((946, 963), 'kivy.uix.button.Button', 'Button', ([], {'text': 'item'}), '(text=item)\n', (952, 963), False, 'from kivy.uix.button import Button\n')]
p7g/dd-trace-py
templates/integration/__init__.py
141ac0ab6e9962e3b3bafc9de172076075289a19
""" The foo integration instruments the bar and baz features of the foo library. Enabling ~~~~~~~~ The foo integration is enabled automatically when using :ref:`ddtrace-run <ddtracerun>` or :ref:`patch_all() <patch_all>`. Or use :ref:`patch() <patch>` to manually enable the integration:: from ddtrace import patch patch(foo=True) Global Configuration ~~~~~~~~~~~~~~~~~~~~ .. py:data:: ddtrace.config.foo["service"] The service name reported by default for foo instances. This option can also be set with the ``DD_FOO_SERVICE`` environment variable. Default: ``"foo"`` Instance Configuration ~~~~~~~~~~~~~~~~~~~~~~ To configure the foo integration on an per-instance basis use the ``Pin`` API:: import foo from ddtrace import Pin myfoo = foo.Foo() Pin.override(myfoo, service="myfoo") """ from ...internal.utils.importlib import require_modules required_modules = ["foo"] with require_modules(required_modules) as missing_modules: if not missing_modules: from .patch import patch from .patch import unpatch __all__ = ["patch", "unpatch"]
[]
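The integration template in the record above only exposes ``patch``/``unpatch`` when the traced library can actually be imported. A generic sketch of that guarded-import pattern in plain Python — the module names below are illustrative and this is not ddtrace's internal ``require_modules`` helper:

import importlib.util

def missing_modules(required):
    # Return the subset of module names that cannot be imported.
    return [name for name in required if importlib.util.find_spec(name) is None]

required = ["json", "definitely_not_installed_xyz"]
missing = missing_modules(required)
if not missing:
    # Safe to import the instrumentation that depends on these modules.
    pass
else:
    print("skipping instrumentation, missing:", missing)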
hpagseddy/ZPUI
libs/linux/wpa_cli.py
b82819e523987639c2dfab417f9895d7cd7ce049
from subprocess import check_output, CalledProcessError from ast import literal_eval from time import sleep from helpers import setup_logger logger = setup_logger(__name__, "warning") current_interface = None #wpa_cli related functions and objects def wpa_cli_command(*command): run = ["wpa_cli"] if current_interface: run += ["-i"+current_interface] try: return check_output(run + list(command)) except CalledProcessError as e: raise WPAException(command[0], e.returncode, output=e.output, args=command[1:]) class WPAException(Exception): def __init__(self, command, exit_code, args=None, output=None): self.command = command self.code = exit_code self.args = args if args != []: message = "'wpa_cli {}' returned {}".format(self.command, self.code) else: message = "'wpa_cli {} {}' returned {}".format(self.command, ' '.join(args), self.code) if output: message += "\n Output: {}".format(output) super(WPAException, self).__init__(message) #wpa_cli command wrappers and their helpers def connect_new_network(network_info): #First, looking in the known networks conf_networks = list_configured_networks() network_found = False for network in conf_networks: if network_info['ssid'] == network['ssid']: network_found = True select_network(network['network id']) return True #Then, if it's an open network, just connecting if is_open_network(network_info): network_id = add_network() logger.info(set_network(network_id, 'ssid', '"'+network_info['ssid']+'"')) set_network(network_id, 'key_mgmt', 'NONE') select_network(network_id) return True #Else, there's not enough implemented as for now if not network_found: logger.warning("Hell, I dunno.") return False def is_open_network(network_info): #Might be an approach which doesn't take some things into account return not is_wpa_enabled(network_info) def is_wpa_enabled(network_info): flags = parse_network_flags(network_info['flags']) wpa_enabled = False for flag in flags: if flag.startswith('WPA'): wpa_enabled = True return wpa_enabled def parse_network_flags(flag_string): #Flags go each after another, enclosed in "[]" braces flags = [flag.strip('[]') for flag in flag_string.split('][')] #If anybody knows a better way, do commit return flags #wpa_cli commands def get_interfaces(): output = process_output(wpa_cli_command("interface")) output = output[1:] #First line removed by process_output, second line says "Available interfaces" return output def set_active_interface(interface_name): #TODO output check global current_interface # try to set the module's interface variable, then check status # if status check fails, set the variable back to what it was # and re-raise the exception last_interface = current_interface try: current_interface = interface_name output = process_output(wpa_cli_command("status")) except: current_interface = last_interface raise # else: all went well #if output == "Connected to interface '{}'".format(interface_name): def get_current_interface(): #TODO: check without wireless adapter plugged in output = process_output(wpa_cli_command("ifname")) return output[0] def connection_status(): #TODO: check without wireless adapter plugged in parameters = {} output = process_output(wpa_cli_command("status")) for line in output: if '=' not in line: continue else: param, value = line.split('=',1) parameters[param] = value return parameters def list_configured_networks(): #Gives a nice table with first row as header and tab-separated elements, so I'll use process_table function output = process_output(wpa_cli_command("list_networks")) #As of wpa_supplicant 2.3-1, 
header elements are ['network id', 'ssid', 'bssid', 'flags'] networks = process_table(output[0], output[1:]) return networks def dict_configured_networks_by_ssid(): networks = list_configured_networks() return {n["ssid"]:n for n in networks} def dict_configured_networks_by_id(): networks = list_configured_networks() return {n["network id"]:n for n in networks} def select_network(network_id): return ok_fail_command("select_network", str(network_id)) def enable_network(network_id): return ok_fail_command("enable_network", str(network_id)) def remove_network(network_id): return ok_fail_command("remove_network", str(network_id)) def save_config(): return ok_fail_command("save_config") def disable_network(network_id): return ok_fail_command("disable_network", str(network_id)) def initiate_scan(): return ok_fail_command("scan") def disconnect(): return ok_fail_command("disconnect") def reconnect(): return ok_fail_command("reconnect") def parse_string_from_cli(ssid): return literal_eval("'{}'".format(ssid)) def get_scan_results(): #Currently I know of no way to know if the scan results got updated since last time scan was initiated output = process_output(wpa_cli_command("scan_results")) #As of wpa_supplicant 2.3-1, header elements are ['bssid', 'frequency', 'signal level', 'flags', 'ssid'] networks = process_table(output[0], output[1:]) # Filtering SSIDs to allow for using Unicode SSIDs for network in networks: network["ssid"] = parse_string_from_cli(network["ssid"]) return networks def add_network(): return int_fail_command("add_network") def set_network(network_id, param_name, value): if param_name == "ssid": value = 'P'+value return ok_fail_command("set_network", str(network_id), param_name, value) def get_network(network_id, param_name): output = wpa_cli_command("get_network", str(network_id), param_name) value = process_output(output)[0] if value.startswith("'") or value.startswith('"'): value = literal_eval(value) return value #Helper commands def ok_fail_command(command_name, *args): #Wrapper around commands which return either "OK" or "FAIL" #Might fail if the wireless dongle gets unplugged or something output = process_output(wpa_cli_command(command_name, *[str(arg) for arg in args])) if output[0] == "OK": return True else: raise WPAException(command_name, output[0], args) def int_fail_command(command_name, *args): output = process_output(wpa_cli_command(command_name, *[str(arg) for arg in args])) try: return int(output[0]) except: raise WPAException(command_name, output[0], args) def process_table(header, contents): #Takes a tab-separated table and returns a list of dicts, each dict representing a row and having column_name:value mappings table = [] #I'm going to split the header to column names and use those for dictionary keys so that there's no need to hard-code values column_names = [name.strip(' ') for name in header.split(' / ')] for line in contents: row = {} values = line.split('\t') for i, value in enumerate(values): column_name = column_names[i] row[column_name] = value table.append(row) return table def process_output(output): #First line of output of wpa_cli (almost?) always says "Selected interface: $INT" # but only if the interface is not passed using "wpa_cli -iinterface". 
lines = output.split('\n') if not current_interface: lines = lines[1:] #First line has the "Selected interface: $INT" return [line.strip(' ') for line in lines if line] #Removing all whitespace and not counting empty lines if __name__ == "__main__": print(get_current_interface()) print(get_interfaces()) print(list_configured_networks()) print(connection_status()) print(initiate_scan()) for i in range(7): sleep(1) print(get_scan_results()) print(initiate_scan()) print(initiate_scan())
[((152, 185), 'helpers.setup_logger', 'setup_logger', (['__name__', '"""warning"""'], {}), "(__name__, 'warning')\n", (164, 185), False, 'from helpers import setup_logger\n'), ((6151, 6170), 'ast.literal_eval', 'literal_eval', (['value'], {}), '(value)\n', (6163, 6170), False, 'from ast import literal_eval\n'), ((8097, 8105), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (8102, 8105), False, 'from time import sleep\n')]
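Most of the wrappers in the wpa_cli record above funnel wpa_cli's tabular output through ``process_table``, which zips a " / "-separated header row with tab-separated data rows. A standalone sketch of that parsing step, using made-up sample output rather than a real wpa_cli call:

def process_table(header, rows):
    # Header columns are separated by ' / ', row values by tabs.
    names = [n.strip() for n in header.split(" / ")]
    return [dict(zip(names, line.split("\t"))) for line in rows]

sample_header = "network id / ssid / bssid / flags"
sample_rows = ["0\thome-wifi\tany\t[CURRENT]", "1\toffice\tany\t[DISABLED]"]
for row in process_table(sample_header, sample_rows):
    print(row["network id"], row["ssid"], row["flags"])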
eukreign/python-v8
demos/ServerSideBrowser.py
f20d7bef766a2ae3573cc536e7d03e07afe9b173
#!/usr/bin/env python from __future__ import with_statement import sys, traceback, os, os.path import xml.dom.minidom import logging class Task(object): @staticmethod def waitAll(tasks): pass class FetchFile(Task): def __init__(self, url): self.url = url def __call__(self): logging.debug("fetching from %s", self.url) try: return urllib2.urlopen(self.url) except: logging.warn("fail to fetch %s: %s", self.url, traceback.format_exc()) return None class Evaluator(Task): def __init__(self, target): assert hasattr(target, "eval") self.target = target def __call__(self): try: self.target.eval(self.pipeline) except: logging.warn("fail to evalute %s: %s", self.target, traceback.format_exc()) return self.target def __repr__(self): return "<Evaluator object for %s at 0x%08X>" % (self.target, id(self)) class WebObject(object): context = [] def __enter__(self): self.context.append(self) logging.debug("entering %s...", self) return self def __exit__(self, exc_type, exc_value, traceback): self.context.pop() logging.debug("leaving %s...", self) def __init__(self, parent, url): self.children = [] self.parent = parent self.url = url @staticmethod def current(): current = WebObject.context[-1] if len(WebObject.context) > 0 else None return current @property def page(self): tag = self.parent while not isinstance(tag, WebPage): tag = tag.parent return tag class WebScript(WebObject): def __init__(self, parent, value, url): WebObject.__init__(self, parent, url) if type(value) in [str, unicode]: self.script = value elif hasattr(value, "read"): self.script = value.read() else: self.func = value def eval(self, pipeline): if len(WebObject.context) > 0: WebObject.context[-1].children.append((None, self)) with self: if hasattr(self, "script"): self.result = self.page.window.eval(self.script) else: self.result = self.page.window.execute(self.func) class HtmlStyle(PyV8.JSClass): def __init__(self, node): self._node = node self._attrs = self.parse(node.getAttribute("style")) def parse(self, style): attrs = {} try: for attr in style.split(';'): if attr == '': continue strs = attr.split(':') if len(strs) == 2: attrs[strs[0]] = strs[1] else: attrs[attr] = None except: logging.warn("fail to parse the style attribute: %s", sys.exc_info()[1]) return attrs def __getattr__(self, name): try: try: return object.__getattribute__(self, name) except AttributeError: return object.__getattribute__(self, "_attrs")[name] except: logging.error(sys.exc_info()) def __setattr__(self, name, value): try: if name[0] == '_': return object.__setattr__(self, name, value) else: node = object.__getattribute__(self, "_node") attrs = object.__getattribute__(self, "_attrs") style = ";".join(["%s:%s" % (k, v) if v else k for k, v in attrs.items()]) if node.hasAttribute("style") and len(style) == 0: node.removeAttribute("style") elif len(style) > 0: node.setAttribute("style", style) except: logging.error(sys.exc_info()) class WebCss(WebObject): def __init__(self, parent, value, url): WebObject.__init__(self, parent, url) self.css = value if type(value) in [str, unicode] else value.read() def eval(self, pipeline): logging.info("evalute css: %s...", self.css[:20]) with self: pass class WebPage(WebObject): def __init__(self, parent, response, url): WebObject.__init__(self, parent, url) self.code = response.code self.headers = response.headers html = response.read() self.size = len(html) self.dom = BeautifulSoup.BeautifulSoup(html) self.window = HtmlWindow(self, self.dom) def __repr__(self): return "<WebPage at %s>" % self.url def evalScript(self, pipeline, 
script, parent): if script.has_key("type") and script["type"] != "text/javascript": raise NotImplementedError("not support script type %s", script["type"]) elif script.has_key("src"): if script["src"].startswith("http://www.google-analytics.com"): return None return pipeline.openScript(self, script["src"], lambda child: parent.children.append((script, child))) else: return pipeline.evalScript(self, unicode(script.string).encode("utf-8"), lambda child: parent.children.append((script, child))) def evalTag(self, pipeline, tag, parent): with parent: tasks = [] for iframe in tag.findAll('iframe'): tasks.append(pipeline.openPage(self, iframe["src"], lambda page: parent.children.append((iframe, page)))) for frame in tag.findAll('frame'): tasks.append(pipeline.openPage(self, frame["src"], lambda page: parent.children.append((frame, page)))) for link in tag.findAll('link', rel='stylesheet', type='text/css', href=True): tasks.append(pipeline.openCss(self, link["href"], lambda css: parent.children.append((link, css)))) for style in tag.findAll('style,', type='text/css'): tasks.append(pipeline.evalCss(self, unicode(style.string).encode("utf-8"), lambda css: parent.children.append((link, css)))) for script in tag.findAll('script'): tasks.append(self.evalScript(pipeline, script, parent)) return tasks def eval(self, pipeline): with self.window.ctxt: scripts = [] self.window.document.onCreateElement = lambda element: scripts.append((element, WebObject.current())) if element.tagName == "script" else None self.window.document.onDocumentWrite = lambda element: self.evalTag(pipeline, element.tag, WebObject.current()) tasks = self.evalTag(pipeline, self.dom, self) Task.waitAll(tasks) self.window.timers.sort(lambda x, y: x[0] - y[0]) for interval, code in self.window.timers: tasks.append(pipeline.evalScript(self, code)) try: scripts.append((self.window.document.body['onload'], self)) except: pass for script, parent in scripts: with parent: tasks.append(self.evalScript(pipeline, script.tag, parent)) class WebSession(object): def __init__(self, root): self.root = root def __repr__(self): return "<WebSession at %s>" % self.root.url def dumpName(self, obj): if isinstance(obj, WebCss): return "Css%d" % id(obj) if isinstance(obj, WebScript): return "Script%d" % id(obj) if isinstance(obj, WebPage): return "Page%d" % id(obj) return "Object%d" % id(obj) def dumpChildren(self, out, obj): for tag, child in obj.children: if isinstance(child, WebCss): self.dumpCss(out, child) elif isinstance(child, WebScript): self.dumpScript(out, child) elif isinstance(child, WebPage): self.dumpPAge(out, child) def dumpCss(self, out, css): print >>out, '%s [label="%s"];' % (self.dumpName(css), css.url or "inline CSS") print >>out, '%s -> %s;' % (self.dumpName(css.parent), self.dumpName(css)) self.dumpChildren(out, css) def dumpScript(self, out, script): print >>out, '%s [label="%s"];' % (self.dumpName(script), script.url or "inline Script") print >>out, '%s -> %s;' % (self.dumpName(script.parent), self.dumpName(script)) self.dumpChildren(out, script) def dumpPage(self, out, page): print >>out, '%s [label="%s"];' % (self.dumpName(page), page.url) self.dumpChildren(out, page) def save(self, filename): with open(filename, "w") as f: print >>f, "digraph WebSession {" self.dumpPage(f, self.root) print >>f, "}" class Pipeline(object): def __init__(self): self.evalPage = self.getEvaluator(WebPage) self.openPage = self.getOpener(WebPage) self.evalCss = self.getEvaluator(WebCss) self.openCss = self.getOpener(WebCss) self.evalScript = 
self.getEvaluator(WebScript) self.openScript = self.getOpener(WebScript) def queue(self, task, callback): try: task.pipeline = self result = task() if result: task.result = callback(result) return task except: logging.error("fail to execute task %s", task) logging.debug(traceback.format_exc()) def openSession(self, url, callback): self.openPage(None, url, lambda page: callback(WebSession(page))) def getEvaluator(self, clazz): def evaluator(parent, target, callback=None): self.queue(Evaluator(clazz(parent, target, None)), lambda result: callback(result) if callback else None) return evaluator def getOpener(self, clazz): def opener(parent, url, callback=None): if parent: url = urlparse.urljoin(parent.url, url) self.queue(FetchFile(url), lambda response: self.queue(Evaluator(clazz(parent, response, url)), lambda result: callback(result) if callback else None)) return opener class Browser(object): pipeline = Pipeline() sessions = [] def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): pass @property def version(self): return "0.1 (Google v8 engine v%s)" % PyV8.JSEngine.version def parseCmdLine(self): from optparse import OptionParser parser = OptionParser(version="%prog v" + self.version) parser.add_option("-q", "--quiet", action="store_const", const=logging.FATAL, dest="logLevel", default=logging.WARN) parser.add_option("-v", "--verbose", action="store_const", const=logging.INFO, dest="logLevel") parser.add_option("-d", "--debug", action="store_const", const=logging.DEBUG, dest="logLevel") parser.add_option("--log-format", dest="logFormat", default="%(asctime)s %(levelname)s %(message)s") (self.opts, self.args) = parser.parse_args() return True def switchMode(self, mode): self.mode = mode def terminate(self): self.terminated = True def loadJSFile(self, filename): logging.info("load javascript file %s" % filename) with open(filename) as f: PyV8.JSEngine().compile(f.read()).run() def openUrl(self, url): self.pipeline.openSession(url, lambda session: self.sessions.append(session)) def findSessions(self, pattern): for p in pattern.split(): try: yield self.sessions[int(p)] except: for s in self.sessions: if s.root.url.find(p) >= 0: yield s def listSessions(self, pattern): for session in self.findSessions(pattern) if pattern else self.sessions: print "#%d\t%s" % (self.sessions.index(session), session.root.url) COMMANDS = ( { "names" : ["javascript", "js"], "help" : "switch to the javascript mode", "handler" : lambda self, line: self.switchMode("javascript"), }, { "names" : ["python", "py"], "help" : "switch to the python mode", "handler" : lambda self, line: self.switchMode("python"), }, { "names" : ["shell", "sh"], "help" : "switch to the shell mode", "handler" : lambda self, line: self.switchMode("shell"), }, { "names" : ["exit", "quit", "q"], "help" : "exit the shell", "handler" : lambda self, line: self.terminate(), }, { "names" : ["help", "?"], "help" : "show the help screen" }, { "names" : ["load", "l"], "help" : "load javascript file", "handler" : lambda self, line: self.loadJSFile(line), }, { "names" : ["open", "o"], "help" : "open a HTML page", "handler" : lambda self, line: self.openUrl(line) }, { "names" : ["sessions", "s"], "help" : "list the web sessions", "handler" : lambda self, line: self.listSessions(line) }, ) def runCommand(self, line): for command in self.COMMANDS: for name in command["names"]: if line.startswith(name): if command.has_key("handler"): try: return command["handler"](self, line[len(name):].strip()) except: traceback.print_exc() break 
else: break for command in self.COMMANDS: print "%s %s" % (", ".join(command["names"]).rjust(15), command["help"]) def runJavascript(self, source): try: result = PyV8.JSEngine().compile(source).run() if result: print str(result) except: traceback.print_exc() def runShellCommand(self, line): try: os.system(line) except: traceback.print_exc() MODES = { "python" : { "abbr" : "py" }, "javascript" : { "abbr" : "js" }, "shell" : { "abbr" : "sh" }, } def runShell(self): import code logging.basicConfig(level=self.opts.logLevel, format=self.opts.logFormat) logging.debug("settings: %s", self.opts) self.mode = "python" self.console = code.InteractiveConsole({"sessions" : self.sessions}) self.terminated = False while not self.terminated: line = self.console.raw_input(self.MODES[self.mode]["abbr"] + ">").strip() if len(line) == 0: continue if line[0] == '`': self.runCommand(line[1:]) elif line[0] == '?': self.runJavascript(line[1:]) elif line[0] == '!': self.runShellCommand(line[1:]) else: if self.mode == "python": self.console.runsource(line) elif self.mode == "javascript": self.runJavascript(line) elif self.mode == "shell": self.runShellCommand(line) else: print "unknown mode - " + self.mode if __name__ == "__main__": with Browser() as browser: if browser.parseCmdLine(): browser.runShell()
[]
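The interactive shell in the browser record above dispatches each input line against a table of command aliases (its ``COMMANDS`` tuple) by prefix match. A small self-contained sketch of that dispatch idea — the handlers here are placeholders, not the browser's real ones:

COMMANDS = (
    {"names": ["open", "o"], "handler": lambda arg: print("opening", arg)},
    {"names": ["quit", "q"], "handler": lambda arg: print("bye")},
)

def run_command(line):
    # Try each command's aliases in order; the first prefix match wins.
    for command in COMMANDS:
        for name in command["names"]:
            if line.startswith(name):
                return command["handler"](line[len(name):].strip())
    print("unknown command:", line)

run_command("open http://example.com")
run_command("q")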
WielderOfMjoelnir/pypeira
pypeira/io/fits.py
4ef554c577875e09f55673f8e6ea53ba129fb37f
from __future__ import division

import fitsio

"""
A FITS file is comprised of segments called Header/Data Units (HDUs), where the first HDU is
called the 'Primary HDU', or 'Primary Array'. The primary data array can contain a 1-999
dimensional array of 1, 2 or 4 byte integers or 4 or 8 byte floating point numbers using IEEE
representation. A typical primary array could contain a 1-D spectrum, a 2-D image, or a 3-D
data cube (this is what's coming from the SSC).

Any number of additional HDUs may follow the primary array. These additional HDUs are referred
to as FITS 'extensions'. Three types of standard extensions are currently defined:

* Image Extensions
    * Contain a 0-999 dimensional array of pixels, similar to the primary array
    * Header begins with XTENSION = 'IMAGE'
* ASCII Table Extensions
    * Store tabular information with all numeric information stored in ASCII formats.
      While ASCII tables are generally less efficient than binary tables, they can be made
      relatively human readable and can store numeric information with essentially arbitrary
      size and accuracy (e.g., 16 byte reals).
    * Header begins with XTENSION = 'TABLE'
* Binary Table Extensions
    * Store tabular information in a binary representation. Each cell in the table can be an
      array, but the dimensionality of the array must be constant within a column. The strict
      standard supports only one-dimensional arrays, but a convention to support
      multi-dimensional arrays is widely accepted.
    * Header begins with XTENSION = 'BINTABLE'

In addition to the structures above, there is one other type of FITS HDU called "Random Groups"
that is almost exclusively used for applications in radio interferometry. The random groups
format should not be used for other types of applications.

.. [REF] fits.gsfc.nasa.gov/fits_primer.html
"""


def read_headers(path, *args, **kwargs):
    # Reads the headers from the FITS file
    header = fitsio.read_header(path, *args, **kwargs)

    return header


def read_image(path, *args, **kwargs):
    # Reads the image data from the FITS file
    data = fitsio.read(path, *args, **kwargs)

    return data


def read_fits(path, headers_only=False, image_only=False, *args, **kwargs):
    """
    Reader function for FITS files. Takes advantage of the fitsio reader function.

    Parameters
    ----------
    path: str
        Path to the FITS file you want to read.
    headers_only: bool, optional
        Set to True if you only want to read the headers of the file. If True, the data
        returned will only be the headers of the files read. Default is False.
    image_only: bool, optional
        Set to True if you only want to read the image data of the file. If True, the data
        returned will be a numpy array corresponding to the image data of the files read.
        Default is False.
    *args: optional
        Contains all arguments that will be passed onto the fitsio reader. This reader will be
        fitsio.read_headers() or fitsio.FITS() depending on whether 'headers_only' is True or
        False.
    **kwargs: optional
        Contains all keyword arguments that will be passed to the fitsio reader.

    Returns
    -------
    hdr, image: FITSHDR object, np.array
        If both of the "only"-keywords are False, a (FITSHDR, np.array)-pair is returned.
        Note that a FITSHDR can be accessed by indexing, as a normal dictionary.
        See fitsio.fitslib.FITSHDR for the implementation of FITSHDR.
    FITSHDR object
        If 'headers_only' is True, only the headers are returned, as a FITSHDR object.
    numpy.array
        If 'image_only' is True, only the image data is returned, as a numpy.array.
    """
    if headers_only:
        hdr = read_headers(path, *args, **kwargs)
        return hdr
    elif image_only:
        image = read_image(path, *args, **kwargs)
        return image
    else:
        hdr = read_headers(path, *args, **kwargs)
        image = read_image(path, *args, **kwargs)
        return hdr, image
[((1951, 1992), 'fitsio.read_header', 'fitsio.read_header', (['path', '*args'], {}), '(path, *args, **kwargs)\n', (1969, 1992), False, 'import fitsio\n'), ((2111, 2145), 'fitsio.read', 'fitsio.read', (['path', '*args'], {}), '(path, *args, **kwargs)\n', (2122, 2145), False, 'import fitsio\n')]
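A minimal usage sketch of the readers in the record above. It assumes fitsio is installed, that the module is importable as pypeira.io.fits (inferred from the repo path), and that 'example.fits' is a placeholder for a real FITS file.

# Hypothetical usage of read_fits(); 'example.fits' is a placeholder path.
from pypeira.io.fits import read_fits

hdr, image = read_fits("example.fits")                     # headers and image data together
hdr_only = read_fits("example.fits", headers_only=True)   # headers only
print(hdr_only["NAXIS"], image.shape)                     # a FITSHDR indexes like a dict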
RyanCargan/emscripten
system/lib/update_musl.py
6d3859f88e1d6394395760153c0a8cfa6a876ac7
#!/usr/bin/env python3
# Copyright 2021 The Emscripten Authors.  All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License.  Both these licenses can be
# found in the LICENSE file.

"""Simple script for updating musl from an external git repo.

The upstream sources, along with our local changes, live at:

  https://github.com/emscripten-core/musl

To update musl, first make sure all changes from the emscripten repo are
present in the `emscripten` branch of the above repo.  Then run
`git merge v<musl_version>` to pull in the latest musl changes from a given
musl version.  Once any merge conflicts are resolved, those changes can then
be copied back into emscripten using this script.
"""

import os
import sys
import shutil
import subprocess

script_dir = os.path.abspath(os.path.dirname(__file__))
local_src = os.path.join(script_dir, 'libc', 'musl')

exclude_dirs = (
    # Top level directories we don't include
    'tools', 'obj', 'lib', 'crt', 'musl', 'compat',
    # Parts of src we don't build
    'malloc',
    # Arch-specific code we don't use
    'arm', 'x32', 'sh', 'i386', 'x86_64', 'aarch64', 'riscv64', 's390x',
    'mips', 'mips64', 'mipsn32', 'powerpc', 'powerpc64', 'm68k', 'microblaze',
    'or1k', 'generic')

musl_dir = os.path.abspath(sys.argv[1])


def should_ignore(name):
    return name in exclude_dirs or name[0] == '.'


def ignore(dirname, contents):
    return [c for c in contents if should_ignore(c)]


def main():
    assert os.path.exists(musl_dir)

    # Remove old version
    shutil.rmtree(local_src)

    # Copy new version into place
    shutil.copytree(musl_dir, local_src, ignore=ignore)


if __name__ == '__main__':
    main()
[((902, 942), 'os.path.join', 'os.path.join', (['script_dir', '"""libc"""', '"""musl"""'], {}), "(script_dir, 'libc', 'musl')\n", (914, 942), False, 'import os\n'), ((1315, 1343), 'os.path.abspath', 'os.path.abspath', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (1330, 1343), False, 'import os\n'), ((863, 888), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (878, 888), False, 'import os\n'), ((1526, 1550), 'os.path.exists', 'os.path.exists', (['musl_dir'], {}), '(musl_dir)\n', (1540, 1550), False, 'import os\n'), ((1577, 1601), 'shutil.rmtree', 'shutil.rmtree', (['local_src'], {}), '(local_src)\n', (1590, 1601), False, 'import shutil\n'), ((1637, 1688), 'shutil.copytree', 'shutil.copytree', (['musl_dir', 'local_src'], {'ignore': 'ignore'}), '(musl_dir, local_src, ignore=ignore)\n', (1652, 1688), False, 'import shutil\n')]
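The script's ignore() callback is the standard shutil.copytree hook: it is called per directory with the directory name and its contents, and returns the names to skip. Below is a small self-contained sketch of that pattern, using throwaway temporary directories in place of musl_dir and local_src.

# Sketch of the copytree-with-ignore pattern; paths and the exclude set are placeholders.
import os
import shutil
import tempfile

exclude = {"obj", "tools"}

def ignore(dirname, contents):
    # Return the subset of names copytree should skip in this directory.
    return [c for c in contents if c in exclude or c.startswith(".")]

src = tempfile.mkdtemp()
for name in ("src", "obj", ".git"):
    os.mkdir(os.path.join(src, name))

dst = os.path.join(tempfile.mkdtemp(), "copy")
shutil.copytree(src, dst, ignore=ignore)
print(sorted(os.listdir(dst)))  # ['src'], the excluded names were skipped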
xiamx/scout_apm_python
src/scout_apm/instruments/pymongo.py
d03dab45f65cf7d1030e11fabf6da4cf6e72ee59
from __future__ import absolute_import, division, print_function, unicode_literals

import logging

# Used in the exec() call below.
from scout_apm.core.monkey import monkeypatch_method, unpatch_method  # noqa: F401
from scout_apm.core.tracked_request import TrackedRequest  # noqa: F401

logger = logging.getLogger(__name__)


class Instrument(object):
    PYMONGO_METHODS = [
        "aggregate", "bulk_write", "count", "create_index", "create_indexes",
        "delete_many", "delete_one", "distinct", "drop", "drop_index",
        "drop_indexes", "ensure_index", "find_and_modify", "find_one",
        "find_one_and_delete", "find_one_and_replace", "find_one_and_update",
        "group", "inline_map_reduce", "insert", "insert_many", "insert_one",
        "map_reduce", "reindex", "remove", "rename", "replace_one", "save",
        "update", "update_many", "update_one",
    ]

    def __init__(self):
        self.installed = False

    def installable(self):
        try:
            from pymongo.collection import Collection  # noqa: F401
        except ImportError:
            logger.info("Unable to import for PyMongo instruments")
            return False
        if self.installed:
            logger.warn("PyMongo Instruments are already installed.")
            return False
        return True

    def install(self):
        if not self.installable():
            logger.info("PyMongo instruments are not installable. Skipping.")
            return False

        self.installed = True

        try:
            from pymongo.collection import Collection  # noqa: F401
        # There is no way the import can fail if self.installable() succeeded.
        except ImportError:  # pragma: no cover
            logger.info(
                "Unable to import for PyMongo instruments. Instrument install failed."
            )
            return False

        for method_str in self.__class__.PYMONGO_METHODS:
            try:
                code_str = """\
@monkeypatch_method(Collection)
def {method_str}(original, self, *args, **kwargs):
    tr = TrackedRequest.instance()
    name = '/'.join(['MongoDB', self.name, '{camel_name}'])
    span = tr.start_span(operation=name, ignore_children=True)
    span.tag('name', self.name)
    try:
        return original(*args, **kwargs)
    finally:
        tr.stop_span()
""".format(
                    method_str=method_str,
                    camel_name="".join(c.title() for c in method_str.split("_")),
                )
                exec(code_str)
                logger.info("Instrumented PyMongo Collection.%s", method_str)
            except Exception as e:
                logger.warn(
                    "Unable to instrument for PyMongo Collection.%s: %r", method_str, e
                )
                return False
        return True

    def uninstall(self):
        if not self.installed:
            logger.info("PyMongo instruments are not installed. Skipping.")
            return False

        self.installed = False

        from pymongo.collection import Collection

        for method_str in self.__class__.PYMONGO_METHODS:
            unpatch_method(Collection, method_str)
[((298, 325), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (315, 325), False, 'import logging\n'), ((3319, 3357), 'scout_apm.core.monkey.unpatch_method', 'unpatch_method', (['Collection', 'method_str'], {}), '(Collection, method_str)\n', (3333, 3357), False, 'from scout_apm.core.monkey import monkeypatch_method, unpatch_method\n')]
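A hedged usage sketch of the instrument in the record above: install it before issuing pymongo calls so that each patched Collection method is recorded as a span on the current TrackedRequest. It assumes scout_apm and pymongo are installed and a MongoDB server is reachable; the URI, database, and collection names are placeholders.

# Hypothetical wiring of the Instrument class; the MongoDB URI is a placeholder.
from pymongo import MongoClient
from scout_apm.instruments.pymongo import Instrument

instrument = Instrument()
if instrument.install():                                       # monkeypatches Collection methods
    collection = MongoClient("mongodb://localhost:27017").test_db.test_collection
    collection.insert_one({"probe": 1})                        # span named 'MongoDB/test_collection/InsertOne'
    instrument.uninstall()                                      # restores the original methods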
uktrade/tamato
measures/tests/factories.py
4ba2ffb25eea2887e4e081c81da7634cd7b4f9ca
import random
from typing import Optional

import factory

from common.tests import factories
from measures.sheet_importers import MeasureSheetRow


class MeasureSheetRowFactory(factory.Factory):
    """
    A factory that produces a row that might be read from a sheet of measures as
    recognised by the :class:`measures.sheet_importers.MeasureSheetRow` importer.

    The factory references a MeasureFactory to do the production of an actual
    Measure, and then references the data produced by the MeasureFactory to build
    up a row of string values. The values are then built into a tuple in the order
    specified in the `MeasureSheetRow` importer.
    """

    class Meta:
        model = tuple
        exclude = ["measure"]

    measure = factory.SubFactory(factories.MeasureFactory)

    item_id = factory.SelfAttribute("measure.goods_nomenclature.item_id")
    measure_type_description = factory.SelfAttribute("measure.measure_type.description")
    duty_sentence = factory.sequence(lambda n: f"{n}.00%")
    origin_description = factory.LazyAttribute(
        lambda m: m.measure.geographical_area.get_description().description,
    )
    excluded_origin_descriptions = factory.LazyAttribute(
        lambda m: random.choice(MeasureSheetRow.separators).join(
            e.excluded_geographical_area.get_description().description
            for e in m.measure.exclusions.all()
        ),
    )
    quota_order_number = factory.LazyAttribute(
        lambda m: m.measure.order_number.order_number
        if m.measure.order_number
        else m.measure.dead_order_number,
    )
    additional_code_id = factory.LazyAttribute(
        lambda m: m.measure.additional_code.type.sid + m.measure.additional_code.code
        if m.measure.additional_code
        else m.measure.dead_additional_code,
    )
    validity_start_date = factory.SelfAttribute("measure.valid_between.lower")
    validity_end_date = factory.SelfAttribute("measure.valid_between.upper")
    regulation_id = factory.SelfAttribute("measure.generating_regulation.regulation_id")
    footnote_ids = factory.LazyAttribute(
        lambda m: random.choice(MeasureSheetRow.separators).join(
            f.footnote_type.footnote_type_id + f.footnote_id
            for f in m.measure.footnotes.all()
        ),
    )

    @factory.lazy_attribute
    def conditions(self) -> Optional[str]:
        """Returns a string that can be parsed by the
        :class:`measures.parsers.ConditionSentenceParser`."""
        if not self.measure.conditions.exists():
            return None

        parts = []
        for c in self.measure.conditions.all():
            part = []
            part.append(c.condition_code.code)
            if c.required_certificate:
                part.append("cert:")
                part.append(
                    f"{c.required_certificate.certificate_type.sid}-{c.required_certificate.sid}",
                )
            part.append(f"({c.action.code}):")
            parts.append(" ".join(part))

        return f"Cond: {'; '.join(parts)}"

    @classmethod
    def _create(cls, model_class, *args, **kwargs):
        data = [kwargs[k] for k in MeasureSheetRow.columns]
        return super()._create(model_class, data)
[((761, 805), 'factory.SubFactory', 'factory.SubFactory', (['factories.MeasureFactory'], {}), '(factories.MeasureFactory)\n', (779, 805), False, 'import factory\n'), ((821, 880), 'factory.SelfAttribute', 'factory.SelfAttribute', (['"""measure.goods_nomenclature.item_id"""'], {}), "('measure.goods_nomenclature.item_id')\n", (842, 880), False, 'import factory\n'), ((912, 969), 'factory.SelfAttribute', 'factory.SelfAttribute', (['"""measure.measure_type.description"""'], {}), "('measure.measure_type.description')\n", (933, 969), False, 'import factory\n'), ((990, 1028), 'factory.sequence', 'factory.sequence', (["(lambda n: f'{n}.00%')"], {}), "(lambda n: f'{n}.00%')\n", (1006, 1028), False, 'import factory\n'), ((1445, 1577), 'factory.LazyAttribute', 'factory.LazyAttribute', (['(lambda m: m.measure.order_number.order_number if m.measure.order_number else\n m.measure.dead_order_number)'], {}), '(lambda m: m.measure.order_number.order_number if m.\n measure.order_number else m.measure.dead_order_number)\n', (1466, 1577), False, 'import factory\n'), ((1629, 1804), 'factory.LazyAttribute', 'factory.LazyAttribute', (['(lambda m: m.measure.additional_code.type.sid + m.measure.additional_code.\n code if m.measure.additional_code else m.measure.dead_additional_code)'], {}), '(lambda m: m.measure.additional_code.type.sid + m.\n measure.additional_code.code if m.measure.additional_code else m.\n measure.dead_additional_code)\n', (1650, 1804), False, 'import factory\n'), ((1852, 1904), 'factory.SelfAttribute', 'factory.SelfAttribute', (['"""measure.valid_between.lower"""'], {}), "('measure.valid_between.lower')\n", (1873, 1904), False, 'import factory\n'), ((1929, 1981), 'factory.SelfAttribute', 'factory.SelfAttribute', (['"""measure.valid_between.upper"""'], {}), "('measure.valid_between.upper')\n", (1950, 1981), False, 'import factory\n'), ((2002, 2070), 'factory.SelfAttribute', 'factory.SelfAttribute', (['"""measure.generating_regulation.regulation_id"""'], {}), "('measure.generating_regulation.regulation_id')\n", (2023, 2070), False, 'import factory\n'), ((1236, 1277), 'random.choice', 'random.choice', (['MeasureSheetRow.separators'], {}), '(MeasureSheetRow.separators)\n', (1249, 1277), False, 'import random\n'), ((2131, 2172), 'random.choice', 'random.choice', (['MeasureSheetRow.separators'], {}), '(MeasureSheetRow.separators)\n', (2144, 2172), False, 'import random\n')]
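A hedged usage sketch of the factory above: calling it yields a plain tuple whose fields follow MeasureSheetRow.columns, which is what the sheet importer expects. The test name is made up, and it assumes the project's Django test database is available (e.g. via pytest-django), since MeasureFactory persists related records.

# Hypothetical test usage; requires the project's configured Django test database.
import pytest

from measures.sheet_importers import MeasureSheetRow
from measures.tests.factories import MeasureSheetRowFactory


@pytest.mark.django_db
def test_row_matches_importer_columns():
    row = MeasureSheetRowFactory()                     # builds a Measure, then a tuple of strings
    assert isinstance(row, tuple)
    assert len(row) == len(MeasureSheetRow.columns)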
Remit/autoscaling-simulator
autoscalingsim/scaling/policiesbuilder/metric/correlator/correlator.py
091943c0e9eedf9543e9305682a067ab60f56def
from abc import ABC, abstractmethod
import collections

import pandas as pd

from autoscalingsim.utils.error_check import ErrorChecker


class Correlator(ABC):

    _Registry = {}

    @abstractmethod
    def _compute_correlation(self, metrics_vals_1 : pd.Series, metrics_vals_2 : pd.Series, lag : int):
        pass

    def __init__(self, config : dict):

        history_buffer_size_raw = ErrorChecker.key_check_and_load('history_buffer_size', config, self.__class__.__name__)
        history_buffer_size_value = ErrorChecker.key_check_and_load('value', history_buffer_size_raw, self.__class__.__name__)
        history_buffer_size_unit = ErrorChecker.key_check_and_load('unit', history_buffer_size_raw, self.__class__.__name__)
        self.history_buffer_size = pd.Timedelta(history_buffer_size_value, unit = history_buffer_size_unit)

        max_time_lag_raw = ErrorChecker.key_check_and_load('max_time_lag', config, self.__class__.__name__)
        max_time_lag_value = ErrorChecker.key_check_and_load('value', max_time_lag_raw, self.__class__.__name__)
        max_time_lag_unit = ErrorChecker.key_check_and_load('unit', max_time_lag_raw, self.__class__.__name__)
        self.max_time_lag = pd.Timedelta(max_time_lag_value, unit = max_time_lag_unit)

        self.associated_service_metric_vals = pd.DataFrame()
        self.other_service_metric_vals = collections.defaultdict(pd.DataFrame)

    def _update_data(self, associated_service_metric_vals : pd.DataFrame, other_service_metric_vals : pd.DataFrame):

        if len(self.associated_service_metric_vals.index) > 0:
            self.associated_service_metric_vals = self.associated_service_metric_vals.append(
                associated_service_metric_vals[associated_service_metric_vals.index > max(self.associated_service_metric_vals.index)])
        else:
            self.associated_service_metric_vals = self.associated_service_metric_vals.append(associated_service_metric_vals)

        if self.associated_service_metric_vals.shape[0] > 0:
            self.associated_service_metric_vals = self.associated_service_metric_vals[
                self.associated_service_metric_vals.index >= max(self.associated_service_metric_vals.index) - self.history_buffer_size]

        for service_name, metric_vals in other_service_metric_vals.items():
            if len(self.other_service_metric_vals[service_name].index) > 0:
                self.other_service_metric_vals[service_name] = self.other_service_metric_vals[service_name].append(
                    metric_vals[metric_vals.index > max(self.other_service_metric_vals[service_name].index)])
            else:
                self.other_service_metric_vals[service_name] = self.other_service_metric_vals[service_name].append(metric_vals)

            if self.other_service_metric_vals[service_name].shape[0] > 0:
                self.other_service_metric_vals[service_name] = self.other_service_metric_vals[service_name][
                    self.other_service_metric_vals[service_name].index >= max(self.other_service_metric_vals[service_name].index) - self.history_buffer_size]

    def get_lagged_correlation(self, associated_service_metric_vals : pd.DataFrame, other_service_metric_vals : pd.DataFrame) -> dict:

        self._update_data(associated_service_metric_vals, other_service_metric_vals)
        min_resolution = self._get_minimal_resolution()
        max_lag = self.max_time_lag // min_resolution
        lags_range = range(-max_lag, max_lag)

        lags_per_service = dict()
        for service_name, metric_vals in self.other_service_metric_vals.items():
            other_service_metric_vals_resampled = metric_vals.resample(min_resolution).mean()
            associated_service_metric_vals_resampled = self.associated_service_metric_vals.resample(min_resolution).mean()

            common_len = min(associated_service_metric_vals_resampled.shape[0], other_service_metric_vals_resampled.shape[0])
            associated_service_metric_vals_inp = associated_service_metric_vals_resampled['value'][-common_len:]
            other_service_metric_vals_inp = other_service_metric_vals_resampled['value'][-common_len:]

            if associated_service_metric_vals_inp.shape == other_service_metric_vals_inp.shape:
                corr_raw = { lag : self._compute_correlation(associated_service_metric_vals_inp, other_service_metric_vals_inp, lag) for lag in lags_range }
                corr_pruned = { lag : corr for lag, corr in corr_raw.items() if not corr is None }

                if len(corr_pruned) > 0:
                    linear_correlation_df = pd.DataFrame({'lags': list(corr_pruned.keys()),
                                                          'correlation': list(corr_pruned.values())}).set_index('lags')
                    lags_per_service[service_name] = { 'lag': int(linear_correlation_df.correlation.idxmax()) * min_resolution,
                                                       'correlation': linear_correlation_df.correlation.max() }

        return lags_per_service

    def _get_minimal_resolution(self):

        minimas_to_consider = [pd.Timedelta(1, unit = 's')]

        for service_name, metric_vals in self.other_service_metric_vals.items():
            if metric_vals.shape[0] > 0:
                other_service_metric_vals_min_resolution = min(metric_vals.index.to_series().diff()[1:])
                if not other_service_metric_vals_min_resolution is pd.NaT:
                    minimas_to_consider.append(other_service_metric_vals_min_resolution)

        associated_service_metric_vals_min_resolution = min(self.associated_service_metric_vals.index.to_series().diff()[1:])
        if not associated_service_metric_vals_min_resolution is pd.NaT:
            minimas_to_consider.append(associated_service_metric_vals_min_resolution)

        return min(minimas_to_consider)

    @classmethod
    def register(cls, name : str):

        def decorator(correlator_class):
            cls._Registry[name] = correlator_class
            return correlator_class

        return decorator

    @classmethod
    def get(cls, name : str):

        if not name in cls._Registry:
            raise ValueError(f'An attempt to use a non-existent {cls.__name__} {name}')

        return cls._Registry[name]


from .correlators import *
[((391, 483), 'autoscalingsim.utils.error_check.ErrorChecker.key_check_and_load', 'ErrorChecker.key_check_and_load', (['"""history_buffer_size"""', 'config', 'self.__class__.__name__'], {}), "('history_buffer_size', config, self.\n __class__.__name__)\n", (422, 483), False, 'from autoscalingsim.utils.error_check import ErrorChecker\n'), ((515, 610), 'autoscalingsim.utils.error_check.ErrorChecker.key_check_and_load', 'ErrorChecker.key_check_and_load', (['"""value"""', 'history_buffer_size_raw', 'self.__class__.__name__'], {}), "('value', history_buffer_size_raw, self.\n __class__.__name__)\n", (546, 610), False, 'from autoscalingsim.utils.error_check import ErrorChecker\n'), ((641, 735), 'autoscalingsim.utils.error_check.ErrorChecker.key_check_and_load', 'ErrorChecker.key_check_and_load', (['"""unit"""', 'history_buffer_size_raw', 'self.__class__.__name__'], {}), "('unit', history_buffer_size_raw, self.\n __class__.__name__)\n", (672, 735), False, 'from autoscalingsim.utils.error_check import ErrorChecker\n'), ((766, 836), 'pandas.Timedelta', 'pd.Timedelta', (['history_buffer_size_value'], {'unit': 'history_buffer_size_unit'}), '(history_buffer_size_value, unit=history_buffer_size_unit)\n', (778, 836), True, 'import pandas as pd\n'), ((867, 952), 'autoscalingsim.utils.error_check.ErrorChecker.key_check_and_load', 'ErrorChecker.key_check_and_load', (['"""max_time_lag"""', 'config', 'self.__class__.__name__'], {}), "('max_time_lag', config, self.__class__.__name__\n )\n", (898, 952), False, 'from autoscalingsim.utils.error_check import ErrorChecker\n'), ((977, 1065), 'autoscalingsim.utils.error_check.ErrorChecker.key_check_and_load', 'ErrorChecker.key_check_and_load', (['"""value"""', 'max_time_lag_raw', 'self.__class__.__name__'], {}), "('value', max_time_lag_raw, self.__class__.\n __name__)\n", (1008, 1065), False, 'from autoscalingsim.utils.error_check import ErrorChecker\n'), ((1089, 1176), 'autoscalingsim.utils.error_check.ErrorChecker.key_check_and_load', 'ErrorChecker.key_check_and_load', (['"""unit"""', 'max_time_lag_raw', 'self.__class__.__name__'], {}), "('unit', max_time_lag_raw, self.__class__.\n __name__)\n", (1120, 1176), False, 'from autoscalingsim.utils.error_check import ErrorChecker\n'), ((1200, 1256), 'pandas.Timedelta', 'pd.Timedelta', (['max_time_lag_value'], {'unit': 'max_time_lag_unit'}), '(max_time_lag_value, unit=max_time_lag_unit)\n', (1212, 1256), True, 'import pandas as pd\n'), ((1306, 1320), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1318, 1320), True, 'import pandas as pd\n'), ((1362, 1399), 'collections.defaultdict', 'collections.defaultdict', (['pd.DataFrame'], {}), '(pd.DataFrame)\n', (1385, 1399), False, 'import collections\n'), ((4915, 4940), 'pandas.Timedelta', 'pd.Timedelta', (['(1)'], {'unit': '"""s"""'}), "(1, unit='s')\n", (4927, 4940), True, 'import pandas as pd\n')]
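A hedged sketch of the registry pattern at the end of the class above: concrete correlators register under a name via the decorator and are later looked up with Correlator.get. The subclass and its config values below are illustrative only (a plain Pearson correlation applied at a positional shift), not one of the project's shipped correlators, and the import path is inferred from the repo layout.

# Illustrative subclass; 'pearson_example' and the config values are made up.
import pandas as pd

from autoscalingsim.scaling.policiesbuilder.metric.correlator.correlator import Correlator


@Correlator.register('pearson_example')
class PearsonExampleCorrelator(Correlator):

    def _compute_correlation(self, metrics_vals_1 : pd.Series, metrics_vals_2 : pd.Series, lag : int):
        # Pearson correlation of one series against the other shifted by `lag` samples.
        return metrics_vals_1.corr(metrics_vals_2.shift(lag))


config = {'history_buffer_size': {'value': 10, 'unit': 'm'},
          'max_time_lag': {'value': 30, 'unit': 's'}}
correlator = Correlator.get('pearson_example')(config)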