Dataset schema (column, type, and value range):

    max_stars_repo_path    string   (length 3 to 269)
    max_stars_repo_name    string   (length 4 to 119)
    max_stars_count        int64    (0 to 191k)
    id                     string   (length 1 to 7)
    content                string   (length 6 to 1.05M)
    score                  float64  (0.23 to 5.13)
    int_score              int64    (0 to 5)
max_stars_repo_path: EMeRGE/dssmetrics/constants.py
max_stars_repo_name: NREL/EMeRGE
max_stars_count: 6
id: 1500
content:

<reponame>NREL/EMeRGE
""" Default values : DO NOT CHANGE !!!"""

LOG_FORMAT = "%(asctime)s: %(levelname)s: %(message)s"
DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
MAXITERATIONS = 100
LIFE_PARAMETERS = {"theta_i":30,"theta_fl":36,"theta_gfl":28.6,
                   "R":4.87,"n":1,"tau":3.5,"m":1,"A":-13.391,
                   "B":6972.15,"num_of_iteration":4,}
DEFAULT_TEMP = 25
MAX_TRANS_LOADING = 1.5

DEFAULT_CONFIGURATION = {
    "dss_filepath": "",
    "dss_filename":"",
    "extra_data_path": ".",
    "export_folder":"",
    "start_time":"2018-1-1 0:0:0",
    "end_time":"2018-2-1 0:0:0",
    "simulation_time_step (minute)": 15,
    "frequency": 50,
    "upper_voltage": 1.1,
    "lower_voltage":0.9,
    "record_every": 96,
    "export_voltages": False,
    "export_lineloadings": False,
    "export_transloadings":False,
    "export_start_date": "",
    "export_end_date": "",
    "volt_var": {
        "enabled": False,
        "yarray": [0.44,0.44,0,0,-0.44,-0.44],
        "xarray": [0.7,0.90,0.95,1.05,1.10,1.3]
    },
    "log_settings": {
        "save_in_file": False,
        "log_folder": ".",
        "log_filename":"logs.log",
        "clear_old_log_file": True
    }
}

DEFAULT_ADVANCED_CONFIGURATION = {
    "project_path": "C:\\Users\\KDUWADI\\Desktop\\NREL_Projects\\CIFF-TANGEDCO\\TANGEDCO\\EMERGE\\Projects",
    "active_project":"GR_PALAYAM",
    "active_scenario": "FullYear",
    "dss_filename":"gr_palayam.dss",
    "start_time":"2018-1-1 0:0:0",
    "end_time":"2018-1-2 0:0:0",
    "simulation_time_step (minute)": 60,
    "frequency": 50,
    "upper_voltage": 1.1,
    "lower_voltage":0.9,
    "record_every": 4,
    "parallel_simulation":True,
    "parallel_process": 1,
    "export_voltages": False,
    "export_lineloadings": False,
    "export_transloadings":False,
    "export_start_date": "",
    "export_end_date": "",
    "volt_var": {
        "enabled": True,
        "yarray": [0.44,0.44,0,0,-0.44,-0.44],
        "xarray": [0.7,0.90,0.95,1.05,1.10,1.3]
    },
    "log_settings": {
        "save_in_file": False,
        "log_filename":"",
        "clear_old_log_file": True
    }
}

VALID_SETTINGS = {
    "project_path":{'type':str},
    "active_project":{'type':str},
    "active_scenario":{'type':str},
    "dss_filepath": {'type': str},
    "dss_filename":{'type':str},
    "export_folder":{'type':str},
    "start_time":{'type':str},
    "end_time":{'type':str},
    "simulation_time_step (minute)":{'type':int},
    "frequency": {'type':int,'options':[50,60]},
    "upper_voltage": {'type':float,'range':[1,1.5]},
    "lower_voltage":{'type':float,'range':[0.8,1]},
    "record_every": {'type':int},
    "extra_data_path":{'type':str},
    "parallel_simulation":{'type':bool},
    "parallel_process": {'type':int,'range':[1,4]},
    "export_voltages": {'type':bool},
    "export_lineloadings": {'type':bool},
    "export_transloadings":{'type':bool},
    "export_start_date": {'type':str},
    "export_end_date": {'type':str},
    "volt_var": {
        "enabled": {'type':bool},
        "yarray": {'type':list},
        "xarray": {'type':list}
    },
    "log_settings": {
        "save_in_file": {'type':bool},
        "log_folder": {'type':str},
        "log_filename":{'type':str},
        "clear_old_log_file": {'type':bool}
    }
}

score: 1.289063
int_score: 1
max_stars_repo_path: minesweeper/game.py
max_stars_repo_name: MathisFederico/Minesweeper
max_stars_count: 1
id: 1501
content:
try: import importlib.resources as pkg_resources except ImportError: # Try backported to PY<37 `importlib_resources`. import importlib_resources as pkg_resources from . import images from gym import Env, spaces from time import time import numpy as np from copy import copy import colorsys import pygame from pygame.transform import scale class MinesweeperEnv(Env): def __init__(self, grid_shape=(10, 15), bombs_density=0.1, n_bombs=None, impact_size=3, max_time=999, chicken=False): self.grid_shape = grid_shape self.grid_size = np.prod(grid_shape) self.n_bombs = max(1, int(bombs_density * self.grid_size)) if n_bombs is None else n_bombs self.n_bombs = min(self.grid_size - 1, self.n_bombs) self.flaged_bombs = 0 self.flaged_empty = 0 self.max_time = max_time if impact_size % 2 == 0: raise ValueError('Impact_size must be an odd number !') self.impact_size = impact_size # Define constants self.HIDDEN = 0 self.REVEAL = 1 self.FLAG = 2 self.BOMB = self.impact_size ** 2 # Setting up gym Env conventions nvec_observation = (self.BOMB + 2) * np.ones(self.grid_shape) self.observation_space = spaces.MultiDiscrete(nvec_observation) nvec_action = np.array(self.grid_shape + (2,)) self.action_space = spaces.MultiDiscrete(nvec_action) # Initalize state self.state = np.zeros(self.grid_shape + (2,), dtype=np.uint8) ## Setup bombs places idx = np.indices(self.grid_shape).reshape(2, -1) bombs_ids = np.random.choice(range(self.grid_size), size=self.n_bombs, replace=False) self.bombs_positions = idx[0][bombs_ids], idx[1][bombs_ids] ## Place numbers self.semi_impact_size = (self.impact_size-1)//2 bomb_impact = np.ones((self.impact_size, self.impact_size), dtype=np.uint8) for bombs_id in bombs_ids: bomb_x, bomb_y = idx[0][bombs_id], idx[1][bombs_id] x_min, x_max, dx_min, dx_max = self.clip_index(bomb_x, 0) y_min, y_max, dy_min, dy_max = self.clip_index(bomb_y, 1) bomb_region = self.state[x_min:x_max, y_min:y_max, 0] bomb_region += bomb_impact[dx_min:dx_max, dy_min:dy_max] ## Place bombs self.state[self.bombs_positions + (0,)] = self.BOMB self.start_time = time() self.time_left = int(time() - self.start_time) # Setup rendering self.pygame_is_init = False self.chicken = chicken self.done = False self.score = 0 def get_observation(self): observation = copy(self.state[:, :, 1]) revealed = observation == 1 flaged = observation == 2 observation += self.impact_size ** 2 + 1 observation[revealed] = copy(self.state[:, :, 0][revealed]) observation[flaged] -= 1 return observation def reveal_around(self, coords, reward, done, without_loss=False): if not done: x_min, x_max, _, _ = self.clip_index(coords[0], 0) y_min, y_max, _, _ = self.clip_index(coords[1], 1) region = self.state[x_min:x_max, y_min:y_max, :] unseen_around = np.sum(region[..., 1] == 0) if unseen_around == 0: if not without_loss: reward -= 0.001 return flags_around = np.sum(region[..., 1] == 2) if flags_around == self.state[coords + (0,)]: unrevealed_zeros_around = np.logical_and(region[..., 0] == 0, region[..., 1] == self.HIDDEN) if np.any(unrevealed_zeros_around): zeros_coords = np.argwhere(unrevealed_zeros_around) for zero in zeros_coords: coord = (x_min + zero[0], y_min + zero[1]) self.state[coord + (1,)] = 1 self.reveal_around(coord, reward, done, without_loss=True) self.state[x_min:x_max, y_min:y_max, 1][self.state[x_min:x_max, y_min:y_max, 1] != self.FLAG] = 1 unflagged_bombs_around = np.logical_and(region[..., 0] == self.BOMB, region[..., 1] != self.FLAG) if np.any(unflagged_bombs_around): self.done = True reward, done = -1, True else: if not without_loss: 
reward -= 0.001 def clip_index(self, x, axis): max_idx = self.grid_shape[axis] x_min, x_max = max(0, x-self.semi_impact_size), min(max_idx, x + self.semi_impact_size + 1) dx_min, dx_max = x_min - (x - self.semi_impact_size), x_max - (x + self.semi_impact_size + 1) + self.impact_size return x_min, x_max, dx_min, dx_max def step(self, action): coords = action[:2] action_type = action[2] + 1 # 0 -> 1 = reveal; 1 -> 2 = toggle_flag case_state = self.state[coords + (1,)] case_content = self.state[coords + (0,)] NO_BOMBS_AROUND = 0 reward, done = 0, False self.time_left = self.max_time - time() + self.start_time if self.time_left <= 0: score = -(self.n_bombs - self.flaged_bombs + self.flaged_empty)/self.n_bombs reward, done = score, True return self.get_observation(), reward, done, {'passed':False} if action_type == self.REVEAL: if case_state == self.HIDDEN: self.state[coords + (1,)] = action_type if case_content == self.BOMB: if self.pygame_is_init: self.done = True reward, done = -1, True return self.get_observation(), reward, done, {'passed':False} elif case_content == NO_BOMBS_AROUND: self.reveal_around(coords, reward, done) elif case_state == self.REVEAL: self.reveal_around(coords, reward, done) reward -= 0.01 else: reward -= 0.001 self.score += reward return self.get_observation(), reward, done, {'passed':True} elif action_type == self.FLAG: if case_state == self.REVEAL: reward -= 0.001 else: flaging = 1 if case_state == self.FLAG: flaging = -1 self.state[coords + (1,)] = self.HIDDEN else: self.state[coords + (1,)] = self.FLAG if case_content == self.BOMB: self.flaged_bombs += flaging else: self.flaged_empty += flaging if self.flaged_bombs == self.n_bombs and self.flaged_empty == 0: reward, done = 2 + self.time_left/self.max_time, True if np.any(np.logical_and(self.state[..., 0]==9, self.state[..., 1]==1)) or self.done: reward, done = -1 + self.time_left/self.max_time + (self.flaged_bombs - self.flaged_empty)/self.n_bombs, True self.score += reward return self.get_observation(), reward, done, {'passed':False} def reset(self): self.__init__(self.grid_shape, n_bombs=self.n_bombs, impact_size=self.impact_size, max_time=self.max_time, chicken=self.chicken) return self.get_observation() def render(self): if not self.pygame_is_init: self._init_pygame() self.pygame_is_init = True for event in pygame.event.get(): if event.type == pygame.QUIT: # pylint: disable=E1101 pygame.quit() # pylint: disable=E1101 # Plot background pygame.draw.rect(self.window, (60, 56, 53), (0, 0, self.height, self.width)) # Plot grid for index, state in np.ndenumerate(self.state[..., 1]): self._plot_block(index, state) # Plot infos ## Score score_text = self.score_font.render("SCORE", 1, (255, 10, 10)) score = self.score_font.render(str(round(self.score, 4)), 1, (255, 10, 10)) self.window.blit(score_text, (0.1*self.header_size, 0.75*self.width)) self.window.blit(score, (0.1*self.header_size, 0.8*self.width)) ## Time left time_text = self.num_font.render("TIME", 1, (255, 10, 10)) self.time_left = self.max_time - time() + self.start_time time_left = self.num_font.render(str(int(self.time_left+1)), 1, (255, 10, 10)) self.window.blit(time_text, (0.1*self.header_size, 0.03*self.width)) self.window.blit(time_left, (0.1*self.header_size, 0.1*self.width)) ## Bombs left bombs_text = self.num_font.render("BOMBS", 1, (255, 255, 10)) left_text = self.num_font.render("LEFT", 1, (255, 255, 10)) potential_bombs_left = self.n_bombs - self.flaged_bombs - self.flaged_empty potential_bombs_left = 
self.num_font.render(str(int(potential_bombs_left)), 1, (255, 255, 10)) self.window.blit(bombs_text, (0.1*self.header_size, 0.4*self.width)) self.window.blit(left_text, (0.1*self.header_size, 0.45*self.width)) self.window.blit(potential_bombs_left, (0.1*self.header_size, 0.5*self.width)) pygame.display.flip() pygame.time.wait(10) if self.done: pygame.time.wait(3000) @staticmethod def _get_color(n, max_n): BLUE_HUE = 0.6 RED_HUE = 0.0 HUE = RED_HUE + (BLUE_HUE - RED_HUE) * ((max_n - n) / max_n)**3 color = 255 * np.array(colorsys.hsv_to_rgb(HUE, 1, 0.7)) return color def _plot_block(self, index, state): position = tuple(self.origin + self.scale_factor * self.BLOCK_SIZE * np.array((index[1], index[0]))) label = None if state == self.HIDDEN and not self.done: img_key = 'hidden' elif state == self.FLAG: if not self.done: img_key = 'flag' else: content = self.state[index][0] if content == self.BOMB: img_key = 'disabled_mine' if not self.chicken else 'disabled_chicken' else: img_key = 'misplaced_flag' else: content = self.state[index][0] if content == self.BOMB: if state == self.HIDDEN: img_key = 'mine' if not self.chicken else 'chicken' else: img_key = 'exploded_mine' if not self.chicken else 'exploded_chicken' else: img_key = 'revealed' label = self.num_font.render(str(content), 1, self._get_color(content, self.BOMB)) self.window.blit(self.images[img_key], position) if label: self.window.blit(label, position + self.font_offset - (content > 9) * self.decimal_font_offset) def _init_pygame(self): pygame.init() # pylint: disable=E1101 # Open Pygame window self.scale_factor = 2 * min(12 / self.grid_shape[0], 25 / self.grid_shape[1]) self.BLOCK_SIZE = 32 self.header_size = self.scale_factor * 100 self.origin = np.array([self.header_size, 0]) self.width = int(self.scale_factor * self.BLOCK_SIZE * self.grid_shape[0]) self.height = int(self.scale_factor * self.BLOCK_SIZE * self.grid_shape[1] + self.header_size) self.window = pygame.display.set_mode((self.height, self.width)) # Setup font for numbers num_font_size = 20 self.num_font = pygame.font.SysFont("monospace", int(self.scale_factor * num_font_size)) self.font_offset = self.scale_factor * self.BLOCK_SIZE * np.array([0.325, 0.15]) self.decimal_font_offset = self.scale_factor * self.BLOCK_SIZE * np.array([0.225, 0]) self.score_font = pygame.font.SysFont("monospace", int(self.scale_factor * 12)) # Load images def scale_image(img, scale_factor=self.scale_factor): return scale(img, (int(scale_factor*img.get_width()), int(scale_factor*img.get_height()))) images_names = ['hidden', 'revealed', 'flag', 'misplaced_flag'] if self.chicken: images_names += ['chicken', 'exploded_chicken', 'disabled_chicken'] else: images_names += ['mine', 'exploded_mine', 'disabled_mine'] self.images = {} for img_name in images_names: with pkg_resources.path(images, img_name + '.png') as path: img = pygame.image.load(str(path)).convert() self.images[img_name] = scale_image(img)
score: 2.34375
int_score: 2
max_stars_repo_path: tests/python/gaia-ui-tests/gaiatest/tests/functional/lockscreen/test_lockscreen_unlock_to_camera_with_passcode.py
max_stars_repo_name: BReduardokramer/gaia
max_stars_count: 1
id: 1502
content:

<filename>tests/python/gaia-ui-tests/gaiatest/tests/functional/lockscreen/test_lockscreen_unlock_to_camera_with_passcode.py
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from gaiatest import GaiaTestCase
from gaiatest.apps.lockscreen.app import LockScreen


class TestCameraUnlockWithPasscode(GaiaTestCase):

    # Input data
    _input_passcode = '<PASSWORD>'

    def setUp(self):
        GaiaTestCase.setUp(self)

        # Turn off geolocation prompt
        self.apps.set_permission('System', 'geolocation', 'deny')

        self.data_layer.set_setting('lockscreen.passcode-lock.code', self._input_passcode)
        self.data_layer.set_setting('lockscreen.passcode-lock.enabled', True)

        # this time we need it locked!
        self.lockscreen.lock()
        self.lock_screen = LockScreen(self.marionette)

    def test_unlock_to_camera_with_passcode(self):
        # https://github.com/mozilla/gaia-ui-tests/issues/479

        camera = self.lock_screen.unlock_to_camera()
        self.lock_screen.wait_for_lockscreen_not_visible()
        camera.switch_to_camera_frame()

        self.assertFalse(camera.is_gallery_button_visible)

        camera.tap_switch_source()
        camera.wait_for_capture_ready()

        self.assertFalse(camera.is_gallery_button_visible)

score: 2.34375
int_score: 2
max_stars_repo_path: models/layers/mesh_conv.py
max_stars_repo_name: CallumMcMahon/MeshCNN
max_stars_count: 2
id: 1503
content:

<filename>models/layers/mesh_conv.py
import torch
import torch.nn as nn
import torch.nn.functional as F


class MeshConv(nn.Module):
    """ Computes convolution between edges and 4 incident (1-ring) edge neighbors
    in the forward pass takes:
    x: edge features (Batch x Features x Edges)
    mesh: list of mesh data-structure (len(mesh) == Batch)
    and applies convolution
    """
    def __init__(self, in_channels, out_channels, k=5, bias=True):
        super(MeshConv, self).__init__()
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(1, k), bias=bias)
        self.k = k

    def forward(self, x, mesh):
        x = x.squeeze(-1)
        # pad gemm
        G = torch.cat([self.pad_gemm(i, x.shape[2], x.device) for i in mesh], 0)
        # build 'neighborhood image' and apply convolution
        G = self.create_GeMM(x, G)
        x = self.conv(G)
        return x

    def flatten_gemm_inds(self, Gi):
        (b, ne, nn) = Gi.shape
        ne += 1
        batch_n = torch.floor(torch.arange(b * ne, device=Gi.device).float() / ne).view(b, ne)
        add_fac = batch_n * ne
        add_fac = add_fac.view(b, ne, 1)
        add_fac = add_fac.repeat(1, 1, nn)
        # flatten Gi
        Gi = Gi.float() + add_fac[:, 1:, :]
        return Gi

    def create_GeMM(self, x, Gi):
        """ gathers the edge features (x) with from the 1-ring indices (Gi)
        applys symmetric functions to handle order invariance
        returns a 'fake image' which can use 2d convolution on
        output dimensions: Batch x Channels x Edges x 5
        """
        Gishape = Gi.shape
        # pad the first row of every sample in batch with zeros
        padding = torch.zeros((x.shape[0], x.shape[1], 1), requires_grad=True, device=x.device)
        # add zero feature vector then shift all indices. border edges now reference zero vector
        x = torch.cat((padding, x), dim=2)
        Gi = Gi + 1  # shift

        # first flatten indices
        Gi_flat = self.flatten_gemm_inds(Gi)
        Gi_flat = Gi_flat.view(-1).long()
        #
        odim = x.shape
        x = x.permute(0, 2, 1).contiguous()
        x = x.view(odim[0] * odim[2], odim[1])

        # indices of gemm never reference padded section of x so padded section never used
        f = torch.index_select(x, dim=0, index=Gi_flat)
        f = f.view(Gishape[0], Gishape[1], Gishape[2], -1)
        f = f.permute(0, 3, 1, 2)

        # apply the symmetric functions for an equivariant convolution
        x_1 = f[:, :, :, 1] + f[:, :, :, 3]
        x_2 = f[:, :, :, 2] + f[:, :, :, 4]
        x_3 = torch.abs(f[:, :, :, 1] - f[:, :, :, 3])
        x_4 = torch.abs(f[:, :, :, 2] - f[:, :, :, 4])
        f = torch.stack([f[:, :, :, 0], x_1, x_2, x_3, x_4], dim=3)
        return f

    def pad_gemm(self, m, xsz, device):
        """ extracts one-ring neighbors (4x) -> m.gemm_edges
        which is of size #edges x 4
        add the edge_id itself to make #edges x 5
        then pad to desired size e.g., xsz x 5
        """
        padded_gemm = torch.tensor(m.gemm_edges, device=device).float()
        padded_gemm = padded_gemm.requires_grad_()
        padded_gemm = torch.cat((torch.arange(int(m.edges_count), device=device).float().unsqueeze(1), padded_gemm), dim=1)
        # pad using F
        padded_gemm = F.pad(padded_gemm, (0, 0, 0, xsz - m.edges_count), "constant", 0)
        padded_gemm = padded_gemm.unsqueeze(0)
        return padded_gemm

score: 2.75
int_score: 3
max_stars_repo_path: code/0-input/create_hdf5/check_hdf5.py
max_stars_repo_name: AvinWangZH/3D-convolutional-speaker-recognition
max_stars_count: 1
id: 1504
content:

import tables
import numpy as np
import matplotlib.pyplot as plt

# Reading the file.
fileh = tables.open_file('development.hdf5', mode='r')

# Dimensionality of the data structure.
print(fileh.root.utterance_test.shape)
print(fileh.root.utterance_train.shape)
print(fileh.root.label_train.shape)
print(fileh.root.label_test.shape)

score: 2.546875
int_score: 3
max_stars_repo_path: qtask/utils/testing.py
max_stars_repo_name: LinkTsang/qtask-legacy-python
max_stars_count: 0
id: 1505
content:

<reponame>LinkTsang/qtask-legacy-python
import asyncio
import traceback
import unittest


def async_test(f):
    def wrapper(test_case: unittest.TestCase, *args, **kwargs):
        loop = asyncio.get_event_loop()
        task = loop.create_task(f(test_case, *args, **kwargs))
        try:
            loop.run_until_complete(task)
        except Exception:
            traceback.print_exc()
            raise

    return wrapper

score: 2.484375
int_score: 2
max_stars_repo_path: landing/views.py
max_stars_repo_name: theflatladder/kyrsovaya
max_stars_count: 0
id: 1506
content:

<reponame>theflatladder/kyrsovaya
from django.shortcuts import render, render_to_response, redirect
from django.contrib import auth
from django.contrib.auth.forms import UserCreationForm
from django.template.context_processors import csrf
from django.http import HttpResponseRedirect


def login(request):
    args = {}
    args.update(csrf(request))

    if request.POST:
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = auth.authenticate(username=username, password=password)
        if user is not None:
            auth.login(request, user)
            return redirect('/main')
        else:
            args['login_error'] = "Пользователь не найден или пароль введен неверный пароль"
            return render_to_response('login.html', args)
    else:
        return render_to_response('login.html', args)


def reg(request):
    auth.logout(request)
    error = ''
    if request.method == "POST":
        newuser_form = UserCreationForm(data = request.POST)
        if newuser_form.is_valid():
            newuser_form.save()
            newuser = auth.authenticate(username = newuser_form.cleaned_data['username'], password = <PASSWORD>.cleaned_data['<PASSWORD>'])
            auth.login(request, newuser)
            return redirect('/main')
        else:
            error = 'Проверьте правильность вводимых данных.'
    else:
        newuser_form = UserCreationForm()
    return render(request, 'reg.html', locals() )


def main(request):
    return render(request, 'index.html', {'username': auth.get_user(request).username} )


def logout(request):
    auth.logout(request)
    return HttpResponseRedirect("/login")

score: 2.28125
int_score: 2
max_stars_repo_path: FPRun11.py
max_stars_repo_name: yecfly/DEPRESSIONEST
max_stars_count: 0
id: 1507
content:

<gh_stars>0
from Facepatchindependenttrain import runPatch
import sys

if len(sys.argv)==6:
    runPatch(GPU_Device_ID=1,
             FacePatchID=int(sys.argv[1]),
             trainpklID=int(sys.argv[2]),
             testpklID=int(sys.argv[3]),
             NetworkType=int(sys.argv[4]),
             runs=int(sys.argv[5]))
else:
    print("argument errors, try\npython runfile.py <FacePatchID> <trainpklID> <testpklID> <NetworkType> <runs>")

score: 2.125
int_score: 2
max_stars_repo_path: vendor/packages/translate-toolkit/translate/convert/test_po2tmx.py
max_stars_repo_name: jgmize/kitsune
max_stars_count: 2
id: 1508
content:
<gh_stars>1-10 #!/usr/bin/env python # -*- coding: utf-8 -*- from translate.convert import po2tmx from translate.convert import test_convert from translate.misc import wStringIO from translate.storage import tmx from translate.storage import lisa class TestPO2TMX: def po2tmx(self, posource, sourcelanguage='en', targetlanguage='af'): """helper that converts po source to tmx source without requiring files""" inputfile = wStringIO.StringIO(posource) outputfile = wStringIO.StringIO() outputfile.tmxfile = tmx.tmxfile(inputfile=None, sourcelanguage=sourcelanguage) po2tmx.convertpo(inputfile, outputfile, templatefile=None, sourcelanguage=sourcelanguage, targetlanguage=targetlanguage) return outputfile.tmxfile def test_basic(self): minipo = r"""# Afrikaans translation of program ABC # msgid "" msgstr "" "Project-Id-Version: program 2.1-branch\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2006-01-09 07:15+0100\n" "PO-Revision-Date: 2004-03-30 17:02+0200\n" "Last-Translator: Zuza Software Foundation <<EMAIL>>\n" "Language-Team: Afrikaans <<EMAIL>>\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" # Please remember to do something #: ../dir/file.xml.in.h:1 ../dir/file2.xml.in.h:4 msgid "Applications" msgstr "Toepassings" """ tmx = self.po2tmx(minipo) print "The generated xml:" print str(tmx) assert tmx.translate("Applications") == "Toepassings" assert tmx.translate("bla") is None xmltext = str(tmx) assert xmltext.index('creationtool="Translate Toolkit - po2tmx"') assert xmltext.index('adminlang') assert xmltext.index('creationtoolversion') assert xmltext.index('datatype') assert xmltext.index('o-tmf') assert xmltext.index('segtype') assert xmltext.index('srclang') def test_sourcelanguage(self): minipo = 'msgid "String"\nmsgstr "String"\n' tmx = self.po2tmx(minipo, sourcelanguage="xh") print "The generated xml:" print str(tmx) header = tmx.document.find("header") assert header.get("srclang") == "xh" def test_targetlanguage(self): minipo = 'msgid "String"\nmsgstr "String"\n' tmx = self.po2tmx(minipo, targetlanguage="xh") print "The generated xml:" print str(tmx) tuv = tmx.document.findall(".//%s" % tmx.namespaced("tuv"))[1] #tag[0] will be the source, we want the target tuv assert tuv.get("{%s}lang" % lisa.XML_NS) == "xh" def test_multiline(self): """Test multiline po entry""" minipo = r'''msgid "First part " "and extra" msgstr "Eerste deel " "en ekstra"''' tmx = self.po2tmx(minipo) print "The generated xml:" print str(tmx) assert tmx.translate('First part and extra') == 'Eerste deel en ekstra' def test_escapednewlines(self): """Test the escaping of newlines""" minipo = r'''msgid "First line\nSecond line" msgstr "Eerste lyn\nTweede lyn" ''' tmx = self.po2tmx(minipo) print "The generated xml:" print str(tmx) assert tmx.translate("First line\nSecond line") == "Eerste lyn\nTweede lyn" def test_escapedtabs(self): """Test the escaping of tabs""" minipo = r'''msgid "First column\tSecond column" msgstr "Eerste kolom\tTweede kolom" ''' tmx = self.po2tmx(minipo) print "The generated xml:" print str(tmx) assert tmx.translate("First column\tSecond column") == "Eerste kolom\tTweede kolom" def test_escapedquotes(self): """Test the escaping of quotes (and slash)""" minipo = r'''msgid "Hello \"Everyone\"" msgstr "Good day \"All\"" msgid "Use \\\"." msgstr "Gebruik \\\"." ''' tmx = self.po2tmx(minipo) print "The generated xml:" print str(tmx) assert tmx.translate('Hello "Everyone"') == 'Good day "All"' assert tmx.translate(r'Use \".') == r'Gebruik \".' 
def test_exclusions(self): """Test that empty and fuzzy messages are excluded""" minipo = r'''#, fuzzy msgid "One" msgstr "Een" msgid "Two" msgstr "" msgid "" msgstr "Drie" ''' tmx = self.po2tmx(minipo) print "The generated xml:" print str(tmx) assert "<tu" not in str(tmx) assert len(tmx.units) == 0 def test_nonascii(self): """Tests that non-ascii conversion works.""" minipo = r'''msgid "Bézier curve" msgstr "Bézier-kurwe" ''' tmx = self.po2tmx(minipo) print str(tmx) assert tmx.translate(u"Bézier curve") == u"Bézier-kurwe" class TestPO2TMXCommand(test_convert.TestConvertCommand, TestPO2TMX): """Tests running actual po2tmx commands on files""" convertmodule = po2tmx def test_help(self): """tests getting help""" options = test_convert.TestConvertCommand.test_help(self) options = self.help_check(options, "-l LANG, --language=LANG") options = self.help_check(options, "--source-language=LANG", last=True)
score: 2.34375
int_score: 2
max_stars_repo_path: plugin.video.yatp/libs/client/commands.py
max_stars_repo_name: mesabib/kodi.yatp
max_stars_count: 54
id: 1509
content:

<filename>plugin.video.yatp/libs/client/commands.py<gh_stars>10-100
# coding: utf-8
# Module: commands
# Created on: 28.07.2015
# Author: <NAME> aka <NAME>. (<EMAIL>)
# Licence: GPL v.3: http://www.gnu.org/copyleft/gpl.html
"""
Context menu commands
"""

import sys
import xbmc
import xbmcgui
import json_requests as jsonrq
from simpleplugin import Addon

addon = Addon('plugin.video.yatp')
_ = addon.initialize_gettext()


def show_torrent_info(info_hash):
    """
    Display current torrent info

    :param info_hash:
    :return:
    """
    torr_info = jsonrq.get_torrent_info(info_hash)
    info_dialog = xbmcgui.DialogProgress()
    info_dialog.create(torr_info['name'])
    while not info_dialog.iscanceled():
        info_dialog.update(torr_info['progress'],
                           _('state: {0}; seeds: {1}; peers: {2}').format(
                               torr_info['state'],
                               torr_info['num_seeds'],
                               torr_info['num_peers']
                           ),
                           _('size: {0}MB; DL speed: {1}KB/s; UL speed: {2}KB/s').format(
                               torr_info['size'],
                               torr_info['dl_speed'],
                               torr_info['ul_speed']
                           ),
                           _('total DL: {0}MB; total UL: {1}MB').format(
                               torr_info['total_download'],
                               torr_info['total_upload'])
                           )
        xbmc.sleep(1000)
        torr_info = jsonrq.get_torrent_info(info_hash)


if __name__ == '__main__':
    if sys.argv[1] == 'pause':
        jsonrq.pause_torrent(sys.argv[2])
    elif sys.argv[1] == 'resume':
        jsonrq.resume_torrent(sys.argv[2])
    elif sys.argv[1] == 'delete' and xbmcgui.Dialog().yesno(
            _('Confirm delete'),
            _('Do you really want to delete the torrent?')):
        jsonrq.remove_torrent(sys.argv[2], False)
    elif sys.argv[1] == 'delete_with_files' and xbmcgui.Dialog().yesno(
            _('Confirm delete'),
            _('Do you really want to delete the torrent with files?'),
            _('Warning: The files will be deleted permanently!')):
        jsonrq.remove_torrent(sys.argv[2], True)
    elif sys.argv[1] == 'pause_all':
        jsonrq.pause_all()
    elif sys.argv[1] == 'resume_all':
        jsonrq.resume_all()
    elif sys.argv[1] == 'show_info':
        show_torrent_info(sys.argv[2])
    elif sys.argv[1] == 'restore_finished':
        jsonrq.restore_finished(sys.argv[2])
    else:
        addon.log_debug('Command cancelled or invalid command: {0}'.format(sys.argv[1]))
    xbmc.executebuiltin('Container.Refresh')

score: 2.296875
int_score: 2
max_stars_repo_path: setup.py
max_stars_repo_name: GeorgeDittmar/MarkovTextGenerator
max_stars_count: 1
id: 1510
content:

#!/usr/bin/env python

from distutils.core import setup

setup(name='Mimik',
      version='1.0',
      description='Python framework for markov models',
      author='<NAME>',
      author_email='<EMAIL>',
      url='https://www.python.org/sigs/distutils-sig/',
      packages=['distutils', 'distutils.command'],
      )

score: 1.132813
int_score: 1
max_stars_repo_path: pipeline/scripts/package.py
max_stars_repo_name: deplatformr/open-images
max_stars_count: 2
id: 1511
content:
import os import shutil import sqlite3 import tarfile from datetime import datetime import bagit def create_package(images, batch_dir): package_threshold = 838860800 # 800 Mib to the next power of 2 = 1GiB print("Package threshold: " + get_human_readable_file_size(package_threshold)) abs_path = os.getcwd() try: package_size = 0 for image in images: package_size += image[1] print("Total batch size: " + get_human_readable_file_size(package_size)) if package_size < package_threshold: print("Not enough images yet to make a package from this batch.") return() else: try: # create new batch directory split = os.path.split(batch_dir) new_dir_number = int(split[1]) + 1 new_batch_dir = os.path.join(split[0], str(new_dir_number)) os.makedirs(new_batch_dir) # move all related files for the last image that's getting removed from batch to keep within threshold last_image = images[-1] path, dirs, files = next(os.walk(batch_dir)) for file in files: if file.find(last_image[0]) != -1: filepath = os.path.join(path, file) shutil.move(filepath, os.path.join( new_batch_dir, file)) # drop the last image from the list (convert tuple) to get the package size back under threshold images.pop(-1) except Exception as e: print("Unable to separate batch to make a package.") print(e) return() # Convert batch directory into a Bagit directory external_identifier = "deplatformr-open-images-" + split[1] bagit.make_bag(batch_dir, {'Source-Organization': 'Deplatformr Project', 'Organization-Address': 'https://open-images.deplatformr.com', 'External-Description': 'This package contains a subset of the Google Open Images dataset used for machine learning training. The image files have been downloaded from their Flickr server source, verified for fixity, had EXIF metadata extracted, and are now bundled here with their annotation data, segmentation files and newly generated sha512 checksums. This content and context is described in a sidecar metadata files using schema.org/ImageObject and JSON-LD format.', 'External-Identifier': external_identifier, 'License': 'https://creativecommons.org/licenses/by/2.0/'}, checksums=["sha512"]) print("Created a Bagit directory.") try: # Create the tar package packages_dir = os.path.join( os.getcwd(), "source_data/packages/") tarball_name = external_identifier + ".tar" tarball = tarfile.open(os.path.join( packages_dir, tarball_name), "w") tarball.add(batch_dir, arcname=external_identifier) tarball.close() print("Created tarball " + tarball_name + ".") except Exception as e: print("Unable to create a tarball package from batch.") print(e) return() try: shutil.rmtree(batch_dir) print("Deleted the batch source directory.") except OSError as e: print("Unable to delete the source directory.") print(e) # record the tarball package name for each image db_path = os.path.join( abs_path, "source_data/deplatformr_open_images_v6.sqlite") images_db = sqlite3.connect(db_path) cursor = images_db.cursor() for image in images: cursor.execute("UPDATE open_images SET package_name = ? 
WHERE ImageID = ?", (tarball_name, image[0],),) images_db.commit() images_db.close() # add tarball name, size, and timestamp to the workflow dbase utctime = datetime.utcnow() tarball_size = os.path.getsize( os.path.join(packages_dir, tarball_name)) print("Tarball size is: " + get_human_readable_file_size(tarball_size)) db_path = os.path.join( abs_path, "deplatformr_open_images_workflow.sqlite") workflow_db = sqlite3.connect(db_path) cursor = workflow_db.cursor() for image in images: print("Linking image " + image[0] + " to " + tarball_name + " in SQLite.") cursor.execute( "UPDATE images SET package_name = ? WHERE image_id = ?", (tarball_name, image[0],),) cursor.execute("INSERT INTO packages (name, size, timestamp) VALUES (?,?,?)", (tarball_name, tarball_size, utctime,),) workflow_db.commit() workflow_db.close() except Exception as e: print("Unable to create a package for batch directory " + batch_dir) print(e) def get_human_readable_file_size(size, precision=2): suffixes = ["B", "KiB", "MiB", "GiB", "TiB"] suffixIndex = 0 while size > 1024 and suffixIndex < 4: suffixIndex += 1 # increment the index of the suffix size = size / 1024.0 # apply the division return "%.*f %s" % (precision, size, suffixes[suffixIndex]) return()
score: 2.828125
int_score: 3
max_stars_repo_path: app/search/hot_eval/hl_reportable.py
max_stars_repo_name: don4apaev/anfisa
max_stars_count: 0
id: 1512
content:

def evalRec(env, rec):
    """hl_reportable"""
    return (len(set(rec.Genes) & {
        'ABHD12', 'ACTG1', 'ADGRV1', 'AIFM1', 'ATP6V1B1', 'BCS1L', 'BSND',
        'CABP2', 'CACNA1D', 'CDC14A', 'CDH23', 'CEACAM16', 'CEP78', 'CHD7',
        'CIB2', 'CISD2', 'CLDN14', 'CLIC5', 'CLPP', 'CLRN1', 'COCH',
        'COL11A2', 'DIAPH1', 'DIAPH3', 'DMXL2', 'DNMT1', 'DSPP', 'EDN3',
        'EDNRB', 'EPS8', 'EPS8L2', 'ESPN', 'ESRRB', 'EYA1', 'EYA4', 'GIPC3',
        'GJB2', 'GJB6', 'GPSM2', 'GRHL2', 'GRXCR1', 'GSDME', 'HGF',
        'HSD17B4', 'ILDR1', 'KCNE1', 'KCNQ1', 'KCNQ4', 'LARS2', 'LHFPL5',
        'LOXHD1', 'LRTOMT', 'MARVELD2', 'MIR96', 'MITF', 'MSRB3', 'MT-RNR1',
        'MT-TS1', 'MYH14', 'MYH9', 'MYO15A', 'MYO3A', 'MYO6', 'MYO7A',
        'OSBPL2', 'OTOA', 'OTOF', 'OTOG', 'OTOGL', 'P2RX2', 'PAX3', 'PDZD7',
        'PJVK', 'POU3F4', 'POU4F3', 'PRPS1', 'PTPRQ', 'RDX', 'RIPOR2',
        'S1PR2', 'SERPINB6', 'SIX1', 'SLC17A8', 'SLC26A4', 'SLC52A2',
        'SLITRK6', 'SMPX', 'SOX10', 'STRC', 'SYNE4', 'TBC1D24', 'TECTA',
        'TIMM8A', 'TMC1', 'TMIE', 'TMPRSS3', 'TPRN', 'TRIOBP', 'TUBB4B',
        'USH1C', 'USH1G', 'USH2A', 'WFS1', 'WHRN',
    } ) > 0)

score: 1.570313
int_score: 2
max_stars_repo_path: eval/util/metrics.py
max_stars_repo_name: fau-is/grm
max_stars_count: 5
id: 1513
content:
import sklearn import pandas import seaborn as sns import matplotlib.pyplot as pyplot from functools import reduce # import numpy as np def metrics_from_prediction_and_label(labels, predictions, verbose=False): measures = { "accuracy": sklearn.metrics.accuracy_score(labels, predictions), "balanced_accuracy": sklearn.metrics.balanced_accuracy_score(labels, predictions), "precision_micro": sklearn.metrics.precision_score(labels, predictions, average='micro'), "precision_macro": sklearn.metrics.precision_score(labels, predictions, average='macro'), "precision_weighted": sklearn.metrics.precision_score(labels, predictions, average='weighted'), "recall_micro": sklearn.metrics.recall_score(labels, predictions, average='micro'), "recall_macro": sklearn.metrics.recall_score(labels, predictions, average='macro'), "recall_weighted": sklearn.metrics.recall_score(labels, predictions, average='weighted'), "f1_score_micro": sklearn.metrics.f1_score(labels, predictions, average='micro'), "f1_score_macro": sklearn.metrics.f1_score(labels, predictions, average='macro'), "f1_score_weighted": sklearn.metrics.f1_score(labels, predictions, average='weighted') } try: measures["roc_auc_weighted"] = multi_class_roc_auc_score(labels, predictions, 'weighted') measures["roc_auc_macro"] = multi_class_roc_auc_score(labels, predictions, 'macro') measures["roc_auc_micro"] = multi_class_roc_auc_score(labels, predictions, 'micro') except ValueError: print("Warning: Roc auc score can not be calculated ...") try: # note we use the average precision at different threshold values as the auc of the pr-curve # and not the auc-pr-curve with the trapezoidal rule / linear interpolation because it could be too optimistic measures["auc_prc_weighted"] = multi_class_prc_auc_score(labels, predictions, 'weighted') measures["auc_prc_macro"] = multi_class_prc_auc_score(labels, predictions, 'macro') measures["auc_prc_micro"] = multi_class_prc_auc_score(labels, predictions, 'micro') except ValueError: print("Warning: Auc prc score can not be calculated ...") save_confusion_matrix(labels, predictions) report = save_classification_report(labels, predictions) classes = list(sorted(set(labels))) for pos_class in classes: measures[str(pos_class) + "_precision"] = report[str(pos_class)]['precision'] measures[str(pos_class) + "_recall"] = report[str(pos_class)]['recall'] measures[str(pos_class) + "_f1-score"] = report[str(pos_class)]['f1-score'] measures[str(pos_class) + "_support"] = report[str(pos_class)]['support'] if pos_class == 1: neg_class = 0 else: neg_class = 1 tp, fp, tn, fn = calculate_cm_states(labels, predictions, pos_class, neg_class) measures[str(pos_class) + "_tp"] = tp measures[str(pos_class) + "_fp"] = fp measures[str(pos_class) + "_tn"] = tn measures[str(pos_class) + "_fn"] = fn if tn + fp == 0: pass else: # Specificity or true negative rate measures[str(pos_class) + "_tnr"] = tn / (tn + fp) # Fall out or false positive rate measures[str(pos_class) + "_fpr"] = fp / (fp + tn) if tn + fn == 0: pass else: # Negative predictive value measures[str(pos_class) + "_npv"] = tn / (tn + fn) if tp + fn == 0: pass else: # False negative rate measures[str(pos_class) + "_fnr"] = fn / (tp + fn) if tp + fp == 0: pass else: # False discovery rate measures[str(pos_class) + "_fdr"] = fp / (tp + fp) return measures def calculate_cm_states(labels, predictions, pos_class, neg_class): tp = 0 fp = 0 tn = 0 fn = 0 for i in range(len(predictions)): if labels[i] == predictions[i] == pos_class: tp += 1 if predictions[i] == pos_class and labels[i] != 
predictions[i]: fp += 1 if labels[i] == predictions[i] == neg_class: tn += 1 if predictions[i] == neg_class and labels[i] != predictions[i]: fn += 1 return tp, fp, tn, fn def save_classification_report(labels, predictions): return sklearn.metrics.classification_report(y_true=labels, y_pred=predictions, output_dict=True) def multi_class_roc_auc_score(label, predict, average): label_binarizer = sklearn.preprocessing.LabelBinarizer() label_binarizer.fit(label) label = label_binarizer.transform(label) predict = label_binarizer.transform(predict) return sklearn.metrics.roc_auc_score(label, predict, average=average) def multi_class_prc_auc_score(label, predict, average): label_binarizer = sklearn.preprocessing.LabelBinarizer() label_binarizer.fit(label) label = label_binarizer.transform(label) predict = label_binarizer.transform(predict) return sklearn.metrics.average_precision_score(label, predict, average=average) def label_binarizer(labels): for index in range(0, len(labels)): if labels[index] >= 0.5: labels[index] = 1.0 else: labels[index] = 0.0 return labels def save_confusion_matrix(labels, predictions, path="../../../results/cm.pdf"): classes = sklearn.utils.multiclass.unique_labels(labels, predictions) cms = [] cm = sklearn.metrics.confusion_matrix(labels, predictions) cm_df = pandas.DataFrame(cm, index=classes, columns=classes) cms.append(cm_df) def prettify(n): """ if n > 1000000: return str(np.round(n / 1000000, 1)) + 'M' elif n > 1000: return str(np.round(n / 1000, 1)) + 'K' else: return str(n) """ return str(n) cm = reduce(lambda x, y: x.add(y, fill_value=0), cms) annot = cm.applymap(prettify) cm = (cm.T / cm.sum(axis=1)).T fig, g = pyplot.subplots(figsize=(7, 4.5)) g = sns.heatmap(cm, annot=annot, fmt='', cmap='Blues', cbar=False, rasterized=True, linewidths=0.1) _ = g.set(ylabel='Actual', xlabel='Prediction') for _, spine in g.spines.items(): spine.set_visible(True) pyplot.xticks(rotation=45) fig.tight_layout() fig.savefig(path) pyplot.close()
score: 2.671875
int_score: 3
max_stars_repo_path: dpgs_sandbox/tests/test_bug_migrations_in_base_models.py
max_stars_repo_name: gabrielpiassetta/django-pgschemas
max_stars_count: 0
id: 1514
content:

import warnings
from unittest.mock import patch

from django.apps import apps
from django.core import management
from django.core.management.base import CommandError
from django.db import models
from django.db.utils import ProgrammingError
from django.test import TransactionTestCase, tag

from django_pgschemas.checks import check_schema_names
from django_pgschemas.models import TenantMixin
from django_pgschemas.utils import get_tenant_model

TenantModel = get_tenant_model()


def patched_get_tenant_model(*args, **kwargs):
    class TenantModel(TenantMixin):
        dummy = models.TextField()

        class Meta:
            app_label = get_tenant_model()._meta.app_label

    return TenantModel


@tag("bug")
class MigrationZeroRoundTripTestCase(TransactionTestCase):
    """
    Provoke a handled ProgrammingError by migrating models from empty database.
    """

    def test_database_checks_with_zero_migrations(self):
        management.call_command("migrate", "shared_public", "zero", verbosity=0)
        # The goal is that the next line doesn't raise ProgrammingError
        check_schema_names(apps.get_app_config("django_pgschemas"))
        management.call_command("migrate", verbosity=0)


@tag("bug")
class UnappliedMigrationTestCase(TransactionTestCase):
    """
    Provoke a handled ProgrammingError by running tenant command with pending model changes.
    """

    @classmethod
    def setUpClass(cls):
        tenant1 = TenantModel(schema_name="tenant1")
        tenant1.save(verbosity=0)

    @classmethod
    def tearDownClass(cls):
        for tenant in TenantModel.objects.all():
            tenant.delete(force_drop=True)

    @patch("django_pgschemas.management.commands.get_tenant_model", patched_get_tenant_model)
    def test_whowill_with_pending_migrations(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")  # Avoid warnings about model being registered twice
            with self.assertRaises(CommandError) as ctx:
                management.call_command("whowill", all_schemas=True, verbosity=0)
        self.assertEqual(
            str(ctx.exception),
            "Error while attempting to retrieve dynamic schemas. "
            "Perhaps you need to migrate the 'public' schema first?",
        )


@tag("bug")
class MigrateIgnoringExcludedSchemasTestCase(TransactionTestCase):
    @classmethod
    def setUpClass(cls):
        tenant1 = TenantModel(schema_name="tenant1")
        tenant1.save(verbosity=0)

    @classmethod
    def tearDownClass(cls):
        for tenant in TenantModel.objects.all():
            tenant.delete(force_drop=True)

    def test_migrate_with_exclusions(self):
        # We first unapply a migration with fake so we can reapply it without fake
        # This should work without errors
        management.call_command("migrate", "app_tenants", "0001_initial", fake=True, schemas=["tenant1"], verbosity=0)
        # We then migrate on all schemas except for tenant1, THIS IS THE CASE WE WANT TO TEST
        # This should work without errors
        management.call_command("migrate", all_schemas=True, excluded_schemas=["tenant1"], verbosity=0)
        # If we try to global migrate now, we should get a ProgrammingError
        with self.assertRaises(ProgrammingError):
            management.call_command("migrate", all_schemas=True, verbosity=0)
        # We finally apply the migration again with fake
        # This should work without errors
        management.call_command("migrate", fake=True, all_schemas=True, verbosity=0)

score: 2.078125
int_score: 2
max_stars_repo_path: tfx/components/transform/component.py
max_stars_repo_name: pingsutw/tfx
max_stars_count: 0
id: 1515
content:
<filename>tfx/components/transform/component.py # Lint as: python2, python3 # Copyright 2019 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TFX Transform component definition.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from typing import Optional, Text, Union import absl from tfx import types from tfx.components.base import base_component from tfx.components.base import executor_spec from tfx.components.transform import executor from tfx.orchestration import data_types from tfx.types import artifact from tfx.types import artifact_utils from tfx.types import standard_artifacts from tfx.types.standard_component_specs import TransformSpec class Transform(base_component.BaseComponent): """A TFX component to transform the input examples. The Transform component wraps TensorFlow Transform (tf.Transform) to preprocess data in a TFX pipeline. This component will load the preprocessing_fn from input module file, preprocess both 'train' and 'eval' splits of input examples, generate the `tf.Transform` output, and save both transform function and transformed examples to orchestrator desired locations. ## Providing a preprocessing function The TFX executor will use the estimator provided in the `module_file` file to train the model. The Transform executor will look specifically for the `preprocessing_fn()` function within that file. An example of `preprocessing_fn()` can be found in the [user-supplied code]((https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py)) of the TFX Chicago Taxi pipeline example. ## Example ``` # Performs transformations and feature engineering in training and serving. transform = Transform( examples=example_gen.outputs['examples'], schema=infer_schema.outputs['schema'], module_file=module_file) ``` Please see https://www.tensorflow.org/tfx/transform for more details. """ SPEC_CLASS = TransformSpec EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor) def __init__( self, examples: types.Channel = None, schema: types.Channel = None, module_file: Optional[Union[Text, data_types.RuntimeParameter]] = None, preprocessing_fn: Optional[Union[Text, data_types.RuntimeParameter]] = None, transform_graph: Optional[types.Channel] = None, transformed_examples: Optional[types.Channel] = None, input_data: Optional[types.Channel] = None, instance_name: Optional[Text] = None, enable_cache: Optional[bool] = None): """Construct a Transform component. Args: examples: A Channel of type `standard_artifacts.Examples` (required). This should contain the two splits 'train' and 'eval'. schema: A Channel of type `standard_artifacts.Schema`. This should contain a single schema artifact. module_file: The file path to a python module file, from which the 'preprocessing_fn' function will be loaded. The function must have the following signature. def preprocessing_fn(inputs: Dict[Text, Any]) -> Dict[Text, Any]: ... 
where the values of input and returned Dict are either tf.Tensor or tf.SparseTensor. Exactly one of 'module_file' or 'preprocessing_fn' must be supplied. preprocessing_fn: The path to python function that implements a 'preprocessing_fn'. See 'module_file' for expected signature of the function. Exactly one of 'module_file' or 'preprocessing_fn' must be supplied. transform_graph: Optional output 'TransformPath' channel for output of 'tf.Transform', which includes an exported Tensorflow graph suitable for both training and serving; transformed_examples: Optional output 'ExamplesPath' channel for materialized transformed examples, which includes both 'train' and 'eval' splits. input_data: Backwards compatibility alias for the 'examples' argument. instance_name: Optional unique instance name. Necessary iff multiple transform components are declared in the same pipeline. enable_cache: Optional boolean to indicate if cache is enabled for the Transform component. If not specified, defaults to the value specified for pipeline's enable_cache parameter. Raises: ValueError: When both or neither of 'module_file' and 'preprocessing_fn' is supplied. """ if input_data: absl.logging.warning( 'The "input_data" argument to the Transform component has ' 'been renamed to "examples" and is deprecated. Please update your ' 'usage as support for this argument will be removed soon.') examples = input_data if bool(module_file) == bool(preprocessing_fn): raise ValueError( "Exactly one of 'module_file' or 'preprocessing_fn' must be supplied." ) transform_graph = transform_graph or types.Channel( type=standard_artifacts.TransformGraph, artifacts=[standard_artifacts.TransformGraph()]) if not transformed_examples: example_artifact = standard_artifacts.Examples() example_artifact.split_names = artifact_utils.encode_split_names( artifact.DEFAULT_EXAMPLE_SPLITS) transformed_examples = types.Channel( type=standard_artifacts.Examples, artifacts=[example_artifact]) spec = TransformSpec( examples=examples, schema=schema, module_file=module_file, preprocessing_fn=preprocessing_fn, transform_graph=transform_graph, transformed_examples=transformed_examples) super(Transform, self).__init__( spec=spec, instance_name=instance_name, enable_cache=enable_cache)
score: 1.882813
int_score: 2
max_stars_repo_path: objects/GitIndexEntry.py
max_stars_repo_name: anderslatif/alg
max_stars_count: 0
id: 1516
content:

# https://github.com/git/git/blob/master/Documentation/technical/index-format.txt
class GitIndexEntry(object):

    # The last time a file's metadata changed. This is a tuple (seconds, nanoseconds)
    ctime = None

    # The last time a file's data changed. This is a tuple (seconds, nanoseconds)
    mtime = None

    # the ID of device containing this file
    dev = None

    # The file's inode number
    ino = None

    # The object type, either b1000 (regular), b1010 (symlink), b1110 (gitlink)
    mode_type = None

    # The object permissions as an integer
    mode_permissions = None

    # User ID of owner
    uui = None

    # Group ID of owner
    gid = None

    # Size of this object in bytes
    size = None

    # The object's hash as a hex string
    object = None

    flag_assume_valid = None
    flag_extended = None
    flag_stage = None

    # Length of the name if < 0xFFF, -1 otherwise
    flag_name_length = None

    name = None

score: 2.3125
int_score: 2
max_stars_repo_path: matdgl/layers/partitionpaddinglayer.py
max_stars_repo_name: huzongxiang/CrystalNetwork
max_stars_count: 6
id: 1517
content:

# -*- coding: utf-8 -*-
"""
Created on Wed Oct 13 14:47:13 2021

@author: huzongxiang
"""

import tensorflow as tf
from tensorflow.keras import layers


class PartitionPadding(layers.Layer):
    def __init__(self, batch_size, **kwargs):
        super().__init__(**kwargs)
        self.batch_size = batch_size

    def call(self, inputs):
        features, graph_indices = inputs

        # Obtain subgraphs
        features = tf.dynamic_partition(
            features, graph_indices, self.batch_size
        )

        # Pad and stack subgraphs
        num_features = [tf.shape(f)[0] for f in features]
        max_num = tf.reduce_max(num_features)
        features_padded = tf.stack(
            [
                tf.pad(f, [(0, max_num - n), (0, 0)])
                for f, n in zip(features, num_features)
            ],
            axis=0,
        )

        # Remove empty subgraphs (usually for last batch)
        nonempty_examples = tf.where(tf.reduce_sum(features_padded, (1, 2)) != 0)
        nonempty_examples = tf.squeeze(nonempty_examples, axis=-1)

        features_batch = tf.gather(features_padded, nonempty_examples, axis=0)

        return features_batch

    def get_config(self):
        config = super().get_config()
        config.update({"batch": self.batch_size})
        return config


class PartitionPaddingPair(layers.Layer):
    def __init__(self, batch_size, **kwargs):
        super().__init__(**kwargs)
        self.batch_size = batch_size

    def call(self, inputs):
        features, graph_indices = inputs

        # Obtain subgraphs
        features = tf.dynamic_partition(
            features, graph_indices, self.batch_size
        )

        # Pad and stack subgraphs
        num_features = [tf.shape(f)[0] for f in features]
        max_num = tf.reduce_max(num_features)
        features_padded = tf.stack(
            [
                tf.pad(f, [(0, max_num - n), (0, 0)])
                for f, n in zip(features, num_features)
            ],
            axis=0,
        )

        # Remove empty subgraphs (usually for last batch)
        nonempty_examples = tf.unique(graph_indices)[0]

        features_batch = tf.gather(features_padded, nonempty_examples, axis=0)

        return features_batch

    def get_config(self):
        config = super().get_config()
        config.update({"batch_size": self.batch_size})
        return config

score: 2.546875
int_score: 3
max_stars_repo_path: lino_book/projects/min9/settings/memory.py
max_stars_repo_name: khchine5/book
max_stars_count: 1
id: 1518
content:

<gh_stars>1-10
from .demo import *

SITE.verbose_name = SITE.verbose_name + " (:memory:)"
# SITE = Site(globals(), title=Site.title+" (:memory:)")

DATABASES['default']['NAME'] = ':memory:'

score: 1.625
int_score: 2
max_stars_repo_path: reservation/urls.py
max_stars_repo_name: aryamanak10/diner-restaurant-website
max_stars_count: 1
id: 1519
content:

from django.urls import path
from . import views

app_name = 'reservation'

urlpatterns = [
    path('', views.reserve_table, name = 'reserve_table'),
]

score: 1.53125
int_score: 2
max_stars_repo_path: chainer/_version.py
max_stars_repo_name: yumetov/chainer
max_stars_count: 3,705
id: 1520
content:

__version__ = '7.8.0'


_optional_dependencies = [
    {
        'name': 'CuPy',
        'packages': [
            'cupy-cuda120',
            'cupy-cuda114',
            'cupy-cuda113',
            'cupy-cuda112',
            'cupy-cuda111',
            'cupy-cuda110',
            'cupy-cuda102',
            'cupy-cuda101',
            'cupy-cuda100',
            'cupy-cuda92',
            'cupy-cuda91',
            'cupy-cuda90',
            'cupy-cuda80',
            'cupy',
        ],
        'specifier': '>=7.7.0,<8.0.0',
        'help': 'https://docs.cupy.dev/en/latest/install.html',
    },
    {
        'name': 'iDeep',
        'packages': [
            'ideep4py',
        ],
        'specifier': '>=2.0.0.post3, <2.1',
        'help': 'https://docs.chainer.org/en/latest/tips.html',
    },
]

score: 1.085938
int_score: 1
max_stars_repo_path: image_aug.py
max_stars_repo_name: qwerasdf887/image_augmentation
max_stars_count: 0
id: 1521
content:
<filename>image_aug.py
# coding=UTF-8
# This Python file uses the following encoding: utf-8
import cv2
import numpy as np
import xml.etree.cElementTree as ET
from random import sample

# default args:
default_args = {'noise_prob': 0.1,
                'gasuss_mean': 0,
                'gasuss_var': 0.001,
                'rand_hug': 30,
                'rand_saturation': 30,
                'rand_light': 30,
                'rot_angle': 15,
                'bordervalue': (127, 127, 127),
                'zoom_out_value': 0.7,
                'output_shape': (416, 416),
                'take_value': 5
                }

# add black (pepper) noise
def sp_noise(image, box_loc=None, **kwargs):
    h, w = image.shape[0:2]
    noise = np.random.rand(h, w)
    out_img = image.copy()
    out_img[noise < kwargs['noise_prob']] = 0
    if box_loc is None:
        return out_img
    else:
        return out_img, box_loc

# Gaussian noise
def gasuss_noise(image, box_loc=None, **kwargs):
    out_img = (image / 255.) - 0.5
    noise = np.random.normal(kwargs['gasuss_mean'], kwargs['gasuss_var'] ** 0.5, image.shape)
    out_img = out_img + noise + 0.5
    out_img[out_img < 0] = 0
    out_img[out_img > 1] = 1
    out_img = (out_img * 255).astype(np.uint8)
    if box_loc is None:
        return out_img
    else:
        return out_img, box_loc

# adjust hue (add a random value in -N~N to the hue channel)
def mod_hue(image, box_loc=None, **kwargs):
    out_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32)
    out_img[:, :, 0] += np.random.randint(-kwargs['rand_hug'], kwargs['rand_hug'])
    out_img = cv2.cvtColor(np.clip(out_img, 0, 180).astype(np.uint8), cv2.COLOR_HSV2BGR)
    if box_loc is None:
        return out_img
    else:
        return out_img, box_loc

# adjust saturation (add a random value in -N~N to the saturation channel)
def mod_saturation(image, box_loc=None, **kwargs):
    out_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32)
    out_img[:, :, 1] += np.random.randint(-kwargs['rand_saturation'], kwargs['rand_saturation'])
    out_img = cv2.cvtColor(np.clip(out_img, 0, 255).astype(np.uint8), cv2.COLOR_HSV2BGR)
    if box_loc is None:
        return out_img
    else:
        return out_img, box_loc

# adjust brightness (add a random value in -N~N to the value channel)
def mod_light(image, box_loc=None, **kwargs):
    out_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV).astype(np.float32)
    out_img[:, :, 2] += np.random.randint(-kwargs['rand_light'], kwargs['rand_light'])
    out_img = cv2.cvtColor(np.clip(out_img, 0, 255).astype(np.uint8), cv2.COLOR_HSV2BGR)
    if box_loc is None:
        return out_img
    else:
        return out_img, box_loc

# horizontal flip
def horizontal_flip(image, box_loc=None, **kwargs):
    '''
    Args:
        box_loc: bounding box location (x_min, y_min, x_max, y_max)
    '''
    if box_loc is None:
        return cv2.flip(image, 1)
    else:
        w = image.shape[1]
        for i in box_loc:
            if i[2] == 0:
                break
            else:
                x_min, x_max = i[0], i[2]
                i[0] = w - x_max
                i[2] = w - x_min
        return cv2.flip(image, 1), box_loc

# vertical flip
def vertical_flip(image, box_loc=None, **kwargs):
    '''
    Args:
        box_loc: bounding box location (num box, (x_min, y_min, x_max, y_max, label))
    '''
    if box_loc is None:
        return cv2.flip(image, 0)
    else:
        h = image.shape[0]
        for i in box_loc:
            if i[3] == 0:
                break
            else:
                y_min, y_max = i[1], i[3]
                i[1] = h - y_max
                i[3] = h - y_min
        return cv2.flip(image, 0), box_loc

# rotate by a random angle in -n~n degrees
def rot_image(image, box_loc=None, **kwargs):
    '''
    Args:
        box_loc: bounding box location (num box, (x_min, y_min, x_max, y_max, label))
        rot: range of rotation angles
        bordervalue: fill value for the blank border
    '''
    h, w, _ = image.shape
    center = (w // 2, h // 2)
    angle = np.random.randint(-kwargs['rot_angle'], kwargs['rot_angle'])
    M = cv2.getRotationMatrix2D(center, angle, 1)
    out_img = cv2.warpAffine(image, M, (w, h), borderValue=kwargs['bordervalue'])

    if box_loc is None:
        return out_img
    else:
        loc = box_loc[:, 0:4].copy()
        loc = np.append(loc, loc[:, 0:1], axis=-1)
        loc = np.append(loc, loc[:, 3:4], axis=-1)
        loc = np.append(loc, loc[:, 2:3], axis=-1)
        loc = np.append(loc, loc[:, 1:2], axis=-1)
        loc = loc.reshape(-1, 4, 2)
        loc = loc - np.array(center)
        rot_loc = loc.dot(np.transpose(M[:, 0:2]))
        rot_loc = rot_loc + np.array(center)
        rot_box = np.hstack([np.min(rot_loc, axis=-2), np.max(rot_loc, axis=-2), box_loc[:, 4:5]])
        rot_box = np.floor(rot_box)
        rot_box[..., 0:4] = np.clip(rot_box[..., 0:4], [0, 0, 0, 0], [w - 1, h - 1, w - 1, h - 1])
        return out_img, rot_box

# resize the image while keeping its aspect ratio
def resize_img(image, box_loc=None, **kwargs):
    h, w, _ = image.shape
    max_edge = max(kwargs['output_shape'][0], kwargs['output_shape'][1])
    scale = min(max_edge / h, max_edge / w)
    h = int(h * scale)
    w = int(w * scale)

    if box_loc is None:
        return cv2.resize(image, (w, h))
    else:
        box_loc[:, 0] = box_loc[:, 0] * scale
        box_loc[:, 1] = box_loc[:, 1] * scale
        box_loc[:, 2] = box_loc[:, 2] * scale
        box_loc[:, 3] = box_loc[:, 3] * scale
        return cv2.resize(image, (w, h)), box_loc.astype(np.int32)

# pad the image to the target size
def padding_img(image, box_loc=None, **kwargs):
    h, w, _ = image.shape
    dx = int((kwargs['output_shape'][1] - w) / 2)
    dy = int((kwargs['output_shape'][0] - h) / 2)

    out_img = np.ones((kwargs['output_shape'][0], kwargs['output_shape'][1], 3), np.uint8) * kwargs['bordervalue'][0]
    out_img[dy: dy + h, dx: dx + w] = cv2.resize(image, (w, h))

    if box_loc is None:
        return out_img
    else:
        box_loc[:, 0] = box_loc[:, 0] + dx
        box_loc[:, 1] = box_loc[:, 1] + dy
        box_loc[:, 2] = box_loc[:, 2] + dx
        box_loc[:, 3] = box_loc[:, 3] + dy
        return out_img, box_loc.astype(np.int32)

# randomly zoom out by a factor between value and 1
def random_zoom_out(image, box_loc=None, **kwargs):
    h, w, _ = image.shape
    scale = np.random.uniform(kwargs['zoom_out_value'], 1)
    h = int(h * scale)
    w = int(w * scale)
    dx = int((image.shape[1] - w) / 2)
    dy = int((image.shape[0] - h) / 2)

    out_img = np.ones(image.shape, np.uint8) * kwargs['bordervalue'][0]
    out_img[dy: dy + h, dx: dx + w] = cv2.resize(image, (w, h))

    if box_loc is None:
        return out_img
    else:
        box_loc[:, 0] = box_loc[:, 0] * scale + dx
        box_loc[:, 1] = box_loc[:, 1] * scale + dy
        box_loc[:, 2] = box_loc[:, 2] * scale + dx
        box_loc[:, 3] = box_loc[:, 3] * scale + dy
        return out_img, box_loc.astype(np.int32)

# load annotation data (Pascal VOC style XML, despite the function name)
def load_csv(xml_path, max_boxes=4):
    tree = ET.parse(xml_path)
    root = tree.getroot()

    # location list
    loc_list = np.zeros((0, 5))
    box_count = 0

    for obj in root.iter('object'):
        if box_count >= max_boxes:
            break
        '''
        difficult = obj.find('difficult').text
        cls = obj.find('name').text
        if cls not in classes or int(difficult) == 1:
            continue
        cls_id = classes.index(cls)
        '''
        loc = obj.find('bndbox')
        x_min = int(loc.find('xmin').text)
        y_min = int(loc.find('ymin').text)
        x_max = int(loc.find('xmax').text)
        y_max = int(loc.find('ymax').text)

        loc_list = np.vstack([loc_list, np.array([x_min, y_min, x_max, y_max, 0])])
        box_count += 1

    return loc_list.astype(np.float32)

# draw rectangle
def draw_rect(image, box_loc):
    for i in box_loc:
        cv2.rectangle(image, (int(i[0]), int(i[1])), (int(i[2]), int(i[3])), (0, 255, 0), 4)

def print_args(**kwargs):
    for key, value in kwargs.items():
        print('key name: {}\nvalue:{}\n'.format(key, value))

# randomly pick 0~N image augmentation methods and apply them
def rand_aug_image(image, box_loc=None, **kwargs):
    if box_loc is None:
        out_img = resize_img(image, **kwargs)
    else:
        out_img, box_loc = resize_img(image, box_loc, **kwargs)

    # total augmentation functions
    func_list = [sp_noise, gasuss_noise, mod_hue, mod_saturation, mod_light,
                 horizontal_flip, vertical_flip, rot_image, random_zoom_out]
    # randomly take functions
    take_func = sample(func_list, np.random.randint(kwargs['take_value']))

    for func in take_func:
        if box_loc is None:
            out_img = func(out_img, **kwargs)
        else:
            out_img, box_loc = func(out_img, box_loc, **kwargs)

    if box_loc is None:
        out_img = padding_img(out_img, **kwargs)
        return out_img
    else:
        out_img, box_loc = padding_img(out_img, box_loc, **kwargs)
        return out_img, box_loc

if __name__ == "__main__":
    img = cv2.imread('./00002.jpg')
    bbox = load_csv('./00002.xml')

    # black-dot noise
    #aug_img = sp_noise(img, **default_args)
    #aug_img, bbox = sp_noise(img, bbox, **default_args)

    # Gaussian noise
    #aug_img = gasuss_noise(img, **default_args)
    #aug_img, bbox = gasuss_noise(img, bbox, **default_args)

    # adjust hue
    #aug_img = mod_hue(img, **default_args)
    #aug_img, bbox = mod_hue(img, bbox, **default_args)

    # adjust saturation
    #aug_img = mod_saturation(img, **default_args)
    #aug_img, bbox = mod_saturation(img, bbox, **default_args)

    # adjust brightness
    #aug_img = mod_light(img, **default_args)
    #aug_img, bbox = mod_light(img, bbox, **default_args)

    # horizontal flip
    #aug_img = horizontal_flip(img, **default_args)
    #aug_img, bbox = horizontal_flip(img, bbox, **default_args)

    # vertical flip
    #aug_img = vertical_flip(img, **default_args)
    #aug_img, bbox = vertical_flip(img, bbox, **default_args)

    # rotate by a random angle
    #aug_img = rot_image(img, **default_args)
    #aug_img, bbox = rot_image(img, bbox, **default_args)

    # resize to the target size keeping aspect ratio
    #aug_img = resize_img(img, **default_args)
    #aug_img, bbox = resize_img(img, bbox, **default_args)

    # pad to the target size
    #aug_img = padding_img(aug_img, **default_args)
    #aug_img, bbox = padding_img(aug_img, bbox, **default_args)

    # randomly zoom out by a factor between N and 1
    #aug_img = random_zoom_out(img, **default_args)
    #aug_img, bbox = random_zoom_out(img, bbox, **default_args)

    # randomly pick and apply augmentation methods
    aug_img = rand_aug_image(img, **default_args)
    #aug_img, bbox = rand_aug_image(img, bbox, **default_args)

    print(bbox)
    draw_rect(aug_img, bbox)
    cv2.imshow('img', img)
    cv2.imshow('aug img', aug_img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
2.625
3
03_picnic/picnic.py
intimanipuchi/tiny_python_projects
0
1522
#!/usr/bin/env python3 """ Author : <NAME> <<EMAIL>> Date : 2021-12-15 Purpose: Working with lists """ import argparse # -------------------------------------------------- def get_args(): """Get command-line arguments""" parser = argparse.ArgumentParser( description="Working with lists", formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) parser.add_argument("items", type=str, nargs="+", metavar="str", help="item(s) to bring") parser.add_argument("-s", "--sorted", help="a boolean flag", action="store_true") return parser.parse_args() # -------------------------------------------------- def main(): """The main function: formatting and printing the output""" args = get_args() sort_flag = args.sorted items = args.items if sort_flag: items = sorted(items) if len(items) == 1: print(f"You are bringing {items[0]}.") elif len(items) < 3: items.insert(-1, "and") print(f"You are bringing {' '.join(items)}.") else: # print(items) last = items[-1] and_last = "and " + last items[-1] = and_last # print(items) print(f"You are bringing {', '.join(items)}.") # -------------------------------------------------- if __name__ == "__main__": main()
4.21875
4
triangle.py
montyshyama/python-basics
0
1523
<filename>triangle.py
side_a = int(input("Enter the first side (a): "))
side_b = int(input("Enter the second side (b): "))
side_c = int(input("Enter the third side (c): "))
if side_a == side_b and side_a == side_c:
    print("The triangle is an equilateral triangle.")
elif side_a == side_b or side_a == side_c or side_b == side_c:
    print("The triangle is an isosceles triangle.")
else:
    print("The triangle is a scalene triangle.")
4.1875
4
david/modules/artist/view.py
ktmud/david
2
1524
<reponame>ktmud/david # -*- coding: utf-8 -*- from flask import Blueprint, request from david.lib.template import st from .model import Artist bp = Blueprint('artist', __name__) @bp.app_template_global('artists') def artists(): return Artist.query.all() @bp.route('/artist/<uid>/') def intro(uid): artist = Artist.get_or_404(uid) return st('modules/artist/show.html', **locals()) @bp.route('/artist/<uid>/detail') def detail(uid): artist = Artist.get_or_404(uid) return st('modules/artist/detailed.html', **locals())
2.203125
2
Volume Estimation/volume.py
JessieRamaux/Food-Volume-Estimation
10
1525
import numpy as np import cv2 import os import json import glob from PIL import Image, ImageDraw plate_diameter = 25 #cm plate_depth = 1.5 #cm plate_thickness = 0.2 #cm def Max(x, y): if (x >= y): return x else: return y def polygons_to_mask(img_shape, polygons): mask = np.zeros(img_shape, dtype=np.uint8) mask = Image.fromarray(mask) xy = list(map(tuple, polygons)) ImageDraw.Draw(mask).polygon(xy=xy, outline=1, fill=1) mask = np.array(mask, dtype=bool) return mask def mask2box(mask): index = np.argwhere(mask == 1) rows = index[:, 0] clos = index[:, 1] left_top_r = np.min(rows) left_top_c = np.min(clos) right_bottom_r = np.max(rows) right_bottom_c = np.max(clos) return [left_top_c, left_top_r, right_bottom_c, right_bottom_r] def get_bbox(points, h, w): polygons = points mask = polygons_to_mask([h,w], polygons) return mask2box(mask) def get_scale(points, img, lowest): bbox = get_bbox(points, img.shape[0], img.shape[1]) diameter = (bbox[2]-bbox[0]+1+bbox[3]-bbox[1]+1)/2 len_per_pix = plate_diameter/float(diameter) avg = 0 k = 0 for point in points: avg += img[point[1]][point[0]] k += 1 avg = avg/float(k) depth = lowest - avg depth_per_pix = plate_depth/depth return len_per_pix, depth_per_pix def cal_volume(points, img, len_per_pix, depth_per_pix, lowest): volume = 0.0 bbox = get_bbox(points, img.shape[0], img.shape[1]) points = np.array(points) shape = points.shape points = points.reshape(shape[0], 1, shape[1]) for i in range(bbox[0], bbox[2]+1): for j in range(bbox[1], bbox[3]+1): if (cv2.pointPolygonTest(points, (i,j), False) >= 0): volume += Max(0, (lowest - img[j][i]) * depth_per_pix - plate_thickness) * len_per_pix * len_per_pix return volume def get_volume(img, json_path): lowest = np.max(img) vol_dict = {} #print(lowest) len_per_pix = 0.0 depth_per_pix = 0.0 with open(json_path, 'r') as json_file: data = json.load(json_file) for shape in data['shapes']: if (shape['label'] == "plate"): len_per_pix, depth_per_pix = get_scale(shape['points'], img, lowest) #print(len_per_pix, depth_per_pix) break for shape in data['shapes']: label = shape['label'] if (label == "plate"): continue points = shape['points'] volume = cal_volume(points, img, len_per_pix, depth_per_pix, lowest) if (label in vol_dict): vol_dict[label] += volume else: vol_dict[label] = volume return vol_dict img = cv2.imread("out.png",0) print(get_volume(img,"test.json"))
2.5
2
t2k/bin/cmttags.py
tianluyuan/pyutils
1
1526
#!/usr/bin/env python
"""
A script to create tags for CMT managed packages. Call from within
cmt/ directory
"""
import subprocess
import sys
import os
from optparse import OptionParser

__author__ = '<NAME>'
__email__ = 't<EMAIL>uan [at] colorado.edu'

# Ignore large external packages for now
IGNORES = ['CMT', 'EXTERN', 'GSL', 'MYSQL', 'GEANT', 'CLHEP']
# Extensions for finding src files, must satisfy unix wildcard rules
# (each value must be a tuple so build_find_args iterates patterns, not characters)
EXTENSIONS = {'cpp': ('*.[hc]', '*.[hc]xx', '*.[hc]pp', '*.cc', '*.hh'),
              'python': ('*.py',),
              'java': ('*.java',)}
# Ignore these files and dirs, key specifies argument to find
# (e.g. '-iname')
PRUNE = {'iname': ['*_Dict.[hc]*', '*linkdef.h']}


def check_dir():
    """ Are we inside cmt/ """
    if os.path.basename(os.getcwd()) != 'cmt':
        sys.exit('Not inside cmt directory!')


def check_requirements():
    """ Ensure that requirements file exists in cmt dir """
    if not os.path.isfile('requirements'):
        sys.exit('No requirements file!')


def init_use_dict():
    """Returns the initial use_dict which contains the current (cwd)
    package and its path. 'cmt show uses' does not include the package
    itself.
    """
    # Must call os.path.dirname because the cwd should be inside a cmt
    # directory
    return {'this': os.path.dirname(os.getcwd())}


def parse_uses():
    """ Returns a dict of used packages and their root dir paths.

    e.g. {ROOT:/path/to/cmt/installed/ROOT/vXrY}
    """
    check_dir()
    check_requirements()
    proc = subprocess.Popen(['cmt', 'show', 'uses'], stdout=subprocess.PIPE)
    use_dict = init_use_dict()
    for line in iter(proc.stdout.readline, ''):
        tokens = line.split()
        # ignore lines that start with '#'
        if line[0] != '#' and tokens[1] not in IGNORES:
            basepath = tokens[-1].strip('()')
            # highland and psyche do not strictly follow CMT path
            # organization. They have subpackages within a master, so
            # we need to take that into account
            relpath_list = [master for master in tokens[3:-1]]
            relpath_list.extend([tokens[1], tokens[2]])
            use_dict[tokens[1]] = os.path.join(basepath, *relpath_list)
    return use_dict


def get_exts(opts):
    if opts.python:
        return EXTENSIONS['python']
    elif opts.java:
        return EXTENSIONS['java']
    else:
        return EXTENSIONS['cpp']


def build_find_args(exts):
    """ exts is a list of file extensions corresponding to the files we
    want to search. This will return a list of arguments that can be
    passed to `find`
    """
    find_args = []
    for a_ext in exts:
        # -o for "or"
        find_args.extend(['-o', '-iname'])
        find_args.append('{0}'.format(a_ext))
    # replace first '-o' with '(' for grouping matches
    find_args[0] = '('
    # append parens for grouping negation
    find_args.extend([')', '('])
    # Add prune files
    for match_type in PRUNE:
        for aprune in PRUNE[match_type]:
            find_args.append('-not')
            find_args.append('-' + match_type)
            find_args.append('{0}'.format(aprune))
    find_args.append(')')
    return find_args


def build_find_cmd(opts, paths):
    """ Builds the find command whose output is piped to etags.
Returns cmd based on the following template: 'find {0} -type f {1} | etags -' """ find_args = build_find_args(get_exts(opts)) return ['find']+paths+['-type', 'f']+find_args def build_tags_cmd(): return ['etags', '-'] def main(): """ Uses ctags to generate TAGS file in cmt directory based on cmt show uses """ parser = OptionParser() parser.add_option('--cpp', dest='cpp', action='store_true', default=False, help='tag only c/cpp files (default)') parser.add_option('--python', dest='python', action='store_true', default=False, help='tag only python files') parser.add_option('--java', dest='java', action='store_true', default=False, help='tag only java files') parser.add_option('-n', dest='dry_run', action='store_true', default=False, help='dry run') (opts, args) = parser.parse_args() # get the cmt show uses dictionary of programs and paths use_dict = parse_uses() # build the commands find_cmd = build_find_cmd(opts, list(use_dict.itervalues())) tags_cmd = build_tags_cmd() print 'Creating TAGS file based on dependencies:' print use_dict if not opts.dry_run: find_proc = subprocess.Popen(find_cmd, stdout=subprocess.PIPE) tags_proc = subprocess.Popen(tags_cmd, stdin=find_proc.stdout) tags_proc.communicate() if __name__ == '__main__': main()
2.359375
2
salt/daemons/masterapi.py
rickh563/salt
0
1527
# -*- coding: utf-8 -*- ''' This module contains all of the routines needed to set up a master server, this involves preparing the three listeners and the workers needed by the master. ''' from __future__ import absolute_import # Import python libs import fnmatch import logging import os import re import time import stat import tempfile # Import salt libs import salt.crypt import salt.utils import salt.client import salt.payload import salt.pillar import salt.state import salt.runner import salt.auth import salt.wheel import salt.minion import salt.search import salt.key import salt.fileserver import salt.utils.atomicfile import salt.utils.event import salt.utils.verify import salt.utils.minions import salt.utils.gzip_util import salt.utils.jid from salt.pillar import git_pillar from salt.utils.event import tagify from salt.exceptions import SaltMasterError # Import 3rd-party libs import salt.ext.six as six try: import pwd HAS_PWD = True except ImportError: # pwd is not available on windows HAS_PWD = False log = logging.getLogger(__name__) # Things to do in lower layers: # only accept valid minion ids def init_git_pillar(opts): ''' Clear out the ext pillar caches, used when the master starts ''' pillargitfs = [] for opts_dict in [x for x in opts.get('ext_pillar', [])]: if 'git' in opts_dict: try: import git except ImportError: return pillargitfs parts = opts_dict['git'].strip().split() try: br = parts[0] loc = parts[1] except IndexError: log.critical( 'Unable to extract external pillar data: {0}' .format(opts_dict['git']) ) else: pillargitfs.append( git_pillar.GitPillar( br, loc, opts ) ) return pillargitfs def clean_fsbackend(opts): ''' Clean out the old fileserver backends ''' # Clear remote fileserver backend caches so they get recreated for backend in ('git', 'hg', 'svn'): if backend in opts['fileserver_backend']: env_cache = os.path.join( opts['cachedir'], '{0}fs'.format(backend), 'envs.p' ) if os.path.isfile(env_cache): log.debug('Clearing {0}fs env cache'.format(backend)) try: os.remove(env_cache) except OSError as exc: log.critical( 'Unable to clear env cache file {0}: {1}' .format(env_cache, exc) ) file_lists_dir = os.path.join( opts['cachedir'], 'file_lists', '{0}fs'.format(backend) ) try: file_lists_caches = os.listdir(file_lists_dir) except OSError: continue for file_lists_cache in fnmatch.filter(file_lists_caches, '*.p'): cache_file = os.path.join(file_lists_dir, file_lists_cache) try: os.remove(cache_file) except OSError as exc: log.critical( 'Unable to file_lists cache file {0}: {1}' .format(cache_file, exc) ) def clean_expired_tokens(opts): ''' Clean expired tokens from the master ''' serializer = salt.payload.Serial(opts) for (dirpath, dirnames, filenames) in os.walk(opts['token_dir']): for token in filenames: token_path = os.path.join(dirpath, token) with salt.utils.fopen(token_path) as token_file: token_data = serializer.loads(token_file.read()) if 'expire' not in token_data or token_data.get('expire', 0) < time.time(): try: os.remove(token_path) except (IOError, OSError): pass def clean_pub_auth(opts): try: auth_cache = os.path.join(opts['cachedir'], 'publish_auth') if not os.path.exists(auth_cache): return else: for (dirpath, dirnames, filenames) in os.walk(auth_cache): for auth_file in filenames: auth_file_path = os.path.join(dirpath, auth_file) if not os.path.isfile(auth_file_path): continue if os.path.getmtime(auth_file_path) - time.time() > opts['keep_jobs']: os.remove(auth_file_path) except (IOError, OSError): log.error('Unable to delete pub auth file') def 
clean_old_jobs(opts): ''' Clean out the old jobs from the job cache ''' # TODO: better way to not require creating the masterminion every time? mminion = salt.minion.MasterMinion( opts, states=False, rend=False, ) # If the master job cache has a clean_old_jobs, call it fstr = '{0}.clean_old_jobs'.format(opts['master_job_cache']) if fstr in mminion.returners: mminion.returners[fstr]() def access_keys(opts): ''' A key needs to be placed in the filesystem with permissions 0400 so clients are required to run as root. ''' users = [] keys = {} acl_users = set(opts['client_acl'].keys()) if opts.get('user'): acl_users.add(opts['user']) acl_users.add(salt.utils.get_user()) if HAS_PWD: for user in pwd.getpwall(): users.append(user.pw_name) for user in acl_users: log.info( 'Preparing the {0} key for local communication'.format( user ) ) if HAS_PWD: if user not in users: try: user = pwd.getpwnam(user).pw_name except KeyError: log.error('ACL user {0} is not available'.format(user)) continue keyfile = os.path.join( opts['cachedir'], '.{0}_key'.format(user) ) if os.path.exists(keyfile): log.debug('Removing stale keyfile: {0}'.format(keyfile)) os.unlink(keyfile) key = salt.crypt.Crypticle.generate_key_string() cumask = os.umask(191) with salt.utils.fopen(keyfile, 'w+') as fp_: fp_.write(key) os.umask(cumask) # 600 octal: Read and write access to the owner only. # Write access is necessary since on subsequent runs, if the file # exists, it needs to be written to again. Windows enforces this. os.chmod(keyfile, 0o600) if HAS_PWD: try: os.chown(keyfile, pwd.getpwnam(user).pw_uid, -1) except OSError: # The master is not being run as root and can therefore not # chown the key file pass keys[user] = key return keys def fileserver_update(fileserver): ''' Update the fileserver backends, requires that a built fileserver object be passed in ''' try: if not fileserver.servers: log.error( 'No fileservers loaded, the master will not be able to ' 'serve files to minions' ) raise SaltMasterError('No fileserver backends available') fileserver.update() except Exception as exc: log.error( 'Exception {0} occurred in file server update'.format(exc), exc_info_on_loglevel=logging.DEBUG ) class AutoKey(object): ''' Implement the methods to run auto key acceptance and rejection ''' def __init__(self, opts): self.opts = opts def check_permissions(self, filename): ''' Check if the specified filename has correct permissions ''' if salt.utils.is_windows(): return True # After we've ascertained we're not on windows try: user = self.opts['user'] pwnam = pwd.getpwnam(user) uid = pwnam[2] gid = pwnam[3] groups = salt.utils.get_gid_list(user, include_default=False) except KeyError: log.error( 'Failed to determine groups for user {0}. 
The user is not ' 'available.\n'.format( user ) ) return False fmode = os.stat(filename) if os.getuid() == 0: if fmode.st_uid == uid or fmode.st_gid != gid: return True elif self.opts.get('permissive_pki_access', False) \ and fmode.st_gid in groups: return True else: if stat.S_IWOTH & fmode.st_mode: # don't allow others to write to the file return False # check group flags if self.opts.get('permissive_pki_access', False) and stat.S_IWGRP & fmode.st_mode: return True elif stat.S_IWGRP & fmode.st_mode: return False # check if writable by group or other if not (stat.S_IWGRP & fmode.st_mode or stat.S_IWOTH & fmode.st_mode): return True return False def check_signing_file(self, keyid, signing_file): ''' Check a keyid for membership in a signing file ''' if not signing_file or not os.path.exists(signing_file): return False if not self.check_permissions(signing_file): message = 'Wrong permissions for {0}, ignoring content' log.warn(message.format(signing_file)) return False with salt.utils.fopen(signing_file, 'r') as fp_: for line in fp_: line = line.strip() if line.startswith('#'): continue else: if salt.utils.expr_match(keyid, line): return True return False def check_autosign_dir(self, keyid): ''' Check a keyid for membership in a autosign directory. ''' autosign_dir = os.path.join(self.opts['pki_dir'], 'minions_autosign') # cleanup expired files expire_minutes = self.opts.get('autosign_expire_minutes', 10) if expire_minutes > 0: min_time = time.time() - (60 * int(expire_minutes)) for root, dirs, filenames in os.walk(autosign_dir): for f in filenames: stub_file = os.path.join(autosign_dir, f) mtime = os.path.getmtime(stub_file) if mtime < min_time: log.warn('Autosign keyid expired {0}'.format(stub_file)) os.remove(stub_file) stub_file = os.path.join(autosign_dir, keyid) if not os.path.exists(stub_file): return False os.remove(stub_file) return True def check_autoreject(self, keyid): ''' Checks if the specified keyid should automatically be rejected. ''' return self.check_signing_file( keyid, self.opts.get('autoreject_file', None) ) def check_autosign(self, keyid): ''' Checks if the specified keyid should automatically be signed. 
''' if self.opts['auto_accept']: return True if self.check_signing_file(keyid, self.opts.get('autosign_file', None)): return True if self.check_autosign_dir(keyid): return True return False class RemoteFuncs(object): ''' Funcitons made available to minions, this class includes the raw routines post validation that make up the minion access to the master ''' def __init__(self, opts): self.opts = opts self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False) self.serial = salt.payload.Serial(opts) self.ckminions = salt.utils.minions.CkMinions(opts) # Create the tops dict for loading external top data self.tops = salt.loader.tops(self.opts) # Make a client self.local = salt.client.get_local_client(mopts=self.opts) # Create the master minion to access the external job cache self.mminion = salt.minion.MasterMinion( self.opts, states=False, rend=False) self.__setup_fileserver() def __setup_fileserver(self): ''' Set the local file objects from the file server interface ''' fs_ = salt.fileserver.Fileserver(self.opts) self._serve_file = fs_.serve_file self._file_hash = fs_.file_hash self._file_list = fs_.file_list self._file_list_emptydirs = fs_.file_list_emptydirs self._dir_list = fs_.dir_list self._symlink_list = fs_.symlink_list self._file_envs = fs_.envs def __verify_minion_publish(self, load): ''' Verify that the passed information authorized a minion to execute ''' # Verify that the load is valid if 'peer' not in self.opts: return False if not isinstance(self.opts['peer'], dict): return False if any(key not in load for key in ('fun', 'arg', 'tgt', 'ret', 'id')): return False # If the command will make a recursive publish don't run if re.match('publish.*', load['fun']): return False # Check the permissions for this minion perms = [] for match in self.opts['peer']: if re.match(match, load['id']): # This is the list of funcs/modules! 
if isinstance(self.opts['peer'][match], list): perms.extend(self.opts['peer'][match]) if ',' in load['fun']: # 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']] load['fun'] = load['fun'].split(',') arg_ = [] for arg in load['arg']: arg_.append(arg.split()) load['arg'] = arg_ good = self.ckminions.auth_check( perms, load['fun'], load['tgt'], load.get('tgt_type', 'glob'), publish_validate=True) if not good: return False return True def _master_opts(self, load): ''' Return the master options to the minion ''' mopts = {} file_roots = {} envs = self._file_envs() for saltenv in envs: if saltenv not in file_roots: file_roots[saltenv] = [] mopts['file_roots'] = file_roots if load.get('env_only'): return mopts mopts['renderer'] = self.opts['renderer'] mopts['failhard'] = self.opts['failhard'] mopts['state_top'] = self.opts['state_top'] mopts['nodegroups'] = self.opts['nodegroups'] mopts['state_auto_order'] = self.opts['state_auto_order'] mopts['state_events'] = self.opts['state_events'] mopts['state_aggregate'] = self.opts['state_aggregate'] mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks'] mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks'] return mopts def _ext_nodes(self, load, skip_verify=False): ''' Return the results from an external node classifier if one is specified ''' if not skip_verify: if 'id' not in load: log.error('Received call for external nodes without an id') return {} if not salt.utils.verify.valid_id(self.opts, load['id']): return {} # Evaluate all configured master_tops interfaces opts = {} grains = {} ret = {} if 'opts' in load: opts = load['opts'] if 'grains' in load['opts']: grains = load['opts']['grains'] for fun in self.tops: if fun not in self.opts.get('master_tops', {}): continue try: ret.update(self.tops[fun](opts=opts, grains=grains)) except Exception as exc: # If anything happens in the top generation, log it and move on log.error( 'Top function {0} failed with error {1} for minion ' '{2}'.format( fun, exc, load['id'] ) ) return ret def _mine_get(self, load, skip_verify=False): ''' Gathers the data from the specified minions' mine ''' if not skip_verify: if any(key not in load for key in ('id', 'tgt', 'fun')): return {} if 'mine_get' in self.opts: # If master side acl defined. 
if not isinstance(self.opts['mine_get'], dict): return {} perms = set() for match in self.opts['mine_get']: if re.match(match, load['id']): if isinstance(self.opts['mine_get'][match], list): perms.update(self.opts['mine_get'][match]) if not any(re.match(perm, load['fun']) for perm in perms): return {} ret = {} if not salt.utils.verify.valid_id(self.opts, load['id']): return ret match_type = load.get('expr_form', 'glob') if match_type.lower() == 'pillar': match_type = 'pillar_exact' if match_type.lower() == 'compound': match_type = 'compound_pillar_exact' checker = salt.utils.minions.CkMinions(self.opts) minions = checker.check_minions( load['tgt'], match_type, greedy=False ) for minion in minions: mine = os.path.join( self.opts['cachedir'], 'minions', minion, 'mine.p') try: with salt.utils.fopen(mine, 'rb') as fp_: fdata = self.serial.load(fp_).get(load['fun']) if fdata: ret[minion] = fdata except Exception: continue return ret def _mine(self, load, skip_verify=False): ''' Return the mine data ''' if not skip_verify: if 'id' not in load or 'data' not in load: return False if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False): cdir = os.path.join(self.opts['cachedir'], 'minions', load['id']) if not os.path.isdir(cdir): os.makedirs(cdir) datap = os.path.join(cdir, 'mine.p') if not load.get('clear', False): if os.path.isfile(datap): with salt.utils.fopen(datap, 'rb') as fp_: new = self.serial.load(fp_) if isinstance(new, dict): new.update(load['data']) load['data'] = new with salt.utils.fopen(datap, 'w+b') as fp_: fp_.write(self.serial.dumps(load['data'])) return True def _mine_delete(self, load): ''' Allow the minion to delete a specific function from its own mine ''' if 'id' not in load or 'fun' not in load: return False if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False): cdir = os.path.join(self.opts['cachedir'], 'minions', load['id']) if not os.path.isdir(cdir): return False datap = os.path.join(cdir, 'mine.p') if os.path.isfile(datap): try: with salt.utils.fopen(datap, 'rb') as fp_: mine_data = self.serial.load(fp_) if isinstance(mine_data, dict): if mine_data.pop(load['fun'], False): with salt.utils.fopen(datap, 'w+b') as fp_: fp_.write(self.serial.dumps(mine_data)) except OSError: return False return True def _mine_flush(self, load, skip_verify=False): ''' Allow the minion to delete all of its own mine contents ''' if not skip_verify and 'id' not in load: return False if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False): cdir = os.path.join(self.opts['cachedir'], 'minions', load['id']) if not os.path.isdir(cdir): return False datap = os.path.join(cdir, 'mine.p') if os.path.isfile(datap): try: os.remove(datap) except OSError: return False return True def _file_recv(self, load): ''' Allows minions to send files to the master, files are sent to the master file cache ''' if any(key not in load for key in ('id', 'path', 'loc')): return False if not self.opts['file_recv'] or os.path.isabs(load['path']): return False if os.path.isabs(load['path']) or '../' in load['path']: # Can overwrite master files!! 
return False if not salt.utils.verify.valid_id(self.opts, load['id']): return False file_recv_max_size = 1024*1024 * self.opts['file_recv_max_size'] if 'loc' in load and load['loc'] < 0: log.error('Invalid file pointer: load[loc] < 0') return False if len(load['data']) + load.get('loc', 0) > file_recv_max_size: log.error( 'Exceeding file_recv_max_size limit: {0}'.format( file_recv_max_size ) ) return False # Normalize Windows paths normpath = load['path'] if ':' in normpath: # make sure double backslashes are normalized normpath = normpath.replace('\\', '/') normpath = os.path.normpath(normpath) cpath = os.path.join( self.opts['cachedir'], 'minions', load['id'], 'files', normpath) cdir = os.path.dirname(cpath) if not os.path.isdir(cdir): try: os.makedirs(cdir) except os.error: pass if os.path.isfile(cpath) and load['loc'] != 0: mode = 'ab' else: mode = 'wb' with salt.utils.fopen(cpath, mode) as fp_: if load['loc']: fp_.seek(load['loc']) fp_.write(load['data']) return True def _pillar(self, load): ''' Return the pillar data for the minion ''' if any(key not in load for key in ('id', 'grains')): return False pillar = salt.pillar.Pillar( self.opts, load['grains'], load['id'], load.get('saltenv', load.get('env')), load.get('ext'), self.mminion.functions, pillar=load.get('pillar_override', {})) pillar_dirs = {} data = pillar.compile_pillar(pillar_dirs=pillar_dirs) if self.opts.get('minion_data_cache', False): cdir = os.path.join(self.opts['cachedir'], 'minions', load['id']) if not os.path.isdir(cdir): os.makedirs(cdir) datap = os.path.join(cdir, 'data.p') tmpfh, tmpfname = tempfile.mkstemp(dir=cdir) os.close(tmpfh) with salt.utils.fopen(tmpfname, 'w+b') as fp_: fp_.write( self.serial.dumps( {'grains': load['grains'], 'pillar': data}) ) # On Windows, os.rename will fail if the destination file exists. 
salt.utils.atomicfile.atomic_rename(tmpfname, datap) return data def _minion_event(self, load): ''' Receive an event from the minion and fire it on the master event interface ''' if 'id' not in load: return False if 'events' not in load and ('tag' not in load or 'data' not in load): return False if 'events' in load: for event in load['events']: self.event.fire_event(event, event['tag']) # old dup event if load.get('pretag') is not None: if 'data' in event: self.event.fire_event(event['data'], tagify(event['tag'], base=load['pretag'])) else: self.event.fire_event(event, tagify(event['tag'], base=load['pretag'])) else: tag = load['tag'] self.event.fire_event(load, tag) return True def _return(self, load): ''' Handle the return data sent from the minions ''' # Generate EndTime endtime = salt.utils.jid.jid_to_time(salt.utils.jid.gen_jid()) # If the return data is invalid, just ignore it if any(key not in load for key in ('return', 'jid', 'id')): return False if load['jid'] == 'req': # The minion is returning a standalone job, request a jobid prep_fstr = '{0}.prep_jid'.format(self.opts['master_job_cache']) load['jid'] = self.mminion.returners[prep_fstr](nocache=load.get('nocache', False)) # save the load, since we don't have it saveload_fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[saveload_fstr](load['jid'], load) log.info('Got return from {id} for job {jid}'.format(**load)) self.event.fire_event(load, load['jid']) # old dup event self.event.fire_event(load, tagify([load['jid'], 'ret', load['id']], 'job')) self.event.fire_ret_load(load) if not self.opts['job_cache'] or self.opts.get('ext_job_cache'): return fstr = '{0}.update_endtime'.format(self.opts['master_job_cache']) if (self.opts.get('job_cache_store_endtime') and fstr in self.mminion.returners): self.mminion.returners[fstr](load['jid'], endtime) fstr = '{0}.returner'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](load) def _syndic_return(self, load): ''' Receive a syndic minion return and format it to look like returns from individual minions. ''' # Verify the load if any(key not in load for key in ('return', 'jid', 'id')): return None # if we have a load, save it if 'load' in load: fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](load['jid'], load['load']) # Format individual return loads for key, item in six.iteritems(load['return']): ret = {'jid': load['jid'], 'id': key, 'return': item} if 'out' in load: ret['out'] = load['out'] self._return(ret) def minion_runner(self, load): ''' Execute a runner from a minion, return the runner's function data ''' if 'peer_run' not in self.opts: return {} if not isinstance(self.opts['peer_run'], dict): return {} if any(key not in load for key in ('fun', 'arg', 'id')): return {} perms = set() for match in self.opts['peer_run']: if re.match(match, load['id']): # This is the list of funcs/modules! if isinstance(self.opts['peer_run'][match], list): perms.update(self.opts['peer_run'][match]) good = False for perm in perms: if re.match(perm, load['fun']): good = True if not good: # The minion is not who it says it is! # We don't want to listen to it! 
log.warn( 'Minion id {0} is not who it says it is!'.format( load['id'] ) ) return {} # Prepare the runner object opts = {'fun': load['fun'], 'arg': load['arg'], 'id': load['id'], 'doc': False, 'conf_file': self.opts['conf_file']} opts.update(self.opts) runner = salt.runner.Runner(opts) return runner.run() def pub_ret(self, load, skip_verify=False): ''' Request the return data from a specific jid, only allowed if the requesting minion also initialted the execution. ''' if not skip_verify and any(key not in load for key in ('jid', 'id')): return {} else: auth_cache = os.path.join( self.opts['cachedir'], 'publish_auth') if not os.path.isdir(auth_cache): os.makedirs(auth_cache) jid_fn = os.path.join(auth_cache, load['jid']) with salt.utils.fopen(jid_fn, 'r') as fp_: if not load['id'] == fp_.read(): return {} return self.local.get_cache_returns(load['jid']) def minion_pub(self, load): ''' Publish a command initiated from a minion, this method executes minion restrictions so that the minion publication will only work if it is enabled in the config. The configuration on the master allows minions to be matched to salt functions, so the minions can only publish allowed salt functions The config will look like this: peer: .*: - .* This configuration will enable all minions to execute all commands. peer: foo.example.com: - test.* This configuration will only allow the minion foo.example.com to execute commands from the test module ''' if not self.__verify_minion_publish(load): return {} # Set up the publication payload pub_load = { 'fun': load['fun'], 'arg': load['arg'], 'expr_form': load.get('tgt_type', 'glob'), 'tgt': load['tgt'], 'ret': load['ret'], 'id': load['id'], } if 'tgt_type' in load: if load['tgt_type'].startswith('node'): if load['tgt'] in self.opts['nodegroups']: pub_load['tgt'] = self.opts['nodegroups'][load['tgt']] pub_load['expr_form_type'] = 'compound' pub_load['expr_form'] = load['tgt_type'] else: return {} else: pub_load['expr_form'] = load['tgt_type'] ret = {} ret['jid'] = self.local.cmd_async(**pub_load) ret['minions'] = self.ckminions.check_minions( load['tgt'], pub_load['expr_form']) auth_cache = os.path.join( self.opts['cachedir'], 'publish_auth') if not os.path.isdir(auth_cache): os.makedirs(auth_cache) jid_fn = os.path.join(auth_cache, str(ret['jid'])) with salt.utils.fopen(jid_fn, 'w+') as fp_: fp_.write(load['id']) return ret def minion_publish(self, load): ''' Publish a command initiated from a minion, this method executes minion restrictions so that the minion publication will only work if it is enabled in the config. The configuration on the master allows minions to be matched to salt functions, so the minions can only publish allowed salt functions The config will look like this: peer: .*: - .* This configuration will enable all minions to execute all commands. 
peer: foo.example.com: - test.* This configuration will only allow the minion foo.example.com to execute commands from the test module ''' if not self.__verify_minion_publish(load): return {} # Set up the publication payload pub_load = { 'fun': load['fun'], 'arg': load['arg'], 'expr_form': load.get('tgt_type', 'glob'), 'tgt': load['tgt'], 'ret': load['ret'], 'id': load['id'], } if 'tmo' in load: try: pub_load['timeout'] = int(load['tmo']) except ValueError: msg = 'Failed to parse timeout value: {0}'.format( load['tmo']) log.warn(msg) return {} if 'timeout' in load: try: pub_load['timeout'] = int(load['timeout']) except ValueError: msg = 'Failed to parse timeout value: {0}'.format( load['timeout']) log.warn(msg) return {} if 'tgt_type' in load: if load['tgt_type'].startswith('node'): if load['tgt'] in self.opts['nodegroups']: pub_load['tgt'] = self.opts['nodegroups'][load['tgt']] pub_load['expr_form_type'] = 'compound' else: return {} else: pub_load['expr_form'] = load['tgt_type'] pub_load['raw'] = True ret = {} for minion in self.local.cmd_iter(**pub_load): if load.get('form', '') == 'full': data = minion if 'jid' in minion: ret['__jid__'] = minion['jid'] data['ret'] = data.pop('return') ret[minion['id']] = data else: ret[minion['id']] = minion['return'] if 'jid' in minion: ret['__jid__'] = minion['jid'] for key, val in six.iteritems(self.local.get_cache_returns(ret['__jid__'])): if key not in ret: ret[key] = val if load.get('form', '') != 'full': ret.pop('__jid__') return ret def revoke_auth(self, load): ''' Allow a minion to request revocation of its own key ''' if 'id' not in load: return False keyapi = salt.key.Key(self.opts) keyapi.delete_key(load['id'], preserve_minions=load.get('preserve_minion_cache', False)) return True class LocalFuncs(object): ''' Set up methods for use only from the local system ''' # The ClearFuncs object encapsulates the functions that can be executed in # the clear: # publish (The publish from the LocalClient) # _auth def __init__(self, opts, key): self.opts = opts self.serial = salt.payload.Serial(opts) self.key = key # Create the event manager self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False) # Make a client self.local = salt.client.get_local_client(mopts=self.opts) # Make an minion checker object self.ckminions = salt.utils.minions.CkMinions(opts) # Make an Auth object self.loadauth = salt.auth.LoadAuth(opts) # Stand up the master Minion to access returner data self.mminion = salt.minion.MasterMinion( self.opts, states=False, rend=False) # Make a wheel object self.wheel_ = salt.wheel.Wheel(opts) def runner(self, load): ''' Send a master control function back to the runner system ''' if 'token' in load: try: token = self.loadauth.get_tok(load['token']) except Exception as exc: msg = 'Exception occurred when generating auth token: {0}'.format( exc) log.error(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) if not token: msg = 'Authentication failure of type "token" occurred.' log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) if token['eauth'] not in self.opts['external_auth']: msg = 'Authentication failure of type "token" occurred.' 
log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) good = self.ckminions.runner_check( self.opts['external_auth'][token['eauth']][token['name']] if token['name'] in self.opts['external_auth'][token['eauth']] else self.opts['external_auth'][token['eauth']]['*'], load['fun']) if not good: msg = ('Authentication failure of type "token" occurred for ' 'user {0}.').format(token['name']) log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) try: fun = load.pop('fun') runner_client = salt.runner.RunnerClient(self.opts) return runner_client.async( fun, load.get('kwarg', {}), token['name']) except Exception as exc: log.error('Exception occurred while ' 'introspecting {0}: {1}'.format(fun, exc)) return dict(error=dict(name=exc.__class__.__name__, args=exc.args, message=str(exc))) if 'eauth' not in load: msg = ('Authentication failure of type "eauth" occurred for ' 'user {0}.').format(load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) if load['eauth'] not in self.opts['external_auth']: # The eauth system is not enabled, fail msg = ('Authentication failure of type "eauth" occurred for ' 'user {0}.').format(load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) try: name = self.loadauth.load_name(load) if not (name in self.opts['external_auth'][load['eauth']]) | ('*' in self.opts['external_auth'][load['eauth']]): msg = ('Authentication failure of type "eauth" occurred for ' 'user {0}.').format(load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) if not self.loadauth.time_auth(load): msg = ('Authentication failure of type "eauth" occurred for ' 'user {0}.').format(load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) good = self.ckminions.runner_check( self.opts['external_auth'][load['eauth']][name] if name in self.opts['external_auth'][load['eauth']] else self.opts['external_auth'][load['eauth']]['*'], load['fun']) if not good: msg = ('Authentication failure of type "eauth" occurred for ' 'user {0}.').format(load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) try: fun = load.pop('fun') runner_client = salt.runner.RunnerClient(self.opts) return runner_client.async(fun, load.get('kwarg', {}), load.get('username', 'UNKNOWN')) except Exception as exc: log.error('Exception occurred while ' 'introspecting {0}: {1}'.format(fun, exc)) return dict(error=dict(name=exc.__class__.__name__, args=exc.args, message=str(exc))) except Exception as exc: log.error( 'Exception occurred in the runner system: {0}'.format(exc) ) return dict(error=dict(name=exc.__class__.__name__, args=exc.args, message=str(exc))) def wheel(self, load): ''' Send a master control function back to the wheel system ''' # All wheel ops pass through eauth if 'token' in load: try: token = self.loadauth.get_tok(load['token']) except Exception as exc: msg = 'Exception occurred when generating auth token: {0}'.format( exc) log.error(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) if not token: msg = 'Authentication failure of type "token" occurred.' 
log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) if token['eauth'] not in self.opts['external_auth']: msg = 'Authentication failure of type "token" occurred.' log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) good = self.ckminions.wheel_check( self.opts['external_auth'][token['eauth']][token['name']] if token['name'] in self.opts['external_auth'][token['eauth']] else self.opts['external_auth'][token['eauth']]['*'], load['fun']) if not good: msg = ('Authentication failure of type "token" occurred for ' 'user {0}.').format(token['name']) log.warning(msg) return dict(error=dict(name='TokenAuthenticationError', message=msg)) jid = salt.utils.jid.gen_jid() fun = load.pop('fun') tag = tagify(jid, prefix='wheel') data = {'fun': "wheel.{0}".format(fun), 'jid': jid, 'tag': tag, 'user': token['name']} try: self.event.fire_event(data, tagify([jid, 'new'], 'wheel')) ret = self.wheel_.call_func(fun, **load) data['return'] = ret data['success'] = True self.event.fire_event(data, tagify([jid, 'ret'], 'wheel')) return {'tag': tag, 'data': data} except Exception as exc: log.error(exc) log.error('Exception occurred while ' 'introspecting {0}: {1}'.format(fun, exc)) data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format( fun, exc.__class__.__name__, exc, ) data['success'] = False self.event.fire_event(data, tagify([jid, 'ret'], 'wheel')) return {'tag': tag, 'data': data} if 'eauth' not in load: msg = ('Authentication failure of type "eauth" occurred for ' 'user {0}.').format(load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) if load['eauth'] not in self.opts['external_auth']: # The eauth system is not enabled, fail msg = ('Authentication failure of type "eauth" occurred for ' 'user {0}.').format(load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) try: name = self.loadauth.load_name(load) if not ((name in self.opts['external_auth'][load['eauth']]) | ('*' in self.opts['external_auth'][load['eauth']])): msg = ('Authentication failure of type "eauth" occurred for ' 'user {0}.').format(load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) if not self.loadauth.time_auth(load): msg = ('Authentication failure of type "eauth" occurred for ' 'user {0}.').format(load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) good = self.ckminions.wheel_check( self.opts['external_auth'][load['eauth']][name] if name in self.opts['external_auth'][load['eauth']] else self.opts['external_auth'][token['eauth']]['*'], load['fun']) if not good: msg = ('Authentication failure of type "eauth" occurred for ' 'user {0}.').format(load.get('username', 'UNKNOWN')) log.warning(msg) return dict(error=dict(name='EauthAuthenticationError', message=msg)) jid = salt.utils.jid.gen_jid() fun = load.pop('fun') tag = tagify(jid, prefix='wheel') data = {'fun': "wheel.{0}".format(fun), 'jid': jid, 'tag': tag, 'user': load.get('username', 'UNKNOWN')} try: self.event.fire_event(data, tagify([jid, 'new'], 'wheel')) ret = self.wheel_.call_func(fun, **load) data['return'] = ret data['success'] = True self.event.fire_event(data, tagify([jid, 'ret'], 'wheel')) return {'tag': tag, 'data': data} except Exception as exc: log.error('Exception occurred while ' 'introspecting {0}: {1}'.format(fun, exc)) data['return'] 
= 'Exception occurred in wheel {0}: {1}: {2}'.format( fun, exc.__class__.__name__, exc, ) self.event.fire_event(data, tagify([jid, 'ret'], 'wheel')) return {'tag': tag, 'data': data} except Exception as exc: log.error( 'Exception occurred in the wheel system: {0}'.format(exc) ) return dict(error=dict(name=exc.__class__.__name__, args=exc.args, message=str(exc))) def mk_token(self, load): ''' Create and return an authentication token, the clear load needs to contain the eauth key and the needed authentication creds. ''' if 'eauth' not in load: log.warning('Authentication failure of type "eauth" occurred.') return '' if load['eauth'] not in self.opts['external_auth']: # The eauth system is not enabled, fail log.warning('Authentication failure of type "eauth" occurred.') return '' try: name = self.loadauth.load_name(load) if not ((name in self.opts['external_auth'][load['eauth']]) | ('*' in self.opts['external_auth'][load['eauth']])): log.warning('Authentication failure of type "eauth" occurred.') return '' if not self.loadauth.time_auth(load): log.warning('Authentication failure of type "eauth" occurred.') return '' return self.loadauth.mk_token(load) except Exception as exc: log.error( 'Exception occurred while authenticating: {0}'.format(exc) ) return '' def get_token(self, load): ''' Return the name associated with a token or False if the token is invalid ''' if 'token' not in load: return False return self.loadauth.get_tok(load['token']) def publish(self, load): ''' This method sends out publications to the minions, it can only be used by the LocalClient. ''' extra = load.get('kwargs', {}) # check blacklist/whitelist good = True # Check if the user is blacklisted for user_re in self.opts['client_acl_blacklist'].get('users', []): if re.match(user_re, load['user']): good = False break # check if the cmd is blacklisted for module_re in self.opts['client_acl_blacklist'].get('modules', []): # if this is a regular command, its a single function if isinstance(load['fun'], str): funs_to_check = [load['fun']] # if this a compound function else: funs_to_check = load['fun'] for fun in funs_to_check: if re.match(module_re, fun): good = False break if good is False: log.error( '{user} does not have permissions to run {function}. Please ' 'contact your local administrator if you believe this is in ' 'error.\n'.format( user=load['user'], function=load['fun'] ) ) return '' # to make sure we don't step on anyone else's toes del good # Check for external auth calls if extra.get('token', False): # A token was passed, check it try: token = self.loadauth.get_tok(extra['token']) except Exception as exc: log.error( 'Exception occurred when generating auth token: {0}'.format( exc ) ) return '' if not token: log.warning('Authentication failure of type "token" occurred. \ Token could not be retrieved.') return '' if token['eauth'] not in self.opts['external_auth']: log.warning('Authentication failure of type "token" occurred. \ Authentication type of {0} not present.').format(token['eauth']) return '' if not ((token['name'] in self.opts['external_auth'][token['eauth']]) | ('*' in self.opts['external_auth'][token['eauth']])): log.warning('Authentication failure of type "token" occurred. 
\ Token does not verify against eauth provider: {0}').format( self.opts['external_auth']) return '' good = self.ckminions.auth_check( self.opts['external_auth'][token['eauth']][token['name']] if token['name'] in self.opts['external_auth'][token['eauth']] else self.opts['external_auth'][token['eauth']]['*'], load['fun'], load['tgt'], load.get('tgt_type', 'glob')) if not good: # Accept find_job so the CLI will function cleanly if load['fun'] != 'saltutil.find_job': log.warning( 'Authentication failure of type "token" occurred.' ) return '' load['user'] = token['name'] log.debug('Minion tokenized user = "{0}"'.format(load['user'])) elif 'eauth' in extra: if extra['eauth'] not in self.opts['external_auth']: # The eauth system is not enabled, fail log.warning( 'Authentication failure of type "eauth" occurred.' ) return '' try: name = self.loadauth.load_name(extra) if not ((name in self.opts['external_auth'][extra['eauth']]) | ('*' in self.opts['external_auth'][extra['eauth']])): log.warning( 'Authentication failure of type "eauth" occurred.' ) return '' if not self.loadauth.time_auth(extra): log.warning( 'Authentication failure of type "eauth" occurred.' ) return '' except Exception as exc: log.error( 'Exception occurred while authenticating: {0}'.format(exc) ) return '' good = self.ckminions.auth_check( self.opts['external_auth'][extra['eauth']][name] if name in self.opts['external_auth'][extra['eauth']] else self.opts['external_auth'][extra['eauth']]['*'], load['fun'], load['tgt'], load.get('tgt_type', 'glob')) if not good: # Accept find_job so the CLI will function cleanly if load['fun'] != 'saltutil.find_job': log.warning( 'Authentication failure of type "eauth" occurred.' ) return '' load['user'] = name # Verify that the caller has root on master elif 'user' in load: if load['user'].startswith('sudo_'): # If someone can sudo, allow them to act as root if load.get('key', 'invalid') == self.key.get('root'): load.pop('key') elif load.pop('key') != self.key[self.opts.get('user', 'root')]: log.warning( 'Authentication failure of type "user" occurred.' ) return '' elif load['user'] == self.opts.get('user', 'root'): if load.pop('key') != self.key[self.opts.get('user', 'root')]: log.warning( 'Authentication failure of type "user" occurred.' ) return '' elif load['user'] == 'root': if load.pop('key') != self.key.get(self.opts.get('user', 'root')): log.warning( 'Authentication failure of type "user" occurred.' ) return '' elif load['user'] == salt.utils.get_user(): if load.pop('key') != self.key.get(load['user']): log.warning( 'Authentication failure of type "user" occurred.' ) return '' else: if load['user'] in self.key: # User is authorised, check key and check perms if load.pop('key') != self.key[load['user']]: log.warning( 'Authentication failure of type "user" occurred.' ) return '' if load['user'] not in self.opts['client_acl']: log.warning( 'Authentication failure of type "user" occurred.' ) return '' good = self.ckminions.auth_check( self.opts['client_acl'][load['user']], load['fun'], load['tgt'], load.get('tgt_type', 'glob')) if not good: # Accept find_job so the CLI will function cleanly if load['fun'] != 'saltutil.find_job': log.warning( 'Authentication failure of type "user" ' 'occurred.' ) return '' else: log.warning( 'Authentication failure of type "user" occurred.' ) return '' else: if load.pop('key') != self.key[salt.utils.get_user()]: log.warning( 'Authentication failure of type "other" occurred.' 
) return '' # Retrieve the minions list minions = self.ckminions.check_minions( load['tgt'], load.get('tgt_type', 'glob') ) # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not minions: return { 'enc': 'clear', 'load': { 'jid': None, 'minions': minions } } # Retrieve the jid if not load['jid']: fstr = '{0}.prep_jid'.format(self.opts['master_job_cache']) load['jid'] = self.mminion.returners[fstr](nocache=extra.get('nocache', False)) self.event.fire_event({'minions': minions}, load['jid']) new_job_load = { 'jid': load['jid'], 'tgt_type': load['tgt_type'], 'tgt': load['tgt'], 'user': load['user'], 'fun': load['fun'], 'arg': load['arg'], 'minions': minions, } # Announce the job on the event bus self.event.fire_event(new_job_load, 'new_job') # old dup event self.event.fire_event(new_job_load, tagify([load['jid'], 'new'], 'job')) # Save the invocation information if self.opts['ext_job_cache']: try: fstr = '{0}.save_load'.format(self.opts['ext_job_cache']) self.mminion.returners[fstr](load['jid'], load) except KeyError: log.critical( 'The specified returner used for the external job cache ' '"{0}" does not have a save_load function!'.format( self.opts['ext_job_cache'] ) ) except Exception: log.critical( 'The specified returner threw a stack trace:\n', exc_info=True ) # always write out to the master job cache try: fstr = '{0}.save_load'.format(self.opts['master_job_cache']) self.mminion.returners[fstr](load['jid'], load) except KeyError: log.critical( 'The specified returner used for the master job cache ' '"{0}" does not have a save_load function!'.format( self.opts['master_job_cache'] ) ) except Exception: log.critical( 'The specified returner threw a stack trace:\n', exc_info=True ) # Altering the contents of the publish load is serious!! Changes here # break compatibility with minion/master versions and even tiny # additions can have serious implications on the performance of the # publish commands. # # In short, check with <NAME> before you even think about # touching this stuff, we can probably do what you want to do another # way that won't have a negative impact. pub_load = { 'fun': load['fun'], 'arg': load['arg'], 'tgt': load['tgt'], 'jid': load['jid'], 'ret': load['ret'], } if 'id' in extra: pub_load['id'] = extra['id'] if 'tgt_type' in load: pub_load['tgt_type'] = load['tgt_type'] if 'to' in load: pub_load['to'] = load['to'] if 'kwargs' in load: if 'ret_config' in load['kwargs']: pub_load['ret_config'] = load['kwargs'].get('ret_config') if 'metadata' in load['kwargs']: pub_load['metadata'] = load['kwargs'].get('metadata') if 'user' in load: log.info( 'User {user} Published command {fun} with jid {jid}'.format( **load ) ) pub_load['user'] = load['user'] else: log.info( 'Published command {fun} with jid {jid}'.format( **load ) ) log.debug('Published command details {0}'.format(pub_load)) return {'ret': { 'jid': load['jid'], 'minions': minions }, 'pub': pub_load }
1.984375
2
core/domain/role_services_test.py
Mohitbalwani26/oppia
0
1528
<gh_stars>0 # coding: utf-8 # # Copyright 2017 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test functions relating to roles and actions.""" from __future__ import absolute_import # pylint: disable=import-only-modules from __future__ import unicode_literals # pylint: disable=import-only-modules from core.domain import role_services from core.tests import test_utils import feconf import python_utils class RolesAndActionsServicesUnitTests(test_utils.GenericTestBase): """Tests for roles and actions.""" def test_get_role_actions_return_value_in_correct_schema(self): role_actions = role_services.get_role_actions() self.assertTrue(isinstance(role_actions, dict)) for role_name, allotted_actions in role_actions.items(): self.assertTrue(isinstance(role_name, python_utils.UNICODE)) self.assertTrue(isinstance(allotted_actions, list)) self.assertEqual(len(set(allotted_actions)), len(allotted_actions)) for action_name in allotted_actions: self.assertTrue( isinstance(action_name, python_utils.UNICODE)) def test_get_all_actions(self): with self.assertRaisesRegexp( Exception, 'Role TEST_ROLE does not exist.'): role_services.get_all_actions('TEST_ROLE') self.assertEqual( role_services.get_all_actions(feconf.ROLE_ID_GUEST), [role_services.ACTION_PLAY_ANY_PUBLIC_ACTIVITY])
1.953125
2
deep_learning/keras/keras/backend/cntk_backend.py
xpennec/applications
21
1529
from __future__ import absolute_import from __future__ import division from __future__ import print_function import cntk as C import numpy as np from .common import floatx, epsilon, image_dim_ordering, image_data_format from collections import defaultdict from contextlib import contextmanager import warnings C.set_global_option('align_axis', 1) b_any = any dev = C.device.use_default_device() if dev.type() == 0: warnings.warn( 'CNTK backend warning: GPU is not detected. ' 'CNTK\'s CPU version is not fully optimized,' 'please run with GPU to get better performance.') # A learning phase is a bool tensor used to run Keras models in # either train mode (learning_phase == 1) or test mode (learning_phase == 0). # LEARNING_PHASE_PLACEHOLDER is the placeholder for dynamic learning phase _LEARNING_PHASE_PLACEHOLDER = C.constant(shape=(), dtype=np.float32, value=1.0, name='_keras_learning_phase') # static learning phase flag, if it is not 0 or 1, we will go with dynamic learning phase tensor. _LEARNING_PHASE = -1 _UID_PREFIXES = defaultdict(int) # cntk doesn't support gradient as symbolic op, to hook up with keras model, # we will create gradient as a constant placeholder, here use this global # map to keep the mapping from grad placeholder to parameter grad_parameter_dict = {} NAME_SCOPE_STACK = [] @contextmanager def name_scope(name): global NAME_SCOPE_STACK NAME_SCOPE_STACK.append(name) yield NAME_SCOPE_STACK.pop() def get_uid(prefix=''): _UID_PREFIXES[prefix] += 1 return _UID_PREFIXES[prefix] def learning_phase(): # If _LEARNING_PHASE is not 0 or 1, return dynamic learning phase tensor return _LEARNING_PHASE if _LEARNING_PHASE in {0, 1} else _LEARNING_PHASE_PLACEHOLDER def set_learning_phase(value): global _LEARNING_PHASE if value not in {0, 1}: raise ValueError('CNTK Backend: Set learning phase ' 'with value %s is not supported, ' 'expected 0 or 1.' % value) _LEARNING_PHASE = value def clear_session(): """Reset learning phase flag for cntk backend. """ global _LEARNING_PHASE global _LEARNING_PHASE_PLACEHOLDER _LEARNING_PHASE = -1 _LEARNING_PHASE_PLACEHOLDER.value = np.asarray(1.0) def in_train_phase(x, alt, training=None): global _LEARNING_PHASE if training is None: training = learning_phase() uses_learning_phase = True else: uses_learning_phase = False # CNTK currently don't support cond op, so here we use # element_select approach as workaround. It may have # perf issue, will resolve it later with cntk cond op. if callable(x) and isinstance(x, C.cntk_py.Function) is False: x = x() if callable(alt) and isinstance(alt, C.cntk_py.Function) is False: alt = alt() if training is True: x._uses_learning_phase = uses_learning_phase return x else: # if _LEARNING_PHASE is static if isinstance(training, int) or isinstance(training, bool): result = x if training == 1 or training is True else alt else: result = C.element_select(training, x, alt) result._uses_learning_phase = uses_learning_phase return result def in_test_phase(x, alt, training=None): return in_train_phase(alt, x, training=training) def _convert_string_dtype(dtype): # cntk only support float32 and float64 if dtype == 'float32': return np.float32 elif dtype == 'float64': return np.float64 else: # cntk only running with float, # try to cast to float to run the model return np.float32 def _convert_dtype_string(dtype): if dtype == np.float32: return 'float32' elif dtype == np.float64: return 'float64' else: raise ValueError('CNTK Backend: Unsupported dtype: %s. ' 'CNTK only supports float32 and ' 'float64.' 
% dtype) def variable(value, dtype=None, name=None, constraint=None): """Instantiates a variable and returns it. # Arguments value: Numpy array, initial value of the tensor. dtype: Tensor type. name: Optional name string for the tensor. constraint: Optional projection function to be applied to the variable after an optimizer update. # Returns A variable instance (with Keras metadata included). """ if dtype is None: dtype = floatx() if name is None: name = '' if isinstance( value, C.variables.Constant) or isinstance( value, C.variables.Parameter): value = value.value # we don't support init parameter with symbolic op, so eval it first as # workaround if isinstance(value, C.cntk_py.Function): value = eval(value) shape = value.shape if hasattr(value, 'shape') else () if hasattr(value, 'dtype') and value.dtype != dtype and len(shape) > 0: value = value.astype(dtype) # TODO: remove the conversion when cntk supports int32, int64 # https://docs.microsoft.com/en-us/python/api/cntk.variables.parameter dtype = 'float32' if 'int' in str(dtype) else dtype v = C.parameter(shape=shape, init=value, dtype=dtype, name=_prepare_name(name, 'variable')) v._keras_shape = v.shape v._uses_learning_phase = False v.constraint = constraint return v def bias_add(x, bias, data_format=None): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) dims = len(x.shape) if dims > 0 and x.shape[0] == C.InferredDimension: dims -= 1 bias_dims = len(bias.shape) if bias_dims != 1 and bias_dims != dims: raise ValueError('Unexpected bias dimensions %d, ' 'expected 1 or %d dimensions' % (bias_dims, dims)) if dims == 4: if data_format == 'channels_first': if bias_dims == 1: shape = (bias.shape[0], 1, 1, 1) else: shape = (bias.shape[3],) + bias.shape[:3] elif data_format == 'channels_last': if bias_dims == 1: shape = (1, 1, 1, bias.shape[0]) else: shape = bias.shape elif dims == 3: if data_format == 'channels_first': if bias_dims == 1: shape = (bias.shape[0], 1, 1) else: shape = (bias.shape[2],) + bias.shape[:2] elif data_format == 'channels_last': if bias_dims == 1: shape = (1, 1, bias.shape[0]) else: shape = bias.shape elif dims == 2: if data_format == 'channels_first': if bias_dims == 1: shape = (bias.shape[0], 1) else: shape = (bias.shape[1],) + bias.shape[:1] elif data_format == 'channels_last': if bias_dims == 1: shape = (1, bias.shape[0]) else: shape = bias.shape else: shape = bias.shape return x + reshape(bias, shape) def eval(x): if isinstance(x, C.cntk_py.Function): return x.eval() elif isinstance(x, C.variables.Constant) or isinstance(x, C.variables.Parameter): return x.value else: raise ValueError('CNTK Backend: `eval` method on ' '`%s` type is not supported. ' 'CNTK only supports `eval` with ' '`Function`, `Constant` or ' '`Parameter`.' % type(x)) def placeholder( shape=None, ndim=None, dtype=None, sparse=False, name=None, dynamic_axis_num=1): if dtype is None: dtype = floatx() if not shape: if ndim: shape = tuple([None for _ in range(ndim)]) dynamic_dimension = C.FreeDimension if _get_cntk_version() >= 2.2 else C.InferredDimension cntk_shape = [dynamic_dimension if s is None else s for s in shape] cntk_shape = tuple(cntk_shape) if dynamic_axis_num > len(cntk_shape): raise ValueError('CNTK backend: creating placeholder with ' '%d dimension is not supported, at least ' '%d dimensions are needed.' 
                         % (len(cntk_shape), dynamic_axis_num))
    if name is None:
        name = ''

    cntk_shape = cntk_shape[dynamic_axis_num:]
    x = C.input(
        shape=cntk_shape,
        dtype=_convert_string_dtype(dtype),
        is_sparse=sparse,
        name=name)
    x._keras_shape = shape
    x._uses_learning_phase = False
    x._cntk_placeholder = True
    return x


def is_placeholder(x):
    """Returns whether `x` is a placeholder.

    # Arguments
        x: A candidate placeholder.

    # Returns
        Boolean.
    """
    return hasattr(x, '_cntk_placeholder') and x._cntk_placeholder


def is_keras_tensor(x):
    if not is_tensor(x):
        raise ValueError('Unexpectedly found an instance of type `' +
                         str(type(x)) + '`. '
                         'Expected a symbolic tensor instance.')
    return hasattr(x, '_keras_history')


def is_tensor(x):
    return isinstance(x, (C.variables.Constant,
                          C.variables.Variable,
                          C.variables.Parameter,
                          C.ops.functions.Function))


def shape(x):
    shape = list(int_shape(x))
    num_dynamic = _get_dynamic_axis_num(x)
    non_dyn_shape = []
    for i in range(len(x.shape)):
        if shape[i + num_dynamic] is None:
            non_dyn_shape.append(x.shape[i])
        else:
            non_dyn_shape.append(shape[i + num_dynamic])
    return shape[:num_dynamic] + non_dyn_shape


def is_sparse(tensor):
    return tensor.is_sparse


def int_shape(x):
    if hasattr(x, '_keras_shape'):
        return x._keras_shape

    shape = x.shape
    if hasattr(x, 'dynamic_axes'):
        dynamic_shape = [None for a in x.dynamic_axes]
        shape = tuple(dynamic_shape) + shape
    return shape


def ndim(x):
    shape = int_shape(x)
    return len(shape)


def _prepare_name(name, default):
    prefix = '_'.join(NAME_SCOPE_STACK)
    if name is None or name == '':
        return prefix + '/' + default
    return prefix + '/' + name


def constant(value, dtype=None, shape=None, name=None):
    if dtype is None:
        dtype = floatx()
    if shape is None:
        shape = ()
    np_value = value * np.ones(shape)
    const = C.constant(np_value,
                       dtype=dtype,
                       name=_prepare_name(name, 'constant'))
    const._keras_shape = const.shape
    const._uses_learning_phase = False
    return const


def random_binomial(shape, p=0.0, dtype=None, seed=None):
    # use a numpy workaround for now
    if seed is None:
        # ensure that randomness is conditioned by the Numpy RNG
        seed = np.random.randint(10e7)
    np.random.seed(seed)
    if dtype is None:
        dtype = np.float32
    else:
        dtype = _convert_string_dtype(dtype)

    size = 1
    for _ in shape:
        if _ is None:
            raise ValueError('CNTK Backend: randomness op with '
                             'dynamic shape is not supported now. '
                             'Please provide fixed dimension '
                             'instead of `None`.')
        size *= _

    binomial = np.random.binomial(1, p, size).astype(dtype).reshape(shape)
    return variable(value=binomial, dtype=dtype)


def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
    for _ in shape:
        if _ is None:
            raise ValueError('CNTK Backend: randomness op with '
                             'dynamic shape is not supported now. 
' 'Please provide fixed dimension ' 'instead of `None`.') return random_uniform_variable(shape, minval, maxval, dtype, seed) def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None): if dtype is None: dtype = floatx() if seed is None: # ensure that randomness is conditioned by the Numpy RNG seed = np.random.randint(10e3) if dtype is None: dtype = np.float32 else: dtype = _convert_string_dtype(dtype) if name is None: name = '' scale = (high - low) / 2 p = C.parameter( shape, init=C.initializer.uniform( scale, seed=seed), dtype=dtype, name=name) return variable(value=p.value + low + scale) def random_normal_variable( shape, mean, scale, dtype=None, name=None, seed=None): if dtype is None: dtype = floatx() if seed is None: # ensure that randomness is conditioned by the Numpy RNG seed = np.random.randint(10e7) if dtype is None: dtype = np.float32 else: dtype = _convert_string_dtype(dtype) if name is None: name = '' return C.parameter( shape=shape, init=C.initializer.normal( scale=scale, seed=seed), dtype=dtype, name=name) def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): if dtype is None: dtype = floatx() for _ in shape: if _ is None: raise ValueError('CNTK Backend: randomness op with ' 'dynamic shape is not supported now. ' 'Please provide fixed dimension ' 'instead of `None`.') # how to apply mean and stddev return random_normal_variable(shape=shape, mean=mean, scale=1.0, seed=seed) def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): if seed is None: seed = np.random.randint(1, 10e6) if dtype is None: dtype = np.float32 else: dtype = _convert_string_dtype(dtype) return C.parameter( shape, init=C.initializer.truncated_normal( stddev, seed=seed), dtype=dtype) def dtype(x): return _convert_dtype_string(x.dtype) def zeros(shape, dtype=None, name=None): if dtype is None: dtype = floatx() ctype = _convert_string_dtype(dtype) return variable(value=np.zeros(shape, ctype), dtype=dtype, name=name) def ones(shape, dtype=None, name=None): if dtype is None: dtype = floatx() ctype = _convert_string_dtype(dtype) return variable(value=np.ones(shape, ctype), dtype=dtype, name=name) def eye(size, dtype=None, name=None): if dtype is None: dtype = floatx() return variable(np.eye(size), dtype, name) def zeros_like(x, dtype=None, name=None): return x * 0 def ones_like(x, dtype=None, name=None): return zeros_like(x) + 1 def count_params(x): for _ in x.shape: if _ == C.InferredDimension or _ == C.FreeDimension: raise ValueError('CNTK backend: `count_params` with dynamic ' 'shape is not supported. Please provide ' 'fixed dimension instead of `None`.') return np.prod(int_shape(x)) def cast(x, dtype): # cntk calculate everything in float, so don't need case from bool / int return x def dot(x, y): if len(x.shape) > 2 or len(y.shape) > 2: y_shape = int_shape(y) if len(y_shape) > 2: permutation = [len(y_shape) - 2] permutation += list(range(len(y_shape) - 2)) permutation += [len(y_shape) - 1] y = C.transpose(y, perm=permutation) return C.times(x, y, len(y_shape) - 1) else: return C.times(x, y) def batch_dot(x, y, axes=None): x_shape = int_shape(x) y_shape = int_shape(y) if isinstance(axes, int): axes = (axes, axes) if axes is None: # behaves like tf.batch_matmul as default axes = [len(x_shape) - 1, len(y_shape) - 2] if b_any([isinstance(a, (list, tuple)) for a in axes]): raise ValueError('Multiple target dimensions are not supported. 
' + 'Expected: None, int, (int, int), ' + 'Provided: ' + str(axes)) if len(x_shape) == 2 and len(y_shape) == 2: if axes[0] == axes[1]: result = sum(x * y, axis=axes[0], keepdims=True) return result if axes[0] == 1 else transpose(result) else: return sum(x * transpose(y), axis=axes[0], keepdims=True) else: if len(y_shape) == 2: y = expand_dims(y) normalized_axis = [] normalized_axis.append(_normalize_axis(axes[0], x)[0]) normalized_axis.append(_normalize_axis(axes[1], y)[0]) # transpose i = normalized_axis[0] while i < len(x.shape) - 1: x = C.swapaxes(x, i, i + 1) i += 1 i = normalized_axis[1] while i > 0: y = C.swapaxes(y, i, i - 1) i -= 1 result = C.times(x, y, output_rank=(len(y.shape) - 1) if len(y.shape) > 1 else 1) if len(y_shape) == 2: result = squeeze(result, -1) return result def transpose(x): return C.swapaxes(x, 0, 1) def gather(reference, indices): # There is a bug in cntk gather op which may cause crash. # We have made a fix but not catched in CNTK 2.1 release. # Will update with gather op in next release if _get_cntk_version() >= 2.2: return C.ops.gather(reference, indices) else: num_classes = reference.shape[0] one_hot_matrix = C.ops.one_hot(indices, num_classes) return C.times(one_hot_matrix, reference, output_rank=len(reference.shape) - 1) def _remove_dims(x, axis, keepdims=False): if keepdims is False and isinstance(axis, list): # sequence axis is removed by default, so don't need reshape on it reduce_axes = [] for a in axis: if isinstance(a, C.Axis) is False: reduce_axes.append(a) return _reshape_dummy_dim(x, reduce_axes) else: if isinstance(axis, list): has_seq = False for a in axis: if isinstance(a, C.Axis): has_seq = True break if has_seq: nones = _get_dynamic_axis_num(x) x = expand_dims(x, nones) return x def max(x, axis=None, keepdims=False): axis = _normalize_axis(axis, x) output = _reduce_on_axis(x, axis, 'reduce_max') return _remove_dims(output, axis, keepdims) def min(x, axis=None, keepdims=False): axis = _normalize_axis(axis, x) output = _reduce_on_axis(x, axis, 'reduce_min') return _remove_dims(output, axis, keepdims) def sum(x, axis=None, keepdims=False): axis = _normalize_axis(axis, x) output = _reduce_on_axis(x, axis, 'reduce_sum') return _remove_dims(output, axis, keepdims) def prod(x, axis=None, keepdims=False): axis = _normalize_axis(axis, x) output = _reduce_on_axis(x, axis, 'reduce_prod') return _remove_dims(output, axis, keepdims) def logsumexp(x, axis=None, keepdims=False): return log(sum(exp(x), axis=axis, keepdims=keepdims)) def var(x, axis=None, keepdims=False): m = mean(x, axis, keepdims=True) devs_squared = C.square(x - m) return mean(devs_squared, axis=axis, keepdims=keepdims) def std(x, axis=None, keepdims=False): return C.sqrt(var(x, axis=axis, keepdims=keepdims)) def expand_dims(x, axis=-1): shape = list(int_shape(x)) nones = _get_dynamic_axis_num(x) index = axis if axis >= 0 else len(shape) + 1 shape.insert(index, 1) new_shape = shape[nones:] new_shape = tuple( [C.InferredDimension if _ is None else _ for _ in new_shape]) result = C.reshape(x, new_shape) if index < nones: result._keras_shape = shape return result def squeeze(x, axis): if isinstance(axis, tuple): axis = list(axis) if not isinstance(axis, list): axis = [axis] shape = list(int_shape(x)) _axis = [] for _ in axis: if isinstance(_, int): _axis.append(_ if _ >= 0 else _ + len(shape)) if len(_axis) == 0: return x nones = _get_dynamic_axis_num(x) for _ in sorted(_axis, reverse=True): del shape[_] new_shape = shape[nones:] new_shape = tuple([C.InferredDimension if _ == C.FreeDimension 
else _ for _ in new_shape]) return C.reshape(x, new_shape) def tile(x, n): if isinstance(n, int): n = (n,) elif isinstance(n, list): n = tuple(n) shape = int_shape(x) num_dynamic_axis = _get_dynamic_axis_num(x) # Padding the axis if len(n) < len(shape): n = tuple([1 for _ in range(len(shape) - len(n))]) + n if len(n) != len(shape): raise NotImplementedError i = num_dynamic_axis for i, rep in enumerate(n): if i >= num_dynamic_axis and shape[i] is not None: tmp = [x] * rep x = C.splice(*tmp, axis=i - num_dynamic_axis) i += 1 return x def _normalize_axis(axis, x): shape = int_shape(x) ndim = len(shape) nones = _get_dynamic_axis_num(x) if nones > ndim: raise ValueError('CNTK Backend: tensor with keras shape: `%s` has ' '%d cntk dynamic axis, this is not expected, please ' 'double check the keras shape history.' % (str(shape), nones)) # Current cntk does not support shape like (1, batch). so using the workaround # here to mapping the correct axis. Will remove this tricky after we add support # in native cntk op cntk_axis = [] dynamic_axis_index = 0 for i in range(ndim): if shape[i] is None and dynamic_axis_index < nones: cntk_axis.append(x.dynamic_axes[dynamic_axis_index]) dynamic_axis_index += 1 else: cntk_axis.append(i - dynamic_axis_index) if dynamic_axis_index < nones: i = 0 while dynamic_axis_index < nones: cntk_axis[i] = x.dynamic_axes[dynamic_axis_index] i += 1 dynamic_axis_index += 1 while i < len(cntk_axis): cntk_axis[i] -= nones i += 1 if isinstance(axis, tuple): _axis = list(axis) elif isinstance(axis, int): _axis = [axis] elif isinstance(axis, list): _axis = list(axis) else: _axis = axis if isinstance(_axis, list): for i, a in enumerate(_axis): if a is not None and a < 0: _axis[i] = (a % ndim) if _axis[i] is not None: _axis[i] = cntk_axis[_axis[i]] else: if _axis is None: _axis = C.Axis.all_axes() return _axis def _reshape_dummy_dim(x, axis): shape = list(x.shape) _axis = [_ + len(shape) if _ < 0 else _ for _ in axis] if shape.count(C.InferredDimension) > 1 or shape.count(C.FreeDimension) > 1: result = x for index in sorted(_axis, reverse=True): result = C.reshape(result, shape=(), begin_axis=index, end_axis=index + 1) return result else: for index in sorted(_axis, reverse=True): del shape[index] shape = [C.InferredDimension if _ == C.FreeDimension else _ for _ in shape] return C.reshape(x, shape) def mean(x, axis=None, keepdims=False): axis = _normalize_axis(axis, x) output = _reduce_on_axis(x, axis, 'reduce_mean') return _remove_dims(output, axis, keepdims) def any(x, axis=None, keepdims=False): reduce_result = sum(x, axis, keepdims=keepdims) any_matrix = C.element_select( reduce_result, ones_like(reduce_result), zeros_like(reduce_result)) if len(reduce_result.shape) == 0 and _get_dynamic_axis_num(x) == 0: return C.reduce_sum(any_matrix) else: return any_matrix def all(x, axis=None, keepdims=False): reduce_result = prod(x, axis, keepdims=keepdims) all_matrix = C.element_select( reduce_result, ones_like(reduce_result), zeros_like(reduce_result)) if len(reduce_result.shape) == 0 and _get_dynamic_axis_num(x) == 0: return C.reduce_sum(all_matrix) else: return all_matrix def classification_error(target, output, axis=-1): return C.ops.reduce_mean( C.equal( argmax( output, axis=-1), argmax( target, axis=-1)), axis=C.Axis.all_axes()) def argmax(x, axis=-1): axis = [axis] axis = _normalize_axis(axis, x) output = C.ops.argmax(x, axis=axis[0]) return _reshape_dummy_dim(output, axis) def argmin(x, axis=-1): axis = [axis] axis = _normalize_axis(axis, x) output = C.ops.argmin(x, axis=axis[0]) 
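    # Note (descriptive comment): CNTK's argmin/argmax keep the reduced axis as a
    # size-1 dimension; _reshape_dummy_dim (defined above) drops it so the result
    # follows the Keras convention of removing the reduced axis, as the return
    # statement below does.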
return _reshape_dummy_dim(output, axis) def square(x): return C.square(x) def abs(x): return C.abs(x) def sqrt(x): return C.sqrt(x) def exp(x): return C.exp(x) def log(x): return C.log(x) def round(x): return C.round(x) def sigmoid(x): return C.sigmoid(x) def sign(x): return x / C.abs(x) def pow(x, a): return C.pow(x, a) def clip(x, min_value, max_value): if max_value is not None and max_value < min_value: max_value = min_value if max_value is None: max_value = np.inf if min_value is None: min_value = -np.inf return C.clip(x, min_value, max_value) def binary_crossentropy(target, output, from_logits=False): if from_logits: output = C.sigmoid(output) output = C.clip(output, epsilon(), 1.0 - epsilon()) output = -target * C.log(output) - (1.0 - target) * C.log(1.0 - output) return output def get_variable_shape(x): return int_shape(x) def update(x, new_x): return C.assign(x, new_x) def moving_average_update(variable, value, momentum): return C.assign(variable, variable * momentum + value * (1. - momentum)) def update_add(x, increment): result = x + increment return C.assign(x, result) def gradients(loss, variables): # cntk does not support gradients as symbolic op, # to hook up with keras model # we will return a constant as place holder, the cntk learner will apply # the gradient during training. global grad_parameter_dict if isinstance(variables, list) is False: variables = [variables] grads = [] for v in variables: g = C.constant(0, shape=v.shape, name='keras_grad_placeholder') grads.append(g) grad_parameter_dict[g] = v return grads def equal(x, y): return C.equal(x, y) def not_equal(x, y): return C.not_equal(x, y) def greater(x, y): return C.greater(x, y) def greater_equal(x, y): return C.greater_equal(x, y) def less(x, y): return C.less(x, y) def less_equal(x, y): return C.less_equal(x, y) def maximum(x, y): return C.element_max(x, y) def minimum(x, y): return C.element_min(x, y) def sin(x): return C.sin(x) def cos(x): return C.cos(x) def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3): if gamma is None: if beta is None: gamma = ones_like(x) else: gamma = ones_like(beta) if beta is None: if gamma is None: beta = zeros_like(x) else: beta = zeros_like(gamma) mean, variant = _moments(x, _normalize_axis(reduction_axes, x)) if sorted(reduction_axes) == list(range(ndim(x)))[:-1]: normalized = batch_normalization( x, mean, variant, beta, gamma, epsilon) else: # need broadcasting target_shape = [] x_shape = int_shape(x) # skip the batch axis for axis in range(1, ndim(x)): if axis in reduction_axes: target_shape.append(1) if ndim(gamma) > axis: gamma = C.reduce_mean(gamma, axis - 1) beta = C.reduce_mean(beta, axis - 1) else: target_shape.append(x_shape[axis]) broadcast_mean = C.reshape(mean, target_shape) broadcast_var = C.reshape(variant, target_shape) broadcast_gamma = C.reshape(gamma, target_shape) broadcast_beta = C.reshape(beta, target_shape) normalized = batch_normalization( x, broadcast_mean, broadcast_var, broadcast_beta, broadcast_gamma, epsilon) return normalized, mean, variant def _moments(x, axes=None, shift=None, keep_dims=False): _axes = tuple(axes) if shift is None: shift = x # Compute true mean while keeping the dims for proper broadcasting. 
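    # The reductions below implement the shifted-moments identity, with `shift`
    # treated as a constant via stop_gradient:
    #   mean = E[x - shift] + shift
    #   var  = E[(x - shift)^2] - (E[x - shift])^2
    # Axes are reduced one at a time because C.reduce_mean is applied per axis here.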
for axis in _axes: shift = C.reduce_mean(shift, axis=axis) shift = C.stop_gradient(shift) shifted_mean = C.minus(x, shift) for axis in _axes: shifted_mean = C.reduce_mean(shifted_mean, axis=axis) variance_mean = C.square(C.minus(x, shift)) for axis in _axes: variance_mean = C.reduce_mean(variance_mean, axis=axis) variance = C.minus(variance_mean, C.square(shifted_mean)) mean = C.plus(shifted_mean, shift) if not keep_dims: mean = squeeze(mean, _axes) variance = squeeze(variance, _axes) return mean, variance def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3): # The mean / var / beta / gamma may be processed by broadcast # so it may have an extra batch axis with 1, it is not needed # in cntk, need to remove those dummy axis. if ndim(mean) == ndim(x) and shape(mean)[0] == 1: mean = _reshape_dummy_dim(mean, [0]) if ndim(var) == ndim(x) and shape(var)[0] == 1: var = _reshape_dummy_dim(var, [0]) if gamma is None: gamma = ones_like(var) elif ndim(gamma) == ndim(x) and shape(gamma)[0] == 1: gamma = _reshape_dummy_dim(gamma, [0]) if beta is None: beta = zeros_like(mean) elif ndim(beta) == ndim(x) and shape(beta)[0] == 1: beta = _reshape_dummy_dim(beta, [0]) return (x - mean) / (C.sqrt(var) + epsilon) * gamma + beta def concatenate(tensors, axis=-1): if len(tensors) == 0: return None axis = [axis] axis = _normalize_axis(axis, tensors[0]) return C.splice(*tensors, axis=axis[0]) def flatten(x): return reshape(x, (-1,)) def reshape(x, shape): shape = tuple([C.InferredDimension if _ == C.FreeDimension else _ for _ in shape]) if isinstance(x, C.variables.Parameter): return C.reshape(x, shape) else: num_dynamic_axis = _get_dynamic_axis_num(x) if num_dynamic_axis == 1 and len(shape) > 0 and shape[0] == -1: # collapse axis with batch axis if b_any(_ == C.InferredDimension for _ in x.shape) or b_any( _ == C.FreeDimension for _ in x.shape): warnings.warn( 'Warning: CNTK backend does not support ' 'collapse of batch axis with inferred dimension. ' 'The reshape did not take place.') return x return _reshape_batch(x, shape) else: # no collapse, then first need to padding the shape if num_dynamic_axis >= len(shape): i = 0 while i < len(shape): if shape[i] is None or shape[i] == -1: i += 1 else: break shape = tuple([-1 for _ in range(num_dynamic_axis - i)]) + shape new_shape = list(shape) new_shape = new_shape[num_dynamic_axis:] new_shape = [C.InferredDimension if _ is None else _ for _ in new_shape] return C.reshape(x, new_shape) def permute_dimensions(x, pattern): dims = len(int_shape(x)) num_dynamic_axis = _get_dynamic_axis_num(x) if isinstance(pattern, list): current_layout = [i for i in range(dims)] else: current_layout = tuple([i for i in range(dims)]) if num_dynamic_axis > 0 and pattern[:num_dynamic_axis] != current_layout[:num_dynamic_axis]: raise ValueError('CNTK backend: the permute pattern %s ' 'requested permute on dynamic axis, ' 'which is not supported. Please do permute ' 'on static axis.' 
% pattern) axis = list(pattern) axis = axis[num_dynamic_axis:] axis = _normalize_axis(axis, x) return C.transpose(x, axis) def resize_images(x, height_factor, width_factor, data_format): if data_format == 'channels_first': output = repeat_elements(x, height_factor, axis=2) output = repeat_elements(output, width_factor, axis=3) return output elif data_format == 'channels_last': output = repeat_elements(x, height_factor, axis=1) output = repeat_elements(output, width_factor, axis=2) return output else: raise ValueError('CNTK Backend: Invalid data_format:', data_format) def resize_volumes(x, depth_factor, height_factor, width_factor, data_format): if data_format == 'channels_first': output = repeat_elements(x, depth_factor, axis=2) output = repeat_elements(output, height_factor, axis=3) output = repeat_elements(output, width_factor, axis=4) return output elif data_format == 'channels_last': output = repeat_elements(x, depth_factor, axis=1) output = repeat_elements(output, height_factor, axis=2) output = repeat_elements(output, width_factor, axis=3) return output else: raise ValueError('CNTK Backend: Invalid data_format:', data_format) def repeat_elements(x, rep, axis): axis = _normalize_axis(axis, x) axis = axis[0] slices = [] shape = x.shape i = 0 while i < shape[axis]: tmp = C.ops.slice(x, axis, i, i + 1) for _ in range(rep): slices.append(tmp) i += 1 return C.splice(*slices, axis=axis) def repeat(x, n): # this is a workaround for recurrent layer # if n is inferred dimension, # we can't figure out how to repeat it in cntk now # return the same x to take cntk broadcast feature # to make the recurrent layer work. # need to be fixed in GA. if n is C.InferredDimension or n is C.FreeDimension: return x index = 1 - _get_dynamic_axis_num(x) if index < 0 or index > 1: raise NotImplementedError new_shape = list(x.shape) new_shape.insert(index, 1) new_shape = tuple(new_shape) x = C.reshape(x, new_shape) temp = [x] * n return C.splice(*temp, axis=index) def tanh(x): return C.tanh(x) def _static_rnn(step_function, inputs, initial_states, go_backwards=False, mask=None, constants=None, unroll=False, input_length=None): shape = int_shape(inputs) dims = len(shape) uses_learning_phase = False if dims < 3: raise ValueError('Input should be at least 3D.') # if the second axis is static axis, CNTK will do unroll by default if shape[1] is None: raise ValueError('CNTK Backend: the input of static rnn ' 'has shape `%s`, the second axis ' 'is not static. If you want to run ' 'rnn with non-static axis, please try ' 'dynamic rnn with sequence axis.' 
% shape) if constants is None: constants = [] if mask is not None: mask_shape = int_shape(mask) if len(mask_shape) == dims - 1: mask = expand_dims(mask) nones = _get_dynamic_axis_num(inputs) states = tuple(initial_states) outputs = [] time_axis = 1 - nones if nones > 0 else 1 if go_backwards: i = shape[1] - 1 while i >= 0: current = C.ops.slice(inputs, time_axis, i, i + 1) # remove dummy dimension current = squeeze(current, time_axis) output, new_states = step_function( current, tuple(states) + tuple(constants)) if getattr(output, '_uses_learning_phase', False): uses_learning_phase = True if mask is not None: mask_slice = C.ops.slice(mask, time_axis, i, i + 1) mask_slice = squeeze(mask_slice, time_axis) if len(outputs) == 0: prev_output = zeros_like(output) else: prev_output = outputs[-1] output = C.ops.element_select(mask_slice, output, prev_output) return_states = [] for s, n_s in zip(states, new_states): return_states.append( C.ops.element_select( mask_slice, n_s, s)) new_states = return_states outputs.append(output) states = new_states i -= 1 else: i = 0 while i < shape[1]: current = C.ops.slice(inputs, time_axis, i, i + 1) # remove dummy dimension current = squeeze(current, 1) output, new_states = step_function( current, tuple(states) + tuple(constants)) if getattr(output, '_uses_learning_phase', False): uses_learning_phase = True if mask is not None: mask_slice = C.ops.slice(mask, time_axis, i, i + 1) mask_slice = squeeze(mask_slice, 1) if len(outputs) == 0: prev_output = zeros_like(output) else: prev_output = outputs[-1] output = C.ops.element_select(mask_slice, output, prev_output) return_states = [] for s, n_s in zip(states, new_states): return_states.append( C.ops.element_select( mask_slice, n_s, s)) new_states = return_states outputs.append(output) states = new_states[:len(states)] i += 1 i = 1 # add the time_step axis back final_output = expand_dims(outputs[0], 1) last_output = outputs[0] while i < len(outputs): # add the time_step axis back output_slice = expand_dims(outputs[i], 1) final_output = C.splice(final_output, output_slice, axis=time_axis) last_output = outputs[i] i += 1 last_output._uses_learning_phase = uses_learning_phase return last_output, final_output, states def rnn(step_function, inputs, initial_states, go_backwards=False, mask=None, constants=None, unroll=False, input_length=None): shape = int_shape(inputs) dims = len(shape) global uses_learning_phase uses_learning_phase = False if dims < 3: raise ValueError('CNTK Backend: the input of rnn has only rank %d ' 'Need at least rank 3 to run RNN.' % dims) if _get_dynamic_axis_num(inputs) == 0 or unroll: return _static_rnn( step_function, inputs, initial_states, go_backwards, mask, constants, unroll, input_length) if constants is None: constants = [] num_time_step = shape[1] if num_time_step is None and not has_seq_axis(inputs): num_time_step = inputs.shape[0] initial = [] for s in initial_states: if _get_dynamic_axis_num(s) == 0: if hasattr(C, 'to_batch'): initial.append(C.to_batch(s)) else: initial.append(C.user_function(ConvertToBatch(s))) else: initial.append(s) need_convert = not has_seq_axis(inputs) if go_backwards and need_convert is False: raise NotImplementedError('CNTK Backend: `go_backwards` is not supported with ' 'variable-length sequences. 
Please specify a ' 'static length for your sequences.') rnn_inputs = inputs if need_convert: if go_backwards: rnn_inputs = reverse(rnn_inputs, 1) rnn_inputs = C.to_sequence(rnn_inputs) rnn_constants = [] for constant in constants: if isinstance(constant, list): new_c = [] for c in constant: if _get_dynamic_axis_num(c) == 1: new_c.append(C.sequence.broadcast_as(c, rnn_inputs)) else: new_c.append(c) rnn_constants.append(new_c) else: if _get_dynamic_axis_num(constant) == 1: rnn_constants.append(C.sequence.broadcast_as(constant, rnn_inputs)) else: rnn_constants.append(constant) else: rnn_constants = constants if mask is not None and not has_seq_axis(mask): if go_backwards: mask = reverse(mask, 1) if len(int_shape(mask)) == 2: mask = expand_dims(mask) mask = C.to_sequence_like(mask, rnn_inputs) states = tuple(initial) with C.default_options(axis_offset=1): def _recurrence(x, states, m): # create place holder place_holders = [C.placeholder(dynamic_axes=x.dynamic_axes) for _ in states] past_values = [] for s, p in zip(states, place_holders): past_values.append(C.sequence.past_value(p, s)) new_output, new_states = step_function( x, tuple(past_values) + tuple(rnn_constants)) if getattr(new_output, '_uses_learning_phase', False): global uses_learning_phase uses_learning_phase = True if m is not None: new_states = [C.element_select(m, n, s) for n, s in zip(new_states, past_values)] n_s = [] for o, p in zip(new_states, place_holders): n_s.append(o.replace_placeholders({p: o.output})) if len(n_s) > 0: new_output = n_s[0] return new_output, n_s final_output, final_states = _recurrence(rnn_inputs, states, mask) last_output = C.sequence.last(final_output) last_states = [C.sequence.last(s) for s in final_states] if need_convert: final_output = C.sequence.unpack(final_output, 0, no_mask_output=True) if num_time_step is not None and num_time_step is not C.FreeDimension: final_output = _reshape_sequence(final_output, num_time_step) f_stats = [] for l_s, i_s in zip(last_states, initial_states): if _get_dynamic_axis_num(i_s) == 0 and _get_dynamic_axis_num(l_s) == 1: if hasattr(C, 'unpack_batch'): f_stats.append(C.unpack_batch(l_s)) else: f_stats.append(C.user_function(ConvertToStatic(l_s, batch_size=i_s.shape[0]))) else: f_stats.append(l_s) last_output._uses_learning_phase = uses_learning_phase return last_output, final_output, f_stats def has_seq_axis(x): return hasattr(x, 'dynamic_axes') and len(x.dynamic_axes) > 1 def l2_normalize(x, axis=None): axis = [axis] axis = _normalize_axis(axis, x) norm = C.sqrt(C.reduce_sum(C.square(x), axis=axis[0])) return x / norm def hard_sigmoid(x): x = (0.2 * x) + 0.5 x = C.clip(x, 0.0, 1.0) return x def conv1d(x, kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) if padding == 'causal': # causal (dilated) convolution: left_pad = dilation_rate * (kernel.shape[0] - 1) x = temporal_padding(x, (left_pad, 0)) padding = 'valid' if data_format == 'channels_last': x = C.swapaxes(x, 0, 1) kernel = C.swapaxes(kernel, 0, 2) padding = _preprocess_border_mode(padding) strides = [strides] x = C.convolution( kernel, x, strides=tuple(strides), auto_padding=[ False, padding]) if data_format == 'channels_last': x = C.swapaxes(x, 0, 1) return x def conv2d(x, kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): if data_format is None: data_format = image_data_format() if 
data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) x = _preprocess_conv2d_input(x, data_format) kernel = _preprocess_conv2d_kernel(kernel, data_format) padding = _preprocess_border_mode(padding) if dilation_rate == (1, 1): strides = (1,) + strides x = C.convolution( kernel, x, strides, auto_padding=[ False, padding, padding]) else: assert dilation_rate[0] == dilation_rate[1] assert strides == (1, 1), 'Invalid strides for dilated convolution' x = C.convolution( kernel, x, strides=dilation_rate[0], auto_padding=[ False, padding, padding]) return _postprocess_conv2d_output(x, data_format) def separable_conv1d(x, depthwise_kernel, pointwise_kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): raise NotImplementedError def separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) x = _preprocess_conv2d_input(x, data_format) depthwise_kernel = _preprocess_conv2d_kernel(depthwise_kernel, data_format) depthwise_kernel = C.reshape(C.transpose(depthwise_kernel, (1, 0, 2, 3)), (-1, 1) + depthwise_kernel.shape[2:]) pointwise_kernel = _preprocess_conv2d_kernel(pointwise_kernel, data_format) padding = _preprocess_border_mode(padding) if dilation_rate == (1, 1): strides = (1,) + strides x = C.convolution(depthwise_kernel, x, strides=strides, auto_padding=[False, padding, padding], groups=x.shape[0]) x = C.convolution(pointwise_kernel, x, strides=(1, 1, 1), auto_padding=[False]) else: if dilation_rate[0] != dilation_rate[1]: raise ValueError('CNTK Backend: non-square dilation_rate is ' 'not supported.') if strides != (1, 1): raise ValueError('Invalid strides for dilated convolution') x = C.convolution(depthwise_kernel, x, strides=dilation_rate[0], auto_padding=[False, padding, padding]) x = C.convolution(pointwise_kernel, x, strides=(1, 1, 1), auto_padding=[False]) return _postprocess_conv2d_output(x, data_format) def depthwise_conv2d(x, depthwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) x = _preprocess_conv2d_input(x, data_format) depthwise_kernel = _preprocess_conv2d_kernel(depthwise_kernel, data_format) depthwise_kernel = C.reshape(C.transpose(depthwise_kernel, (1, 0, 2, 3)), (-1, 1) + depthwise_kernel.shape[2:]) padding = _preprocess_border_mode(padding) if dilation_rate == (1, 1): strides = (1,) + strides x = C.convolution(depthwise_kernel, x, strides=strides, auto_padding=[False, padding, padding], groups=x.shape[0]) else: if dilation_rate[0] != dilation_rate[1]: raise ValueError('CNTK Backend: non-square dilation_rate is ' 'not supported.') if strides != (1, 1): raise ValueError('Invalid strides for dilated convolution') x = C.convolution(depthwise_kernel, x, strides=dilation_rate[0], auto_padding=[False, padding, padding], groups=x.shape[0]) return _postprocess_conv2d_output(x, data_format) def conv3d(x, kernel, strides=(1, 1, 1), padding='valid', data_format=None, dilation_rate=(1, 1, 1)): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + 
str(data_format)) x = _preprocess_conv3d_input(x, data_format) kernel = _preprocess_conv3d_kernel(kernel, data_format) padding = _preprocess_border_mode(padding) strides = strides + (strides[0],) x = C.convolution( kernel, x, strides, auto_padding=[ False, padding, padding, padding]) return _postprocess_conv3d_output(x, data_format) def conv3d_transpose(x, kernel, output_shape, strides=(1, 1, 1), padding='valid', data_format=None): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) x = _preprocess_conv3d_input(x, data_format) kernel = _preprocess_conv3d_kernel(kernel, data_format) padding = _preprocess_border_mode(padding) strides = (1,) + strides # cntk output_shape does not include batch axis output_shape = output_shape[1:] # in keras2, need handle output shape in different format if data_format == 'channels_last': shape = list(output_shape) shape[0] = output_shape[3] shape[1] = output_shape[0] shape[2] = output_shape[1] shape[3] = output_shape[2] output_shape = tuple(shape) x = C.convolution_transpose( kernel, x, strides, auto_padding=[ False, padding, padding, padding], output_shape=output_shape) return _postprocess_conv3d_output(x, data_format) def pool2d(x, pool_size, strides=(1, 1), padding='valid', data_format=None, pool_mode='max'): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) padding = _preprocess_border_mode(padding) strides = strides pool_size = pool_size x = _preprocess_conv2d_input(x, data_format) if pool_mode == 'max': x = C.pooling( x, C.MAX_POOLING, pool_size, strides, auto_padding=[padding]) elif pool_mode == 'avg': x = C.pooling( x, C.AVG_POOLING, pool_size, strides, auto_padding=[padding]) else: raise ValueError('Invalid pooling mode: ' + str(pool_mode)) return _postprocess_conv2d_output(x, data_format) def pool3d(x, pool_size, strides=(1, 1, 1), padding='valid', data_format=None, pool_mode='max'): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) padding = _preprocess_border_mode(padding) x = _preprocess_conv3d_input(x, data_format) if pool_mode == 'max': x = C.pooling( x, C.MAX_POOLING, pool_size, strides, auto_padding=[padding]) elif pool_mode == 'avg': x = C.pooling( x, C.AVG_POOLING, pool_size, strides, auto_padding=[padding]) else: raise ValueError('Invalid pooling mode: ' + str(pool_mode)) return _postprocess_conv3d_output(x, data_format) def relu(x, alpha=0., max_value=None): if alpha != 0.: negative_part = C.relu(-x) x = C.relu(x) if max_value is not None: x = C.clip(x, 0.0, max_value) if alpha != 0.: x -= alpha * negative_part return x def dropout(x, level, noise_shape=None, seed=None): if level < 0. or level >= 1: raise ValueError('CNTK Backend: Invalid dropout level %s, ' 'must be in interval [0, 1].' 
% level) return C.dropout(x, level) def batch_flatten(x): # cntk's batch axis is not in shape, # so just flatten all the dim in x.shape dim = np.prod(x.shape) x = C.reshape(x, (-1,)) x._keras_shape = (None, dim) return x def softmax(x, axis=-1): return C.softmax(x, axis=axis) def softplus(x): return C.softplus(x) def softsign(x): return x / (1 + C.abs(x)) def categorical_crossentropy(target, output, from_logits=False): if from_logits: result = C.cross_entropy_with_softmax(output, target) # cntk's result shape is (batch, 1), while keras expect (batch, ) return C.reshape(result, ()) else: # scale preds so that the class probas of each sample sum to 1 output /= C.reduce_sum(output, axis=-1) # avoid numerical instability with epsilon clipping output = C.clip(output, epsilon(), 1.0 - epsilon()) return -sum(target * C.log(output), axis=-1) def sparse_categorical_crossentropy(target, output, from_logits=False): target = C.one_hot(target, output.shape[-1]) target = C.reshape(target, output.shape) return categorical_crossentropy(target, output, from_logits) class Function(object): def __init__(self, inputs, outputs, updates=[], **kwargs): self.placeholders = inputs self.trainer = None self.unrelated_updates = None self.updates = updates if len(updates) > 0: assert len(outputs) > 0 self.loss = outputs[0] # need group update by gradient place holder u_ops = [] unrelated_updates = [] for update in updates: if isinstance(update, tuple): if len(update) != 2: raise NotImplementedError else: u = C.assign(update[0], update[1]) else: u = update if len(u.arguments) == 0: u_ops.append(u) else: unrelated_updates.append(u) update_func = C.combine([u.output for u in u_ops]) grads = update_func.find_all_with_name('keras_grad_placeholder') u_list = [] p_list = [] for g in grads: if g in grad_parameter_dict: p_list.append(grad_parameter_dict[g]) u_list.append(g) else: raise ValueError( 'CNTK backend: when constructing trainer, ' 'found gradient node `%s` which is not ' 'related to any parameters in the model. ' 'Please double check how the gradient node ' 'is constructed.' 
% g) if len(u_list) > 0: learner = C.cntk_py.universal_learner(p_list, u_list, update_func) criterion = ( outputs[0], outputs[1]) if len(outputs) > 1 else ( outputs[0], ) self.trainer = C.trainer.Trainer( outputs[0], criterion, [learner]) self.trainer_output = tuple([f.output for f in criterion]) elif len(u_ops) > 0: unrelated_updates.extend(u_ops) if len(unrelated_updates) > 0: self.unrelated_updates = C.combine([_.output for _ in unrelated_updates]) if self.trainer is None: self.metrics_outputs = [f.output for f in outputs] self.metrics_func = C.combine(self.metrics_outputs) # cntk only could handle loss and 1 metric in trainer, for metrics more # than 2, need manual eval elif len(outputs) > 2: self.metrics_outputs = [f.output for f in outputs[2:]] self.metrics_func = C.combine(self.metrics_outputs) else: self.metrics_func = None @staticmethod def _is_input_shape_compatible(input, placeholder): if hasattr(input, 'shape') and hasattr(placeholder, 'shape'): num_dynamic = get_num_dynamic_axis(placeholder) input_shape = input.shape[num_dynamic:] placeholder_shape = placeholder.shape for i, p in zip(input_shape, placeholder_shape): if i != p and p != C.InferredDimension and p != C.FreeDimension: return False return True def __call__(self, inputs): global _LEARNING_PHASE_PLACEHOLDER global _LEARNING_PHASE assert isinstance(inputs, (list, tuple)) feed_dict = {} for tensor, value in zip(self.placeholders, inputs): # cntk only support calculate on float, do auto cast here if (hasattr(value, 'dtype') and value.dtype != np.float32 and value.dtype != np.float64): value = value.astype(np.float32) if tensor == _LEARNING_PHASE_PLACEHOLDER: _LEARNING_PHASE_PLACEHOLDER.value = np.asarray(value) else: # in current version cntk can't support input with variable # length. Will support it in next release. if not self._is_input_shape_compatible(value, tensor): raise ValueError('CNTK backend: The placeholder has been resolved ' 'to shape `%s`, but input shape is `%s`. Currently ' 'CNTK can not take variable length inputs. Please ' 'pass inputs that have a static shape.' % (str(tensor.shape), str(value.shape))) feed_dict[tensor] = value updated = [] if self.trainer is not None: input_dict = {} for argument in self.loss.arguments: if argument in feed_dict: input_dict[argument] = feed_dict[argument] else: raise ValueError( 'CNTK backend: argument %s is not found in inputs. ' 'Please double check the model and inputs in ' '`train_function`.' % argument.name) result = self.trainer.train_minibatch( input_dict, self.trainer_output) assert(len(result) == 2) outputs = result[1] for o in self.trainer_output: updated.append(outputs[o]) if self.metrics_func is not None: input_dict = {} for argument in self.metrics_func.arguments: if argument in feed_dict: input_dict[argument] = feed_dict[argument] else: raise ValueError('CNTK backend: metrics argument %s ' 'is not found in inputs. Please double ' 'check the model and inputs.' % argument.name) # Some ops (like dropout) won't be applied during "eval" in cntk. # They only evaluated in training phase. To make it work, call # "forward" method to let cntk know we want to evaluate them.from # But the assign ops won't be executed under this mode, that's why # we need this check. 
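            # In other words: use forward() only when there are no standalone
            # assign ops and the learning phase resolves to training (1), so that
            # training-only ops such as dropout are actually applied; otherwise
            # fall back to eval(), which runs in inference mode.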
if (self.unrelated_updates is None and (_LEARNING_PHASE_PLACEHOLDER.value == 1.0 or _LEARNING_PHASE == 1)): _, output_values = self.metrics_func.forward( input_dict, self.metrics_func.outputs, (self.metrics_func.outputs[0],), as_numpy=False) else: output_values = self.metrics_func.eval(input_dict, as_numpy=False) if isinstance(output_values, dict): for o in self.metrics_outputs: value = output_values[o] v = value.asarray() updated.append(v) else: v = output_values.asarray() for o in self.metrics_outputs: updated.append(v) if self.unrelated_updates is not None: input_dict = {} for argument in self.unrelated_updates.arguments: if argument in feed_dict: input_dict[argument] = feed_dict[argument] else: raise ValueError( 'CNTK backend: assign ops argument %s ' 'is not found in inputs. Please double ' 'check the model and inputs.' % argument.name) self.unrelated_updates.eval(input_dict, as_numpy=False) return updated def function(inputs, outputs, updates=[], **kwargs): return Function(inputs, outputs, updates=updates, **kwargs) def temporal_padding(x, padding=(1, 1)): assert len(padding) == 2 num_dynamic_axis = _get_dynamic_axis_num(x) base_shape = x.shape if num_dynamic_axis > 0: assert len(base_shape) == 2 if hasattr(C, 'pad'): x = C.pad(x, pattern=[padding, (0, 0)]) else: x = _padding(x, padding, 0) else: assert len(base_shape) == 3 if hasattr(C, 'pad'): x = C.pad(x, pattern=[(0, 0), padding, (0, 0)]) else: x = _padding(x, padding, 1) return x def _padding(x, pattern, axis): base_shape = x.shape if b_any([dim < 0 for dim in base_shape]): raise ValueError('CNTK Backend: padding input tensor with ' 'shape `%s` contains non-specified dimension, ' 'which is not supported. Please give fixed ' 'dimension to enable padding.' % base_shape) if pattern[0] > 0: prefix_shape = list(base_shape) prefix_shape[axis] = pattern[0] prefix_shape = tuple(prefix_shape) x = C.splice(C.constant(value=0, shape=prefix_shape), x, axis=axis) base_shape = x.shape if pattern[1] > 0: postfix_shape = list(base_shape) postfix_shape[axis] = pattern[1] postfix_shape = tuple(postfix_shape) x = C.splice(x, C.constant(value=0, shape=postfix_shape), axis=axis) return x def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None): assert len(padding) == 2 assert len(padding[0]) == 2 assert len(padding[1]) == 2 if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) num_dynamic_axis = _get_dynamic_axis_num(x) base_shape = x.shape if data_format == 'channels_first': if num_dynamic_axis > 0: assert len(base_shape) == 3 if hasattr(C, 'pad'): x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1])]) else: x = _padding(x, padding[0], 1) x = _padding(x, padding[1], 2) else: assert len(base_shape) == 4 if hasattr(C, 'pad'): x = C.pad(x, pattern=[[0, 0], [0, 0], list(padding[0]), list(padding[1])]) else: x = _padding(x, padding[0], 2) x = _padding(x, padding[1], 3) else: if num_dynamic_axis > 0: assert len(base_shape) == 3 if hasattr(C, 'pad'): x = C.pad(x, pattern=[list(padding[0]), list(padding[1]), [0, 0]]) else: x = _padding(x, padding[0], 0) x = _padding(x, padding[1], 1) else: assert len(base_shape) == 4 if hasattr(C, 'pad'): x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), [0, 0]]) else: x = _padding(x, padding[0], 1) x = _padding(x, padding[1], 2) return x def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None): assert len(padding) == 3 assert 
len(padding[0]) == 2 assert len(padding[1]) == 2 assert len(padding[2]) == 2 if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) num_dynamic_axis = _get_dynamic_axis_num(x) base_shape = x.shape if data_format == 'channels_first': if num_dynamic_axis > 0: assert len(base_shape) == 4 if hasattr(C, 'pad'): x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), list(padding[2])]) else: x = _padding(x, padding[0], 1) x = _padding(x, padding[1], 2) x = _padding(x, padding[2], 3) else: assert len(base_shape) == 5 if hasattr(C, 'pad'): x = C.pad(x, pattern=[[0, 0], [0, 0], list(padding[0]), list(padding[1]), list(padding[2])]) else: x = _padding(x, padding[0], 2) x = _padding(x, padding[1], 3) x = _padding(x, padding[2], 4) else: if num_dynamic_axis > 0: assert len(base_shape) == 4 if hasattr(C, 'pad'): x = C.pad(x, pattern=[list(padding[0]), list(padding[1]), list(padding[2]), [0, 0]]) else: x = _padding(x, padding[0], 0) x = _padding(x, padding[1], 1) x = _padding(x, padding[2], 2) else: assert len(base_shape) == 5 if hasattr(C, 'pad'): x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), list(padding[2]), [0, 0]]) else: x = _padding(x, padding[0], 1) x = _padding(x, padding[1], 2) x = _padding(x, padding[2], 3) return x def one_hot(indices, num_classes): return C.one_hot(indices, num_classes) def get_value(x): if isinstance( x, C.variables.Parameter) or isinstance( x, C.variables.Constant): return x.value else: return eval(x) def batch_get_value(xs): result = [] for x in xs: if (isinstance(x, C.variables.Parameter) or isinstance(x, C.variables.Constant)): result.append(x.value) else: result.append(eval(x)) return result def set_value(x, value): if (isinstance(x, C.variables.Parameter) or isinstance(x, C.variables.Constant)): if isinstance(value, (float, int)): value = np.full(x.shape, value, dtype=floatx()) x.value = value else: raise NotImplementedError def print_tensor(x, message=''): return C.user_function( LambdaFunc(x, when=lambda x: True, execute=lambda x: print(message))) def batch_set_value(tuples): for t in tuples: x = t[0] value = t[1] if isinstance(value, np.ndarray) is False: value = np.asarray(value) if isinstance(x, C.variables.Parameter): x.value = value else: raise NotImplementedError def stop_gradient(variables): if isinstance(variables, (list, tuple)): return map(C.stop_gradient, variables) else: return C.stop_gradient(variables) def switch(condition, then_expression, else_expression): ndim_cond = ndim(condition) ndim_expr = ndim(then_expression) if ndim_cond > ndim_expr: raise ValueError('Rank of condition should be less' ' than or equal to rank of then and' ' else expressions. 
ndim(condition)=' + str(ndim_cond) + ', ndim(then_expression)' '=' + str(ndim_expr)) elif ndim_cond < ndim_expr: shape_expr = int_shape(then_expression) ndim_diff = ndim_expr - ndim_cond for i in range(ndim_diff): condition = expand_dims(condition) condition = tile(condition, shape_expr[ndim_cond + i]) return C.element_select(condition, then_expression, else_expression) def elu(x, alpha=1.): res = C.elu(x) if alpha == 1: return res else: return C.element_select(C.greater(x, 0), res, alpha * res) def in_top_k(predictions, targets, k): _targets = C.one_hot(targets, predictions.shape[-1]) result = C.classification_error(predictions, _targets, topN=k) return 1 - C.reshape(result, shape=()) def conv2d_transpose(x, kernel, output_shape, strides=(1, 1), padding='valid', data_format=None): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) x = _preprocess_conv2d_input(x, data_format) kernel = _preprocess_conv2d_kernel(kernel, data_format) padding = _preprocess_border_mode(padding) strides = (1,) + strides # cntk output_shape does not include batch axis output_shape = output_shape[1:] # in keras2, need handle output shape in different format if data_format == 'channels_last': shape = list(output_shape) shape[0] = output_shape[2] shape[1] = output_shape[0] shape[2] = output_shape[1] output_shape = tuple(shape) x = C.convolution_transpose( kernel, x, strides, auto_padding=[ False, padding, padding], output_shape=output_shape) return _postprocess_conv2d_output(x, data_format) def identity(x, name=None): if name is None: name = '%s_alias' % x.name return C.alias(x, name=name) def _preprocess_conv2d_input(x, data_format): if data_format == 'channels_last': # TF uses the last dimension as channel dimension, # instead of the 2nd one. # TH input shape: (samples, input_depth, rows, cols) # TF input shape: (samples, rows, cols, input_depth) x = C.transpose(x, (2, 0, 1)) return x def _preprocess_conv2d_kernel(kernel, data_format): # As of Keras 2.0.0, all kernels are normalized # on the format `(rows, cols, input_depth, depth)`, # independently of `data_format`. # CNTK expects `(depth, input_depth, rows, cols)`. kernel = C.transpose(kernel, (3, 2, 0, 1)) return kernel def _preprocess_border_mode(padding): if padding == 'same': padding = True elif padding == 'valid': padding = False else: raise ValueError('Invalid border mode: ' + str(padding)) return padding def _postprocess_conv2d_output(x, data_format): if data_format == 'channels_last': x = C.transpose(x, (1, 2, 0)) return x def _preprocess_conv3d_input(x, data_format): if data_format == 'channels_last': # TF uses the last dimension as channel dimension, # instead of the 2nd one. 
# TH input shape: (samples, input_depth, conv_dim1, conv_dim2, conv_dim3) # TF input shape: (samples, conv_dim1, conv_dim2, conv_dim3, # input_depth) x = C.transpose(x, (3, 0, 1, 2)) return x def _preprocess_conv3d_kernel(kernel, dim_ordering): kernel = C.transpose(kernel, (4, 3, 0, 1, 2)) return kernel def _postprocess_conv3d_output(x, dim_ordering): if dim_ordering == 'channels_last': x = C.transpose(x, (1, 2, 3, 0)) return x def _get_dynamic_axis_num(x): if hasattr(x, 'dynamic_axes'): return len(x.dynamic_axes) else: return 0 def _contain_seqence_axis(x): if _get_dynamic_axis_num(x) > 1: return x.dynamic_axes[1] == C.Axis.default_dynamic_axis() else: return False def get_num_dynamic_axis(x): return _get_dynamic_axis_num(x) def _reduce_on_axis(x, axis, reduce_fun_name): if isinstance(axis, list): for a in axis: if isinstance(a, C.Axis) \ and a != C.Axis.default_batch_axis() \ and hasattr(C.sequence, reduce_fun_name): x = getattr(C.sequence, reduce_fun_name)(x, a) else: x = getattr(C, reduce_fun_name)(x, a) else: x = getattr(C, reduce_fun_name)(x, axis) return x def _reshape_sequence(x, time_step): tmp_shape = list(int_shape(x)) tmp_shape[1] = time_step return reshape(x, tmp_shape) def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) stride = strides[0] kernel_shape = int_shape(kernel) output_length, feature_dim, filters = kernel_shape xs = [] for i in range(output_length): slice_length = slice(i * stride, i * stride + kernel_size[0]) xs.append(reshape(inputs[:, slice_length, :], (-1, 1, feature_dim))) x_aggregate = concatenate(xs, axis=1) # transpose kernel to output_filters first, to apply broadcast weight = permute_dimensions(kernel, (2, 0, 1)) # Shape: (batch, filters, output_length, input_length * kernel_size) output = x_aggregate * weight # Shape: (batch, filters, output_length) output = sum(output, axis=3) # Shape: (batch, output_length, filters) return permute_dimensions(output, (0, 2, 1)) def local_conv2d(inputs, kernel, kernel_size, strides, output_shape, data_format=None): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) stride_row, stride_col = strides output_row, output_col = output_shape kernel_shape = int_shape(kernel) _, feature_dim, filters = kernel_shape xs = [] for i in range(output_row): for j in range(output_col): slice_row = slice(i * stride_row, i * stride_row + kernel_size[0]) slice_col = slice(j * stride_col, j * stride_col + kernel_size[1]) if data_format == 'channels_first': xs.append(reshape(inputs[:, :, slice_row, slice_col], (-1, 1, feature_dim))) else: xs.append(reshape(inputs[:, slice_row, slice_col, :], (-1, 1, feature_dim))) x_aggregate = concatenate(xs, axis=1) # transpose kernel to put filters first weight = permute_dimensions(kernel, (2, 0, 1)) # shape: batch, filters, output_length, input_length * kernel_size output = x_aggregate * weight # shape: batch, filters, output_length output = sum(output, axis=3) # shape: batch, filters, row, col output = reshape(output, (-1, filters, output_row, output_col)) if data_format == 'channels_last': # shape: batch, row, col, filters output = permute_dimensions(output, (0, 2, 3, 1)) return output def reverse(x, axes): if isinstance(axes, int): axes = [axes] cntk_axes = _normalize_axis(axes, x) 
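    # With begin/end indices of 0 and a stride of -1, C.slice below traverses the
    # full extent of each selected axis backwards, i.e. it reverses those axes.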
begin_index = [0 for _ in cntk_axes] end_index = [0 for _ in cntk_axes] strides = [-1 for _ in cntk_axes] return C.slice(x, cntk_axes, begin_index, end_index, strides) def _reshape_batch(x, shape): # there is a bug in cntk 2.1's unpack_batch implementation if hasattr(C, 'unpack_batch') and _get_cntk_version() >= 2.2: const_a = C.unpack_batch(x) const_a = C.reshape(const_a, shape) return C.to_batch(const_a) else: return C.user_function(ReshapeBatch(x, shape[1:])) def _get_cntk_version(): version = C.__version__ if version.endswith('+'): version = version[:-1] # for hot fix, ignore all the . except the first one. if len(version) > 2 and version[1] == '.': version = version[:2] + version[2:].replace('.', '') try: return float(version) except: warnings.warn( 'CNTK backend warning: CNTK version not detected. ' 'Will using CNTK 2.0 GA as default.') return float(2.0) class ReshapeBatch(C.ops.functions.UserFunction): def __init__(self, input, shape, name='reshape_with_batch'): super(ReshapeBatch, self).__init__([input], as_numpy=False, name=name) self.from_shape = input.shape self.target_shape = shape def infer_outputs(self): batch_axis = C.Axis.default_batch_axis() return [ C.output_variable( self.target_shape, self.inputs[0].dtype, [batch_axis])] def forward(self, arguments, device=None, outputs_to_retain=None): num_element = arguments.shape()[0] * np.prod(np.asarray(self.from_shape)) num_static_element = np.prod(np.asarray(self.target_shape)) num_batch = int(num_element / num_static_element) result = arguments.data().as_shape((num_batch,) + self.target_shape) return None, C.cntk_py.Value(result) def backward(self, state, root_gradients): grad_array_view = root_gradients.data() num_element = root_gradients.shape()[0] * np.prod(np.asarray(self.target_shape)) num_static_element = np.prod(np.asarray(self.from_shape)) num_old_batch = int(num_element / num_static_element) return C.cntk_py.Value( grad_array_view.as_shape( (num_old_batch,) + self.from_shape)) class ConvertToBatch(C.ops.functions.UserFunction): """Converts input first axis to CNTK batch axis. We may introduce this operation in CNTK native implementation later. # Arguments inputs: a cntk variable (parameter/constant) name: name of this node """ def __init__(self, input, name='convert_to_batch'): super(ConvertToBatch, self).__init__([input], as_numpy=False, name=name) def infer_outputs(self): batch_axis = C.Axis.default_batch_axis() return [ C.output_variable( self.inputs[0].shape[1:], self.inputs[0].dtype, [batch_axis])] def forward(self, arguments, device=None, outputs_to_retain=None): return None, C.cntk_py.Value(arguments.data()) def backward(self, state, root_gradients): return C.cntk_py.Value(root_gradients.data()) class ConvertToStatic(C.ops.functions.UserFunction): """Converts input first axis to CNTK static axis. We may introduce this operation in CNTK native implementation later. # Arguments inputs: a cntk tensor which has batch axis batch_size: size of batch axis. name: name of this node. 
""" def __init__(self, input, batch_size, name='convert_to_static'): super(ConvertToStatic, self).__init__([input], as_numpy=False, name=name) self.target_shape = (batch_size,) + input.shape def infer_outputs(self): return [ C.output_variable( self.target_shape, self.inputs[0].dtype, [])] def forward(self, arguments, device=None, outputs_to_retain=None): return None, C.cntk_py.Value(arguments.data()) def backward(self, state, root_gradients): return C.cntk_py.Value(root_gradients.data()) class LambdaFunc(C.ops.functions.UserFunction): def __init__(self, arg, when=lambda arg: True, execute=lambda arg: print(arg), name=''): self.when = when self.execute = execute super(LambdaFunc, self).__init__([arg], name=name) def infer_outputs(self): return [ C.output_variable( self.inputs[0].shape, self.inputs[0].dtype, self.inputs[0].dynamic_axes)] def forward(self, argument, device=None, outputs_to_retain=None): if self.when(argument): self.execute(argument) return None, argument def backward(self, state, root_gradients): return root_gradients
2.28125
2
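A standalone sketch of the version-normalisation rule used by _get_cntk_version in the CNTK backend snippet above: a trailing '+' is dropped and every '.' after the first is removed before parsing. No CNTK import is needed; the function name and fallback value mirror the backend's behaviour but are otherwise illustrative.

def normalize_version(version: str) -> float:
    # Drop a trailing '+' (nightly builds) before parsing.
    if version.endswith('+'):
        version = version[:-1]
    # Keep only the first dot, e.g. '2.3.1' -> '2.31'.
    if len(version) > 2 and version[1] == '.':
        version = version[:2] + version[2:].replace('.', '')
    try:
        return float(version)
    except ValueError:
        return 2.0  # same fallback as the backend: assume CNTK 2.0 GA

assert normalize_version('2.3.1+') == 2.31
assert normalize_version('2.0') == 2.0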
Project Files/Prebuilt tools/twitter/Twitter/pylib/oauthlib/oauth1/rfc5849/endpoints/resource.py
nVoid/Yale-TouchDesigner-April2016
39
1530
# -*- coding: utf-8 -*- """ oauthlib.oauth1.rfc5849.endpoints.resource ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module is an implementation of the resource protection provider logic of OAuth 1.0 RFC 5849. """ from __future__ import absolute_import, unicode_literals from oauthlib.common import log from .base import BaseEndpoint from .. import errors class ResourceEndpoint(BaseEndpoint): """An endpoint responsible for protecting resources. Typical use is to instantiate with a request validator and invoke the ``validate_protected_resource_request`` in a decorator around a view function. If the request is valid, invoke and return the response of the view. If invalid create and return an error response directly from the decorator. See :doc:`/oauth1/validator` for details on which validator methods to implement for this endpoint. An example decorator:: from functools import wraps from your_validator import your_validator from oauthlib.oauth1 import ResourceEndpoint endpoint = ResourceEndpoint(your_validator) def require_oauth(realms=None): def decorator(f): @wraps(f) def wrapper(request, *args, **kwargs): v, r = provider.validate_protected_resource_request( request.url, http_method=request.method, body=request.data, headers=request.headers, realms=realms or []) if v: return f(*args, **kwargs) else: return abort(403) """ def validate_protected_resource_request(self, uri, http_method='GET', body=None, headers=None, realms=None): """Create a request token response, with a new request token if valid. :param uri: The full URI of the token request. :param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc. :param body: The request body as a string. :param headers: The request headers as a dict. :param realms: A list of realms the resource is protected under. This will be supplied to the ``validate_realms`` method of the request validator. :returns: A tuple of 2 elements. 1. True if valid, False otherwise. 2. An oauthlib.common.Request object. """ try: request = self._create_request(uri, http_method, body, headers) except errors.OAuth1Error: return False, None try: self._check_transport_security(request) self._check_mandatory_parameters(request) except errors.OAuth1Error: return False, request if not request.resource_owner_key: return False, request if not self.request_validator.check_access_token( request.resource_owner_key): return False, request if not self.request_validator.validate_timestamp_and_nonce( request.client_key, request.timestamp, request.nonce, request, access_token=request.resource_owner_key): return False, request # The server SHOULD return a 401 (Unauthorized) status code when # receiving a request with invalid client credentials. # Note: This is postponed in order to avoid timing attacks, instead # a dummy client is assigned and used to maintain near constant # time request verification. # # Note that early exit would enable client enumeration valid_client = self.request_validator.validate_client_key( request.client_key, request) if not valid_client: request.client_key = self.request_validator.dummy_client # The server SHOULD return a 401 (Unauthorized) status code when # receiving a request with invalid or expired token. # Note: This is postponed in order to avoid timing attacks, instead # a dummy token is assigned and used to maintain near constant # time request verification. 
# # Note that early exit would enable resource owner enumeration valid_resource_owner = self.request_validator.validate_access_token( request.client_key, request.resource_owner_key, request) if not valid_resource_owner: request.resource_owner_key = self.request_validator.dummy_access_token # Note that `realm`_ is only used in authorization headers and how # it should be interpreted is not included in the OAuth spec. # However they could be seen as a scope or realm to which the # client has access and as such every client should be checked # to ensure it is authorized access to that scope or realm. # .. _`realm`: http://tools.ietf.org/html/rfc2617#section-1.2 # # Note that early exit would enable client realm access enumeration. # # The require_realm indicates this is the first step in the OAuth # workflow where a client requests access to a specific realm. # This first step (obtaining request token) need not require a realm # and can then be identified by checking the require_resource_owner # flag and absence of realm. # # Clients obtaining an access token will not supply a realm and it will # not be checked. Instead the previously requested realm should be # transferred from the request token to the access token. # # Access to protected resources will always validate the realm but note # that the realm is now tied to the access token and not provided by # the client. valid_realm = self.request_validator.validate_realms(request.client_key, request.resource_owner_key, request, uri=request.uri, realms=realms) valid_signature = self._check_signature(request) # We delay checking validity until the very end, using dummy values for # calculations and fetching secrets/keys to ensure the flow of every # request remains almost identical regardless of whether valid values # have been supplied. This ensures near constant time execution and # prevents malicious users from guessing sensitive information v = all((valid_client, valid_resource_owner, valid_realm, valid_signature)) if not v: log.info("[Failure] request verification failed.") log.info("Valid client: %s", valid_client) log.info("Valid token: %s", valid_resource_owner) log.info("Valid realm: %s", valid_realm) log.info("Valid signature: %s", valid_signature) return v, request
2.609375
3
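The ResourceEndpoint docstring above sketches a decorator but stops before returning the inner functions; below is a completed, hedged version of that pattern. your_validator is the placeholder module named in the docstring, and the Flask objects are assumed to be available in the surrounding web app.

from functools import wraps

from flask import abort, request as flask_request
from oauthlib.oauth1 import ResourceEndpoint

from your_validator import your_validator  # placeholder from the docstring

endpoint = ResourceEndpoint(your_validator)

def require_oauth(realms=None):
    def decorator(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            valid, _request = endpoint.validate_protected_resource_request(
                flask_request.url,
                http_method=flask_request.method,
                body=flask_request.get_data(as_text=True),
                headers=dict(flask_request.headers),
                realms=realms or [])
            if valid:
                return f(*args, **kwargs)
            return abort(403)
        return wrapper
    return decorator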
python/ex_1.py
AymenSe/Geometric-operations-DIP
0
1531
<filename>python/ex_1.py #################################################### # # @ Authors : <NAME> # <NAME> # # @ Hint: you have to install all requirements # from requirements.txt # #################################################### import numpy as np import cv2 as cv import matplotlib.pyplot as plt # load the image onion_img = cv.imread("onion.png") # Store height and width and channels of the image row, col, chs = onion_img.shape # Store the spectral resolution dtype_img = onion_img.dtype # This will give you: uint8 def translation(img, trans): """ args: - img: absolute path to the image - trans: must be a tuple (row_trans, col_trans) """ # read the image image = cv.imread(img) # retrieve the height and the width height, width = image.shape[:2] # retrieve the params of translation row_trans, col_trans = trans # Create the translation matrix T = np.float32([[1, 0, col_trans], [0, 1, row_trans]]) # Apply the T matrix: T*M img_translation = cv.warpAffine(image, T, (width, height)) # show the images cv.imshow("Original Image", image) cv.imshow('Translation Image', img_translation) # Don't destroy the images until the user does cv.waitKey() cv.destroyAllWindows() # translate 20 pixels to the right translation("onion.png", (0, 20)) # translate 50 rows down and 100 columns to the right translation("onion.png", (50, 100)) # remove the pepper from the image using translations translation("onion.png", (40, 40))
3.375
3
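To make explicit what the 2x3 matrix built in translation() does to each pixel, here is a small NumPy-only check of the affine map that cv.warpAffine applies per coordinate; the sample point (10, 20) is illustrative.

import numpy as np

row_trans, col_trans = 50, 100
T = np.float32([[1, 0, col_trans],
                [0, 1, row_trans]])
x, y = 10, 20  # column 10, row 20 in the source image
x_new, y_new = T @ np.array([x, y, 1], dtype=np.float32)
print(x_new, y_new)  # 110.0 70.0 -> shifted 100 columns right and 50 rows down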
utils/hit_rate_utils.py
h-zcc/ref-nms
19
1532
<gh_stars>10-100 from utils.misc import calculate_iou, xywh_to_xyxy __all__ = ['NewHitRateEvaluator', 'CtxHitRateEvaluator'] class NewHitRateEvaluator: def __init__(self, refer, top_N=None, threshold=0.5): """Evaluate refexp-based hit rate. Args: refdb: `refdb` dict. split: Dataset split to evaluate on. top_N: Select top-N scoring proposals to evaluate. `None` means no selection. Default `None`. """ self.refer = refer self.top_N = top_N self.threshold = threshold def eval_hit_rate(self, split, proposal_dict, image_as_key=False): """Evaluate refexp-based hit rate. Args: proposal_dict: {exp_id or image_id: [{box: [4,], score: float}]}. image_as_key: Use image_id instead of exp_id as key, default `False`. Returns: proposal_per_ref: Number of proposals per refexp. hit_rate: Refexp-based hit rate of proposals. """ # Initialize counters num_hit = 0 num_proposal = 0 num_ref = 0 # NOTE: this is the number of refexp, not ref for ref_id in self.refer.getRefIds(split=split): ref = self.refer.Refs[ref_id] image_id = ref['image_id'] ann_id = ref['ann_id'] ann = self.refer.Anns[ann_id] gt_box = xywh_to_xyxy(ann['bbox']) for exp_id in ref['sent_ids']: # Get proposals if image_as_key: proposals = proposal_dict[image_id] else: proposals = proposal_dict[exp_id] # Rank and select proposals ranked_proposals = sorted(proposals, key=lambda p: p['score'], reverse=True)[:self.top_N] for proposal in ranked_proposals: if calculate_iou(gt_box, proposal['box']) > self.threshold: num_hit += 1 break num_proposal += len(ranked_proposals) num_ref += 1 proposal_per_ref = num_proposal / num_ref hit_rate = num_hit / num_ref return proposal_per_ref, hit_rate class CtxHitRateEvaluator: def __init__(self, refer, ctxdb, top_N=None, threshold=0.5): self.refer = refer self.ctxdb = ctxdb self.top_N = top_N self.threshold = threshold def eval_hit_rate(self, split, proposal_dict, image_as_key=False): """Evaluate refexp-based hit rate. Args: proposal_dict: {exp_id or image_id: [{box: [4,], score: float}]}. image_as_key: Use image_id instead of exp_id as key, default `False`. Returns: proposal_per_ref: Number of proposals per refexp. hit_rate: Refexp-based hit rate of proposals. """ # Initialize counters recall_list = [] avg_num_list = [] for exp_id, ctx in self.ctxdb[split].items(): exp_id = int(exp_id) if len(ctx['ctx']) == 0: continue # Get proposals if image_as_key: image_id = self.refer.sentToRef[exp_id]['image_id'] proposals = proposal_dict[image_id] else: proposals = proposal_dict[exp_id] # Rank and select proposals ranked_proposals = sorted(proposals, key=lambda p: p['score'], reverse=True)[:self.top_N] hit_num, ctx_num = 0, 0 for ctx_item in ctx['ctx']: ctx_num += 1 ctx_box = ctx_item['box'] for proposal in ranked_proposals: if calculate_iou(ctx_box, proposal['box']) > self.threshold: hit_num += 1 break recall_list.append(hit_num / ctx_num) avg_num_list.append(len(ranked_proposals)) return sum(avg_num_list) / len(avg_num_list), sum(recall_list) / len(recall_list)
2.296875
2
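The evaluators above import calculate_iou and xywh_to_xyxy from utils.misc, which is not shown here. The versions below are illustrative assumptions about their behaviour (xywh to corner format, standard intersection-over-union), included only to make the IoU threshold logic concrete.

def xywh_to_xyxy(box):
    # Assumed behaviour: [x, y, w, h] -> [x1, y1, x2, y2].
    x, y, w, h = box
    return [x, y, x + w, y + h]

def calculate_iou(box_a, box_b):
    # Both boxes are [x1, y1, x2, y2]; returns standard IoU.
    ix1, iy1 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    ix2, iy2 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    return inter / (area_a + area_b - inter) if inter > 0 else 0.0

print(calculate_iou([0, 0, 10, 10], [5, 5, 15, 15]))  # 25 / 175 = 0.142...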
LeetCode_ReorderDataLogFiles.py
amukher3/Problem_solutions
1
1533
# -*- coding: utf-8 -*- """ Created on Sat Aug 22 19:07:30 2020 @author: <NAME> """ class Solution: def reorderLogFiles(self, logs: List[str]) -> List[str]: letLog=[] digLog=[] for i in range(len(logs)): temp=[] temp=logs[i].split(' ') if temp[1].isdigit() is True: digLog.append(logs[i]) else: letLog.append(logs[i]) tempLetLog=[] for i in letLog: tempLetLog.append(' '.join(i.split(' ')[1:]+[i.split(' ')[0]])) tempLetLog=sorted(tempLetLog) letLog=[] for i in tempLetLog: tempPrime=i.split(' ')[:-1] temp=i.split(' ')[-1] letLog.append(' '.join([temp]+tempPrime)) return letLog+digLog
3.265625
3
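The solution above orders letter-logs by sorting the joined 'content identifier' strings, which can mis-order logs when one content string is a prefix of another. A compact alternative keys the sort on (content, identifier) directly and keeps digit-logs in their original order via sort stability:

def reorder_log_files(logs):
    def key(log):
        identifier, rest = log.split(' ', 1)
        # Letter-logs sort by (content, identifier); digit-logs share one key.
        return (0, rest, identifier) if rest[0].isalpha() else (1,)
    return sorted(logs, key=key)

print(reorder_log_files(["dig1 8 1 5 1", "let1 art can", "dig2 3 6",
                         "let2 own kit dig", "let3 art zero"]))
# ['let1 art can', 'let3 art zero', 'let2 own kit dig', 'dig1 8 1 5 1', 'dig2 3 6']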
saleor/core/transactions.py
fairhopeweb/saleor
15,337
1534
from contextlib import contextmanager from django.db import DatabaseError from ..core.tracing import traced_atomic_transaction @contextmanager def transaction_with_commit_on_errors(): """Perform a transaction and commit it even if a non-database error occurred, re-raising that error afterwards.""" error = None with traced_atomic_transaction(): try: yield except DatabaseError: raise except Exception as e: error = e if error: raise error
2.359375
2
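A hedged usage sketch of the context manager above: a non-database error raised inside the block does not roll the transaction back, it is re-raised only after the atomic block has committed. Order and notify_webhooks are stand-ins, not part of the module.

from saleor.core.transactions import transaction_with_commit_on_errors
from myapp.models import Order  # assumed model, for illustration only

def confirm(order_id):
    with transaction_with_commit_on_errors():
        Order.objects.filter(pk=order_id).update(status="confirmed")  # committed
        notify_webhooks(order_id)  # assumed helper; if it raises, the update is kept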
src/command_modules/azure-cli-policyinsights/azure/cli/command_modules/policyinsights/tests/latest/test_policyinsights_scenario.py
diberry/azure-cli
1
1535
<reponame>diberry/azure-cli # -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from azure.cli.testsdk import ScenarioTest, record_only @record_only() class PolicyInsightsTests(ScenarioTest): def test_policy_insights(self): top_clause = '--top 2' filter_clause = '--filter "isCompliant eq false"' apply_clause = '--apply "groupby((policyAssignmentId, resourceId), aggregate($count as numRecords))"' select_clause = '--select "policyAssignmentId, resourceId, numRecords"' order_by_clause = '--order-by "numRecords desc"' from_clause = '--from "2018-04-04T00:00:00"' to_clause = '--to "2018-05-22T00:00:00"' scopes = [ '-m "azgovtest4"', '', '-g "defaultresourcegroup-eus"', '--resource "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/eastusnsggroup/providers/microsoft.network/networksecuritygroups/eastusnsg/securityrules/allow-joba"', '--resource "omssecuritydevkeyvalut" --namespace "microsoft.keyvault" --resource-type "vaults" -g "omssecurityintresourcegroup"', '--resource "default" --namespace "microsoft.network" --resource-type "subnets" --parent "virtualnetworks/mms-wcus-vnet" -g "mms-wcus"', '-s "335cefd2-ab16-430f-b364-974a170eb1d5"', '-d "25bf1e2a-6004-47ad-9bd1-2a40dd6de016"', '-a "96e22f7846e94bb186ae3a01"', '-a "bc916e4f3ab54030822a11b3" -g "tipkeyvaultresourcegroup" ' ] for scope in scopes: events = self.cmd('az policy event list {} {} {} {} {} {} {} {}'.format( scope, from_clause, to_clause, filter_clause, apply_clause, select_clause, order_by_clause, top_clause)).get_output_in_json() assert len(events) >= 0 states = self.cmd('az policy state list {} {} {} {} {} {} {} {}'.format( scope, from_clause, to_clause, filter_clause, apply_clause, select_clause, order_by_clause, top_clause)).get_output_in_json() assert len(states) >= 0 summary = self.cmd('az policy state summarize {} {} {} {} {}'.format( scope, from_clause, to_clause, filter_clause, top_clause)).get_output_in_json() assert summary["results"] is not None assert len(summary["policyAssignments"]) >= 0 if len(summary["policyAssignments"]) > 0: assert summary["policyAssignments"][0]["results"] is not None assert len(summary["policyAssignments"][0]["policyDefinitions"]) >= 0 if len(summary["policyAssignments"][0]["policyDefinitions"]) > 0: assert summary["policyAssignments"][0]["policyDefinitions"][0]["results"] is not None
1.804688
2
tests/prep_post/test.py
Aslic/rmats_turbo_4.1.0
0
1536
<reponame>Aslic/rmats_turbo_4.1.0 import glob import os.path import subprocess import sys import unittest import tests.bam import tests.base_test import tests.gtf import tests.output_parser as output_parser import tests.test_config import tests.util class Test(tests.base_test.BaseTest): def setUp(self): super().setUp() self._test_base_dir = tests.test_config.TEST_BASE_DIR self._test_dir = os.path.join(self._test_base_dir, 'prep_post') self._generated_input_dir = os.path.join(self._test_dir, 'generated_input') self._out_dir = os.path.join(self._test_dir, 'out') self._prep_1_tmp_dir = os.path.join(self._test_dir, 'tmp_prep_1') self._prep_2_tmp_dir = os.path.join(self._test_dir, 'tmp_prep_2') self._post_tmp_dir = os.path.join(self._test_dir, 'tmp_post') self._dup_input_bam_tmp_dir = os.path.join(self._test_dir, 'tmp_dup_input_bam') self._dup_prep_bam_tmp_dir = os.path.join(self._test_dir, 'tmp_dup_prep_bam') self._miss_input_bam_tmp_dir = os.path.join(self._test_dir, 'tmp_miss_input_bam') self._miss_prep_bam_tmp_dir = os.path.join(self._test_dir, 'tmp_miss_prep_bam') tests.util.recreate_dirs([ self._generated_input_dir, self._out_dir, self._prep_1_tmp_dir, self._prep_2_tmp_dir, self._post_tmp_dir, self._dup_input_bam_tmp_dir, self._dup_prep_bam_tmp_dir, self._miss_input_bam_tmp_dir, self._miss_prep_bam_tmp_dir, self._command_output_dir() ]) self._read_type = 'paired' self._read_length = 50 self._sample_1_bams_path = os.path.join(self._generated_input_dir, 'b1.txt') self._sample_2_bams_path = os.path.join(self._generated_input_dir, 'b2.txt') sample_1_bam_replicate_template = os.path.join( self._generated_input_dir, 'sample_1_rep_{}.bam') sample_2_bam_replicate_template = os.path.join( self._generated_input_dir, 'sample_2_rep_{}.bam') self._sample_1_bams = self._create_sample_1_bams( self._sample_1_bams_path, sample_1_bam_replicate_template) self._sample_2_bams = self._create_sample_2_bams( self._sample_2_bams_path, sample_2_bam_replicate_template) self._gtf_path = os.path.join(self._generated_input_dir, 'test.gtf') self._gtf = self._create_gtf(self._gtf_path) self._sub_steps = [ 'prep_1', 'inte_1_fail', 'inte_1_pass', 'prep_2', 'inte_2_fail', 'inte_2_pass', 'post', 'duplicate_input_bam', 'duplicate_prep_bam', 'missing_input_bam', 'missing_prep_bam', ] self._sub_step = None def test(self): for sub_step in self._sub_steps: self._sub_step = sub_step self._setup_sub_step() self._run_test() def _command_output_dir(self): return os.path.join(self._test_dir, 'command_output') def _rmats_arguments(self): arguments = [ '--gtf', self._gtf_path, '--od', self._out_dir, '-t', self._read_type, '--readLength', str(self._read_length), ] if self._sub_step == 'prep_1': arguments.extend([ '--tmp', self._prep_1_tmp_dir, '--b1', self._sample_1_bams_path, '--task', 'prep', ]) elif self._sub_step == 'inte_1_fail': arguments.extend([ '--tmp', self._post_tmp_dir, '--b1', self._sample_1_bams_path, '--b2', self._sample_2_bams_path, '--task', 'inte', ]) elif self._sub_step == 'inte_1_pass': arguments.extend([ '--tmp', self._post_tmp_dir, '--b1', self._sample_1_bams_path, '--task', 'inte', '--statoff', ]) elif self._sub_step == 'prep_2': arguments.extend([ '--tmp', self._prep_2_tmp_dir, '--b1', self._sample_2_bams_path, '--task', 'prep', ]) elif self._sub_step == 'inte_2_fail': arguments.extend([ '--tmp', self._post_tmp_dir, '--b1', self._sample_2_bams_path, '--task', 'inte', '--statoff', ]) elif self._sub_step == 'inte_2_pass': arguments.extend([ '--tmp', self._post_tmp_dir, '--b1', self._sample_1_bams_path, '--b2', 
self._sample_2_bams_path, '--task', 'inte', ]) elif self._sub_step == 'post': arguments.extend([ '--tmp', self._post_tmp_dir, '--b1', self._sample_1_bams_path, '--b2', self._sample_2_bams_path, '--task', 'post', ]) elif self._sub_step == 'duplicate_input_bam': arguments.extend([ '--tmp', self._dup_input_bam_tmp_dir, '--b1', self._dup_input_bam_path, '--task', 'post', '--statoff', ]) elif self._sub_step == 'duplicate_prep_bam': arguments.extend([ '--tmp', self._dup_prep_bam_tmp_dir, '--b1', self._dup_prep_bam_path, '--task', 'post', '--statoff', ]) elif self._sub_step == 'missing_input_bam': arguments.extend([ '--tmp', self._miss_input_bam_tmp_dir, '--b1', self._miss_input_bam_path, '--task', 'post', '--statoff', ]) elif self._sub_step == 'missing_prep_bam': arguments.extend([ '--tmp', self._miss_prep_bam_tmp_dir, '--b1', self._miss_prep_bam_path, '--task', 'post', '--statoff', ]) return arguments def _setup_sub_step(self): if self._sub_step == 'duplicate_input_bam': self._setup_dup_input_bam() elif self._sub_step == 'duplicate_prep_bam': self._setup_dup_prep_bam() elif self._sub_step == 'missing_input_bam': self._setup_miss_input_bam() elif self._sub_step == 'missing_prep_bam': self._setup_miss_prep_bam() def _setup_dup_input_bam(self): self._dup_input_bam_path = os.path.join(self._generated_input_dir, 'dup_input.txt') bams = self._sample_1_bams + [self._sample_1_bams[0]] self._write_bams(bams, self._dup_input_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._dup_input_bam_tmp_dir) def _setup_dup_prep_bam(self): self._dup_prep_bam_path = os.path.join(self._generated_input_dir, 'dup_prep.txt') bams = self._sample_1_bams self._write_bams(bams, self._dup_prep_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._dup_prep_bam_tmp_dir) self._cp_with_prefix('prep_1_again', self._prep_1_tmp_dir, self._dup_prep_bam_tmp_dir) def _setup_miss_input_bam(self): self._miss_input_bam_path = os.path.join(self._generated_input_dir, 'miss_input.txt') bams = [self._sample_1_bams[0]] self._write_bams(bams, self._miss_input_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._miss_input_bam_tmp_dir) def _setup_miss_prep_bam(self): self._miss_prep_bam_path = os.path.join(self._generated_input_dir, 'miss_prep.txt') bams = self._sample_1_bams + self._sample_2_bams self._write_bams(bams, self._miss_prep_bam_path) self._cp_with_prefix('prep_1', self._prep_1_tmp_dir, self._miss_prep_bam_tmp_dir) def _create_gtf(self, gtf_path): gtf = tests.gtf.GTF() gtf.path = gtf_path transcript_1 = tests.gtf.Transcript() transcript_1.chromosome = '1' transcript_1.strand = '+' transcript_1.gene_id = tests.util.gene_id_str(1) transcript_1.gene_name = tests.util.gene_name_str(1) transcript_1.transcript_id = tests.util.transcript_id_str(1) transcript_1.exons = [(1, 100), (201, 300), (401, 500)] gtf.transcripts = [transcript_1] error = gtf.write() self.assertFalse(error) return gtf def _create_sample_1_bams(self, sample_1_bams_path, sample_1_replicate_template): rep_1_bam = tests.bam.BAM() rep_1_bam.path = sample_1_replicate_template.format(1) rep_2_bam = tests.bam.BAM() rep_2_bam.path = sample_1_replicate_template.format(2) sample_1_bams = [rep_1_bam, rep_2_bam] rep_1_read_1 = tests.bam.Read() rep_1_read_1.ref_seq_name = '1' # chromosome rep_1_read_1.ref_seq_len = 1000 # chromosome length rep_1_read_1.template_name = tests.util.template_name_str([1, 1]) rep_1_read_2 = tests.bam.Read() error = tests.bam.set_read_pair_from_intervals(rep_1_read_1, rep_1_read_2, [[76, 100], [201, 300]], 
[[401, 475]], self._read_length) self.assertFalse(error) rep_1_bam.reads = [rep_1_read_1, rep_1_read_2] rep_2_read_1 = tests.bam.Read() rep_2_read_1.ref_seq_name = '1' # chromosome rep_2_read_1.ref_seq_len = 1000 # chromosome length rep_2_read_1.template_name = tests.util.template_name_str([1, 2]) rep_2_read_2 = tests.bam.Read() error = tests.bam.set_read_pair_from_intervals( rep_2_read_1, rep_2_read_2, [[26, 100]], [[201, 300], [401, 425]], self._read_length) self.assertFalse(error) rep_2_bam.reads = [rep_2_read_1, rep_2_read_2] self._write_bams(sample_1_bams, sample_1_bams_path) return sample_1_bams def _create_sample_2_bams(self, sample_2_bams_path, sample_2_replicate_template): rep_1_bam = tests.bam.BAM() rep_1_bam.path = sample_2_replicate_template.format(1) rep_2_bam = tests.bam.BAM() rep_2_bam.path = sample_2_replicate_template.format(2) sample_2_bams = [rep_1_bam, rep_2_bam] rep_1_read_1 = tests.bam.Read() rep_1_read_1.ref_seq_name = '1' # chromosome rep_1_read_1.ref_seq_len = 1000 # chromosome length rep_1_read_1.template_name = tests.util.template_name_str([2, 1]) rep_1_read_2 = tests.bam.Read() error = tests.bam.set_read_pair_from_intervals(rep_1_read_1, rep_1_read_2, [[76, 100], [401, 500]], [[401, 475]], self._read_length) self.assertFalse(error) rep_1_bam.reads = [rep_1_read_1, rep_1_read_2] rep_2_read_1 = tests.bam.Read() rep_2_read_1.ref_seq_name = '1' # chromosome rep_2_read_1.ref_seq_len = 1000 # chromosome length rep_2_read_1.template_name = tests.util.template_name_str([2, 2]) rep_2_read_2 = tests.bam.Read() error = tests.bam.set_read_pair_from_intervals(rep_2_read_1, rep_2_read_2, [[26, 100]], [[1, 100], [401, 425]], self._read_length) self.assertFalse(error) rep_2_bam.reads = [rep_2_read_1, rep_2_read_2] self._write_bams(sample_2_bams, sample_2_bams_path) return sample_2_bams def _cp_with_prefix(self, prefix, source_dir, dest_dir): source_paths = self._get_dot_rmats_paths(source_dir) command = [ sys.executable, tests.test_config.CP_WITH_PREFIX, prefix, dest_dir ] command.extend(source_paths) subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True) def _check_results(self): if self._sub_step == 'prep_1': self._check_results_prep_1() elif self._sub_step == 'inte_1_fail': self._check_results_inte_1_fail() elif self._sub_step == 'inte_1_pass': self._check_results_inte_1_pass() elif self._sub_step == 'prep_2': self._check_results_prep_2() elif self._sub_step == 'inte_2_fail': self._check_results_inte_2_fail() elif self._sub_step == 'inte_2_pass': self._check_results_inte_2_pass() elif self._sub_step == 'post': self._check_results_post() elif self._sub_step == 'duplicate_input_bam': self._check_results_dup_input_bam() elif self._sub_step == 'duplicate_prep_bam': self._check_results_dup_prep_bam() elif self._sub_step == 'missing_input_bam': self._check_results_miss_input_bam() elif self._sub_step == 'missing_prep_bam': self._check_results_miss_prep_bam() else: self.fail('unexpected sub_step: {}'.format(self._sub_step)) def _get_dot_rmats_paths(self, tmp_dir): dot_rmats_file_paths = glob.glob(os.path.join(tmp_dir, '*.rmats')) # filenames begin with a timestamp used for alphanumeric sort return sorted(dot_rmats_file_paths) def _check_results_prep_1(self): self._check_no_error_results() command_stdout_file_name = self._get_stdout_file_name() with open(command_stdout_file_name, 'rt') as out_f_h: out_lines = out_f_h.readlines() tests.util.assert_no_line_has(self, out_lines, 'Processing count files') test_gene_id = tests.util.gene_id_str(1) 
quoted_test_gene_id = tests.util.double_quote(test_gene_id) dot_rmats_paths = self._get_dot_rmats_paths(self._prep_1_tmp_dir) self.assertEqual(len(dot_rmats_paths), 2) for dot_rmats_i in range(2): dot_rmats_contents, error = output_parser.parse_dot_rmats( dot_rmats_paths[dot_rmats_i]) self.assertFalse(error) self.assertEqual(dot_rmats_contents['bams'], [self._sample_1_bams[dot_rmats_i].path]) self.assertEqual(dot_rmats_contents['read_length'], self._read_length) novel_juncs = dot_rmats_contents['novel_juncs'] self.assertEqual(novel_juncs, [dict()]) exons = dot_rmats_contents['exons'] if dot_rmats_i == 0: self.assertEqual(exons, [{ quoted_test_gene_id: [{ 'start_box': [401, 499], 'end_box': [401, 499], 'counts': [1, 0] }] }]) else: self.assertEqual(exons, [{ quoted_test_gene_id: [{ 'start_box': [1, 99], 'end_box': [1, 99], 'counts': [1, 0] }] }]) multis = dot_rmats_contents['multis'] if dot_rmats_i == 0: self.assertEqual(multis, [{ quoted_test_gene_id: [{ 'junction_pairs': [[1, 1], [100, 200], [299, 299]], 'count': 1 }] }]) else: self.assertEqual(multis, [{ quoted_test_gene_id: [{ 'junction_pairs': [[201, 201], [300, 400], [499, 499]], 'count': 1 }] }]) self._cp_with_prefix('prep_1_', self._prep_1_tmp_dir, self._post_tmp_dir) def _check_results_prep_2(self): self._check_no_error_results() command_stdout_file_name = self._get_stdout_file_name() with open(command_stdout_file_name, 'rt') as out_f_h: out_lines = out_f_h.readlines() tests.util.assert_no_line_has(self, out_lines, 'Processing count files') test_gene_id = tests.util.gene_id_str(1) quoted_test_gene_id = tests.util.double_quote(test_gene_id) dot_rmats_paths = self._get_dot_rmats_paths(self._prep_2_tmp_dir) self.assertEqual(len(dot_rmats_paths), 2) for dot_rmats_i in range(2): dot_rmats_contents, error = output_parser.parse_dot_rmats( dot_rmats_paths[dot_rmats_i]) self.assertFalse(error) self.assertEqual(dot_rmats_contents['bams'], [self._sample_2_bams[dot_rmats_i].path]) self.assertEqual(dot_rmats_contents['read_length'], self._read_length) novel_juncs = dot_rmats_contents['novel_juncs'] self.assertEqual(novel_juncs, [{quoted_test_gene_id: [[0, 0, 2]]}]) exons = dot_rmats_contents['exons'] if dot_rmats_i == 0: self.assertEqual(exons, [{ quoted_test_gene_id: [{ 'start_box': [401, 499], 'end_box': [401, 499], 'counts': [1, 0] }] }]) else: self.assertEqual(exons, [{ quoted_test_gene_id: [{ 'start_box': [1, 99], 'end_box': [1, 99], 'counts': [1, 0] }] }]) multis = dot_rmats_contents['multis'] if dot_rmats_i == 0: self.assertEqual(multis, [{ quoted_test_gene_id: [{ 'junction_pairs': [[1, 1], [100, 400], [499, 499]], 'count': 1 }] }]) else: self.assertEqual(multis, [{ quoted_test_gene_id: [{ 'junction_pairs': [[1, 1], [100, 400], [499, 499]], 'count': 1 }] }]) self._cp_with_prefix('prep_2_', self._prep_2_tmp_dir, self._post_tmp_dir) def _check_results_inte_1_fail(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name = self._get_stderr_file_name() with open(command_stderr_file_name, 'rt') as err_f_h: err_lines = err_f_h.readlines() tests.util.assert_some_line_has( self, err_lines, 'input bam files with no associated prep output') def _check_results_inte_1_pass(self): self._check_no_error_results() def _check_results_inte_2_fail(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name = self._get_stderr_file_name() with open(command_stderr_file_name, 'rt') as err_f_h: err_lines = err_f_h.readlines() tests.util.assert_some_line_has( self, err_lines, 'bam files not in input but associated with 
prep output') def _check_results_inte_2_pass(self): self._check_no_error_results() def _check_results_post(self): self._check_no_error_results() command_stdout_file_name = self._get_stdout_file_name() with open(command_stdout_file_name, 'rt') as out_f_h: out_lines = out_f_h.readlines() tests.util.assert_some_line_has(self, out_lines, 'Processing count files') from_gtf_se_path = os.path.join(self._out_dir, 'fromGTF.SE.txt') from_gtf_se_header, from_gtf_se_rows, error = output_parser.parse_from_gtf( from_gtf_se_path) self.assertFalse(error) self.assertEqual(len(from_gtf_se_rows), 1) from_gtf_se_row = from_gtf_se_rows[0] self.assertEqual(from_gtf_se_row['GeneID'], tests.util.double_quote(tests.util.gene_id_str(1))) self.assertEqual(from_gtf_se_row['exonStart_0base'], '200') self.assertEqual(from_gtf_se_row['exonEnd'], '300') jc_raw_se_path = os.path.join(self._out_dir, 'JC.raw.input.SE.txt') jc_raw_se_header, jc_raw_se_rows, error = output_parser.parse_jc_raw( jc_raw_se_path) self.assertFalse(error) self.assertEqual(len(jc_raw_se_rows), 1) jc_raw_se_row = jc_raw_se_rows[0] self.assertEqual(jc_raw_se_row['ID'], from_gtf_se_row['ID']) self.assertEqual(jc_raw_se_row['IJC_SAMPLE_1'], '1,1') self.assertEqual(jc_raw_se_row['SJC_SAMPLE_1'], '0,0') self.assertEqual(jc_raw_se_row['IJC_SAMPLE_2'], '0,0') self.assertEqual(jc_raw_se_row['SJC_SAMPLE_2'], '1,1') se_mats_jc_path = os.path.join(self._out_dir, 'SE.MATS.JC.txt') se_mats_jc_header, se_mats_jc_rows, error = output_parser.parse_mats_jc( se_mats_jc_path) self.assertFalse(error) self._check_se_mats_jc_header(se_mats_jc_header) self.assertEqual(len(se_mats_jc_rows), 1) se_mats_jc_row = se_mats_jc_rows[0] pvalue = float(se_mats_jc_row['PValue']) tests.util.assert_within_bounds(self, pvalue, 0, 1) fdr = float(se_mats_jc_row['FDR']) tests.util.assert_within_bounds(self, fdr, 0, 1) inc_level_1_splits = se_mats_jc_row['IncLevel1'].split(',') self.assertEqual(len(inc_level_1_splits), 2) self.assertAlmostEqual(float(inc_level_1_splits[0]), 1) self.assertAlmostEqual(float(inc_level_1_splits[1]), 1) inc_level_2_splits = se_mats_jc_row['IncLevel2'].split(',') self.assertEqual(len(inc_level_2_splits), 2) self.assertAlmostEqual(float(inc_level_2_splits[0]), 0) self.assertAlmostEqual(float(inc_level_2_splits[1]), 0) self.assertAlmostEqual(float(se_mats_jc_row['IncLevelDifference']), 1) def _check_results_dup_input_bam(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name = self._get_stderr_file_name() with open(command_stderr_file_name, 'rt') as err_f_h: err_lines = err_f_h.readlines() dup_bam_path = self._sample_1_bams[0].path expected_error = '{} given 2 times'.format(dup_bam_path) tests.util.assert_some_line_has(self, err_lines, expected_error) def _check_results_dup_prep_bam(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name = self._get_stderr_file_name() with open(command_stderr_file_name, 'rt') as err_f_h: err_lines = err_f_h.readlines() for bam in self._sample_1_bams: dup_bam_path = bam.path expected_error = '{} found 2 times in .rmats'.format(dup_bam_path) tests.util.assert_some_line_has(self, err_lines, expected_error) def _check_results_miss_input_bam(self): self._check_no_error_results() def _check_results_miss_prep_bam(self): self.assertNotEqual(self._rmats_return_code, 0) command_stderr_file_name = self._get_stderr_file_name() with open(command_stderr_file_name, 'rt') as err_f_h: err_lines = err_f_h.readlines() for bam in self._sample_2_bams: miss_bam_path = bam.path expected_error = '{} not 
found in .rmats'.format(miss_bam_path) tests.util.assert_some_line_has(self, err_lines, expected_error) if __name__ == '__main__': unittest.main(verbosity=2)
2.046875
2
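For orientation, the prep/post test above mirrors a split rMATS run: each sample is prepped into its own --tmp directory, the generated .rmats files are gathered into one directory, and post is run over both samples. A hedged sketch of that sequence (the rmats.py entry point and all paths are illustrative):

import subprocess

common = ["--gtf", "test.gtf", "--od", "out", "-t", "paired", "--readLength", "50"]

# 1) prep each sample into its own --tmp directory
subprocess.run(["python", "rmats.py", *common, "--b1", "b1.txt",
                "--tmp", "tmp_prep_1", "--task", "prep"], check=True)
subprocess.run(["python", "rmats.py", *common, "--b1", "b2.txt",
                "--tmp", "tmp_prep_2", "--task", "prep"], check=True)

# 2) copy the .rmats files from both prep directories into tmp_post
#    (the test does this with a helper that also prefixes the filenames),
#    then run the post step over both samples
subprocess.run(["python", "rmats.py", *common, "--b1", "b1.txt", "--b2", "b2.txt",
                "--tmp", "tmp_post", "--task", "post"], check=True)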
nltk/align/util.py
kruskod/nltk
1
1537
# Natural Language Toolkit: Aligner Utilities # # Copyright (C) 2001-2015 NLTK Project # Author: <NAME> # URL: <http://www.nltk.org/> # For license information, see LICENSE.TXT from nltk.align.api import Alignment def pharaohtext2tuples(pharaoh_text): """ Converts pharaoh text format into an Alignment object (a list of tuples). >>> pharaoh_text = '0-0 2-1 9-2 21-3 10-4 7-5' >>> pharaohtext2tuples(pharaoh_text) Alignment([(0, 0), (2, 1), (7, 5), (9, 2), (10, 4), (21, 3)]) :type pharaoh_text: str :param pharaoh_text: the word alignment outputs in the pharaoh output format :rtype: Alignment :return: An Alignment object that contains a list of integer tuples """ # Convert each 'i-j' string into a tuple of integers for a word alignment point. list_of_tuples = [tuple(map(int,a.split('-'))) for a in pharaoh_text.split()] return Alignment(list_of_tuples) def alignment2pharaohtext(alignment): """ Converts an Alignment object (a list of tuples) into pharaoh text format. >>> alignment = [(0, 0), (2, 1), (9, 2), (21, 3), (10, 4), (7, 5)] >>> alignment2pharaohtext(alignment) '0-0 2-1 9-2 21-3 10-4 7-5' :type alignment: Alignment :param alignment: An Alignment object that contains a list of integer tuples :rtype: str :return: the word alignment outputs in the pharaoh output format """ pharaoh_text = ' '.join(str(i) + "-" + str(j) for i,j in alignment) return pharaoh_text
3.8125
4
grr/server/grr_response_server/databases/db_yara_test_lib.py
khanhgithead/grr
4,238
1538
<gh_stars>1000+ #!/usr/bin/env python # -*- encoding: utf-8 -*- """A module with test cases for the YARA database method.""" import os from grr_response_server.databases import db from grr_response_server.rdfvalues import objects as rdf_objects class DatabaseTestYaraMixin(object): """A mixin class for testing YARA methods of database implementations.""" def testWriteYaraSignatureReferenceIncorrectUsername(self): blob_id = rdf_objects.BlobID(os.urandom(32)) with self.assertRaises(db.UnknownGRRUserError) as context: self.db.WriteYaraSignatureReference(blob_id=blob_id, username="quux") self.assertEqual(context.exception.username, "quux") def testWriteYaraSignatureReferenceDuplicated(self): self.db.WriteGRRUser("foo") blob_id = rdf_objects.BlobID(os.urandom(32)) # Writing duplicated signatures is possible, it should not raise. self.db.WriteYaraSignatureReference(blob_id=blob_id, username="foo") self.db.WriteYaraSignatureReference(blob_id=blob_id, username="foo") def testVerifyYaraSignatureReferenceSimple(self): self.db.WriteGRRUser("foo") blob_id = rdf_objects.BlobID(os.urandom(32)) self.db.WriteYaraSignatureReference(blob_id=blob_id, username="foo") self.assertTrue(self.db.VerifyYaraSignatureReference(blob_id)) def testVerifyYaraSignatureReferenceIncorrect(self): blob_id = rdf_objects.BlobID(os.urandom(32)) self.assertFalse(self.db.VerifyYaraSignatureReference(blob_id))
2.328125
2
gpytorch/kernels/inducing_point_kernel.py
4aHxKzD/gpytorch
1
1539
#!/usr/bin/env python3 import copy import math import torch from ..distributions import MultivariateNormal from ..lazy import DiagLazyTensor, LowRankRootAddedDiagLazyTensor, LowRankRootLazyTensor, MatmulLazyTensor, delazify from ..mlls import InducingPointKernelAddedLossTerm from ..models import exact_prediction_strategies from ..utils.cholesky import psd_safe_cholesky from .kernel import Kernel class InducingPointKernel(Kernel): def __init__(self, base_kernel, inducing_points, likelihood, active_dims=None): super(InducingPointKernel, self).__init__(active_dims=active_dims) self.base_kernel = base_kernel self.likelihood = likelihood if inducing_points.ndimension() == 1: inducing_points = inducing_points.unsqueeze(-1) self.register_parameter(name="inducing_points", parameter=torch.nn.Parameter(inducing_points)) self.register_added_loss_term("inducing_point_loss_term") def _clear_cache(self): if hasattr(self, "_cached_kernel_mat"): del self._cached_kernel_mat @property def _inducing_mat(self): if not self.training and hasattr(self, "_cached_kernel_mat"): return self._cached_kernel_mat else: res = delazify(self.base_kernel(self.inducing_points, self.inducing_points)) if not self.training: self._cached_kernel_mat = res return res @property def _inducing_inv_root(self): if not self.training and hasattr(self, "_cached_kernel_inv_root"): return self._cached_kernel_inv_root else: chol = psd_safe_cholesky(self._inducing_mat, upper=True) eye = torch.eye(chol.size(-1), device=chol.device, dtype=chol.dtype) inv_root = torch.triangular_solve(eye, chol)[0] res = inv_root if not self.training: self._cached_kernel_inv_root = res return res def _get_covariance(self, x1, x2): k_ux1 = delazify(self.base_kernel(x1, self.inducing_points)) if torch.equal(x1, x2): covar = LowRankRootLazyTensor(k_ux1.matmul(self._inducing_inv_root)) # Diagonal correction for predictive posterior if not self.training: correction = (self.base_kernel(x1, x2, diag=True) - covar.diag()).clamp(0, math.inf) covar = LowRankRootAddedDiagLazyTensor(covar, DiagLazyTensor(correction)) else: k_ux2 = delazify(self.base_kernel(x2, self.inducing_points)) covar = MatmulLazyTensor( k_ux1.matmul(self._inducing_inv_root), k_ux2.matmul(self._inducing_inv_root).transpose(-1, -2) ) return covar def _covar_diag(self, inputs): if inputs.ndimension() == 1: inputs = inputs.unsqueeze(1) # Get diagonal of covar covar_diag = delazify(self.base_kernel(inputs, diag=True)) return DiagLazyTensor(covar_diag) def forward(self, x1, x2, diag=False, **kwargs): covar = self._get_covariance(x1, x2) if self.training: if not torch.equal(x1, x2): raise RuntimeError("x1 should equal x2 in training mode") zero_mean = torch.zeros_like(x1.select(-1, 0)) new_added_loss_term = InducingPointKernelAddedLossTerm( MultivariateNormal(zero_mean, self._covar_diag(x1)), MultivariateNormal(zero_mean, covar), self.likelihood, ) self.update_added_loss_term("inducing_point_loss_term", new_added_loss_term) if diag: return covar.diag() else: return covar def num_outputs_per_input(self, x1, x2): return self.base_kernel.num_outputs_per_input(x1, x2) def __deepcopy__(self, memo): replace_inv_root = False replace_kernel_mat = False if hasattr(self, "_cached_kernel_inv_root"): replace_inv_root = True kernel_inv_root = self._cached_kernel_inv_root if hasattr(self, "_cached_kernel_mat"): replace_kernel_mat = True kernel_mat = self._cached_kernel_mat cp = self.__class__( base_kernel=copy.deepcopy(self.base_kernel), inducing_points=copy.deepcopy(self.inducing_points), likelihood=self.likelihood, 
active_dims=self.active_dims, ) if replace_inv_root: cp._cached_kernel_inv_root = kernel_inv_root if replace_kernel_mat: cp._cached_kernel_mat = kernel_mat return cp def prediction_strategy(self, train_inputs, train_prior_dist, train_labels, likelihood): # Allow for fast variances return exact_prediction_strategies.SGPRPredictionStrategy( train_inputs, train_prior_dist, train_labels, likelihood )
2.03125
2
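A hedged sketch of how the InducingPointKernel above is typically wrapped inside an ExactGP model (the SGPR pattern its prediction_strategy points at); the data, the number of inducing points and the RBF base kernel are illustrative.

import torch
import gpytorch

train_x = torch.linspace(0, 1, 100).unsqueeze(-1)
train_y = torch.sin(6.0 * train_x).squeeze(-1)
likelihood = gpytorch.likelihoods.GaussianLikelihood()

class SGPRModel(gpytorch.models.ExactGP):
    def __init__(self, train_x, train_y, likelihood):
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        base = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
        # Wrap the base kernel; the first 10 training inputs seed the inducing points.
        self.covar_module = gpytorch.kernels.InducingPointKernel(
            base, inducing_points=train_x[:10].clone(), likelihood=likelihood)

    def forward(self, x):
        return gpytorch.distributions.MultivariateNormal(
            self.mean_module(x), self.covar_module(x))

model = SGPRModel(train_x, train_y, likelihood)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)  # trained as usual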
app/__init__.py
Jotasenpai/DigitalMediaStoreRESTfull
0
1540
import logging import os from flask import Flask from flask_cors import CORS from app.extensions import api from app.extensions.database import db from app.extensions.schema import ma from app.views import albums, artists, hello, tracks def create_app(config, **kwargs): logging.basicConfig(level=logging.INFO) app = Flask(__name__, **kwargs) CORS(app, resources={r"/api/*": {"origins": "*"}}) app.config.from_object(config) # app.url_map.strict_slashes = False with app.app_context(): api.init_app(app) db.init_app(app) db.create_all() ma.init_app(app) api.register_blueprint(hello.blp) api.register_blueprint(artists.blp) api.register_blueprint(albums.blp) api.register_blueprint(tracks.blp) try: os.makedirs(app.instance_path) except OSError: pass return app
1.890625
2
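A hedged sketch of driving the create_app factory above. The Config class is an assumption: this file only shows app.config.from_object(config), so the exact settings its extensions expect (database URI, API metadata) are not visible here and the keys below are illustrative.

from app import create_app

class Config:
    SQLALCHEMY_DATABASE_URI = "sqlite:///:memory:"   # assumed key for the db extension
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    API_TITLE = "DigitalMediaStore"                  # assumed keys for the api extension
    API_VERSION = "v1"
    OPENAPI_VERSION = "3.0.2"

app = create_app(Config)

if __name__ == "__main__":
    app.run(debug=True)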
app.py
SASHA-PAIS/A-Flask-web-app-for-inventory-management
0
1541
<gh_stars>0 from flask import Flask, url_for, request, redirect from flask import render_template as render from flask_mysqldb import MySQL import yaml import json import MySQLdb import decimal class Encoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, decimal.Decimal): return str(obj) # Setting up the flask instance app = Flask(__name__) # Configure the database db = yaml.load(open('db.yaml')) app.config['MYSQL_HOST'] = db['mysql_host'] app.config['MYSQL_USER'] = db['mysql_user'] app.config['MYSQL_PASSWORD'] = db['mysql_password'] app.config['MYSQL_DB'] = db['mysql_db'] mysql = MySQL(app) link = {x:x for x in ["location", "product", "movement"]} link["index"] = '/' def init_database(): cursor = mysql.connection.cursor() # Initialise all tables cursor.execute(""" CREATE TABLE IF NOT EXISTS products(prod_id integer primary key auto_increment, prod_name varchar(20) UNIQUE NOT NULL, prod_quantity integer not null, unallocated_quantity integer); """) # Might have to create a trigger, let's see! cursor.execute(""" CREATE TABLE IF NOT EXISTS location(loc_id integer primary key auto_increment, loc_name varchar(20) unique not null); """) cursor.execute(""" CREATE TABLE IF NOT EXISTS logistics(trans_id integer primary key auto_increment, prod_id INTEGER NOT NULL, from_loc_id INTEGER NULL, to_loc_id INTEGER NULL, prod_quantity INTEGER NOT NULL, trans_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, FOREIGN KEY(prod_id) REFERENCES products(prod_id), FOREIGN KEY(from_loc_id) REFERENCES location(loc_id), FOREIGN KEY(to_loc_id) REFERENCES location(loc_id)); """) mysql.connection.commit() cursor.close() @app.route('/') def summary(): init_database() msg = None q_data, warehouse, products = None, None, None cursor = mysql.connection.cursor() try: cursor.execute("Select * from location") warehouse = cursor.fetchall() cursor.execute("Select * from products") products = cursor.fetchall() cursor.execute(""" SELECT prod_name, unallocated_quantity, prod_quantity FROM products """) q_data = cursor.fetchall() except(MySQLdb.Error(not Warning), MySQLdb.Warning()) as e: msg = f"An error occured: {e}" print(msg) cursor.close() return render('index.html',link=link, title = "Summary", warehouses = warehouse, products = products, database = q_data) @app.route('/location.html', methods=['POST', 'GET']) def location(): init_database() msg=None cursor = mysql.connection.cursor() cursor.execute("SELECT * FROM location ORDER BY loc_id") warehouse_data = cursor.fetchall() cursor.execute("SELECT loc_name FROM location") loc_names = cursor.fetchall() loc_new = [] for i in range(len(loc_names)): loc_new.append(loc_names[i][0]) if request.method == 'POST': warehouse_name = request.form['warehouse_name'] warehouse_name = warehouse_name.capitalize() transaction_allowed = False if warehouse_name not in ['', ' ', None] and warehouse_name not in loc_new: transaction_allowed=True if transaction_allowed: try: cursor.execute("INSERT INTO location(loc_name) VALUES(%s)", (warehouse_name,)) mysql.connection.commit() except(MySQLdb.Error(not Warning), MySQLdb.Warning()) as e: msg = f"An error occured: {e}" else: msg = f"{warehouse_name} added succcessfully" if msg: print(msg) cursor.close() return redirect(url_for('location')) return render('location.html', link=link, warehouses=warehouse_data, transaction_message=msg, title = "Warehouse Locations") @app.route('/product.html', methods=['POST', 'GET']) def product(): init_database() msg=None cursor = mysql.connection.cursor() cursor.execute("SELECT * from products") 
products = cursor.fetchall() cursor.execute("SELECT prod_name FROM products") prod_names = cursor.fetchall() prod_new = [] for i in range(len(prod_names)): prod_new.append(prod_names[i][0]) if request.method == 'POST': prod_name = request.form['prod_name'] quantity = request.form['prod_quantity'] prod_name = prod_name.capitalize() transaction_allowed = False if prod_name not in ['', ' ', None] and prod_name not in prod_new: if quantity not in ['', ' ', None]: transaction_allowed= True if transaction_allowed: try: cursor.execute("INSERT INTO products(prod_name, prod_quantity, unallocated_quantity) VALUES (%s, %s, %s)", (prod_name, quantity, quantity)) mysql.connection.commit() except(MySQLdb.Error(not Warning), MySQLdb.Warning()) as e: msg = f"An error occured: {e}" else: msg = f"{prod_name} added succcessfully" if msg: print(msg) cursor.close() return redirect(url_for('product')) return render('product.html', link=link, products = products, transaction_message=msg, title="Products Log") @app.route('/movement.html', methods=['POST', 'GET']) def movement(): init_database() msg=None cursor = mysql.connection.cursor() cursor.execute("SELECT * FROM logistics") logistics_data = cursor.fetchall() cursor.execute("SELECT prod_id, prod_name, unallocated_quantity FROM products") products = cursor.fetchall() cursor.execute("SELECT loc_id, loc_name FROM location") locations = cursor.fetchall() # products - ((1, 'Piano', 250), (2, 'Iphone xr', 600), (6, 'Washing machine', 100), (7, 'Microwave', 50)) # x in product - (1, 'Piano', 250) # x[0] = 1 # for p_id in [x[0] for x in products]: # print(p_id) # 1 # 2 # 6 # 7 # print(locations) # for l_id in [x[0] for x in locations]: # print(l_id) # ((20, 'Andaman'), (19, 'Assam'), (26, 'Jodhpur'), (17, 'Puducherry')) # 20 # 19 # 26 # 17 log_summary = [] for p_id in [x[0] for x in products]: cursor.execute("SELECT prod_name FROM products WHERE prod_id = %s", str(p_id,)) temp_prod_name = cursor.fetchone() #print(temp_prod_name) ('Piano',) for l_id in [x[0] for x in locations]: cursor.execute("SELECT loc_name FROM location WHERE loc_id = %s", (l_id,)) #str(l_id,) giving an error temp_loc_name = cursor.fetchone() # print(temp_loc_name) - (Andaman,) #e.g. prod_id = 1 = piano, loc_id = 1 = andaman cursor.execute(""" SELECT SUM(log.prod_quantity) FROM logistics log WHERE log.prod_id = %s AND log.to_loc_id = %s """, (p_id, l_id)) sum_to_loc = cursor.fetchone() # No.of pianos that enter andaman cursor.execute(""" SELECT SUM(log.prod_quantity) FROM logistics log WHERE log.prod_id = %s AND log.from_loc_id = %s """, (p_id, l_id)) sum_from_loc = cursor.fetchone() # No. of pianos that leave andaman # print(sum_from_loc) if sum_from_loc[0] is None: #e.g. 
(None,) --> (0,) --> No pianos leave andaman sum_from_loc = (0,) if sum_to_loc[0] is None: #No pianos enter andaman sum_to_loc = (0,) #how much enters andaman - how much leaves andaman = how much remains (allocated) in andaman # log_summary += [(temp_prod_name + temp_loc_name + (sum_to_loc[0] - sum_from_loc[0],) )] ORRRRRRRRRRR log_summary.append(temp_prod_name + temp_loc_name + (sum_to_loc[0] - sum_from_loc[0],)) # (Piano,) + (Andaman,), (0,) = ('Piano', 'Andaman', 0) #print(log_summary) # [('Piano', 'Andaman', 0), ('Piano', 'Assam', 0), ('Piano', 'Jodhpur', 0), ('Piano', 'Puducherry', 0), # ('Iphone xr', 'Andaman', 0), ('Iphone xr', 'Assam', 0), ('Iphone xr', 'Jodhpur', 0), ('Iphone xr', 'Puducherry', 0), # ('Washing machine', 'Andaman', 0), ('Washing machine', 'Assam', 0), ('Washing machine', 'Jodhpur', 0), ('Washing machine', 'Puducherry', 0), # ('Microwave', 'Andaman', 0), ('Microwave', 'Assam', 0), ('Microwave', 'Jodhpur', 0), ('Microwave', 'Puducherry', 0)] alloc_json = {} for row in log_summary: try: if row[1] in alloc_json[row[0]].keys(): #Check if Andaman exists in Piano ka keys, Check if Assam, exists in Piano ka keys, etc. alloc_json[row[0]][row[1]] += row[2] #If yes, the add the quantity to the previous quantity else: alloc_json[row[0]][row[1]] = row[2] #If no, add it as a new quantity except (KeyError, TypeError): alloc_json[row[0]] = {} #Make the value of piano empty alloc_json[row[0]][row[1]] = row[2] #Add Andaman with quantity as a new value in the dictionary #print(alloc_json) # {'Piano': {'Andaman': 0, 'Assam': 0, 'Jodhpur': 0, 'Puducherry': 0}, # 'Iphone xr': {'Andaman': 0, 'Assam': 0, 'Jodhpur': 0, 'Puducherry': 0}, # 'Washing machine': {'Andaman': 0, 'Assam': 0, 'Jodhpur': 0, 'Puducherry': 0}, # 'Microwave': {'Andaman': 0, 'Assam': 0, 'Jodhpur': 0, 'Puducherry': 0}} alloc_json = json.dumps(alloc_json, cls = Encoder) # print(alloc_json) # {"Piano": {"Andaman": 0, "Assam": 0, "Jodhpur": 0, "Puducherry": 0}, # "Iphone xr": {"Andaman": 0, "Assam": 0, "Jodhpur": 0, "Puducherry": 0}, # "Washing machine": {"Andaman": 0, "Assam": 0, "Jodhpur": 0, "Puducherry": 0}, # "Microwave": {"Andaman": 0, "Assam": 0, "Jodhpur": 0, "Puducherry": 0}} if request.method == 'POST': # transaction times are stored in UTC prod_name = request.form['prod_name'] from_loc = request.form['from_loc'] to_loc = request.form['to_loc'] quantity = request.form['quantity'] # if no 'from loc' is given, that means the product is being shipped to a warehouse (init condition) if from_loc in [None, '', ' ']: try: cursor.execute(""" INSERT INTO logistics(prod_id, to_loc_id, prod_quantity) SELECT products.prod_id, location.loc_id, %s FROM products, location WHERE products.prod_name = %s AND location.loc_name = %s """, (quantity, prod_name, to_loc)) # IMPORTANT to maintain consistency cursor.execute(""" UPDATE products SET unallocated_quantity = unallocated_quantity - %s WHERE prod_name = %s """, (quantity, prod_name)) mysql.connection.commit() except (MySQLdb.Error, MySQLdb.Warning) as e: msg = f"An error occured: {e}" else: msg = "Transaction added successfully" elif to_loc in [None, '', ' ']: print("To Location wasn't specified, will be unallocated") try: cursor.execute(""" INSERT INTO logistics(prod_id, from_loc_id, prod_quantity) SELECT products.prod_id, location.loc_id, %s FROM products, location WHERE products.prod_name = %s AND location.loc_name = %s """, (quantity, prod_name, from_loc)) #Important to maintain consistency cursor.execute(""" UPDATE products SET unallocated_quantity = unallocated_quantity + 
%s WHERE prod_name = %s """, (quantity, prod_name)) mysql.connection.commit() except(MySQLdb.Error, MySQLdb.Warning) as e: msg=f"An error occurred: {e}" else: msg = "Transaction added successfully" # if 'from loc' and 'to_loc' given the product is being shipped between warehouses else: try: cursor.execute("SELECT loc_id FROM location WHERE loc_name = %s", (from_loc,)) from_loc = ''.join([str(x[0]) for x in cursor.fetchall()]) # cursor.fetchall -> ((1,)), x -> (1,) x[0] -> 1 join converts 1 into a string cursor.execute("SELECT loc_id FROM location WHERE loc_name = %s", (to_loc,)) to_loc = ''.join([str(x[0]) for x in cursor.fetchall() ]) cursor.execute("SELECT prod_id FROM products WHERE prod_name = %s", (prod_name,)) prod_id = ''.join([str(x[0]) for x in cursor.fetchall() ]) cursor.execute(""" INSERT INTO logistics(prod_id, from_loc_id, to_loc_id, prod_quantity) VALUES(%s, %s, %s, %s) """, (prod_id, from_loc, to_loc, quantity)) mysql.connection.commit() except(MySQLdb.Error, MySQLdb.Warning) as e: msg=f"An error occurred: {e}" else: msg = "Transaction added successfully" #Print a transaction message if exists! if msg: print(msg) cursor.close() return redirect(url_for('movement')) return render('movement.html', title = "Product Movement", link=link, trans_message=msg, products=products, locations=locations, allocated = alloc_json, logs = logistics_data, database = log_summary) @app.route('/delete') def delete(): # Make sure that the queries are working properly....I'm having some doubts about the datatypes type_ = request.args.get('type') cursor = mysql.connection.cursor() if type_ == 'location': id_ = request.args.get('loc_id') cursor.execute("SELECT prod_id, SUM(prod_quantity) FROM logistics where to_loc_id = %s GROUP BY prod_id", (id_,)) in_place = cursor.fetchall() cursor.execute("SELECT prod_id, SUM(prod_quantity) FROM logistics where from_loc_id = %s GROUP BY prod_id", (id_,)) out_place = cursor.fetchall() #Convert list of tuples to dict in_place = dict(in_place) out_place = dict(out_place) all_place = {} #Inplace = {1:20, 3:2000} - keys - prod_id - toloc = mumbai #out_place = {3:100} - keys - prod_id - fromloc = mumbai for x in in_place.keys(): #calculator entered mumbai if x in out_place.keys(): #calculator left mumbai all_place[x] = in_place[x] - out_place[x] #2000 fridges came to mumbai from kolkata, 100 fridges were sent to daman diu, therefore, 1900 remains in mumbai which will be unallocated if mumbai is deleted else: all_place[x] = in_place[x] for products_ in all_place.keys(): cursor.execute(""" UPDATE products SET unallocated_quantity = unallocated_quantity + %s WHERE prod_id = %s """, (all_place[products_], products_)) cursor.execute("DELETE FROM location where loc_id = %s", (id_,)) mysql.connection.commit() cursor.close() return redirect(url_for('location')) elif type_ == 'product': id_ = request.args.get('prod_id') cursor.execute("DELETE FROM products WHERE prod_id = %s", (id_,)) mysql.connection.commit() cursor.close() return redirect(url_for('product')) @app.route('/edit', methods=['POST', 'GET']) def edit(): # Try capitalize() type_ = request.args.get('type') cursor = mysql.connection.cursor() cursor.execute("SELECT loc_name FROM location") loc_names = cursor.fetchall() loc_new = [] for i in range(len(loc_names)): loc_new.append(loc_names[i][0]) cursor.execute("SELECT prod_name FROM products") prod_names = cursor.fetchall() prod_new = [] for i in range(len(prod_names)): prod_new.append(prod_names[i][0]) if type_ == 'location' and request.method == 'POST': loc_id = 
request.form['loc_id'] loc_name = request.form['loc_name'] loc_name = loc_name.capitalize() if loc_name not in ['', ' ', None] and loc_name not in loc_new: cursor.execute("UPDATE location SET loc_name = %s WHERE loc_id = %s", (loc_name, loc_id)) mysql.connection.commit() cursor.close() return redirect(url_for('location')) elif type_ == 'product' and request.method == 'POST': prod_id = request.form['product_id'] prod_name = request.form['prod_name'] prod_quantity = request.form['prod_quantity'] prod_name = prod_name.capitalize() if prod_name not in ['', ' ', None] and prod_name not in prod_new: cursor.execute("UPDATE products SET prod_name = %s WHERE prod_id = %s", (prod_name, str(prod_id))) if prod_quantity not in ['', ' ', None] and prod_name not in prod_new: cursor.execute("SELECT prod_quantity FROM products WHERE prod_id = %s", (prod_id,)) old_prod_quantity = cursor.fetchone()[0] cursor.execute(""" UPDATE products SET prod_quantity = %s, unallocated_quantity = unallocated_quantity + %s - %s WHERE prod_id = %s """, (prod_quantity, prod_quantity, old_prod_quantity, str(prod_id))) mysql.connection.commit() cursor.close() return redirect(url_for('product')) return render(url_for(type_)) if __name__ == '__main__': app.run(debug=True)
2.65625
3
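The try/except KeyError block in the record above is doing a plain two-level aggregation of (product, location, quantity) rows into a nested dict before JSON-encoding it. A hedged sketch of the same idea with collections.defaultdict; summarize_allocation and the sample rows are illustrative names, not part of the original app.

# Sketch only: aggregates (product, location, quantity) rows into a nested dict,
# mirroring what the try/except block in the record above does with alloc_json.
import json
from collections import defaultdict

def summarize_allocation(log_summary):
    alloc = defaultdict(lambda: defaultdict(int))
    for prod_name, loc_name, qty in log_summary:
        alloc[prod_name][loc_name] += qty
    # convert the inner defaultdicts to plain dicts so json serializes them cleanly
    return json.dumps({prod: dict(locs) for prod, locs in alloc.items()})

if __name__ == '__main__':
    rows = [('Piano', 'Andaman', 0), ('Piano', 'Assam', 0), ('Iphone xr', 'Andaman', 0)]
    print(summarize_allocation(rows))
    # {"Piano": {"Andaman": 0, "Assam": 0}, "Iphone xr": {"Andaman": 0}}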
python/paddle/fluid/tests/unittests/ir/inference/test_trt_reduce_mean_op.py
zmxdream/Paddle
8
1542
<gh_stars>1-10 # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import unittest import numpy as np from inference_pass_test import InferencePassTest import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid.core import PassVersionChecker from paddle.fluid.core import AnalysisConfig class TRTReduceMeanTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name="data", shape=[-1, 3, -1, -1], dtype="float32") reduce_mean = fluid.layers.reduce_mean( data, dim=[2, -1], keep_dim=True) out = fluid.layers.batch_norm(reduce_mean, is_test=True) self.feeds = { "data": np.random.random([3, 3, 56, 56]).astype("float32"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanTest.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False) self.fetch_list = [out] self.dynamic_shape_params = TRTReduceMeanTest.DynamicShapeParam({ 'data': [1, 3, 16, 16] }, {'data': [3, 3, 56, 56]}, {'data': [3, 3, 56, 56]}, False) def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanAllNoBatchTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name="data", shape=[-1, 3, -1, -1], dtype="float32") reduce_mean = fluid.layers.reduce_mean(data, keep_dim=True) out = fluid.layers.batch_norm(reduce_mean, is_test=True) self.feeds = { "data": np.random.random([3, 3, 56, 56]).astype("float32"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanAllNoBatchTest.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False) self.fetch_list = [out] self.dynamic_shape_params = TRTReduceMeanAllNoBatchTest.DynamicShapeParam( { 'data': [1, 3, 16, 16] }, {'data': [3, 3, 56, 56]}, {'data': [3, 3, 56, 56]}, False) def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanTestFP16(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name="data", shape=[-1, 3, -1, -1], dtype="float32") reduce_mean = fluid.layers.reduce_mean( data, dim=[2, -1], keep_dim=True) out = fluid.layers.batch_norm(reduce_mean, is_test=True) self.feeds = { "data": np.random.random([3, 3, 56, 56]).astype("float32"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanTestFP16.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Half, False, False) self.fetch_list = [out] self.dynamic_shape_params = TRTReduceMeanTestFP16.DynamicShapeParam({ 'data': [1, 3, 16, 16] }, {'data': [3, 3, 56, 56]}, {'data': [3, 3, 56, 56]}, False) def test_check_output(self): if 
core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanAllTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name="data", shape=[-1, 3, 56, 56], dtype="float32") reduce_mean = fluid.layers.reduce_mean(data, keep_dim=True) out = fluid.layers.batch_norm(reduce_mean, is_test=True) self.feeds = { "data": np.random.random([3, 3, 56, 56]).astype("float32"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanAllTest.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False) self.fetch_list = [out] self.dynamic_shape_params = TRTReduceMeanAllTest.DynamicShapeParam({ 'data': [1, 3, 56, 56] }, {'data': [3, 3, 56, 56]}, {'data': [3, 3, 56, 56]}, False) def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanTestStatic(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name="data", shape=[3, 3, 56, 56], dtype="float32") reduce_mean = fluid.layers.reduce_mean( data, dim=[2, -1], keep_dim=True) out = fluid.layers.batch_norm(reduce_mean, is_test=True) self.feeds = { "data": np.random.random([3, 3, 56, 56]).astype("float32"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanTestStatic.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False) self.fetch_list = [out] def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanStaticAllTest(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name="data", shape=[4, 3, 56, 56], dtype="float32") reduce_mean = fluid.layers.reduce_mean(data, keep_dim=True) out = fluid.layers.batch_norm(reduce_mean, is_test=True) self.feeds = { "data": np.random.random([4, 3, 56, 56]).astype("float32"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanStaticAllTest.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Float32, False, False) self.fetch_list = [out] def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanStaticFP16(InferencePassTest): def setUp(self): with fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name="data", shape=[4, 3, 56, 56], dtype="float32") reduce_mean = fluid.layers.reduce_mean(data, keep_dim=True) out = fluid.layers.batch_norm(reduce_mean, is_test=True) self.feeds = { "data": np.random.random([4, 3, 56, 56]).astype("float32"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanStaticFP16.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Half, False, False) self.fetch_list = [out] def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) class TRTReduceMeanFP16Static(InferencePassTest): def setUp(self): with 
fluid.program_guard(self.main_program, self.startup_program): data = fluid.data( name="data", shape=[4, 3, 56, 56], dtype="float32") reduce_mean = fluid.layers.reduce_mean(data, keep_dim=True) out = fluid.layers.batch_norm(reduce_mean, is_test=True) self.feeds = { "data": np.random.random([4, 3, 56, 56]).astype("float32"), } self.enable_trt = True self.trt_parameters = TRTReduceMeanFP16Static.TensorRTParam( 1 << 30, 32, 1, AnalysisConfig.Precision.Half, True, False) self.fetch_list = [out] def test_check_output(self): if core.is_compiled_with_cuda(): use_gpu = True self.check_output_with_option(use_gpu, flatten=True) self.assertTrue( PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')) if __name__ == "__main__": unittest.main()
1.875
2
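The seven test classes in the record above repeat the same dynamic-shape dictionaries almost verbatim. A hedged sketch of factoring that repetition into one helper; make_trt_shape_dicts is a made-up name, the shape values are copied from the tests, and the return order simply mirrors how the tests pass the dicts to DynamicShapeParam (smallest shape first, then the full shape twice).

# Illustrative helper only (not a Paddle API): builds the three shape dicts that
# the DynamicShapeParam(...) calls above repeat in every test class.
def make_trt_shape_dicts(name, min_shape, full_shape):
    return {name: min_shape}, {name: full_shape}, {name: full_shape}

if __name__ == '__main__':
    shapes = make_trt_shape_dicts('data', [1, 3, 16, 16], [3, 3, 56, 56])
    print(shapes)
    # ({'data': [1, 3, 16, 16]}, {'data': [3, 3, 56, 56]}, {'data': [3, 3, 56, 56]})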
configs/vinbig/detectors_resnext.py
SeHwanJoo/mmdetection_vinbig
2
1543
_base_ = [
    '../_base_/models/cascade_rcnn_r50_fpn.py', './dataset_base.py',
    './scheduler_base.py', '../_base_/default_runtime.py'
]
model = dict(
    pretrained='open-mmlab://resnext101_32x4d',
    backbone=dict(
        type='DetectoRS_ResNeXt',
        pretrained='open-mmlab://resnext101_32x4d',
        depth=101,
        groups=32,
        base_width=4,
        conv_cfg=dict(type='ConvAWS'),
        sac=dict(type='SAC', use_deform=True),
        stage_with_sac=(False, True, True, True),
        output_img=True,
        plugins=[
            dict(
                cfg=dict(
                    type='GeneralizedAttention',
                    spatial_range=-1,
                    num_heads=8,
                    attention_type='0010',
                    kv_stride=2),
                stages=(False, False, True, True),
                in_channels=512,
                position='after_conv2')
        ]
    ),
    neck=dict(
        type='RFP',
        rfp_steps=2,
        aspp_out_channels=64,
        aspp_dilations=(1, 3, 6, 1),
        rfp_backbone=dict(
            rfp_inplanes=256,
            type='DetectoRS_ResNeXt',
            depth=101,
            groups=32,
            base_width=4,
            num_stages=4,
            out_indices=(0, 1, 2, 3),
            frozen_stages=1,
            norm_cfg=dict(type='BN', requires_grad=True),
            norm_eval=True,
            conv_cfg=dict(type='ConvAWS'),
            sac=dict(type='SAC', use_deform=True),
            stage_with_sac=(False, True, True, True),
            pretrained='open-mmlab://resnext101_32x4d',
            style='pytorch')),
    roi_head=dict(
        bbox_head=[
            dict(type='Shared2FCBBoxHead', num_classes=14),
            dict(type='Shared2FCBBoxHead', num_classes=14),
            dict(type='Shared2FCBBoxHead', num_classes=14)
        ]
    ),
    test_cfg=dict(
        rpn=dict(nms_thr=0.7),
        rcnn=dict(
            score_thr=0.0,
            nms=dict(type='nms', iou_threshold=0.4)
        )
    )
)
1.382813
1
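To see what this file resolves to once the _base_ configs are merged, mmcv's Config loader can materialize it. A hedged sketch assuming an mmcv/mmdetection environment and a repo layout matching the path above; the printed fields are the ones defined in the record.

# Hedged sketch: load the config and inspect the merged result, assuming mmcv
# (as used by mmdetection) is installed.
from mmcv import Config

cfg = Config.fromfile('configs/vinbig/detectors_resnext.py')
print(cfg.model.backbone.type)            # DetectoRS_ResNeXt
print(cfg.model.neck.type)                # RFP
print(len(cfg.model.roi_head.bbox_head))  # 3 cascade stages, 14 classes each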
skbio/draw/tests/test_distributions.py
johnchase/scikit-bio
0
1544
<gh_stars>0 # ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function from unittest import TestCase, main import numpy as np import numpy.testing as npt import matplotlib.pyplot as plt from skbio.draw import boxplots, grouped_distributions from skbio.draw._distributions import ( _calc_data_point_locations, _calc_data_point_ticks, _color_box_plot, _create_legend, _get_distribution_markers, _is_single_matplotlib_color, _plot_bar_data, _plot_box_data, _plot_scatter_data, _set_axes_options, _set_figure_size, _validate_input, _validate_x_values) class DistributionsTests(TestCase): def setUp(self): # Test null data list. self.Null = None # Test empty data list. self.Empty = [] # Test nested empty data list. self.EmptyNested = [[]] # Test nested empty data list (for bar/scatter plots). self.EmptyDeeplyNested = [[[]]] # Test invalid number of samples in data list (for bar/scatter plots). self.InvalidNumSamples = [[[1, 2, 3, 4, 5]], [[4, 5, 6, 7, 8], [2, 3, 2]], [[4, 7, 10, 33, 32, 6, 7, 8]]] # Test valid data with three samples and four data points # (for bar/scatter plots). self.ValidTypicalData = [[[1.0, 2, 3.5, 5], [2, 3, 5, 6], [2, 3, 8]], [[4, 7, 8], [8, 9, 10, 11], [9.0, 4, 1, 1]], [[4, 33, 32, 6, 8], [5, 4, 8, 13], [1, 1, 2]], [[2, 2, 2, 2], [3, 9, 8], [2, 1, 6, 7, 4, 5]]] # Test valid data with one sample (for bar/scatter plots). self.ValidSingleSampleData = [[[1, 2, 3, 4, 5]], [[4, 5, 6, 7, 8]], [[4, 7, 10, 33, 32, 6, 7, 8]]] # Test typical data to be plotted by the boxplot function. self.ValidTypicalBoxData = [[3.4, 10, 11.67, 12.0, 2, 2, 99.99], [2.3, 4, 5, 88, 9, 10, 11, 1, 0, 3, -8], [2, 9, 7, 5, 6]] def tearDown(self): # We get a warning from mpl if we don't clean up our figures. 
plt.close('all') def test_validate_input_null(self): with npt.assert_raises(ValueError): _validate_input(self.Null, None, None, None) def test_validate_input_empty(self): with npt.assert_raises(ValueError): _validate_input(self.Empty, None, None, None) def test_validate_input_empty_nested(self): with npt.assert_raises(ValueError): _validate_input(self.EmptyNested, None, None, None) def test_validate_input_empty_deeply_nested(self): num_points, num_samples = _validate_input(self.EmptyDeeplyNested, None, None, None) self.assertEqual(num_points, 1) self.assertEqual(num_samples, 1) def test_validate_input_empty_point(self): with npt.assert_raises(ValueError): _validate_input([[[1, 2, 3], [4, 5]], []], None, None, None) def test_validate_input_invalid_num_samples(self): with npt.assert_raises(ValueError): _validate_input(self.InvalidNumSamples, None, None, None) def test_validate_input_invalid_data_point_names(self): with npt.assert_raises(ValueError): _validate_input(self.ValidSingleSampleData, None, ["T0", "T1"], None) def test_validate_input_invalid_sample_names(self): with npt.assert_raises(ValueError): _validate_input(self.ValidSingleSampleData, None, None, ["Men", "Women"]) def test_validate_input_all_valid_input(self): self.assertEqual(_validate_input(self.ValidTypicalData, [1, 3, 4, 8], ["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"]), (4, 3)) def test_validate_x_values_invalid_x_values(self): with npt.assert_raises(ValueError): _validate_x_values([1, 2, 3, 4], ["T0", "T1", "T2"], len(self.ValidSingleSampleData)) def test_validate_x_values_invalid_x_tick_labels(self): with npt.assert_raises(ValueError): _validate_x_values(None, ["T0"], len(self.ValidSingleSampleData)) def test_validate_x_values_nonnumber_x_values(self): with npt.assert_raises(ValueError): _validate_x_values(["foo", 2, 3], None, len(self.ValidSingleSampleData)) def test_validate_x_values_valid_x_values(self): _validate_x_values([1, 2.0, 3], None, 3) def test_get_distribution_markers_null_marker_list(self): self.assertEqual(_get_distribution_markers('colors', None, 5), ['b', 'g', 'r', 'c', 'm']) def test_get_distribution_markers_empty_marker_list(self): self.assertEqual(_get_distribution_markers('colors', None, 4), ['b', 'g', 'r', 'c']) def test_get_distribution_markers_insufficient_markers(self): self.assertEqual(npt.assert_warns(RuntimeWarning, _get_distribution_markers, 'colors', None, 10), ['b', 'g', 'r', 'c', 'm', 'y', 'w', 'b', 'g', 'r']) self.assertEqual(npt.assert_warns(RuntimeWarning, _get_distribution_markers, 'symbols', ['^', '>', '<'], 5), ['^', '>', '<', '^', '>']) def test_get_distribution_markers_bad_marker_type(self): with npt.assert_raises(ValueError): _get_distribution_markers('shapes', [], 3) def test_get_distribution_markers_zero_markers(self): self.assertEqual(_get_distribution_markers('symbols', None, 0), []) self.assertEqual(_get_distribution_markers('symbols', ['^'], 0), []) def test_get_distribution_markers_negative_num_markers(self): with npt.assert_raises(ValueError): _get_distribution_markers('symbols', [], -1) def test_plot_bar_data(self): fig, ax = plt.subplots() result = _plot_bar_data(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5, 'stdv') self.assertEqual(result[0].__class__.__name__, "Rectangle") self.assertEqual(len(result), 1) self.assertAlmostEqual(result[0].get_width(), 0.5) self.assertAlmostEqual(result[0].get_facecolor(), (1.0, 0.0, 0.0, 1.0)) self.assertAlmostEqual(result[0].get_height(), 2.0) fig, ax = plt.subplots() result = _plot_bar_data(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5, 
'sem') self.assertEqual(result[0].__class__.__name__, "Rectangle") self.assertEqual(len(result), 1) self.assertAlmostEqual(result[0].get_width(), 0.5) self.assertAlmostEqual(result[0].get_facecolor(), (1.0, 0.0, 0.0, 1.0)) self.assertAlmostEqual(result[0].get_height(), 2.0) def test_plot_bar_data_bad_error_bar_type(self): fig, ax = plt.subplots() with npt.assert_raises(ValueError): _plot_bar_data(ax, [1, 2, 3], 'red', 0.5, 3.75, 1.5, 'var') def test_plot_bar_data_empty(self): fig, ax = plt.subplots() result = _plot_bar_data(ax, [], 'red', 0.5, 3.75, 1.5, 'stdv') self.assertTrue(result is None) fig, ax = plt.subplots() result = _plot_bar_data(ax, [], 'red', 0.5, 3.75, 1.5, 'sem') self.assertTrue(result is None) def test_plot_scatter_data(self): fig, ax = plt.subplots() result = _plot_scatter_data(ax, [1, 2, 3], '^', 0.77, 1, 1.5, 'stdv') self.assertEqual(result.get_sizes(), 20) def test_plot_scatter_data_empty(self): fig, ax = plt.subplots() result = _plot_scatter_data(ax, [], '^', 0.77, 1, 1.5, 'stdv') self.assertTrue(result is None) def test_plot_box_data(self): fig, ax = plt.subplots() result = _plot_box_data(ax, [0, 0, 7, 8, -3, 44], 'blue', 0.33, 55, 1.5, 'stdv') self.assertEqual(result.__class__.__name__, "dict") self.assertEqual(len(result['boxes']), 1) self.assertEqual(len(result['medians']), 1) self.assertEqual(len(result['whiskers']), 2) # mpl < 1.4.0 creates two Line2D instances, mpl 1.4.0 creates one, # though the resulting plot looks identical between the two versions. # see: # https://github.com/pydata/pandas/issues/8382#issuecomment-56840974 # https://github.com/matplotlib/matplotlib/issues/3544 self.assertTrue(len(result['fliers']) == 1 or len(result['fliers']) == 2) self.assertEqual(len(result['caps']), 2) def test_plot_box_data_empty(self): fig, ax = plt.subplots() result = _plot_box_data(ax, [], 'blue', 0.33, 55, 1.5, 'stdv') self.assertTrue(result is None) def test_calc_data_point_locations_invalid_x_values(self): with npt.assert_raises(ValueError): _calc_data_point_locations(3, [1, 10.5]) def test_calc_data_point_locations_default_spacing(self): locs = _calc_data_point_locations(4) np.testing.assert_allclose(locs, [1, 2, 3, 4]) def test_calc_data_point_locations_custom_spacing(self): # Scaling down from 3..12 to 1..4. locs = _calc_data_point_locations(4, [3, 4, 10, 12]) np.testing.assert_allclose(locs, np.array([1, 1.33333333, 3.33333333, 4])) # Sorted order shouldn't affect scaling. locs = _calc_data_point_locations(4, [4, 3, 12, 10]) np.testing.assert_allclose(locs, np.array([1.33333333, 1, 4, 3.33333333])) # Scaling up from 0.001..0.87 to 1..3. 
locs = _calc_data_point_locations(3, [0.001, 0.2543, 0.87]) np.testing.assert_allclose(locs, np.array([1, 1.58296893, 3])) def test_calc_data_point_ticks(self): ticks = _calc_data_point_ticks(np.array([1, 5, 9, 11]), 1, 0.5, False) np.testing.assert_allclose(ticks, [1.25, 5.25, 9.25, 11.25]) ticks = _calc_data_point_ticks(np.array([0]), 3, 0.5, False) np.testing.assert_allclose(ticks, [0.75]) def test_set_axes_options(self): fig, ax = plt.subplots() _set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label", x_tick_labels=["T0", "T1"]) self.assertEqual(ax.get_title(), "Plot Title") self.assertEqual(ax.get_ylabel(), "y-axis label") self.assertEqual(ax.get_xticklabels()[0].get_text(), "T0") self.assertEqual(ax.get_xticklabels()[1].get_text(), "T1") def test_set_axes_options_ylim(self): fig, ax = plt.subplots() _set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label", x_tick_labels=["T0", "T1", "T2"], y_min=0, y_max=1) self.assertEqual(ax.get_title(), "Plot Title") self.assertEqual(ax.get_ylabel(), "y-axis label") self.assertEqual(ax.get_xticklabels()[0].get_text(), "T0") self.assertEqual(ax.get_xticklabels()[1].get_text(), "T1") self.assertEqual(ax.get_ylim(), (0.0, 1.0)) def test_set_axes_options_x_values_as_tick_labels(self): fig, ax = plt.subplots() _set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label", x_values=[42, 45, 800]) self.assertEqual(ax.get_title(), "Plot Title") self.assertEqual(ax.get_ylabel(), "y-axis label") self.assertEqual(ax.get_xticklabels()[0].get_text(), '42') self.assertEqual(ax.get_xticklabels()[1].get_text(), '45') self.assertEqual(ax.get_xticklabels()[2].get_text(), '800') def test_set_axes_options_bad_ylim(self): fig, ax = plt.subplots() with npt.assert_raises(ValueError): _set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label", x_tick_labels=["T0", "T1", "T2"], y_min='car', y_max=30) def test_set_axes_options_invalid_x_tick_labels_orientation(self): fig, ax = plt.subplots() with npt.assert_raises(ValueError): _set_axes_options(ax, "Plot Title", "x-axis label", "y-axis label", x_tick_labels=["T0", "T1"], x_tick_labels_orientation='brofist') def test_create_legend(self): fig, ax = plt.subplots() _create_legend(ax, ['b', 'r'], ['dist1', 'dist2'], 'colors') self.assertEqual(len(ax.get_legend().get_texts()), 2) fig, ax = plt.subplots() _create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2', 'dist3'], 'symbols') self.assertEqual(len(ax.get_legend().get_texts()), 3) def test_create_legend_invalid_input(self): fig, ax = plt.subplots() with npt.assert_raises(ValueError): _create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2'], 'symbols') with npt.assert_raises(ValueError): _create_legend(ax, ['^', '<', '>'], ['dist1', 'dist2', 'dist3'], 'foo') def test_grouped_distributions_bar(self): fig = grouped_distributions('bar', self.ValidTypicalData, [1, 4, 10, 11], ["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"], ['b', 'r', 'g'], "x-axis label", "y-axis label", "Test") ax = fig.get_axes()[0] self.assertEqual(ax.get_title(), "Test") self.assertEqual(ax.get_xlabel(), "x-axis label") self.assertEqual(ax.get_ylabel(), "y-axis label") self.assertEqual(len(ax.get_xticklabels()), 4) np.testing.assert_allclose(ax.get_xticks(), [1.1125, 2.0125, 3.8125, 4.1125]) def test_grouped_distributions_insufficient_colors(self): args = ('bar', self.ValidTypicalData, [1, 4, 10, 11], ["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"], ['b', 'r'], "x-axis label", "y-axis label", "Test") npt.assert_warns(RuntimeWarning, grouped_distributions, 
*args) def test_grouped_distributions_scatter(self): fig = grouped_distributions('scatter', self.ValidTypicalData, [1, 4, 10, 11], ["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"], ['^', '>', '<'], "x-axis label", "y-axis label", "Test") ax = fig.get_axes()[0] self.assertEqual(ax.get_title(), "Test") self.assertEqual(ax.get_xlabel(), "x-axis label") self.assertEqual(ax.get_ylabel(), "y-axis label") self.assertEqual(len(ax.get_xticklabels()), 4) np.testing.assert_allclose(ax.get_xticks(), [1.075, 1.975, 3.775, 4.075]) def test_grouped_distributions_insufficient_symbols(self): args = ('scatter', self.ValidTypicalData, [1, 4, 10, 11], ["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"], ['^'], "x-axis label", "y-axis label", "Test") npt.assert_warns(RuntimeWarning, grouped_distributions, *args) def test_grouped_distributions_empty_marker_list(self): grouped_distributions('scatter', self.ValidTypicalData, [1, 4, 10, 11], ["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"], [], "x-axis label", "y-axis label", "Test") def test_grouped_distributions_box(self): fig = grouped_distributions('box', self.ValidTypicalData, [1, 4, 10, 11], ["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"], ['b', 'g', 'y'], "x-axis label", "y-axis label", "Test") ax = fig.get_axes()[0] self.assertEqual(ax.get_title(), "Test") self.assertEqual(ax.get_xlabel(), "x-axis label") self.assertEqual(ax.get_ylabel(), "y-axis label") self.assertEqual(len(ax.get_xticklabels()), 4) np.testing.assert_allclose(ax.get_xticks(), [1.075, 1.975, 3.775, 4.075]) def test_grouped_distributions_error(self): with npt.assert_raises(ValueError): grouped_distributions('pie', self.ValidTypicalData, [1, 4, 10, 11], ["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"], ['b', 'g', 'y'], "x-axis label", "y-axis label", "Test") def test_grouped_distributions_negative_distribution_width(self): args = ('box', self.ValidTypicalData, [1, 4, 10, 11], ["T0", "T1", "T2", "T3"], ["Infants", "Children", "Teens"], ['b', 'g', 'y'], "x-axis label", "y-axis label", "Test") with self.assertRaises(ValueError): grouped_distributions(*args, distribution_width=0) with self.assertRaises(ValueError): grouped_distributions(*args, distribution_width=-42) def test_boxplots(self): fig = boxplots(self.ValidTypicalBoxData, [1, 4, 10], ["Data 1", "Data 2", "Data 3"], "Test", "x-axis label", "y-axis label", legend=(('blue', 'red'), ('foo', 'bar'))) ax = fig.get_axes()[0] self.assertEqual(ax.get_title(), "Test") self.assertEqual(ax.get_xlabel(), "x-axis label") self.assertEqual(ax.get_ylabel(), "y-axis label") self.assertEqual(len(ax.get_xticklabels()), 3) self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10])) def test_boxplots_empty_distributions(self): fig = boxplots([[1, 2, 3], [], [4, 5, 6]], [1, 4, 10], ["Data 1", "Data 2", "Data 3"], "Test", "x-axis label", "y-axis label") ax = fig.get_axes()[0] self.assertEqual(ax.get_title(), "Test") self.assertEqual(ax.get_xlabel(), "x-axis label") self.assertEqual(ax.get_ylabel(), "y-axis label") self.assertEqual(len(ax.get_xticklabels()), 3) self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10])) # second distribution (empty) should have nans since it is hidden. # boxplots in mpl < 1.4.0 have 8 lines per boxplot, while mpl 1.4.0 has # 7. 
in either case, the line at index 8 should have a nan for its y # value lines = ax.get_lines() self.assertTrue(np.isnan(lines[8].get_xydata()[0][1])) # line in first distribution should *not* have nan for its y value self.assertFalse(np.isnan(lines[0].get_xydata()[0][1])) # All distributions are empty. fig = boxplots([[], [], []], [1, 4, 10], ["Data 1", "Data 2", "Data 3"], "Test", "x-axis label", "y-axis label") ax = fig.get_axes()[0] self.assertEqual(ax.get_title(), "Test") self.assertEqual(ax.get_xlabel(), "x-axis label") self.assertEqual(ax.get_ylabel(), "y-axis label") self.assertEqual(len(ax.get_xticklabels()), 3) self.assertTrue(np.array_equal(ax.get_xticks(), [1, 4, 10])) lines = ax.get_lines() self.assertTrue(np.isnan(lines[0].get_xydata()[0][1])) self.assertTrue(np.isnan(lines[8].get_xydata()[0][1])) self.assertTrue(np.isnan(lines[16].get_xydata()[0][1])) def test_boxplots_box_colors(self): # Coloring works with all empty distributions. fig = boxplots([[], [], []], box_colors=['blue', 'red', 'yellow']) ax = fig.get_axes()[0] self.assertEqual(len(ax.get_xticklabels()), 3) # patch colors should match what we specified self.assertEqual(ax.patches[0].get_facecolor(), (0.0, 0.0, 1.0, 1.0)) self.assertEqual(ax.patches[1].get_facecolor(), (1.0, 0.0, 0.0, 1.0)) self.assertEqual(ax.patches[2].get_facecolor(), (1.0, 1.0, 0.0, 1.0)) # patch location should include at least one nan since the distribution # is empty, and thus hidden for patch in ax.patches: self.assertTrue(np.isnan(patch.xy[0][1])) fig = boxplots([[], [], []], box_colors='pink') ax = fig.get_axes()[0] self.assertEqual(len(ax.get_xticklabels()), 3) for patch in ax.patches: npt.assert_almost_equal( patch.get_facecolor(), (1.0, 0.7529411764705882, 0.796078431372549, 1.0)) self.assertTrue(np.isnan(patch.xy[0][1])) # Coloring works with some empty distributions. fig = boxplots([[], [1, 2, 3.5], []], box_colors=['blue', 'red', 'yellow']) ax = fig.get_axes()[0] self.assertEqual(len(ax.get_xticklabels()), 3) self.assertEqual(ax.patches[0].get_facecolor(), (0.0, 0.0, 1.0, 1.0)) self.assertEqual(ax.patches[1].get_facecolor(), (1.0, 0.0, 0.0, 1.0)) self.assertEqual(ax.patches[2].get_facecolor(), (1.0, 1.0, 0.0, 1.0)) self.assertTrue(np.isnan(ax.patches[0].xy[0][1])) self.assertFalse(np.isnan(ax.patches[1].xy[0][1])) self.assertTrue(np.isnan(ax.patches[2].xy[0][1])) def test_boxplots_invalid_input(self): # Non-numeric entries in distribution. with npt.assert_raises(ValueError): boxplots([[1, 'foo', 3]]) # Number of colors doesn't match number of distributions. with npt.assert_raises(ValueError): boxplots([[1, 2, 3], [], [4, 5, 6]], box_colors=['blue', 'red']) # Invalid legend. with npt.assert_raises(ValueError): boxplots([[1, 2, 3]], legend=('foo', 'bar', 'baz')) def test_color_box_plot(self): fig, ax = plt.subplots() box_plot = plt.boxplot(self.ValidTypicalBoxData) _color_box_plot(ax, box_plot, ['blue', 'w', (1, 1, 0.9)]) # Some colors are None. fig, ax = plt.subplots() box_plot = plt.boxplot(self.ValidTypicalBoxData) _color_box_plot(ax, box_plot, ['blue', None, (1, 1, 0.9)]) # All colors are None. fig, ax = plt.subplots() box_plot = plt.boxplot(self.ValidTypicalBoxData) _color_box_plot(ax, box_plot, [None, None, None]) def test_color_box_plot_invalid_input(self): # Invalid color. fig, ax = plt.subplots() box_plot = plt.boxplot(self.ValidTypicalBoxData) with npt.assert_raises(ValueError): _color_box_plot(ax, box_plot, ['red', 'foobarbaz', 'blue']) # Wrong number of colors. 
fig, ax = plt.subplots() box_plot = plt.boxplot(self.ValidTypicalBoxData) with npt.assert_raises(ValueError): _color_box_plot(ax, box_plot, ['blue', (1, 1, 0.9)]) def test_is_single_matplotlib_color(self): self.assertTrue(_is_single_matplotlib_color('w')) self.assertTrue(_is_single_matplotlib_color('white')) self.assertTrue(_is_single_matplotlib_color([1, 1, 1])) self.assertTrue(_is_single_matplotlib_color([1, 1, 1, 1])) self.assertTrue(_is_single_matplotlib_color((1, 1, 1))) self.assertTrue(_is_single_matplotlib_color((1, 1, 1, 1))) self.assertTrue(_is_single_matplotlib_color((1.0, 1.0, 1.0, 1.0))) self.assertTrue(_is_single_matplotlib_color((1.0, 1, 1.0))) self.assertTrue(_is_single_matplotlib_color((2.0, 1, 1.0))) self.assertFalse(_is_single_matplotlib_color(['w', 'r'])) self.assertFalse(_is_single_matplotlib_color(['w'])) self.assertFalse(_is_single_matplotlib_color(('w',))) self.assertFalse(_is_single_matplotlib_color(((1.0, 1.0, 1),))) self.assertFalse(_is_single_matplotlib_color(((1.0, 1.0, 1), (0.9, 0.9)))) def test_set_figure_size(self): fig, ax = plt.subplots() _set_axes_options(ax, 'foo', 'x_foo', 'y_foo', x_tick_labels=['foofoofoo', 'barbarbar'], x_tick_labels_orientation='vertical') _set_figure_size(fig, 3, 4) self.assertTrue(np.array_equal(fig.get_size_inches(), (3, 4))) def test_set_figure_size_defaults(self): fig, ax = plt.subplots() _set_axes_options(ax, 'foo', 'x_foo', 'y_foo', x_tick_labels=['foofoofoo', 'barbarbar'], x_tick_labels_orientation='vertical') orig_fig_size = fig.get_size_inches() _set_figure_size(fig) self.assertTrue(np.array_equal(fig.get_size_inches(), orig_fig_size)) def test_set_figure_size_invalid(self): fig, ax = plt.subplots() _set_axes_options(ax, 'foo', 'x_foo', 'y_foo', x_tick_labels=['foofoofoo', 'barbarbar'], x_tick_labels_orientation='vertical') orig_fig_size = fig.get_size_inches() _set_figure_size(fig, -1, 0) self.assertTrue(np.array_equal(fig.get_size_inches(), orig_fig_size)) def test_set_figure_size_long_labels(self): fig, ax = plt.subplots() _set_axes_options(ax, 'foo', 'x_foo', 'y_foo', x_tick_labels=['foofoofooooooooooooooooooooooooo' 'oooooooooooooooooooooooooooooooo' 'oooooooooooooooooooooooooooooooo' 'oooo', 'barbarbar'], x_tick_labels_orientation='vertical') npt.assert_warns(RuntimeWarning, _set_figure_size, fig, 3, 3) npt.assert_array_equal(fig.get_size_inches(), (3, 3)) if __name__ == '__main__': main()
2.25
2
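The assertions in the record above spell out the public call signatures of boxplots fairly completely. A hedged usage sketch built only from those positional calls; it assumes a scikit-bio and matplotlib of the same vintage, and the output filename is illustrative.

# Usage sketch derived from the test calls above; not part of the test module.
import matplotlib
matplotlib.use('Agg')  # render off-screen, as the tests implicitly do
from skbio.draw import boxplots

fig = boxplots([[3.4, 10, 11.67, 12.0], [2.3, 4, 5, 88, 9], [2, 9, 7, 5, 6]],
               [1, 4, 10], ["Data 1", "Data 2", "Data 3"],
               "Example boxplots", "x-axis label", "y-axis label",
               box_colors='pink')
fig.savefig('boxplots_example.png')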
packages/gtmapi/lmsrvcore/api/interfaces/__init__.py
jjwatts/gigantum-client
60
1545
<gh_stars>10-100
from lmsrvcore.api.interfaces.user import User
from lmsrvcore.api.interfaces.git import GitCommit, GitRef, GitRepository
1.125
1
tensorflow_probability/python/bijectors/invert_test.py
matthieucoquet/probability
0
1546
# Copyright 2018 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for Bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v2 as tf from tensorflow_probability.python import bijectors as tfb from tensorflow_probability.python import distributions as tfd from tensorflow_probability.python.bijectors import bijector_test_util from tensorflow_probability.python.internal import tensorshape_util from tensorflow_probability.python.internal import test_util as tfp_test_util from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import @test_util.run_all_in_graph_and_eager_modes class InvertBijectorTest(tf.test.TestCase): """Tests the correctness of the Y = Invert(bij) transformation.""" def testBijector(self): for fwd in [ tfb.Identity(), tfb.Exp(), tfb.Affine(shift=[0., 1.], scale_diag=[2., 3.]), tfb.Softplus(), tfb.SoftmaxCentered(), ]: rev = tfb.Invert(fwd) self.assertStartsWith(rev.name, "_".join(["invert", fwd.name])) x = [[[1., 2.], [2., 3.]]] self.assertAllClose( self.evaluate(fwd.inverse(x)), self.evaluate(rev.forward(x))) self.assertAllClose( self.evaluate(fwd.forward(x)), self.evaluate(rev.inverse(x))) self.assertAllClose( self.evaluate(fwd.forward_log_det_jacobian(x, event_ndims=1)), self.evaluate(rev.inverse_log_det_jacobian(x, event_ndims=1))) self.assertAllClose( self.evaluate(fwd.inverse_log_det_jacobian(x, event_ndims=1)), self.evaluate(rev.forward_log_det_jacobian(x, event_ndims=1))) def testScalarCongruency(self): bijector = tfb.Invert(tfb.Exp()) bijector_test_util.assert_scalar_congruency( bijector, lower_x=1e-3, upper_x=1.5, eval_func=self.evaluate, rtol=0.05) def testShapeGetters(self): bijector = tfb.Invert( tfb.SoftmaxCentered(validate_args=True)) x = tf.TensorShape([2]) y = tf.TensorShape([1]) self.assertAllEqual(y, bijector.forward_event_shape(x)) self.assertAllEqual( tensorshape_util.as_list(y), self.evaluate( bijector.forward_event_shape_tensor(tensorshape_util.as_list(x)))) self.assertAllEqual(x, bijector.inverse_event_shape(y)) self.assertAllEqual( tensorshape_util.as_list(x), self.evaluate( bijector.inverse_event_shape_tensor(tensorshape_util.as_list(y)))) def testDocstringExample(self): exp_gamma_distribution = ( tfd.TransformedDistribution( distribution=tfd.Gamma(concentration=1., rate=2.), bijector=tfb.Invert(tfb.Exp()))) self.assertAllEqual( [], self.evaluate( tf.shape( exp_gamma_distribution.sample(seed=tfp_test_util.test_seed())))) if __name__ == "__main__": tf.test.main()
1.8125
2
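Outside the test harness, the same pattern as testDocstringExample above is the usual way to use Invert: wrap Exp so its inverse (log) becomes the forward transform. A hedged standalone sketch using only APIs that appear in the record, via the public tfp namespace.

# Standalone sketch of the pattern tested above; assumes TensorFlow 2.x and
# TensorFlow Probability are installed.
import tensorflow as tf
import tensorflow_probability as tfp

tfb = tfp.bijectors
tfd = tfp.distributions

# Invert(Exp()) applies log in the forward direction, so this is the
# distribution of log(X) for X ~ Gamma(concentration=1, rate=2).
log_gamma = tfd.TransformedDistribution(
    distribution=tfd.Gamma(concentration=1., rate=2.),
    bijector=tfb.Invert(tfb.Exp()))

samples = log_gamma.sample(3)
print(samples.shape)                      # (3,)
print(log_gamma.log_prob(samples).shape)  # (3,)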
dayu_widgets/alert.py
ZSD-tim/dayu_widgets
0
1547
#!/usr/bin/env python # -*- coding: utf-8 -*- ################################################################### # Author: <NAME> # Date : 2019.2 # Email : <EMAIL> ################################################################### """ MAlert class. """ import six import functools from dayu_widgets.avatar import MAvatar from dayu_widgets.label import MLabel from dayu_widgets import dayu_theme from dayu_widgets.tool_button import MToolButton from dayu_widgets.mixin import property_mixin from dayu_widgets.qt import QWidget, QHBoxLayout, MPixmap, Qt, MIcon, Property @property_mixin class MAlert(QWidget): """ Alert component for feedback. Property: dayu_type: The feedback type with different color container. dayu_text: The feedback string showed in container. """ InfoType = 'info' SuccessType = 'success' WarningType = 'warning' ErrorType = 'error' def __init__(self, text='', parent=None, flags=Qt.Widget): super(MAlert, self).__init__(parent, flags) self.setAttribute(Qt.WA_StyledBackground) self._icon_label = MAvatar() self._icon_label.set_dayu_size(dayu_theme.tiny) self._content_label = MLabel().secondary() self._close_button = MToolButton().svg('close_line.svg').tiny().icon_only() self._close_button.clicked.connect(functools.partial(self.setVisible, False)) self._main_lay = QHBoxLayout() self._main_lay.setContentsMargins(8, 8, 8, 8) self._main_lay.addWidget(self._icon_label) self._main_lay.addWidget(self._content_label) self._main_lay.addStretch() self._main_lay.addWidget(self._close_button) self.setLayout(self._main_lay) self.set_show_icon(True) self.set_closeable(False) self._dayu_type = None self._dayu_text = None self.set_dayu_type(MAlert.InfoType) self.set_dayu_text(text) def set_closeable(self, closeable): """Display the close icon button or not.""" self._close_button.setVisible(closeable) def set_show_icon(self, show_icon): """Display the information type icon or not.""" self._icon_label.setVisible(show_icon) def _set_dayu_text(self): self._content_label.setText(self._dayu_text) self.setVisible(bool(self._dayu_text)) def set_dayu_text(self, value): """Set the feedback content.""" if isinstance(value, six.string_types): self._dayu_text = value else: raise TypeError("Input argument 'value' should be string type, " "but get {}".format(type(value))) self._set_dayu_text() def _set_dayu_type(self): self._icon_label.set_dayu_image(MPixmap('{}_fill.svg'.format(self._dayu_type), vars(dayu_theme).get(self._dayu_type + '_color'))) self.style().polish(self) def set_dayu_type(self, value): """Set feedback type.""" if value in [MAlert.InfoType, MAlert.SuccessType, MAlert.WarningType, MAlert.ErrorType]: self._dayu_type = value else: raise ValueError("Input argument 'value' should be one of " "info/success/warning/error string.") self._set_dayu_type() def get_dayu_type(self): """ Get MAlert feedback type. :return: str """ return self._dayu_type def get_dayu_text(self): """ Get MAlert feedback message. 
:return: six.string_types """ return self._dayu_text dayu_text = Property(six.text_type, get_dayu_text, set_dayu_text) dayu_type = Property(str, get_dayu_type, set_dayu_type) def info(self): """Set MAlert to InfoType""" self.set_dayu_type(MAlert.InfoType) return self def success(self): """Set MAlert to SuccessType""" self.set_dayu_type(MAlert.SuccessType) return self def warning(self): """Set MAlert to WarningType""" self.set_dayu_type(MAlert.WarningType) return self def error(self): """Set MAlert to ErrorType""" self.set_dayu_type(MAlert.ErrorType) return self def closable(self): """Set MAlert closebale is True""" self.set_closeable(True) return self
1.960938
2
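A hedged usage sketch for the MAlert widget defined in the record above. Only QWidget and QHBoxLayout are confirmed to live in dayu_widgets.qt by the record; the QApplication import is an assumption, so fall back to your Qt binding (PySide2/PyQt5) if it fails.

# Usage sketch: the chainable helpers (info/success/warning/error, closable)
# come straight from the MAlert class above.
import sys
from dayu_widgets.alert import MAlert
from dayu_widgets.qt import QApplication, QWidget, QHBoxLayout  # QApplication assumed

app = QApplication(sys.argv)
window = QWidget()
lay = QHBoxLayout(window)
lay.addWidget(MAlert(text='Profile saved.').success())
lay.addWidget(MAlert(text='Disk almost full.').warning().closable())
window.show()
sys.exit(app.exec_())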
week03/code05.py
byeongal/KMUCP
0
1548
<gh_stars>0
# Prompt: "문자열을 입력해 주세요. >> " means "Please enter a string. >> "
input_str = input("문자열을 입력해 주세요. >> ")
# Output: "입력받은 문자열의 길이는 <n> 입니다." means "The length of the entered string is <n>."
print("입력받은 문자열의 길이는", len(input_str), "입니다.")
2.65625
3
jobs/SCH/JB_SALES_HIERARCHY_FLAG_N_SR.py
bibinvasudev/EBI_Project
0
1549
<reponame>bibinvasudev/EBI_Project # SCH1101.sh --> JB_SALES_HIERARCHY_FLAG_N_SR.py #************************************************************************************************************** # # Created by : bibin # Version : 1.0 # # Description : # 1. This script will load the data into 'SALES_HIERARCHY' table based on stream lookups. # # # Initial Creation: # # Date (YYYY-MM-DD) Change Description # ----------------- ------------------ # 2018-11-02 Initial creation # #************************************************************************************************************** # Importing required Lib from dependencies.spark import start_spark from dependencies.EbiReadWrite import EbiReadWrite import logging import sys from time import gmtime, strftime import cx_Oracle import py4j import pyspark # Spark logging logger = logging.getLogger(__name__) # Date Formats start_date = "'"+strftime("%Y-%m-%d %H:%M:%S", gmtime())+"'" log_date =strftime("%Y%m%d", gmtime()) # Job Naming Details script_name = "SCH1101.SH" app_name = "JB_SALES_HIERARCHY_FLAG_N_SR" log_filename = app_name + '_' + log_date + '.log' # Query for loading invoice table def query_data(db_schema): query = """INSERT INTO """+ db_schema +""".SALES_HIERARCHY (SALES_GEOGRAPHY, SALES_MULTI_AREA, SALES_AREA, SALES_MULTI_REGION, SALES_REGION, SALES_DISTRICT, SALES_TEAM, EMPLOYEE_ID, SALES_REP_NUMBER, LOGIN_ID, SALES_REP_NAME, SALES_REP_ORG, COMP_PLAN_TYPE_CODE, COMP_PLAN_TITLE, COMP_PLAN_CATEGORY_CODE, COMP_PLAN_DESCRIPTION, GOAL_CURR_CODE, START_DATE, END_DATE, STATUS_CODE, PARTICIPANT_LEVEL_CODE, SALES_REP_TYPE_CODE, CURRENT_RECORD_FLAG, LAST_HIRE_DATE) SELECT B.WW_DIRECT_GEO_DESCRIPTION AS SALES_GEOGRAPHY, B.MULTI_AREA_DESCRIPTION AS SALES_MULTI_AREA, B.AREA_DESCRIPTION AS SALES_AREA, B.MULTI_REGION_DESCRIPTION AS SALES_MULTI_REGION, SUBSTR(B.REGION_DESCRIPTION,1,50) AS SALES_REGION, SUBSTR(B.DISTRICT_DESCRIPTION,1,50) AS SALES_DISTRICT, SUBSTR(B.TEAM_DESCRIPTION,1,50) AS SALES_TEAM, A.EMPLOYEE_ID, A.BK_SALES_REP_NUMBER AS SALES_REP_NUMBER, SUBSTR(A.EMP_SYS_LOGIN_ID,1,10) AS LOGIN_ID, SUBSTR(A.SALES_REP_NAME,1,50) AS SALES_REP_NAME, A.ORGANIZATION_NAME AS SALES_REP_ORG, A.COMP_PLAN_TYPE_CODE, A.COMP_PLAN_TITLE, A.COMP_PLAN_CATEGORY_CODE, A.COMP_PLAN_DESCRIPTION, NULL AS GOAL_CURR_CODE , A.START_DATE, A.END_DATE, A.STATUS_CODE, A.PARTICIPANT_LEVEL_CODE, SUBSTR(A.SALES_REP_TYPE_CODE,1,5) AS SALES_REP_TYPE_CODE, A.CURRENT_RECORD_FLAG, C.RECENT_HIRE_DATE AS LAST_HIRE_DATE FROM ( SELECT a.*,ROW_NUMBER() over (partition by BK_SALES_REP_NUMBER ORDER BY END_DATE desc) as RANK FROM DIMS.SALES_PARTICIPANT a WHERE BK_SALES_REP_NUMBER NOT IN (SELECT DISTINCT BK_SALES_REP_NUMBER FROM DIMS.SALES_PARTICIPANT WHERE CURRENT_RECORD_FLAG = 'Y') AND PARTICIPANT_LEVEL_CODE = 'SR' ORDER BY BK_SALES_REP_NUMBER,SALES_PARTICIPANT_KEY ) A INNER JOIN DIMS.SALES_TERR_HIERAR_AS_IS_MV B ON B.TERRITORY_KEY = A.TERRITORY_KEY LEFT OUTER JOIN (SELECT LTRIM(BK_EMPLOYEE_ID,'0') BK_EMPLOYEE_ID,RECENT_HIRE_DATE FROM DIMS.WORKER_DETAIL WHERE CURRENT_RECORD_IND = 1 ) C ON C.BK_EMPLOYEE_ID = A.EMPLOYEE_ID WHERE RANK = 1""" return query # Main method def main(): try: src_count = '0' dest_count = '0' # start Spark application and get Spark session, logger and config spark, config = start_spark( app_name=app_name) # Create class Object Ebi_read_write_obj = EbiReadWrite(app_name,spark,config,logger) # DB prop Key of Source DB db_prop_key_load = config['DB_PROP_KEY_LOAD'] db_prop_key_extract = config['DB_PROP_KEY_EXTRACT'] db_schema = config['DB_SCHEMA'] log_file = 
config['LOG_DIR_NAME'] + "/" + log_filename #SQL Query query = query_data(db_schema) # Calling Job Class method --> get_target_data_update() Ebi_read_write_obj.get_target_data_update(query,db_prop_key_load) end_date="'"+strftime("%Y-%m-%d %H:%M:%S", gmtime())+"'" data_format = "JOB START DT : "+start_date+" | SCRIPT NAME : "+script_name+" | JOB : "+app_name+" | SRC COUNT : "+src_count+" | TGT COUNT : "+dest_count+" | JOB END DT : "+end_date+" | STATUS : %(message)s" Ebi_read_write_obj.create_log(data_format,log_file,logger) logger.info("Success") Ebi_read_write_obj.job_debugger_print(" \n __main__ " + app_name +" --> Job "+app_name+" Succeed \n") except Exception as err: # Write expeption in spark log or console end_date="'"+strftime("%Y-%m-%d %H:%M:%S", gmtime())+"'" data_format = "JOB START DT : "+start_date+" | SCRIPT NAME : "+script_name+" | JOB : "+app_name+" | SRC COUNT : "+src_count+" | TGT COUNT : "+dest_count+" | JOB END DT : "+end_date+" | STATUS : %(message)s" Ebi_read_write_obj.create_log(data_format,log_file,logger) logger.info("[Error] Failed") Ebi_read_write_obj.job_debugger_print(" \n Job "+app_name+" Failed\n") logger.error("\n __main__ "+ app_name +" --> Exception-Traceback :: " + str(err)) raise # Entry point for script if __name__ == "__main__": # Calling main() method main()
1.882813
2
myth/util.py
amanbhandari2002/mythproto
1
1550
def decodeLongLong(lst):
    # Recombine the two unsigned 32-bit halves sent by the backend protocol.
    high = int(lst[0]) << 32
    low = int(lst[1])
    if low < 0:
        low += 4294967296
    if high < 0:
        high += 4294967296
    return high + low


def encodeLongLong(i):
    # Split a 64-bit value into its high and low unsigned 32-bit halves.
    high = int(i / 4294967296)
    low = i - high * 4294967296
    return high, low


def parseOk(str):
    if str == 'ok':
        return True
    else:
        return False


def printList(lst):
    #for i in range(len(lst)):
    #    print i, '\t', repr(lst[i])
    pass


# t is a nine item tuple returned by the time module. This method converts it to
# MythTV's standard representation used on filenames
def encodeTime(t):
    ret = ''
    for i in t[:-3]:
        si = str(i)
        if len(si) < 2:
            ret += si.zfill(2)
        else:
            ret += si
    return ret
3.1875
3
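decodeLongLong/encodeLongLong above split a 64-bit value into two unsigned 32-bit halves and recombine them. A quick hedged round-trip check of that arithmetic; the sample value is arbitrary.

# Round-trip sketch mirroring the 64-bit helpers above.
def encode(i):
    high, low = divmod(i, 4294967296)   # same arithmetic as encodeLongLong
    return high, low

def decode(high, low):
    return (high << 32) + low           # same arithmetic as decodeLongLong

value = 7_000_000_123
assert decode(*encode(value)) == value
print(encode(value))   # (1, 2705032827)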
scripts/tator_tracker.py
openem-team/openem
10
1551
<reponame>openem-team/openem #!/usr/bin/env python3 import argparse import openem import os import cv2 import numpy as np from openem.tracking import * import json import sys import datetime import tator from pprint import pprint from collections import defaultdict import yaml import math import subprocess import sys def crop_localization(frame_bgr, localization): img_width = frame_bgr.shape[1] img_height = frame_bgr.shape[0] box_x = round(localization['x'] * img_width) box_y = round(localization['y'] * img_height) box_width = round(localization['width'] * img_width) box_height = round(localization['height'] * img_height) img_crop = frame_bgr[box_y:box_y+box_height,box_x:box_x+box_width,:] return img_crop def join_up_iteration(detections, track_ids): tracklets = defaultdict(list) num_tracklets = np.max(track_ids) + 1 assert(len(detections) == len(track_ids)) for d,tid in zip(detections, track_ids): tracklets[tid].append(d) return tracklets def extend_tracklets(tracklets, length): for track_id,track in tracklets.items(): if len(track) <= 16: continue ext_length = min(length,len(track)) sum_h=0.0 sum_w=0.0 track.sort(key=lambda x:x['frame']) def restore_det(det): det['x'] = det.get('orig_x',det['x']) det['y'] = det.get('orig_y',det['y']) det['width'] = det.get('orig_w',det['width']) det['height'] = det.get('orig_h',det['height']) det['orig_x'] = det['x'] det['orig_y'] = det['y'] det['orig_w'] = det['width'] det['orig_h'] = det['height'] restore_det(track[0]) restore_det(track[-1]) for d in track: sum_h += d['height'] sum_w += d['width'] angle,vel,comps = track_vel(track) vel_x = comps[0] vel_y = comps[1] avg_h = sum_h / len(track) avg_w = sum_w / len(track) new_x = min(1,max(0,track[-1]['x']+(vel_x*ext_length))) new_y = min(1,max(0,track[-1]['y']+(vel_y*ext_length))) old_x = min(1,max(0,track[0]['x']-(vel_x*ext_length))) old_y = min(1,max(0,track[0]['y']-(vel_y*ext_length))) min_x = min(track[-1]['x'],new_x) min_y = min(track[-1]['y'],new_y) if min_x > 0 and min_y > 0: track[-1]['x'] = min_x track[-1]['y'] = min_y track[-1]['width'] = min(max(0,abs(new_x-track[-1]['x'])+avg_w),1) track[-1]['height'] = min(max(0,abs(new_x-track[-1]['y'])+avg_h),1) else: track[-1]['width'] = 0 track[-1]['height'] = 0 min_x = min(track[0]['x'],old_x) min_y = min(track[0]['y'],old_y) if min_x > 0 and min_y > 0: track[0]['x'] = min(max(0,min_x),1) track[0]['y'] = min(max(0,min_y),1) track[0]['width'] = min(max(abs(old_x-track[0]['x'])+avg_w,0),1) track[0]['height'] = min(max(abs(old_x-track[0]['y'])+avg_h,0),1) else: track[0]['width'] = 0 track[0]['height'] = 0 return tracklets def split_tracklets(tracklets): track_ids=[] detections=[] for track_id,track in tracklets.items(): for d in track: track_ids.append(track_id) detections.append(d) return detections,track_ids def trim_tracklets(detections, track_ids, max_length): tracklets = join_up_iteration(detections, track_ids) next_track_id = 1 new_tracklets = {} for track_id,detections in tracklets.items(): new_track_count=math.ceil(len(detections)/max_length) for i in range(new_track_count): start=max_length*i end=max_length+(max_length*i) new_tracklets[next_track_id] = detections[start:end] next_track_id += 1 detections, track_ids = split_tracklets(new_tracklets) track_ids = renumber_track_ids(track_ids) return detections, track_ids if __name__=="__main__": parser = argparse.ArgumentParser(description=__doc__) tator.get_parser(parser) parser.add_argument("--detection-type-id", type=int, required=True) parser.add_argument("--tracklet-type-id", type=int, 
required=True) parser.add_argument("--version-id", type=int) parser.add_argument("--input-version-id", type=int) parser.add_argument("--strategy-config", type=str) parser.add_argument("--dry-run", action='store_true') parser.add_argument('media_files', type=str, nargs='*') args = parser.parse_args() # Weight methods methods = ['hybrid', 'iou', 'iou-motion', 'iou-global-motion'] # Weight methods that require the video visual_methods = ['hybrid', 'iou-global-motion'] api = tator.get_api(args.host, args.token) detection_type = api.get_localization_type(args.detection_type_id) project = detection_type.project version_id = args.version_id default_strategy = {"method": "hybrid", "frame-diffs": [1,2,4,8,16,32,64,128,256], "args": {}, "extension": {'method' : None}, "max-length": {}, "min-length": 0} if args.strategy_config: strategy = {**default_strategy} with open(args.strategy_config, "r") as strategy_file: strategy.update(yaml.load(strategy_file)) else: strategy = default_strategy if strategy['method'] == 'hybrid': model_file = strategy['args']['model_file'] batch_size = strategy['args'].get('batch_size', 4) comparator=FeaturesComparator(model_file) #extractor=FeaturesExtractor(args.model_file) class_method = strategy.get('class-method',None) classify_function = None classify_args = {} if class_method: pip_package=class_method.get('pip',None) if pip_package: p = subprocess.run([sys.executable, "-m", "pip", "install", pip_package]) print("Finished process.", flush=True) function_name = class_method.get('function',None) classify_args = class_method.get('args',None) names = function_name.split('.') module = __import__(names[0]) for name in names[1:-1]: module = getattr(module,name) classify_function = getattr(module,names[-1]) print("Strategy: ", flush=True) pprint(strategy) print(args.media_files, flush=True) optional_fetch_args = {} if args.input_version_id: optional_fetch_args['version'] = [args.input_version_id] for media_file in args.media_files: comps=os.path.splitext(os.path.basename(media_file))[0] media_id=comps.split('_')[0] media = api.get_media(media_id) if media.attributes.get("Tracklet Generator Processed") != "No": print(f"Skipping media ID {media.id}, name {media.name} due to " f"'Tracklet Generator Processed' attribute being set to " f"something other than 'No'!") continue media_shape = (media.height, media.width) fps = media.fps localizations_by_frame = {} localizations = api.get_localization_list(project, type=args.detection_type_id, media_id=[media_id], **optional_fetch_args) localizations = [l.to_dict() for l in localizations] if len(localizations) == 0: print(f"No localizations present in media {media_file}", flush=True) continue print(f"Processing {len(localizations)} detections", flush=True) # Group by localizations by frame for lid, local in enumerate(localizations): frame = local['frame'] if frame in localizations_by_frame: localizations_by_frame[frame].append(local) else: localizations_by_frame[frame] = [local] detections=[] track_ids=[] track_id=1 # If media does not exist, download it. 
if strategy['method'] == 'iou-global-motion': if not os.path.exists(media_file): temp_path = f'/tmp/{os.path.basename(media_file)}' for progress in tator.util.download_media(api, media, temp_path): print(f"Downloading {media_file}, {progress}%...") print("Download finished!") # Unfrag the file subprocess.run(["ffmpeg", '-i', temp_path, '-c:v', 'copy', media_file]) os.remove(temp_path) if strategy['method'] == 'hybrid': # Not all visual methods need detection images vid=cv2.VideoCapture(media_file) ok=True frame = 0 while ok: ok,frame_bgr = vid.read() if frame in localizations_by_frame: for l in localizations_by_frame[frame]: l['bgr'] = crop_localization(frame_bgr, l) if l['attributes']['Confidence'] < 0.50: continue detections.append(l) track_ids.append(track_id) track_id += 1 frame+=1 else: # The method is analytical on the detections coordinates # and does not require processing the video for frame,frame_detections in localizations_by_frame.items(): for det in frame_detections: detections.append(det) track_ids.append(track_id) track_id += 1 print("Loaded all detections", flush=True) track_ids = renumber_track_ids(track_ids) if strategy['method'] == 'hybrid': weights_strategy = HybridWeights(comparator, None, None, media_shape, fps, 0.0, batch_size) elif strategy['method'] == 'iou': weights_strategy = IoUWeights(media_shape, **strategy['args']) elif strategy['method'] == 'iou-motion': weights_strategy = IoUMotionWeights(media_shape, **strategy['args']) elif strategy['method'] == 'iou-global-motion': weights_strategy = IoUGlobalMotionWeights(media_shape, media_file, **strategy['args']) # Generate localization bgr based on grouped localizations for x in strategy['frame-diffs']: print(f"Started {x}", flush=True) detections, track_ids, pairs, weights, is_cut, constraints = join_tracklets( detections, track_ids, x, weights_strategy) if x in strategy['max-length']: trim_to = strategy['max-length'][x] print(f"Trimming track to max length of {trim_to}") detections, track_ids = trim_tracklets(detections, track_ids, trim_to) _,det_counts_per_track=np.unique(track_ids,return_counts=True) print(f"frame-diff {x}: {len(detections)} to {len(det_counts_per_track)}", flush=True) if x > 1 and strategy['extension']['method'] == 'linear-motion': ext_frames=x print(f"Extending by linear motion, {ext_frames}") tracklets = join_up_iteration(detections,track_ids) tracklets = extend_tracklets(tracklets, ext_frames) detections, track_ids = split_tracklets(tracklets) # Now we make new track objects based on the result # from the graph solver # [ detection, detection, detection, ...] # [ track#, track#, track#,...] # [ 133, 33, 13, 133,] # [ 0,0,1,1] # TODO: Handle is_cut? 
def join_up_final(detections, track_ids): tracklets = defaultdict(list) num_tracklets = np.max(track_ids) + 1 assert(len(detections) == len(track_ids)) for d,tid in zip(detections, track_ids): tracklets[tid].append(d) return tracklets def make_object(track): track.sort(key=lambda x:x['frame']) if classify_function: valid,attrs = classify_function(media.to_dict(), track, **classify_args) elif len(track) >= strategy['min-length']: valid = True attrs = {} else: valid = False attrs = {} if valid: obj={"type": args.tracklet_type_id, "media_ids": [int(media_id)], "localization_ids": [x['id'] for x in track], **attrs, "version": version_id} return obj else: return None tracklets = join_up_final(detections, track_ids) new_objs=[make_object(tracklet) for tracklet in tracklets.values()] new_objs=[x for x in new_objs if x is not None] print(f"New objects = {len(new_objs)}") with open(f"/work/{media_id}.json", "w") as f: json.dump(new_objs,f) if not args.dry_run: for response in tator.util.chunked_create(api.create_state_list,project, state_spec=new_objs): pass try: api.update_media(int(media_id), {"attributes":{"Tracklet Generator Processed": str(datetime.datetime.now())}}) except: print("WARNING: Unable to set 'Tracklet Generator Processed' attribute")
2.15625
2
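trim_tracklets in the record above caps track length by splitting long tracks into consecutive pieces of at most max_length detections. A hedged, isolated sketch of just that chunking step; chunk_track and the integer stand-ins are illustrative, not names from the script.

# Isolated sketch of the trim_tracklets chunking: a track longer than
# max_length becomes consecutive pieces of at most max_length detections.
import math

def chunk_track(detections, max_length):
    pieces = []
    for i in range(math.ceil(len(detections) / max_length)):
        pieces.append(detections[i * max_length:(i + 1) * max_length])
    return pieces

track = list(range(7))          # stand-in for detection dicts
print(chunk_track(track, 3))    # [[0, 1, 2], [3, 4, 5], [6]]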
hypergan/losses/multi_loss.py
Darkar25/HyperGAN
1
1552
import tensorflow as tf
import numpy as np
import hyperchamber as hc

from hypergan.losses.base_loss import BaseLoss
from hypergan.multi_component import MultiComponent

TINY = 1e-8

class MultiLoss(BaseLoss):
    """Takes multiple distributions and does an additional approximator"""
    def _create(self, d_real, d_fake):
        gan = self.gan
        config = self.config
        losses = []
        split = self.split

        for d in gan.discriminator.children:
            if config.swapped:
                d_swap = d_real
                d_real = d_fake
                d_fake = d_swap

            ds = self.split_batch(d.sample, split)
            d_real = ds[0]
            d_fake = tf.add_n(ds[1:]) / (len(ds) - 1)

            loss_object = self.config['loss_class'](gan, self.config, d_real=d_real, d_fake=d_fake)
            losses.append(loss_object)

        #relational layer?
        combine = MultiComponent(combine='concat', components=losses)

        g_loss = combine.g_loss_features
        d_loss = combine.d_loss_features

        self.d_loss = d_loss
        self.g_loss = g_loss
        self.losses = losses

        return [d_loss, g_loss]
2.09375
2
src/fidesops/api/v1/endpoints/policy_endpoints.py
mohan-pogala/fidesops
0
1553
import logging from typing import Any, Dict, List from fastapi import APIRouter, Body, Depends, Security from fastapi_pagination import ( Page, Params, ) from fastapi_pagination.bases import AbstractPage from fastapi_pagination.ext.sqlalchemy import paginate from fidesops.schemas.shared_schemas import FidesOpsKey from pydantic import conlist from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import Session from starlette.exceptions import HTTPException from starlette.status import HTTP_404_NOT_FOUND from fidesops.api import deps from fidesops.api.v1 import scope_registry as scopes from fidesops.api.v1 import urn_registry as urls from fidesops.common_exceptions import ( DataCategoryNotSupported, PolicyValidationError, RuleValidationError, RuleTargetValidationError, KeyOrNameAlreadyExists, ) from fidesops.models.client import ClientDetail from fidesops.models.policy import ( ActionType, Policy, Rule, RuleTarget, ) from fidesops.models.storage import StorageConfig from fidesops.schemas import policy as schemas from fidesops.schemas.api import BulkUpdateFailed from fidesops.util.oauth_util import verify_oauth_client router = APIRouter(tags=["Policy"], prefix=urls.V1_URL_PREFIX) logger = logging.getLogger(__name__) @router.get( urls.POLICY_LIST, status_code=200, response_model=Page[schemas.PolicyResponse], dependencies=[Security(verify_oauth_client, scopes=[scopes.POLICY_READ])], ) def get_policy_list( *, db: Session = Depends(deps.get_db), params: Params = Depends(), ) -> AbstractPage[Policy]: """ Return a paginated list of all Policy records in this system """ logger.info(f"Finding all policies with pagination params '{params}'") policies = Policy.query(db=db) return paginate(policies, params=params) def get_policy_or_error(db: Session, policy_key: FidesOpsKey) -> Policy: """Helper method to load Policy or throw a 404""" logger.info(f"Finding policy with key '{policy_key}'") policy = Policy.get_by(db=db, field="key", value=policy_key) if not policy: raise HTTPException( status_code=HTTP_404_NOT_FOUND, detail=f"No Policy found for key {policy_key}.", ) return policy @router.get( urls.POLICY_DETAIL, status_code=200, response_model=schemas.PolicyResponse, dependencies=[Security(verify_oauth_client, scopes=[scopes.POLICY_READ])], ) def get_policy( *, policy_key: FidesOpsKey, db: Session = Depends(deps.get_db), ) -> schemas.PolicyResponse: """ Return a single Policy """ return get_policy_or_error(db, policy_key) @router.patch( urls.POLICY_LIST, status_code=200, response_model=schemas.BulkPutPolicyResponse, ) def create_or_update_policies( *, client: ClientDetail = Security( verify_oauth_client, scopes=[scopes.POLICY_CREATE_OR_UPDATE], ), db: Session = Depends(deps.get_db), data: conlist(schemas.Policy, max_items=50) = Body(...), # type: ignore ) -> schemas.BulkPutPolicyResponse: """ Given a list of policy data elements, create or update corresponding Policy objects or report failure """ created_or_updated: List[Policy] = [] failed: List[BulkUpdateFailed] = [] logger.info(f"Starting bulk upsert for {len(data)} policies") for policy_schema in data: policy_data: Dict[str, Any] = dict(policy_schema) try: policy = Policy.create_or_update( db=db, data={ "name": policy_data["name"], "key": policy_data.get("key"), "client_id": client.id, }, ) except KeyOrNameAlreadyExists as exc: logger.warning("Create/update failed for policy: %s", exc) failure = { "message": exc.args[0], "data": policy_data, } failed.append(BulkUpdateFailed(**failure)) continue except PolicyValidationError as exc: 
logger.warning("Create/update failed for policy: %s", exc) failure = { "message": "This record could not be added because the data provided was invalid.", "data": policy_data, } failed.append(BulkUpdateFailed(**failure)) continue else: created_or_updated.append(policy) return schemas.BulkPutPolicyResponse( succeeded=created_or_updated, failed=failed, ) @router.patch( urls.RULE_LIST, status_code=200, response_model=schemas.BulkPutRuleResponse, ) def create_or_update_rules( *, client: ClientDetail = Security( verify_oauth_client, scopes=[scopes.RULE_CREATE_OR_UPDATE], ), policy_key: FidesOpsKey, db: Session = Depends(deps.get_db), input_data: conlist(schemas.RuleCreate, max_items=50) = Body(...), # type: ignore ) -> schemas.BulkPutRuleResponse: """ Given a list of Rule data elements, create or update corresponding Rule objects or report failure """ logger.info(f"Finding policy with key '{policy_key}'") policy = get_policy_or_error(db, policy_key) created_or_updated: List[Rule] = [] failed: List[BulkUpdateFailed] = [] logger.info( f"Starting bulk upsert for {len(input_data)} rules on policy {policy_key}" ) for schema in input_data: # Validate all FKs in the input data exist associated_storage_config_id = None if schema.action_type == ActionType.access.value: # Only validate the associated StorageConfig on access rules storage_destination_key = schema.storage_destination_key associated_storage_config: StorageConfig = StorageConfig.get_by( db=db, field="key", value=storage_destination_key, ) if not associated_storage_config: logger.warning( f"No storage config found with key {storage_destination_key}" ) failure = { "message": f"A StorageConfig with key {storage_destination_key} does not exist", "data": dict( schema ), # Be sure to pass the schema out the same way it came in } failed.append(BulkUpdateFailed(**failure)) continue else: associated_storage_config_id = associated_storage_config.id masking_strategy_data = None if schema.masking_strategy: masking_strategy_data = schema.masking_strategy.dict() try: rule = Rule.create_or_update( db=db, data={ "action_type": schema.action_type, "client_id": client.id, "key": schema.key, "name": schema.name, "policy_id": policy.id, "storage_destination_id": associated_storage_config_id, "masking_strategy": masking_strategy_data, }, ) except KeyOrNameAlreadyExists as exc: logger.warning( f"Create/update failed for rule '{schema.key}' on policy {policy_key}: {exc}" ) failure = { "message": exc.args[0], "data": dict(schema), } failed.append(BulkUpdateFailed(**failure)) continue except RuleValidationError as exc: logger.warning( f"Create/update failed for rule '{schema.key}' on policy {policy_key}: {exc}" ) failure = { "message": exc.args[0], "data": dict(schema), } failed.append(BulkUpdateFailed(**failure)) continue except ValueError as exc: logger.warning( f"Create/update failed for rule '{schema.key}' on policy {policy_key}: {exc}" ) failure = { "message": exc.args[0], "data": dict(schema), } failed.append(BulkUpdateFailed(**failure)) continue else: created_or_updated.append(rule) return schemas.BulkPutRuleResponse(succeeded=created_or_updated, failed=failed) @router.delete( urls.RULE_DETAIL, status_code=204, dependencies=[Security(verify_oauth_client, scopes=[scopes.RULE_DELETE])], ) def delete_rule( *, policy_key: FidesOpsKey, rule_key: FidesOpsKey, db: Session = Depends(deps.get_db), ) -> None: """ Delete a policy rule. 
""" policy = get_policy_or_error(db, policy_key) logger.info(f"Finding rule with key '{rule_key}'") rule = Rule.filter( db=db, conditions=(Rule.key == rule_key and Rule.policy_id == policy.id) ).first() if not rule: raise HTTPException( status_code=HTTP_404_NOT_FOUND, detail=f"No Rule found for key {rule_key} on Policy {policy_key}.", ) logger.info(f"Deleting rule with key '{rule_key}'") rule.delete(db=db) @router.patch( urls.RULE_TARGET_LIST, status_code=200, response_model=schemas.BulkPutRuleTargetResponse, ) def create_or_update_rule_targets( *, client: ClientDetail = Security( verify_oauth_client, scopes=[scopes.RULE_CREATE_OR_UPDATE] ), policy_key: FidesOpsKey, rule_key: FidesOpsKey, db: Session = Depends(deps.get_db), input_data: conlist(schemas.RuleTarget, max_items=50) = Body(...), # type: ignore ) -> schemas.BulkPutRuleTargetResponse: """ Given a list of Rule data elements, create corresponding Rule objects or report failure """ policy = get_policy_or_error(db, policy_key) logger.info(f"Finding rule with key '{rule_key}'") rule = Rule.filter( db=db, conditions=(Rule.key == rule_key and Rule.policy_id == policy.id) ).first() if not rule: raise HTTPException( status_code=HTTP_404_NOT_FOUND, detail=f"No Rule found for key {rule_key} on Policy {policy_key}.", ) created_or_updated = [] failed = [] logger.info( f"Starting bulk upsert for {len(input_data)} rule targets on rule {rule_key}" ) for schema in input_data: try: target = RuleTarget.create_or_update( db=db, data={ "name": schema.name, "key": schema.key, "data_category": schema.data_category, "rule_id": rule.id, "client_id": client.id, }, ) except KeyOrNameAlreadyExists as exc: logger.warning( f"Create/update failed for rule target {schema.key} on rule {rule_key}: {exc}" ) failure = { "message": exc.args[0], "data": dict(schema), } failed.append(BulkUpdateFailed(**failure)) continue except ( DataCategoryNotSupported, PolicyValidationError, RuleTargetValidationError, ) as exc: logger.warning( f"Create/update failed for rule target {schema.key} on rule {rule_key}: {exc}" ) failure = { "message": exc.args[0], "data": dict(schema), } failed.append(BulkUpdateFailed(**failure)) continue except IntegrityError as exc: logger.warning( f"Create/update failed for rule target {schema.key} on rule {rule_key}: {exc}" ) failure = { "message": f"DataCategory {schema.data_category} is already specified on Rule with ID {rule.id}", "data": dict(schema), } failed.append(BulkUpdateFailed(**failure)) else: created_or_updated.append(target) return schemas.BulkPutRuleTargetResponse( succeeded=created_or_updated, failed=failed, ) @router.delete( urls.RULE_TARGET_DETAIL, status_code=204, dependencies=[Security(verify_oauth_client, scopes=[scopes.RULE_DELETE])], ) def delete_rule_target( *, policy_key: FidesOpsKey, rule_key: FidesOpsKey, rule_target_key: FidesOpsKey, db: Session = Depends(deps.get_db), ) -> None: """ Delete the rule target. 
""" policy = get_policy_or_error(db, policy_key) logger.info(f"Finding rule with key '{rule_key}'") rule = Rule.filter( db=db, conditions=(Rule.key == rule_key and Rule.policy_id == policy.id) ).first() if not rule: raise HTTPException( status_code=HTTP_404_NOT_FOUND, detail=f"No Rule found for key {rule_key} on Policy {policy_key}.", ) logger.info(f"Finding rule target with key '{rule_target_key}'") target = RuleTarget.filter( db=db, conditions=( RuleTarget.key == rule_target_key and RuleTarget.rule_id == rule.id ), ).first() if not target: raise HTTPException( status_code=HTTP_404_NOT_FOUND, detail=f"No RuleTarget found for key {rule_target_key} at Rule {rule_key} on Policy {policy_key}.", ) logger.info(f"Deleting rule target with key '{rule_target_key}'") target.delete(db=db)
1.828125
2
engage-analytics/sentiment_analysis/src/report/interface_report.py
oliveriopt/mood-analytics
0
1554
<gh_stars>0 import emoji import sentiment_analysis.src.report.cons_report as cons import sentiment_analysis.src.constants as global_cons from utils.data_connection.api_data_manager import APISourcesFetcher from utils.utilities import read_json_file, CUSTOM_YEAR_WEEK_AGG, extract_dimension, extract_question from sentiment_analysis.src.word_cloud import words_clouds from sentiment_analysis.src.clients_language_sentiments_entity import ClientsLanguageSentiment from nested_lookup import nested_lookup class InterFaceReport: def __init__(self, topics: dict, surveys: dict, company_id: str, weeks: list, g_client: ClientsLanguageSentiment, api_source_manager: APISourcesFetcher): self.topics = topics self.surveys = surveys self.company_id = company_id self.weeks = weeks self.g_client = g_client self.api_source_manager = api_source_manager self.thresholds = () self.table_surveys_replies = [] self.table_topics = [] self.table_topic_comment = [] self.counter_text_sr = None self.counter_text_topics = None self.info_file = read_json_file("en_US.json") self.image_base64_sr = None self.image_base64_topics = None def sort_by_dimension_sentiment_table(self) -> None: """ Sort by dimension and by sentiment :return: """ temp_table = [] for dimension in cons.dimensions: temp = [d for d in self.table_surveys_replies if d['dimension'] == dimension] temp = sorted(temp, key=lambda k: k['sentiment'], reverse=True) temp_table.extend(temp) self.table_surveys_replies = temp_table def insert_to_list_surveys_replies(self, features: list, company_week: int) -> None: """ Create array with the dictionary for interface :param features: list of features to extract :param company_week: company week of the company :return: """ for item_analyze in features: question = extract_question(self.info_file, dimension=item_analyze[0], week=company_week) dimension = extract_dimension(self.info_file, dimension=item_analyze[0]) comment = item_analyze[1] sentiment = item_analyze[2] temp = {} temp.update(dimension=dimension) temp.update(question=question) temp.update(comment=emoji.emojize(comment, use_aliases=True)) temp.update(sentiment=sentiment) self.table_surveys_replies.append(temp) self.sort_by_dimension_sentiment_table() def insert_to_list_topics(self, features: list) -> None: """ Create array with the dictionary for interface - referenced to topic headlines :param features: list of features to extract :return: """ for item_analyze in features: topic_id = item_analyze[0] comment = item_analyze[1] sentiment = item_analyze[2] temp = {} temp.update(id=topic_id) temp.update(comment=emoji.emojize(comment, use_aliases=True)) temp.update(sentiment=sentiment) self.table_topics.append(temp) self.table_topics = sorted(self.table_topics, key=lambda k: k['sentiment'], reverse=True) def insert_to_list_topic_comments(self, features: list) -> None: """ Create array with the dictionary for interface - referenced to topic comments :param features: list of features to extract :return: """ for item_analyze in features: topic_id_comment_id = item_analyze[0] comment = item_analyze[1] sentiment = item_analyze[2] temp = {} temp.update(id=topic_id_comment_id) temp.update(comment=emoji.emojize(comment, use_aliases=True)) temp.update(sentiment=sentiment) self.table_topic_comment.append(temp) self.table_topic_comment = sorted(self.table_topic_comment, key=lambda k: k['sentiment'], reverse=True) def word_cloud(self): """ Create wordcloud of the main words :return: """ self.image_base64_sr = words_clouds(self.counter_text_sr, cons.path_image_sr_wc) 
self.image_base64_topics = words_clouds(self.counter_text_topics, cons.path_image_topics_wc) @staticmethod def __count_filter_keys(entities: list) -> object: """ Count and filter keys :param entities: list of entities text :return: """ entities = ClientsLanguageSentiment.count_entities(entities=entities) entities = ClientsLanguageSentiment.filter_black_list(entities=entities) return entities def __process_sr(self) -> None: """ Process the surveys replies :return: """ for company_id, periods in self.surveys.items(): for period in self.weeks: period_parts = period.split(CUSTOM_YEAR_WEEK_AGG) translations_week = self.api_source_manager.get_company_week_from_period(week=period_parts[0], year=period_parts[1], company_id=self.company_id) sr_dimension = nested_lookup(global_cons.SR_DIMENSION, periods) sr_content = nested_lookup(global_cons.SR_CONTENT, periods) sr_sentiment = nested_lookup(global_cons.SENTIMENT, periods) sr_entities = nested_lookup(global_cons.SR_ENTITIES, periods) sr_comment_score = list(zip(sr_dimension, sr_content, sr_sentiment)) self.insert_to_list_surveys_replies(sr_comment_score, company_week=translations_week) self.counter_text_sr = self.__count_filter_keys(entities=sr_entities) def __process_topics(self) -> None: """ Process the topics :return: """ for company_id, topics in self.topics.items(): # heading topic_headings = nested_lookup(global_cons.TOPIC_CONTENT, topics) topic_headings_sentiments = nested_lookup(global_cons.TOPIC_SENTIMENT, topics) topic_ids = list(topics.keys()) topic_w_sentiments = list(zip(topic_ids, topic_headings, topic_headings_sentiments)) self.insert_to_list_topics(topic_w_sentiments) # comments for topic_id, topic in topics.items(): topic_comments = nested_lookup(global_cons.TOPIC_COMMENT, topic) topic_comments_scores = nested_lookup(global_cons.TOPIC_COMMENT_SENTIMENT, topic) topic_list_ids = [topic_id] * len(topic_comments) topic_w_scores = list(zip(topic_list_ids, topic_comments, topic_comments_scores)) self.insert_to_list_topic_comments(topic_w_scores) entities = nested_lookup(global_cons.TOPIC_ENTITIES, topics) self.counter_text_topics = ClientsLanguageSentiment.count_entities(entities) def process_interface(self) -> None: """ Take the info needed to write into report_pdf :return: """ self.__process_sr() self.__process_topics()
2.1875
2
dwh_analytic/dags/data_warehouse_prod/schema/dim_process.py
dnguyenngoc/analytic
0
1555
resource = 'human and machine'


class DimProcess:
    def __init__(
        self,
        *,
        process_key: int,
        module: str,
        type: str,
        step: str,
        sub_step: str,
        resource: str = 'human',
    ):
        # Store the dimension attributes on the instance.
        self.process_key = process_key
        self.module = module
        self.type = type
        self.step = step
        self.sub_step = sub_step
        self.resource = resource

    def steps(self):
        # Valid processing steps for this dimension
        # (renamed from `step` to avoid clashing with the `step` attribute).
        return ['qc', 'auto_qc', 'apr_qc', 'keyer_input']

    def example_data(self):
        # Two sample rows: one human step and one machine step.
        return [
            {
                'process_key': 1,
                'resource': 'human',
                'module': 'keyed_data',
                'step': 'qc',
                'sub_step': None,
            },
            {
                'process_key': 2,
                'resource': 'machine',
                'module': 'keyed_data',
                'step': 'transform',
                'sub_step': None,
            },
        ]


class FactDataExtractionModel:
    def __init__(
        self,
        *,
        project_id: str,
        document_id: str,
        doc_set_id: str,
        last_modified_time_key: int,
        last_modified_date_key: int,
        user_name: str = None,
        process_key: int,
        field_name: str,
        field_value: str = None,
        last_modified_timestamp: str
    ):
        self.project_id = project_id
        self.document_id = document_id
        self.doc_set_id = doc_set_id
        self.last_modified_time_key = last_modified_time_key
        self.last_modified_date_key = last_modified_date_key
        self.user_name = user_name
        self.process_key = process_key
        self.field_name = field_name
        self.field_value = field_value
        self.last_modified_timestamp = last_modified_timestamp
2.25
2
lino/modlib/gfks/mixins.py
NewRGB/lino
1
1556
# -*- coding: UTF-8 -*-
# Copyright 2010-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)

from builtins import object

from django.contrib.contenttypes.models import *
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.utils.text import format_lazy

from lino.api import dd
from lino.core.gfks import gfk2lookup

from .fields import GenericForeignKey, GenericForeignKeyIdField


class Controllable(dd.Model):

    # Translators: will also be concatenated with '(type)' '(object)'
    owner_label = _('Controlled by')

    controller_is_optional = True

    class Meta(object):
        abstract = True

    owner_type = dd.ForeignKey(
        ContentType,
        editable=True,
        blank=True, null=True,
        verbose_name=format_lazy(u"{} {}", owner_label, _('(type)')))

    owner_id = GenericForeignKeyIdField(
        owner_type,
        editable=True,
        blank=True, null=True,
        verbose_name=format_lazy(u"{} {}", owner_label, _('(object)')))

    owner = GenericForeignKey(
        'owner_type', 'owner_id',
        verbose_name=owner_label)

    @classmethod
    def update_controller_field(cls, verbose_name=None, **kwargs):
        if verbose_name is not None:
            dd.update_field(cls, 'owner', verbose_name=verbose_name)
            kwargs.update(
                verbose_name=format_lazy(u"{} {}", verbose_name, _('(object)')))
        dd.update_field(cls, 'owner_id', **kwargs)
        if verbose_name is not None:
            kwargs.update(
                verbose_name=format_lazy(u"{} {}", verbose_name, _('(type)')))
        dd.update_field(cls, 'owner_type', **kwargs)

    def update_owned_instance(self, controllable):
        if self.owner:
            self.owner.update_owned_instance(controllable)
        super(Controllable, self).update_owned_instance(controllable)

    def save(self, *args, **kw):
        if settings.SITE.loading_from_dump:
            super(Controllable, self).save(*args, **kw)
        else:
            if self.owner:
                self.owner.update_owned_instance(self)
            super(Controllable, self).save(*args, **kw)
            if self.owner:
                self.owner.after_update_owned_instance(self)

    def controlled_rows(self, model, **kwargs):
        gfk = self._meta.get_field('owner')
        kwargs = gfk2lookup(gfk, self, **kwargs)
        return model.objects.filter(**kwargs)
2.078125
2
optical_form_reader/main.py
1enes/optical_form_reader
0
1557
<filename>optical_form_reader/main.py<gh_stars>0 import cv2 import numpy as np from imutils import contours from imutils.perspective import four_point_transform import imutils import cv2 import matplotlib.pyplot as plt import numpy as np from imutils import contours from imutils.perspective import four_point_transform,order_points import imutils cevap_anahtar={0:2,1:1,2:2,3:3,4:1,5:4,6:4,7:3,8:1,9:1,10:0,11:0,12:2,13:1,14:2,15:3,16:4,17:4,18:4,19:3,20:2,21:1,22:0,23:0,24:0,25:4,26:2,27:3,28:4,29:4,30:4,31:3,32:2,33:1,34:0,35:0,36:1,37:2,38:3,39:4} #, alfabe={0:'A',1:'B',2:'C',3:'Ç',4:'D',5:'E',6:'F',7:'G',8:'Ğ',9:'H',10:'I',11:'İ',12:'J',13:'K',14:'L',15:'M',16:'N',17:'O',18:'Ö',19:'P',20:'Q',21:'R',22:'S',23:'Ş',24:'T',25:'U',26:'Ü',27:'V',28:'W',29:'Y',30:'Z',31:'X'} def cevap_islemleri(isim,coords): a=0 thresh=cv2.threshold(isim,179,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method="top-to-bottom")[0] for (s,i) in enumerate(np.arange(0,len(coords),20)): cevap=None cnt=contours.sort_contours(coords[i:i+30])[0] toplam_beyaz=None for (j,c) in enumerate(cnt): maske=np.zeros(thresh.shape,dtype=np.uint8) cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1) maske=cv2.bitwise_and(thresh,thresh,mask=maske) a+=1 toplam_beyaz=cv2.countNonZero(maske) if cevap is None or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s) def cevap_contour_bul(isim,isim_gri): coord=[] thresholded=cv2.adaptiveThreshold(isim_gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8) contour=cv2.findContours(thresholded,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE) x_coords=[(0,0)] sayac=0 contour=imutils.grab_contours(contour) contour=contours.sort_contours(contour,method="top-to-bottom")[0] for c in contour: approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True) area=cv2.contourArea(approx) (x, y, w, h) = cv2.boundingRect(approx) ar = w / float(h) if area<1500 and area>250 and ar>=0.9 and ar<=1.1: box=cv2.minAreaRect(approx) box=cv2.boxPoints(box) box=np.array(box,dtype=np.int) M=cv2.moments(box) x=int(M['m10']/M['m00']) y=int(M['m01']/M['m00']) res=tekrar_bul(x_coords,x) if res is False and abs(x_coords[-1][1]-y)<35: coord.append(approx) x_coords.append((x,y)) sayac+=1 #cv2.drawContours(isim,[box],0,(255,0,0),thickness=3) #cv2.drawContours(isim,[approx],0,(0,0,255),thickness=2) elif abs(x_coords[-1][1]-y)>=35: coord.append(approx) x_coords=[(0,0)] sayac+=1 x_coords.append((x,y)) #cv2.drawContours(isim,[box],0,(255,0,0),thickness=3) #cv2.drawContours(isim,[approx],0,(0,0,255),thickness=2) else: continue return coord def ters_bul(kagit,areas): ret=False #print(areas[0][0]) if areas[0][0]!=1 and areas[0][1]+areas[1][1]>2300000: kagit=imutils.rotate(kagit,angle=180) print("Kağıdı ters koymuşsunuz,çevrildi") ret=True return ret,kagit else: return ret,kagit def kagit_bul(image,gray): thr=cv2.threshold(gray,150,255,cv2.THRESH_BINARY)[1] contour=cv2.findContours(thr,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE) contour=imutils.grab_contours(contour) contour=sorted(contour,key=cv2.contourArea,reverse=True) for c in contour: approx=cv2.approxPolyDP(c,0.02*cv2.arcLength(c,True),True) if len(approx)==4: #cv2.drawContours(image,[approx],0,(0,255,0),thickness=3) break warp=four_point_transform(image,approx.reshape(4,2)) warp_gri=four_point_transform(gray,approx.reshape(4,2)) return warp,warp_gri def soru_grup_contour_bul(resim,gri): thr2=cv2.threshold(gri,0,255,cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)[1] can=cv2.Canny(thr2,50,100) can=cv2.dilate(can,None,iterations=3) coords=[] 
cont=cv2.findContours(can,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE) cont=imutils.grab_contours(cont) for c in cont: approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True) area=cv2.contourArea(approx) (x, y, w, h) = cv2.boundingRect(approx) ar = w / float(h) if cv2.contourArea(c)>30 and ar>=0.9 and ar<=1.1: box=cv2.minAreaRect(approx) box=cv2.boxPoints(box) box=np.array(box,dtype=np.int) if cv2.contourArea(box)>150: coords.append(approx) cv2.drawContours(resim,[box],0,(0,0,255),thickness=3) if len(coords)==5: return coords else: return 0 def tekrar_bul(array,koordinat): for c in array: if koordinat==c[0] or abs(koordinat-c[0])<15: return True #Tekrar var else: pass return False def contour_bul(isim,isim_gri,karmasiklik=0): coord=[] thr6=cv2.adaptiveThreshold(isim_gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8) #thr6=cv2.threshold(isim_gri,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)[1] ar_value=200 #if karmasiklik==1: # ar_value=800 cont=cv2.findContours(thr6,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE) x_coords=[(0,0)] sayac=0 cont=imutils.grab_contours(cont) cont=contours.sort_contours(cont,method="top-to-bottom")[0] for c in cont: approx=cv2.approxPolyDP(c,0.0001*cv2.arcLength(c,True),True) area=cv2.contourArea(approx) (x, y, w, h) = cv2.boundingRect(approx) ar = w / float(h) if area<1300 and area>300 and ar>=0.9 and ar<=1.1: box=cv2.minAreaRect(approx) box=cv2.boxPoints(box) box=np.array(box,dtype=np.int) M=cv2.moments(box) x=int(M['m10']/M['m00']) y=int(M['m01']/M['m00']) # print(x,y) res=tekrar_bul(x_coords,x) if res is False and abs(x_coords[-1][1]-y)<35: coord.append(approx) x_coords.append((x,y)) sayac+=1 #cv2.drawContours(isim,[box],0,(255,0,0),thickness=3) #cv2.drawContours(isim,[approx],0,(0,0,255),thickness=2) elif abs(x_coords[-1][1]-y)>=35: coord.append(approx) x_coords=[(0,0)] sayac+=1 x_coords.append((x,y)) #cv2.drawContours(isim,[box],0,(255,0,0),thickness=3) #cv2.drawContours(isim,[approx],0,(0,0,255),thickness=2) else: continue return coord,thr6 def contour_cizdir(resim,cont,isim="default"): for c in cont: cv2.drawContours(resim,[c],0,(0,255,0),thickness=4) #print(f"Bulunan contour sayısı: {len(cont)}") def bolge_bul(resim,gri): bolgeler={} thr2=cv2.adaptiveThreshold(gri,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,9,8) areas=[] cont=cv2.findContours(thr2,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE) cont=imutils.grab_contours(cont) temp=[] cont=contours.sort_contours(cont,"top-to-bottom")[0] a=0 for c in cont: approx=cv2.approxPolyDP(c,0.009*cv2.arcLength(c,True),True) if cv2.contourArea(approx)>10050 and len(approx)==4: a+=1 M=cv2.moments(approx) x=int(M['m10']/M['m00']) y=int(M['m01']/M['m00']) #areas.append([a,cv2.contourArea(approx)]) #cv2.putText(resim,"{}".format(a),(x,y),fontFace=cv2.FONT_HERSHEY_COMPLEX,fontScale=4,color=(255,0,0),thickness=3) temp.append(approx.reshape(4,2)) areas.append([a,cv2.contourArea(approx)]) #cv2.drawContours(resim,[approx],0,(255,0,0),thickness=3) #cv2.imshow("resim_olge",imutils.resize(resim,height=650)) if len(temp)>=5: bolgeler={'isim':temp[0],'ogrno':temp[1],'sinav_turu':temp[2],'soru_grubu':temp[3],'ogretim_onay':temp[4],'cevaplar':temp[5]} areas=sorted(areas,key=lambda x:x[1],reverse=True) return bolgeler,areas def cevap_islemleri(cevap,coords,col_no=1): iki_cevap=0 bos=0 dogru=0 q_no=0 yanlıs=0 if col_no==1: pass elif col_no==2: q_no=30 elif col_no==3: q_no=60 elif col_no==4: q_no=90 yanit=[] #cevap=cv2.cvtColor(cevap,cv2.COLOR_BGR2GRAY) thresh=cv2.threshold(cevap,180,255,cv2.THRESH_BINARY_INV)[1] 
coords=contours.sort_contours(coords,method="top-to-bottom")[0] for (s,i) in enumerate(np.arange(0,len(coords),5)): cevap=None cnt=contours.sort_contours(coords[i:i+5])[0] toplam_beyaz=None say=0 for (j,c) in enumerate(cnt): if len(cevap_anahtar)<=q_no+s: return (dogru,yanlıs,bos,iki_cevap) maske=np.zeros(thresh.shape,dtype=np.uint8) cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1) maske=cv2.bitwise_and(thresh,thresh,mask=maske) plt.imshow(maske,cmap='gray') #plt.show() toplam_beyaz=cv2.countNonZero(maske) #print(toplam_beyaz,j) if cevap is None or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,q_no+s) if toplam_beyaz>800: say+=1 if say>1: #İKİ ŞIK İŞARETLEME DURUMU iki_cevap+=1 continue elif cevap[0]<800:# BOŞ BIRAKMA DURUMU bos+=1 continue else: if cevap_anahtar[q_no+s]== cevap[1]: #print(cevap_anahtar[q_no+s],cevap[1]) dogru+=1 else: yanlıs+=1 ''' NUMBER OF TRUE,FALSE,NOT MARKED AND MARKED MORE THAN 1 ''' return(dogru,yanlıs,bos,iki_cevap) def isim_islemleri(isim,coords,thresh): a=0 yanit=[] ad_str="" coords=contours.sort_contours(coords,method="left-to-right")[0] for (s,i) in enumerate(np.arange(0,len(coords),32)): cevap=None cnt=contours.sort_contours(coords[i:i+32],method="top-to-bottom")[0] toplam_beyaz=None for (j,c) in enumerate(cnt): maske=np.zeros(thresh.shape,dtype=np.uint8) cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1) maske=cv2.bitwise_and(thresh,thresh,mask=maske) #plt.imshow(maske,cmap='gray') #plt.show() #a+=1 toplam_beyaz=cv2.countNonZero(maske) #print(toplam_beyaz,j) if cevap is None or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s) # print("cevap",cevap) if cevap[0]>500: yanit.append(alfabe[cevap[1]]) elif cevap[0]<600: yanit.append(" ") for s in yanit: ad_str+=s return ad_str def cevap_kolon(cevap): pts1=np.array([(2,50),(300,50),(2,1545),(300,1545)]) pts2=np.array([(300,50),(600,50),(302,1545),(602,1545)]) pts3=np.array([(600,50),(900,50),(602,1545),(902,1545)]) pts4=np.array([(900,50),(1200,50),(902,1545),(1202,1545)]) col1=four_point_transform(cevap,pts1) col2=four_point_transform(cevap,pts2) col3=four_point_transform(cevap,pts3) col4=four_point_transform(cevap,pts4) return col1,col2,col3,col4 def cevap_gri(col1,col2,col3,col4): ''' KOLONLARI GRİ YAPMAK İÇİN,MAİNDE YER KAPLAMASIN ''' col1_gri=cv2.cvtColor(col1,cv2.COLOR_BGR2GRAY) col2_gri=cv2.cvtColor(col2,cv2.COLOR_BGR2GRAY) col3_gri=cv2.cvtColor(col3,cv2.COLOR_BGR2GRAY) col4_gri=cv2.cvtColor(col4,cv2.COLOR_BGR2GRAY) return col1_gri,col2_gri,col3_gri,col4_gri def cevap_contour(col1,col2,col3,col4): col1_gri,col2_gri,col3_gri,col4_gri=cevap_gri(col1,col2,col3,col4) col1_coord=cevap_contour_bul(col1,col1_gri) col2_coord=cevap_contour_bul(col2,col1_gri) col3_coord=cevap_contour_bul(col3,col1_gri) col4_coord=cevap_contour_bul(col4,col1_gri) return col1_coord,col2_coord,col3_coord,col4_coord def ogrno_islemleri(ogrno,ogrno_gri,coords): yanit="" thresh=cv2.threshold(ogrno_gri,180,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method="left-to-right")[0] for (s,i) in enumerate(np.arange(0,len(coords),10)): cevap=None cnt=contours.sort_contours(coords[i:i+10],method="top-to-bottom")[0] toplam_beyaz=None for (j,c) in enumerate(cnt): maske=np.zeros(thresh.shape,dtype=np.uint8) cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1) maske=cv2.bitwise_and(thresh,thresh,mask=maske) plt.imshow(maske,cmap='gray') #plt.show() toplam_beyaz=cv2.countNonZero(maske) if cevap is None or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s) if cevap[0]>500: yanit+=str(cevap[1]) print("Okul 
Numarası:",yanit) def sinav_islemleri(sinav,sinav_gri,coords): yanit=["QUİZ","ARA","FİNAL","BÜTÜNLEME"] thresh=cv2.threshold(sinav_gri,180,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method="top-to-bottom")[0] for (s,i) in enumerate(np.arange(0,len(coords),10)): cevap=None cnt=contours.sort_contours(coords[i:i+10],method="left-to-right")[0] toplam_beyaz=None for (j,c) in enumerate(cnt): maske=np.zeros(thresh.shape,dtype=np.uint8) cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1) maske=cv2.bitwise_and(thresh,thresh,mask=maske) plt.imshow(maske,cmap='gray') #plt.show() toplam_beyaz=cv2.countNonZero(maske) if cevap is None or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s) return yanit[cevap[1]] def sorugrup_islemleri(soru,soru_gri,coords): yanit=["A","B","C","D","E"] sayac=0 thresh=cv2.threshold(soru_gri,180,255,cv2.THRESH_BINARY_INV)[1] coords=contours.sort_contours(coords,method="top-to-bottom")[0] for (s,i) in enumerate(np.arange(0,len(coords),10)): cevap=None cnt=contours.sort_contours(coords[i:i+10],method="left-to-right")[0] toplam_beyaz=None for (j,c) in enumerate(cnt): maske=np.zeros(thresh.shape,dtype=np.uint8) cv2.drawContours(maske,[c],0,(255,255,255),thickness=-1) maske=cv2.bitwise_and(thresh,thresh,mask=maske) plt.imshow(maske,cmap='gray') #plt.show() sayac+=1 toplam_beyaz=cv2.countNonZero(maske) if cevap is None or toplam_beyaz>cevap[0]: cevap=(toplam_beyaz,j,s) if sayac==5: break print(cevap) if cevap[0]>500: return yanit[cevap[1]] #print("tespit edilemedi") return "Tespit edilemedi" #################################################################### def main_starter(bos_kagit,dolu_kagit): image=cv2.imread(bos_kagit) gray=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY) kagit,kagit_gri=kagit_bul(image,gray) bolgeler,areas=bolge_bul(kagit,kagit_gri) ''' FIND SCHOOL NUMBER PART ''' ogrno_bos=four_point_transform(kagit,bolgeler['ogrno']) ogrno_bos_gri=four_point_transform(kagit_gri,bolgeler['ogrno']) ogrno_coord,ogrno_thresh=contour_bul(ogrno_bos,ogrno_bos_gri) contour_cizdir(ogrno_bos_gri,ogrno_coord,"ogrenci numarası") #v2.imshow("ogrno",imutils.resize(ogrno_bos,height=400)) ''' DIVIDE ANSWER PART INTO 4 SLICES AND FIND ONE BY ONE ''' cevap_bos=four_point_transform(kagit,bolgeler['cevaplar']) cevap_bos_gri=four_point_transform(kagit_gri,bolgeler['cevaplar']) col1,col2,col3,col4=cevap_kolon(cevap_bos) col1_gri,col2_gri,col3_gri,col4_gri=cevap_gri(col1,col2,col3,col4) col1_coord,col2_coord,col3_coord,col4_coord=cevap_contour(col1,col2,col3,col4) #contour_cizdir(col1,col1_coord) #cevap_islemleri(col2_gri,coord_cevap) ''' EXAM TYPE FIND PART ''' sinav_bos=four_point_transform(kagit,bolgeler['sinav_turu']) sinav_bos_gri=four_point_transform(kagit_gri,bolgeler['sinav_turu']) sinav_coord,sinav_thresh=contour_bul(sinav_bos,sinav_bos_gri) sinav_islemleri(sinav_bos,sinav_bos_gri,sinav_coord) #cv2.imshow("sınav türü",sinav_bos_gri) ''' OTHER PARTS THAT ON PAPER ''' sorugrup_bos=four_point_transform(kagit,bolgeler['soru_grubu']) sorugrup_bos_gri=four_point_transform(kagit_gri,bolgeler['soru_grubu']) sorugrup_coord,sorugrup_thresh=contour_bul(sorugrup_bos,sorugrup_bos_gri,1) coors=soru_grup_contour_bul(sorugrup_bos,sorugrup_bos_gri) soru_cont,soru_thr=contour_bul(sorugrup_bos,sorugrup_bos_gri,1) ############################### ogretim_bos=four_point_transform(kagit,bolgeler['ogretim_onay']) ogretim_bos_gri=four_point_transform(kagit_gri,bolgeler['ogretim_onay']) ogret_cont,ogret_thr=contour_bul(ogretim_bos,ogretim_bos_gri,1) ''' NAME FIND PART. 
''' isim_bos=four_point_transform(kagit,bolgeler['isim']) isim_bos_gri=cv2.cvtColor(isim_bos,cv2.COLOR_BGR2GRAY) coord_isim, thres=contour_bul(isim_bos, isim_bos_gri) #contour_cizdir(isim_bos,coord,"isim_bos") #cevap_islemleri(cevap_bos_gri,coord) ############################################## resim=cv2.imread(dolu_kagit) resim_gri=cv2.cvtColor(resim,cv2.COLOR_BGR2GRAY) warp2,warp2_gri=kagit_bul(resim,resim_gri) bolgeler2,areas2=bolge_bul(warp2,warp2_gri) ret,warp2=ters_bul(warp2,areas2) ''' TERS İSE TEKRAR BOLGELERİ BUL ''' if ret==True: warp2_gri=cv2.cvtColor(warp2,cv2.COLOR_BGR2GRAY) bolgeler2,areas2=bolge_bul(warp2,warp2_gri) else: pass isim_dolu=four_point_transform(warp2,bolgeler2['isim']) isim_dolu_gri=cv2.cvtColor(isim_dolu,cv2.COLOR_BGR2GRAY) contour_cizdir(isim_dolu,coord_isim,"dolu_kagit_contourlu") ''' OGRETİM ONAY DOLU KAGIT ''' ogretim_dolu=four_point_transform(warp2,bolgeler2['ogretim_onay']) ogretim_dolu_gri=cv2.cvtColor(ogretim_dolu,cv2.COLOR_BGR2GRAY) ogret_onay=sorugrup_islemleri(ogretim_dolu,ogretim_dolu_gri,ogret_cont) print("Öğretim Onayı:",ogret_onay) #cv2.drawContours(ogretim_dolu,ogret_cont,-1,(255,0,0),thickness=3) #cv2.imshow("ogretc",ogretim_dolu) #ogretim_onayı=sorugrup_islemleri(ogretim_dolu,ogretim_dolu_gri,ogretimonay_coord) sorugrup_dolu=four_point_transform(warp2,bolgeler2['soru_grubu']) sorugrup_dolu_gri=cv2.cvtColor(sorugrup_dolu,cv2.COLOR_BGR2GRAY) soru_tur=sorugrup_islemleri(sorugrup_dolu,sorugrup_dolu_gri,soru_cont) print("Soru Grubu",soru_tur) thresh_dolu=cv2.threshold(isim_dolu_gri,0,255,cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)[1] isim_str=isim_islemleri(isim_dolu_gri,coord_isim,thresh_dolu) print(isim_str) sinav_dolu=four_point_transform(warp2,bolgeler2['sinav_turu']) sinav_dolu_gri=cv2.cvtColor(sinav_dolu,cv2.COLOR_BGR2GRAY) sinav_turu=sinav_islemleri(sinav_dolu,sinav_dolu_gri,sinav_coord) print("Sınav Türü: ",sinav_turu) ogrno_dolu=four_point_transform(warp2,bolgeler2['ogrno']) ogrno_dolu_gri=cv2.cvtColor(ogrno_dolu,cv2.COLOR_BGR2GRAY) ogrno_islemleri(ogrno_dolu,ogrno_dolu_gri,ogrno_coord) cevap_dolu=four_point_transform(warp2,bolgeler2['cevaplar']) cevap_dolu_gri=cv2.cvtColor(cevap_dolu,cv2.COLOR_BGR2GRAY) col1_dolu,col2_dolu,col3_dolu,col4_dolu=cevap_kolon(cevap_dolu) col1_gri_dolu,col2_gri_dolu,col3_gri_dolu,col4_gri_dolu=cevap_gri(col1_dolu,col2_dolu,col3_dolu,col4_dolu) #contour_cizdir(col1_dolu,col1_coord,"colon1 dolu") if len(cevap_anahtar)<=30: basarim=cevap_islemleri(col1_gri_dolu,col1_coord,1) elif len(cevap_anahtar)<=60: basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1) basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2) basarim=(basarim1[0]+basarim2[0],basarim1[1]+basarim2[1],basarim1[2]+basarim2[2],basarim1[3]+basarim2[3]) #print(basarim) elif len(cevap_anahtar)<=90: basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1) basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2) basarim3=cevap_islemleri(col3_gri_dolu,col3_coord,3) basarim=basarim1+basarim2+basarim3 elif len(cevap_anahtar)<=120: basarim1=cevap_islemleri(col1_gri_dolu,col1_coord,1) basarim2=cevap_islemleri(col2_gri_dolu,col2_coord,2) basarim3=cevap_islemleri(col3_gri_dolu,col3_coord,3) basarim4=cevap_islemleri(col4_gri_dolu,col4_coord,4) basarim=basarim1+basarim2+basarim3+basarim4 print(f"Doğru cevap sayısı:{basarim[0]}\nYanlış cevap sayısı:{basarim[1]}\nBoş sayısı:{basarim[2]}\nİki cevap işaret:{basarim[3]}") cv2.waitKey() cv2.destroyAllWindows() if __name__ == '__main__': bos_kagit="optic_empty.jpg" dolu_kagit="optic_marked.jpg" main_starter(bos_kagit,dolu_kagit)
2.203125
2
service.py
Tigge/script.filmtipset-grade
1
1558
# Copyright (c) 2013, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
#   list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

import xbmc
import xbmcaddon
import xbmcgui

import filmtipset

FILMTIPSET_ACCESS_KEY = "7ndg3Q3qwW8dPzbJMrB5Rw"


class XBMCPlayer(xbmc.Player):

    def __init__(self, *args):
        self.imdb = None
        self.time = None
        self.time_total = None

    def onPlayBackStarted(self):
        self.update()

    def onPlayBackEnded(self):
        self.onDone()

    def onPlayBackStopped(self):
        self.onDone()

    def update(self):
        info = self.getVideoInfoTag()
        self.imdb = info.getIMDBNumber()
        self.time = self.getTime()
        self.time_total = self.getTotalTime()

    def onDone(self):
        print "getTime", self.time
        print "getTotalTime", self.time_total
        print "imdb", self.imdb
        addon = xbmcaddon.Addon(id='script.filmtipset-grade')
        key = addon.getSetting("key")
        user = addon.getSetting("user")
        grader = filmtipset.Filmtipset(FILMTIPSET_ACCESS_KEY, key, user)
        movie = grader.get_movie_imdb(self.imdb)
        print movie
        if movie["grade"]["type"] != "seen":
            dialog = xbmcgui.Dialog()
            grade = dialog.select("Grade " + movie["orgname"] + " on filmtipset:",
                                  ["Skip", "1", "2", "3", "4", "5"])
            if grade != 0:
                print dialog, grade
                print grader.grade(movie["id"], grade)


player = XBMCPlayer()

while(not xbmc.abortRequested):
    if player.isPlayingVideo():
        player.update()
    xbmc.sleep(1000)
1.640625
2
test/testMatrix.py
turkeydonkey/nzmath3
1
1559
<reponame>turkeydonkey/nzmath3 import unittest from nzmath.matrix import * import nzmath.vector as vector import nzmath.rational as rational import nzmath.poly.uniutil as uniutil Ra = rational.Rational Poly = uniutil.polynomial Int = rational.theIntegerRing # sub test try: from test.testMatrixFiniteField import * except: try: from nzmath.test.testMatrixFiniteField import * except: from .testMatrixFiniteField import * ## for RingMatrix a1 = createMatrix(1, 2, [3, 2]) a2 = Matrix(1, 2, [5, -6]) a3 = createMatrix(3, 2, [7, 8]+[3, -2]+[0, 10]) a4 = Matrix(3, 2, [21, -12]+[1, -1]+[0, 0]) a5 = createMatrix(1, 2, [Poly({0:3, 1:5}, Int), Poly({1:2}, Int)]) ## for RingSquareMatrix b1 = createMatrix(2, 2, [1, 2]+[3, 4]) b2 = Matrix(2, 2, [0, -1]+[1, -2]) b3 = createMatrix(3, 3, [0, 1, 2]+[5, 4, 6]+[7, 9, 8]) b4 = Matrix(3, 3, [1, 2, 3]+[0, 5, -2]+[7, 1, 9]) b5 = createMatrix(3, 3, [1, 3, 2, 4, 6, 5, 6, 8, 9]) b6 = createMatrix(3, 3, [1, 2, 4, 0, 3, 5, 0, 0, 0]) b7 = createMatrix(3, 3, [1, 0, 0, 9, 1, 0, 5, 6, 1]) b8 = Matrix(3, 3, [3, 15, 12]+[2,7,5]+[1,-4,-2]) ## for FieldMatrix c1 = createMatrix(1, 2, [Ra(3), Ra(2)]) c2 = createMatrix(4, 5, \ [Ra(0), 0, 1, 2, -1]+[0, 0, 5, 12, -2]+[0, 0, 1, 3, -1]+[0, 0, 1, 2, 0]) c3 = createMatrix(3, 2, [Ra(1), 2]+[2, 5]+[6, 7]) ## for FieldSquareMatrix d1 = createMatrix(2, 2, [Ra(1), Ra(2)]+[Ra(3), Ra(4)]) d2 = createMatrix(3, 3, [Ra(1), 2, 3]+[4, 5, 6]+[5, 7, 9]) d3 = Matrix(3, 3, \ [Ra(1), Ra(2), Ra(3)]+[Ra(0), Ra(5), Ra(-2)]+[7, 1, 9]) d4 = createMatrix(6, 6, \ [Ra(4), 2, 5, 0, 2, 1]+[5, 1, 2, 5, 1, 1]+[90, 7, 54, 8, 4, 6]+\ [7, 5, 0, 8, 2, 5]+[8, 2, 6, 5, -4, 2]+[4, 1, 5, 6, 3, 1]) d5 = createMatrix(4, 4, \ [Ra(2), -1, 0, 0]+[-1, 2, -1, 0]+[0, -1, 2, -1]+[0, 0, -1, 2]) d6 = createMatrix(4, 4, \ [Ra(1), 2, 3, 4]+[2, 3, 4, 5]+[3, 4, 5, 6]+[4, 5, 6, 7]) d7 = Matrix(3, 3, \ [Ra(1, 2), Ra(2, 3), Ra(1, 5)]+[Ra(3, 2), Ra(1, 3), Ra(2, 5)]+[Ra(-1, 2), Ra(4, 3), Ra(3, 5)]) ## other objects v1 = vector.Vector([1, 4]) v2 = vector.Vector([8]) v3 = vector.Vector([0, 0, 1]) class MatrixTest(unittest.TestCase): def testInit(self): lst_lst = Matrix(3, 2, [[21, -12], [1, -1], [0, 0]]) self.assertEqual(a4, lst_lst) lst_tuple = Matrix(3, 2, [(21, 1, 0), (-12, -1, 0)]) self.assertEqual(a4, lst_tuple) lst_vect = Matrix(3, 2, [vector.Vector([21, 1, 0]), vector.Vector([-12, -1, 0])]) self.assertEqual(a4, lst_vect) def testGetitem(self): self.assertEqual(2, a1[1, 2]) self.assertEqual(-2, b2[2, 2]) self.assertRaises(IndexError, a1.__getitem__, "wrong") self.assertEqual(vector.Vector([21, 1, 0]), a4[1]) def testEqual(self): self.assertTrue(a1 == Matrix(1, 2, [3, 2])) self.assertTrue(isinstance(a1 == a1, bool)) def testNonZero(self): self.assertTrue(not zeroMatrix(2, 3)) def testContains(self): self.assertTrue(5 in a2) def testCall(self): call = createMatrix(1, 2, [13, 4]) self.assertEqual(call, a5(2)) def testMap(self): pow_two = createMatrix(1, 2, [9, 4]) self.assertEqual(pow_two, a1.map(lambda n : n ** 2)) def testReduce(self): self.assertEqual(-2, a3.reduce(min)) def testGetRow(self): row1 = vector.Vector([3, -2]) self.assertEqual(row1, a3.getRow(2)) row2 = vector.Vector([1, 2]) self.assertEqual(row2, b1.getRow(1)) def testGetColumn(self): col1 = vector.Vector([-12, -1, 0]) self.assertEqual(col1, a4.getColumn(2)) col2 = vector.Vector([1, 3]) self.assertEqual(col2, b1.getColumn(1)) def testTranspose(self): trans = createMatrix(2, 3, [7, 3, 0]+[8, -2, 10]) self.assertEqual(trans, a3.transpose()) def testGetBlock(self): block = Matrix(2, 3, [4, 6, 5, 6, 8, 9]) self.assertEqual(block, 
b5.getBlock(2, 1, 2, 3)) def testSubMatrix(self): sub1 = createMatrix(2, 1, [-12, 0]) self.assertEqual(sub1, a4.subMatrix(2, 1)) sub2 = createMatrix(2, 2, [4, 5, 6, 9]) self.assertEqual(sub2, b5.subMatrix([2, 3], [1, 3])) class SquareMatrixTest(unittest.TestCase): def testIsUpperTriangularMatrix(self): UT = createMatrix(4, 4, \ [1, 2, 3, 4]+[0, 5, 6, 7]+[0, 0, 8, 9]+[0, 0, 0, 1]) notUT = createMatrix(4, 4, \ [1, 2, 3, 4]+[0, 5, 6, 7]+[0, 0, 8, 9]+[0, 0, 1, 1]) assert UT.isUpperTriangularMatrix() assert not notUT.isUpperTriangularMatrix() def testIsLowerTriangularMatrix(self): LT = createMatrix(4, 4, \ [1, 0, 0, 0]+[2, 3, 0, 0]+[4, 5, 6, 0]+[7, 8, 9, 10]) notLT = createMatrix(4, 4, \ [1, 0, 0, 0]+[2, 3, 1, 0]+[4, 5, 6, 0]+[7, 8, 9, 10]) assert LT.isLowerTriangularMatrix() assert not notLT.isLowerTriangularMatrix() def testIsDiagonalMatrix(self): diag = createMatrix(2, 2, [-3, 0, 0, 5]) assert diag.isDiagonalMatrix() def testIsScalarMatrix(self): scaler = createMatrix(2, 2, [10, 0, 0, 10]) assert scaler.isScalarMatrix() def testIsSymmetricMatrix(self): symmetric = createMatrix(2, 2, [2, 3, 3, 5]) assert symmetric.isSymmetricMatrix() class RingMatrixTest(unittest.TestCase): def testAdd(self): sum1 = createMatrix(1, 2, [8, -4]) self.assertEqual(sum1, a1 + a2) sum2 = createMatrix(2, 2, [1, 1, 4, 2]) self.assertEqual(sum2, b1 + b2) def testSub(self): sub1 = createMatrix(1, 2, [-2, 8]) self.assertEqual(sub1, a1 - a2) sub2 = createMatrix(2, 2, [1, 3, 2, 6]) self.assertEqual(sub2, b1 - b2) def testMul(self): mul1 = createMatrix(1, 2, [2, -7]) self.assertEqual(mul1, a1 * b2) mul2 = createMatrix(3, 2, [-15, -6]+[-2, -2]+[0, 0]) self.assertEqual(mul2, a4 * b1) mul3 = createMatrix(3, 2, [1, -1]+[109, -64]+[156, -93]) self.assertEqual(mul3, b3 * a4) def testScalarMul(self): mul = createMatrix(1, 2, [15, 10]) self.assertEqual(mul, 5 * a1) def testVectorMul(self): mul = vector.Vector([9, 19]) self.assertEqual(mul, b1 * v1) def testMod(self): mod1 = createMatrix(3, 2, [1, 2]+[0, 1]+[0, 1]) self.assertEqual(mod1, a3 % 3) def testNeg(self): neg = createMatrix(2, 2, [0, 1, -1, 2]) self.assertEqual(neg, -b2) def testHermiteNormalForm(self): already = createMatrix(4, 3, [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1]) h = already.hermiteNormalForm() self.assertEqual(h, already) lessrank = createMatrix(2, 3, [1, 0, 0, 0, 1, 0]) h = lessrank.hermiteNormalForm() self.assertEqual(h.row, lessrank.row) self.assertEqual(h.column, lessrank.column) zerovec = vector.Vector([0, 0]) self.assertEqual(zerovec, h.getColumn(1)) square = createMatrix(3, 3, [1, 0, 0, 0, 1, 1, 0, 1, 1]) h = square.hermiteNormalForm() self.assertEqual(h.row, square.row) self.assertEqual(h.column, square.column) hermite = createMatrix(3, 3, [0, 1, 0, 0 ,0, 1, 0, 0, 1]) self.assertEqual(hermite, h) def testExtHermiteNormalForm(self): already = createMatrix(4, 3, [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1]) U_1, h_1 = already.exthermiteNormalForm() self.assertEqual(h_1, already) self.assertEqual(already * U_1, h_1) lessrank = createMatrix(2, 3, [1, 0, 0, 0, 1, 0]) U_2, h_2 = lessrank.exthermiteNormalForm() self.assertEqual(h_2.row, lessrank.row) self.assertEqual(h_2.column, lessrank.column) self.assertEqual(lessrank * U_2, h_2) def testKernelAsModule(self): ker_1 = a1.kernelAsModule() self.assertEqual(a1 * ker_1[1], vector.Vector([0])) #zero test ker_2 = b1.kernelAsModule() self.assertEqual(ker_2, None) class RingSquareMatrixTest(unittest.TestCase): def testPow(self): pow1 = createMatrix(2, 2, [7, 10, 15, 22]) self.assertEqual(pow1, b1 ** 2) pow2 = createMatrix(2, 2, 
[1, 0, 0, 1]) self.assertEqual(pow2, b2 ** 0) def testIsOrthogonalMatrix(self): orthogonal = createMatrix(2, 2, [Ra(3, 5), Ra(4, 5), Ra(-4, 5), Ra(3, 5)]) assert orthogonal.isOrthogonalMatrix() def testIsAlternatingMatrix(self): alternate1 = createMatrix(2, 2, [0, 2, -2, 0]) assert alternate1.isAlternatingMatrix() alternate2 = createMatrix(2, [1, 2, -2, 0]) assert not alternate2.isAntisymmetricMatrix() def testIsSingular(self): assert b6.isSingular() def testTrace(self): self.assertEqual(15, b4.trace()) def testDeterminant(self): self.assertEqual(-2, b1.determinant()) #sf.bug #1914349 self.assertTrue(isinstance(b3.determinant(), int)) self.assertEqual(36, b3.determinant()) def testCofactor(self): self.assertEqual(-6, b5.cofactor(1, 2)) def testCommutator(self): commutator = createMatrix(2, 2, [5, -1, 9, -5]) self.assertEqual(commutator, b1.commutator(b2)) def testCharacteristicMatrix(self): charMat = createMatrix(2, 2, \ [Poly({0:-1,1:1}, Int), Poly({0:-2}, Int)]+[Poly({0:-3}, Int), Poly({0:-4,1:1}, Int)]) self.assertEqual(charMat, b1.characteristicMatrix()) def testCharacteristicPolynomial(self): assert d1.characteristicPolynomial() == d1.characteristicMatrix().determinant() def testAdjugateMatrix(self): adjugate = createMatrix(3, 3, [47, -15, -19, -14, -12, 2, -35, 13, 5]) self.assertEqual(adjugate, b4.adjugateMatrix()) assert d1 * d1.adjugateMatrix() == d1.determinant() * unitMatrix(d1.row) def testCofactorMatrix(self): cofact = d5.cofactorMatrix() self.assertEqual(d5.cofactor(2, 3), cofact[2, 3]) def testSmithNormalForm(self): self.assertEqual([12, 1, 1], b5.smithNormalForm()) self.assertRaises(ValueError, b6.smithNormalForm) self.assertEqual([1, 1, 1], b7.smithNormalForm()) self.assertEqual([9, 3, 1], b8.smithNormalForm()) def testExtSmithNormalForm(self): smith1 = Matrix(3, 3, [12, 0, 0, 0, 1, 0, 0, 0, 1]) U_1, V_1, M_1 = b5.extsmithNormalForm() self.assertEqual(smith1, M_1) self.assertEqual(M_1, U_1 * b5 * V_1) smith2 = Matrix(3, 3, [9, 0, 0, 0, 3, 0, 0, 0, 1]) U_2, V_2, M_2 = b8.extsmithNormalForm() self.assertEqual(smith2, M_2) self.assertEqual(M_2, U_2 * b8 * V_2) class FieldMatrixTest(unittest.TestCase): def testDiv(self): div = createMatrix(1, 2, [1, Ra(2, 3)]) self.assertEqual(div, c1 / 3) def testKernel(self): ker = c2.kernel() self.assertTrue(not c2 * ker) def testImage(self): img = createMatrix(4,3,[1,2,-1]+[5,12,-2]+[1,3,-1]+[1,2,0]) self.assertEqual(img, c2.image()) def testRank(self): self.assertEqual(3, c2.rank()) self.assertEqual(3, d3.rank()) def testInverseImage(self): self.assertEqual(d6, d5 * d5.inverseImage(d6)) self.assertRaises(NoInverseImage, d2.inverseImage, unitMatrix(3)) def testSolve(self): for i in range(1, d6.column+1): self.assertEqual(d6[i], d5 * d5.solve(d6[i])[0]) sol1 = c1.solve(v2) for i in range(len(sol1[1])): self.assertEqual(v2, c1 * (sol1[0]+sol1[1][i])) self.assertRaises(NoInverseImage, c3.solve, v3) def testColumnEchelonForm(self): echelon = createMatrix(4, 5,\ [Ra(0), 0, 1, 0, 0]+[0, 0, 0, 2, 3]+[0, 0, 0, 1, 0]+[0, 0, 0, 0, 1]) self.assertEqual(echelon, c2.columnEchelonForm()) class FieldSquareMatrixTest(unittest.TestCase): def testPow(self): pow3 = createMatrix(2, 2, [Ra(11, 2), Ra(-5, 2), Ra(-15, 4), Ra(7, 4)]) self.assertEqual(pow3, d1 ** (-2)) def testTriangulate(self): triangle = createMatrix(3, 3, \ [Ra(1, 1), 2, 3]+[0, 5, -2]+[0, 0, Ra(-86, 5)]) self.assertEqual(triangle, d3.triangulate()) def testDeterminant(self): self.assertEqual(Ra(-7, 15), d7.determinant()) def testInverse(self): cinverse = createMatrix(3, 3) cinverse.set([Ra(-47, 
86), Ra(15, 86), Ra(19, 86)]+\ [Ra(7, 43), Ra(6, 43), Ra(-1, 43)]+[Ra(35, 86), Ra(-13, 86), Ra(-5, 86)]) self.assertEqual(cinverse, d3.inverse()) self.assertRaises(NoInverse, d2.inverse) self.assertEqual(d3.inverse() * c3, d3.inverse(c3)) def testInverseNoChange(self): # sf bug#1849220 M1 = SquareMatrix(2, 2, [Ra(1, 2), Ra(1, 2), Ra(1, 1), Ra(-3, 2)]) M1.inverse() M2 = SquareMatrix(2, 2, [Ra(1, 2), Ra(1, 2), Ra(1, 1), Ra(-3, 2)]) self.assertEqual(M2, M1) def testHessenbergForm(self): pass def testLUDecomposition(self): L, U = d4.LUDecomposition() assert L * U == d4 assert L.isLowerTriangularMatrix() assert U.isUpperTriangularMatrix() class MatrixRingTest (unittest.TestCase): def setUp(self): self.m2z = MatrixRing.getInstance(2, Int) def testZero(self): z = self.m2z.zero self.assertEqual(0, z[1, 1]) self.assertEqual(0, z[1, 2]) self.assertEqual(0, z[2, 1]) self.assertEqual(0, z[2, 2]) def testOne(self): o = self.m2z.one self.assertEqual(1, o[1, 1]) self.assertEqual(0, o[1, 2]) self.assertEqual(0, o[2, 1]) self.assertEqual(1, o[2, 2]) def testUnitMatrix(self): """ unitMatrix() is an alias of one. """ self.assertEqual(self.m2z.one, self.m2z.unitMatrix()) def testRingAPI(self): m3z = MatrixRing.getInstance(3, Int) m2q = MatrixRing.getInstance(2, rational.theRationalField) # issubring self.assertFalse(self.m2z.issubring(Int)) self.assertTrue(self.m2z.issubring(self.m2z)) self.assertTrue(self.m2z.issubring(m2q)) self.assertFalse(self.m2z.issubring(m3z)) # issuperring self.assertFalse(self.m2z.issuperring(Int)) self.assertTrue(self.m2z.issuperring(self.m2z)) self.assertFalse(self.m2z.issuperring(m2q)) self.assertFalse(self.m2z.issuperring(m3z)) # getCommonSuperring self.assertRaises(TypeError, self.m2z.getCommonSuperring, Int) class SubspaceTest(unittest.TestCase): def testSupplementBasis(self): ba = Subspace(3, 2, [1, 2, 3, 4, 5, 7]) supbase = createMatrix(3, 3, [1, 2, 0, 3, 4, 0, 5, 7, 1]) self.assertEqual(supbase, ba.supplementBasis()) def testSumOfSubspaces(self): unit1 = Subspace(3, 1, [1, 0, 0]) unit2 = Subspace(3, 2, [0, 0]+[1, 0]+[0, 1]) self.assertEqual(unitMatrix(3), unit1.sumOfSubspaces(unit2)) def testIntersectionOfSubspace(self): unit1 = Subspace(3, 2, [1, 0]+[0, 1]+[0, 0]) unit2 = unitMatrix(3) unit2.toSubspace() intersect = Subspace(3, 2, [-1, 0]+[0, -1]+[0, 0]) self.assertEqual(intersect, unit1.intersectionOfSubspaces(unit2)) class FunctionTest(unittest.TestCase): def testCreateMatrix(self): Q = rational.theRationalField mat1 = createMatrix(2, 3, [[2,3,4], [5,6,7]]) self.assertEqual(mat1.coeff_ring, Int) mat2 = createMatrix(2, 3, [[2,3,4], [5,6,7]], Q) self.assertEqual(mat2.coeff_ring, Q) mat3 = createMatrix(3, [(1, 2, 3), (4, 5, 6), (7, 8, 9)], Q) self.assertTrue(mat3.row == mat3.column) self.assertTrue(mat3.__class__, FieldSquareMatrix) mat4 = createMatrix(2, [vector.Vector([1, 4]), vector.Vector([6, 8])]) self.assertEqual(mat4.coeff_ring, Int) mat5 = createMatrix(5, 6, Int) self.assertTrue(mat5 == 0) mat6 = createMatrix(1, 4) self.assertTrue(mat6 == 0) mat7 = createMatrix(3, Q) self.assertTrue(mat7.row == mat7.column) self.assertTrue(mat7 == 0) self.assertEqual(mat7.coeff_ring, Q) mat8 = createMatrix(7) self.assertTrue(mat8 == 0) def suite(suffix="Test"): suite = unittest.TestSuite() all_names = globals() for name in all_names: if name.endswith(suffix): suite.addTest(unittest.makeSuite(all_names[name], "test")) return suite if __name__ == '__main__': runner = unittest.TextTestRunner() runner.run(suite())
2.40625
2
python/test-nose-3.py
li-ma/homework
0
1560
<gh_stars>0
# Module Level
def setUp():
    print('test setup')


def tearDown():
    print('test teardown')


# Function Level
def func_1_setup():
    print('test_func_1 setup')


def func_1_teardown():
    print('test_func_1_teardown')


# Target Func
def test_func_1():
    print('test_func_1 run')
    assert True


test_func_1.setUp = func_1_setup
test_func_1.tearDown = func_1_teardown
2.0625
2
lib/reindex/reporting.py
scality/utapi
13
1561
import requests
import redis
import json
import ast
import sys
import time
import urllib
import re
import sys
from threading import Thread
from concurrent.futures import ThreadPoolExecutor
import argparse


def get_options():
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--sentinel-ip", default='127.0.0.1', help="Sentinel IP")
    parser.add_argument("-p", "--sentinel-port", default="16379", help="Sentinel Port")
    parser.add_argument("-v", "--redis-password", default=None, help="Redis AUTH Password")
    parser.add_argument("-n", "--sentinel-cluster-name", default='scality-s3', help="Redis cluster name")
    parser.add_argument("-b", "--bucketd-addr", default='http://127.0.0.1:9000', help="URL of the bucketd server")
    return parser.parse_args()


def safe_print(content):
    print("{0}".format(content))


class askRedis():

    def __init__(self, ip="127.0.0.1", port="16379", sentinel_cluster_name="scality-s3", password=<PASSWORD>):
        self._password = password
        r = redis.Redis(host=ip, port=port, db=0, password=password)
        self._ip, self._port = r.sentinel_get_master_addr_by_name(sentinel_cluster_name)

    def read(self, resource, name):
        r = redis.Redis(host=self._ip, port=self._port, db=0, password=self._password)
        res = 's3:%s:%s:storageUtilized:counter' % (resource, name)
        total_size = r.get(res)
        res = 's3:%s:%s:numberOfObjects:counter' % (resource, name)
        files = r.get(res)
        try:
            return {'files': int(files), "total_size": int(total_size)}
        except Exception as e:
            return {'files': 0, "total_size": 0}


class S3ListBuckets():

    def __init__(self, host='127.0.0.1:9000'):
        self.bucketd_host = host

    def run(self):
        docs = []
        url = "%s/default/bucket/users..bucket" % self.bucketd_host
        session = requests.Session()
        r = session.get(url, timeout=30)
        if r.status_code == 200:
            payload = json.loads(r.text)
            for keys in payload['Contents']:
                key = keys["key"]
                r1 = re.match("(\w+)..\|..(\w+.*)", key)
                docs.append(r1.groups())
            return docs

        return(self.userid, self.bucket, user, files, total_size)


if __name__ == '__main__':
    options = get_options()
    redis_conf = dict(
        ip=options.sentinel_ip,
        port=options.sentinel_port,
        sentinel_cluster_name=options.sentinel_cluster_name,
        password=options.redis_password
    )

    P = S3ListBuckets(options.bucketd_addr)
    listbuckets = P.run()
    userids = set([x for x, y in listbuckets])

    executor = ThreadPoolExecutor(max_workers=1)
    for userid, bucket in listbuckets:
        U = askRedis(**redis_conf)
        data = U.read('buckets', bucket)
        content = "Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s " % (
            userid, bucket, data["files"], data["total_size"])
        executor.submit(safe_print, content)
        data = U.read('buckets', 'mpuShadowBucket'+bucket)
        content = "Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s " % (
            userid, 'mpuShadowBucket'+bucket, data["files"], data["total_size"])
        executor.submit(safe_print, content)

    executor.submit(safe_print, "")

    for userid in sorted(userids):
        U = askRedis(**redis_conf)
        data = U.read('accounts', userid)
        content = "Account:%s|NumberOFfiles:%s|StorageCapacity:%s " % (
            userid, data["files"], data["total_size"])
        executor.submit(safe_print, content)
2.625
3
src/skim/modeling/skim_attention/modeling_skim.py
recitalAI/skim-attention
4
1562
<reponame>recitalAI/skim-attention<gh_stars>1-10 from collections import namedtuple import logging from dataclasses import dataclass from typing import Optional, Tuple import math import torch from torch import nn from torch.nn import CrossEntropyLoss, LayerNorm from torch.autograd.function import Function from transformers.file_utils import ( ModelOutput, ) from transformers.modeling_utils import ( PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) from transformers.modeling_outputs import ( BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, TokenClassifierOutput, ) from transformers.models.bert.modeling_bert import ( BertConfig, BertEmbeddings, BertIntermediate, BertOutput, BertPooler, BertEncoder, BertOnlyMLMHead, ) from transformers.models.layoutlm.modeling_layoutlm import LayoutLMEmbeddings from .configuration_skim import ( SkimformerConfig, BertWithSkimEmbedConfig, SkimmingMaskConfig, ) logger = logging.getLogger(__name__) SkimformerEncoderOutput = namedtuple( "SkimformerEncoderOutput", ["hidden_states", "all_hidden_states"], ) class SkimformerTextEmbeddings(nn.Module): """Construct the text embeddings from word and token_type embeddings.""" def __init__(self, config): super().__init__() self.max_position_embeddings = config.max_position_embeddings self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_ids=None, token_type_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() device = input_ids.device else: input_shape = inputs_embeds.size()[:-1] device = inputs_embeds.device device = input_ids.device if input_ids is not None else inputs_embeds.device if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class Skimformer1DPositionEmbeddings(nn.Module): """Construct sequential position embeddings.""" def __init__(self, config): super().__init__() self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_layout_size) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.LayerNorm = LayerNorm(config.hidden_layout_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_shape, device, position_ids=None): seq_length = input_shape[1] if position_ids is None: position_ids = torch.arange( 0, seq_length, dtype=torch.long, device=device ) position_ids = position_ids.unsqueeze(0).expand(input_shape) position_embeddings = self.position_embeddings(position_ids) position_embeddings = self.LayerNorm(position_embeddings) position_embeddings = self.dropout(position_embeddings) return position_embeddings class Skimformer2DPositionEmbeddings(nn.Module): """Construct the layout embeddings from the bounding box coordinates.""" def __init__(self, config): super().__init__() self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) 
self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.degrade_2d_positions = config.degrade_2d_positions if hasattr(config, "degrade_2d_positions") else False self.LayerNorm = LayerNorm(config.hidden_layout_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, bbox=None): if self.degrade_2d_positions: try: x_center = (bbox[:, :, 0] + bbox[:, :, 2]) // 2 y_center = (bbox[:, :, 1] + bbox[:, :, 3]) // 2 x_center_position_embeddings = self.x_position_embeddings(x_center) y_center_position_embeddings = self.y_position_embeddings(y_center) except IndexError as e: raise IndexError("The :obj:`bbox` coordinate values should be within 0-1000 range.") from e embeddings = x_center_position_embeddings + y_center_position_embeddings else: try: left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0]) upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1]) right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2]) lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3]) except IndexError as e: raise IndexError("The :obj:`bbox` coordinate values should be within 0-1000 range.") from e h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1]) w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0]) embeddings = ( left_position_embeddings + upper_position_embeddings + right_position_embeddings + lower_position_embeddings + h_position_embeddings + w_position_embeddings ) embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BertWithSkimEmbedEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_layout_size) self.x_position_projection = nn.Linear(config.hidden_layout_size, config.hidden_size) self.y_position_projection = nn.Linear(config.hidden_layout_size, config.hidden_size) self.h_position_projection = nn.Linear(config.hidden_layout_size, config.hidden_size) self.w_position_projection = nn.Linear(config.hidden_layout_size, config.hidden_size) self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) def forward( self, input_ids=None, bbox=None, token_type_ids=None, position_ids=None, inputs_embeds=None, ): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = 
input_shape[1] device = input_ids.device if input_ids is not None else inputs_embeds.device if position_ids is None: position_ids = self.position_ids[:, :seq_length] if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) words_embeddings = inputs_embeds position_embeddings = self.position_embeddings(position_ids) try: left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0]) upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1]) right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2]) lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3]) except IndexError as e: raise IndexError("The :obj:`bbox`coordinate values should be within 0-1000 range.") from e h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1]) w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0]) # project into same dimension as text embeddings left_position_embeddings = self.x_position_projection(left_position_embeddings) upper_position_embeddings = self.y_position_projection(upper_position_embeddings) right_position_embeddings = self.x_position_projection(right_position_embeddings) lower_position_embeddings = self.y_position_projection(lower_position_embeddings) h_position_embeddings = self.h_position_projection(h_position_embeddings) w_position_embeddings = self.w_position_projection(w_position_embeddings) two_dim_pos_embeddings = ( left_position_embeddings + upper_position_embeddings + right_position_embeddings + lower_position_embeddings + h_position_embeddings + w_position_embeddings ) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = ( words_embeddings + position_embeddings + two_dim_pos_embeddings + token_type_embeddings ) embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class SkimAttention(nn.Module): def __init__(self, config): super().__init__() self.num_attention_heads = config.num_attention_heads self.attention_head_size = config.skim_attention_head_size self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_layout_size, self.all_head_size) self.key = nn.Linear(config.hidden_layout_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_layout_states, attention_mask=None, ): key_layer = self.transpose_for_scores(self.key(hidden_layout_states)) query_layer = self.transpose_for_scores(self.query(hidden_layout_states)) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. 
attention_probs = nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # return the attention probabilities only: Softmax(QK^T/sqrt(d)) return attention_probs class SkimformerSelfAttention(nn.Module): def __init__(self, config): super().__init__() self.num_attention_heads = config.num_attention_heads self.attention_head_size = config.attention_head_size self.all_head_size = self.num_attention_heads * self.attention_head_size self.value = nn.Linear(config.hidden_size, self.all_head_size) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_probs, head_mask=None, ): value_layer = self.transpose_for_scores(self.value(hidden_states)) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask # Softmax(QK^T/sqrt(d)) . V context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer class SkimformerSelfOutput(nn.Module): def __init__(self, config): super().__init__() all_head_size = config.num_attention_heads * config.attention_head_size self.dense = nn.Linear(all_head_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class SkimformerAttention(nn.Module): def __init__(self, config): super().__init__() self.self = SkimformerSelfAttention(config) self.output = SkimformerSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, attention_probs, head_mask=None, ): if len(self.pruned_heads) > 0: num_attention_heads = attention_probs.shape[1] indices = [idx for idx in range(num_attention_heads) if idx not in self.pruned_heads] attention_probs = torch.index_select(attention_probs, 1, indices) self_output = self.self( hidden_states, attention_probs, head_mask, ) attention_output = self.output(self_output, hidden_states) return attention_output class SkimformerLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = SkimformerAttention(config) self.intermediate = BertIntermediate(config) self.output = BertOutput(config) def forward( self, hidden_states, attention_probs, 
head_mask=None, ): attention_output = self.attention( hidden_states, attention_probs, head_mask, ) layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) return layer_output def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class SkimformerEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([SkimformerLayer(config) for _ in range(config.num_hidden_layers)]) def forward( self, hidden_states, attention_probs, head_mask=None, output_hidden_states=False, return_dict=None, ): all_hidden_states = () if output_hidden_states else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if getattr(self.config, "gradient_checkpointing", False): def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward layer_output = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_probs, layer_head_mask, ) else: layer_output = layer_module( hidden_states, attention_probs, layer_head_mask, ) hidden_states = layer_output if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, all_hidden_states, ] if v is not None ) return SkimformerEncoderOutput( hidden_states=hidden_states, all_hidden_states=all_hidden_states, ) class SkimformerPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = SkimformerConfig base_model_prefix = "skimformer" def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() class BertWithSkimEmbedPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BertWithSkimEmbedConfig base_model_prefix = "bertwithskimembed" _keys_to_ignore_on_load_missing = [r"position_ids"] def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() class SkimmingMaskPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = SkimmingMaskConfig base_model_prefix = "skimmingmask" _keys_to_ignore_on_load_missing = [r"position_ids"] def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() @dataclass class SkimformerModelOutput(ModelOutput): """ Output type of :class:`~SkimformerModel`. Args: last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, hidden_size)`): Sequence of hidden-states at the last layer of the model. ``num_predict`` corresponds to ``target_mapping.shape[1]``. If ``target_mapping`` is ``None``, then ``num_predict`` corresponds to ``sequence_length``. pooler_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): :obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" last_hidden_state: torch.FloatTensor = None pooler_output: torch.FloatTensor = None attentions: Optional[torch.FloatTensor] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None class SkimformerModel(SkimformerPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.use_1d_positions = config.use_1d_positions self.text_embeddings = SkimformerTextEmbeddings(config) if self.use_1d_positions: self.one_dim_pos_embeddings = Skimformer1DPositionEmbeddings(config) else: self.two_dim_pos_embeddings = Skimformer2DPositionEmbeddings(config) self.contextualize_2d_positions = config.contextualize_2d_positions if self.contextualize_2d_positions: self.layout_encoder = BertEncoder( BertConfig( hidden_size=config.hidden_layout_size, num_hidden_layers=config.num_hidden_layers_layout_encoder, num_attention_heads=config.num_attention_heads_layout_encoder, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, hidden_dropout_prob=config.hidden_dropout_prob, attention_probs_dropout_prob=config.attention_probs_dropout_prob, max_position_embeddings=config.max_2d_position_embeddings, initializer_range=config.initializer_range, layer_norm_eps=config.layer_norm_eps, gradient_checkpointing=config.gradient_checkpointing, ) ) self.skim_attention = SkimAttention(config) self.encoder = SkimformerEncoder(config) self.pooler = BertPooler(config) if add_pooling_layer else None self.init_weights() def get_input_embeddings(self): return self.text_embeddings.word_embeddings def set_input_embeddings(self, value): self.text_embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def forward( self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() batch_size, seq_length = input_shape elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size, seq_length = input_shape else: raise ValueError("You have to specify either input_ids or inputs_embeds") assert ( len(input_shape) == 2 ), "`input_ids` has to be of shape `[batch_size, sequence_length]`, but got shape: {}".format(input_shape) if bbox is not None: bbox_shape = bbox.size() assert ( len(bbox_shape) == 3 ), "`bbox` has to be of shape `[batch_size, sequence_length, 4]`, but got shape: {}".format(bbox_shape) device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length)), device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) if bbox is None: bbox = torch.zeros(tuple(list(input_shape) + [4]), 
dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) text_embedding_output = self.text_embeddings( input_ids=input_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) if self.use_1d_positions: pos_embedding_output = self.one_dim_pos_embeddings( input_shape=input_shape, device=device, position_ids=position_ids, ) else: pos_embedding_output = self.two_dim_pos_embeddings( bbox=bbox, ) if self.contextualize_2d_positions: pos_embedding_output = self.layout_encoder( hidden_states=pos_embedding_output, )[0] skim_attention_output = self.skim_attention( pos_embedding_output, attention_mask=extended_attention_mask, ) encoder_outputs = self.encoder( text_embedding_output, skim_attention_output, head_mask=head_mask, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: outputs = (sequence_output, pooled_output) if output_attentions: outputs = outputs + (skim_attention_output, ) if output_hidden_states: outputs = outputs + encoder_outputs[1:] return outputs return SkimformerModelOutput( last_hidden_state=sequence_output, pooler_output=pooled_output, attentions=skim_attention_output if output_attentions else None, hidden_states=encoder_outputs.all_hidden_states, ) class BertWithSkimEmbedModel(BertWithSkimEmbedPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = BertWithSkimEmbedEmbeddings(config) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) if add_pooling_layer else None self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def forward( self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) if bbox is None: bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device) extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 if head_mask is not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.to(dtype=next(self.parameters()).dtype) else: head_mask = [None] * self.config.num_hidden_layers embedding_output = self.embeddings( input_ids=input_ids, bbox=bbox, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) encoder_outputs = self.encoder( embedding_output, extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) class SkimmingMaskModel(SkimmingMaskPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.core_model_type = config.core_model_type self.embeddings = BertEmbeddings(config) if self.core_model_type == "bert" else LayoutLMEmbeddings(config) self.two_dim_pos_embeddings = Skimformer2DPositionEmbeddings(config) self.contextualize_2d_positions = config.contextualize_2d_positions if config.contextualize_2d_positions: self.layout_encoder = BertEncoder( BertConfig( hidden_size=config.hidden_layout_size, num_hidden_layers=config.num_hidden_layers_layout_encoder, 
num_attention_heads=config.num_attention_heads_layout_encoder, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, hidden_dropout_prob=config.hidden_dropout_prob, attention_probs_dropout_prob=config.attention_probs_dropout_prob, max_position_embeddings=config.max_2d_position_embeddings, initializer_range=config.initializer_range, layer_norm_eps=config.layer_norm_eps, gradient_checkpointing=config.gradient_checkpointing, ) ) self.skim_attention = SkimAttention(config) self.top_k = config.top_k self.encoder = BertEncoder(config) self.pooler = BertPooler(config) if add_pooling_layer else None self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def forward( self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) if bbox is None: bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) if self.core_model_type == "bert": text_embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) else: text_embedding_output = self.embeddings( input_ids=input_ids, bbox=bbox, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) spatial_pos_embedding_output = self.two_dim_pos_embeddings(bbox=bbox) if self.contextualize_2d_positions: spatial_pos_embedding_output = self.layout_encoder(hidden_states=spatial_pos_embedding_output)[0] skim_attention_output = self.skim_attention( spatial_pos_embedding_output, attention_mask=extended_attention_mask, ) topk_idx = torch.topk(skim_attention_output, self.top_k, -1).indices skim_attention_mask = torch.zeros(skim_attention_output.shape, device=device) skim_attention_mask = skim_attention_mask.scatter(-1, topk_idx, 1) skim_attention_mask = skim_attention_mask * attention_mask[:, None, :, :] skim_attention_mask = (1.0 - skim_attention_mask) * -10000.0 encoder_outputs = self.encoder( text_embedding_output, skim_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) class SkimformerForMaskedLM(SkimformerPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] def __init__(self, config): super().__init__(config) self.skimformer = SkimformerModel(config, add_pooling_layer=False) self.cls = BertOnlyMLMHead(config) self.init_weights() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings def forward( self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the masked language modeling loss. 
Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.skimformer( input_ids, bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class SkimformerForTokenClassification(SkimformerPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.skimformer = SkimformerModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def forward( self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - 1]``. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.skimformer( input_ids, bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class BertWithSkimEmbedForTokenClassification(BertWithSkimEmbedPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.bert_with_skim_embed = BertWithSkimEmbedModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def get_input_embeddings(self): return self.bert_with_skim_embed.embeddings.word_embeddings def set_input_embeddings(self, value): self.bert_with_skim_embed.embeddings.word_embeddings = value def forward( self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert_with_skim_embed( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels)[active_loss] active_labels = labels.view(-1)[active_loss] loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class SkimmingMaskForTokenClassification(SkimmingMaskPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.skimming_mask_model = SkimmingMaskModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = 
nn.Linear(config.hidden_size, config.num_labels) self.init_weights() def get_input_embeddings(self): return self.skimming_mask_model.embeddings.word_embeddings def set_input_embeddings(self, value): self.skimming_mask_model.embeddings.word_embeddings = value def forward( self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.skimming_mask_model( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() if attention_mask is not None: if attention_mask.dim() == 3: active_loss = (torch.sum(attention_mask, dim=-1)).view(-1) > 0 else: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels)[active_loss] active_labels = labels.view(-1)[active_loss] loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
2.140625
2
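The Skimformer code above rests on one idea: attention probabilities are computed once from layout-only embeddings (Softmax(QK^T/sqrt(d)) in SkimAttention) and then reused by every layer to mix the text value vectors (SkimformerSelfAttention). A minimal, self-contained sketch of that data flow, using toy shapes and random tensors rather than the module classes from the file above:

import math
import torch

batch, seq, heads, d_head, d_text = 2, 6, 4, 8, 16
q_layout = torch.randn(batch, heads, seq, d_head)     # per-head query projection of the layout embeddings
k_layout = torch.randn(batch, heads, seq, d_head)     # per-head key projection of the layout embeddings
text_values = torch.randn(batch, heads, seq, d_text)  # per-head value projection of the text hidden states

scores = q_layout @ k_layout.transpose(-1, -2) / math.sqrt(d_head)  # QK^T / sqrt(d), layout only
probs = scores.softmax(dim=-1)       # what SkimAttention returns
context = probs @ text_values        # what SkimformerSelfAttention computes, layer after layer
print(probs.shape, context.shape)    # torch.Size([2, 4, 6, 6]) torch.Size([2, 4, 6, 16])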
api/routers/dashboard.py
xming521/coco_API
0
1563
import time

import psutil
import pymysql
from fastapi import APIRouter

from api.utils import response_code

router = APIRouter()


@router.get('/dashboard/getinfo')
def getinfo():
    from init_global import g
    res = {}
    db = g.db_pool.connection()
    cur = db.cursor()
    cur.execute('select count(app_name) from app_list')
    res['app_count'] = cur.fetchall()[0][0]
    cur.execute('select count(app_name) from app_list where status="running"')
    res['app_run_count'] = cur.fetchall()[0][0]
    res['image_count'] = len(g.dc.images.list())
    res['networks_count'] = len(g.dc.networks.list())
    cur = db.cursor(cursor=pymysql.cursors.DictCursor)
    cur.execute('select * from app_list order by start_time desc limit 10')
    res['recent_event'] = cur.fetchall()
    db.close()
    return response_code.resp_200(data={"res": res})


def get_performance():
    res = {}
    # CPU
    cpuCount = psutil.cpu_count(logical=False)  # physical CPU cores
    cpuPercent = psutil.cpu_percent(0.5)        # CPU usage (%)
    cpufree = round(100 - cpuPercent, 2)        # idle CPU (%)
    # Memory
    m = psutil.virtual_memory()                 # memory info
    memoryTotal = round(m.total / (1024.0 * 1024.0 * 1024.0), 2)  # total memory (GB)
    memoryUsed = round(m.used / (1024.0 * 1024.0 * 1024.0), 2)    # used memory (GB)
    memoryFree = round(memoryTotal - memoryUsed, 2)               # free memory (GB)
    # Disk
    io = psutil.disk_partitions()
    diskCount = len(io)
    diskTotal = 0  # total disk space (GB)
    diskUsed = 0   # used space (GB)
    diskFree = 0   # free space (GB)
    for i in io:
        try:
            o = psutil.disk_usage(i.mountpoint)
            diskTotal += int(o.total / (1024.0 * 1024.0 * 1024.0))
            diskUsed += int(o.used / (1024.0 * 1024.0 * 1024.0))
            diskFree += int(o.free / (1024.0 * 1024.0 * 1024.0))
        except Exception:
            # some mount points (e.g. removable or virtual ones) may not be readable
            pass
    res['cpu'] = cpuPercent
    res['mem'] = m.percent
    # overall disk usage percentage, aggregated over all readable partitions
    res['disk'] = round(diskUsed / diskTotal * 100, 2) if diskTotal else 0.0
    res['memoryTotal'] = memoryTotal
    res['memoryUsed'] = memoryUsed
    res['diskTotal'] = diskTotal
    res['diskUsed'] = diskUsed
    return res


def push_realinfo():
    from init_global import g
    from main import socket_manager as sm
    print(g.person_online)
    while g.person_online:
        res = get_performance()
        # print(res)
        g.push_loop.run_until_complete(sm.emit('dashboard', {'data': res}))
        time.sleep(3)
2.234375
2
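get_performance above leans on three psutil calls. A standalone sketch, assuming only that psutil is installed, that produces the same cpu/mem/disk numbers without the FastAPI and database plumbing (the helper name and the sample output are illustrative):

import psutil

def system_snapshot():
    cpu = psutil.cpu_percent(0.5)          # CPU utilisation sampled over 0.5 s, as in get_performance()
    mem = psutil.virtual_memory().percent  # memory utilisation in percent
    total = used = 0
    for part in psutil.disk_partitions():
        try:
            usage = psutil.disk_usage(part.mountpoint)
        except Exception:
            continue                       # skip unreadable mount points, mirroring the original loop
        total += usage.total
        used += usage.used
    disk = round(used / total * 100, 2) if total else 0.0
    return {"cpu": cpu, "mem": mem, "disk": disk}

print(system_snapshot())                   # e.g. {'cpu': 7.3, 'mem': 41.2, 'disk': 63.8}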
retargeting/models/Kinematics.py
yujiatay/deep-motion-editing
1
1564
import torch import torch.nn as nn import numpy as np import math class ForwardKinematics: def __init__(self, args, edges): self.topology = [-1] * (len(edges) + 1) self.rotation_map = [] for i, edge in enumerate(edges): self.topology[edge[1]] = edge[0] self.rotation_map.append(edge[1]) self.world = args.fk_world self.pos_repr = args.pos_repr self.quater = args.rotation == 'quaternion' def forward_from_raw(self, raw, offset, world=None, quater=None): if world is None: world = self.world if quater is None: quater = self.quater if self.pos_repr == '3d': position = raw[:, -3:, :] rotation = raw[:, :-3, :] elif self.pos_repr == '4d': raise Exception('Not support') if quater: rotation = rotation.reshape((rotation.shape[0], -1, 4, rotation.shape[-1])) identity = torch.tensor((1, 0, 0, 0), dtype=torch.float, device=raw.device) else: rotation = rotation.reshape((rotation.shape[0], -1, 3, rotation.shape[-1])) identity = torch.zeros((3, ), dtype=torch.float, device=raw.device) identity = identity.reshape((1, 1, -1, 1)) new_shape = list(rotation.shape) new_shape[1] += 1 new_shape[2] = 1 rotation_final = identity.repeat(new_shape) for i, j in enumerate(self.rotation_map): rotation_final[:, j, :, :] = rotation[:, i, :, :] return self.forward(rotation_final, position, offset, world=world, quater=quater) ''' rotation should have shape batch_size * Joint_num * (3/4) * Time position should have shape batch_size * 3 * Time offset should have shape batch_size * Joint_num * 3 output have shape batch_size * Time * Joint_num * 3 ''' def forward(self, rotation: torch.Tensor, position: torch.Tensor, offset: torch.Tensor, order='xyz', quater=False, world=True): if not quater and rotation.shape[-2] != 3: raise Exception('Unexpected shape of rotation') if quater and rotation.shape[-2] != 4: raise Exception('Unexpected shape of rotation') rotation = rotation.permute(0, 3, 1, 2) position = position.permute(0, 2, 1) result = torch.empty(rotation.shape[:-1] + (3, ), device=position.device) norm = torch.norm(rotation, dim=-1, keepdim=True) #norm[norm < 1e-10] = 1 rotation = rotation / norm if quater: transform = self.transform_from_quaternion(rotation) else: transform = self.transform_from_euler(rotation, order) offset = offset.reshape((-1, 1, offset.shape[-2], offset.shape[-1], 1)) result[..., 0, :] = position for i, pi in enumerate(self.topology): if pi == -1: assert i == 0 continue transform[..., i, :, :] = torch.matmul(transform[..., pi, :, :], transform[..., i, :, :]) result[..., i, :] = torch.matmul(transform[..., i, :, :], offset[..., i, :, :]).squeeze() if world: result[..., i, :] += result[..., pi, :] return result def from_local_to_world(self, res: torch.Tensor): res = res.clone() for i, pi in enumerate(self.topology): if pi == 0 or pi == -1: continue res[..., i, :] += res[..., pi, :] return res @staticmethod def transform_from_euler(rotation, order): rotation = rotation / 180 * math.pi transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 1], order[1]), ForwardKinematics.transform_from_axis(rotation[..., 2], order[2])) transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 0], order[0]), transform) return transform @staticmethod def transform_from_axis(euler, axis): transform = torch.empty(euler.shape[0:3] + (3, 3), device=euler.device) cos = torch.cos(euler) sin = torch.sin(euler) cord = ord(axis) - ord('x') transform[..., cord, :] = transform[..., :, cord] = 0 transform[..., cord, cord] = 1 if axis == 'x': transform[..., 1, 1] = transform[..., 2, 2] = cos 
transform[..., 1, 2] = -sin transform[..., 2, 1] = sin if axis == 'y': transform[..., 0, 0] = transform[..., 2, 2] = cos transform[..., 0, 2] = sin transform[..., 2, 0] = -sin if axis == 'z': transform[..., 0, 0] = transform[..., 1, 1] = cos transform[..., 0, 1] = -sin transform[..., 1, 0] = sin return transform @staticmethod def transform_from_quaternion(quater: torch.Tensor): qw = quater[..., 0] qx = quater[..., 1] qy = quater[..., 2] qz = quater[..., 3] x2 = qx + qx y2 = qy + qy z2 = qz + qz xx = qx * x2 yy = qy * y2 wx = qw * x2 xy = qx * y2 yz = qy * z2 wy = qw * y2 xz = qx * z2 zz = qz * z2 wz = qw * z2 m = torch.empty(quater.shape[:-1] + (3, 3), device=quater.device) m[..., 0, 0] = 1.0 - (yy + zz) m[..., 0, 1] = xy - wz m[..., 0, 2] = xz + wy m[..., 1, 0] = xy + wz m[..., 1, 1] = 1.0 - (xx + zz) m[..., 1, 2] = yz - wx m[..., 2, 0] = xz - wy m[..., 2, 1] = yz + wx m[..., 2, 2] = 1.0 - (xx + yy) return m class InverseKinematics: def __init__(self, rotations: torch.Tensor, positions: torch.Tensor, offset, parents, constrains): self.rotations = rotations self.rotations.requires_grad_(True) self.position = positions self.position.requires_grad_(True) self.parents = parents self.offset = offset self.constrains = constrains self.optimizer = torch.optim.Adam([self.position, self.rotations], lr=1e-3, betas=(0.9, 0.999)) self.crit = nn.MSELoss() def step(self): self.optimizer.zero_grad() glb = self.forward(self.rotations, self.position, self.offset, order='', quater=True, world=True) loss = self.crit(glb, self.constrains) loss.backward() self.optimizer.step() self.glb = glb return loss.item() def tloss(self, time): return self.crit(self.glb[time, :], self.constrains[time, :]) def all_loss(self): res = [self.tloss(t).detach().numpy() for t in range(self.constrains.shape[0])] return np.array(res) ''' rotation should have shape batch_size * Joint_num * (3/4) * Time position should have shape batch_size * 3 * Time offset should have shape batch_size * Joint_num * 3 output have shape batch_size * Time * Joint_num * 3 ''' def forward(self, rotation: torch.Tensor, position: torch.Tensor, offset: torch.Tensor, order='xyz', quater=False, world=True): ''' if not quater and rotation.shape[-2] != 3: raise Exception('Unexpected shape of rotation') if quater and rotation.shape[-2] != 4: raise Exception('Unexpected shape of rotation') rotation = rotation.permute(0, 3, 1, 2) position = position.permute(0, 2, 1) ''' result = torch.empty(rotation.shape[:-1] + (3,), device=position.device) norm = torch.norm(rotation, dim=-1, keepdim=True) rotation = rotation / norm if quater: transform = self.transform_from_quaternion(rotation) else: transform = self.transform_from_euler(rotation, order) offset = offset.reshape((-1, 1, offset.shape[-2], offset.shape[-1], 1)) result[..., 0, :] = position for i, pi in enumerate(self.parents): if pi == -1: assert i == 0 continue result[..., i, :] = torch.matmul(transform[..., pi, :, :], offset[..., i, :, :]).squeeze() transform[..., i, :, :] = torch.matmul(transform[..., pi, :, :], transform[..., i, :, :]) if world: result[..., i, :] += result[..., pi, :] return result @staticmethod def transform_from_euler(rotation, order): rotation = rotation / 180 * math.pi transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 1], order[1]), ForwardKinematics.transform_from_axis(rotation[..., 2], order[2])) transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 0], order[0]), transform) return transform @staticmethod def transform_from_axis(euler, 
axis): transform = torch.empty(euler.shape[0:3] + (3, 3), device=euler.device) cos = torch.cos(euler) sin = torch.sin(euler) cord = ord(axis) - ord('x') transform[..., cord, :] = transform[..., :, cord] = 0 transform[..., cord, cord] = 1 if axis == 'x': transform[..., 1, 1] = transform[..., 2, 2] = cos transform[..., 1, 2] = -sin transform[..., 2, 1] = sin if axis == 'y': transform[..., 0, 0] = transform[..., 2, 2] = cos transform[..., 0, 2] = sin transform[..., 2, 0] = -sin if axis == 'z': transform[..., 0, 0] = transform[..., 1, 1] = cos transform[..., 0, 1] = -sin transform[..., 1, 0] = sin return transform @staticmethod def transform_from_quaternion(quater: torch.Tensor): qw = quater[..., 0] qx = quater[..., 1] qy = quater[..., 2] qz = quater[..., 3] x2 = qx + qx y2 = qy + qy z2 = qz + qz xx = qx * x2 yy = qy * y2 wx = qw * x2 xy = qx * y2 yz = qy * z2 wy = qw * y2 xz = qx * z2 zz = qz * z2 wz = qw * z2 m = torch.empty(quater.shape[:-1] + (3, 3), device=quater.device) m[..., 0, 0] = 1.0 - (yy + zz) m[..., 0, 1] = xy - wz m[..., 0, 2] = xz + wy m[..., 1, 0] = xy + wz m[..., 1, 1] = 1.0 - (xx + zz) m[..., 1, 2] = yz - wx m[..., 2, 0] = xz - wy m[..., 2, 1] = yz + wx m[..., 2, 2] = 1.0 - (xx + yy) return m
2.375
2
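transform_from_quaternion above expands the standard unit-quaternion to rotation-matrix formula. A small stand-alone check of that algebra for a single quaternion, a 90-degree rotation about the z axis, written against the same expressions used in the file (the helper name is ad hoc):

import math
import torch

def quat_to_matrix(q):
    # same algebra as ForwardKinematics.transform_from_quaternion, for one (w, x, y, z) quaternion
    w, x, y, z = q
    xx, yy, zz = 2 * x * x, 2 * y * y, 2 * z * z
    xy, xz, yz = 2 * x * y, 2 * x * z, 2 * y * z
    wx, wy, wz = 2 * w * x, 2 * w * y, 2 * w * z
    return torch.tensor([
        [1.0 - (yy + zz), xy - wz,         xz + wy],
        [xy + wz,         1.0 - (xx + zz), yz - wx],
        [xz - wy,         yz + wx,         1.0 - (xx + yy)],
    ])

half = math.pi / 4                              # half of the 90-degree rotation angle
q = (math.cos(half), 0.0, 0.0, math.sin(half))  # unit quaternion: 90 degrees about z
print(quat_to_matrix(q))
# ~ [[0, -1, 0], [1, 0, 0], [0, 0, 1]], i.e. x -> y, y -> -x, z unchanged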
tests/operators/test_hive_operator.py
Ryan-Miao/airflow
0
1565
<reponame>Ryan-Miao/airflow<gh_stars>0 # -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import datetime import os import unittest from unittest import mock import nose from airflow import DAG, configuration, operators from airflow.models import TaskInstance from airflow.operators.hive_operator import HiveOperator from airflow.utils import timezone DEFAULT_DATE = datetime.datetime(2015, 1, 1) DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat() DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10] class TestHiveEnvironment(unittest.TestCase): def setUp(self): args = {'owner': 'airflow', 'start_date': DEFAULT_DATE} dag = DAG('test_dag_id', default_args=args) self.dag = dag self.hql = """ USE airflow; DROP TABLE IF EXISTS static_babynames_partitioned; CREATE TABLE IF NOT EXISTS static_babynames_partitioned ( state string, year string, name string, gender string, num int) PARTITIONED BY (ds string); INSERT OVERWRITE TABLE static_babynames_partitioned PARTITION(ds='{{ ds }}') SELECT state, year, name, gender, num FROM static_babynames; """ class TestHiveCli(unittest.TestCase): def setUp(self): self.nondefault_schema = "nondefault" os.environ["AIRFLOW__CORE__SECURITY"] = "kerberos" def tearDown(self): del os.environ["AIRFLOW__CORE__SECURITY"] def test_get_proxy_user_value(self): from airflow.hooks.hive_hooks import HiveCliHook hook = HiveCliHook() returner = mock.MagicMock() returner.extra_dejson = {'proxy_user': 'a_user_proxy'} hook.use_beeline = True hook.conn = returner # Run result = hook._prepare_cli_cmd() # Verify self.assertIn('hive.server2.proxy.user=a_user_proxy', result[2]) class HiveOperatorConfigTest(TestHiveEnvironment): def test_hive_airflow_default_config_queue(self): t = HiveOperator( task_id='test_default_config_queue', hql=self.hql, mapred_queue_priority='HIGH', mapred_job_name='airflow.test_default_config_queue', dag=self.dag) # just check that the correct default value in test_default.cfg is used test_config_hive_mapred_queue = configuration.conf.get( 'hive', 'default_hive_mapred_queue' ) self.assertEqual(t.get_hook().mapred_queue, test_config_hive_mapred_queue) def test_hive_airflow_default_config_queue_override(self): specific_mapred_queue = 'default' t = HiveOperator( task_id='test_default_config_queue', hql=self.hql, mapred_queue=specific_mapred_queue, mapred_queue_priority='HIGH', mapred_job_name='airflow.test_default_config_queue', dag=self.dag) self.assertEqual(t.get_hook().mapred_queue, specific_mapred_queue) class HiveOperatorTest(TestHiveEnvironment): def test_hiveconf_jinja_translate(self): hql = "SELECT ${num_col} FROM ${hiveconf:table};" t = HiveOperator( hiveconf_jinja_translate=True, task_id='dry_run_basic_hql', hql=hql, dag=self.dag) t.prepare_template() self.assertEqual(t.hql, "SELECT {{ num_col }} FROM {{ table }};") def 
test_hiveconf(self): hql = "SELECT * FROM ${hiveconf:table} PARTITION (${hiveconf:day});" t = HiveOperator( hiveconfs={'table': 'static_babynames', 'day': '{{ ds }}'}, task_id='dry_run_basic_hql', hql=hql, dag=self.dag) t.prepare_template() self.assertEqual( t.hql, "SELECT * FROM ${hiveconf:table} PARTITION (${hiveconf:day});") @mock.patch('airflow.operators.hive_operator.HiveOperator.get_hook') def test_mapred_job_name(self, mock_get_hook): mock_hook = mock.MagicMock() mock_get_hook.return_value = mock_hook t = HiveOperator( task_id='test_mapred_job_name', hql=self.hql, dag=self.dag) fake_execution_date = timezone.datetime(2018, 6, 19) fake_ti = TaskInstance(task=t, execution_date=fake_execution_date) fake_ti.hostname = 'fake_hostname' fake_context = {'ti': fake_ti} t.execute(fake_context) self.assertEqual( "Airflow HiveOperator task for {}.{}.{}.{}" .format(fake_ti.hostname, self.dag.dag_id, t.task_id, fake_execution_date.isoformat()), mock_hook.mapred_job_name) if 'AIRFLOW_RUNALL_TESTS' in os.environ: import airflow.hooks.hive_hooks import airflow.operators.presto_to_mysql class TestHivePresto(TestHiveEnvironment): def test_hive(self): t = HiveOperator( task_id='basic_hql', hql=self.hql, dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hive_queues(self): t = HiveOperator( task_id='test_hive_queues', hql=self.hql, mapred_queue='default', mapred_queue_priority='HIGH', mapred_job_name='airflow.test_hive_queues', dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hive_dryrun(self): t = HiveOperator( task_id='dry_run_basic_hql', hql=self.hql, dag=self.dag) t.dry_run() def test_beeline(self): t = HiveOperator( task_id='beeline_hql', hive_cli_conn_id='hive_cli_default', hql=self.hql, dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_presto(self): sql = """ SELECT count(1) FROM airflow.static_babynames_partitioned; """ t = operators.presto_check_operator.PrestoCheckOperator( task_id='presto_check', sql=sql, dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_presto_to_mysql(self): t = operators.presto_to_mysql.PrestoToMySqlTransfer( task_id='presto_to_mysql_check', sql=""" SELECT name, count(*) as ccount FROM airflow.static_babynames GROUP BY name """, mysql_table='test_static_babynames', mysql_preoperator='TRUNCATE TABLE test_static_babynames;', dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hdfs_sensor(self): t = operators.sensors.HdfsSensor( task_id='hdfs_sensor_check', filepath='hdfs://user/hive/warehouse/airflow.db/static_babynames', dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_webhdfs_sensor(self): t = operators.sensors.WebHdfsSensor( task_id='webhdfs_sensor_check', filepath='hdfs://user/hive/warehouse/airflow.db/static_babynames', timeout=120, dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_sql_sensor(self): t = operators.sensors.SqlSensor( task_id='hdfs_sensor_check', conn_id='presto_default', sql="SELECT 'x' FROM airflow.static_babynames LIMIT 1;", dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hive_stats(self): t = operators.hive_stats_operator.HiveStatsCollectionOperator( task_id='hive_stats_check', table="airflow.static_babynames_partitioned", partition={'ds': DEFAULT_DATE_DS}, 
dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_named_hive_partition_sensor(self): t = operators.sensors.NamedHivePartitionSensor( task_id='hive_partition_check', partition_names=[ "airflow.static_babynames_partitioned/ds={{ds}}" ], dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_named_hive_partition_sensor_succeeds_on_multiple_partitions(self): t = operators.sensors.NamedHivePartitionSensor( task_id='hive_partition_check', partition_names=[ "airflow.static_babynames_partitioned/ds={{ds}}", "airflow.static_babynames_partitioned/ds={{ds}}" ], dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_named_hive_partition_sensor_parses_partitions_with_periods(self): t = operators.sensors.NamedHivePartitionSensor.parse_partition_name( partition="schema.table/part1=this.can.be.an.issue/part2=ok") self.assertEqual(t[0], "schema") self.assertEqual(t[1], "table") self.assertEqual(t[2], "part1=this.can.be.an.issue/part2=this_should_be_ok") @nose.tools.raises(airflow.exceptions.AirflowSensorTimeout) def test_named_hive_partition_sensor_times_out_on_nonexistent_partition(self): t = operators.sensors.NamedHivePartitionSensor( task_id='hive_partition_check', partition_names=[ "airflow.static_babynames_partitioned/ds={{ds}}", "airflow.static_babynames_partitioned/ds=nonexistent" ], poke_interval=0.1, timeout=1, dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hive_partition_sensor(self): t = operators.sensors.HivePartitionSensor( task_id='hive_partition_check', table='airflow.static_babynames_partitioned', dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hive_metastore_sql_sensor(self): t = operators.sensors.MetastorePartitionSensor( task_id='hive_partition_check', table='airflow.static_babynames_partitioned', partition_name='ds={}'.format(DEFAULT_DATE_DS), dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hive2samba(self): t = operators.hive_to_samba_operator.Hive2SambaOperator( task_id='hive2samba_check', samba_conn_id='tableau_samba', hql="SELECT * FROM airflow.static_babynames LIMIT 10000", destination_filepath='test_airflow.csv', dag=self.dag) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) def test_hive_to_mysql(self): t = operators.hive_to_mysql.HiveToMySqlTransfer( mysql_conn_id='airflow_db', task_id='hive_to_mysql_check', create=True, sql=""" SELECT name FROM airflow.static_babynames LIMIT 100 """, mysql_table='test_static_babynames', mysql_preoperator=[ 'DROP TABLE IF EXISTS test_static_babynames;', 'CREATE TABLE test_static_babynames (name VARCHAR(500))', ], dag=self.dag) t.clear(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE) t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
1.992188
2
main.py
OrionDark7/Alakajam12
0
1566
import pygame, math from game import map, ui window = pygame.display.set_mode([800, 600]) ui.window = window screen = "game" s = {"fullscreen": False} running = True gamedata = {"level": 0, "coal": 0, "iron": 1, "copper":0} tiles = pygame.sprite.Group() rails = pygame.sprite.Group() carts = pygame.sprite.Group() interactables = pygame.sprite.Group() listmap = [] clock = pygame.time.Clock() selected = pygame.image.load("./resources/images/selected.png") selected2 = pygame.image.load("./resources/images/selected2.png") box = pygame.image.load("./resources/images/box.png") uibox = pygame.image.load("./resources/images/ui box.png") class Mouse(pygame.sprite.Sprite): def __init__(self): pygame.sprite.Sprite.__init__(self) self.image = pygame.surface.Surface([1, 1]) self.rect = self.image.get_rect() self.rect.topleft = [0, 0] self.clickedcart = None self.hoveritem = None self.tl = self.rect.topleft self.mode = "select" def pos(self, position): self.rect.topleft = position self.tl = self.rect.topleft m = Mouse() def snaptogrid(pos): return [int(math.floor(pos[0] / 40)), int(math.floor(pos[1] / 40))] def loadlevel(number): global tiles, rails, carts, gamedata, listmap, interactables tiles, rails, interactables, listmap = map.loadmap(int(number)) carts.empty() gamedata["level"] = number gamedata["coal"] = 0 gamedata["iron"] = 1 gamedata["copper"] = 0 loadlevel(0) while running: for event in pygame.event.get(): if event.type == pygame.QUIT: running = False elif event.type == pygame.MOUSEBUTTONDOWN: m.pos(pygame.mouse.get_pos()) if screen == "game": if pygame.sprite.spritecollide(m, carts, False) and m.mode == "select": carts.update("select", m, listmap) if m.clickedcart != None: m.mode = "action" elif m.mode == "action" and m.clickedcart != None and listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]] > 0: m.clickedcart.pathfind(listmap, snaptogrid(m.tl)) m.clickedcart = None m.mode = "select" elif event.type == pygame.MOUSEMOTION: m.pos(pygame.mouse.get_pos()) if screen == "game": m.hoveritem = None if len(pygame.sprite.spritecollide(m, carts, False)) > 0: m.hoveritem = pygame.sprite.spritecollide(m, carts, False)[0] elif len(pygame.sprite.spritecollide(m, interactables, False)) > 0: m.hoveritem = pygame.sprite.spritecollide(m, interactables, False)[0] elif event.type == pygame.KEYDOWN: if event.key == pygame.K_SPACE: carts.add(map.Cart(snaptogrid(m.tl), "miner")) if screen == "game": window.fill([100, 100, 100]) tiles.draw(window) carts.draw(window) carts.update("update", m, listmap) if not m.hoveritem == None and not m.mode == "action": window.blit(box, [m.rect.left+10, m.rect.top+10]) ui.Resize(30) ui.Text(m.hoveritem.type.upper(), [m.rect.left+27, m.rect.top+25]) if m.hoveritem.type.startswith("mine") and m.hoveritem not in carts: ui.Resize(18) ui.Text("Carts Inside: " + str(m.hoveritem.data["carts"]), [m.rect.left+27, m.rect.top+47]) ui.Text("Max Carts: " + str(m.hoveritem.data["max"]), [m.rect.left+27, m.rect.top+60]) if not m.clickedcart == None: window.blit(selected2, [m.clickedcart.rect.left-2, m.clickedcart.rect.top-2]) if m.mode == "action": window.blit(box, [m.rect.left+10, m.rect.top+10]) ui.Resize(30) try: ui.Text(m.hoveritem.type.upper(), [m.rect.left+27, m.rect.top+25]) except: ui.Text(m.clickedcart.type.upper(), [m.rect.left+27, m.rect.top+25]) if listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]] > 0: ui.Resize(22) ui.Text("Click to move", [m.rect.left+27, m.rect.top+45]) ui.Text("Cart Here", [m.rect.left+27, m.rect.top+60]) window.blit(selected, [snaptogrid(m.tl)[0]*40-2, 
snaptogrid(m.tl)[1]*40-2]) window.blit(uibox, [555, 475]) pygame.display.flip() clock.tick(60) fps = clock.get_fps() pygame.quit()
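For readers skimming the grid logic above, a quick standalone illustration of the snaptogrid helper (coordinates chosen arbitrarily): with 40-pixel tiles, a mouse position maps to a grid cell by floor division.

# Illustrative only: mirrors the snaptogrid helper defined in main.py above.
import math

def snaptogrid(pos):
    return [int(math.floor(pos[0] / 40)), int(math.floor(pos[1] / 40))]

print(snaptogrid([85, 130]))   # -> [2, 3]
print(snaptogrid([799, 599]))  # -> [19, 14], the bottom-right tile of the 800x600 window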
2.875
3
Code/extract_method3.py
AbdullahNoori/CS-2.1-Trees-Sorting
0
1567
<gh_stars>0
# Written by <NAME>
# Example for Compose Methods: Extract Method.

import math


def get_distance(xc1=5, xc2=7.25, yc1=22, yc2=-4.84):
    # Calculate the distance between the two circle centers.
    return math.sqrt((xc1 - xc2)**2 + (yc1 - yc2)**2)


print('distance', get_distance())


# *** somewhere else in your program ***
def get_length(xa=-50, ya=99, xb=.67, yb=.26):
    # Calculate the length of vector AB, i.e. the distance between points A and B.
    return math.sqrt((xa - xb)*(xa - xb) + (ya - yb)*(ya - yb))


print('length', get_length())
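The point of the Extract Method exercise above is that both functions compute the same Euclidean distance; a minimal sketch of the extracted helper (the helper name is illustrative, not part of the original exercise):

import math

def distance(x1, y1, x2, y2):
    # Shared helper: Euclidean distance between points (x1, y1) and (x2, y2).
    return math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)

def get_distance(xc1=5, xc2=7.25, yc1=22, yc2=-4.84):
    return distance(xc1, yc1, xc2, yc2)

def get_length(xa=-50, ya=99, xb=.67, yb=.26):
    return distance(xa, ya, xb, yb)

print('distance', get_distance())
print('length', get_length())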
3.375
3
sympy/integrals/prde.py
Abhi58/sympy
2
1568
""" Algorithms for solving Parametric Risch Differential Equations. The methods used for solving Parametric Risch Differential Equations parallel those for solving Risch Differential Equations. See the outline in the docstring of rde.py for more information. The Parametric Risch Differential Equation problem is, given f, g1, ..., gm in K(t), to determine if there exist y in K(t) and c1, ..., cm in Const(K) such that Dy + f*y == Sum(ci*gi, (i, 1, m)), and to find such y and ci if they exist. For the algorithms here G is a list of tuples of factions of the terms on the right hand side of the equation (i.e., gi in k(t)), and Q is a list of terms on the right hand side of the equation (i.e., qi in k[t]). See the docstring of each function for more information. """ from __future__ import print_function, division from sympy.core import Dummy, ilcm, Add, Mul, Pow, S from sympy.core.compatibility import reduce, range from sympy.integrals.rde import (order_at, order_at_oo, weak_normalizer, bound_degree) from sympy.integrals.risch import (gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative) from sympy.matrices import zeros, eye from sympy.polys import Poly, lcm, cancel, sqf_list from sympy.polys.polymatrix import PolyMatrix as Matrix from sympy.solvers import solve def prde_normal_denom(fa, fd, G, DE): """ Parametric Risch Differential Equation - Normal part of the denominator. Given a derivation D on k[t] and f, g1, ..., gm in k(t) with f weakly normalized with respect to t, return the tuple (a, b, G, h) such that a, h in k[t], b in k<t>, G = [g1, ..., gm] in k(t)^m, and for any solution c1, ..., cm in Const(k) and y in k(t) of Dy + f*y == Sum(ci*gi, (i, 1, m)), q == y*h in k<t> satisfies a*Dq + b*q == Sum(ci*Gi, (i, 1, m)). """ dn, ds = splitfactor(fd, DE) Gas, Gds = list(zip(*G)) gd = reduce(lambda i, j: i.lcm(j), Gds, Poly(1, DE.t)) en, es = splitfactor(gd, DE) p = dn.gcd(en) h = en.gcd(en.diff(DE.t)).quo(p.gcd(p.diff(DE.t))) a = dn*h c = a*h ba = a*fa - dn*derivation(h, DE)*fd ba, bd = ba.cancel(fd, include=True) G = [(c*A).cancel(D, include=True) for A, D in G] return (a, (ba, bd), G, h) def real_imag(ba, bd, gen): """ Helper function, to get the real and imaginary part of a rational function evaluated at sqrt(-1) without actually evaluating it at sqrt(-1) Separates the even and odd power terms by checking the degree of terms wrt mod 4. Returns a tuple (ba[0], ba[1], bd) where ba[0] is real part of the numerator ba[1] is the imaginary part and bd is the denominator of the rational function. 
""" bd = bd.as_poly(gen).as_dict() ba = ba.as_poly(gen).as_dict() denom_real = [value if key[0] % 4 == 0 else -value if key[0] % 4 == 2 else 0 for key, value in bd.items()] denom_imag = [value if key[0] % 4 == 1 else -value if key[0] % 4 == 3 else 0 for key, value in bd.items()] bd_real = sum(r for r in denom_real) bd_imag = sum(r for r in denom_imag) num_real = [value if key[0] % 4 == 0 else -value if key[0] % 4 == 2 else 0 for key, value in ba.items()] num_imag = [value if key[0] % 4 == 1 else -value if key[0] % 4 == 3 else 0 for key, value in ba.items()] ba_real = sum(r for r in num_real) ba_imag = sum(r for r in num_imag) ba = ((ba_real*bd_real + ba_imag*bd_imag).as_poly(gen), (ba_imag*bd_real - ba_real*bd_imag).as_poly(gen)) bd = (bd_real*bd_real + bd_imag*bd_imag).as_poly(gen) return (ba[0], ba[1], bd) def prde_special_denom(a, ba, bd, G, DE, case='auto'): """ Parametric Risch Differential Equation - Special part of the denominator. case is one of {'exp', 'tan', 'primitive'} for the hyperexponential, hypertangent, and primitive cases, respectively. For the hyperexponential (resp. hypertangent) case, given a derivation D on k[t] and a in k[t], b in k<t>, and g1, ..., gm in k(t) with Dt/t in k (resp. Dt/(t**2 + 1) in k, sqrt(-1) not in k), a != 0, and gcd(a, t) == 1 (resp. gcd(a, t**2 + 1) == 1), return the tuple (A, B, GG, h) such that A, B, h in k[t], GG = [gg1, ..., ggm] in k(t)^m, and for any solution c1, ..., cm in Const(k) and q in k<t> of a*Dq + b*q == Sum(ci*gi, (i, 1, m)), r == q*h in k[t] satisfies A*Dr + B*r == Sum(ci*ggi, (i, 1, m)). For case == 'primitive', k<t> == k[t], so it returns (a, b, G, 1) in this case. """ # TODO: Merge this with the very similar special_denom() in rde.py if case == 'auto': case = DE.case if case == 'exp': p = Poly(DE.t, DE.t) elif case == 'tan': p = Poly(DE.t**2 + 1, DE.t) elif case in ['primitive', 'base']: B = ba.quo(bd) return (a, B, G, Poly(1, DE.t)) else: raise ValueError("case must be one of {'exp', 'tan', 'primitive', " "'base'}, not %s." % case) nb = order_at(ba, p, DE.t) - order_at(bd, p, DE.t) nc = min([order_at(Ga, p, DE.t) - order_at(Gd, p, DE.t) for Ga, Gd in G]) n = min(0, nc - min(0, nb)) if not nb: # Possible cancellation. if case == 'exp': dcoeff = DE.d.quo(Poly(DE.t, DE.t)) with DecrementLevel(DE): # We are guaranteed to not have problems, # because case != 'base'. alphaa, alphad = frac_in(-ba.eval(0)/bd.eval(0)/a.eval(0), DE.t) etaa, etad = frac_in(dcoeff, DE.t) A = parametric_log_deriv(alphaa, alphad, etaa, etad, DE) if A is not None: Q, m, z = A if Q == 1: n = min(n, m) elif case == 'tan': dcoeff = DE.d.quo(Poly(DE.t**2 + 1, DE.t)) with DecrementLevel(DE): # We are guaranteed to not have problems, # because case != 'base'. betaa, alphaa, alphad = real_imag(ba, bd*a, DE.t) betad = alphad etaa, etad = frac_in(dcoeff, DE.t) if recognize_log_derivative(2*betaa, betad, DE): A = parametric_log_deriv(alphaa, alphad, etaa, etad, DE) B = parametric_log_deriv(betaa, betad, etaa, etad, DE) if A is not None and B is not None: Q, s, z = A # TODO: Add test if Q == 1: n = min(n, s/2) N = max(0, -nb) pN = p**N pn = p**-n # This is 1/h A = a*pN B = ba*pN.quo(bd) + Poly(n, DE.t)*a*derivation(p, DE).quo(p)*pN G = [(Ga*pN*pn).cancel(Gd, include=True) for Ga, Gd in G] h = pn # (a*p**N, (b + n*a*Dp/p)*p**N, g1*p**(N - n), ..., gm*p**(N - n), p**-n) return (A, B, G, h) def prde_linear_constraints(a, b, G, DE): """ Parametric Risch Differential Equation - Generate linear constraints on the constants. 
Given a derivation D on k[t], a, b, in k[t] with gcd(a, b) == 1, and G = [g1, ..., gm] in k(t)^m, return Q = [q1, ..., qm] in k[t]^m and a matrix M with entries in k(t) such that for any solution c1, ..., cm in Const(k) and p in k[t] of a*Dp + b*p == Sum(ci*gi, (i, 1, m)), (c1, ..., cm) is a solution of Mx == 0, and p and the ci satisfy a*Dp + b*p == Sum(ci*qi, (i, 1, m)). Because M has entries in k(t), and because Matrix doesn't play well with Poly, M will be a Matrix of Basic expressions. """ m = len(G) Gns, Gds = list(zip(*G)) d = reduce(lambda i, j: i.lcm(j), Gds) d = Poly(d, field=True) Q = [(ga*(d).quo(gd)).div(d) for ga, gd in G] if not all([ri.is_zero for _, ri in Q]): N = max([ri.degree(DE.t) for _, ri in Q]) M = Matrix(N + 1, m, lambda i, j: Q[j][1].nth(i)) else: M = Matrix(0, m, []) # No constraints, return the empty matrix. qs, _ = list(zip(*Q)) return (qs, M) def poly_linear_constraints(p, d): """ Given p = [p1, ..., pm] in k[t]^m and d in k[t], return q = [q1, ..., qm] in k[t]^m and a matrix M with entries in k such that Sum(ci*pi, (i, 1, m)), for c1, ..., cm in k, is divisible by d if and only if (c1, ..., cm) is a solution of Mx = 0, in which case the quotient is Sum(ci*qi, (i, 1, m)). """ m = len(p) q, r = zip(*[pi.div(d) for pi in p]) if not all([ri.is_zero for ri in r]): n = max([ri.degree() for ri in r]) M = Matrix(n + 1, m, lambda i, j: r[j].nth(i)) else: M = Matrix(0, m, []) # No constraints. return q, M def constant_system(A, u, DE): """ Generate a system for the constant solutions. Given a differential field (K, D) with constant field C = Const(K), a Matrix A, and a vector (Matrix) u with coefficients in K, returns the tuple (B, v, s), where B is a Matrix with coefficients in C and v is a vector (Matrix) such that either v has coefficients in C, in which case s is True and the solutions in C of Ax == u are exactly all the solutions of Bx == v, or v has a non-constant coefficient, in which case s is False Ax == u has no constant solution. This algorithm is used both in solving parametric problems and in determining if an element a of K is a derivative of an element of K or the logarithmic derivative of a K-radical using the structure theorem approach. Because Poly does not play well with Matrix yet, this algorithm assumes that all matrix entries are Basic expressions. """ if not A: return A, u Au = A.row_join(u) Au = Au.rref(simplify=cancel, normalize_last=False)[0] # Warning: This will NOT return correct results if cancel() cannot reduce # an identically zero expression to 0. The danger is that we might # incorrectly prove that an integral is nonelementary (such as # risch_integrate(exp((sin(x)**2 + cos(x)**2 - 1)*x**2), x). # But this is a limitation in computer algebra in general, and implicit # in the correctness of the Risch Algorithm is the computability of the # constant field (actually, this same correctness problem exists in any # algorithm that uses rref()). # # We therefore limit ourselves to constant fields that are computable # via the cancel() function, in order to prevent a speed bottleneck from # calling some more complex simplification function (rational function # coefficients will fall into this class). Furthermore, (I believe) this # problem will only crop up if the integral explicitly contains an # expression in the constant field that is identically zero, but cannot # be reduced to such by cancel(). 
Therefore, a careful user can avoid this # problem entirely by being careful with the sorts of expressions that # appear in his integrand in the variables other than the integration # variable (the structure theorems should be able to completely decide these # problems in the integration variable). Au = Au.applyfunc(cancel) A, u = Au[:, :-1], Au[:, -1] for j in range(A.cols): for i in range(A.rows): if A[i, j].has(*DE.T): # This assumes that const(F(t0, ..., tn) == const(K) == F Ri = A[i, :] # Rm+1; m = A.rows Rm1 = Ri.applyfunc(lambda x: derivation(x, DE, basic=True)/ derivation(A[i, j], DE, basic=True)) Rm1 = Rm1.applyfunc(cancel) um1 = cancel(derivation(u[i], DE, basic=True)/ derivation(A[i, j], DE, basic=True)) for s in range(A.rows): # A[s, :] = A[s, :] - A[s, i]*A[:, m+1] Asj = A[s, j] A.row_op(s, lambda r, jj: cancel(r - Asj*Rm1[jj])) # u[s] = u[s] - A[s, j]*u[m+1 u.row_op(s, lambda r, jj: cancel(r - Asj*um1)) A = A.col_join(Rm1) u = u.col_join(Matrix([um1])) return (A, u) def prde_spde(a, b, Q, n, DE): """ Special Polynomial Differential Equation algorithm: Parametric Version. Given a derivation D on k[t], an integer n, and a, b, q1, ..., qm in k[t] with deg(a) > 0 and gcd(a, b) == 1, return (A, B, Q, R, n1), with Qq = [q1, ..., qm] and R = [r1, ..., rm], such that for any solution c1, ..., cm in Const(k) and q in k[t] of degree at most n of a*Dq + b*q == Sum(ci*gi, (i, 1, m)), p = (q - Sum(ci*ri, (i, 1, m)))/a has degree at most n1 and satisfies A*Dp + B*p == Sum(ci*qi, (i, 1, m)) """ R, Z = list(zip(*[gcdex_diophantine(b, a, qi) for qi in Q])) A = a B = b + derivation(a, DE) Qq = [zi - derivation(ri, DE) for ri, zi in zip(R, Z)] R = list(R) n1 = n - a.degree(DE.t) return (A, B, Qq, R, n1) def prde_no_cancel_b_large(b, Q, n, DE): """ Parametric Poly Risch Differential Equation - No cancellation: deg(b) large enough. Given a derivation D on k[t], n in ZZ, and b, q1, ..., qm in k[t] with b != 0 and either D == d/dt or deg(b) > max(0, deg(D) - 1), returns h1, ..., hr in k[t] and a matrix A with coefficients in Const(k) such that if c1, ..., cm in Const(k) and q in k[t] satisfy deg(q) <= n and Dq + b*q == Sum(ci*qi, (i, 1, m)), then q = Sum(dj*hj, (j, 1, r)), where d1, ..., dr in Const(k) and A*Matrix([[c1, ..., cm, d1, ..., dr]]).T == 0. """ db = b.degree(DE.t) m = len(Q) H = [Poly(0, DE.t)]*m for N in range(n, -1, -1): # [n, ..., 0] for i in range(m): si = Q[i].nth(N + db)/b.LC() sitn = Poly(si*DE.t**N, DE.t) H[i] = H[i] + sitn Q[i] = Q[i] - derivation(sitn, DE) - b*sitn if all(qi.is_zero for qi in Q): dc = -1 M = zeros(0, 2) else: dc = max([qi.degree(DE.t) for qi in Q]) M = Matrix(dc + 1, m, lambda i, j: Q[j].nth(i)) A, u = constant_system(M, zeros(dc + 1, 1), DE) c = eye(m) A = A.row_join(zeros(A.rows, m)).col_join(c.row_join(-c)) return (H, A) def prde_no_cancel_b_small(b, Q, n, DE): """ Parametric Poly Risch Differential Equation - No cancellation: deg(b) small enough. Given a derivation D on k[t], n in ZZ, and b, q1, ..., qm in k[t] with deg(b) < deg(D) - 1 and either D == d/dt or deg(D) >= 2, returns h1, ..., hr in k[t] and a matrix A with coefficients in Const(k) such that if c1, ..., cm in Const(k) and q in k[t] satisfy deg(q) <= n and Dq + b*q == Sum(ci*qi, (i, 1, m)) then q = Sum(dj*hj, (j, 1, r)) where d1, ..., dr in Const(k) and A*Matrix([[c1, ..., cm, d1, ..., dr]]).T == 0. 
""" m = len(Q) H = [Poly(0, DE.t)]*m for N in range(n, 0, -1): # [n, ..., 1] for i in range(m): si = Q[i].nth(N + DE.d.degree(DE.t) - 1)/(N*DE.d.LC()) sitn = Poly(si*DE.t**N, DE.t) H[i] = H[i] + sitn Q[i] = Q[i] - derivation(sitn, DE) - b*sitn if b.degree(DE.t) > 0: for i in range(m): si = Poly(Q[i].nth(b.degree(DE.t))/b.LC(), DE.t) H[i] = H[i] + si Q[i] = Q[i] - derivation(si, DE) - b*si if all(qi.is_zero for qi in Q): dc = -1 M = Matrix() else: dc = max([qi.degree(DE.t) for qi in Q]) M = Matrix(dc + 1, m, lambda i, j: Q[j].nth(i)) A, u = constant_system(M, zeros(dc + 1, 1), DE) c = eye(m) A = A.row_join(zeros(A.rows, m)).col_join(c.row_join(-c)) return (H, A) # else: b is in k, deg(qi) < deg(Dt) t = DE.t if DE.case != 'base': with DecrementLevel(DE): t0 = DE.t # k = k0(t0) ba, bd = frac_in(b, t0, field=True) Q0 = [frac_in(qi.TC(), t0, field=True) for qi in Q] f, B = param_rischDE(ba, bd, Q0, DE) # f = [f1, ..., fr] in k^r and B is a matrix with # m + r columns and entries in Const(k) = Const(k0) # such that Dy0 + b*y0 = Sum(ci*qi, (i, 1, m)) has # a solution y0 in k with c1, ..., cm in Const(k) # if and only y0 = Sum(dj*fj, (j, 1, r)) where # d1, ..., dr ar in Const(k) and # B*Matrix([c1, ..., cm, d1, ..., dr]) == 0. # Transform fractions (fa, fd) in f into constant # polynomials fa/fd in k[t]. # (Is there a better way?) f = [Poly(fa.as_expr()/fd.as_expr(), t, field=True) for fa, fd in f] else: # Base case. Dy == 0 for all y in k and b == 0. # Dy + b*y = Sum(ci*qi) is solvable if and only if # Sum(ci*qi) == 0 in which case the solutions are # y = d1*f1 for f1 = 1 and any d1 in Const(k) = k. f = [Poly(1, t, field=True)] # r = 1 B = Matrix([[qi.TC() for qi in Q] + [S(0)]]) # The condition for solvability is # B*Matrix([c1, ..., cm, d1]) == 0 # There are no constraints on d1. # Coefficients of t^j (j > 0) in Sum(ci*qi) must be zero. d = max([qi.degree(DE.t) for qi in Q]) if d > 0: M = Matrix(d, m, lambda i, j: Q[j].nth(i + 1)) A, _ = constant_system(M, zeros(d, 1), DE) else: # No constraints on the hj. A = Matrix(0, m, []) # Solutions of the original equation are # y = Sum(dj*fj, (j, 1, r) + Sum(ei*hi, (i, 1, m)), # where ei == ci (i = 1, ..., m), when # A*Matrix([c1, ..., cm]) == 0 and # B*Matrix([c1, ..., cm, d1, ..., dr]) == 0 # Build combined constraint matrix with m + r + m columns. r = len(f) I = eye(m) A = A.row_join(zeros(A.rows, r + m)) B = B.row_join(zeros(B.rows, m)) C = I.row_join(zeros(m, r)).row_join(-I) return f + H, A.col_join(B).col_join(C) def prde_cancel_liouvillian(b, Q, n, DE): """ Pg, 237. """ H = [] # Why use DecrementLevel? Below line answers that: # Assuming that we can solve such problems over 'k' (not k[t]) if DE.case == 'primitive': with DecrementLevel(DE): ba, bd = frac_in(b, DE.t, field=True) for i in range(n, -1, -1): if DE.case == 'exp': # this re-checking can be avoided with DecrementLevel(DE): ba, bd = frac_in(b + i*derivation(DE.t, DE)/DE.t, DE.t, field=True) with DecrementLevel(DE): Qy = [frac_in(q.nth(i), DE.t, field=True) for q in Q] fi, Ai = param_rischDE(ba, bd, Qy, DE) fi = [Poly(fa.as_expr()/fd.as_expr(), DE.t, field=True) for fa, fd in fi] ri = len(fi) if i == n: M = Ai else: M = Ai.col_join(M.row_join(zeros(M.rows, ri))) Fi, hi = [None]*ri, [None]*ri # from eq. 
on top of p.238 (unnumbered) for j in range(ri): hji = fi[j]*DE.t**i hi[j] = hji # building up Sum(djn*(D(fjn*t^n) - b*fjnt^n)) Fi[j] = -(derivation(hji, DE) - b*hji) H += hi # in the next loop instead of Q it has # to be Q + Fi taking its place Q = Q + Fi return (H, M) def param_poly_rischDE(a, b, q, n, DE): """Polynomial solutions of a parametric Risch differential equation. Given a derivation D in k[t], a, b in k[t] relatively prime, and q = [q1, ..., qm] in k[t]^m, return h = [h1, ..., hr] in k[t]^r and a matrix A with m + r columns and entries in Const(k) such that a*Dp + b*p = Sum(ci*qi, (i, 1, m)) has a solution p of degree <= n in k[t] with c1, ..., cm in Const(k) if and only if p = Sum(dj*hj, (j, 1, r)) where d1, ..., dr are in Const(k) and (c1, ..., cm, d1, ..., dr) is a solution of Ax == 0. """ m = len(q) if n < 0: # Only the trivial zero solution is possible. # Find relations between the qi. if all([qi.is_zero for qi in q]): return [], zeros(1, m) # No constraints. N = max([qi.degree(DE.t) for qi in q]) M = Matrix(N + 1, m, lambda i, j: q[j].nth(i)) A, _ = constant_system(M, zeros(M.rows, 1), DE) return [], A if a.is_ground: # Normalization: a = 1. a = a.LC() b, q = b.quo_ground(a), [qi.quo_ground(a) for qi in q] if not b.is_zero and (DE.case == 'base' or b.degree() > max(0, DE.d.degree() - 1)): return prde_no_cancel_b_large(b, q, n, DE) elif ((b.is_zero or b.degree() < DE.d.degree() - 1) and (DE.case == 'base' or DE.d.degree() >= 2)): return prde_no_cancel_b_small(b, q, n, DE) elif (DE.d.degree() >= 2 and b.degree() == DE.d.degree() - 1 and n > -b.as_poly().LC()/DE.d.as_poly().LC()): raise NotImplementedError("prde_no_cancel_b_equal() is " "not yet implemented.") else: # Liouvillian cases if DE.case == 'primitive' or DE.case == 'exp': return prde_cancel_liouvillian(b, q, n, DE) else: raise NotImplementedError("non-linear and hypertangent " "cases have not yet been implemented") # else: deg(a) > 0 # Iterate SPDE as long as possible cumulating coefficient # and terms for the recovery of original solutions. alpha, beta = 1, [0]*m while n >= 0: # and a, b relatively prime a, b, q, r, n = prde_spde(a, b, q, n, DE) beta = [betai + alpha*ri for betai, ri in zip(beta, r)] alpha *= a # Solutions p of a*Dp + b*p = Sum(ci*qi) correspond to # solutions alpha*p + Sum(ci*betai) of the initial equation. d = a.gcd(b) if not d.is_ground: break # a*Dp + b*p = Sum(ci*qi) may have a polynomial solution # only if the sum is divisible by d. qq, M = poly_linear_constraints(q, d) # qq = [qq1, ..., qqm] where qqi = qi.quo(d). # M is a matrix with m columns an entries in k. # Sum(fi*qi, (i, 1, m)), where f1, ..., fm are elements of k, is # divisible by d if and only if M*Matrix([f1, ..., fm]) == 0, # in which case the quotient is Sum(fi*qqi). A, _ = constant_system(M, zeros(M.rows, 1), DE) # A is a matrix with m columns and entries in Const(k). # Sum(ci*qqi) is Sum(ci*qi).quo(d), and the remainder is zero # for c1, ..., cm in Const(k) if and only if # A*Matrix([c1, ...,cm]) == 0. V = A.nullspace() # V = [v1, ..., vu] where each vj is a column matrix with # entries aj1, ..., ajm in Const(k). # Sum(aji*qi) is divisible by d with exact quotient Sum(aji*qqi). # Sum(ci*qi) is divisible by d if and only if ci = Sum(dj*aji) # (i = 1, ..., m) for some d1, ..., du in Const(k). # In that case, solutions of # a*Dp + b*p = Sum(ci*qi) = Sum(dj*Sum(aji*qi)) # are the same as those of # (a/d)*Dp + (b/d)*p = Sum(dj*rj) # where rj = Sum(aji*qqi). if not V: # No non-trivial solution. 
return [], eye(m) # Could return A, but this has # the minimum number of rows. Mqq = Matrix([qq]) # A single row. r = [(Mqq*vj)[0] for vj in V] # [r1, ..., ru] # Solutions of (a/d)*Dp + (b/d)*p = Sum(dj*rj) correspond to # solutions alpha*p + Sum(Sum(dj*aji)*betai) of the initial # equation. These are equal to alpha*p + Sum(dj*fj) where # fj = Sum(aji*betai). Mbeta = Matrix([beta]) f = [(Mbeta*vj)[0] for vj in V] # [f1, ..., fu] # # Solve the reduced equation recursively. # g, B = param_poly_rischDE(a.quo(d), b.quo(d), r, n, DE) # g = [g1, ..., gv] in k[t]^v and and B is a matrix with u + v # columns and entries in Const(k) such that # (a/d)*Dp + (b/d)*p = Sum(dj*rj) has a solution p of degree <= n # in k[t] if and only if p = Sum(ek*gk) where e1, ..., ev are in # Const(k) and B*Matrix([d1, ..., du, e1, ..., ev]) == 0. # The solutions of the original equation are then # Sum(dj*fj, (j, 1, u)) + alpha*Sum(ek*gk, (k, 1, v)). # Collect solution components. h = f + [alpha*gk for gk in g] # Build combined relation matrix. A = -eye(m) for vj in V: A = A.row_join(vj) A = A.row_join(zeros(m, len(g))) A = A.col_join(zeros(B.rows, m).row_join(B)) return h, A def param_rischDE(fa, fd, G, DE): """ Solve a Parametric Risch Differential Equation: Dy + f*y == Sum(ci*Gi, (i, 1, m)). Given a derivation D in k(t), f in k(t), and G = [G1, ..., Gm] in k(t)^m, return h = [h1, ..., hr] in k(t)^r and a matrix A with m + r columns and entries in Const(k) such that Dy + f*y = Sum(ci*Gi, (i, 1, m)) has a solution y in k(t) with c1, ..., cm in Const(k) if and only if y = Sum(dj*hj, (j, 1, r)) where d1, ..., dr are in Const(k) and (c1, ..., cm, d1, ..., dr) is a solution of Ax == 0. Elements of k(t) are tuples (a, d) with a and d in k[t]. """ m = len(G) q, (fa, fd) = weak_normalizer(fa, fd, DE) # Solutions of the weakly normalized equation Dz + f*z = q*Sum(ci*Gi) # correspond to solutions y = z/q of the original equation. gamma = q G = [(q*ga).cancel(gd, include=True) for ga, gd in G] a, (ba, bd), G, hn = prde_normal_denom(fa, fd, G, DE) # Solutions q in k<t> of a*Dq + b*q = Sum(ci*Gi) correspond # to solutions z = q/hn of the weakly normalized equation. gamma *= hn A, B, G, hs = prde_special_denom(a, ba, bd, G, DE) # Solutions p in k[t] of A*Dp + B*p = Sum(ci*Gi) correspond # to solutions q = p/hs of the previous equation. gamma *= hs g = A.gcd(B) a, b, g = A.quo(g), B.quo(g), [gia.cancel(gid*g, include=True) for gia, gid in G] # a*Dp + b*p = Sum(ci*gi) may have a polynomial solution # only if the sum is in k[t]. q, M = prde_linear_constraints(a, b, g, DE) # q = [q1, ..., qm] where qi in k[t] is the polynomial component # of the partial fraction expansion of gi. # M is a matrix with m columns and entries in k. # Sum(fi*gi, (i, 1, m)), where f1, ..., fm are elements of k, # is a polynomial if and only if M*Matrix([f1, ..., fm]) == 0, # in which case the sum is equal to Sum(fi*qi). M, _ = constant_system(M, zeros(M.rows, 1), DE) # M is a matrix with m columns and entries in Const(k). # Sum(ci*gi) is in k[t] for c1, ..., cm in Const(k) # if and only if M*Matrix([c1, ..., cm]) == 0, # in which case the sum is Sum(ci*qi). ## Reduce number of constants at this point V = M.nullspace() # V = [v1, ..., vu] where each vj is a column matrix with # entries aj1, ..., ajm in Const(k). # Sum(aji*gi) is in k[t] and equal to Sum(aji*qi) (j = 1, ..., u). # Sum(ci*gi) is in k[t] if and only is ci = Sum(dj*aji) # (i = 1, ..., m) for some d1, ..., du in Const(k). 
# In that case, # Sum(ci*gi) = Sum(ci*qi) = Sum(dj*Sum(aji*qi)) = Sum(dj*rj) # where rj = Sum(aji*qi) (j = 1, ..., u) in k[t]. if not V: # No non-trivial solution return [], eye(m) Mq = Matrix([q]) # A single row. r = [(Mq*vj)[0] for vj in V] # [r1, ..., ru] # Solutions of a*Dp + b*p = Sum(dj*rj) correspond to solutions # y = p/gamma of the initial equation with ci = Sum(dj*aji). try: # We try n=5. At least for prde_spde, it will always # terminate no matter what n is. n = bound_degree(a, b, r, DE, parametric=True) except NotImplementedError: # A temporary bound is set. Eventually, it will be removed. # the currently added test case takes large time # even with n=5, and much longer with large n's. n = 5 h, B = param_poly_rischDE(a, b, r, n, DE) # h = [h1, ..., hv] in k[t]^v and and B is a matrix with u + v # columns and entries in Const(k) such that # a*Dp + b*p = Sum(dj*rj) has a solution p of degree <= n # in k[t] if and only if p = Sum(ek*hk) where e1, ..., ev are in # Const(k) and B*Matrix([d1, ..., du, e1, ..., ev]) == 0. # The solutions of the original equation for ci = Sum(dj*aji) # (i = 1, ..., m) are then y = Sum(ek*hk, (k, 1, v))/gamma. ## Build combined relation matrix with m + u + v columns. A = -eye(m) for vj in V: A = A.row_join(vj) A = A.row_join(zeros(m, len(h))) A = A.col_join(zeros(B.rows, m).row_join(B)) ## Eliminate d1, ..., du. W = A.nullspace() # W = [w1, ..., wt] where each wl is a column matrix with # entries blk (k = 1, ..., m + u + v) in Const(k). # The vectors (bl1, ..., blm) generate the space of those # constant families (c1, ..., cm) for which a solution of # the equation Dy + f*y == Sum(ci*Gi) exists. They generate # the space and form a basis except possibly when Dy + f*y == 0 # is solvable in k(t}. The corresponding solutions are # y = Sum(blk'*hk, (k, 1, v))/gamma, where k' = k + m + u. v = len(h) M = Matrix([wl[:m] + wl[-v:] for wl in W]) # excise dj's. N = M.nullspace() # N = [n1, ..., ns] where the ni in Const(k)^(m + v) are column # vectors generating the space of linear relations between # c1, ..., cm, e1, ..., ev. C = Matrix([ni[:] for ni in N]) # rows n1, ..., ns. return [hk.cancel(gamma, include=True) for hk in h], C def limited_integrate_reduce(fa, fd, G, DE): """ Simpler version of step 1 & 2 for the limited integration problem. Given a derivation D on k(t) and f, g1, ..., gn in k(t), return (a, b, h, N, g, V) such that a, b, h in k[t], N is a non-negative integer, g in k(t), V == [v1, ..., vm] in k(t)^m, and for any solution v in k(t), c1, ..., cm in C of f == Dv + Sum(ci*wi, (i, 1, m)), p = v*h is in k<t>, and p and the ci satisfy a*Dp + b*p == g + Sum(ci*vi, (i, 1, m)). Furthermore, if S1irr == Sirr, then p is in k[t], and if t is nonlinear or Liouvillian over k, then deg(p) <= N. So that the special part is always computed, this function calls the more general prde_special_denom() automatically if it cannot determine that S1irr == Sirr. Furthermore, it will automatically call bound_degree() when t is linear and non-Liouvillian, which for the transcendental case, implies that Dt == a*t + b with for some a, b in k*. """ dn, ds = splitfactor(fd, DE) E = [splitfactor(gd, DE) for _, gd in G] En, Es = list(zip(*E)) c = reduce(lambda i, j: i.lcm(j), (dn,) + En) # lcm(dn, en1, ..., enm) hn = c.gcd(c.diff(DE.t)) a = hn b = -derivation(hn, DE) N = 0 # These are the cases where we know that S1irr = Sirr, but there could be # others, and this algorithm will need to be extended to handle them. 
if DE.case in ['base', 'primitive', 'exp', 'tan']: hs = reduce(lambda i, j: i.lcm(j), (ds,) + Es) # lcm(ds, es1, ..., esm) a = hn*hs b -= (hn*derivation(hs, DE)).quo(hs) mu = min(order_at_oo(fa, fd, DE.t), min([order_at_oo(ga, gd, DE.t) for ga, gd in G])) # So far, all the above are also nonlinear or Liouvillian, but if this # changes, then this will need to be updated to call bound_degree() # as per the docstring of this function (DE.case == 'other_linear'). N = hn.degree(DE.t) + hs.degree(DE.t) + max(0, 1 - DE.d.degree(DE.t) - mu) else: # TODO: implement this raise NotImplementedError V = [(-a*hn*ga).cancel(gd, include=True) for ga, gd in G] return (a, b, a, N, (a*hn*fa).cancel(fd, include=True), V) def limited_integrate(fa, fd, G, DE): """ Solves the limited integration problem: f = Dv + Sum(ci*wi, (i, 1, n)) """ fa, fd = fa*Poly(1/fd.LC(), DE.t), fd.monic() # interpretting limited integration problem as a # parametric Risch DE problem Fa = Poly(0, DE.t) Fd = Poly(1, DE.t) G = [(fa, fd)] + G h, A = param_rischDE(Fa, Fd, G, DE) V = A.nullspace() V = [v for v in V if v[0] != 0] if not V: return None else: # we can take any vector from V, we take V[0] c0 = V[0][0] # v = [-1, c1, ..., cm, d1, ..., dr] v = V[0]/(-c0) r = len(h) m = len(v) - r - 1 C = list(v[1: m + 1]) y = -sum([v[m + 1 + i]*h[i][0].as_expr()/h[i][1].as_expr() \ for i in range(r)]) y_num, y_den = y.as_numer_denom() Ya, Yd = Poly(y_num, DE.t), Poly(y_den, DE.t) Y = Ya*Poly(1/Yd.LC(), DE.t), Yd.monic() return Y, C def parametric_log_deriv_heu(fa, fd, wa, wd, DE, c1=None): """ Parametric logarithmic derivative heuristic. Given a derivation D on k[t], f in k(t), and a hyperexponential monomial theta over k(t), raises either NotImplementedError, in which case the heuristic failed, or returns None, in which case it has proven that no solution exists, or returns a solution (n, m, v) of the equation n*f == Dv/v + m*Dtheta/theta, with v in k(t)* and n, m in ZZ with n != 0. If this heuristic fails, the structure theorem approach will need to be used. The argument w == Dtheta/theta """ # TODO: finish writing this and write tests c1 = c1 or Dummy('c1') p, a = fa.div(fd) q, b = wa.div(wd) B = max(0, derivation(DE.t, DE).degree(DE.t) - 1) C = max(p.degree(DE.t), q.degree(DE.t)) if q.degree(DE.t) > B: eqs = [p.nth(i) - c1*q.nth(i) for i in range(B + 1, C + 1)] s = solve(eqs, c1) if not s or not s[c1].is_Rational: # deg(q) > B, no solution for c. return None M, N = s[c1].as_numer_denom() nfmwa = N*fa*wd - M*wa*fd nfmwd = fd*wd Qv = is_log_deriv_k_t_radical_in_field(N*fa*wd - M*wa*fd, fd*wd, DE, 'auto') if Qv is None: # (N*f - M*w) is not the logarithmic derivative of a k(t)-radical. return None Q, v = Qv if Q.is_zero or v.is_zero: return None return (Q*N, Q*M, v) if p.degree(DE.t) > B: return None c = lcm(fd.as_poly(DE.t).LC(), wd.as_poly(DE.t).LC()) l = fd.monic().lcm(wd.monic())*Poly(c, DE.t) ln, ls = splitfactor(l, DE) z = ls*ln.gcd(ln.diff(DE.t)) if not z.has(DE.t): # TODO: We treat this as 'no solution', until the structure # theorem version of parametric_log_deriv is implemented. return None u1, r1 = (fa*l.quo(fd)).div(z) # (l*f).div(z) u2, r2 = (wa*l.quo(wd)).div(z) # (l*w).div(z) eqs = [r1.nth(i) - c1*r2.nth(i) for i in range(z.degree(DE.t))] s = solve(eqs, c1) if not s or not s[c1].is_Rational: # deg(q) <= B, no solution for c. 
return None M, N = s[c1].as_numer_denom() nfmwa = N.as_poly(DE.t)*fa*wd - M.as_poly(DE.t)*wa*fd nfmwd = fd*wd Qv = is_log_deriv_k_t_radical_in_field(nfmwa, nfmwd, DE) if Qv is None: # (N*f - M*w) is not the logarithmic derivative of a k(t)-radical. return None Q, v = Qv if Q.is_zero or v.is_zero: return None return (Q*N, Q*M, v) def parametric_log_deriv(fa, fd, wa, wd, DE): # TODO: Write the full algorithm using the structure theorems. # try: A = parametric_log_deriv_heu(fa, fd, wa, wd, DE) # except NotImplementedError: # Heuristic failed, we have to use the full method. # TODO: This could be implemented more efficiently. # It isn't too worrisome, because the heuristic handles most difficult # cases. return A def is_deriv_k(fa, fd, DE): r""" Checks if Df/f is the derivative of an element of k(t). a in k(t) is the derivative of an element of k(t) if there exists b in k(t) such that a = Db. Either returns (ans, u), such that Df/f == Du, or None, which means that Df/f is not the derivative of an element of k(t). ans is a list of tuples such that Add(*[i*j for i, j in ans]) == u. This is useful for seeing exactly which elements of k(t) produce u. This function uses the structure theorem approach, which says that for any f in K, Df/f is the derivative of a element of K if and only if there are ri in QQ such that:: --- --- Dt \ r * Dt + \ r * i Df / i i / i --- = --. --- --- t f i in L i in E i K/C(x) K/C(x) Where C = Const(K), L_K/C(x) = { i in {1, ..., n} such that t_i is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i = Da_i/a_i, for some a_i in C(x)(t_1, ..., t_i-1)* } (i.e., the set of all indices of logarithmic monomials of K over C(x)), and E_K/C(x) = { i in {1, ..., n} such that t_i is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i/t_i = Da_i, for some a_i in C(x)(t_1, ..., t_i-1) } (i.e., the set of all indices of hyperexponential monomials of K over C(x)). If K is an elementary extension over C(x), then the cardinality of L_K/C(x) U E_K/C(x) is exactly the transcendence degree of K over C(x). Furthermore, because Const_D(K) == Const_D(C(x)) == C, deg(Dt_i) == 1 when t_i is in E_K/C(x) and deg(Dt_i) == 0 when t_i is in L_K/C(x), implying in particular that E_K/C(x) and L_K/C(x) are disjoint. The sets L_K/C(x) and E_K/C(x) must, by their nature, be computed recursively using this same function. Therefore, it is required to pass them as indices to D (or T). E_args are the arguments of the hyperexponentials indexed by E_K (i.e., if i is in E_K, then T[i] == exp(E_args[i])). This is needed to compute the final answer u such that Df/f == Du. log(f) will be the same as u up to a additive constant. This is because they will both behave the same as monomials. For example, both log(x) and log(2*x) == log(x) + log(2) satisfy Dt == 1/x, because log(2) is constant. Therefore, the term const is returned. const is such that log(const) + f == u. This is calculated by dividing the arguments of one logarithm from the other. Therefore, it is necessary to pass the arguments of the logarithmic terms in L_args. To handle the case where we are given Df/f, not f, use is_deriv_k_in_field(). 
See also ======== is_log_deriv_k_t_radical_in_field, is_log_deriv_k_t_radical """ # Compute Df/f dfa, dfd = (fd*derivation(fa, DE) - fa*derivation(fd, DE)), fd*fa dfa, dfd = dfa.cancel(dfd, include=True) # Our assumption here is that each monomial is recursively transcendental if len(DE.exts) != len(DE.D): if [i for i in DE.cases if i == 'tan'] or \ (set([i for i in DE.cases if i == 'primitive']) - set(DE.indices('log'))): raise NotImplementedError("Real version of the structure " "theorems with hypertangent support is not yet implemented.") # TODO: What should really be done in this case? raise NotImplementedError("Nonelementary extensions not supported " "in the structure theorems.") E_part = [DE.D[i].quo(Poly(DE.T[i], DE.T[i])).as_expr() for i in DE.indices('exp')] L_part = [DE.D[i].as_expr() for i in DE.indices('log')] lhs = Matrix([E_part + L_part]) rhs = Matrix([dfa.as_expr()/dfd.as_expr()]) A, u = constant_system(lhs, rhs, DE) if not all(derivation(i, DE, basic=True).is_zero for i in u) or not A: # If the elements of u are not all constant # Note: See comment in constant_system # Also note: derivation(basic=True) calls cancel() return None else: if not all(i.is_Rational for i in u): raise NotImplementedError("Cannot work with non-rational " "coefficients in this case.") else: terms = ([DE.extargs[i] for i in DE.indices('exp')] + [DE.T[i] for i in DE.indices('log')]) ans = list(zip(terms, u)) result = Add(*[Mul(i, j) for i, j in ans]) argterms = ([DE.T[i] for i in DE.indices('exp')] + [DE.extargs[i] for i in DE.indices('log')]) l = [] ld = [] for i, j in zip(argterms, u): # We need to get around things like sqrt(x**2) != x # and also sqrt(x**2 + 2*x + 1) != x + 1 # Issue 10798: i need not be a polynomial i, d = i.as_numer_denom() icoeff, iterms = sqf_list(i) l.append(Mul(*([Pow(icoeff, j)] + [Pow(b, e*j) for b, e in iterms]))) dcoeff, dterms = sqf_list(d) ld.append(Mul(*([Pow(dcoeff, j)] + [Pow(b, e*j) for b, e in dterms]))) const = cancel(fa.as_expr()/fd.as_expr()/Mul(*l)*Mul(*ld)) return (ans, result, const) def is_log_deriv_k_t_radical(fa, fd, DE, Df=True): r""" Checks if Df is the logarithmic derivative of a k(t)-radical. b in k(t) can be written as the logarithmic derivative of a k(t) radical if there exist n in ZZ and u in k(t) with n, u != 0 such that n*b == Du/u. Either returns (ans, u, n, const) or None, which means that Df cannot be written as the logarithmic derivative of a k(t)-radical. ans is a list of tuples such that Mul(*[i**j for i, j in ans]) == u. This is useful for seeing exactly what elements of k(t) produce u. This function uses the structure theorem approach, which says that for any f in K, Df is the logarithmic derivative of a K-radical if and only if there are ri in QQ such that:: --- --- Dt \ r * Dt + \ r * i / i i / i --- = Df. --- --- t i in L i in E i K/C(x) K/C(x) Where C = Const(K), L_K/C(x) = { i in {1, ..., n} such that t_i is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i = Da_i/a_i, for some a_i in C(x)(t_1, ..., t_i-1)* } (i.e., the set of all indices of logarithmic monomials of K over C(x)), and E_K/C(x) = { i in {1, ..., n} such that t_i is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i/t_i = Da_i, for some a_i in C(x)(t_1, ..., t_i-1) } (i.e., the set of all indices of hyperexponential monomials of K over C(x)). If K is an elementary extension over C(x), then the cardinality of L_K/C(x) U E_K/C(x) is exactly the transcendence degree of K over C(x). 
Furthermore, because Const_D(K) == Const_D(C(x)) == C, deg(Dt_i) == 1 when t_i is in E_K/C(x) and deg(Dt_i) == 0 when t_i is in L_K/C(x), implying in particular that E_K/C(x) and L_K/C(x) are disjoint. The sets L_K/C(x) and E_K/C(x) must, by their nature, be computed recursively using this same function. Therefore, it is required to pass them as indices to D (or T). L_args are the arguments of the logarithms indexed by L_K (i.e., if i is in L_K, then T[i] == log(L_args[i])). This is needed to compute the final answer u such that n*f == Du/u. exp(f) will be the same as u up to a multiplicative constant. This is because they will both behave the same as monomials. For example, both exp(x) and exp(x + 1) == E*exp(x) satisfy Dt == t. Therefore, the term const is returned. const is such that exp(const)*f == u. This is calculated by subtracting the arguments of one exponential from the other. Therefore, it is necessary to pass the arguments of the exponential terms in E_args. To handle the case where we are given Df, not f, use is_log_deriv_k_t_radical_in_field(). See also ======== is_log_deriv_k_t_radical_in_field, is_deriv_k """ H = [] if Df: dfa, dfd = (fd*derivation(fa, DE) - fa*derivation(fd, DE)).cancel(fd**2, include=True) else: dfa, dfd = fa, fd # Our assumption here is that each monomial is recursively transcendental if len(DE.exts) != len(DE.D): if [i for i in DE.cases if i == 'tan'] or \ (set([i for i in DE.cases if i == 'primitive']) - set(DE.indices('log'))): raise NotImplementedError("Real version of the structure " "theorems with hypertangent support is not yet implemented.") # TODO: What should really be done in this case? raise NotImplementedError("Nonelementary extensions not supported " "in the structure theorems.") E_part = [DE.D[i].quo(Poly(DE.T[i], DE.T[i])).as_expr() for i in DE.indices('exp')] L_part = [DE.D[i].as_expr() for i in DE.indices('log')] lhs = Matrix([E_part + L_part]) rhs = Matrix([dfa.as_expr()/dfd.as_expr()]) A, u = constant_system(lhs, rhs, DE) if not all(derivation(i, DE, basic=True).is_zero for i in u) or not A: # If the elements of u are not all constant # Note: See comment in constant_system # Also note: derivation(basic=True) calls cancel() return None else: if not all(i.is_Rational for i in u): # TODO: But maybe we can tell if they're not rational, like # log(2)/log(3). Also, there should be an option to continue # anyway, even if the result might potentially be wrong. raise NotImplementedError("Cannot work with non-rational " "coefficients in this case.") else: n = reduce(ilcm, [i.as_numer_denom()[1] for i in u]) u *= n terms = ([DE.T[i] for i in DE.indices('exp')] + [DE.extargs[i] for i in DE.indices('log')]) ans = list(zip(terms, u)) result = Mul(*[Pow(i, j) for i, j in ans]) # exp(f) will be the same as result up to a multiplicative # constant. We now find the log of that constant. argterms = ([DE.extargs[i] for i in DE.indices('exp')] + [DE.T[i] for i in DE.indices('log')]) const = cancel(fa.as_expr()/fd.as_expr() - Add(*[Mul(i, j/n) for i, j in zip(argterms, u)])) return (ans, result, n, const) def is_log_deriv_k_t_radical_in_field(fa, fd, DE, case='auto', z=None): """ Checks if f can be written as the logarithmic derivative of a k(t)-radical. It differs from is_log_deriv_k_t_radical(fa, fd, DE, Df=False) for any given fa, fd, DE in that it finds the solution in the given field not in some (possibly unspecified extension) and "in_field" with the function name is used to indicate that. 
f in k(t) can be written as the logarithmic derivative of a k(t) radical if there exist n in ZZ and u in k(t) with n, u != 0 such that n*f == Du/u. Either returns (n, u) or None, which means that f cannot be written as the logarithmic derivative of a k(t)-radical. case is one of {'primitive', 'exp', 'tan', 'auto'} for the primitive, hyperexponential, and hypertangent cases, respectively. If case is 'auto', it will attempt to determine the type of the derivation automatically. See also ======== is_log_deriv_k_t_radical, is_deriv_k """ fa, fd = fa.cancel(fd, include=True) # f must be simple n, s = splitfactor(fd, DE) if not s.is_one: pass z = z or Dummy('z') H, b = residue_reduce(fa, fd, DE, z=z) if not b: # I will have to verify, but I believe that the answer should be # None in this case. This should never happen for the # functions given when solving the parametric logarithmic # derivative problem when integration elementary functions (see # Bronstein's book, page 255), so most likely this indicates a bug. return None roots = [(i, i.real_roots()) for i, _ in H] if not all(len(j) == i.degree() and all(k.is_Rational for k in j) for i, j in roots): # If f is the logarithmic derivative of a k(t)-radical, then all the # roots of the resultant must be rational numbers. return None # [(a, i), ...], where i*log(a) is a term in the log-part of the integral # of f respolys, residues = list(zip(*roots)) or [[], []] # Note: this might be empty, but everything below should work find in that # case (it should be the same as if it were [[1, 1]]) residueterms = [(H[j][1].subs(z, i), i) for j in range(len(H)) for i in residues[j]] # TODO: finish writing this and write tests p = cancel(fa.as_expr()/fd.as_expr() - residue_reduce_derivation(H, DE, z)) p = p.as_poly(DE.t) if p is None: # f - Dg will be in k[t] if f is the logarithmic derivative of a k(t)-radical return None if p.degree(DE.t) >= max(1, DE.d.degree(DE.t)): return None if case == 'auto': case = DE.case if case == 'exp': wa, wd = derivation(DE.t, DE).cancel(Poly(DE.t, DE.t), include=True) with DecrementLevel(DE): pa, pd = frac_in(p, DE.t, cancel=True) wa, wd = frac_in((wa, wd), DE.t) A = parametric_log_deriv(pa, pd, wa, wd, DE) if A is None: return None n, e, u = A u *= DE.t**e elif case == 'primitive': with DecrementLevel(DE): pa, pd = frac_in(p, DE.t) A = is_log_deriv_k_t_radical_in_field(pa, pd, DE, case='auto') if A is None: return None n, u = A elif case == 'base': # TODO: we can use more efficient residue reduction from ratint() if not fd.is_sqf or fa.degree() >= fd.degree(): # f is the logarithmic derivative in the base case if and only if # f = fa/fd, fd is square-free, deg(fa) < deg(fd), and # gcd(fa, fd) == 1. The last condition is handled by cancel() above. return None # Note: if residueterms = [], returns (1, 1) # f had better be 0 in that case. n = reduce(ilcm, [i.as_numer_denom()[1] for _, i in residueterms], S(1)) u = Mul(*[Pow(i, j*n) for i, j in residueterms]) return (n, u) elif case == 'tan': raise NotImplementedError("The hypertangent case is " "not yet implemented for is_log_deriv_k_t_radical_in_field()") elif case in ['other_linear', 'other_nonlinear']: # XXX: If these are supported by the structure theorems, change to NotImplementedError. raise ValueError("The %s case is not supported in this function." 
% case) else: raise ValueError("case must be one of {'primitive', 'exp', 'tan', " "'base', 'auto'}, not %s" % case) common_denom = reduce(ilcm, [i.as_numer_denom()[1] for i in [j for _, j in residueterms]] + [n], S(1)) residueterms = [(i, j*common_denom) for i, j in residueterms] m = common_denom//n if common_denom != n*m: # Verify exact division raise ValueError("Inexact division") u = cancel(u**m*Mul(*[Pow(i, j) for i, j in residueterms])) return (common_denom, u)
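The routines in this module are rarely called directly; they back the parametric subproblems solved by the Risch algorithm. A minimal sanity check using only the public entry point (a sketch, assuming the standard sympy API):

from sympy import symbols, exp
from sympy.integrals.risch import risch_integrate

x = symbols('x')
# 2*x*exp(x**2) is the derivative of exp(x**2), so the Risch machinery,
# which relies on the parametric routines above for its subproblems,
# recovers an elementary antiderivative.
print(risch_integrate(2*x*exp(x**2), x))  # exp(x**2)
# exp(x**2) itself has no elementary antiderivative; the result comes back
# as an unevaluated NonElementaryIntegral.
print(risch_integrate(exp(x**2), x))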
3.03125
3
ssh_telnet/netmiko/ex07_netmiko_command_mult_prompts.py
levs72/pyneng-examples
11
1569
import yaml
import netmiko


def send_cmd_with_prompt(device, command, *, wait_for, confirmation):
    if isinstance(wait_for, str):
        wait_for = [wait_for]
    if isinstance(confirmation, str):
        confirmation = [confirmation]

    with netmiko.Netmiko(**device) as ssh:
        ssh.enable()
        result = ssh.send_command_timing(
            command, strip_prompt=False, strip_command=False
        )
        for wait, confirm in zip(wait_for, confirmation):
            if wait in result:
                result += ssh.send_command_timing(
                    confirm, strip_prompt=False, strip_command=False
                )
        return result


if __name__ == "__main__":
    with open("devices.yaml") as f:
        devices = yaml.safe_load(f)
    r1 = devices[0]
    out = send_cmd_with_prompt(
        r1, "copy run start", wait_for="Destination filename", confirmation="\n"
    )
    print(out)

"""
R1#copy run start
Destination filename [startup-config]?
Building configuration...
[OK]
R1#
"""
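Because send_cmd_with_prompt accepts lists, a command that raises several prompts in sequence can be scripted the same way. A hedged sketch (the flash filename and the exact prompt strings are illustrative assumptions about IOS behaviour):

# Illustrative only: 'delete' on Cisco IOS typically asks to confirm the
# filename and then whether to really delete it.
out = send_cmd_with_prompt(
    r1,
    "delete flash:test_file.txt",
    wait_for=["Delete filename", "confirm"],
    confirmation=["\n", "y"],
)
print(out)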
2.84375
3
mppi/Utilities/AttributeDict.py
marcodalessandro76/MPPI
1
1570
<filename>mppi/Utilities/AttributeDict.py<gh_stars>1-10
class AttributeDict(object):
    """
    A class to convert a nested dictionary into an object whose key-values are
    accessible using attribute notation (AttributeDict.attribute) instead of
    key notation (Dict["key"]). This class recursively converts nested dicts
    to objects, allowing you to recurse down nested dicts
    (like: AttributeDict.attr.attr).
    """
    def __init__(self, **entries):
        self.add_entries(**entries)

    def add_entries(self, **entries):
        for key, value in entries.items():
            if type(value) is dict:
                self.__dict__[key] = AttributeDict(**value)
            else:
                self.__dict__[key] = value

    def getAttributes(self):
        """
        Return all the attributes of the object
        """
        return self.__dict__.keys()
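A short usage sketch of the class above (the keys and values are arbitrary examples, not from the package):

config = AttributeDict(**{
    "run": {"name": "test", "steps": 10},
    "verbose": True,
})
print(config.run.name)               # 'test'  (nested dict became a nested AttributeDict)
print(config.run.steps)              # 10
print(config.verbose)                # True
print(list(config.getAttributes()))  # ['run', 'verbose']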
3.5
4
LightTestLoop.py
Growing-Beyond-Earth/GettingStarted
0
1571
# GROWING BEYOND EARTH CONTROL BOX Training
# RASPBERRY PI PICO / MICROPYTHON
# FAIRCHILD TROPICAL BOTANIC GARDEN, Oct 18, 2021

# The Growing Beyond Earth (GBE) control box is a device that controls
# the LED lights and fan in a GBE growth chamber. It can also control
# accessories including a 12v water pump and environmental sensors.
# The device is based on a Raspberry Pi Pico microcontroller running
# Micropython.

# Lesson written by @MarioTheMaker

from sys import stdin, stdout, exit
import machine
import time

# Set the brightness for each color
red_brightness = 100
green_brightness = 100
blue_brightness = 100
white_brightness = 100

# Pulse width modulation (PWM) is a way to get an artificial analog output on a digital pin.
# It achieves this by rapidly toggling the pin from low to high. There are two parameters
# associated with this: the frequency of the toggling, and the duty cycle.
# The duty cycle is defined to be how long the pin is high compared with the length of a
# single period (low plus high time). Maximum duty cycle is when the pin is high all of the
# time, and minimum is when it is low all of the time.
# https://projects.raspberrypi.org/en/projects/getting-started-with-the-pico/7#:

# control I/O pins
# machine.Pin(id, mode=-1, pull=-1, *, value, drive, alt)
# Access the pin peripheral (GPIO pin) associated with the given id.
# If additional arguments are given in the constructor then they are used to initialise
# the pin. Any settings that are not specified will remain in their previous state.
# More info https://docs.micropython.org/en/latest/library/machine.Pin.html

r = machine.PWM(machine.Pin(0)); r.freq(20000)  # Red channel
g = machine.PWM(machine.Pin(2)); g.freq(20000)  # Green channel
b = machine.PWM(machine.Pin(1)); b.freq(20000)  # Blue channel
w = machine.PWM(machine.Pin(3)); w.freq(20000)  # White channel
# More info https://docs.micropython.org/en/latest/library/machine.PWM.html

# Start a loop and change the brightness multiplier "n".
# PWM.duty_u16([value]) gets or sets the duty cycle of the PWM output,
# as an unsigned 16-bit value in the range 0 to 65535 inclusive.
n = 100
while n > 0:
    print("Power Level ", n)
    r.duty_u16(int(red_brightness)*n)
    g.duty_u16(int(green_brightness)*n)
    b.duty_u16(int(blue_brightness)*n)
    w.duty_u16(int(white_brightness)*n)
    time.sleep(.3)
    n = n - 5

# Turn all the lights off
time.sleep(3)
r.duty_u16(0)
g.duty_u16(0)
b.duty_u16(0)
w.duty_u16(0)
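To make the duty-cycle arithmetic concrete: with brightness 100 and power level n = 100, the loop above calls duty_u16(10000), which is roughly 15% of the 0-65535 range. A hedged helper (not part of the lesson) that maps a 0-100 percentage straight onto the full 16-bit range might look like this:

def percent_to_u16(percent):
    # Map 0-100 % to the 0-65535 range expected by PWM.duty_u16().
    return int(percent * 65535 / 100)

r.duty_u16(percent_to_u16(50))  # red channel at roughly 50 % duty cycle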
3.453125
3
core/known_bugs_utils.py
nicolasbock/hotsos
0
1572
<gh_stars>0 import os import yaml from core import plugintools from core import constants from core.searchtools import SearchDef from core.issues.issue_utils import IssueEntry LAUNCHPAD = "launchpad" MASTER_YAML_KNOWN_BUGS_KEY = "bugs-detected" KNOWN_BUGS = {MASTER_YAML_KNOWN_BUGS_KEY: []} class BugSearchDef(SearchDef): def __init__(self, pattern, bug_id, hint, reason, reason_format_result_groups=None): """ @param reason: string reason describing the issue and why it has been flagged. This string can be a template i.e. containing {} fields that can be rendered using results. @param reason_format_result_groups: if the reason string is a template, this is a list of indexes in the results that can be extracted for inclusion in the reason. """ super().__init__(pattern, tag=bug_id, hint=hint) self._reason = reason if reason is None: self._reason = "" self.reason_format_result_groups = reason_format_result_groups @property def reason(self): return self._reason def rendered_reason(self, search_result): if self._reason and self.reason_format_result_groups: values = [] for idx in self.reason_format_result_groups: values.append(search_result.get(idx)) return self._reason.format(*values) return self._reason def _get_known_bugs(): """ Fetch the current plugin known_bugs.yaml if it exists and return its contents or None if it doesn't exist yet. """ if not os.path.isdir(constants.PLUGIN_TMP_DIR): raise Exception("plugin tmp dir '{}' not found". format(constants.PLUGIN_TMP_DIR)) known_bugs_yaml = os.path.join(constants.PLUGIN_TMP_DIR, "known_bugs.yaml") if not os.path.exists(known_bugs_yaml): return {} bugs = yaml.safe_load(open(known_bugs_yaml)) if bugs and bugs.get(MASTER_YAML_KNOWN_BUGS_KEY): return bugs return {} def add_known_bug(bug_id, description=None, type=LAUNCHPAD): """ Fetch the current plugin known_bugs.yaml if it exists and add new bug with description of the bug. """ if not os.path.isdir(constants.PLUGIN_TMP_DIR): raise Exception("plugin tmp dir '{}' not found". format(constants.PLUGIN_TMP_DIR)) if type == LAUNCHPAD: new_bug = "https://bugs.launchpad.net/bugs/{}".format(bug_id) if description is None: description = "no description provided" entry = IssueEntry(new_bug, description, key="id") current = _get_known_bugs() if current and current.get(MASTER_YAML_KNOWN_BUGS_KEY): current[MASTER_YAML_KNOWN_BUGS_KEY].append(entry.data) else: current = {MASTER_YAML_KNOWN_BUGS_KEY: [entry.data]} known_bugs_yaml = os.path.join(constants.PLUGIN_TMP_DIR, "known_bugs.yaml") with open(known_bugs_yaml, 'w') as fd: fd.write(yaml.dump(current)) def add_known_bugs_to_master_plugin(): """ Fetch the current plugin known_bugs.yaml and add it to the master yaml. Note that this can only be called once per plugin and is typically performed as a final part after all others have executed. """ bugs = _get_known_bugs() if bugs and bugs.get(MASTER_YAML_KNOWN_BUGS_KEY): plugintools.save_part(bugs, priority=99)
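A minimal usage sketch of the helpers above (the bug id and description are invented for illustration, and constants.PLUGIN_TMP_DIR must already exist, as the code requires):

# Record a Launchpad bug for the current plugin run, then re-read the YAML.
add_known_bug(1867119, description="neutron-ovs-cleanup race on reboot")
bugs = _get_known_bugs()
print(bugs[MASTER_YAML_KNOWN_BUGS_KEY])  # list with one entry for the bug above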
2.28125
2
examples/xml-rpc/echoserver.py
keobox/yap101
0
1573
import SimpleXMLRPCServer as xmls


def echo(msg):
    print 'Got', msg
    return msg


class echoserver(xmls.SimpleXMLRPCServer):
    allow_reuse_address = True


server = echoserver(('127.0.0.1', 8001))
server.register_function(echo, 'echo')
print 'Listening on port 8001'
try:
    server.serve_forever()
except:
    server.server_close()
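A matching Python 2 client for the server above (a sketch to run in a second terminal; the message text is arbitrary):

import xmlrpclib

proxy = xmlrpclib.ServerProxy('http://127.0.0.1:8001')
# The server prints "Got hello" and echoes the message back.
print proxy.echo('hello')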
2.953125
3
tf_pose/slim/nets/mobilenet/mobilenet_v2_test.py
gpspelle/pose-estimation
862
1574
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for mobilenet_v2.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import tensorflow as tf from nets.mobilenet import conv_blocks as ops from nets.mobilenet import mobilenet from nets.mobilenet import mobilenet_v2 slim = tf.contrib.slim def find_ops(optype): """Find ops of a given type in graphdef or a graph. Args: optype: operation type (e.g. Conv2D) Returns: List of operations. """ gd = tf.get_default_graph() return [var for var in gd.get_operations() if var.type == optype] class MobilenetV2Test(tf.test.TestCase): def setUp(self): tf.reset_default_graph() def testCreation(self): spec = dict(mobilenet_v2.V2_DEF) _, ep = mobilenet.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec) num_convs = len(find_ops('Conv2D')) # This is mostly a sanity test. No deep reason for these particular # constants. # # All but first 2 and last one have two convolutions, and there is one # extra conv that is not in the spec. (logits) self.assertEqual(num_convs, len(spec['spec']) * 2 - 2) # Check that depthwise are exposed. for i in range(2, 17): self.assertIn('layer_%d/depthwise_output' % i, ep) def testCreationNoClasses(self): spec = copy.deepcopy(mobilenet_v2.V2_DEF) net, ep = mobilenet.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec, num_classes=None) self.assertIs(net, ep['global_pool']) def testImageSizes(self): for input_size, output_size in [(224, 7), (192, 6), (160, 5), (128, 4), (96, 3)]: tf.reset_default_graph() _, ep = mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, input_size, input_size, 3))) self.assertEqual(ep['layer_18/output'].get_shape().as_list()[1:3], [output_size] * 2) def testWithSplits(self): spec = copy.deepcopy(mobilenet_v2.V2_DEF) spec['overrides'] = { (ops.expanded_conv,): dict(split_expansion=2), } _, _ = mobilenet.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec) num_convs = len(find_ops('Conv2D')) # All but 3 op has 3 conv operatore, the remainign 3 have one # and there is one unaccounted. 
self.assertEqual(num_convs, len(spec['spec']) * 3 - 5) def testWithOutputStride8(self): out, _ = mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=8, scope='MobilenetV2') self.assertEqual(out.get_shape().as_list()[1:3], [28, 28]) def testDivisibleBy(self): tf.reset_default_graph() mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, divisible_by=16, min_depth=32) s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')] s = set(s) self.assertSameElements([32, 64, 96, 160, 192, 320, 384, 576, 960, 1280, 1001], s) def testDivisibleByWithArgScope(self): tf.reset_default_graph() # Verifies that depth_multiplier arg scope actually works # if no default min_depth is provided. with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32): mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 2)), conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1) s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')] s = set(s) self.assertSameElements(s, [32, 192, 128, 1001]) def testFineGrained(self): tf.reset_default_graph() # Verifies that depth_multiplier arg scope actually works # if no default min_depth is provided. mobilenet_v2.mobilenet( tf.placeholder(tf.float32, (10, 224, 224, 2)), conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.01, finegrain_classification_mode=True) s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')] s = set(s) # All convolutions will be 8->48, except for the last one. self.assertSameElements(s, [8, 48, 1001, 1280]) def testMobilenetBase(self): tf.reset_default_graph() # Verifies that mobilenet_base returns pre-pooling layer. with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32): net, _ = mobilenet_v2.mobilenet_base( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1) self.assertEqual(net.get_shape().as_list(), [10, 7, 7, 128]) def testWithOutputStride16(self): tf.reset_default_graph() out, _ = mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=16) self.assertEqual(out.get_shape().as_list()[1:3], [14, 14]) def testWithOutputStride8AndExplicitPadding(self): tf.reset_default_graph() out, _ = mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=8, use_explicit_padding=True, scope='MobilenetV2') self.assertEqual(out.get_shape().as_list()[1:3], [28, 28]) def testWithOutputStride16AndExplicitPadding(self): tf.reset_default_graph() out, _ = mobilenet.mobilenet_base( tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=mobilenet_v2.V2_DEF, output_stride=16, use_explicit_padding=True) self.assertEqual(out.get_shape().as_list()[1:3], [14, 14]) def testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone(self): sc = mobilenet.training_scope(is_training=None) self.assertNotIn('is_training', sc[slim.arg_scope_func_key( slim.batch_norm)]) def testBatchNormScopeDoesHasIsTrainingWhenItsNotNone(self): sc = mobilenet.training_scope(is_training=False) self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)]) sc = mobilenet.training_scope(is_training=True) self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)]) sc = mobilenet.training_scope() self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)]) if __name__ == '__main__': tf.test.main()
2.0625
2
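For context, the calls these tests exercise boil down to the sketch below (TF1 graph mode; `nets.mobilenet` is the slim package the tests import from, and the endpoint name is taken from testImageSizes).

# Illustrative sketch of the API exercised by the tests above; shapes follow testImageSizes.
import tensorflow as tf
from nets.mobilenet import mobilenet_v2

images = tf.placeholder(tf.float32, (1, 224, 224, 3))
logits, endpoints = mobilenet_v2.mobilenet(images)   # 1001-class logits by default
features = endpoints['layer_18/output']              # 7x7 pre-logits feature map for 224x224 input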
firebase-gist.py
darwin/firebase-gist
1
1575
from firebase import firebase import os import datetime import json import logging from boto.s3.connection import S3Connection from boto.s3.key import Key from github3 import login firebase_url = os.environ['FIREBASE_DB'] firebase_secret = os.environ['FIREBASE_SECRET'] firebase_path = os.environ['FIREBASE_PATH'] firebase_username = os.environ['FIREBASE_USERNAME'] # not checked ATM gh_token = os.environ['GH_TOKEN'] gh_gist = os.environ['GH_GIST'] gh_fname = os.environ['GH_FNAME'] logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) def connect_firebase(): f = firebase.FirebaseApplication(firebase_url, None) f.authentication = firebase.FirebaseAuthentication(firebase_secret, firebase_username, admin=True) return f logger.info('==================================') logger.info('Fetching firebase data') f = connect_firebase() data = f.get(firebase_path, None) new_content = json.dumps(data, ensure_ascii=False, indent=2, sort_keys=True) logger.info('Reading existing gist') gh = login(token=gh_token) gist = gh.gist(gh_gist) old_content = "" for f in gist.iter_files(): if f.filename == gh_fname: old_content = f.content break if old_content == new_content: logger.info('No changes detected') else: logger.info('Updating gist with new content') gist.edit(files={ gh_fname: { "content": new_content } }) logger.info('Done.')
2.421875
2
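The script above is configured entirely through environment variables; here is a sketch of the minimum set it reads, with the names taken directly from the os.environ lookups and placeholder values.

# Placeholder values only; the variable names mirror the os.environ reads in the script.
import os

os.environ.update({
    "FIREBASE_DB": "https://example.firebaseio.com",
    "FIREBASE_SECRET": "<secret>",
    "FIREBASE_PATH": "/export/path",
    "FIREBASE_USERNAME": "bot@example.com",  # read but "not checked ATM", per the script's comment
    "GH_TOKEN": "<github token>",
    "GH_GIST": "<gist id>",
    "GH_FNAME": "data.json",
})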
practice/2008/qualification/C-Fly_swatter/c.py
victorWeiFreelancer/CodeJam
0
1576
import sys sys.dont_write_bytecode = True def hitP(f, R, t, r, g): if f >= g/2: return 0.0 missArea = 0.0 gridL = g+2*r nGrids = (R - t) // gridL missGridSideLength = g - 2*f print("gridL %.12f; nGrids %d" %(gridL, nGrids) ) indentSquareLength = nGrids*gridL remain = (R - t) - indentSquareLength missArea += (nGrids * missGridSideLength)**2 remainMissArea = 0 # placeholder: the partial grid strip near the rim was never computed in this draft if remain - 2*r > 2*f and remain > g+r: missArea += remainMissArea totalArea = R**2 / 4.0 print( "missed a %.12f, total area %.12f" %(missArea, (R-t)**2) ) return (totalArea - missArea) / (R-t)**2 def main(): numTestCases = int(input()) for i in range(numTestCases): f, R, t, r, g = list(map(float, input().split())) p = hitP(f, R, t, r, g) print( "Case #%d: %.6f" %(i+1, p)) if __name__ == '__main__': main()
2.9375
3
synapse/notifier.py
rkfg/synapse
1
1577
<gh_stars>1-10 # -*- coding: utf-8 -*- # Copyright 2014 - 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from collections import namedtuple from typing import ( Awaitable, Callable, Dict, Iterable, List, Optional, Set, Tuple, TypeVar, Union, ) import attr from prometheus_client import Counter from twisted.internet import defer import synapse.server from synapse.api.constants import EventTypes, HistoryVisibility, Membership from synapse.api.errors import AuthError from synapse.events import EventBase from synapse.handlers.presence import format_user_presence_state from synapse.logging.context import PreserveLoggingContext from synapse.logging.opentracing import log_kv, start_active_span from synapse.logging.utils import log_function from synapse.metrics import LaterGauge from synapse.streams.config import PaginationConfig from synapse.types import ( Collection, PersistedEventPosition, RoomStreamToken, StreamToken, UserID, ) from synapse.util.async_helpers import ObservableDeferred, timeout_deferred from synapse.util.metrics import Measure from synapse.visibility import filter_events_for_client logger = logging.getLogger(__name__) notified_events_counter = Counter("synapse_notifier_notified_events", "") users_woken_by_stream_counter = Counter( "synapse_notifier_users_woken_by_stream", "", ["stream"] ) T = TypeVar("T") # TODO(paul): Should be shared somewhere def count(func: Callable[[T], bool], it: Iterable[T]) -> int: """Return the number of items in it for which func returns true.""" n = 0 for x in it: if func(x): n += 1 return n class _NotificationListener: """This represents a single client connection to the events stream. The events stream handler will have yielded to the deferred, so to notify the handler it is sufficient to resolve the deferred. """ __slots__ = ["deferred"] def __init__(self, deferred): self.deferred = deferred class _NotifierUserStream: """This represents a user connected to the event stream. It tracks the most recent stream token for that user. At a given point a user may have a number of streams listening for events. This listener will also keep track of which rooms it is listening in so that it can remove itself from the indexes in the Notifier class. """ def __init__( self, user_id: str, rooms: Collection[str], current_token: StreamToken, time_now_ms: int, ): self.user_id = user_id self.rooms = set(rooms) self.current_token = current_token # The last token for which we should wake up any streams that have a # token that comes before it. This gets updated every time we get poked. # We start it at the current token since if we get any streams # that have a token from before we have no idea whether they should be # woken up or not, so lets just wake them up. 
self.last_notified_token = current_token self.last_notified_ms = time_now_ms with PreserveLoggingContext(): self.notify_deferred = ObservableDeferred(defer.Deferred()) def notify( self, stream_key: str, stream_id: Union[int, RoomStreamToken], time_now_ms: int, ): """Notify any listeners for this user of a new event from an event source. Args: stream_key: The stream the event came from. stream_id: The new id for the stream the event came from. time_now_ms: The current time in milliseconds. """ self.current_token = self.current_token.copy_and_advance(stream_key, stream_id) self.last_notified_token = self.current_token self.last_notified_ms = time_now_ms noify_deferred = self.notify_deferred log_kv( { "notify": self.user_id, "stream": stream_key, "stream_id": stream_id, "listeners": self.count_listeners(), } ) users_woken_by_stream_counter.labels(stream_key).inc() with PreserveLoggingContext(): self.notify_deferred = ObservableDeferred(defer.Deferred()) noify_deferred.callback(self.current_token) def remove(self, notifier: "Notifier"): """Remove this listener from all the indexes in the Notifier it knows about. """ for room in self.rooms: lst = notifier.room_to_user_streams.get(room, set()) lst.discard(self) notifier.user_to_user_stream.pop(self.user_id) def count_listeners(self) -> int: return len(self.notify_deferred.observers()) def new_listener(self, token: StreamToken) -> _NotificationListener: """Returns a deferred that is resolved when there is a new token greater than the given token. Args: token: The token from which we are streaming from, i.e. we shouldn't notify for things that happened before this. """ # Immediately wake up stream if something has already since happened # since their last token. if self.last_notified_token != token: return _NotificationListener(defer.succeed(self.current_token)) else: return _NotificationListener(self.notify_deferred.observe()) class EventStreamResult(namedtuple("EventStreamResult", ("events", "tokens"))): def __bool__(self): return bool(self.events) @attr.s(slots=True, frozen=True) class _PendingRoomEventEntry: event_pos = attr.ib(type=PersistedEventPosition) extra_users = attr.ib(type=Collection[UserID]) room_id = attr.ib(type=str) type = attr.ib(type=str) state_key = attr.ib(type=Optional[str]) membership = attr.ib(type=Optional[str]) class Notifier: """This class is responsible for notifying any listeners when there are new events available for it. Primarily used from the /events stream. """ UNUSED_STREAM_EXPIRY_MS = 10 * 60 * 1000 def __init__(self, hs: "synapse.server.HomeServer"): self.user_to_user_stream = {} # type: Dict[str, _NotifierUserStream] self.room_to_user_streams = {} # type: Dict[str, Set[_NotifierUserStream]] self.hs = hs self.storage = hs.get_storage() self.event_sources = hs.get_event_sources() self.store = hs.get_datastore() self.pending_new_room_events = [] # type: List[_PendingRoomEventEntry] # Called when there are new things to stream over replication self.replication_callbacks = [] # type: List[Callable[[], None]] # Called when remote servers have come back online after having been # down. 
self.remote_server_up_callbacks = [] # type: List[Callable[[str], None]] self.clock = hs.get_clock() self.appservice_handler = hs.get_application_service_handler() self._pusher_pool = hs.get_pusherpool() self.federation_sender = None if hs.should_send_federation(): self.federation_sender = hs.get_federation_sender() self.state_handler = hs.get_state_handler() self.clock.looping_call( self.remove_expired_streams, self.UNUSED_STREAM_EXPIRY_MS ) # This is not a very cheap test to perform, but it's only executed # when rendering the metrics page, which is likely once per minute at # most when scraping it. def count_listeners(): all_user_streams = set() # type: Set[_NotifierUserStream] for streams in list(self.room_to_user_streams.values()): all_user_streams |= streams for stream in list(self.user_to_user_stream.values()): all_user_streams.add(stream) return sum(stream.count_listeners() for stream in all_user_streams) LaterGauge("synapse_notifier_listeners", "", [], count_listeners) LaterGauge( "synapse_notifier_rooms", "", [], lambda: count(bool, list(self.room_to_user_streams.values())), ) LaterGauge( "synapse_notifier_users", "", [], lambda: len(self.user_to_user_stream) ) def add_replication_callback(self, cb: Callable[[], None]): """Add a callback that will be called when some new data is available. Callback is not given any arguments. It should *not* return a Deferred - if it needs to do any asynchronous work, a background thread should be started and wrapped with run_as_background_process. """ self.replication_callbacks.append(cb) def on_new_room_event( self, event: EventBase, event_pos: PersistedEventPosition, max_room_stream_token: RoomStreamToken, extra_users: Optional[Collection[UserID]] = None, ): """Unwraps event and calls `on_new_room_event_args`.""" self.on_new_room_event_args( event_pos=event_pos, room_id=event.room_id, event_type=event.type, state_key=event.get("state_key"), membership=event.content.get("membership"), max_room_stream_token=max_room_stream_token, extra_users=extra_users or [], ) def on_new_room_event_args( self, room_id: str, event_type: str, state_key: Optional[str], membership: Optional[str], event_pos: PersistedEventPosition, max_room_stream_token: RoomStreamToken, extra_users: Optional[Collection[UserID]] = None, ): """Used by handlers to inform the notifier something has happened in the room, room event wise. This triggers the notifier to wake up any listeners that are listening to the room, and any listeners for the users in the `extra_users` param. The events can be peristed out of order. The notifier will wait until all previous events have been persisted before notifying the client streams. """ self.pending_new_room_events.append( _PendingRoomEventEntry( event_pos=event_pos, extra_users=extra_users or [], room_id=room_id, type=event_type, state_key=state_key, membership=membership, ) ) self._notify_pending_new_room_events(max_room_stream_token) self.notify_replication() def _notify_pending_new_room_events(self, max_room_stream_token: RoomStreamToken): """Notify for the room events that were queued waiting for a previous event to be persisted. Args: max_room_stream_token: The highest stream_id below which all events have been persisted. 
""" pending = self.pending_new_room_events self.pending_new_room_events = [] users = set() # type: Set[UserID] rooms = set() # type: Set[str] for entry in pending: if entry.event_pos.persisted_after(max_room_stream_token): self.pending_new_room_events.append(entry) else: if ( entry.type == EventTypes.Member and entry.membership == Membership.JOIN and entry.state_key ): self._user_joined_room(entry.state_key, entry.room_id) users.update(entry.extra_users) rooms.add(entry.room_id) if users or rooms: self.on_new_event( "room_key", max_room_stream_token, users=users, rooms=rooms, ) self._on_updated_room_token(max_room_stream_token) def _on_updated_room_token(self, max_room_stream_token: RoomStreamToken): """Poke services that might care that the room position has been updated. """ # poke any interested application service. self._notify_app_services(max_room_stream_token) self._notify_pusher_pool(max_room_stream_token) if self.federation_sender: self.federation_sender.notify_new_events(max_room_stream_token) def _notify_app_services(self, max_room_stream_token: RoomStreamToken): try: self.appservice_handler.notify_interested_services(max_room_stream_token) except Exception: logger.exception("Error notifying application services of event") def _notify_app_services_ephemeral( self, stream_key: str, new_token: Union[int, RoomStreamToken], users: Optional[Collection[Union[str, UserID]]] = None, ): try: stream_token = None if isinstance(new_token, int): stream_token = new_token self.appservice_handler.notify_interested_services_ephemeral( stream_key, stream_token, users or [] ) except Exception: logger.exception("Error notifying application services of event") def _notify_pusher_pool(self, max_room_stream_token: RoomStreamToken): try: self._pusher_pool.on_new_notifications(max_room_stream_token) except Exception: logger.exception("Error pusher pool of event") def on_new_event( self, stream_key: str, new_token: Union[int, RoomStreamToken], users: Optional[Collection[Union[str, UserID]]] = None, rooms: Optional[Collection[str]] = None, ): """Used to inform listeners that something has happened event wise. Will wake up all listeners for the given users and rooms. """ users = users or [] rooms = rooms or [] with Measure(self.clock, "on_new_event"): user_streams = set() log_kv( { "waking_up_explicit_users": len(users), "waking_up_explicit_rooms": len(rooms), } ) for user in users: user_stream = self.user_to_user_stream.get(str(user)) if user_stream is not None: user_streams.add(user_stream) for room in rooms: user_streams |= self.room_to_user_streams.get(room, set()) time_now_ms = self.clock.time_msec() for user_stream in user_streams: try: user_stream.notify(stream_key, new_token, time_now_ms) except Exception: logger.exception("Failed to notify listener") self.notify_replication() # Notify appservices self._notify_app_services_ephemeral( stream_key, new_token, users, ) def on_new_replication_data(self) -> None: """Used to inform replication listeners that something has happened without waking up any of the normal user event streams""" self.notify_replication() async def wait_for_events( self, user_id: str, timeout: int, callback: Callable[[StreamToken, StreamToken], Awaitable[T]], room_ids=None, from_token=StreamToken.START, ) -> T: """Wait until the callback returns a non empty response or the timeout fires. 
""" user_stream = self.user_to_user_stream.get(user_id) if user_stream is None: current_token = self.event_sources.get_current_token() if room_ids is None: room_ids = await self.store.get_rooms_for_user(user_id) user_stream = _NotifierUserStream( user_id=user_id, rooms=room_ids, current_token=current_token, time_now_ms=self.clock.time_msec(), ) self._register_with_keys(user_stream) result = None prev_token = from_token if timeout: end_time = self.clock.time_msec() + timeout while not result: try: now = self.clock.time_msec() if end_time <= now: break # Now we wait for the _NotifierUserStream to be told there # is a new token. listener = user_stream.new_listener(prev_token) listener.deferred = timeout_deferred( listener.deferred, (end_time - now) / 1000.0, self.hs.get_reactor(), ) with start_active_span("wait_for_events.deferred"): log_kv( { "wait_for_events": "sleep", "token": prev_token, } ) with PreserveLoggingContext(): await listener.deferred log_kv( { "wait_for_events": "woken", "token": user_stream.current_token, } ) current_token = user_stream.current_token result = await callback(prev_token, current_token) log_kv( { "wait_for_events": "result", "result": bool(result), } ) if result: break # Update the prev_token to the current_token since nothing # has happened between the old prev_token and the current_token prev_token = current_token except defer.TimeoutError: log_kv({"wait_for_events": "timeout"}) break except defer.CancelledError: log_kv({"wait_for_events": "cancelled"}) break if result is None: # This happened if there was no timeout or if the timeout had # already expired. current_token = user_stream.current_token result = await callback(prev_token, current_token) return result async def get_events_for( self, user: UserID, pagination_config: PaginationConfig, timeout: int, is_guest: bool = False, explicit_room_id: Optional[str] = None, ) -> EventStreamResult: """For the given user and rooms, return any new events for them. If there are no new events wait for up to `timeout` milliseconds for any new events to happen before returning. If explicit_room_id is not set, the user's joined rooms will be polled for events. If explicit_room_id is set, that room will be polled for events only if it is world readable or the user has joined the room. 
""" if pagination_config.from_token: from_token = pagination_config.from_token else: from_token = self.event_sources.get_current_token() limit = pagination_config.limit room_ids, is_joined = await self._get_room_ids(user, explicit_room_id) is_peeking = not is_joined async def check_for_updates( before_token: StreamToken, after_token: StreamToken ) -> EventStreamResult: if after_token == before_token: return EventStreamResult([], (from_token, from_token)) events = [] # type: List[EventBase] end_token = from_token for name, source in self.event_sources.sources.items(): keyname = "%s_key" % name before_id = getattr(before_token, keyname) after_id = getattr(after_token, keyname) if before_id == after_id: continue new_events, new_key = await source.get_new_events( user=user, from_key=getattr(from_token, keyname), limit=limit, is_guest=is_peeking, room_ids=room_ids, explicit_room_id=explicit_room_id, ) if name == "room": new_events = await filter_events_for_client( self.storage, user.to_string(), new_events, is_peeking=is_peeking, ) elif name == "presence": now = self.clock.time_msec() new_events[:] = [ { "type": "m.presence", "content": format_user_presence_state(event, now), } for event in new_events ] events.extend(new_events) end_token = end_token.copy_and_replace(keyname, new_key) return EventStreamResult(events, (from_token, end_token)) user_id_for_stream = user.to_string() if is_peeking: # Internally, the notifier keeps an event stream per user_id. # This is used by both /sync and /events. # We want /events to be used for peeking independently of /sync, # without polluting its contents. So we invent an illegal user ID # (which thus cannot clash with any real users) for keying peeking # over /events. # # I am sorry for what I have done. user_id_for_stream = "_PEEKING_%s_%s" % ( explicit_room_id, user_id_for_stream, ) result = await self.wait_for_events( user_id_for_stream, timeout, check_for_updates, room_ids=room_ids, from_token=from_token, ) return result async def _get_room_ids( self, user: UserID, explicit_room_id: Optional[str] ) -> Tuple[Collection[str], bool]: joined_room_ids = await self.store.get_rooms_for_user(user.to_string()) if explicit_room_id: if explicit_room_id in joined_room_ids: return [explicit_room_id], True if await self._is_world_readable(explicit_room_id): return [explicit_room_id], False raise AuthError(403, "Non-joined access not allowed") return joined_room_ids, True async def _is_world_readable(self, room_id: str) -> bool: state = await self.state_handler.get_current_state( room_id, EventTypes.RoomHistoryVisibility, "" ) if state and "history_visibility" in state.content: return ( state.content["history_visibility"] == HistoryVisibility.WORLD_READABLE ) else: return False @log_function def remove_expired_streams(self) -> None: time_now_ms = self.clock.time_msec() expired_streams = [] expire_before_ts = time_now_ms - self.UNUSED_STREAM_EXPIRY_MS for stream in self.user_to_user_stream.values(): if stream.count_listeners(): continue if stream.last_notified_ms < expire_before_ts: expired_streams.append(stream) for expired_stream in expired_streams: expired_stream.remove(self) @log_function def _register_with_keys(self, user_stream: _NotifierUserStream): self.user_to_user_stream[user_stream.user_id] = user_stream for room in user_stream.rooms: s = self.room_to_user_streams.setdefault(room, set()) s.add(user_stream) def _user_joined_room(self, user_id: str, room_id: str): new_user_stream = self.user_to_user_stream.get(user_id) if new_user_stream is not None: 
room_streams = self.room_to_user_streams.setdefault(room_id, set()) room_streams.add(new_user_stream) new_user_stream.rooms.add(room_id) def notify_replication(self) -> None: """Notify any replication listeners that there's a new event""" for cb in self.replication_callbacks: cb() def notify_remote_server_up(self, server: str): """Notify any replication listeners that a remote server has come back up""" # We call federation_sender directly rather than registering as a # callback as a) we already have a reference to it and b) registering one # would introduce circular dependencies. if self.federation_sender: self.federation_sender.wake_destination(server)
1.640625
2
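A small, hedged sketch of the replication hook described in add_replication_callback's docstring above; `hs` and its get_notifier() accessor are assumed to come from an already-built synapse.server.HomeServer, which is not constructed here.

# Sketch only: `hs` (a HomeServer) is assumed to exist; the callback contract
# (no arguments, no Deferred return) comes from the docstring above.
def on_replication_data():
    print("new data is available to stream to replication clients")

hs.get_notifier().add_replication_callback(on_replication_data)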
saleor/checkout/tests/test_base_calculations.py
nestfiy/saleor
0
1578
<reponame>nestfiy/saleor<gh_stars>0 from decimal import Decimal from prices import Money, TaxedMoney from ...discount import DiscountValueType, VoucherType from ...discount.utils import get_product_discount_on_sale from ..base_calculations import ( base_checkout_total, base_tax_rate, calculate_base_line_total_price, calculate_base_line_unit_price, ) from ..fetch import fetch_checkout_lines def test_calculate_base_line_unit_price(checkout_with_single_item): # given checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert not checkout_line_info.voucher variant = checkout_line_info.variant # when prices_data = calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then expected_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) assert prices_data.undiscounted_price == expected_price assert prices_data.price_with_sale == expected_price assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_custom_price(checkout_with_single_item): # given line = checkout_with_single_item.lines.first() price_override = Decimal("12.22") line.price_override = price_override line.save(update_fields=["price_override"]) checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert not checkout_line_info.voucher # when prices_data = calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then currency = checkout_line_info.channel_listing.currency expected_price = Money(price_override, currency) assert prices_data.undiscounted_price == expected_price assert prices_data.price_with_sale == expected_price assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_variant_on_sale( checkout_with_single_item, discount_info, category ): # given checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert not checkout_line_info.voucher variant = checkout_line_info.variant # set category on sale variant.product.category = category variant.product.save() checkout_line_info.product = variant.product # when prices_data = calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[discount_info] ) # then expected_undiscounted_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) product_collections = set(pc.id for pc in checkout_line_info.collections) _, sale_discount = get_product_discount_on_sale( product=checkout_line_info.product, product_collections=product_collections, discount=discount_info, channel=checkout_with_single_item.channel, variant_id=variant.id, ) expected_price = sale_discount(expected_undiscounted_price) assert prices_data.undiscounted_price == expected_undiscounted_price assert prices_data.price_with_sale == expected_price assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_variant_on_sale_custom_price( checkout_with_single_item, discount_info, category ): # given line = checkout_with_single_item.lines.first() price_override 
= Decimal("20.00") line.price_override = price_override line.save(update_fields=["price_override"]) checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert not checkout_line_info.voucher variant = checkout_line_info.variant # set category on sale variant.product.category = category variant.product.save() checkout_line_info.product = variant.product # when prices_data = calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[discount_info] ) # then currency = checkout_line_info.channel_listing.currency expected_undiscounted_price = Money(price_override, currency) product_collections = set(pc.id for pc in checkout_line_info.collections) _, sale_discount = get_product_discount_on_sale( product=checkout_line_info.product, product_collections=product_collections, discount=discount_info, channel=checkout_with_single_item.channel, variant_id=variant.id, ) expected_price = sale_discount(expected_undiscounted_price) assert prices_data.undiscounted_price == expected_undiscounted_price assert prices_data.price_with_sale == expected_price assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_fixed_voucher( checkout_with_single_item, voucher, channel_USD ): # given checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.save() voucher_amount = Money(Decimal(3), checkout_with_single_item.currency) voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD) voucher_channel_listing.discount = voucher_amount voucher_channel_listing.save() checkout_with_single_item.voucher_code = voucher.code checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert checkout_line_info.voucher variant = checkout_line_info.variant # when prices_data = calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then expected_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) assert prices_data.undiscounted_price == expected_price assert prices_data.price_with_sale == expected_price assert prices_data.price_with_discounts == expected_price - voucher_amount def test_calculate_base_line_unit_price_with_fixed_voucher_custom_prices( checkout_with_single_item, voucher, channel_USD ): # given checkout_line = checkout_with_single_item.lines.first() price_override = Decimal("20.00") checkout_line.price_override = price_override checkout_line.save(update_fields=["price_override"]) voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.save() voucher_amount = Money(Decimal(3), checkout_with_single_item.currency) voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD) voucher_channel_listing.discount = voucher_amount voucher_channel_listing.save() checkout_with_single_item.voucher_code = voucher.code checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert checkout_line_info.voucher # when prices_data = calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then currency = 
checkout_line_info.channel_listing.currency expected_price = Money(price_override, currency) assert prices_data.undiscounted_price == expected_price assert prices_data.price_with_sale == expected_price assert prices_data.price_with_discounts == expected_price - voucher_amount def test_calculate_base_line_unit_price_with_percentage_voucher( checkout_with_single_item, voucher, channel_USD ): # given checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.discount_value_type = DiscountValueType.PERCENTAGE voucher.save() voucher_percent_value = Decimal(10) voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD) voucher_channel_listing.discount_value = voucher_percent_value voucher_channel_listing.save() checkout_with_single_item.voucher_code = voucher.code checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert checkout_line_info.voucher variant = checkout_line_info.variant # when prices_data = calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then expected_voucher_amount = Money(Decimal("1"), checkout_with_single_item.currency) expected_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) assert prices_data.undiscounted_price == expected_price assert prices_data.price_with_sale == expected_price assert prices_data.price_with_discounts == expected_price - expected_voucher_amount def test_calculate_base_line_unit_price_with_percentage_voucher_custom_prices( checkout_with_single_item, voucher, channel_USD ): # given checkout_line = checkout_with_single_item.lines.first() price_override = Decimal("20.00") checkout_line.price_override = price_override checkout_line.save(update_fields=["price_override"]) voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.discount_value_type = DiscountValueType.PERCENTAGE voucher.save() voucher_percent_value = Decimal(10) voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD) voucher_channel_listing.discount_value = voucher_percent_value voucher_channel_listing.save() checkout_with_single_item.voucher_code = voucher.code checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert checkout_line_info.voucher # when prices_data = calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then currency = checkout_line_info.channel_listing.currency expected_price = Money(price_override, currency) expected_voucher_amount = Money( price_override * voucher_percent_value / 100, checkout_with_single_item.currency ) assert prices_data.undiscounted_price == expected_price assert prices_data.price_with_sale == expected_price assert prices_data.price_with_discounts == expected_price - expected_voucher_amount def test_calculate_base_line_unit_price_with_discounts_apply_once_per_order( checkout_with_single_item, voucher, channel_USD ): # given checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.apply_once_per_order = True voucher.discount_value_type = DiscountValueType.PERCENTAGE 
voucher.save() voucher_percent_value = Decimal(10) voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD) voucher_channel_listing.discount_value = voucher_percent_value voucher_channel_listing.save() checkout_with_single_item.voucher_code = voucher.code checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert checkout_line_info.voucher variant = checkout_line_info.variant # when prices_data = calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then expected_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) assert prices_data.undiscounted_price == expected_price assert prices_data.price_with_sale == expected_price # apply once per order is applied when calculating line total. assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_discounts_once_per_order_custom_prices( checkout_with_single_item, voucher, channel_USD ): # given checkout_line = checkout_with_single_item.lines.first() price_override = Decimal("20.00") checkout_line.price_override = price_override checkout_line.save(update_fields=["price_override"]) voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.apply_once_per_order = True voucher.discount_value_type = DiscountValueType.PERCENTAGE voucher.save() voucher_percent_value = Decimal(10) voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD) voucher_channel_listing.discount_value = voucher_percent_value voucher_channel_listing.save() checkout_with_single_item.voucher_code = voucher.code checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert checkout_line_info.voucher # when prices_data = calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then currency = checkout_line_info.channel_listing.currency expected_price = Money(price_override, currency) assert prices_data.undiscounted_price == expected_price assert prices_data.price_with_sale == expected_price # apply once per order is applied when calculating line total. 
assert prices_data.price_with_discounts == expected_price def test_calculate_base_line_unit_price_with_variant_on_sale_and_voucher( checkout_with_single_item, discount_info, category, voucher, channel_USD ): # given checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.save() voucher_amount = Money(Decimal(3), checkout_with_single_item.currency) voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD) voucher_channel_listing.discount = voucher_amount voucher_channel_listing.save() checkout_with_single_item.voucher_code = voucher.code checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert checkout_line_info.voucher variant = checkout_line_info.variant # set category on sale variant.product.category = category variant.product.save() checkout_line_info.product = variant.product # when prices_data = calculate_base_line_unit_price( checkout_line_info, checkout_with_single_item.channel, discounts=[discount_info] ) # then expected_undiscounted_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) product_collections = set(pc.id for pc in checkout_line_info.collections) _, sale_discount = get_product_discount_on_sale( product=checkout_line_info.product, product_collections=product_collections, discount=discount_info, channel=checkout_with_single_item.channel, variant_id=variant.id, ) sale_discount_amount = sale_discount(expected_undiscounted_price) expected_price = expected_undiscounted_price - sale_discount_amount assert prices_data.undiscounted_price == expected_undiscounted_price assert prices_data.price_with_sale == expected_price assert prices_data.price_with_discounts == expected_price - voucher_amount def test_calculate_base_line_total_price(checkout_with_single_item): # given quantity = 3 checkout_line = checkout_with_single_item.lines.first() checkout_line.quantity = quantity checkout_line.save() checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert not checkout_line_info.voucher variant = checkout_line_info.variant # when prices_data = calculate_base_line_total_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then expected_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) assert prices_data.undiscounted_price == expected_price * quantity assert prices_data.price_with_sale == expected_price * quantity assert prices_data.price_with_discounts == expected_price * quantity def test_calculate_base_line_total_price_with_variant_on_sale( checkout_with_single_item, discount_info, category ): # given quantity = 3 checkout_line = checkout_with_single_item.lines.first() checkout_line.quantity = quantity checkout_line.save() checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert not checkout_line_info.voucher variant = checkout_line_info.variant # set category on sale variant.product.category = category variant.product.save() checkout_line_info.product = variant.product # when prices_data = 
calculate_base_line_total_price( checkout_line_info, checkout_with_single_item.channel, discounts=[discount_info] ) # then expected_undiscounted_unit_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) product_collections = set(pc.id for pc in checkout_line_info.collections) _, sale_discount = get_product_discount_on_sale( product=checkout_line_info.product, product_collections=product_collections, discount=discount_info, channel=checkout_with_single_item.channel, variant_id=variant.id, ) sale_discount_amount = sale_discount(expected_undiscounted_unit_price) expected_price = expected_undiscounted_unit_price - sale_discount_amount assert prices_data.undiscounted_price == expected_undiscounted_unit_price * quantity assert prices_data.price_with_sale == expected_price * quantity assert prices_data.price_with_discounts == expected_price * quantity def test_calculate_base_line_total_price_with_fixed_voucher( checkout_with_single_item, voucher, channel_USD ): # given quantity = 3 checkout_line = checkout_with_single_item.lines.first() checkout_line.quantity = quantity checkout_line.save() checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.save() voucher_amount = Money(Decimal(3), checkout_with_single_item.currency) voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD) voucher_channel_listing.discount = voucher_amount voucher_channel_listing.save() checkout_with_single_item.voucher_code = voucher.code checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert checkout_line_info.voucher variant = checkout_line_info.variant # when prices_data = calculate_base_line_total_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then expected_unit_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) assert prices_data.undiscounted_price == expected_unit_price * quantity assert prices_data.price_with_sale == expected_unit_price * quantity assert ( prices_data.price_with_discounts == (expected_unit_price - voucher_amount) * quantity ) def test_calculate_base_line_total_price_with_percentage_voucher( checkout_with_single_item, voucher, channel_USD ): # given quantity = 3 checkout_line = checkout_with_single_item.lines.first() checkout_line.quantity = quantity checkout_line.save() checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.discount_value_type = DiscountValueType.PERCENTAGE voucher.save() voucher_percent_value = Decimal(10) voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD) voucher_channel_listing.discount_value = voucher_percent_value voucher_channel_listing.save() checkout_with_single_item.voucher_code = voucher.code checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert checkout_line_info.voucher variant = checkout_line_info.variant # when prices_data = calculate_base_line_total_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # 
then expected_voucher_amount = Money(Decimal("1"), checkout_with_single_item.currency) expected_unit_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) assert prices_data.undiscounted_price == expected_unit_price * quantity assert prices_data.price_with_sale == expected_unit_price * quantity assert ( prices_data.price_with_discounts == (expected_unit_price - expected_voucher_amount) * quantity ) def test_calculate_base_line_total_price_with_discounts_apply_once_per_order( checkout_with_single_item, voucher, channel_USD ): # given quantity = 3 checkout_line = checkout_with_single_item.lines.first() checkout_line.quantity = quantity checkout_line.save() checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.apply_once_per_order = True voucher.discount_value_type = DiscountValueType.PERCENTAGE voucher.save() voucher_percent_value = Decimal(10) voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD) voucher_channel_listing.discount_value = voucher_percent_value voucher_channel_listing.save() checkout_with_single_item.voucher_code = voucher.code checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert checkout_line_info.voucher variant = checkout_line_info.variant # when prices_data = calculate_base_line_total_price( checkout_line_info, checkout_with_single_item.channel, discounts=[] ) # then expected_voucher_amount = Money(Decimal("1"), checkout_with_single_item.currency) expected_unit_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) assert prices_data.undiscounted_price == expected_unit_price * quantity assert prices_data.price_with_sale == expected_unit_price * quantity # apply once per order is applied when calculating line total. 
assert ( prices_data.price_with_discounts == (expected_unit_price * quantity) - expected_voucher_amount ) def test_calculate_base_line_total_price_with_variant_on_sale_and_voucher( checkout_with_single_item, discount_info, category, voucher, channel_USD ): # given quantity = 3 checkout_line = checkout_with_single_item.lines.first() checkout_line.quantity = quantity checkout_line.save() checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.save() voucher_amount = Money(Decimal(3), checkout_with_single_item.currency) voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD) voucher_channel_listing.discount = voucher_amount voucher_channel_listing.save() checkout_with_single_item.voucher_code = voucher.code checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert checkout_line_info.voucher variant = checkout_line_info.variant # set category on sale variant.product.category = category variant.product.save() checkout_line_info.product = variant.product # when prices_data = calculate_base_line_total_price( checkout_line_info, checkout_with_single_item.channel, discounts=[discount_info] ) # then expected_undiscounted_unit_price = variant.get_price( product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) product_collections = set(pc.id for pc in checkout_line_info.collections) _, sale_discount = get_product_discount_on_sale( product=checkout_line_info.product, product_collections=product_collections, discount=discount_info, channel=checkout_with_single_item.channel, variant_id=variant.id, ) sale_discount_amount = sale_discount(expected_undiscounted_unit_price) expected_unit_price = expected_undiscounted_unit_price - sale_discount_amount assert prices_data.undiscounted_price == expected_undiscounted_unit_price * quantity assert prices_data.price_with_sale == expected_unit_price * quantity assert ( prices_data.price_with_discounts == (expected_unit_price - voucher_amount) * quantity ) def test_calculate_base_line_total_price_with_variant_on_sale_and_voucher_applied_once( checkout_with_single_item, discount_info, category, voucher, channel_USD ): # given quantity = 3 checkout_line = checkout_with_single_item.lines.first() checkout_line.quantity = quantity checkout_line.save() checkout_line = checkout_with_single_item.lines.first() voucher.products.add(checkout_line.variant.product) voucher.type = VoucherType.SPECIFIC_PRODUCT voucher.apply_once_per_order = True voucher.save() voucher_amount = Money(Decimal(3), checkout_with_single_item.currency) voucher_channel_listing = voucher.channel_listings.get(channel=channel_USD) voucher_channel_listing.discount = voucher_amount voucher_channel_listing.save() checkout_with_single_item.voucher_code = voucher.code checkout_lines_info, _ = fetch_checkout_lines(checkout_with_single_item) checkout_line_info = checkout_lines_info[0] assert checkout_line_info.voucher variant = checkout_line_info.variant # set category on sale variant.product.category = category variant.product.save() checkout_line_info.product = variant.product # when prices_data = calculate_base_line_total_price( checkout_line_info, checkout_with_single_item.channel, discounts=[discount_info] ) # then expected_undiscounted_unit_price = variant.get_price( 
product=checkout_line_info.product, collections=checkout_line_info.collections, channel=checkout_with_single_item.channel, channel_listing=checkout_line_info.channel_listing, discounts=[], ) product_collections = set(pc.id for pc in checkout_line_info.collections) _, sale_discount = get_product_discount_on_sale( product=checkout_line_info.product, product_collections=product_collections, discount=discount_info, channel=checkout_with_single_item.channel, variant_id=variant.id, ) sale_discount_amount = sale_discount(expected_undiscounted_unit_price) expected_unit_price = expected_undiscounted_unit_price - sale_discount_amount assert prices_data.undiscounted_price == expected_undiscounted_unit_price * quantity assert prices_data.price_with_sale == expected_unit_price * quantity assert ( prices_data.price_with_discounts == (expected_unit_price * quantity) - voucher_amount ) def test_base_tax_rate_net_price_zero(): price = TaxedMoney(net=Money(0, "USD"), gross=Money(3, "USD")) assert base_tax_rate(price) == Decimal("0.0") def test_base_tax_rate_gross_price_zero(): price = TaxedMoney(net=Money(3, "USD"), gross=Money(0, "USD")) assert base_tax_rate(price) == Decimal("0.0") def test_base_checkout_total(): # given currency = "USD" taxed_money = TaxedMoney(net=Money(10, currency), gross=Money(10, currency)) subtotal = taxed_money shipping_price = taxed_money discount = Money(5, currency) # when total = base_checkout_total(subtotal, shipping_price, discount, currency) expected = subtotal + shipping_price - discount # then assert total == expected def test_base_checkout_total_high_discount(): # given currency = "USD" zero_taxed_money = TaxedMoney(net=Money(0, currency), gross=Money(0, currency)) subtotal = TaxedMoney(net=Money(10, currency), gross=Money(12, currency)) shipping_price = zero_taxed_money discount = Money(20, currency) # when total = base_checkout_total(subtotal, shipping_price, discount, currency) # then assert total == zero_taxed_money
2.1875
2
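The call shape asserted in test_base_checkout_total above, pulled out as a stand-alone sketch; the import path is inferred from the test file's location and the amounts are arbitrary examples.

# Sketch mirroring test_base_checkout_total; values are arbitrary.
from prices import Money, TaxedMoney
from saleor.checkout.base_calculations import base_checkout_total  # path inferred from the test module

currency = "USD"
subtotal = TaxedMoney(net=Money(10, currency), gross=Money(12, currency))
shipping_price = TaxedMoney(net=Money(2, currency), gross=Money(2, currency))
discount = Money(5, currency)

total = base_checkout_total(subtotal, shipping_price, discount, currency)
# subtotal + shipping - discount, clamped to zero when the discount exceeds the total,
# per test_base_checkout_total_high_discount.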
tests/test_date.py
andy-z/ged4py
10
1579
#!/usr/bin/env python # -*- coding: utf-8 -*- """Tests for `ged4py.date` module.""" import unittest from ged4py.calendar import ( CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor ) from ged4py.date import ( DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor ) class TestDateVisitor(CalendarDateVisitor, DateValueVisitor): def visitGregorian(self, date): if not isinstance(date, GregorianDate): raise TypeError(str(type(date))) return ("gregorian", date) def visitJulian(self, date): if not isinstance(date, JulianDate): raise TypeError(str(type(date))) return ("julian", date) def visitHebrew(self, date): if not isinstance(date, HebrewDate): raise TypeError(str(type(date))) return ("hebrew", date) def visitFrench(self, date): if not isinstance(date, FrenchDate): raise TypeError(str(type(date))) return ("french", date) def visitSimple(self, date): if not isinstance(date, DateValueSimple): raise TypeError(str(type(date))) return ("simple", date.date) def visitPeriod(self, date): if not isinstance(date, DateValuePeriod): raise TypeError(str(type(date))) return ("period", date.date1, date.date2) def visitFrom(self, date): if not isinstance(date, DateValueFrom): raise TypeError(str(type(date))) return ("from", date.date) def visitTo(self, date): if not isinstance(date, DateValueTo): raise TypeError(str(type(date))) return ("to", date.date) def visitRange(self, date): if not isinstance(date, DateValueRange): raise TypeError(str(type(date))) return ("range", date.date1, date.date2) def visitBefore(self, date): if not isinstance(date, DateValueBefore): raise TypeError(str(type(date))) return ("before", date.date) def visitAfter(self, date): if not isinstance(date, DateValueAfter): raise TypeError(str(type(date))) return ("after", date.date) def visitAbout(self, date): if not isinstance(date, DateValueAbout): raise TypeError(str(type(date))) return ("about", date.date) def visitCalculated(self, date): if not isinstance(date, DateValueCalculated): raise TypeError(str(type(date))) return ("calculated", date.date) def visitEstimated(self, date): if not isinstance(date, DateValueEstimated): raise TypeError(str(type(date))) return ("estimated", date.date) def visitInterpreted(self, date): if not isinstance(date, DateValueInterpreted): raise TypeError(str(type(date))) return ("interpreted", date.date, date.phrase) def visitPhrase(self, date): if not isinstance(date, DateValuePhrase): raise TypeError(str(type(date))) return ("phrase", date.phrase) class TestDetailDate(unittest.TestCase): """Tests for `ged4py.date` module.""" def test_001_cal_date(self): """Test date.CalendarDate class.""" date = GregorianDate(2017, "OCT", 9) self.assertEqual(date.year, 2017) self.assertIsNone(date.dual_year) self.assertFalse(date.bc) self.assertEqual(date.year_str, "2017") self.assertEqual(date.month, "OCT") self.assertEqual(date.month_num, 10) self.assertEqual(date.day, 9) self.assertEqual(date.calendar, CalendarType.GREGORIAN) date = GregorianDate(2017, "OCT", bc=True) self.assertEqual(date.year, 2017) self.assertIsNone(date.dual_year) self.assertTrue(date.bc) self.assertEqual(date.year_str, "2017 B.C.") self.assertEqual(date.month, "OCT") self.assertEqual(date.month_num, 10) self.assertIsNone(date.day) self.assertEqual(date.calendar, CalendarType.GREGORIAN) date = 
GregorianDate(1699, "FEB", dual_year=1700) self.assertEqual(date.year, 1699) self.assertEqual(date.dual_year, 1700) self.assertFalse(date.bc) self.assertEqual(date.year_str, "1699/00") self.assertEqual(date.month, "FEB") self.assertEqual(date.month_num, 2) self.assertIsNone(date.day) self.assertEqual(date.calendar, CalendarType.GREGORIAN) date = HebrewDate(5000) self.assertEqual(date.year, 5000) self.assertFalse(date.bc) self.assertEqual(date.year_str, "5000") self.assertIsNone(date.month) self.assertIsNone(date.month_num) self.assertIsNone(date.day) self.assertEqual(date.calendar, CalendarType.HEBREW) date = FrenchDate(1, "FRUC", 1) self.assertEqual(date.year, 1) self.assertFalse(date.bc) self.assertEqual(date.year_str, "1") self.assertEqual(date.month, "FRUC") self.assertEqual(date.month_num, 12) self.assertEqual(date.day, 1) self.assertEqual(date.calendar, CalendarType.FRENCH_R) date = JulianDate(5, "JAN", bc=True) self.assertEqual(date.year, 5) self.assertTrue(date.bc) self.assertEqual(date.year_str, "5 B.C.") self.assertEqual(date.month, "JAN") self.assertEqual(date.month_num, 1) self.assertIsNone(date.day) self.assertEqual(date.calendar, CalendarType.JULIAN) def test_002_cal_date_key(self): """Test date.CalendarDate class.""" date = GregorianDate(2017, "OCT", 9) self.assertEqual(date.key(), (2458035.5, 0)) date = GregorianDate(1699, "FEB", 1, dual_year=1700) self.assertEqual(date.key(), (2342003.5, 0)) date = FrenchDate(2017, "VENT", bc=True) self.assertEqual(date.key(), (1638959.5, 1)) date = HebrewDate(2017, "TSH", 22) self.assertEqual(date.key(), (1084542.5, 0)) date = JulianDate(1000) self.assertEqual(date.key(), (2086672.5, 1)) def test_003_cal_date_cmp(self): """Test date.CalendarDate class.""" self.assertTrue(GregorianDate(2016, "JAN", 1) < GregorianDate(2017, "JAN", 1)) self.assertTrue(GregorianDate(2017, "JAN", 1) < GregorianDate(2017, "FEB", 1)) self.assertTrue(GregorianDate(2017, "JAN", 1) < GregorianDate(2017, "JAN", 2)) self.assertTrue(GregorianDate(2017, "JAN", 1) <= GregorianDate(2017, "JAN", 2)) self.assertTrue(GregorianDate(2017, "JAN", 2) > GregorianDate(2017, "JAN", 1)) self.assertTrue(GregorianDate(2017, "JAN", 2) >= GregorianDate(2017, "JAN", 1)) self.assertTrue(GregorianDate(2017, "JAN", 1) == GregorianDate(2017, "JAN", 1)) self.assertTrue(GregorianDate(2017, "JAN", 1) != GregorianDate(2017, "JAN", 2)) # missing day compares as "past" the last day of month, but before next month self.assertTrue(GregorianDate(2017, "JAN") > GregorianDate(2017, "JAN", 31)) self.assertTrue(GregorianDate(2017, "JAN") < GregorianDate(2017, "FEB", 1)) # missing month compares as "past" the last day of year, but before next year self.assertTrue(GregorianDate(2017) > GregorianDate(2017, "DEC", 31)) self.assertTrue(GregorianDate(2017) < GregorianDate(2018, "JAN", 1)) # dual date self.assertTrue(GregorianDate(1700, "JAN", 1) == GregorianDate(1699, "JAN", 1, dual_year=1700)) # compare Gregorian and Julian dates self.assertTrue(GregorianDate(1582, "OCT", 15) == JulianDate(1582, "OCT", 5)) self.assertTrue(GregorianDate(1582, "OCT", 16) > JulianDate(1582, "OCT", 5)) self.assertTrue(JulianDate(1582, "OCT", 6) > GregorianDate(1582, "OCT", 15)) self.assertTrue(GregorianDate(2000, "JAN", 14) == JulianDate(2000, "JAN", 1)) # compare Gregorian and French dates self.assertTrue(GregorianDate(1792, "SEP", 22) == FrenchDate(1, "VEND", 1)) self.assertTrue(GregorianDate(1792, "SEP", 23) > FrenchDate(1, "VEND", 1)) self.assertTrue(FrenchDate(1, "VEND", 2) > GregorianDate(1792, "SEP", 22)) 
self.assertTrue(GregorianDate(2020, "SEP", 21) == FrenchDate(228, "COMP", 5)) # compare Gregorian and Hebrew dates self.assertTrue(GregorianDate(2020, "JAN", 1) == HebrewDate(5780, "SVN", 4)) def test_004_cal_date_str(self): """Test date.CalendarDate class.""" date = GregorianDate(2017, "OCT", 9) self.assertEqual(str(date), "9 OCT 2017") date = GregorianDate(2017, "OCT", bc=True) self.assertEqual(str(date), "OCT 2017 B.C.") date = GregorianDate(1699, "JAN", 1, dual_year=1700) self.assertEqual(str(date), "1 JAN 1699/00") date = HebrewDate(5000) self.assertEqual(str(date), "@#DHEBREW@ 5000") date = FrenchDate(1, "VEND", 1) self.assertEqual(str(date), "@#DFRENCH R@ 1 VEND 1") date = JulianDate(1582, "OCT", 5) self.assertEqual(str(date), "@#DJULIAN@ 5 OCT 1582") def test_005_cal_date_parse(self): """Test date.CalendarDate.parse method.""" date = CalendarDate.parse("31 MAY 2020") self.assertIsInstance(date, GregorianDate) self.assertEqual(date.year, 2020) self.assertIsNone(date.dual_year) self.assertFalse(date.bc) self.assertEqual(date.month, "MAY") self.assertEqual(date.month_num, 5) self.assertEqual(date.day, 31) self.assertEqual(date.original, "31 MAY 2020") self.assertEqual(date.calendar, CalendarType.GREGORIAN) date = CalendarDate.parse("@#DGREGORIAN@ 10 MAR 1698/99") self.assertIsInstance(date, GregorianDate) self.assertEqual(date.year, 1698) self.assertEqual(date.dual_year, 1699) self.assertFalse(date.bc) self.assertEqual(date.month, "MAR") self.assertEqual(date.month_num, 3) self.assertEqual(date.day, 10) self.assertEqual(date.original, "@#DGREGORIAN@ 10 MAR 1698/99") self.assertEqual(date.calendar, CalendarType.GREGORIAN) date = CalendarDate.parse("10 MAR 1699/00") self.assertIsInstance(date, GregorianDate) self.assertEqual(date.year, 1699) self.assertEqual(date.dual_year, 1700) self.assertEqual(date.original, "10 MAR 1699/00") self.assertEqual(date.calendar, CalendarType.GREGORIAN) date = CalendarDate.parse("@#DJULIAN@ 100 B.C.") self.assertIsInstance(date, JulianDate) self.assertEqual(date.year, 100) self.assertTrue(date.bc) self.assertIsNone(date.month) self.assertIsNone(date.month_num) self.assertIsNone(date.day) self.assertEqual(date.original, "@#DJULIAN@ 100 B.C.") self.assertEqual(date.calendar, CalendarType.JULIAN) date = CalendarDate.parse("@#DFRENCH R@ 15 GERM 0001") self.assertIsInstance(date, FrenchDate) self.assertEqual(date.year, 1) self.assertFalse(date.bc) self.assertEqual(date.month, "GERM") self.assertEqual(date.month_num, 7) self.assertEqual(date.day, 15) self.assertEqual(date.original, "@#DFRENCH R@ 15 GERM 0001") self.assertEqual(date.calendar, CalendarType.FRENCH_R) date = CalendarDate.parse("@#DHEBREW@ 7 NSN 5000") self.assertIsInstance(date, HebrewDate) self.assertEqual(date.year, 5000) self.assertFalse(date.bc) self.assertEqual(date.month, "NSN") self.assertEqual(date.month_num, 8) self.assertEqual(date.day, 7) self.assertEqual(date.original, "@#DHEBREW@ 7 NSN 5000") self.assertEqual(date.calendar, CalendarType.HEBREW) # cannot handle ROMAN with self.assertRaises(ValueError): date = CalendarDate.parse("@#DROMAN@ 2020") # cannot handle UNKNOWN with self.assertRaises(ValueError): date = CalendarDate.parse("@#DUNKNOWN@ 2020") # dual year only works for GREGORIAN with self.assertRaises(ValueError): date = CalendarDate.parse("@#DJULIAN@ 2020/21") # cannot parse nonsense with self.assertRaises(ValueError): date = CalendarDate.parse("start of time") def test_006_cal_date_visitor(self): """Test date.CalendarDate.accept method.""" visitor = TestDateVisitor() date = 
GregorianDate(2017, "OCT", 9) value = date.accept(visitor) self.assertEqual(value, ("gregorian", date)) date = HebrewDate(5000) value = date.accept(visitor) self.assertEqual(value, ("hebrew", date)) date = FrenchDate(1, "VEND", 1) value = date.accept(visitor) self.assertEqual(value, ("french", date)) date = JulianDate(1582, "OCT", 5) value = date.accept(visitor) self.assertEqual(value, ("julian", date)) def test_007_cal_date_hash(self): """Test date.CalendarDate hash.""" self.assertEqual(hash(GregorianDate(2017, "OCT", 9)), hash(GregorianDate(2017, "OCT", 9))) self.assertEqual(hash(GregorianDate(2017, "OCT", 9, bc=True)), hash(GregorianDate(2017, "OCT", 9, bc=True))) self.assertEqual(hash(FrenchDate(1, "VEND", 1)), hash(FrenchDate(1, "VEND", 1))) self.assertEqual(hash(FrenchDate(1)), hash(FrenchDate(1))) def test_010_date_no_date(self): """Test date.DateValue class.""" date = DateValue.parse("not a date") self.assertIsInstance(date, DateValuePhrase) self.assertEqual(date.kind, DateValueTypes.PHRASE) self.assertEqual(date.phrase, "not a date") self.assertEqual(str(date), "(not a date)") def test_012_date_parse_period(self): """Test date.DateValue class.""" date = DateValue.parse("FROM 1967") self.assertIsInstance(date, DateValueFrom) self.assertEqual(date.kind, DateValueTypes.FROM) self.assertEqual(date.date, GregorianDate(1967)) self.assertEqual(str(date), "FROM 1967") date = DateValue.parse("TO 1 JAN 2017") self.assertIsInstance(date, DateValueTo) self.assertEqual(date.kind, DateValueTypes.TO) self.assertEqual(date.date, GregorianDate(2017, "JAN", 1)) self.assertEqual(str(date), "TO 1 JAN 2017") date = DateValue.parse("FROM 1920 TO 2000") self.assertIsInstance(date, DateValuePeriod) self.assertEqual(date.kind, DateValueTypes.PERIOD) self.assertEqual(date.date1, GregorianDate(1920)) self.assertEqual(date.date2, GregorianDate(2000)) self.assertEqual(str(date), "FROM 1920 TO 2000") date = DateValue.parse("from mar 1920 to 1 apr 2000") self.assertIsInstance(date, DateValuePeriod) self.assertEqual(date.kind, DateValueTypes.PERIOD) self.assertEqual(date.date1, GregorianDate(1920, "MAR")) self.assertEqual(date.date2, GregorianDate(2000, "APR", 1)) self.assertEqual(str(date), "FROM MAR 1920 TO 1 APR 2000") def test_013_date_parse_range(self): """Test date.DateValue class.""" date = DateValue.parse("BEF 1967B.C.") self.assertIsInstance(date, DateValueBefore) self.assertEqual(date.kind, DateValueTypes.BEFORE) self.assertEqual(date.date, GregorianDate(1967, bc=True)) self.assertEqual(str(date), "BEFORE 1967 B.C.") date = DateValue.parse("AFT 1 JAN 2017") self.assertIsInstance(date, DateValueAfter) self.assertEqual(date.kind, DateValueTypes.AFTER) self.assertEqual(date.date, GregorianDate(2017, "JAN", 1)) self.assertEqual(str(date), "AFTER 1 JAN 2017") date = DateValue.parse("BET @#DJULIAN@ 1600 AND 2000") self.assertIsInstance(date, DateValueRange) self.assertEqual(date.kind, DateValueTypes.RANGE) self.assertEqual(date.date1, JulianDate(1600)) self.assertEqual(date.date2, GregorianDate(2000)) self.assertEqual(str(date), "BETWEEN @#DJULIAN@ 1600 AND 2000") date = DateValue.parse("bet mar 1920 and apr 2000") self.assertIsInstance(date, DateValueRange) self.assertEqual(date.kind, DateValueTypes.RANGE) self.assertEqual(date.date1, GregorianDate(1920, "MAR")) self.assertEqual(date.date2, GregorianDate(2000, "APR")) self.assertEqual(str(date), "BETWEEN MAR 1920 AND APR 2000") def test_014_date_parse_approx(self): """Test date.DateValue class.""" dates = {"500 B.C.": GregorianDate(500, bc=True), "JAN 
2017": GregorianDate(2017, "JAN"), "31 JAN 2017": GregorianDate(2017, "JAN", 31)} approx = [ ("ABT", "ABOUT", DateValueAbout, DateValueTypes.ABOUT), ("CAL", "CALCULATED", DateValueCalculated, DateValueTypes.CALCULATED), ("EST", "ESTIMATED", DateValueEstimated, DateValueTypes.ESTIMATED) ] for appr, fmt, klass, typeEnum in approx: for datestr, value in dates.items(): date = DateValue.parse(appr + " " + datestr) self.assertIsInstance(date, klass) self.assertEqual(date.kind, typeEnum) self.assertEqual(str(date), fmt + " " + datestr) self.assertEqual(date.date, value) def test_015_date_parse_phrase(self): """Test date.DateValue class.""" date = DateValue.parse("(some phrase)") self.assertIsInstance(date, DateValuePhrase) self.assertEqual(date.kind, DateValueTypes.PHRASE) self.assertEqual(date.phrase, "some phrase") date = DateValue.parse("INT 1967 B.C. (some phrase)") self.assertIsInstance(date, DateValueInterpreted) self.assertEqual(date.kind, DateValueTypes.INTERPRETED) self.assertEqual(date.date, GregorianDate(1967, bc=True)) self.assertEqual(date.phrase, "some phrase") self.assertEqual(str(date), "INTERPRETED 1967 B.C. (some phrase)") date = DateValue.parse("INT @#DGREGORIAN@ 1 JAN 2017 (some phrase)") self.assertIsInstance(date, DateValueInterpreted) self.assertEqual(date.kind, DateValueTypes.INTERPRETED) self.assertEqual(date.date, GregorianDate(2017, "JAN", 1)) self.assertEqual(date.phrase, "some phrase") self.assertEqual(str(date), "INTERPRETED 1 JAN 2017 (some phrase)") def test_016_date_parse_simple(self): """Test date.DateValue class.""" date = DateValue.parse("1967 B.C.") self.assertIsInstance(date, DateValueSimple) self.assertEqual(date.kind, DateValueTypes.SIMPLE) self.assertEqual(date.date, GregorianDate(1967, bc=True)) self.assertEqual(str(date), "1967 B.C.") date = DateValue.parse("@#DGREGORIAN@ 1 JAN 2017") self.assertIsInstance(date, DateValueSimple) self.assertEqual(date.kind, DateValueTypes.SIMPLE) self.assertEqual(date.date, GregorianDate(2017, "JAN", 1)) self.assertEqual(str(date), "1 JAN 2017") def test_017_date_cmp(self): """Test date.Date class.""" dv = DateValue.parse("2016") self.assertIsInstance(dv.key(), tuple) self.assertEqual(dv.key(), (GregorianDate(2016), GregorianDate(2016))) dv = DateValue.parse("31 DEC 2000") self.assertIsInstance(dv.key(), tuple) self.assertEqual(dv.key(), (GregorianDate(2000, "DEC", 31), GregorianDate(2000, "DEC", 31))) dv = DateValue.parse("BET 31 DEC 2000 AND 1 JAN 2001") self.assertIsInstance(dv.key(), tuple) self.assertEqual(dv.key(), (GregorianDate(2000, "DEC", 31), GregorianDate(2001, "JAN", 1))) # order of dates is messed up dv = DateValue.parse("BET 31 DEC 2000 AND 1 JAN 2000") self.assertIsInstance(dv.key(), tuple) self.assertEqual(dv.key(), (GregorianDate(2000, "DEC", 31), GregorianDate(2000, "JAN", 1))) self.assertTrue(DateValue.parse("2016") < DateValue.parse("2017")) self.assertTrue(DateValue.parse("2 JAN 2016") > DateValue.parse("1 JAN 2016")) self.assertTrue(DateValue.parse("BET 1900 AND 2000") < DateValue.parse("FROM 1920 TO 1999")) # comparing simple date with range self.assertTrue(DateValue.parse("1 JAN 2000") > DateValue.parse("BET 1 JAN 1999 AND 1 JAN 2000")) self.assertNotEqual(DateValue.parse("1 JAN 2000"), DateValue.parse("BET 1 JAN 2000 AND 1 JAN 2001")) self.assertTrue(DateValue.parse("1 JAN 2000") < DateValue.parse("BET 1 JAN 2000 AND 1 JAN 2001")) self.assertTrue(DateValue.parse("1 JAN 2000") > DateValue.parse("BEF 1 JAN 2000")) self.assertTrue(DateValue.parse("1 JAN 2000") > DateValue.parse("TO 1 JAN 2000")) 
self.assertTrue(DateValue.parse("1 JAN 2000") < DateValue.parse("AFT 1 JAN 2000")) self.assertTrue(DateValue.parse("1 JAN 2000") < DateValue.parse("FROM 1 JAN 2000")) # comparing ranges self.assertEqual(DateValue.parse("FROM 1 JAN 2000 TO 1 JAN 2001"), DateValue.parse("BET 1 JAN 2000 AND 1 JAN 2001")) self.assertTrue(DateValue.parse("FROM 1 JAN 1999 TO 1 JAN 2001") < DateValue.parse("BET 1 JAN 2000 AND 1 JAN 2001")) self.assertTrue(DateValue.parse("FROM 1 JAN 2000 TO 1 JAN 2002") > DateValue.parse("BET 1 JAN 2000 AND 1 JAN 2001")) # Less specific date compares later than more specific self.assertTrue(DateValue.parse("2000") > DateValue.parse("31 DEC 2000")) self.assertTrue(DateValue.parse("DEC 2000") > DateValue.parse("31 DEC 2000")) # phrase is always later than any regular date self.assertTrue(DateValue.parse("(Could be 1996 or 1998)") > DateValue.parse("2000")) # "empty" date is always later than any regular date self.assertTrue(DateValue.parse("") > DateValue.parse("2000")) def test_018_date_parse_empty(self): """Test date.DateValue class.""" for value in (None, ""): date = DateValue.parse(value) self.assertIsInstance(date, DateValuePhrase) self.assertEqual(date.kind, DateValueTypes.PHRASE) self.assertIsNone(date.phrase) self.assertEqual(str(date), "") def test_019_date_value_visitor(self): """Test date.DateValue class.""" visitor = TestDateVisitor() date1 = GregorianDate(2017, "JAN", 1) date2 = GregorianDate(2017, "DEC", 31) value = DateValueSimple(date1).accept(visitor) self.assertEqual(value, ("simple", date1)) value = DateValueFrom(date1).accept(visitor) self.assertEqual(value, ("from", date1)) value = DateValueTo(date1).accept(visitor) self.assertEqual(value, ("to", date1)) value = DateValuePeriod(date1, date2).accept(visitor) self.assertEqual(value, ("period", date1, date2)) value = DateValueBefore(date1).accept(visitor) self.assertEqual(value, ("before", date1)) value = DateValueAfter(date1).accept(visitor) self.assertEqual(value, ("after", date1)) value = DateValueRange(date1, date2).accept(visitor) self.assertEqual(value, ("range", date1, date2)) value = DateValueAbout(date1).accept(visitor) self.assertEqual(value, ("about", date1)) value = DateValueCalculated(date1).accept(visitor) self.assertEqual(value, ("calculated", date1)) value = DateValueEstimated(date1).accept(visitor) self.assertEqual(value, ("estimated", date1)) value = DateValueInterpreted(date1, "phrase").accept(visitor) self.assertEqual(value, ("interpreted", date1, "phrase")) value = DateValuePhrase("phrase").accept(visitor) self.assertEqual(value, ("phrase", "phrase")) def test_020_date_hash(self): """Test date.Date hash""" dv1 = DateValue.parse("2016") dv2 = DateValue.parse("2016") self.assertEqual(hash(dv1), hash(dv2)) dv1 = DateValue.parse("31 DEC 2000") dv2 = DateValue.parse("31 DEC 2000") self.assertEqual(hash(dv1), hash(dv2)) dv1 = DateValue.parse("BET 31 DEC 2000 AND 1 JAN 2001") dv2 = DateValue.parse("BET 31 DEC 2000 AND 1 JAN 2001") self.assertEqual(hash(dv1), hash(dv2))
3
3
src/quart/local.py
Dunkledore/quart
3
1580
<reponame>Dunkledore/quart<gh_stars>1-10 from __future__ import annotations import asyncio import copy from contextvars import ContextVar # noqa # contextvars not understood as stdlib from typing import Any # noqa # contextvars not understood as stdlib from typing import Callable, Dict, Optional class TaskLocal: """An object local to the current task.""" __slots__ = ("_storage",) def __init__(self) -> None: # Note as __setattr__ is overidden below, use the object __setattr__ object.__setattr__(self, "_storage", ContextVar("storage")) def __getattr__(self, name: str) -> Any: values = self._storage.get({}) try: return values[name] except KeyError: raise AttributeError(name) def __setattr__(self, name: str, value: Any) -> None: values = self._storage.get({}) values[name] = value self._storage.set(values) def __delattr__(self, name: str) -> None: values = self._storage.get({}) try: del values[name] self._storage.set(values) except KeyError: raise AttributeError(name) @staticmethod def _task_identity() -> int: loop = asyncio.get_event_loop() if loop.is_running(): task = asyncio.current_task() task_id = id(task) return task_id else: return 0 class LocalStack: def __init__(self) -> None: self._task_local = TaskLocal() def push(self, value: Any) -> None: stack = getattr(self._task_local, "stack", None) if stack is None: self._task_local.stack = stack = [] stack.append(value) def pop(self) -> Any: stack = getattr(self._task_local, "stack", None) if stack is None or stack == []: return None else: return stack.pop() @property def top(self) -> Any: try: return self._task_local.stack[-1] except (AttributeError, IndexError): return None class LocalProxy: """Proxy to a task local object.""" __slots__ = ("__dict__", "__local", "__wrapped__") def __init__(self, local: Callable, name: Optional[str] = None) -> None: # Note as __setattr__ is overidden below, use the object __setattr__ object.__setattr__(self, "__LocalProxy_local", local) object.__setattr__(self, "__wrapped__", local) object.__setattr__(self, "__name__", name) def _get_current_object(self) -> Any: return object.__getattribute__(self, "__LocalProxy_local")() @property def __dict__(self) -> Dict[str, Any]: # type: ignore try: return self._get_current_object().__dict__ except RuntimeError: raise AttributeError("__dict__") def __repr__(self) -> str: try: obj = self._get_current_object() except RuntimeError: return "<%s unbound>" % self.__class__.__name__ return repr(obj) def __bool__(self) -> bool: try: return bool(self._get_current_object()) except RuntimeError: return False def __dir__(self) -> Any: try: return dir(self._get_current_object()) except RuntimeError: return [] def __getattr__(self, name: Any) -> Any: if name == "__members__": return dir(self._get_current_object()) return getattr(self._get_current_object(), name) def __setitem__(self, key: Any, value: Any) -> Any: self._get_current_object()[key] = value def __delitem__(self, key: Any) -> Any: del self._get_current_object()[key] async def __aiter__(self) -> Any: async for x in self._get_current_object(): yield x __setattr__ = lambda x, n, v: setattr( # noqa: E731, E501 x._get_current_object(), n, v # type: ignore ) __delattr__ = lambda x, n: delattr(x._get_current_object(), n) # type: ignore # noqa: E731 __str__ = lambda x: str(x._get_current_object()) # type: ignore # noqa: E731 __lt__ = lambda x, o: x._get_current_object() < o # noqa: E731 __le__ = lambda x, o: x._get_current_object() <= o # noqa: E731 __eq__ = lambda x, o: x._get_current_object() == o # type: ignore # noqa: E731 
__ne__ = lambda x, o: x._get_current_object() != o # type: ignore # noqa: E731 __gt__ = lambda x, o: x._get_current_object() > o # noqa: E731 __ge__ = lambda x, o: x._get_current_object() >= o # noqa: E731 __hash__ = lambda x: hash(x._get_current_object()) # type: ignore # noqa: E731 __call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw) # noqa: E731 __len__ = lambda x: len(x._get_current_object()) # noqa: E731 __getitem__ = lambda x, i: x._get_current_object()[i] # noqa: E731 __iter__ = lambda x: iter(x._get_current_object()) # noqa: E731 __contains__ = lambda x, i: i in x._get_current_object() # noqa: E731 __add__ = lambda x, o: x._get_current_object() + o # noqa: E731 __sub__ = lambda x, o: x._get_current_object() - o # noqa: E731 __mul__ = lambda x, o: x._get_current_object() * o # noqa: E731 __floordiv__ = lambda x, o: x._get_current_object() // o # noqa: E731 __mod__ = lambda x, o: x._get_current_object() % o # noqa: E731 __divmod__ = lambda x, o: x._get_current_object().__divmod__(o) # noqa: E731 __pow__ = lambda x, o: x._get_current_object() ** o # noqa: E731 __lshift__ = lambda x, o: x._get_current_object() << o # noqa: E731 __rshift__ = lambda x, o: x._get_current_object() >> o # noqa: E731 __and__ = lambda x, o: x._get_current_object() & o # noqa: E731 __xor__ = lambda x, o: x._get_current_object() ^ o # noqa: E731 __or__ = lambda x, o: x._get_current_object() | o # noqa: E731 __div__ = lambda x, o: x._get_current_object().__div__(o) # noqa: E731 __truediv__ = lambda x, o: x._get_current_object().__truediv__(o) # noqa: E731 __neg__ = lambda x: -(x._get_current_object()) # noqa: E731 __pos__ = lambda x: +(x._get_current_object()) # noqa: E731 __abs__ = lambda x: abs(x._get_current_object()) # noqa: E731 __invert__ = lambda x: ~(x._get_current_object()) # noqa: E731 __complex__ = lambda x: complex(x._get_current_object()) # noqa: E731 __int__ = lambda x: int(x._get_current_object()) # noqa: E731 __float__ = lambda x: float(x._get_current_object()) # noqa: E731 __oct__ = lambda x: oct(x._get_current_object()) # noqa: E731 __hex__ = lambda x: hex(x._get_current_object()) # noqa: E731 __index__ = lambda x: x._get_current_object().__index__() # noqa: E731 __coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o) # noqa: E731 __enter__ = lambda x: x._get_current_object().__enter__() # noqa: E731 __exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw) # noqa: E731 __radd__ = lambda x, o: o + x._get_current_object() # noqa: E731 __rsub__ = lambda x, o: o - x._get_current_object() # noqa: E731 __rmul__ = lambda x, o: o * x._get_current_object() # noqa: E731 __rdiv__ = lambda x, o: o / x._get_current_object() # noqa: E731 __rtruediv__ = __rdiv__ __rfloordiv__ = lambda x, o: o // x._get_current_object() # noqa: E731 __rmod__ = lambda x, o: o % x._get_current_object() # noqa: E731 __rdivmod__ = lambda x, o: x._get_current_object().__rdivmod__(o) # noqa: E731 __copy__ = lambda x: copy.copy(x._get_current_object()) # noqa: E731 __deepcopy__ = lambda x, memo: copy.deepcopy(x._get_current_object(), memo) # noqa: E731 __await__ = lambda x: x._get_current_object().__await__() # noqa: E731
2.375
2
pytorch3dunet/unet3d/predictor.py
searobbersduck/pytorch-3dunet
0
1581
import time import h5py import hdbscan import numpy as np import torch from sklearn.cluster import MeanShift from pytorch3dunet.datasets.hdf5 import SliceBuilder from pytorch3dunet.unet3d.utils import get_logger from pytorch3dunet.unet3d.utils import unpad logger = get_logger('UNet3DPredictor') class _AbstractPredictor: def __init__(self, model, loader, output_file, config, **kwargs): self.model = model self.loader = loader self.output_file = output_file self.config = config self.predictor_config = kwargs @staticmethod def _volume_shape(dataset): # TODO: support multiple internal datasets raw = dataset.raws[0] if raw.ndim == 3: return raw.shape else: return raw.shape[1:] @staticmethod def _get_output_dataset_names(number_of_datasets, prefix='predictions'): if number_of_datasets == 1: return [prefix] else: return [f'{prefix}{i}' for i in range(number_of_datasets)] def predict(self): raise NotImplementedError class StandardPredictor(_AbstractPredictor): """ Applies the model on the given dataset and saves the result in the `output_file` in the H5 format. Predictions from the network are kept in memory. If the results from the network don't fit in into RAM use `LazyPredictor` instead. The output dataset names inside the H5 is given by `des_dataset_name` config argument. If the argument is not present in the config 'predictions{n}' is used as a default dataset name, where `n` denotes the number of the output head from the network. Args: model (Unet3D): trained 3D UNet model used for prediction data_loader (torch.utils.data.DataLoader): input data loader output_file (str): path to the output H5 file config (dict): global config dict """ def __init__(self, model, loader, output_file, config, **kwargs): super().__init__(model, loader, output_file, config, **kwargs) def predict(self): out_channels = self.config['model'].get('out_channels') if out_channels is None: out_channels = self.config['model']['dt_out_channels'] prediction_channel = self.config.get('prediction_channel', None) if prediction_channel is not None: logger.info(f"Using only channel '{prediction_channel}' from the network output") device = self.config['device'] output_heads = self.config['model'].get('output_heads', 1) logger.info(f'Running prediction on {len(self.loader)} batches...') # dimensionality of the the output predictions volume_shape = self._volume_shape(self.loader.dataset) if prediction_channel is None: prediction_maps_shape = (out_channels,) + volume_shape else: # single channel prediction map prediction_maps_shape = (1,) + volume_shape logger.info(f'The shape of the output prediction maps (CDHW): {prediction_maps_shape}') avoid_block_artifacts = self.predictor_config.get('avoid_block_artifacts', True) logger.info(f'Avoid block artifacts: {avoid_block_artifacts}') # create destination H5 file h5_output_file = h5py.File(self.output_file, 'w') # allocate prediction and normalization arrays logger.info('Allocating prediction and normalization arrays...') prediction_maps, normalization_masks = self._allocate_prediction_maps(prediction_maps_shape, output_heads, h5_output_file) # Sets the module in evaluation mode explicitly (necessary for batchnorm/dropout layers if present) self.model.eval() # Set the `testing=true` flag otherwise the final Softmax/Sigmoid won't be applied! 
self.model.testing = True # Run predictions on the entire input dataset with torch.no_grad(): for batch, indices in self.loader: # send batch to device batch = batch.to(device) # forward pass predictions = self.model(batch) # wrap predictions into a list if there is only one output head from the network if output_heads == 1: predictions = [predictions] # for each output head for prediction, prediction_map, normalization_mask in zip(predictions, prediction_maps, normalization_masks): # convert to numpy array prediction = prediction.cpu().numpy() # for each batch sample for pred, index in zip(prediction, indices): # save patch index: (C,D,H,W) if prediction_channel is None: channel_slice = slice(0, out_channels) else: channel_slice = slice(0, 1) index = (channel_slice,) + index if prediction_channel is not None: # use only the 'prediction_channel' logger.info(f"Using channel '{prediction_channel}'...") pred = np.expand_dims(pred[prediction_channel], axis=0) logger.info(f'Saving predictions for slice:{index}...') if avoid_block_artifacts: # unpad in order to avoid block artifacts in the output probability maps u_prediction, u_index = unpad(pred, index, volume_shape) # accumulate probabilities into the output prediction array prediction_map[u_index] += u_prediction # count voxel visits for normalization normalization_mask[u_index] += 1 else: # accumulate probabilities into the output prediction array prediction_map[index] += pred # count voxel visits for normalization normalization_mask[index] += 1 # save results to self._save_results(prediction_maps, normalization_masks, output_heads, h5_output_file, self.loader.dataset) # close the output H5 file h5_output_file.close() def _allocate_prediction_maps(self, output_shape, output_heads, output_file): # initialize the output prediction arrays prediction_maps = [np.zeros(output_shape, dtype='float32') for _ in range(output_heads)] # initialize normalization mask in order to average out probabilities of overlapping patches normalization_masks = [np.zeros(output_shape, dtype='uint8') for _ in range(output_heads)] return prediction_maps, normalization_masks def _save_results(self, prediction_maps, normalization_masks, output_heads, output_file, dataset): # save probability maps prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions') for prediction_map, normalization_mask, prediction_dataset in zip(prediction_maps, normalization_masks, prediction_datasets): prediction_map = prediction_map / normalization_mask if dataset.mirror_padding: pad_width = dataset.pad_width logger.info(f'Dataset loaded with mirror padding, pad_width: {pad_width}. Cropping before saving...') prediction_map = prediction_map[:, pad_width:-pad_width, pad_width:-pad_width, pad_width:-pad_width] logger.info(f'Saving predictions to: {output_file}/{prediction_dataset}...') output_file.create_dataset(prediction_dataset, data=prediction_map, compression="gzip") class LazyPredictor(StandardPredictor): """ Applies the model on the given dataset and saves the result in the `output_file` in the H5 format. Predicted patches are directly saved into the H5 and they won't be stored in memory. Since this predictor is slower than the `StandardPredictor` it should only be used when the predicted volume does not fit into RAM. The output dataset names inside the H5 is given by `des_dataset_name` config argument. 
If the argument is not present in the config 'predictions{n}' is used as a default dataset name, where `n` denotes the number of the output head from the network. Args: model (Unet3D): trained 3D UNet model used for prediction data_loader (torch.utils.data.DataLoader): input data loader output_file (str): path to the output H5 file config (dict): global config dict """ def __init__(self, model, loader, output_file, config, **kwargs): super().__init__(model, loader, output_file, config, **kwargs) def _allocate_prediction_maps(self, output_shape, output_heads, output_file): # allocate datasets for probability maps prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions') prediction_maps = [ output_file.create_dataset(dataset_name, shape=output_shape, dtype='float32', chunks=True, compression='gzip') for dataset_name in prediction_datasets] # allocate datasets for normalization masks normalization_datasets = self._get_output_dataset_names(output_heads, prefix='normalization') normalization_masks = [ output_file.create_dataset(dataset_name, shape=output_shape, dtype='uint8', chunks=True, compression='gzip') for dataset_name in normalization_datasets] return prediction_maps, normalization_masks def _save_results(self, prediction_maps, normalization_masks, output_heads, output_file, dataset): if dataset.mirror_padding: logger.warn( f'Mirror padding unsupported in LazyPredictor. Output predictions will be padded with pad_width: {dataset.pad_width}') prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions') normalization_datasets = self._get_output_dataset_names(output_heads, prefix='normalization') # normalize the prediction_maps inside the H5 for prediction_map, normalization_mask, prediction_dataset, normalization_dataset in zip(prediction_maps, normalization_masks, prediction_datasets, normalization_datasets): # split the volume into 4 parts and load each into the memory separately logger.info(f'Normalizing {prediction_dataset}...') z, y, x = prediction_map.shape[1:] # take slices which are 1/27 of the original volume patch_shape = (z // 3, y // 3, x // 3) for index in SliceBuilder._build_slices(prediction_map, patch_shape=patch_shape, stride_shape=patch_shape): logger.info(f'Normalizing slice: {index}') prediction_map[index] /= normalization_mask[index] # make sure to reset the slice that has been visited already in order to avoid 'double' normalization # when the patches overlap with each other normalization_mask[index] = 1 logger.info(f'Deleting {normalization_dataset}...') del output_file[normalization_dataset] class EmbeddingsPredictor(_AbstractPredictor): """ Applies the embedding model on the given dataset and saves the result in the `output_file` in the H5 format. The resulting volume is the segmentation itself (not the embedding vectors) obtained by clustering embeddings with HDBSCAN or MeanShift algorithm patch by patch and then stitching the patches together. 
""" def __init__(self, model, loader, output_file, config, clustering, iou_threshold=0.7, noise_label=-1, **kwargs): super().__init__(model, loader, output_file, config, **kwargs) self.iou_threshold = iou_threshold self.noise_label = noise_label self.clustering = clustering assert clustering in ['hdbscan', 'meanshift'], 'Only HDBSCAN and MeanShift are supported' logger.info(f'IoU threshold: {iou_threshold}') self.clustering_name = clustering self.clustering = self._get_clustering(clustering, kwargs) def predict(self): device = self.config['device'] output_heads = self.config['model'].get('output_heads', 1) logger.info(f'Running prediction on {len(self.loader)} patches...') # dimensionality of the the output segmentation volume_shape = self._volume_shape(self.loader.dataset) logger.info(f'The shape of the output segmentation (DHW): {volume_shape}') logger.info('Allocating segmentation array...') # initialize the output prediction arrays output_segmentations = [np.zeros(volume_shape, dtype='int32') for _ in range(output_heads)] # initialize visited_voxels arrays visited_voxels_arrays = [np.zeros(volume_shape, dtype='uint8') for _ in range(output_heads)] # Sets the module in evaluation mode explicitly self.model.eval() self.model.testing = True # Run predictions on the entire input dataset with torch.no_grad(): for batch, indices in self.loader: # logger.info(f'Predicting embeddings for slice:{index}') # send batch to device batch = batch.to(device) # forward pass embeddings = self.model(batch) # wrap predictions into a list if there is only one output head from the network if output_heads == 1: embeddings = [embeddings] for prediction, output_segmentation, visited_voxels_array in zip(embeddings, output_segmentations, visited_voxels_arrays): # convert to numpy array prediction = prediction.cpu().numpy() # iterate sequentially because of the current simple stitching that we're using for pred, index in zip(prediction, indices): # convert embeddings to segmentation with hdbscan clustering segmentation = self._embeddings_to_segmentation(pred) # stitch patches self._merge_segmentation(segmentation, index, output_segmentation, visited_voxels_array) # save results with h5py.File(self.output_file, 'w') as output_file: prediction_datasets = self._get_output_dataset_names(output_heads, prefix=f'segmentation/{self.clustering_name}') for output_segmentation, prediction_dataset in zip(output_segmentations, prediction_datasets): logger.info(f'Saving predictions to: {output_file}/{prediction_dataset}...') output_file.create_dataset(prediction_dataset, data=output_segmentation, compression="gzip") def _embeddings_to_segmentation(self, embeddings): """ Cluster embeddings vectors with HDBSCAN and return the segmented volume. Args: embeddings (ndarray): 4D (CDHW) embeddings tensor Returns: 3D (DHW) segmentation """ # shape of the output segmentation output_shape = embeddings.shape[1:] # reshape (C, D, H, W) -> (C, D * H * W) and transpose -> (D * H * W, C) flattened_embeddings = embeddings.reshape(embeddings.shape[0], -1).transpose() logger.info('Clustering embeddings...') # perform clustering and reshape in order to get the segmentation volume start = time.time() clusters = self.clustering.fit_predict(flattened_embeddings).reshape(output_shape) logger.info( f'Number of clusters found by {self.clustering}: {np.max(clusters)}. 
Duration: {time.time() - start} sec.') return clusters def _merge_segmentation(self, segmentation, index, output_segmentation, visited_voxels_array): """ Given the `segmentation` patch, its `index` in the `output_segmentation` array and the array visited voxels merge the segmented patch (`segmentation`) into the `output_segmentation` Args: segmentation (ndarray): segmented patch index (tuple): position of the patch inside `output_segmentation` volume output_segmentation (ndarray): current state of the output segmentation visited_voxels_array (ndarray): array of voxels visited so far (same size as `output_segmentation`); visited voxels will be marked by a number greater than 0 """ index = tuple(index) # get new unassigned label max_label = np.max(output_segmentation) + 1 # make sure there are no clashes between current segmentation patch and the output_segmentation # but keep the noise label noise_mask = segmentation == self.noise_label segmentation += int(max_label) segmentation[noise_mask] = self.noise_label # get the overlap mask in the current patch overlap_mask = visited_voxels_array[index] > 0 # get the new labels inside the overlap_mask new_labels = np.unique(segmentation[overlap_mask]) merged_labels = self._merge_labels(output_segmentation[index], new_labels, segmentation) # relabel new segmentation with the merged labels for current_label, new_label in merged_labels: segmentation[segmentation == new_label] = current_label # update the output_segmentation output_segmentation[index] = segmentation # visit the patch visited_voxels_array[index] += 1 def _merge_labels(self, current_segmentation, new_labels, new_segmentation): def _most_frequent_label(labels): unique, counts = np.unique(labels, return_counts=True) ind = np.argmax(counts) return unique[ind] result = [] # iterate over new_labels and merge regions if the IoU exceeds a given threshold for new_label in new_labels: # skip 'noise' label assigned by hdbscan if new_label == self.noise_label: continue new_label_mask = new_segmentation == new_label # get only the most frequent overlapping label most_frequent_label = _most_frequent_label(current_segmentation[new_label_mask]) # skip 'noise' label if most_frequent_label == self.noise_label: continue current_label_mask = current_segmentation == most_frequent_label # compute Jaccard index iou = np.bitwise_and(new_label_mask, current_label_mask).sum() / np.bitwise_or(new_label_mask, current_label_mask).sum() if iou > self.iou_threshold: # merge labels result.append((most_frequent_label, new_label)) return result def _get_clustering(self, clustering_alg, kwargs): logger.info(f'Using {clustering_alg} for clustering') if clustering_alg == 'hdbscan': min_cluster_size = kwargs.get('min_cluster_size', 50) min_samples = kwargs.get('min_samples', None), metric = kwargs.get('metric', 'euclidean') cluster_selection_method = kwargs.get('cluster_selection_method', 'eom') logger.info(f'HDBSCAN params: min_cluster_size: {min_cluster_size}, min_samples: {min_samples}') return hdbscan.HDBSCAN(min_cluster_size=min_cluster_size, min_samples=min_samples, metric=metric, cluster_selection_method=cluster_selection_method) else: bandwidth = kwargs['bandwidth'] logger.info(f'MeanShift params: bandwidth: {bandwidth}, bin_seeding: True') # use fast MeanShift with bin seeding return MeanShift(bandwidth=bandwidth, bin_seeding=True)
2.3125
2
var/spack/repos/builtin/packages/visionary-dev-tools/package.py
electronicvisions/spack
2
1582
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) import os.path as osp class VisionaryDevTools(Package): """Developer convenience packages common to all visionary development meta packages. Application specific build tools belong to the dedicated meta packages.""" homepage = '' # some random tarball, to make `spack fetch --dependencies visionary-defaults` work url = 'https://github.com/electronicvisions/spack/archive/v0.8.tar.gz' # This is only a dummy tarball (see difference between version numbers) # TODO: as soon as a MetaPackage-concept has been merged, please update this package version('1.0', '372ce038842f20bf0ae02de50c26e85d', url='https://github.com/electronicvisions/spack/archive/v0.8.tar.gz') depends_on('ack') depends_on('autoconf') depends_on('automake') depends_on('bash-completion') depends_on('bazel') depends_on('bear') depends_on('cairo +X') depends_on('cloc') depends_on('cmake') depends_on('connect-proxy') depends_on('cppcheck +htmlreport') depends_on('cquery') depends_on('doxygen+graphviz') depends_on('emacs ~X') depends_on('gdb') depends_on('genpybind') depends_on('git+tcltk') depends_on('git-fat-git') depends_on('gtkplus') depends_on('imagemagick') depends_on('jq') depends_on('libpcap') depends_on('libtool') depends_on('llvm+visionary+python~libcxx build_type=Release') depends_on('mercurial') depends_on('mosh') depends_on('munge') depends_on('ncdu') depends_on('node-js') depends_on('octave+fftw') depends_on('openssh') depends_on('pigz') depends_on('pkg-config') depends_on('py-autopep8') depends_on('py-black', when="^[email protected]:") depends_on('py-configargparse') depends_on('py-doxypypy') depends_on('py-flake8') depends_on('py-gdbgui') depends_on('py-git-review') depends_on('py-ipython') depends_on('py-jedi') depends_on('py-junit-xml') depends_on('py-language-server') depends_on('py-line-profiler') depends_on('py-nose') depends_on('py-nose2') depends_on('py-memory-profiler') depends_on('py-pudb') depends_on('py-pylint@:1.999.999', when="^python@:2.999.999") depends_on('py-pylint', when="^[email protected]:") depends_on('py-pyserial') depends_on('py-pytest') depends_on('py-pytest-xdist') depends_on('py-ranger-fm') depends_on('py-sqlalchemy') depends_on('py-virtualenv') depends_on('py-xmlrunner') depends_on('py-yq') depends_on('rtags') depends_on('tar') depends_on('texinfo') # ECM (2020-05-14): removed 'the-silver-searcher' due to build fail on [email protected] depends_on('tig') depends_on('time') depends_on('tmux') depends_on('units') depends_on('valgrind') depends_on('verilator') depends_on('vim +python +ruby +perl +cscope +huge +x') depends_on('visionary-xilinx') depends_on('wget') depends_on('yaml-cpp+shared') depends_on('zsh') def install(self, spec, prefix): mkdirp(prefix.etc) # store a copy of this package. filename = osp.basename(osp.dirname(__file__)) # gives name of parent folder install(__file__, join_path(prefix.etc, filename + '.py')) # we could create some filesystem view here?
1.492188
1
extra/convertBAMtoPILFER.py
MartaLoBalastegui/XICRA
3
1583
#usr/bin/env python ## useful imports import time import io import os import re import sys from sys import argv import subprocess ## ARGV if len (sys.argv) < 5: print ("\nUsage:") print ("python3 %s bam_file folder bedtools_bin samtools_bin logfile\n" %os.path.realpath(__file__)) exit() bam_file = os.path.abspath(argv[1]) folder = argv[2] bedtools_exe = argv[3] samtools_exe = argv[4] logFile = argv[5] # start output_file = open(logFile, 'a') output_file.write("\nConvert BAM to Pilfer Input file:\n") ## Variables dirname_name = os.path.dirname(bam_file) split_name = os.path.splitext( os.path.basename(bam_file) ) bed_file = folder + '/' + split_name[0] + '.bed' sam_file = folder + '/' + split_name[0] + '.sam' pilfer_tmp = folder + '/' + split_name[0] + '.tmp.pilfer.bed' pilfer_file = folder + '/' + split_name[0] + '.pilfer.bed' ## START print ("\n+ Converting BAM file into PILFER input file") ## generate bed file with bedtools bamtobed -i bam_file if (os.path.isfile(bed_file)): print ("\t+ File %s already exists" %bed_file) else: cmd_bedtools = "%s bamtobed -i %s > %s" %(bedtools_exe, bam_file, bed_file) output_file.write(cmd_bedtools) output_file.write("\n") try: subprocess.check_output(cmd_bedtools, shell = True) except Exception as exc: print ('***ERROR:') print (cmd_bedtools) print('bedtools command generated an exception: %s' %exc) exit() ## generate samtools if (os.path.isfile(sam_file)): print ("\t+ File %s already exists" %sam_file) else: cmd_samtools = "%s view %s > %s" %(samtools_exe, bam_file, sam_file) output_file.write(cmd_samtools) output_file.write("\n") try: subprocess.check_output(cmd_samtools, shell = True) except Exception as exc: print ('***ERROR:') print (cmd_samtools) print('samtools view command generated an exception: %s' %exc) exit() ## generate paste filter tmp file if (os.path.isfile(pilfer_tmp)): print ("\t+ File %s already exists" %pilfer_tmp) else: ## paste Aligned.sortedByCoord.out.bed Aligned.sortedByCoord.out.sam | awk -v "OFS=\t" '{print $1, $2, $3, $16, $6}' cmd_paste = "paste %s %s | awk -v \"OFS=\t\" \'{print $1, $2, $3, $16, $6}\' > %s" %(bed_file, sam_file, pilfer_tmp) output_file.write(cmd_paste) output_file.write("\n") try: subprocess.check_output(cmd_paste, shell = True) except Exception as exc: print ('***ERROR:') print (cmd_paste) print('paste bed sam command generated an exception: %s' %exc) exit() ## parse pilfer tmp file counter = 1 previous_line = () # Open file OUT output_file = open(pilfer_file, 'w') # Open file IN fileHandler = open (pilfer_tmp, "r") while True: # Get next line from file line = fileHandler.readline().strip() # If line is empty then end of file reached if not line : break; seq = line.split('\t')[3] real_seq = seq.split('::PU') seq_len = len(str(real_seq[0])) ## Discard smaller if (previous_line): if (previous_line == line): line = previous_line counter += 1 else: line_split = previous_line.split('\t') output_file.write('%s\t%s\t%s\t%s::PI\t%s\t%s\n' %(line_split[0], line_split[1], line_split[2], line_split[3], counter, line_split[4])) #counter += 1 while True: #get next line next_line = fileHandler.readline().strip() if (next_line == line): counter += 1 else: line_split = line.split('\t') output_file.write('%s\t%s\t%s\t%s::PI\t%s\t%s\n' %(line_split[0], line_split[1], line_split[2], line_split[3], counter, line_split[4])) previous_line = next_line counter = 1 break; ## close and finish fileHandler.close() output_file.close()
2.40625
2
day7/main5list.py
nikhilsamninan/python-files
0
1584
a="<NAME>ought a butter the butter was bitter so betty bought a better butter which was not bitter" v=[a[-1] for a in a.split() if(len(a)%2==0)] print(v)
3.515625
4
app/reader.py
lcarnevale/proxy-mqtt2influx
0
1585
<reponame>lcarnevale/proxy-mqtt2influx<filename>app/reader.py<gh_stars>0 # -*- coding: utf-8 -*- #!/usr/bin/env python """Writer class based on InfluxDB This implementation does its best to follow the Robert Martin's Clean code guidelines. The comments follows the Google Python Style Guide: https://github.com/google/styleguide/blob/gh-pages/pyguide.md """ __copyright__ = 'Copyright 2021, FCRlab at University of Messina' __author__ = '<NAME> <<EMAIL>>' __credits__ = '' __description__ = 'Writer class based on InfluxDB' import time import logging import threading import persistqueue from datetime import datetime from influxdb_client.client.write_api import SYNCHRONOUS from influxdb_client import InfluxDBClient, Point, WritePrecision class Reader: def __init__(self, host, port, token, organization, bucket, mutex, verbosity): self.__url = "http://%s:%s" % (host, port) self.__token = token self.__organization = organization self.__bucket = bucket self.__mutex = mutex self.__reader = None self.__setup_logging(verbosity) def __setup_logging(self, verbosity): format = "%(asctime)s %(filename)s:%(lineno)d %(levelname)s - %(message)s" filename='log/mqtt2influx.log' datefmt = "%d/%m/%Y %H:%M:%S" level = logging.INFO if (verbosity): level = logging.DEBUG logging.basicConfig(filename=filename, filemode='a', format=format, level=level, datefmt=datefmt) def setup(self): self.__reader = threading.Thread( target = self.__reader_job, args = (self.__url, self.__token, self.__organization, self.__bucket) ) def __reader_job(self, url, token, organization, bucket): self.__mutex.acquire() q = persistqueue.SQLiteQueue('data', multithreading=True, auto_commit=True) self.__mutex.release() client = InfluxDBClient(url=url, token=token) write_api = client.write_api(write_options=SYNCHRONOUS) try: while (True): raw_data = q.get() logging.debug("Just got new data") logging.debug("Parsing data points") data = [ { "measurement": raw_data['measurement'], "tags": raw_data['tags'], "fields": raw_data['fields'], "time": raw_data['time'] } ] write_api.write(bucket, organization, data) logging.info("Data into InfluxDB") time.sleep(0.3) except KeyboardInterrupt: pass def start(self): self.__reader.start()
2.46875
2
example_problems/tutorial/tiling_mxn-boards_with_1x2-boards/services/tell_if_tilable/tell_if_tilable_server.py
DottaPaperella/TALight
0
1586
#!/usr/bin/env python3
from sys import stderr, exit, argv
from random import randrange

#from TALinputs import TALinput
from multilanguage import Env, Lang, TALcolors

# METADATA OF THIS TAL_SERVICE:
problem = "tiling_mxn-boards_with_1x2-boards"
service = "is_tilable"
args_list = [
    ('m', int),
    ('n', int),
    ('my_conjecture', str),
    ('h', int),
    ('k', int),
    ('lang', str),
    ('ISATTY', bool),
]

ENV = Env(problem, service, args_list)
TAc = TALcolors(ENV)
LANG = Lang(ENV, TAc, lambda fstring: eval(f"f'{fstring}'"))
TAc.print(LANG.opening_msg, "green")

# START CODING YOUR SERVICE:
assert ENV['h'] == 1
assert ENV['k'] == 2
print()
if (ENV['m'] * ENV['n']) % 2 == 1:
    if ENV['my_conjecture'] == "yes":
        TAc.NO()
        print(LANG.render_feedback("FALSE-is-not-tilable", f"Contrary to what you have asserted, the {ENV['m']}x{ENV['n']}-grid is NOT tilable. If you are not convinced you can submit a tiling of that grid to the service 'check_my_tiling'."))
    if ENV['my_conjecture'] == "no":
        TAc.OK()
        print(LANG.render_feedback("TRUE-is-not-tilable", f"You are perfectly right: the {ENV['m']}x{ENV['n']}-grid is NOT tilable."))
if (ENV['m'] * ENV['n']) % 2 == 0:
    if ENV['my_conjecture'] == "yes":
        TAc.OK()
        print(LANG.render_feedback("TRUE-is-tilable", f"We agree on the fact that the {ENV['m']}x{ENV['n']}-grid is tilable. If you want to exhibit us a tiling for this grid you can submit it to the service 'check_my_tiling'."))
    if ENV['my_conjecture'] == "no":
        TAc.NO()
        print(LANG.render_feedback("FALSE-is-tilable", f"No, the {ENV['m']}x{ENV['n']}-grid is tilable. If you can not believe a tiling of the {ENV['m']}x{ENV['n']}-grid exists try the service 'gimme_hints_on_a_tiling'."))

exit(0)
2.875
3
tests/unit/peapods/runtimes/remote/ssh/test_ssh_remote.py
yk/jina
1
1587
import pytest

from jina.enums import RemoteAccessType
from jina.flow import Flow
from jina.parser import set_pea_parser, set_pod_parser
from jina.peapods.pods import BasePod
from jina.peapods.runtimes.remote.ssh import SSHRuntime
from jina.proto import jina_pb2


@pytest.mark.skip('works locally, but until I find out how to mock ssh, this has to be skipped')
def test_ssh_pea():
    p = set_pea_parser().parse_args(['--host', '[email protected]', '--timeout', '5000'])
    with SSHRuntime(p, kind='pea') as pp:
        assert pp.status.envelope.status.code == jina_pb2.StatusProto.READY
    assert pp.status is None


@pytest.mark.skip('works locally, but until I find out how to mock ssh, this has to be skipped')
def test_ssh_pod():
    p = set_pod_parser().parse_args(['--host', '[email protected]', '--timeout', '5000'])
    with SSHRuntime(p, kind='pod') as pp:
        assert pp.status.envelope.status.code == jina_pb2.StatusProto.READY
    assert pp.status is None


@pytest.mark.skip('not implemented yet')
def test_ssh_mutable_pod():
    p = set_pod_parser().parse_args(['--host', '[email protected]', '--timeout', '5000'])
    p = BasePod(p)
    with SSHRuntime(p, kind='pod') as pp:
        assert pp.status.envelope.status.code == jina_pb2.StatusProto.READY
    assert pp.status is None


@pytest.mark.skip('not implemented yet')
def test_flow():
    f = Flow().add().add(host='[email protected]', remote_access=RemoteAccessType.SSH)
    with f:
        pass
2.015625
2
waio/factory/models/basic.py
dotX12/waio
24
1588
from dataclasses import dataclass


@dataclass
class PayloadSender:
    phone: int
    name: str


@dataclass
class PayloadBaseModel:
    sender: PayloadSender
    payload_id: str
2.1875
2
Uber/validExpression.py
Nithanaroy/random_scripts
0
1589
def main(expr):
    openingParams = '({['
    closingParams = ')}]'
    stack = []
    for c in expr:
        if c in openingParams:
            stack.append(c)
        elif c in closingParams:
            if not stack:
                # closing bracket with no matching opener
                return False
            topOfStack = stack.pop()
            openingIndex = openingParams.find(topOfStack)
            closingIndex = closingParams.find(c)
            # compare positions with != (identity check `is not` is unreliable for ints)
            if openingIndex != closingIndex:
                return False
    if len(stack) == 0:
        return True
    return False


if __name__ == '__main__':
    print(main('{(abc})'))
3.46875
3
sgains/tool.py
KrasnitzLab/sgains
1
1590
import os import sys from copy import deepcopy import traceback import functools from collections import defaultdict import yaml from argparse import ArgumentParser,\ RawDescriptionHelpFormatter, ArgumentDefaultsHelpFormatter from sgains.configuration.parser import SgainsValidator, Config from sgains.configuration.schema import sgains_schema from sgains.executor import Executor from sgains.pipelines.mappableregions_pipeline import MappableRegionsPipeline from sgains.pipelines.genomeindex_pipeline import GenomeIndexPipeline from sgains.pipelines.bins_pipeline import BinsPipeline from sgains.pipelines.mapping_pipeline import MappingPipeline from sgains.pipelines.extract_10x_pipeline import Extract10xPipeline from sgains.pipelines.varbin_10x_pipeline import Varbin10xPipeline from sgains.pipelines.varbin_pipeline import VarbinPipeline from sgains.pipelines.r_pipeline import Rpipeline from sgains.pipelines.composite_pipeline import CompositePipeline SGAINS_COMMANDS = { "genomeindex": { "config_groups": ["aligner", "genome"], "help": "builds appropriate hisat2 or bowtie index for the " "reference genome", }, "mappable_regions": { "config_groups": ["aligner", "genome", "mappable_regions", "sge"], "help": "finds all mappable regions in specified genome", }, "bins": { "config_groups": ["genome", "mappable_regions", "bins", "sge"], "help": "calculates all bins boundaries for specified bins count " "and read length", }, "prepare": { "config_groups": [ "aligner", "genome", "mappable_regions", "bins", "sge"], "help": "combines all preparation steps ('genome', 'mappable-regions' " "and 'bins') into single command", }, "mapping": { "config_groups": ["aligner", "genome", "reads", "mapping", "sge"], "help": "performs mapping of cells reads to the reference genome", }, "extract_10x": { "config_groups": [ "data_10x", "reads", "sge"], "help": "extracts cells reads from 10x Genomics datasets", }, "varbin": { "config_groups": ["bins", "mapping", "varbin", "sge"], "help": "applies varbin algorithm to count read mappings in each bin", }, "varbin_10x": { "config_groups": [ "data_10x", "bins", "varbin", "sge"], "help": "applies varbin algorithm to count read mappings in each bin " "to 10x Genomics datasets without realigning", }, "scclust": { "config_groups": ["bins", "varbin", "scclust"], "help": "segmentation and clustering based bin counts and " "preparation of the SCGV input data" }, "process": { "config_groups": [ "aligner", "genome", "reads", "mapping", "bins", "varbin", "scclust", "sge"], "help": "combines all process steps ('mapping', 'varbin' " "and 'scclust') into single command" }, } def build_common_options(parser): parser.add_argument( "-v", "--verbose", dest="verbose", action="count", help="set verbosity level [default: %(default)s]", default=0 ) parser.add_argument( "-c", "--config", dest="config", help="configuration file", metavar="path" ) parser.add_argument( "-n", "--dry-run", dest="dry_run", action="store_true", help="perform a trial run with no changes made", default=False ) parser.add_argument( "--force", "-F", dest="force", action="store_true", help="allows overwriting nonempty results directory", default=False ) parser.add_argument( "--parallel", "-p", dest="parallel", help="number of task to run in parallel", type=int, default=1 ) parser.add_argument( "--sge", dest="sge", action="store_true", help="parallelilizes commands using SGE cluster manager", default=False ) def _get_config_value(config, group_name, name): if config is None: return None group = config.config.get(group_name) if group 
is None: return None result = getattr(group, name) return result def build_cli_options(argparser, command=None, config=None, sge_flag=False): work_dirname = os.getcwd() if config is not None: work_dirname = config.work_dirname validator = SgainsValidator( deepcopy(sgains_schema), work_dirname=work_dirname) if command is None: config_groups = list(validator.schema.keys()) else: assert command in SGAINS_COMMANDS command = SGAINS_COMMANDS[command] config_groups = command["config_groups"] for group_name in config_groups: if group_name == "sge" and not sge_flag: continue group = validator.schema.get(group_name) group_parser = argparser.add_argument_group(f"{group_name} group:") assert group["type"] == "dict", (group_name, group) group_schema = group["schema"] for arg_name, arg_spec in group_schema.items(): name = f"--{arg_name.replace('_', '-')}" arg_type = str arg_type = arg_spec.get("type", "string") if arg_type == "string": arg_type = str elif arg_type == "integer": arg_type = int elif arg_type == "float": arg_type = float elif arg_type == "list": arg_type = list else: raise ValueError(f"wrong argument type {arg_type}") help_data = None meta_data = arg_spec.get("meta") if meta_data is not None: help_data = meta_data.get("help") arg_default = _get_config_value(config, group_name, arg_name) if arg_default is None: arg_default = arg_spec.get("default") group_parser.add_argument( name, help=help_data, dest=arg_name, type=arg_type, default=arg_default) return argparser def parse_cli_options(args): config_dict = defaultdict(dict) work_dirname = os.getcwd() if args.config is not None: assert os.path.exists(args.config), args.config with open(args.config, "r") as infile: config_dict = yaml.safe_load(infile) work_dirname = os.path.dirname(args.config) validator = SgainsValidator( deepcopy(sgains_schema), work_dirname=work_dirname) result = defaultdict(dict) config_groups = list(validator.schema.keys()) for group_name in config_groups: if group_name == "sge" and not args.sge: continue group = validator.schema.get(group_name) group_schema = group.get("schema") if group_schema is None: continue group_result = {} for arg_name in group_schema.keys(): arg_value = getattr(args, arg_name, None) if arg_value is not None: group_result[arg_name] = arg_value else: config_value = config_dict.get(group_name, None) if config_value is not None: config_value = config_value.get(arg_name, None) if config_value is not None: group_result[arg_name] = config_value if group_result: result[group_name] = group_result config = Config.from_dict(result, work_dirname) config.verbose = args.verbose config.config_file = args.config config.dry_run = args.dry_run config.force = args.force config.parallel = args.parallel config.sge = args.sge return config def main(argv=sys.argv[1:]): program_name = os.path.basename(sys.argv[0]) program_shortdesc = \ 'sgains - sparse genomic analysis of individual nuclei by ' \ 'sequencing pipeline' program_description = '''%s USAGE ''' % (program_shortdesc, ) try: config = Config.parse_argv(argv) sge_flag = Config.check_sge_argv(argv) argparser = ArgumentParser( description=program_description, formatter_class=ArgumentDefaultsHelpFormatter) build_common_options(argparser) subparsers = argparser.add_subparsers( title="sGAINS subcommands" ) for command in SGAINS_COMMANDS: command_name = command.replace("_", "-") command_help = SGAINS_COMMANDS[command].get("help", "") subparser = subparsers.add_parser( name=command_name, help=command_help, formatter_class=ArgumentDefaultsHelpFormatter ) 
build_cli_options(subparser, command, config, sge_flag=sge_flag) subparser.set_defaults(func=functools.partial(execute, command)) args = argparser.parse_args(argv) args.func(args) except KeyboardInterrupt: traceback.print_exc() return 0 except Exception as e: traceback.print_exc() indent = len(program_name) * " " sys.stderr.write(program_name + ": " + repr(e) + "\n") sys.stderr.write(indent + " for help use --help") sys.stderr.write('\n') return 2 def create_pipeline(command, config): if command == "genomeindex": return GenomeIndexPipeline(config) elif command == "mappable_regions": return MappableRegionsPipeline(config) elif command == "bins": return BinsPipeline(config) elif command == "mapping": return MappingPipeline(config) elif command == "varbin": return VarbinPipeline(config) elif command == "scclust": return Rpipeline(config) elif command == "extract_10x": return Extract10xPipeline(config) elif command == "varbin_10x": return Varbin10xPipeline(config) elif command == "prepare": pipelines = [ GenomeIndexPipeline(config), MappableRegionsPipeline(config), BinsPipeline(config), ] return CompositePipeline(config, pipelines) elif command == "process": pipelines = [ MappingPipeline(config), VarbinPipeline(config), Rpipeline(config), ] return CompositePipeline(config, pipelines) raise ValueError(f"Unexpected command: {command}") def execute(command, args): config = parse_cli_options(args) pipeline = create_pipeline(command, config) assert pipeline is not None, command executor = Executor(config) executor.run_pipeline(pipeline) if __name__ == "__main__": sys.exit(main())
1.953125
2
lib/modeling/VGG16.py
rsumner31/Detectron
429
1591
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################

"""VGG16 from https://arxiv.org/abs/1409.1556."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from core.config import cfg


def add_VGG16_conv5_body(model):
    model.Conv('data', 'conv1_1', 3, 64, 3, pad=1, stride=1)
    model.Relu('conv1_1', 'conv1_1')
    model.Conv('conv1_1', 'conv1_2', 64, 64, 3, pad=1, stride=1)
    model.Relu('conv1_2', 'conv1_2')
    model.MaxPool('conv1_2', 'pool1', kernel=2, pad=0, stride=2)
    model.Conv('pool1', 'conv2_1', 64, 128, 3, pad=1, stride=1)
    model.Relu('conv2_1', 'conv2_1')
    model.Conv('conv2_1', 'conv2_2', 128, 128, 3, pad=1, stride=1)
    model.Relu('conv2_2', 'conv2_2')
    model.MaxPool('conv2_2', 'pool2', kernel=2, pad=0, stride=2)
    model.StopGradient('pool2', 'pool2')
    model.Conv('pool2', 'conv3_1', 128, 256, 3, pad=1, stride=1)
    model.Relu('conv3_1', 'conv3_1')
    model.Conv('conv3_1', 'conv3_2', 256, 256, 3, pad=1, stride=1)
    model.Relu('conv3_2', 'conv3_2')
    model.Conv('conv3_2', 'conv3_3', 256, 256, 3, pad=1, stride=1)
    model.Relu('conv3_3', 'conv3_3')
    model.MaxPool('conv3_3', 'pool3', kernel=2, pad=0, stride=2)
    model.Conv('pool3', 'conv4_1', 256, 512, 3, pad=1, stride=1)
    model.Relu('conv4_1', 'conv4_1')
    model.Conv('conv4_1', 'conv4_2', 512, 512, 3, pad=1, stride=1)
    model.Relu('conv4_2', 'conv4_2')
    model.Conv('conv4_2', 'conv4_3', 512, 512, 3, pad=1, stride=1)
    model.Relu('conv4_3', 'conv4_3')
    model.MaxPool('conv4_3', 'pool4', kernel=2, pad=0, stride=2)
    model.Conv('pool4', 'conv5_1', 512, 512, 3, pad=1, stride=1)
    model.Relu('conv5_1', 'conv5_1')
    model.Conv('conv5_1', 'conv5_2', 512, 512, 3, pad=1, stride=1)
    model.Relu('conv5_2', 'conv5_2')
    model.Conv('conv5_2', 'conv5_3', 512, 512, 3, pad=1, stride=1)
    blob_out = model.Relu('conv5_3', 'conv5_3')
    return blob_out, 512, 1. / 16.


def add_VGG16_roi_fc_head(model, blob_in, dim_in, spatial_scale):
    model.RoIFeatureTransform(
        blob_in,
        'pool5',
        blob_rois='rois',
        method=cfg.FAST_RCNN.ROI_XFORM_METHOD,
        resolution=7,
        sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO,
        spatial_scale=spatial_scale
    )
    model.FC('pool5', 'fc6', dim_in * 7 * 7, 4096)
    model.Relu('fc6', 'fc6')
    model.FC('fc6', 'fc7', 4096, 4096)
    blob_out = model.Relu('fc7', 'fc7')
    return blob_out, 4096
1.695313
2
setup.py
yangjing1127/xmind2testcase
537
1592
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
import sys
from shutil import rmtree
from setuptools import setup, find_packages, Command

about = {}
here = os.path.abspath(os.path.dirname(__file__))
with io.open(os.path.join(here, 'xmind2testcase', '__about__.py'), encoding='utf-8') as f:  # custom
    exec(f.read(), about)

with io.open('README.md', encoding='utf-8') as f:
    long_description = f.read()

install_requires = [  # custom
    "xmind",
    "flask",
    "arrow",
]


class PyPiCommand(Command):
    """
    Build and publish this package and make a tag.

    Support: python setup.py pypi

    Copied from requests_html
    """

    user_options = []

    @staticmethod
    def status(s):
        """Prints things in green color."""
        print('\033[0;32m{0}\033[0m'.format(s))

    def initialize_options(self):
        """ override """
        pass

    def finalize_options(self):
        """ override """
        pass

    def run(self):
        self.status('Building Source and Wheel (universal) distribution...')
        os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))

        self.status('Uploading the package to PyPi via Twine...')
        os.system('twine upload dist/*')

        self.status('Publishing git tags...')
        os.system('git tag v{0}'.format(about['__version__']))
        os.system('git push --tags')

        try:
            self.status('Removing current build artifacts...')
            rmtree(os.path.join(here, 'dist'))
            rmtree(os.path.join(here, 'build'))
            rmtree(os.path.join(here, 'xmind2testcase.egg-info'))  # custom
        except OSError:
            pass

        self.status('Congratulations! Upload PyPi and publish git tag successfully...')
        sys.exit()


setup(
    name=about['__title__'],
    version=about['__version__'],
    description=about['__description__'],
    long_description=long_description,
    long_description_content_type='text/markdown',
    keywords=about['__keywords__'],
    author=about['__author__'],
    author_email=about['__author_email__'],
    url=about['__url__'],
    license=about['__license__'],
    packages=find_packages(exclude=['tests', 'test.*', 'docs']),  # custom
    package_data={  # custom
        '': ['README.md'],
        'webtool': ['static/*', 'static/css/*', 'static/guide/*', 'templates/*', 'schema.sql'],
    },
    install_requires=install_requires,
    extras_require={},
    python_requires='>=3.0, <4',  # custom
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    entry_points={  # custom
        'console_scripts': [
            'xmind2testcase=xmind2testcase.cli:cli_main',
        ]
    },
    cmdclass={
        # python3 setup.py pypi
        'pypi': PyPiCommand
    }
)
2.03125
2
skultrafast/styles.py
Tillsten/skultrafast
10
1593
<reponame>Tillsten/skultrafast
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 17 21:33:24 2015

@author: Tillsten
"""

import matplotlib
import matplotlib.pyplot as plt
import numpy as np

tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
             (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
             (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
             (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
             (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]

tableau20 = [(r/255., g/255., b/255.) for r, g, b in tableau20]

#plt.rcParams['savefig.dpi'] = 110
#plt.rcParams['font.family'] = 'Vera Sans'

out_ticks = {'xtick.direction': 'out',
             'xtick.major.width': 1.5,
             'xtick.minor.width': 1,
             'xtick.major.size': 6,
             'xtick.minor.size': 3,
             'xtick.minor.visible': True,
             'ytick.direction': 'out',
             'ytick.major.width': 1.5,
             'ytick.minor.width': 1,
             'ytick.major.size': 6,
             'ytick.minor.size': 3,
             'ytick.minor.visible': True,
             'axes.spines.top': False,
             'axes.spines.right': False,
             'text.hinting': True,
             'axes.titlesize': 'xx-large',
             'axes.titleweight': 'semibold',
             }

plt.figure(figsize=(6, 4))
with plt.style.context(out_ticks):
    ax = plt.subplot(111)
    x = np.linspace(0, 7, 1000)
    y = np.exp(-x/1.5)*np.cos(x/1*(2*np.pi))  # *np.cos(x/0.05*(2*np.pi))
    l, = plt.plot(x, np.exp(-x/1.5), lw=0.5, color='grey')
    l, = plt.plot(x, -np.exp(-x/1.5), lw=0.5, color='grey')
    l, = plt.plot(x, y, lw=1.1)
    #l.set_clip_on(0)
    plt.tick_params(which='both', top=False, right=False)
    plt.margins(0.01)
    ax.text(7, 1, r'$y(t)=\exp\left(-t/1.5\right)\cos(\omega_1t)\cos(\omega_2t)$',
            fontsize=18, va='top', ha='right')
    #plt.title("Hallo")
    plt.setp(plt.gca(), xlabel='Time [s]', ylabel='Amplitude')

    ax = plt.axes([0.57, 0.25, 0.3, .2])
    # Use floor division so the slice index is an int (required on Python 3).
    #ax.plot(np.fft.fftfreq(x.size)[:y.size//2], abs(np.fft.fft(y))[:y.size//2])
    ax.fill_between(np.fft.fftfreq(x.size, x[1]-x[0])[:y.size//2],
                    abs(np.fft.fft(y))[:y.size//2], alpha=0.2, color='r')
    ax.set_xlim(0, 10)
    ax.set_xlabel("Frequency")
    ax.xaxis.labelpad = 1
    plt.locator_params(nbins=4)
    plt.tick_params(which='both', top=False, right=False)
    plt.tick_params(which='minor', bottom=False, left=False)
    #plt.grid(1, axis='y', linestyle='-', alpha=0.3, lw=.5)

plt.show()
1.882813
2
src/oci/devops/models/github_build_run_source.py
ezequielramos/oci-python-sdk
3
1594
# coding: utf-8 # Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. from .build_run_source import BuildRunSource from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class GithubBuildRunSource(BuildRunSource): """ Specifies details of build run through GitHub. """ def __init__(self, **kwargs): """ Initializes a new GithubBuildRunSource object with values from keyword arguments. The default value of the :py:attr:`~oci.devops.models.GithubBuildRunSource.source_type` attribute of this class is ``GITHUB`` and it should not be changed. The following keyword arguments are supported (corresponding to the getters/setters of this class): :param source_type: The value to assign to the source_type property of this GithubBuildRunSource. Allowed values for this property are: "MANUAL", "GITHUB", "GITLAB", "DEVOPS_CODE_REPOSITORY" :type source_type: str :param trigger_id: The value to assign to the trigger_id property of this GithubBuildRunSource. :type trigger_id: str :param trigger_info: The value to assign to the trigger_info property of this GithubBuildRunSource. :type trigger_info: oci.devops.models.TriggerInfo """ self.swagger_types = { 'source_type': 'str', 'trigger_id': 'str', 'trigger_info': 'TriggerInfo' } self.attribute_map = { 'source_type': 'sourceType', 'trigger_id': 'triggerId', 'trigger_info': 'triggerInfo' } self._source_type = None self._trigger_id = None self._trigger_info = None self._source_type = 'GITHUB' @property def trigger_id(self): """ **[Required]** Gets the trigger_id of this GithubBuildRunSource. The trigger that invoked the build run. :return: The trigger_id of this GithubBuildRunSource. :rtype: str """ return self._trigger_id @trigger_id.setter def trigger_id(self, trigger_id): """ Sets the trigger_id of this GithubBuildRunSource. The trigger that invoked the build run. :param trigger_id: The trigger_id of this GithubBuildRunSource. :type: str """ self._trigger_id = trigger_id @property def trigger_info(self): """ **[Required]** Gets the trigger_info of this GithubBuildRunSource. :return: The trigger_info of this GithubBuildRunSource. :rtype: oci.devops.models.TriggerInfo """ return self._trigger_info @trigger_info.setter def trigger_info(self, trigger_info): """ Sets the trigger_info of this GithubBuildRunSource. :param trigger_info: The trigger_info of this GithubBuildRunSource. :type: oci.devops.models.TriggerInfo """ self._trigger_info = trigger_info def __repr__(self): return formatted_flat_dict(self) def __eq__(self, other): if other is None: return False return self.__dict__ == other.__dict__ def __ne__(self, other): return not self == other
2.03125
2
aiida_fleur/tests/tools/test_common_fleur_wf.py
anoopkcn/aiida-fleur
0
1595
<filename>aiida_fleur/tests/tools/test_common_fleur_wf.py<gh_stars>0 from __future__ import absolute_import import pytest import os # is_code def test_is_code_interface(fixture_code): from aiida_fleur.tools.common_fleur_wf import is_code assert is_code('random_string') is None assert is_code('fleur.inpGUT') is None assert is_code(99999) is None code = fixture_code('fleur.inpgen') code.store() assert is_code(code.uuid) assert is_code(code.pk) assert is_code('@'.join([code.label, code.get_computer_name()])) assert is_code(code) def test_get_inputs_fleur(): ''' Tests if get_inputs_fleur assembles inputs correctly. Note it is the work of FleurCalculation to check if input types are correct i.e. 'code' is a Fleur code etc. ''' from aiida_fleur.tools.common_fleur_wf import get_inputs_fleur from aiida.orm import Dict inputs = {'code': 'code', 'remote': 'remote', 'fleurinp': 'fleurinp', 'options': {'custom_scheduler_commands': 'test_command'}, 'label': 'label', 'description': 'description', 'settings': {'test': 1}, 'serial': False} results = get_inputs_fleur(**inputs) out_options = results['options'].get_dict() out_settings = results['settings'].get_dict() assert results['code'] == 'code' assert results['fleurinpdata'] == 'fleurinp' assert results['parent_folder'] == 'remote' assert results['description'] == 'description' assert results['label'] == 'label' assert out_options == {'custom_scheduler_commands': 'test_command', 'withmpi': True} assert out_settings == {'test': 1} inputs = {'code': 'code', 'remote': 'remote', 'fleurinp': 'fleurinp', 'options': {'custom_scheduler_commands': 'test_command'}, 'serial': True} results = get_inputs_fleur(**inputs) out_options = results['options'].get_dict() assert results['description'] == '' assert results['label'] == '' assert out_options == {'custom_scheduler_commands': 'test_command', 'withmpi': False, 'resources': {"num_machines": 1}} def test_get_inputs_inpgen(fixture_code, generate_structure): ''' Tests if get_inputs_fleur assembles inputs correctly. Note it is the work of FleurinputgenCalculation to check if input types are correct i.e. 'code' is a Fleur code etc. 
''' from aiida_fleur.tools.common_fleur_wf import get_inputs_inpgen from aiida.orm import Dict code = fixture_code('fleur.inpgen') structure = generate_structure() params = Dict(dict={'test': 1}) inputs = {'structure': structure, 'inpgencode': code, 'options': {}, 'label': 'label', 'description': 'description', 'params': params} returns = {'metadata': { 'options': {'withmpi': False, 'resources': {'num_machines': 1}}, 'description': 'description', 'label': 'label'}, 'code': code, 'parameters': params, 'structure': structure } assert get_inputs_inpgen(**inputs) == returns # repeat without a label and description inputs = {'structure': structure, 'inpgencode': code, 'options': {}, 'params': params} returns = {'metadata': { 'options': {'withmpi': False, 'resources': {'num_machines': 1}}, 'description': '', 'label': ''}, 'code': code, 'parameters': params, 'structure': structure} assert get_inputs_inpgen(**inputs) == returns @pytest.mark.skip(reason="Test is not implemented") def test_get_scheduler_extras(): from aiida_fleur.tools.common_fleur_wf import get_scheduler_extras # test_and_get_codenode def test_test_and_get_codenode_inpgen(fixture_code): from aiida_fleur.tools.common_fleur_wf import test_and_get_codenode from aiida.orm import Code from aiida.common.exceptions import NotExistent # install code setup code code = fixture_code('fleur.inpgen') code_fleur = fixture_code('fleur.fleur') code_fleur.label = 'fleur_test' code_fleur.store() expected = 'fleur.inpgen' nonexpected = 'fleur.fleur' not_existing = 'fleur.not_existing' assert isinstance(test_and_get_codenode(code, expected), Code) with pytest.raises(ValueError) as msg: test_and_get_codenode(code, nonexpected, use_exceptions=True) assert str(msg.value) == ("Given Code node is not of expected code type.\n" "Valid labels for a fleur.fleur executable are:\n" "* fleur_test@localhost-test") with pytest.raises(ValueError) as msg: test_and_get_codenode(code, not_existing, use_exceptions=True) assert str(msg.value) == ("Code not valid, and no valid codes for fleur.not_existing.\n" "Configure at least one first using\n" " verdi code setup") def test_get_kpoints_mesh_from_kdensity(generate_structure): from aiida_fleur.tools.common_fleur_wf import get_kpoints_mesh_from_kdensity from aiida.orm import KpointsData a, b = get_kpoints_mesh_from_kdensity(generate_structure(), 0.1) assert a == ([21, 21, 21], [0.0, 0.0, 0.0]) assert isinstance(b, KpointsData) @pytest.mark.skip(reason="Test is not implemented") def test_determine_favorable_reaction(): from aiida_fleur.tools.common_fleur_wf import determine_favorable_reaction # @pytest.mark.skip(reason="There seems to be now way to add outputs to CalcJobNode") def test_performance_extract_calcs(fixture_localhost, generate_calc_job_node): from aiida_fleur.tools.common_fleur_wf import performance_extract_calcs from aiida.common.links import LinkType from aiida.orm import Dict out = Dict(dict={'title': 'A Fleur input generator calculation with aiida', 'energy': -138529.7052157, 'bandgap': 6.0662e-06, 'end_date': {'date': '2019/11/12', 'time': '16:12:08'}, 'unparsed': [], 'walltime': 43, 'warnings': {'info': {}, 'debug': {}, 'error': {}, 'warning': {}}, 'start_date': {'date': '2019/11/12', 'time': '16:11:25'}, 'parser_info': 'AiiDA Fleur Parser v0.2beta', 'CalcJob_uuid': '3dc62d43-b607-4415-920f-e0d34e805711', 'creator_name': 'fleur 30', 'energy_units': 'eV', 'kmax': 4.2, 'fermi_energy': 0.0605833326, 'spin_density': 0.0792504665, 'bandgap_units': 'eV', 'force_largest': 0.0, 'energy_hartree': -5090.8728101494, 
'walltime_units': 'seconds', 'charge_density1': 0.0577674505, 'charge_density2': 0.0461840944, 'number_of_atoms': 4, 'parser_warnings': [], 'magnetic_moments': [3.3720063737, 3.3719345944, 3.3719329177, 3.3719329162], 'number_of_kpoints': 8, 'number_of_species': 1, 'fermi_energy_units': 'Htr', 'sum_of_eigenvalues': -2973.4129786677, 'output_file_version': '0.27', 'energy_hartree_units': 'Htr', 'number_of_atom_types': 4, 'number_of_iterations': 11, 'number_of_symmetries': 8, 'energy_core_electrons': -2901.8120489845, 'magnetic_moment_units': 'muBohr', 'overall_charge_density': 0.0682602474, 'creator_target_structure': ' ', 'energy_valence_electrons': -71.6009296831, 'magnetic_spin_up_charges': [9.1494766577, 9.1494806151, 9.1494806833, 9.1494806834], 'orbital_magnetic_moments': [], 'density_convergence_units': 'me/bohr^3', 'number_of_spin_components': 2, 'charge_den_xc_den_integral': -223.295208608, 'magnetic_spin_down_charges': [5.777470284, 5.7775460208, 5.7775477657, 5.7775477672], 'number_of_iterations_total': 11, 'creator_target_architecture': 'GEN', 'orbital_magnetic_moment_units': 'muBohr', 'orbital_magnetic_spin_up_charges': [], 'orbital_magnetic_spin_down_charges': []}) out.store() node = generate_calc_job_node('fleur.fleur', fixture_localhost) node.store() out.add_incoming(node, link_type=LinkType.CREATE, link_label='output_parameters') result = performance_extract_calcs([node.pk]) assert result == {'n_symmetries': [8], 'n_spin_components': [2], 'n_kpoints': [8], 'n_iterations': [11], 'walltime_sec': [43], 'walltime_sec_per_it': [3.909090909090909], 'n_iterations_total': [11], 'density_distance': [0.0682602474], 'computer': ['localhost-test'], 'n_atoms': [4], 'kmax': [4.2], 'cost': [75866.11200000001], 'costkonstant': [147.02734883720933], 'walltime_sec_cor': [43], 'total_cost': [834527.2320000001], 'fermi_energy': [0.0605833326], 'bandgap': [6.0662e-06], 'energy': [-138529.7052157], 'force_largest': [0.0], 'ncores': [12], 'pk': [node.pk], 'uuid': [node.uuid], 'serial': [False], 'resources': [{'num_machines': 1, 'num_mpiprocs_per_machine': 1}]} inputs_optimize = [(4, 8, 3, True, 0.5, None, 720), (4, 8, 3, True, 2, None, 720), (4, 8, 3, True, 100, None, 720), (4, 8, 3, True, 100, None, 720, 0.5), (4, 8, 3, False, 0.5, None, 720)] results_optimize = [ (4, 3, 8, 'Computational setup is perfect! Nodes: 4, MPIs per node 3, OMP per MPI 8. Number of k-points is 720'), (4, 6, 4, 'Computational setup is perfect! Nodes: 4, MPIs per node 6, OMP per MPI 4. Number of k-points is 720'), (4, 12, 2, 'Computational setup is perfect! Nodes: 4, MPIs per node 12, OMP per MPI 2. Number of k-points is 720'), (3, 24, 1, 'WARNING: Changed the number of nodes from 4 to 3'), (4, 20, 1, 'WARNING: Changed the number of MPIs per node from 8 to 20 an OMP from 3 to 1. Changed the number of nodes from 4 to 4. 
Number of k-points is 720.')] @pytest.mark.parametrize('input,result_correct', zip(inputs_optimize, results_optimize)) def test_optimize_calc_options(input, result_correct): from aiida_fleur.tools.common_fleur_wf import optimize_calc_options result = optimize_calc_options(*input) assert result == result_correct def test_find_last_in_restart(fixture_localhost, generate_calc_job_node, generate_work_chain_node): from aiida_fleur.tools.common_fleur_wf import find_last_in_restart from aiida.common.links import LinkType node1 = generate_calc_job_node('fleur.fleur', fixture_localhost) node2 = generate_calc_job_node('fleur.fleur', fixture_localhost) node3 = generate_calc_job_node('fleur.fleur', fixture_localhost) node_main = generate_work_chain_node('fleur.base_relax', fixture_localhost) node1.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL') node2.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL') node3.add_incoming(node_main, link_type=LinkType.CALL_CALC, link_label='CALL') node1.store() node2.store() node3.store() result = find_last_in_restart(node_main) assert result == node3.uuid
2.140625
2
src/probnum/random_variables/_random_variable.py
admdev8/probnum
0
1596
""" Random Variables. This module implements random variables. Random variables are the main in- and outputs of probabilistic numerical methods. """ from typing import Any, Callable, Dict, Generic, Optional, Tuple, TypeVar, Union import numpy as np from probnum import utils as _utils from probnum.type import ( ArrayLikeGetitemArgType, DTypeArgType, FloatArgType, RandomStateArgType, RandomStateType, ShapeArgType, ShapeType, ) try: # functools.cached_property is only available in Python >=3.8 from functools import cached_property except ImportError: from cached_property import cached_property _ValueType = TypeVar("ValueType") class RandomVariable(Generic[_ValueType]): """ Random variables are the main objects used by probabilistic numerical methods. Every probabilistic numerical method takes a random variable encoding the prior distribution as input and outputs a random variable whose distribution encodes the uncertainty arising from finite computation. The generic signature of a probabilistic numerical method is: ``output_rv = probnum_method(input_rv, method_params)`` In practice, most random variables used by methods in ProbNum have Dirac or Gaussian measure. Instances of :class:`RandomVariable` can be added, multiplied, etc. with arrays and linear operators. This may change their ``distribution`` and not necessarily all previously available methods are retained. The internals of :class:`RandomVariable` objects are assumed to be constant over their whole lifecycle. This is due to the caches used to make certain computations more efficient. As a consequence, altering the internal state of a :class:`RandomVariable` (e.g. its mean, cov, sampling function, etc.) will result in undefined behavior. In particular, this should be kept in mind when subclassing :class:`RandomVariable` or any of its descendants. Parameters ---------- shape : Shape of realizations of this random variable. dtype : Data type of realizations of this random variable. If ``object`` will be converted to ``numpy.dtype``. as_value_type : Function which can be used to transform user-supplied arguments, interpreted as realizations of this random variable, to an easy-to-process, normalized format. Will be called internally to transform the argument of functions like ``in_support``, ``cdf`` and ``logcdf``, ``pmf`` and ``logpmf`` (in :class:`DiscreteRandomVariable`), ``pdf`` and ``logpdf`` (in :class:`ContinuousRandomVariable`), and potentially by similar functions in subclasses. For instance, this method is useful if (``log``)``cdf`` and (``log``)``pdf`` both only work on :class:`np.float_` arguments, but we still want the user to be able to pass Python :class:`float`. Then ``as_value_type`` should be set to something like ``lambda x: np.float64(x)``. See Also -------- asrandvar : Transform into a :class:`RandomVariable`. 
Examples -------- """ # pylint: disable=too-many-instance-attributes,too-many-public-methods def __init__( self, shape: ShapeArgType, dtype: DTypeArgType, random_state: RandomStateArgType = None, parameters: Optional[Dict[str, Any]] = None, sample: Optional[Callable[[ShapeType], _ValueType]] = None, in_support: Optional[Callable[[_ValueType], bool]] = None, cdf: Optional[Callable[[_ValueType], np.float_]] = None, logcdf: Optional[Callable[[_ValueType], np.float_]] = None, quantile: Optional[Callable[[FloatArgType], _ValueType]] = None, mode: Optional[Callable[[], _ValueType]] = None, median: Optional[Callable[[], _ValueType]] = None, mean: Optional[Callable[[], _ValueType]] = None, cov: Optional[Callable[[], _ValueType]] = None, var: Optional[Callable[[], _ValueType]] = None, std: Optional[Callable[[], _ValueType]] = None, entropy: Optional[Callable[[], np.float_]] = None, as_value_type: Optional[Callable[[Any], _ValueType]] = None, ): # pylint: disable=too-many-arguments,too-many-locals """Create a new random variable.""" self.__shape = _utils.as_shape(shape) # Data Types self.__dtype = np.dtype(dtype) self.__median_dtype = RandomVariable.infer_median_dtype(self.__dtype) self.__moment_dtype = RandomVariable.infer_moment_dtype(self.__dtype) self._random_state = _utils.as_random_state(random_state) # Probability distribution of the random variable self.__parameters = parameters.copy() if parameters is not None else {} self.__sample = sample self.__in_support = in_support self.__cdf = cdf self.__logcdf = logcdf self.__quantile = quantile # Properties of the random variable self.__mode = mode self.__median = median self.__mean = mean self.__cov = cov self.__var = var self.__std = std self.__entropy = entropy # Utilities self.__as_value_type = as_value_type def __repr__(self) -> str: return f"<{self.shape} {self.__class__.__name__} with dtype={self.dtype}>" @property def shape(self) -> ShapeType: """Shape of realizations of the random variable.""" return self.__shape @cached_property def ndim(self) -> int: return len(self.__shape) @cached_property def size(self) -> int: return int(np.prod(self.__shape)) @property def dtype(self) -> np.dtype: """Data type of (elements of) a realization of this random variable.""" return self.__dtype @property def median_dtype(self) -> np.dtype: """The dtype of the :attr:`median`. It will be set to the dtype arising from the multiplication of values with dtypes :attr:`dtype` and :class:`np.float_`. This is motivated by the fact that, even for discrete random variables, e.g. integer-valued random variables, the :attr:`median` might lie in between two values in which case these values are averaged. For example, a uniform random variable on :math:`\\{ 1, 2, 3, 4 \\}` will have a median of :math:`2.5`. """ return self.__median_dtype @property def moment_dtype(self) -> np.dtype: """The dtype of any (function of a) moment of the random variable, e.g. its :attr:`mean`, :attr:`cov`, :attr:`var`, or :attr:`std`. It will be set to the dtype arising from the multiplication of values with dtypes :attr:`dtype` and :class:`np.float_`. This is motivated by the mathematical definition of a moment as a sum or an integral over products of probabilities and values of the random variable, which are represented as using the dtypes :class:`np.float_` and :attr:`dtype`, respectively. """ return self.__moment_dtype @property def random_state(self) -> RandomStateType: """Random state of the random variable. 
This attribute defines the RandomState object to use for drawing realizations from this random variable. If None (or np.random), the global np.random state is used. If integer, it is used to seed the local :class:`~numpy.random.RandomState` instance. """ return self._random_state @random_state.setter def random_state(self, seed: RandomStateArgType): """Get or set the RandomState object of the underlying distribution. This can be either None or an existing RandomState object. If None (or np.random), use the RandomState singleton used by np.random. If already a RandomState instance, use it. If an int, use a new RandomState instance seeded with seed. """ self._random_state = _utils.as_random_state(seed) @property def parameters(self) -> Dict[str, Any]: """ Parameters of the probability distribution. The parameters of the distribution such as mean, variance, et cetera stored in a ``dict``. """ return self.__parameters.copy() @cached_property def mode(self) -> _ValueType: """ Mode of the random variable. Returns ------- mode : float The mode of the random variable. """ if self.__mode is None: raise NotImplementedError mode = self.__mode() RandomVariable._check_property_value( "mode", mode, shape=self.__shape, dtype=self.__dtype, ) # Make immutable if isinstance(mode, np.ndarray): mode.setflags(write=False) return mode @cached_property def median(self) -> _ValueType: """ Median of the random variable. To learn about the dtype of the median, see :attr:`median_dtype`. Returns ------- median : float The median of the distribution. """ if self.__shape != (): raise NotImplementedError( "The median is only defined for scalar random variables." ) median = self.__median() RandomVariable._check_property_value( "median", median, shape=self.__shape, dtype=self.__median_dtype, ) # Make immutable if isinstance(median, np.ndarray): median.setflags(write=False) return median @cached_property def mean(self) -> _ValueType: """ Mean :math:`\\mathbb{E}(X)` of the distribution. To learn about the dtype of the mean, see :attr:`moment_dtype`. Returns ------- mean : array-like The mean of the distribution. """ if self.__mean is None: raise NotImplementedError mean = self.__mean() RandomVariable._check_property_value( "mean", mean, shape=self.__shape, dtype=self.__moment_dtype, ) # Make immutable if isinstance(mean, np.ndarray): mean.setflags(write=False) return mean @cached_property def cov(self) -> _ValueType: """ Covariance :math:`\\operatorname{Cov}(X) = \\mathbb{E}((X-\\mathbb{E}(X))(X-\\mathbb{E}(X))^\\top)` of the random variable. To learn about the dtype of the covariance, see :attr:`moment_dtype`. Returns ------- cov : array-like The kernels of the random variable. """ # pylint: disable=line-too-long if self.__cov is None: raise NotImplementedError cov = self.__cov() RandomVariable._check_property_value( "covariance", cov, shape=(self.size, self.size) if self.ndim > 0 else (), dtype=self.__moment_dtype, ) # Make immutable if isinstance(cov, np.ndarray): cov.setflags(write=False) return cov @cached_property def var(self) -> _ValueType: """ Variance :math:`\\operatorname{Var}(X) = \\mathbb{E}((X-\\mathbb{E}(X))^2)` of the distribution. To learn about the dtype of the variance, see :attr:`moment_dtype`. Returns ------- var : array-like The variance of the distribution. 
""" if self.__var is None: try: var = np.diag(self.cov).reshape(self.__shape).copy() except NotImplementedError as exc: raise NotImplementedError from exc else: var = self.__var() RandomVariable._check_property_value( "variance", var, shape=self.__shape, dtype=self.__moment_dtype, ) # Make immutable if isinstance(var, np.ndarray): var.setflags(write=False) return var @cached_property def std(self) -> _ValueType: """ Standard deviation of the distribution. To learn about the dtype of the standard deviation, see :attr:`moment_dtype`. Returns ------- std : array-like The standard deviation of the distribution. """ if self.__std is None: try: std = np.sqrt(self.var) except NotImplementedError as exc: raise NotImplementedError from exc else: std = self.__std() RandomVariable._check_property_value( "standard deviation", std, shape=self.__shape, dtype=self.__moment_dtype, ) # Make immutable if isinstance(std, np.ndarray): std.setflags(write=False) return std @cached_property def entropy(self) -> np.float_: if self.__entropy is None: raise NotImplementedError entropy = self.__entropy() entropy = RandomVariable._ensure_numpy_float( "entropy", entropy, force_scalar=True ) return entropy def in_support(self, x: _ValueType) -> bool: if self.__in_support is None: raise NotImplementedError in_support = self.__in_support(self._as_value_type(x)) if not isinstance(in_support, bool): raise ValueError( f"The function `in_support` must return a `bool`, but its return value " f"is of type `{type(x)}`." ) return in_support def sample(self, size: ShapeArgType = ()) -> _ValueType: """ Draw realizations from a random variable. Parameters ---------- size : tuple Size of the drawn sample of realizations. Returns ------- sample : array-like Sample of realizations with the given ``size`` and the inherent ``shape``. """ if self.__sample is None: raise NotImplementedError("No sampling method provided.") return self.__sample(size=_utils.as_shape(size)) def cdf(self, x: _ValueType) -> np.float_: """ Cumulative distribution function. Parameters ---------- x : array-like Evaluation points of the cumulative distribution function. The shape of this argument should be :code:`(..., S1, ..., SN)`, where :code:`(S1, ..., SN)` is the :attr:`shape` of the random variable. The cdf evaluation will be broadcast over all additional dimensions. Returns ------- q : array-like Value of the cumulative density function at the given points. """ if self.__cdf is not None: return RandomVariable._ensure_numpy_float( "cdf", self.__cdf(self._as_value_type(x)) ) elif self.__logcdf is not None: cdf = np.exp(self.logcdf(self._as_value_type(x))) assert isinstance(cdf, np.float_) return cdf else: raise NotImplementedError( f"Neither the `cdf` nor the `logcdf` of the random variable object " f"with type `{type(self).__name__}` is implemented." ) def logcdf(self, x: _ValueType) -> np.float_: """ Log-cumulative distribution function. Parameters ---------- x : array-like Evaluation points of the cumulative distribution function. The shape of this argument should be :code:`(..., S1, ..., SN)`, where :code:`(S1, ..., SN)` is the :attr:`shape` of the random variable. The logcdf evaluation will be broadcast over all additional dimensions. Returns ------- q : array-like Value of the log-cumulative density function at the given points. 
""" if self.__logcdf is not None: return RandomVariable._ensure_numpy_float( "logcdf", self.__logcdf(self._as_value_type(x)) ) elif self.__cdf is not None: logcdf = np.log(self.__cdf(x)) assert isinstance(logcdf, np.float_) return logcdf else: raise NotImplementedError( f"Neither the `logcdf` nor the `cdf` of the random variable object " f"with type `{type(self).__name__}` is implemented." ) def quantile(self, p: FloatArgType) -> _ValueType: """Quantile function. The quantile function :math:`Q \\colon [0, 1] \\to \\mathbb{R}` of a random variable :math:`X` is defined as :math:`Q(p) = \\inf\\{ x \\in \\mathbb{R} \\colon p \\le F_X(x) \\}`, where :math:`F_X \\colon \\mathbb{R} \\to [0, 1]` is the :meth:`cdf` of the random variable. From the definition it follows that the quantile function always returns values of the same dtype as the random variable. For instance, for a discrete distribution over the integers, the returned quantiles will also be integers. This means that, in general, :math:`Q(0.5)` is not equal to the :attr:`median` as it is defined in this class. See https://en.wikipedia.org/wiki/Quantile_function for more details and examples. """ if self.__shape != (): raise NotImplementedError( "The quantile function is only defined for scalar random variables." ) if self.__quantile is None: raise NotImplementedError try: p = _utils.as_numpy_scalar(p, dtype=np.floating) except TypeError as exc: raise TypeError( "The given argument `p` can not be cast to a `np.floating` object." ) from exc quantile = self.__quantile(p) if quantile.shape != self.__shape: raise ValueError( f"The quantile function should return values of the same shape as the " f"random variable, i.e. {self.__shape}, but it returned a value with " f"{quantile.shape}." ) if quantile.dtype != self.__dtype: raise ValueError( f"The quantile function should return values of the same dtype as the " f"random variable, i.e. `{self.__dtype.name}`, but it returned a value " f"with dtype `{quantile.dtype.name}`." ) return quantile def __getitem__(self, key: ArrayLikeGetitemArgType) -> "RandomVariable": return RandomVariable( shape=np.empty(shape=self.shape)[key].shape, dtype=self.dtype, random_state=_utils.derive_random_seed(self.random_state), sample=lambda size: self.sample(size)[key], mode=lambda: self.mode[key], mean=lambda: self.mean[key], var=lambda: self.var[key], std=lambda: self.std[key], entropy=lambda: self.entropy, as_value_type=self.__as_value_type, ) def reshape(self, newshape: ShapeArgType) -> "RandomVariable": """ Give a new shape to a random variable. Parameters ---------- newshape : int or tuple of ints New shape for the random variable. It must be compatible with the original shape. Returns ------- reshaped_rv : ``self`` with the new dimensions of ``shape``. """ newshape = _utils.as_shape(newshape) return RandomVariable( shape=newshape, dtype=self.dtype, random_state=_utils.derive_random_seed(self.random_state), sample=lambda size: self.sample(size).reshape(size + newshape), mode=lambda: self.mode.reshape(newshape), median=lambda: self.median.reshape(newshape), mean=lambda: self.mean.reshape(newshape), cov=lambda: self.cov, var=lambda: self.var.reshape(newshape), std=lambda: self.std.reshape(newshape), entropy=lambda: self.entropy, as_value_type=self.__as_value_type, ) def transpose(self, *axes: int) -> "RandomVariable": """ Transpose the random variable. Parameters ---------- axes : None, tuple of ints, or n ints See documentation of numpy.ndarray.transpose. 
Returns ------- transposed_rv : The transposed random variable. """ return RandomVariable( shape=np.empty(shape=self.shape).transpose(*axes).shape, dtype=self.dtype, random_state=_utils.derive_random_seed(self.random_state), sample=lambda size: self.sample(size).transpose(*axes), mode=lambda: self.mode.transpose(*axes), median=lambda: self.median.transpose(*axes), mean=lambda: self.mean.transpose(*axes), cov=lambda: self.cov, var=lambda: self.var.transpose(*axes), std=lambda: self.std.transpose(*axes), entropy=lambda: self.entropy, as_value_type=self.__as_value_type, ) T = property(transpose) # Unary arithmetic operations def __neg__(self) -> "RandomVariable": return RandomVariable( shape=self.shape, dtype=self.dtype, random_state=_utils.derive_random_seed(self.random_state), sample=lambda size: -self.sample(size=size), in_support=lambda x: self.in_support(-x), mode=lambda: -self.mode, median=lambda: -self.median, mean=lambda: -self.mean, cov=lambda: self.cov, var=lambda: self.var, std=lambda: self.std, as_value_type=self.__as_value_type, ) def __pos__(self) -> "RandomVariable": return RandomVariable( shape=self.shape, dtype=self.dtype, random_state=_utils.derive_random_seed(self.random_state), sample=lambda size: +self.sample(size=size), in_support=lambda x: self.in_support(+x), mode=lambda: +self.mode, median=lambda: +self.median, mean=lambda: +self.mean, cov=lambda: self.cov, var=lambda: self.var, std=lambda: self.std, as_value_type=self.__as_value_type, ) def __abs__(self) -> "RandomVariable": return RandomVariable( shape=self.shape, dtype=self.dtype, random_state=_utils.derive_random_seed(self.random_state), sample=lambda size: abs(self.sample(size=size)), ) # Binary arithmetic operations __array_ufunc__ = None """ This prevents numpy from calling elementwise arithmetic operations allowing expressions like: y = np.array([1, 1]) + RV to call the arithmetic operations defined by RandomVariable instead of elementwise. Thus no array of RandomVariables but a RandomVariable with the correct shape is returned. 
""" def __add__(self, other: Any) -> "RandomVariable": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import add return add(self, other) def __radd__(self, other: Any) -> "RandomVariable": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import add return add(other, self) def __sub__(self, other: Any) -> "RandomVariable": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import sub return sub(self, other) def __rsub__(self, other: Any) -> "RandomVariable": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import sub return sub(other, self) def __mul__(self, other: Any) -> "RandomVariable": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import mul return mul(self, other) def __rmul__(self, other: Any) -> "RandomVariable": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import mul return mul(other, self) def __matmul__(self, other: Any) -> "RandomVariable": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import matmul return matmul(self, other) def __rmatmul__(self, other: Any) -> "RandomVariable": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import matmul return matmul(other, self) def __truediv__(self, other: Any) -> "RandomVariable": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import truediv return truediv(self, other) def __rtruediv__(self, other: Any) -> "RandomVariable": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import truediv return truediv(other, self) def __floordiv__(self, other: Any) -> "RandomVariable": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import floordiv return floordiv(self, other) def __rfloordiv__(self, other: Any) -> "RandomVariable": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import floordiv return floordiv(other, self) def __mod__(self, other: Any) -> "RandomVariable": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import mod return mod(self, other) def __rmod__(self, other: Any) -> "RandomVariable": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import mod return mod(other, self) def __divmod__(self, other: Any) -> "RandomVariable": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import divmod_ return divmod_(self, other) def __rdivmod__(self, other: Any) -> "RandomVariable": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import divmod_ return divmod_(other, self) def __pow__(self, other: Any) -> "RandomVariable": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import pow_ return pow_(self, other) def __rpow__(self, other: Any) -> "RandomVariable": # pylint: disable=import-outside-toplevel,cyclic-import from ._arithmetic import pow_ return pow_(other, self) @staticmethod def infer_median_dtype(value_dtype: DTypeArgType) -> np.dtype: return RandomVariable.infer_moment_dtype(value_dtype) @staticmethod def infer_moment_dtype(value_dtype: DTypeArgType) -> np.dtype: return np.promote_types(value_dtype, np.float_) def _as_value_type(self, x: Any) -> _ValueType: if self.__as_value_type is not None: return self.__as_value_type(x) return x @staticmethod def _check_property_value( name: str, value: Any, shape: Optional[Tuple[int, ...]] = None, dtype: Optional[np.dtype] = None, ): if shape is not None: if value.shape != 
shape: raise ValueError( f"The {name} of the random variable does not have the correct " f"shape. Expected {shape} but got {value.shape}." ) if dtype is not None: if not np.issubdtype(value.dtype, dtype): raise ValueError( f"The {name} of the random variable does not have the correct " f"dtype. Expected {dtype.name} but got {value.dtype.name}." ) @classmethod def _ensure_numpy_float( cls, name: str, value: Any, force_scalar: bool = False ) -> Union[np.float_, np.ndarray]: if np.isscalar(value): if not isinstance(value, np.float_): try: value = _utils.as_numpy_scalar(value, dtype=np.float_) except TypeError as err: raise TypeError( f"The function `{name}` specified via the constructor of " f"`{cls.__name__}` must return a scalar value that can be " f"converted to a `np.float_`, which is not possible for " f"{value} of type {type(value)}." ) from err elif not force_scalar: try: value = np.asarray(value, dtype=np.float_) except TypeError as err: raise TypeError( f"The function `{name}` specified via the constructor of " f"`{cls.__name__}` must return a value that can be converted " f"to a `np.ndarray` of type `np.float_`, which is not possible " f"for {value} of type {type(value)}." ) from err else: raise TypeError( f"The function `{name}` specified via the constructor of " f"`{cls.__name__}` must return a scalar value, but {value} of type " f"{type(value)} is not scalar." ) assert isinstance(value, (np.float_, np.ndarray)) return value class DiscreteRandomVariable(RandomVariable[_ValueType]): def __init__( self, shape: ShapeArgType, dtype: DTypeArgType, random_state: Optional[RandomStateType] = None, parameters: Optional[Dict[str, Any]] = None, sample: Optional[Callable[[ShapeArgType], _ValueType]] = None, in_support: Optional[Callable[[_ValueType], bool]] = None, pmf: Optional[Callable[[_ValueType], np.float_]] = None, logpmf: Optional[Callable[[_ValueType], np.float_]] = None, cdf: Optional[Callable[[_ValueType], np.float_]] = None, logcdf: Optional[Callable[[_ValueType], np.float_]] = None, quantile: Optional[Callable[[FloatArgType], _ValueType]] = None, mode: Optional[Callable[[], _ValueType]] = None, median: Optional[Callable[[], _ValueType]] = None, mean: Optional[Callable[[], _ValueType]] = None, cov: Optional[Callable[[], _ValueType]] = None, var: Optional[Callable[[], _ValueType]] = None, std: Optional[Callable[[], _ValueType]] = None, entropy: Optional[Callable[[], np.float_]] = None, ): # Probability mass function self.__pmf = pmf self.__logpmf = logpmf super().__init__( shape=shape, dtype=dtype, random_state=random_state, parameters=parameters, sample=sample, in_support=in_support, cdf=cdf, logcdf=logcdf, quantile=quantile, mode=mode, median=median, mean=mean, cov=cov, var=var, std=std, entropy=entropy, ) def pmf(self, x: _ValueType) -> np.float_: if self.__pmf is not None: return DiscreteRandomVariable._ensure_numpy_float("pmf", self.__pmf(x)) elif self.__logpmf is not None: pmf = np.exp(self.__logpmf(x)) assert isinstance(pmf, np.float_) return pmf else: raise NotImplementedError( f"Neither the `pmf` nor the `logpmf` of the discrete random variable " f"object with type `{type(self).__name__}` is implemented." 
) def logpmf(self, x: _ValueType) -> np.float_: if self.__logpmf is not None: return DiscreteRandomVariable._ensure_numpy_float( "logpmf", self.__logpmf(self._as_value_type(x)) ) elif self.__pmf is not None: logpmf = np.log(self.__pmf(self._as_value_type(x))) assert isinstance(logpmf, np.float_) return logpmf else: raise NotImplementedError( f"Neither the `logpmf` nor the `pmf` of the discrete random variable " f"object with type `{type(self).__name__}` is implemented." ) class ContinuousRandomVariable(RandomVariable[_ValueType]): def __init__( self, shape: ShapeArgType, dtype: DTypeArgType, random_state: Optional[RandomStateType] = None, parameters: Optional[Dict[str, Any]] = None, sample: Optional[Callable[[ShapeArgType], _ValueType]] = None, in_support: Optional[Callable[[_ValueType], bool]] = None, pdf: Optional[Callable[[_ValueType], np.float_]] = None, logpdf: Optional[Callable[[_ValueType], np.float_]] = None, cdf: Optional[Callable[[_ValueType], np.float_]] = None, logcdf: Optional[Callable[[_ValueType], np.float_]] = None, quantile: Optional[Callable[[FloatArgType], _ValueType]] = None, mode: Optional[Callable[[], _ValueType]] = None, median: Optional[Callable[[], _ValueType]] = None, mean: Optional[Callable[[], _ValueType]] = None, cov: Optional[Callable[[], _ValueType]] = None, var: Optional[Callable[[], _ValueType]] = None, std: Optional[Callable[[], _ValueType]] = None, entropy: Optional[Callable[[], np.float_]] = None, ): # Probability density function self.__pdf = pdf self.__logpdf = logpdf super().__init__( shape=shape, dtype=dtype, random_state=random_state, parameters=parameters, sample=sample, in_support=in_support, cdf=cdf, logcdf=logcdf, quantile=quantile, mode=mode, median=median, mean=mean, cov=cov, var=var, std=std, entropy=entropy, ) def pdf(self, x: _ValueType) -> np.float_: """ Probability density or mass function. Following the predominant convention in mathematics, we express pdfs with respect to the Lebesgue measure unless stated otherwise. Parameters ---------- x : array-like Evaluation points of the probability density / mass function. The shape of this argument should be :code:`(..., S1, ..., SN)`, where :code:`(S1, ..., SN)` is the :attr:`shape` of the random variable. The pdf evaluation will be broadcast over all additional dimensions. Returns ------- p : array-like Value of the probability density / mass function at the given points. """ if self.__pdf is not None: return ContinuousRandomVariable._ensure_numpy_float( "pdf", self.__pdf(self._as_value_type(x)) ) if self.__logpdf is not None: pdf = np.exp(self.__logpdf(self._as_value_type(x))) assert isinstance(pdf, np.float_) return pdf raise NotImplementedError( f"Neither the `pdf` nor the `logpdf` of the continuous random variable " f"object with type `{type(self).__name__}` is implemented." ) def logpdf(self, x: _ValueType) -> np.float_: """ Natural logarithm of the probability density function. Parameters ---------- x : array-like Evaluation points of the log-probability density/mass function. The shape of this argument should be :code:`(..., S1, ..., SN)`, where :code:`(S1, ..., SN)` is the :attr:`shape` of the random variable. The logpdf evaluation will be broadcast over all additional dimensions. Returns ------- logp : array-like Value of the log-probability density / mass function at the given points. 
""" if self.__logpdf is not None: return ContinuousRandomVariable._ensure_numpy_float( "logpdf", self.__logpdf(self._as_value_type(x)) ) elif self.__pdf is not None: logpdf = np.log(self.__pdf(self._as_value_type(x))) assert isinstance(logpdf, np.float_) return logpdf else: raise NotImplementedError( f"Neither the `logpdf` nor the `pdf` of the continuous random variable " f"object with type `{type(self).__name__}` is implemented." )
3.5
4
platform/gcutil/lib/google_compute_engine/gcutil_lib/address_cmds_test.py
IsaacHuang/google-cloud-sdk
0
1597
# Copyright 2012 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit tests for address collection commands.""" import path_initializer path_initializer.InitSysPath() import json import unittest import gflags as flags from gcutil_lib import address_cmds from gcutil_lib import gcutil_unittest from gcutil_lib import mock_api from gcutil_lib import mock_lists FLAGS = flags.FLAGS class AddressCmdsTest(gcutil_unittest.GcutilTestCase): def setUp(self): self.mock, self.api = mock_api.CreateApi(self.version) def testReserveAddressPromptsForRegion(self): expected_project = 'test_project' expected_address = 'test_address' expected_description = 'test address' expected_region = 'test-region' expected_source_address = '123.123.123.1' set_flags = { 'project': expected_project, 'description': expected_description, 'source_address': expected_source_address, } command = self._CreateAndInitializeCommand( address_cmds.ReserveAddress, 'reserveaddress', set_flags=set_flags) mock_lists.GetSampleRegionListCall( command, self.mock, num_responses=1, name=[expected_region]) call = self.mock.Respond('compute.addresses.insert', {}) command.Handle(expected_address) request = call.GetRequest() self.assertEqual('POST', request.method) self.assertEqual(expected_project, request.parameters['project']) self.assertEquals(expected_region, request.parameters['region']) body = json.loads(request.body) self.assertEqual(body['name'], expected_address) self.assertEqual(body['description'], expected_description) self.assertEquals(body['address'], expected_source_address) def testReserveAddressGeneratesCorrectRequest(self): expected_project = 'test_project' expected_address = 'test_address' expected_description = 'test address' submitted_region = 'test-region' expected_source_address = '192.168.127.12' set_flags = { 'project': expected_project, 'description': expected_description, 'region': submitted_region, 'source_address': expected_source_address, } command = self._CreateAndInitializeCommand( address_cmds.ReserveAddress, 'reserveaddress', set_flags=set_flags) call = self.mock.Respond('compute.addresses.insert', {}) command.Handle(expected_address) request = call.GetRequest() self.assertEqual('POST', request.method) self.assertEqual(expected_project, request.parameters['project']) self.assertEquals(submitted_region, request.parameters['region']) body = json.loads(request.body) self.assertEqual(body['name'], expected_address) self.assertEqual(body['description'], expected_description) self.assertEquals(body['address'], expected_source_address) def testGetAddressGeneratesCorrectRequest(self): expected_project = 'test_project' expected_address = 'test_address' submitted_region = 'test-region' set_flags = { 'project': expected_project, 'region': submitted_region, } command = self._CreateAndInitializeCommand( address_cmds.GetAddress, 'getaddress', set_flags=set_flags) call = self.mock.Respond('compute.addresses.get', {}) command.Handle(expected_address) request = call.GetRequest() self.assertEqual('GET', 
request.method) self.assertEqual(None, request.body) parameters = request.parameters self.assertEqual(parameters['project'], expected_project) self.assertEqual(parameters['region'], submitted_region) self.assertEqual(parameters['address'], expected_address) def testGetAddressPrintNonEmptyUsers(self): expected_project = 'test_project' submitted_region = 'test-region' set_flags = { 'project': expected_project, 'region': submitted_region, } command = self._CreateAndInitializeCommand( address_cmds.GetAddress, 'getaddress', set_flags=set_flags) data = command.GetDetailRow({'users': ['fr-1', 'fr-2']}) expected_data = { 'v1': [ ('users', ['fr-1', 'fr-2']) ], } self.assertEquals( gcutil_unittest.SelectTemplateForVersion( expected_data, command.api.version), data) def testGetAddressPrintEmptyUsers(self): expected_project = 'test_project' submitted_region = 'test-region' set_flags = { 'project': expected_project, 'region': submitted_region, } command = self._CreateAndInitializeCommand( address_cmds.GetAddress, 'getaddress', set_flags=set_flags) data = command.GetDetailRow({'users': []}) expected_data = { 'v1': [ ('users', []) ], } self.assertEquals( gcutil_unittest.SelectTemplateForVersion( expected_data, command.api.version), data) def testReleaseAddressGeneratesCorrectRequest(self): expected_project = 'test_project' expected_address = 'test_address' submitted_region = 'test-region' set_flags = { 'project': expected_project, 'region': submitted_region, } command = self._CreateAndInitializeCommand( address_cmds.ReleaseAddress, 'releaseaddress', set_flags=set_flags) call = self.mock.Respond('compute.addresses.delete', {}) command.Handle(expected_address) request = call.GetRequest() self.assertEqual('DELETE', request.method) self.assertEqual(None, request.body) parameters = request.parameters self.assertEqual(parameters['project'], expected_project) self.assertEqual(parameters['region'], submitted_region) self.assertEqual(parameters['address'], expected_address) def testReleaseAddressWithoutRegionFlag(self): expected_project = 'test_project' expected_region = 'test-region' expected_address = 'test_address' address = ('projects/%s/regions/%s/addresses/%s' % (expected_project, expected_region, expected_address)) set_flags = { 'project': 'incorrect_project', } command = self._CreateAndInitializeCommand( address_cmds.ReleaseAddress, 'releaseaddress', set_flags=set_flags) call = self.mock.Respond('compute.addresses.delete', {}) command.Handle(address) request = call.GetRequest() self.assertEqual('DELETE', request.method) self.assertEqual(None, request.body) parameters = request.parameters self.assertEqual(parameters['project'], expected_project) self.assertEqual(parameters['region'], expected_region) self.assertEqual(parameters['address'], expected_address) def testReleaseMultipleAddresses(self): expected_project = 'test_project' expected_addresses = [ 'test-addresses-%02d' % x for x in xrange(100)] set_flags = { 'project': expected_project, 'region': 'region-a', } command = self._CreateAndInitializeCommand( address_cmds.ReleaseAddress, 'releaseaddress', set_flags=set_flags) calls = [self.mock.Respond('compute.addresses.delete', {}) for x in xrange(len(expected_addresses))] _, exceptions = command.Handle(*expected_addresses) self.assertEqual(0, len(exceptions)) sorted_calls = sorted([call.GetRequest().parameters['address'] for call in calls]) self.assertEqual(expected_addresses, sorted_calls) if __name__ == '__main__': unittest.main(testLoader=gcutil_unittest.GcutilLoader())
2.15625
2
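The ReserveAddress tests in the record above verify the compute.addresses.insert request field by field; purely for orientation, the JSON body they assert on would look roughly like the sketch below. The values are copied from the test itself, and any other fields the real Compute API may accept are deliberately omitted, so this is an illustration rather than a complete request schema.

# Rough shape of the insert-request body asserted on by
# testReserveAddressGeneratesCorrectRequest; only the three checked keys shown.
import json

expected_body = {
    'name': 'test_address',
    'description': 'test address',
    'address': '192.168.127.12',
}
print(json.dumps(expected_body, indent=2))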
vote/migrations/0005_auto_20210204_1900.py
jnegrete2005/JuradoFMS
2
1598
<filename>vote/migrations/0005_auto_20210204_1900.py<gh_stars>1-10
# Generated by Django 3.1.5 on 2021-02-05 00:00

import django.contrib.postgres.fields
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('vote', '0004_auto_20210131_1621'),
    ]

    operations = [
        migrations.AlterField(
            model_name='competitor',
            name='min1',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.PositiveSmallIntegerField(), blank=True, null=True, size=9, verbose_name='minuto 1'),
        ),
        migrations.AlterField(
            model_name='competitor',
            name='min2',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.PositiveSmallIntegerField(), blank=True, null=True, size=9, verbose_name='minuto 2'),
        ),
    ]
1.367188
1
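The migration in the record above only describes the altered columns; as a rough illustration, the Competitor model it targets would contain fields along the lines of the sketch below. Only the two ArrayField definitions are grounded in the migration; the rest of the model is unknown and omitted, and the sketch assumes a Django project with django.contrib.postgres enabled.

# Hypothetical reconstruction of the fields that migration 0005 alters.
from django.contrib.postgres.fields import ArrayField
from django.db import models


class Competitor(models.Model):
    # Array of up to nine small positive integers; nullable/blank as in the migration.
    min1 = ArrayField(
        base_field=models.PositiveSmallIntegerField(),
        size=9,
        blank=True,
        null=True,
        verbose_name='minuto 1',
    )
    min2 = ArrayField(
        base_field=models.PositiveSmallIntegerField(),
        size=9,
        blank=True,
        null=True,
        verbose_name='minuto 2',
    )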
tools/wasm-sourcemap.py
ngzhian/emscripten
1
1599
#!/usr/bin/env python
# Copyright 2018 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.

"""Utility tools that extracts DWARF information encoded in a wasm output
produced by the LLVM tools, and encodes it as a wasm source map. Additionally,
it can collect original sources, change files prefixes, and strip debug
sections from a wasm file.
"""

import argparse
from collections import OrderedDict, namedtuple
import json
import logging
from math import floor, log
import os
import re
from subprocess import Popen, PIPE
import sys

sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from tools.shared import asstr

logger = logging.getLogger('wasm-sourcemap')


def parse_args():
  parser = argparse.ArgumentParser(prog='wasm-sourcemap.py', description=__doc__)
  parser.add_argument('wasm', help='wasm file')
  parser.add_argument('-o', '--output', help='output source map')
  parser.add_argument('-p', '--prefix', nargs='*', help='replace source debug filename prefix for source map', default=[])
  parser.add_argument('-s', '--sources', action='store_true', help='read and embed source files from file system into source map')
  parser.add_argument('-l', '--load-prefix', nargs='*', help='replace source debug filename prefix for reading sources from file system (see also --sources)', default=[])
  parser.add_argument('-w', nargs='?', help='set output wasm file')
  parser.add_argument('-x', '--strip', action='store_true', help='removes debug and linking sections')
  parser.add_argument('-u', '--source-map-url', nargs='?', help='specifies sourceMappingURL section contest')
  parser.add_argument('--dwarfdump', help="path to llvm-dwarfdump executable")
  parser.add_argument('--dwarfdump-output', nargs='?', help=argparse.SUPPRESS)
  return parser.parse_args()


class Prefixes:
  def __init__(self, args):
    prefixes = []
    for p in args:
      if '=' in p:
        prefix, replacement = p.split('=')
        prefixes.append({'prefix': prefix, 'replacement': replacement})
      else:
        prefixes.append({'prefix': p, 'replacement': None})
    self.prefixes = prefixes
    self.cache = {}

  def resolve(self, name):
    if name in self.cache:
      return self.cache[name]
    result = name
    for p in self.prefixes:
      if name.startswith(p['prefix']):
        if p['replacement'] is None:
          result = name[len(p['prefix'])::]
        else:
          result = p['replacement'] + name[len(p['prefix'])::]
        break
    self.cache[name] = result
    return result


# SourceMapPrefixes contains resolver for file names that are:
#  - "sources" is for names that output to source maps JSON
#  - "load" is for paths that used to load source text
SourceMapPrefixes = namedtuple('SourceMapPrefixes', 'sources, load')


def encode_vlq(n):
  VLQ_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
  x = (n << 1) if n >= 0 else ((-n << 1) + 1)
  result = ""
  while x > 31:
    result = result + VLQ_CHARS[32 + (x & 31)]
    x = x >> 5
  return result + VLQ_CHARS[x]


def read_var_uint(wasm, pos):
  n = 0
  shift = 0
  b = ord(wasm[pos:pos + 1])
  pos = pos + 1
  while b >= 128:
    n = n | ((b - 128) << shift)
    b = ord(wasm[pos:pos + 1])
    pos = pos + 1
    shift += 7
  return n + (b << shift), pos


def strip_debug_sections(wasm):
  logger.debug('Strip debug sections')
  pos = 8
  stripped = wasm[:pos]

  while pos < len(wasm):
    section_start = pos
    section_id, pos_ = read_var_uint(wasm, pos)
    section_size, section_body = read_var_uint(wasm, pos_)
    pos = section_body + section_size
    if section_id == 0:
      name_len, name_pos = read_var_uint(wasm, section_body)
      name_end = name_pos + name_len
      name = wasm[name_pos:name_end]
      if name == "linking" or name == "sourceMappingURL" or name.startswith("reloc..debug_") or name.startswith(".debug_"):
        continue  # skip debug related sections
    stripped = stripped + wasm[section_start:pos]

  return stripped


def encode_uint_var(n):
  result = bytearray()
  while n > 127:
    result.append(128 | (n & 127))
    n = n >> 7
  result.append(n)
  return bytes(result)


def append_source_mapping(wasm, url):
  logger.debug('Append sourceMappingURL section')
  section_name = "sourceMappingURL"
  section_content = encode_uint_var(len(section_name)) + section_name + encode_uint_var(len(url)) + url
  return wasm + encode_uint_var(0) + encode_uint_var(len(section_content)) + section_content


def get_code_section_offset(wasm):
  logger.debug('Read sections index')
  pos = 8

  while pos < len(wasm):
    section_id, pos_ = read_var_uint(wasm, pos)
    section_size, pos = read_var_uint(wasm, pos_)
    if section_id == 10:
      return pos
    pos = pos + section_size


def remove_dead_entries(entries):
  # Remove entries for dead functions. It is a heuristics to ignore data if the
  # function starting address near to 0 (is equal to its size field length).
  block_start = 0
  cur_entry = 0
  while cur_entry < len(entries):
    if not entries[cur_entry]['eos']:
      cur_entry += 1
      continue
    fn_start = entries[block_start]['address']
    # Calculate the LEB encoded function size (including size field)
    fn_size_length = floor(log(entries[cur_entry]['address'] - fn_start + 1, 128)) + 1
    min_live_offset = 1 + fn_size_length  # 1 byte is for code section entries
    if fn_start < min_live_offset:
      # Remove dead code debug info block.
      del entries[block_start:cur_entry + 1]
      cur_entry = block_start
      continue
    cur_entry += 1
    block_start = cur_entry


def read_dwarf_entries(wasm, options):
  if options.dwarfdump_output:
    output = open(options.dwarfdump_output, 'r').read()
  elif options.dwarfdump:
    logger.debug('Reading DWARF information from %s' % wasm)
    if not os.path.exists(options.dwarfdump):
      logger.error('llvm-dwarfdump not found: ' + options.dwarfdump)
      sys.exit(1)
    process = Popen([options.dwarfdump, "-debug-info", "-debug-line", wasm], stdout=PIPE)
    output, err = process.communicate()
    exit_code = process.wait()
    if exit_code != 0:
      logger.error('Error during llvm-dwarfdump execution (%s)' % exit_code)
      sys.exit(1)
  else:
    logger.error('Please specify either --dwarfdump or --dwarfdump-output')
    sys.exit(1)

  entries = []
  debug_line_chunks = re.split(r"debug_line\[(0x[0-9a-f]*)\]", asstr(output))
  maybe_debug_info_content = debug_line_chunks[0]
  for i in range(1, len(debug_line_chunks), 2):
    stmt_list = debug_line_chunks[i]
    comp_dir_match = re.search(r"DW_AT_stmt_list\s+\(" + stmt_list + r"\)\s+" +
                               r"DW_AT_comp_dir\s+\(\"([^\"]+)", maybe_debug_info_content)
    comp_dir = comp_dir_match.group(1) if comp_dir_match is not None else ""

    line_chunk = debug_line_chunks[i + 1]

    # include_directories[  1] = "/Users/yury/Work/junk/sqlite-playground/src"
    # file_names[  1]:
    #            name: "playground.c"
    #       dir_index: 1
    #        mod_time: 0x00000000
    #          length: 0x00000000
    #
    # Address            Line   Column File   ISA Discriminator Flags
    # ------------------ ------ ------ ------ --- ------------- -------------
    # 0x0000000000000006     22      0      1   0             0  is_stmt
    # 0x0000000000000007     23     10      1   0             0  is_stmt prologue_end
    # 0x000000000000000f     23      3      1   0             0
    # 0x0000000000000010     23      3      1   0             0  end_sequence
    # 0x0000000000000011     28      0      1   0             0  is_stmt

    include_directories = {'0': comp_dir}
    for dir in re.finditer(r"include_directories\[\s*(\d+)\] = \"([^\"]*)", line_chunk):
      include_directories[dir.group(1)] = dir.group(2)

    files = {}
    for file in re.finditer(r"file_names\[\s*(\d+)\]:\s+name: \"([^\"]*)\"\s+dir_index: (\d+)", line_chunk):
      dir = include_directories[file.group(3)]
      file_path = (dir + '/' if file.group(2)[0] != '/' else '') + file.group(2)
      files[file.group(1)] = file_path

    for line in re.finditer(r"\n0x([0-9a-f]+)\s+(\d+)\s+(\d+)\s+(\d+)(.*?end_sequence)?", line_chunk):
      entry = {'address': int(line.group(1), 16), 'line': int(line.group(2)),
               'column': int(line.group(3)), 'file': files[line.group(4)],
               'eos': line.group(5) is not None}
      if not entry['eos']:
        entries.append(entry)
      else:
        # move end of function to the last END operator
        entry['address'] -= 1
        if entries[-1]['address'] == entry['address']:
          # last entry has the same address, reusing
          entries[-1]['eos'] = True
        else:
          entries.append(entry)

  remove_dead_entries(entries)

  # return entries sorted by the address field
  return sorted(entries, key=lambda entry: entry['address'])


def build_sourcemap(entries, code_section_offset, prefixes, collect_sources):
  sources = []
  sources_content = [] if collect_sources else None
  mappings = []
  sources_map = {}
  last_address = 0
  last_source_id = 0
  last_line = 1
  last_column = 1
  for entry in entries:
    line = entry['line']
    column = entry['column']
    # ignore entries with line 0
    if line == 0:
      continue
    # start at least at column 1
    if column == 0:
      column = 1
    address = entry['address'] + code_section_offset
    file_name = entry['file']
    source_name = prefixes.sources.resolve(file_name)
    if source_name not in sources_map:
      source_id = len(sources)
      sources_map[source_name] = source_id
      sources.append(source_name)
      if collect_sources:
        load_name = prefixes.load.resolve(file_name)
        try:
          with open(load_name, 'r') as infile:
            source_content = infile.read()
          sources_content.append(source_content)
        except IOError:
          print('Failed to read source: %s' % load_name)
          sources_content.append(None)
    else:
      source_id = sources_map[source_name]

    address_delta = address - last_address
    source_id_delta = source_id - last_source_id
    line_delta = line - last_line
    column_delta = column - last_column
    mappings.append(encode_vlq(address_delta) + encode_vlq(source_id_delta) + encode_vlq(line_delta) + encode_vlq(column_delta))
    last_address = address
    last_source_id = source_id
    last_line = line
    last_column = column
  return OrderedDict([('version', 3),
                      ('names', []),
                      ('sources', sources),
                      ('sourcesContent', sources_content),
                      ('mappings', ','.join(mappings))])


def main():
  options = parse_args()

  wasm_input = options.wasm
  with open(wasm_input, 'rb') as infile:
    wasm = infile.read()

  entries = read_dwarf_entries(wasm_input, options)

  code_section_offset = get_code_section_offset(wasm)

  prefixes = SourceMapPrefixes(sources=Prefixes(options.prefix), load=Prefixes(options.load_prefix))

  logger.debug('Saving to %s' % options.output)
  map = build_sourcemap(entries, code_section_offset, prefixes, options.sources)
  with open(options.output, 'w') as outfile:
    json.dump(map, outfile, separators=(',', ':'))

  if options.strip:
    wasm = strip_debug_sections(wasm)

  if options.source_map_url:
    wasm = append_source_mapping(wasm, options.source_map_url)

  if options.w:
    logger.debug('Saving wasm to %s' % options.w)
    with open(options.w, 'wb') as outfile:
      outfile.write(wasm)

  logger.debug('Done')
  return 0


if __name__ == '__main__':
  logging.basicConfig(level=logging.DEBUG if os.environ.get('EMCC_DEBUG') else logging.INFO)
  sys.exit(main())
2.3125
2
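The docstring and build_sourcemap function in the record above explain that each DWARF line-table entry becomes one comma-separated source-map segment of VLQ-encoded deltas for (wasm offset, source index, line, column). A minimal, self-contained sketch of that encoding step follows; encode_vlq mirrors the function in the record, while the example deltas are invented purely for illustration.

# Minimal sketch of the VLQ segment encoding used by build_sourcemap above.
VLQ_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def encode_vlq(n):
  # Shift the sign into the lowest bit, then emit 5-bit base64 digits,
  # setting the continuation bit (value 32) on every digit except the last.
  x = (n << 1) if n >= 0 else ((-n << 1) + 1)
  result = ""
  while x > 31:
    result += VLQ_CHARS[32 + (x & 31)]
    x >>= 5
  return result + VLQ_CHARS[x]


# One mapping segment: deltas for (wasm offset, source index, line, column).
address_delta, source_id_delta, line_delta, column_delta = 16, 0, 1, -1
segment = (encode_vlq(address_delta) + encode_vlq(source_id_delta) +
           encode_vlq(line_delta) + encode_vlq(column_delta))
print(segment)  # prints "gBACD"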