Dataset columns (name, value type, min/max string length):

    repo_name          stringlengths    7 .. 94
    repo_path          stringlengths    4 .. 237
    repo_head_hexsha   stringlengths   40 .. 40
    content            stringlengths   10 .. 680k
    apis               stringlengths    2 .. 680k
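The rows below follow this schema: each record names a repository, a file path, the commit hexsha, the full file content, and a string-encoded list of extracted API calls. As a minimal sketch of how such a dump could be inspected, assuming the rows are stored as JSON Lines with exactly these five fields (the file name repos_with_apis.jsonl is hypothetical):

import ast
import json

# Hypothetical local copy of the dump; one JSON object per line with the five columns above.
PATH = "repos_with_apis.jsonl"

with open(PATH, "r", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        # 'content' holds the raw source file; 'apis' is a Python-literal list
        # of extracted call records and can be the empty list "[]".
        calls = ast.literal_eval(row["apis"])
        print(row["repo_name"], row["repo_path"], row["repo_head_hexsha"][:8],
              len(row["content"]), len(calls))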
nikmagini/pilot
objectstoreSiteMover.py
1c84fcf6f7e43b669d2357326cdbe06382ac829f
#!/usr/bin/env python # Copyright European Organization for Nuclear Research (CERN) # # Licensed under the Apache License, Version 2.0 (the "License"); # You may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Authors: # - Wen Guan, <[email protected]>, 2014 # objectstoreSiteMover.py import os from configSiteMover import config_sm import SiteMover from xrootdObjectstoreSiteMover import xrootdObjectstoreSiteMover from S3ObjectstoreSiteMover import S3ObjectstoreSiteMover class objectstoreSiteMover(SiteMover.SiteMover): """ ObjectstoreSiteMover It uses the url to decide which ObjectstoreSiteMover implementation to be used. """ copyCommand = "objectstore" checksum_command = "adler32" def __init__(self, setup_path='', useTimerCommand=True, *args, **kwrds): self._setup = setup_path self._useTimerCommand = useTimerCommand def get_data(self, gpfn, lfn, path, fsize=0, fchecksum=0, guid=0, **pdict): gpfn = gpfn.replace("s3+rucio", "s3") if gpfn.startswith("root:"): sitemover = xrootdObjectstoreSiteMover(self.getSetup()) return sitemover.get_data(gpfn, lfn, path, fsize, fchecksum, guid, **pdict) if gpfn.startswith("s3:"): sitemover = S3ObjectstoreSiteMover(self.getSetup(), self._useTimerCommand) return sitemover.get_data(gpfn, lfn, path, fsize, fchecksum, guid, **pdict) return -1, "No objectstore sitemover found for this scheme(%s)" % gpfn def put_data(self, source, destination, fsize=0, fchecksum=0, **pdict): # Get input parameters from pdict lfn = pdict.get('lfn', '') logPath = pdict.get('logPath', '') if logPath != "": surl = logPath else: surl = os.path.join(destination, lfn) surl = surl.replace("s3+rucio", "s3") if surl.startswith("root:"): sitemover = xrootdObjectstoreSiteMover(self.getSetup()) return sitemover. put_data(source, destination, fsize, fchecksum, **pdict) if surl.startswith("s3:"): sitemover = S3ObjectstoreSiteMover(self.getSetup(), self._useTimerCommand) return sitemover. 
put_data(source, surl, fsize, fchecksum, **pdict) return -1, "No objectstore sitemover found for this scheme(%s)" % destination, destination, fsize, fchecksum, config_sm.ARCH_DEFAULT if __name__ == '__main__': os.environ['PilotHomeDir'] = os.getcwd() from SiteInformation import SiteInformation s1 = SiteInformation() #s1.getObjectstoresField("os_access_key", "eventservice", queuename='BNL_EC2W2_MCORE') f = objectstoreSiteMover() gpfn = "nonsens_gpfn" lfn = "AOD.310713._000004.pool.root.1" path = os.getcwd() fsize = "4261010441" fchecksum = "9145af38" dsname = "data11_7TeV.00177986.physics_Egamma.merge.AOD.r2276_p516_p523_tid310713_00" report = {} #print f.getGlobalFilePaths(dsname) #print f.findGlobalFilePath(lfn, dsname) #print f.getLocalROOTSetup() #path = "root://atlas-objectstore.cern.ch//atlas/eventservice/2181626927" # + your .root filename" """ source = "/bin/hostname" dest = "root://eosatlas.cern.ch//eos/atlas/unpledged/group-wisc/users/wguan/" lfn = "NTUP_PHOTON.01255150._000001.root.1" localSize = 17848 localChecksum = "89b93830" print f.put_data(source, dest, fsize=localSize, fchecksum=localChecksum, prodSourceLabel='ptest', experiment='ATLAS', report =report, lfn=lfn, guid='aa8ee1ae-54a5-468b-a0a0-41cf17477ffc') gpfn = "root://eosatlas.cern.ch//eos/atlas/unpledged/group-wisc/users/wguan/NTUP_PHOTON.01255150._000001.root.1" lfn = "NTUP_PHOTON.01255150._000001.root.1" tmpDir = "/tmp/" localSize = 17848 localChecksum = "89b93830" print f.get_data(gpfn, lfn, tmpDir, fsize=localSize, fchecksum=localChecksum, experiment='ATLAS', report =report, guid='aa8ee1ae-54a5-468b-a0a0-41cf17477ffc') """ # test S3 object store source = "/bin/hostname" #dest = "s3://ceph003.usatlas.bnl.gov:8443//wguan_bucket/dir1/dir2/NTUP_PHOTON.01255150._000001.root.1" dest = "s3://s3-us-west-2.amazonaws.com:80//s3-atlasdatadisk-west2-racf/dir1/" lfn = "NTUP_PHOTON.01255150._000001.root.1" localSize = None localChecksum = None print f.put_data(source, dest, fsize=localSize, fchecksum=localChecksum, prodSourceLabel='ptest', experiment='ATLAS', report =report, lfn=lfn, guid='aa8ee1ae-54a5-468b-a0a0-41cf17477ffc', jobId=2730987843, jobsetID=2728044425,pandaProxySecretKey='') gpfn = "s3://ceph003.usatlas.bnl.gov:8443//wguan_bucket/dir1/dir2/NTUP_PHOTON.01255150._000001.root.1" gpfn = "s3://s3-us-west-2.amazonaws.com:80//s3-atlasdatadisk-west2-racf/dir1/NTUP_PHOTON.01255150._000001.root.1" lfn = "NTUP_PHOTON.01255150._000001.root.1" tmpDir = "/tmp/" localSize = None localChecksum = None print f.get_data(gpfn, lfn, tmpDir, fsize=localSize, fchecksum=localChecksum, experiment='ATLAS', report =report, guid='aa8ee1ae-54a5-468b-a0a0-41cf17477ffc', jobId=2730987843, jobsetID=2728044425,pandaProxySecretKey='deb05b9fb5034a45b80c03bd671359c9')
[]
Miguel-mmf/Biblioteca_Dash_em-Python
codigos_videos/Exemplo_2.py
63d268f568c02bc9b6c73e1f52ade2475ffbb3c5
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # Esse arquivo possui algumas modificações em relação ao arquivo apresentado no vídeo do YouTube # Não deixe de assistir o vídeo e estudar pela documentação ofical Dash # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # importando as bibliotecas necessárias import dash import dash_core_components as dcc import dash_html_components as html # importando as funções que auxiliam no funcionamento das callbacks do subpacote dependencies do pacote dash from dash.dependencies import Input, Output # importando o módulo graph_objects da biblioteca plotly import plotly.graph_objects as go # adicionando um estilo externo através do link abaixo # esse link é o recomendado pela documentação da biblioteca Dash e ao acessar esse link no seu navegador, # você perceberá que ele possui a estrutura de um arquivo CSS external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css'] # criando a aplicação por meio da função Dash do pacote dash e atribuindo a variável app app = dash.Dash( __name__, external_stylesheets=external_stylesheets ) # criando uma função para gerar um gráfico com a biblioteca plotly.graph_objects def gera_grafico(tipo): # criando uma figura # caso você faça print(fig), um dicionário será apresentado uma vez que as figuras podem ser representadas dessa forma, necessitando de módulos da biblioteca plotly para trabalhar com as informações fig = go.Figure() # https://plotly.com/python/creating-and-updating-figures/ # adicionando um traço a figura fig.add_trace( go.Scatter( x=[0,1,2,3,4,5,6], y=[0,1,2,3,4,5,6], mode=tipo, name='Reta', ) ) fig.add_trace( go.Scatter( x=[0,1,2,3,4,5,6], y=[0,1,4,9,16,25,36], mode=tipo, name='Parábola', ) ) # adicionando um título ao gráfico fig.update_layout(title='Gráfico Exemplo') # variável retornada pela função gera_grafico(tipo) return fig # criando um layout para a variável app # adicionando ao layout um componente html.Div que irá conter os demais componentes que dão forma app.layout = html.Div([ # inserindo um componente da biblioteca dash HTML components como título/cabeçalho do layout html.H2( ['Painel de Visualização de Gráficos'], # o parâmetro style define estilos css para o componente style={ 'textAlign':'center', # texto alinhado 'font-weight':'bold' # texto em negrito } ), # adicionando uma linha horizontal no layout html.Hr(), # criando abas pai dentro do layout dcc.Tabs( # identidade/nome do componente id='tabs', # criando as abas filhas dentro do parâmetro children da função Tabs() children=[ dcc.Tab(label='Gráfico de linha',value='tab-1'), dcc.Tab(label='Gráfico de Barra',value='tab-2'), dcc.Tab(label='Gráfico de Linha e Pontos',value='tab-3') ] ), # onde será apresentado o conteúdo das abas logo após a callback ser ativada html.Div(id='tabs-content'), html.Hr(), ]) # Callback # estruturando a callback com as entradas (input) e saídas (output) @app.callback( # Output(component_id,component_property) Output('tabs-content','children'), [ # Input(component_id,component_property) Input('tabs','value') ] ) # função que será chamada pela callback def update_tab(tab): # quando a aba com valor igual a 'tab-1' for selecionada, a propriedade children do componente 'tabs-content' # receberá o gráfico de linha retornado abaixo pela função gera_gráfico(tipo='lines') if tab == 'tab-1': return html.Div([ dcc.Graph(figure = gera_grafico('lines')) ]) # quando a aba com valor 
igual a 'tab-2' for selecionada, a propriedade children do componente 'tabs-content' # receberá o gráfico de barras construído e retornado abaixo elif tab == 'tab-2': fig_bar = go.Figure() fig_bar.add_trace( go.Bar( x=[0,1,2,3,4,5,6], y=[0,1,2,3,4,5,6], ) ) fig_bar.add_trace( go.Bar( x=[0,1,2,3,4,5,6], y=[0,1,4,9,16,25,36], ) ) fig_bar.update_layout(title='Gráfico em Barras Exemplo') return html.Div([ dcc.Graph(figure = fig_bar) ]) # quando a aba com valor igual a 'tab-3' for selecionada, a propriedade children do componente 'tabs-content' # receberá o gráfico de linha retornado abaixo pela função gera_gráfico(tipo='lines+markers') elif tab == 'tab-3': return html.Div([ dcc.Graph(figure = gera_grafico('lines+markers')) ]) # caso nenhuma das condições acima sejam aceitas, significa que existe um erro, e assim, retornamos a mensagem de erro else: return html.Div(['Erro!']) # servindo a aplicação em dash como versão para teste if __name__ == "__main__": app.run_server(debug=True)
[((1211, 1273), 'dash.Dash', 'dash.Dash', (['__name__'], {'external_stylesheets': 'external_stylesheets'}), '(__name__, external_stylesheets=external_stylesheets)\n', (1220, 1273), False, 'import dash\n'), ((1629, 1640), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (1638, 1640), True, 'import plotly.graph_objects as go\n'), ((3561, 3595), 'dash.dependencies.Output', 'Output', (['"""tabs-content"""', '"""children"""'], {}), "('tabs-content', 'children')\n", (3567, 3595), False, 'from dash.dependencies import Input, Output\n'), ((1768, 1856), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': '[0, 1, 2, 3, 4, 5, 6]', 'y': '[0, 1, 2, 3, 4, 5, 6]', 'mode': 'tipo', 'name': '"""Reta"""'}), "(x=[0, 1, 2, 3, 4, 5, 6], y=[0, 1, 2, 3, 4, 5, 6], mode=tipo,\n name='Reta')\n", (1778, 1856), True, 'import plotly.graph_objects as go\n'), ((1942, 2037), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': '[0, 1, 2, 3, 4, 5, 6]', 'y': '[0, 1, 4, 9, 16, 25, 36]', 'mode': 'tipo', 'name': '"""Parábola"""'}), "(x=[0, 1, 2, 3, 4, 5, 6], y=[0, 1, 4, 9, 16, 25, 36], mode=tipo,\n name='Parábola')\n", (1952, 2037), True, 'import plotly.graph_objects as go\n'), ((2526, 2631), 'dash_html_components.H2', 'html.H2', (["['Painel de Visualização de Gráficos']"], {'style': "{'textAlign': 'center', 'font-weight': 'bold'}"}), "(['Painel de Visualização de Gráficos'], style={'textAlign':\n 'center', 'font-weight': 'bold'})\n", (2533, 2631), True, 'import dash_html_components as html\n'), ((2848, 2857), 'dash_html_components.Hr', 'html.Hr', ([], {}), '()\n', (2855, 2857), True, 'import dash_html_components as html\n'), ((3367, 3394), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""tabs-content"""'}), "(id='tabs-content')\n", (3375, 3394), True, 'import dash_html_components as html\n'), ((3401, 3410), 'dash_html_components.Hr', 'html.Hr', ([], {}), '()\n', (3408, 3410), True, 'import dash_html_components as html\n'), ((3659, 3681), 'dash.dependencies.Input', 'Input', (['"""tabs"""', '"""value"""'], {}), "('tabs', 'value')\n", (3664, 3681), False, 'from dash.dependencies import Input, Output\n'), ((4294, 4305), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (4303, 4305), True, 'import plotly.graph_objects as go\n'), ((4346, 4402), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'x': '[0, 1, 2, 3, 4, 5, 6]', 'y': '[0, 1, 2, 3, 4, 5, 6]'}), '(x=[0, 1, 2, 3, 4, 5, 6], y=[0, 1, 2, 3, 4, 5, 6])\n', (4352, 4402), True, 'import plotly.graph_objects as go\n'), ((4496, 4555), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'x': '[0, 1, 2, 3, 4, 5, 6]', 'y': '[0, 1, 4, 9, 16, 25, 36]'}), '(x=[0, 1, 2, 3, 4, 5, 6], y=[0, 1, 4, 9, 16, 25, 36])\n', (4502, 4555), True, 'import plotly.graph_objects as go\n'), ((5251, 5270), 'dash_html_components.Div', 'html.Div', (["['Erro!']"], {}), "(['Erro!'])\n", (5259, 5270), True, 'import dash_html_components as html\n'), ((3085, 3133), 'dash_core_components.Tab', 'dcc.Tab', ([], {'label': '"""Gráfico de linha"""', 'value': '"""tab-1"""'}), "(label='Gráfico de linha', value='tab-1')\n", (3092, 3133), True, 'import dash_core_components as dcc\n'), ((3146, 3194), 'dash_core_components.Tab', 'dcc.Tab', ([], {'label': '"""Gráfico de Barra"""', 'value': '"""tab-2"""'}), "(label='Gráfico de Barra', value='tab-2')\n", (3153, 3194), True, 'import dash_core_components as dcc\n'), ((3207, 3264), 'dash_core_components.Tab', 'dcc.Tab', ([], {'label': '"""Gráfico de Linha e Pontos"""', 'value': '"""tab-3"""'}), "(label='Gráfico de Linha e Pontos', value='tab-3')\n", (3214, 
3264), True, 'import dash_core_components as dcc\n'), ((4722, 4747), 'dash_core_components.Graph', 'dcc.Graph', ([], {'figure': 'fig_bar'}), '(figure=fig_bar)\n', (4731, 4747), True, 'import dash_core_components as dcc\n')]
tochanenko/MetaProgramming
Lab 2/javaccflab/formatter.py
d37f21432483e39e135fd0dc4f8767836eea1609
import re import datetime from javaccflab.lexer import parse from javaccflab.java_token import TokenType, Token, update_token_value class Formatter: def __init__(self, files): self.__files = files self.__file = None self.__tokens = [] self.__to_fix = dict() def process(self): tokens = [] for file in self.__files: tokens.append(parse(open(file, 'r').read())) i = 0 while i < len(tokens): self.__tokens = tokens[i] self.__file = self.__files[i] self.__find_to_fix() tokens[i] = self.__tokens i += 1 i = 0 while i < len(tokens): self.__tokens = tokens[i] self.__file = self.__files[i] self.__fix() self.__fix_comments() tokens[i] = self.__tokens i += 1 return tokens def __find_to_fix(self): i = 0 while i < len(self.__tokens): token = self.__tokens[i] if token.get_value() == 'package': i = self.__fix_package(i) elif token.get_value() in ('class', 'interface') and self.__tokens[i - 1].get_value() != '.': i = self.__skip_ws_tokens(i + 1) if not Formatter.is_camel_upper_case(self.__tokens[i].get_value()): self.__to_fix[self.__tokens[i].get_value()] = Formatter.to_camel_upper_case( self.__tokens[i].get_value()) i = self.__fix_class_body(i, self.__tokens[i].get_value()) i += 1 def __fix_package(self, pos): pos = self.__skip_ws_tokens(pos) while self.__tokens[pos].get_value() != ';': if self.__tokens[pos].get_type() == TokenType.IDENTIFIER and not Formatter.is_lower_case( self.__tokens[pos].get_value()): self.__to_fix[self.__tokens[pos].get_value()] = Formatter.to_lower_case( (self.__tokens[pos].get_value())) pos += 1 return pos def __fix_class_body(self, pos, class_name): while self.__tokens[pos].get_value() != '{': pos += 1 count = 1 pos += 1 while count != 0: if self.__tokens[pos].get_value() == '{': count += 1 elif self.__tokens[pos].get_value() == '}': count -= 1 elif self.__tokens[pos].get_value() == 'static': i = self.__skip_ws_tokens(pos + 1) if self.__tokens[i].get_value() == '{': pos = i + 1 count += 1 continue elif self.__tokens[pos].get_type() in (TokenType.IDENTIFIER, TokenType.KEYWORD): if self.__is_parameter(pos): parameters, i = self.__get_field_names(pos) if self.__is_final(pos): for parameter in parameters: if not Formatter.is_snake_upper_case(parameter): self.__to_fix[parameter] = Formatter.to_snake_upper_case(parameter) else: for parameter in parameters: if not Formatter.is_camel_lower_case(parameter): self.__to_fix[parameter] = Formatter.to_camel_lower_case(parameter) pos = i else: self.__fix_method_name(pos, class_name) parameters = self.__get_method_parameters(pos) pos = self.__fix_method_body(pos, parameters) pos += 1 return pos def __fix_method_name(self, i, class_name): while self.__tokens[i].get_value() not in ('(', ';'): i += 1 i -= 1 while self.__tokens[i].get_type() == TokenType.WHITESPACE: i -= 1 if self.__tokens[i].get_value() != class_name and not Formatter.is_snake_lower_case( self.__tokens[i].get_value()): self.__to_fix[self.__tokens[i].get_value()] = Formatter.to_snake_lower_case(self.__tokens[i].get_value()) def __get_method_parameters(self, i): parameters = dict() while self.__tokens[i].get_value() != '(': i += 1 while self.__tokens[i].get_value() != ')': if self.__tokens[i + 1].get_value() in (')', ','): pos = i while self.__tokens[pos].get_type() == TokenType.WHITESPACE: pos -= 1 if not Formatter.is_camel_lower_case(self.__tokens[pos].get_value()): fixed_value = Formatter.to_camel_lower_case(self.__tokens[pos].get_value()) parameters[self.__tokens[pos].get_value()] = fixed_value update_token_value(self.__file, self.__tokens[pos], fixed_value) i += 1 return 
parameters def __fix_method_body(self, i, method_parameters): params = dict() while self.__tokens[i].get_value() not in ('{', ';'): if self.__tokens[i].get_value() in method_parameters.keys(): update_token_value(self.__file, self.__tokens[i], method_parameters[self.__tokens[i].get_value()]) i += 1 if self.__tokens[i].get_value() == ';': return i + 1 brace_count = 1 i += 1 while brace_count != 0: if self.__tokens[i].get_value() == '{': brace_count += 1 elif self.__tokens[i].get_value() == '}': brace_count -= 1 elif self.__tokens[i].get_value() in ('=', ';'): naming_pos = i - 1 while self.__tokens[naming_pos].get_type() == TokenType.WHITESPACE: naming_pos -= 1 if self.__tokens[naming_pos].get_type() == TokenType.IDENTIFIER: type_pos = naming_pos - 1 while self.__tokens[type_pos].get_type() == TokenType.WHITESPACE: type_pos -= 1 if (self.__tokens[type_pos].get_type() in (TokenType.IDENTIFIER, TokenType.KEYWORD) and \ self.__tokens[type_pos].get_value() not in ('class', 'identifier')) or self.__tokens[ type_pos].get_value() == ',': if not Formatter.is_camel_lower_case(self.__tokens[naming_pos].get_value()): fixed_value = Formatter.to_camel_lower_case(self.__tokens[naming_pos].get_value()) params[self.__tokens[naming_pos].get_value()] = fixed_value update_token_value(self.__file, self.__tokens[naming_pos], fixed_value) elif self.__tokens[i].get_type() == TokenType.IDENTIFIER and self.__tokens[ i].get_value() in params.keys(): update_token_value(self.__file, self.__tokens[i], params[self.__tokens[i].get_value()]) elif self.__tokens[i].get_type() == TokenType.IDENTIFIER and self.__tokens[ i].get_value() in method_parameters.keys(): update_token_value(self.__file, self.__tokens[i], method_parameters[self.__tokens[i].get_value()]) i += 1 return i def __get_field_names(self, i): params = [] while self.__tokens[i].get_value() != ';': if self.__tokens[i + 1].get_value() in (';', '=', ','): pos = i while self.__tokens[pos].get_type() == TokenType.WHITESPACE: pos -= 1 field_name = self.__tokens[pos].get_value() is_value = False if self.__tokens[i + 1].get_value() in (';', ','): while pos > 0 and self.__tokens[pos].get_value() not in (';', '}'): if self.__tokens[pos].get_value() == '=': is_value = True pos -= 1 if not is_value: params.append(field_name) i += 1 end = i return params, end def __is_final(self, i): while self.__tokens[i].get_value() not in (';', '=', '('): if self.__tokens[i].get_value() == 'final': return True i += 1 return False def __is_parameter(self, pos): while self.__tokens[pos].get_value() != ';' and pos < len(self.__tokens): if self.__tokens[pos].get_value() == '=': return True elif self.__tokens[pos].get_value() in ('class', 'interface', '(', ')'): return False pos += 1 return True def __fix(self): for token in self.__tokens: if token.get_value() in self.__to_fix and not token.is_fixed(): update_token_value(self.__file, token, self.__to_fix[token.get_value()]) def __fix_comments(self): self.__add_start_comment() i = 0 while i < len(self.__tokens): if self.__tokens[i].get_value() in ('class', 'interface'): i = self.__fix_class_comments(i) i += 1 i += 1 # Fix start comment def __add_start_comment(self): if not self.__is_start_comment_exists(): comment_token = Token(None, TokenType.COMMENT) comment_string = f'/*\n' \ f' * {self.__find_class_name()}\n' \ f' *\n' \ f' * {datetime.date.today().strftime("%B %d, %Y")}\n' \ f' */' update_token_value(self.__file, comment_token, comment_string) self.__tokens.insert(0, comment_token) self.__tokens.insert(1, Token('\n', TokenType.WHITESPACE)) 
self.__tokens.insert(1, Token('\n', TokenType.WHITESPACE)) def __is_start_comment_exists(self): i = self.__skip_ws_tokens(0) return self.__tokens[i].get_type() == TokenType.COMMENT def __find_class_name(self, i=0): while self.__tokens[i].get_value() not in ('class', 'interface') and self.__tokens[i - 1].get_value() != '.': i += 1 i = self.__skip_ws_tokens(i + 1) return self.__tokens[i].get_value() # Fix class comment def __fix_class_comments(self, pos): comment_token = self.__find_doc_comment_before(pos) if comment_token is None: comment_token = Token(None, TokenType.COMMENT) comment_string = f'/**\n' \ f' * Implementation of {self.__find_class_name(pos)}\n' \ f' */' update_token_value(self.__file, comment_token, comment_string) insert_pos = self.__find_token_before(pos, '\n') self.__tokens.insert(insert_pos, Token('\n', TokenType.WHITESPACE)) self.__tokens.insert(insert_pos + 1, comment_token) else: self.__fix_comment_links(comment_token) return self.__fix_class_body_comments(pos) # Fix comments for methods and fields def __fix_class_body_comments(self, pos): while self.__tokens[pos].get_value() != '{': pos += 1 count = 1 pos += 1 while count != 0: if self.__tokens[pos].get_value() == '{': count += 1 elif self.__tokens[pos].get_value() == '}': count -= 1 elif self.__tokens[pos].get_value() == 'static': i = self.__skip_ws_tokens(pos + 1) if self.__tokens[i].get_value() == '{': pos = i + 1 count += 1 continue elif self.__tokens[pos].get_type() in (TokenType.IDENTIFIER, TokenType.KEYWORD) and self.__tokens[ pos + 1].get_value() != '.' and self.__tokens[pos].get_value() not in ('class', 'interface'): if self.__is_parameter(pos): pos = self.__fix_field_comment(pos) else: pos = self.__fix_method_comment(pos) pos += 1 return pos def __fix_field_comment(self, pos): comment_token = self.__find_doc_comment_before(pos) indent = self.__get_indent(pos) if comment_token is None: field_names = ', '.join(self.__get_field_names(pos)[0]) visibility = self.__find_visibility(pos) comment_token = Token(None, TokenType.COMMENT) comment_string = comment_string = f'{indent}/**\n' \ f'{indent} * The {visibility} {field_names} {"constant" if self.__is_final(pos) else "variable"}{"s" if len(field_names) > 0 else ""}\n' \ f'{indent} */' update_token_value(self.__file, comment_token, comment_string) insert_pos = self.__find_token_before(pos, '\n') self.__tokens.insert(insert_pos, Token('\n', TokenType.WHITESPACE)) self.__tokens.insert(insert_pos + 1, comment_token) else: self.__fix_comment_links(comment_token) return self.__find_token_after(pos, ';') def __find_visibility(self, pos): pos = self.__find_token_before(pos, '\n') while self.__tokens[pos].get_value() not in ('=', ';', '('): if self.__tokens[pos].get_value() in ('private', 'public', 'protected'): return self.__tokens[pos].get_value() pos += 1 return 'package-private' def __fix_method_comment(self, pos): comment_token = self.__find_doc_comment_before(pos) indent = self.__get_indent(pos) all_params = [] if comment_token is None: params = self.__get_parameter_list(pos) params.extend(self.__get_type_parameter_list(pos)) if len(params) > 0: all_params.append("\n".join([f"{indent} * @param {param}" for param in params])) throws = self.__get_throws(pos) if len(throws) > 0: all_params.append("\n".join([f"{indent} * @throws {param}" for param in throws])) return_type = self.__get_return_type(pos) if len(return_type) > 0: all_params.append(f"{indent} * @return {self.__get_return_type(pos)}") comment_token = Token(None, TokenType.COMMENT) comment_string = 
f'{indent}/**\n' + \ '\n'.join(all_params) + \ ('' if len(params) <= 0 else ' ') + \ f'\n{indent} */' update_token_value(self.__file, comment_token, comment_string) insert_pos = self.__find_token_before(pos, '\n') self.__tokens.insert(insert_pos, Token('\n', TokenType.WHITESPACE)) self.__tokens.insert(insert_pos + 1, comment_token) else: self.__fix_comment_links(comment_token) params_list = self.__get_parameter_list(pos) params_list.extend(self.__get_type_parameter_list(pos)) throws_list = self.__get_throws(pos) return_type_value = self.__get_return_type(pos) params, throws, return_type = self.__fix_comment_params(comment_token) comment_string = comment_token.get_value() append_string = '' i = 0 if len(params) < len(params_list): append_string += "\n" + "\n".join( [f"{indent} * @param {param}" for param in Formatter.get_missing(params, params_list)]) i = comment_string.rfind('@param') if i != -1: i = comment_string.find('\n', i) if comment_string.find('\n', i) != -1 else comment_string.find('*', i) - 1 comment_string = comment_string[:i] + append_string + comment_string[i:] append_string = '' if len(throws) < len(throws_list): append_string += "\n" + "\n".join( [f"{indent} * @throws {param}" for param in Formatter.get_missing(throws, throws_list)]) i = comment_string.rfind('@throws') if i != -1: i = comment_string.find('\n', i) if comment_string.find('\n', i) != -1 else comment_string.find('*', i) - 1 comment_string = comment_string[:i] + append_string + comment_string[i:] append_string = '' i = comment_string.find('\n', i) if len(return_type) == '': append_string += "\n" + f"\n{indent} * @return {return_type_value}" else: i = comment_string.rfind('@return') while comment_string[i] != '\n': i -= 1 comment_string = comment_string[:i] + append_string + comment_string[i:] if comment_string != comment_token.get_value(): update_token_value(self.__file, comment_token, comment_string) return self.__skip_method(pos) @staticmethod def get_missing(before, after): missing_params = [] for value in after: if value not in before: missing_params.append(value) return missing_params def __get_parameter_list(self, pos): parameters = [] while self.__tokens[pos].get_value() != '(': pos += 1 while self.__tokens[pos].get_value() != ')': if self.__tokens[pos + 1].get_value() in (')', ','): i = pos while self.__tokens[i].get_type() == TokenType.WHITESPACE: i -= 1 parameters.append(self.__tokens[i].get_value()) pos += 1 return parameters def __get_type_parameter_list(self, pos): parameters = [] while self.__tokens[pos].get_value() != '<': if self.__tokens[pos].get_value() == '(': return parameters pos += 1 i = pos - 1 while self.__tokens[i].get_type() == TokenType.WHITESPACE: i -= 1 if self.__tokens[i].get_type() != TokenType.KEYWORD or self.__tokens[i].get_value() not in ('}', ';'): return parameters while self.__tokens[pos].get_value() != '>': if self.__tokens[pos - 1].get_value() in ('<', ','): i = pos while self.__tokens[i].get_type() == TokenType.WHITESPACE: i += 1 parameters.append(self.__tokens[i].get_value()) pos += 1 return parameters def __get_throws(self, pos): throws = [] is_throws = False while self.__tokens[pos].get_value() not in ('{', ';'): if self.__tokens[pos].get_value() == 'throws': is_throws = True elif is_throws and self.__tokens[pos].get_type() == TokenType.IDENTIFIER: throws.append(self.__tokens[pos].get_value()) pos += 1 return throws def __get_return_type(self, pos): return_type = [] while self.__tokens[pos].get_value() != '(': pos += 1 pos -= 1 while self.__tokens[pos].get_type() == 
TokenType.WHITESPACE: pos -= 1 while self.__tokens[pos].get_type() != TokenType.WHITESPACE: pos -= 1 while self.__tokens[pos].get_type() == TokenType.WHITESPACE: pos -= 1 if self.__tokens[pos].get_value() == '>': while self.__tokens[pos].get_value() != '<': return_type.append(self.__tokens[pos].get_value()) pos -= 1 return_type.append(self.__tokens[pos].get_value()) pos -= 1 while self.__tokens[pos].get_type() == TokenType.WHITESPACE: return_type.append(self.__tokens[pos].get_value()) pos -= 1 return_type.append(self.__tokens[pos].get_value()) return_type.reverse() return ''.join(return_type) def __fix_comment_params(self, comment_token): i = 0 params = [] throws = [] return_type = '' comment_string = comment_token.get_value() while i < len(comment_string): if comment_string[i] == '@': start = comment_string.find(' ', i) macro = comment_string[i:start] end = min(comment_string.find(' ', start + 1), comment_string.find('\n', start + 1)) end = end if end >= 0 else max(comment_string.find(' ', start + 1), comment_string.find('\n', start + 1)) if end > 0: value = comment_string[start + 1:end] new_value = self.__fix_link(value) if value != new_value: comment_string = comment_string.replace(value, new_value) update_token_value(self.__file, comment_token, comment_string) value = new_value if macro == '@param': params.append(value) elif macro == '@throws': throws.append(value) elif macro == '@return': return_type = value i += 1 return params, throws, return_type def __skip_method(self, pos): while self.__tokens[pos].get_value() != '{': if self.__tokens[pos].get_value() == ';': return pos + 1 pos += 1 count = 1 pos += 1 while count != 0: if self.__tokens[pos].get_value() == '{': count += 1 elif self.__tokens[pos].get_value() == '}': count -= 1 pos += 1 return pos def __find_doc_comment_before(self, pos): while self.__tokens[pos].get_value() != '\n': pos -= 1 while pos > 0 and self.__tokens[pos].get_type() == TokenType.WHITESPACE: pos -= 1 if self.__tokens[pos].get_type() == TokenType.COMMENT and self.__tokens[pos].get_value().startswith('/**'): return self.__tokens[pos] return None def __find_token_before(self, pos, value): while pos > 0 and self.__tokens[pos].get_value() != value: pos -= 1 return pos def __find_token_after(self, pos, value): while pos < len(self.__tokens) and self.__tokens[pos].get_value() != value: pos += 1 return pos def __fix_comment_links(self, comment_token): i = 0 link = None comment_string = comment_token.get_value() while i < len(comment_string): if comment_string[i] == '@': start = comment_string.find(' ', i) if comment_string[i:start] != '@see': i += 1 continue end = comment_string.find('\n', i) link = comment_string[start:end] elif comment_string[i] == '{': start = comment_string.find(' ', i) end = comment_string.find('}', i) link = comment_string[start:end] if link is not None: new_link = self.__fix_link(link) comment_string = comment_string.replace(link, new_link) link = None i += 1 if comment_string != comment_token.get_value(): update_token_value(self.__file, comment_token, comment_string) def __fix_link(self, link): for name in self.__to_fix.keys(): pos = link.find(name) if pos != -1 and not (link[pos - 1].isalpha() or link[ pos - 1].isdigit() or link[pos - 1] == '_'): link = link.replace(name, self.__to_fix[name]) return link def __get_indent(self, pos): pos = self.__find_token_before(pos, '\n') count = 0 while self.__tokens[pos].get_type() == TokenType.WHITESPACE: if self.__tokens[pos].get_value() == ' ': count += 1 pos += 1 return ' ' * count def 
__skip_ws_tokens(self, pos): while self.__tokens[pos].get_type() == TokenType.WHITESPACE: pos += 1 return pos @staticmethod def is_lower_case(naming): return naming.find('_') == -1 and naming.islower() @staticmethod def to_lower_case(naming): return ''.join([component.lower() for component in naming.split('_')]) @staticmethod def is_camel_lower_case(naming): return naming.find('_') == -1 and not naming.isupper() and not naming[0].isupper() @staticmethod def to_camel_lower_case(naming): naming = Formatter.remove_underscores_around(naming) components = [ component[0] + component[1:].lower() if component.isupper() else component[0].upper() + component[1:] for component in naming.split('_')] return components[0][0].lower() + components[0][1:] + ''.join(components[1:]) @staticmethod def is_camel_upper_case(naming): return naming.find('_') == -1 and not naming.isupper() and naming[0].isupper() @staticmethod def to_camel_upper_case(naming): lower = Formatter.to_camel_lower_case(naming) return lower[0].upper() + lower[1:] @staticmethod def is_snake_lower_case(naming): return naming.islower() @staticmethod def to_snake_lower_case(naming): naming = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', naming) return re.sub('([a-z0-9])([A-Z])', r'\1_\2', naming).lower() @staticmethod def is_snake_upper_case(naming): return naming.isupper() @staticmethod def to_snake_upper_case(naming): return Formatter.to_snake_lower_case(naming).upper() @staticmethod def remove_underscores_around(naming): i = 0 while naming[i] == '_': i += 1 naming = naming[i:] j = len(naming) - 1 while naming[j] == '_': i -= 1 naming = naming[:j + 1] return naming
[((26009, 26054), 're.sub', 're.sub', (['"""(.)([A-Z][a-z]+)"""', '"""\\\\1_\\\\2"""', 'naming'], {}), "('(.)([A-Z][a-z]+)', '\\\\1_\\\\2', naming)\n", (26015, 26054), False, 'import re\n'), ((9361, 9391), 'javaccflab.java_token.Token', 'Token', (['None', 'TokenType.COMMENT'], {}), '(None, TokenType.COMMENT)\n', (9366, 9391), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((9669, 9731), 'javaccflab.java_token.update_token_value', 'update_token_value', (['self.__file', 'comment_token', 'comment_string'], {}), '(self.__file, comment_token, comment_string)\n', (9687, 9731), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((10517, 10547), 'javaccflab.java_token.Token', 'Token', (['None', 'TokenType.COMMENT'], {}), '(None, TokenType.COMMENT)\n', (10522, 10547), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((10723, 10785), 'javaccflab.java_token.update_token_value', 'update_token_value', (['self.__file', 'comment_token', 'comment_string'], {}), '(self.__file, comment_token, comment_string)\n', (10741, 10785), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((12524, 12554), 'javaccflab.java_token.Token', 'Token', (['None', 'TokenType.COMMENT'], {}), '(None, TokenType.COMMENT)\n', (12529, 12554), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((12878, 12940), 'javaccflab.java_token.update_token_value', 'update_token_value', (['self.__file', 'comment_token', 'comment_string'], {}), '(self.__file, comment_token, comment_string)\n', (12896, 12940), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((14438, 14468), 'javaccflab.java_token.Token', 'Token', (['None', 'TokenType.COMMENT'], {}), '(None, TokenType.COMMENT)\n', (14443, 14468), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((14699, 14761), 'javaccflab.java_token.update_token_value', 'update_token_value', (['self.__file', 'comment_token', 'comment_string'], {}), '(self.__file, comment_token, comment_string)\n', (14717, 14761), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((23947, 24009), 'javaccflab.java_token.update_token_value', 'update_token_value', (['self.__file', 'comment_token', 'comment_string'], {}), '(self.__file, comment_token, comment_string)\n', (23965, 24009), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((9819, 9852), 'javaccflab.java_token.Token', 'Token', (['"""\n"""', 'TokenType.WHITESPACE'], {}), "('\\n', TokenType.WHITESPACE)\n", (9824, 9852), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((9890, 9923), 'javaccflab.java_token.Token', 'Token', (['"""\n"""', 'TokenType.WHITESPACE'], {}), "('\\n', TokenType.WHITESPACE)\n", (9895, 9923), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((10893, 10926), 'javaccflab.java_token.Token', 'Token', (['"""\n"""', 'TokenType.WHITESPACE'], {}), "('\\n', TokenType.WHITESPACE)\n", (10898, 10926), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((13048, 13081), 'javaccflab.java_token.Token', 'Token', (['"""\n"""', 'TokenType.WHITESPACE'], {}), "('\\n', TokenType.WHITESPACE)\n", (13053, 13081), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((14868, 14901), 'javaccflab.java_token.Token', 'Token', (['"""\n"""', 
'TokenType.WHITESPACE'], {}), "('\\n', TokenType.WHITESPACE)\n", (14873, 14901), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((17396, 17458), 'javaccflab.java_token.update_token_value', 'update_token_value', (['self.__file', 'comment_token', 'comment_string'], {}), '(self.__file, comment_token, comment_string)\n', (17414, 17458), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((26069, 26115), 're.sub', 're.sub', (['"""([a-z0-9])([A-Z])"""', '"""\\\\1_\\\\2"""', 'naming'], {}), "('([a-z0-9])([A-Z])', '\\\\1_\\\\2', naming)\n", (26075, 26115), False, 'import re\n'), ((4847, 4911), 'javaccflab.java_token.update_token_value', 'update_token_value', (['self.__file', 'self.__tokens[pos]', 'fixed_value'], {}), '(self.__file, self.__tokens[pos], fixed_value)\n', (4865, 4911), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((21386, 21448), 'javaccflab.java_token.update_token_value', 'update_token_value', (['self.__file', 'comment_token', 'comment_string'], {}), '(self.__file, comment_token, comment_string)\n', (21404, 21448), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n'), ((9422, 9443), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (9441, 9443), False, 'import datetime\n'), ((6692, 6763), 'javaccflab.java_token.update_token_value', 'update_token_value', (['self.__file', 'self.__tokens[naming_pos]', 'fixed_value'], {}), '(self.__file, self.__tokens[naming_pos], fixed_value)\n', (6710, 6763), False, 'from javaccflab.java_token import TokenType, Token, update_token_value\n')]
zangobot/secml
src/secml/adv/attacks/evasion/c_attack_evasion_pgd_exp.py
95a293e1201c24256eb7fe2f1d2125cd5f318c8c
""" .. module:: CAttackEvasionPGDExp :synopsis: Evasion attack using Projected Gradient Descent. .. moduleauthor:: Battista Biggio <[email protected]> """ from secml.adv.attacks.evasion import CAttackEvasionPGDLS class CAttackEvasionPGDExp(CAttackEvasionPGDLS): """Evasion attacks using Projected Gradient Descent with Exponential line search. This class implements the maximum-confidence evasion attacks proposed in: - https://arxiv.org/abs/1910.00470, EURASIP JIS, 2020. - https://arxiv.org/abs/1708.06939, ICCV W. ViPAR, 2017. It is the multi-class extension of our original work in: - https://arxiv.org/abs/1708.06131, ECML 2013, implemented using a standard projected gradient solver. This attack uses a faster line search than PGD-LS. In all our attacks, we use a smart double initialization to avoid using the mimicry term from our ECML 2013 paper, as described in: - https://pralab.diee.unica.it/sites/default/files/zhang15-tcyb.pdf, IEEE TCYB, 2015 If the attack is not successful when starting from x0, we initialize the optimization by projecting a point from another class onto the feasible domain and try again. Parameters ---------- classifier : CClassifier Target classifier. double_init_ds : CDataset or None, optional Dataset used to initialize an alternative init point (double init). double_init : bool, optional If True (default), use double initialization point. Needs double_init_ds not to be None. distance : {'l1' or 'l2'}, optional Norm to use for computing the distance of the adversarial example from the original sample. Default 'l2'. dmax : scalar, optional Maximum value of the perturbation. Default 1. lb, ub : int or CArray, optional Lower/Upper bounds. If int, the same bound will be applied to all the features. If CArray, a different bound can be specified for each feature. Default `lb = 0`, `ub = 1`. y_target : int or None, optional If None an error-generic attack will be performed, else a error-specific attack to have the samples misclassified as belonging to the `y_target` class. attack_classes : 'all' or CArray, optional Array with the classes that can be manipulated by the attacker or 'all' (default) if all classes can be manipulated. solver_params : dict or None, optional Parameters for the solver. Default None, meaning that default parameters will be used. See :class:`COptimizerPGDExp` for more information. Attributes ---------- class_type : 'e-pgd-exp' """ __class_type = 'e-pgd-exp' def __init__(self, classifier, double_init_ds=None, double_init=True, distance='l1', dmax=0, lb=0, ub=1, y_target=None, attack_classes='all', solver_params=None): # INTERNALS self._x0 = None self._y0 = None # this is an alternative init point. This could be a single point # (targeted evasion) or an array of multiple points, one for each # class (indiscriminate evasion). See _get_point_with_min_f_obj() self._xk = None super(CAttackEvasionPGDExp, self).__init__( classifier=classifier, double_init_ds=double_init_ds, double_init=double_init, distance=distance, dmax=dmax, lb=lb, ub=ub, y_target=y_target, attack_classes=attack_classes, solver_params=solver_params) self.solver_type = 'pgd-exp'
[]
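The class docstring in the row above enumerates the constructor parameters (classifier, double_init_ds, double_init, distance, dmax, lb/ub, y_target, attack_classes, solver_params). A minimal sketch of wiring them up, assuming clf is an already-trained secml classifier and tr a CDataset used for the double initialization; the commented-out run(...) call is assumed from secml's generic attack interface and does not appear in this row:

from secml.adv.attacks.evasion import CAttackEvasionPGDExp

# clf: a trained secml CClassifier; tr: a CDataset (both assumed to exist already).
attack = CAttackEvasionPGDExp(
    classifier=clf,        # target classifier
    double_init_ds=tr,     # dataset used for the double-init heuristic
    double_init=True,
    distance='l2',         # norm of the perturbation constraint
    dmax=0.5,              # maximum perturbation size
    lb=0.0, ub=1.0,        # per-feature bounds
    y_target=None,         # None -> error-generic (untargeted) evasion
    solver_params=None,    # documented default: use the solver's own parameters
)

# Assumed generic attack interface, not shown in this row:
# y_pred, scores, adv_ds, f_obj = attack.run(x, y)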
kinteriq/mail-log-parser
mail_log_parser/data_manager.py
e4242387c1767db611e266d463c817aeb8a74377
import sqlite3 class ManageData: def __init__(self, queue_tracker_db, email_tracker_db, delivery_tracker_db): self.queue_tracker_db = queue_tracker_db self.email_tracker_db = email_tracker_db self.delivery_tracker_db = delivery_tracker_db def manage_queue_tracker(self, fields): """ Receive one of the following located groups as <fields>: [('ID', <id>), ('client_email', <email>)]; [('ID', <id>), ('receivers', <email>), ('status', <status>)]; [('ID', <id>)]; and manage the <queue_tracker_db> accordingly. """ if len(fields) == 1: ID = fields[0][1] self.manage_email_tracker(ID) self.manage_delivery_tracker(ID) del self.queue_tracker_db[ID] elif len(fields) == 2: ID, client_email = (f[1] for f in fields) self.queue_tracker_db[ID]['client_email'] = client_email elif len(fields) == 3: ID, receiver, status = (f[1] for f in fields) if status == 'sent': code = 1 else: code = 0 self.queue_tracker_db[ID]['receivers'][receiver] = code def manage_email_tracker(self, ID): """ Retrieve client's email from the <queue_tracker_db> by <ID> with the amount of 'receivers' whose 'status' == 1 and store it in the <email_tracker_db>. """ client_email = self.queue_tracker_db[ID]['client_email'] receivers = self.queue_tracker_db[ID]['receivers'] delivered_mail = [r for r in receivers if receivers[r] == 1] if client_email in self.email_tracker_db: self.email_tracker_db[client_email] += len(delivered_mail) else: self.email_tracker_db[client_email] = len(delivered_mail) def manage_delivery_tracker(self, ID): """ Go through all receivers of <ID> queue of <queue_tracker_db>, and add their delivery statuses to the <delivery_tracker_db> counter """ receivers = self.queue_tracker_db[ID]['receivers'] for receiver in receivers: if receivers[receiver] == 1: self.delivery_tracker_db['delivered'] += 1 else: self.delivery_tracker_db['undelivered'] += 1 class ManageDatabase(ManageData): def __init__(self, path, *args, **kwargs): self.path = path super().__init__(*args, **kwargs) def _execute_command(self, *command): con = sqlite3.connect(self.path) cursor = con.cursor() result = cursor.execute(*command) if result: result = result.fetchall() con.commit() con.close() return result def create_db(self): self._execute_command('''CREATE TABLE IF NOT EXISTS email_tracker (client_email TEXT PRIMARY KEY, num_of_letters_sent INTEGER)''') def transfer_data(self): for email, num_of_letters in self.email_tracker_db.items(): self._execute_command('''INSERT INTO email_tracker VALUES (?, ?)''', (email, num_of_letters))
[((2551, 2577), 'sqlite3.connect', 'sqlite3.connect', (['self.path'], {}), '(self.path)\n', (2566, 2577), False, 'import sqlite3\n')]
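The manage_queue_tracker docstring in the row above describes three shapes of located field groups. A small usage sketch, grounded in the methods shown there; the queue ID and e-mail addresses are made up, and the import path simply mirrors the repo_path of the row:

from mail_log_parser.data_manager import ManageData

# The tracker "databases" are plain dicts; the delivery counter needs both keys up front.
queue = {'ABC123': {'client_email': None, 'receivers': {}}}
emails = {}
delivery = {'delivered': 0, 'undelivered': 0}

manager = ManageData(queue, emails, delivery)

# The three located-field shapes from the docstring:
manager.manage_queue_tracker([('ID', 'ABC123'), ('client_email', 'alice@example.com')])
manager.manage_queue_tracker([('ID', 'ABC123'), ('receivers', 'bob@example.com'), ('status', 'sent')])
manager.manage_queue_tracker([('ID', 'ABC123')])  # flush: updates the other trackers, drops the queue entry

print(emails)    # {'alice@example.com': 1}
print(delivery)  # {'delivered': 1, 'undelivered': 0}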
lychenyoko/content-aware-gan-compression
Util/training_util.py
fa4193df630dd7b0e7fc52dd60669d8e1aefc39d
import math

import torch                # provides randn_like / sqrt used below
from torch import autograd  # provides autograd.grad used below


def g_path_regularize(fake_img, latents, mean_path_length, decay=0.01):
    # Perturb the generated images with noise scaled by image resolution.
    noise = torch.randn_like(fake_img) / math.sqrt(
        fake_img.shape[2] * fake_img.shape[3]
    )
    # Gradient of the perturbed output w.r.t. the latents gives the path length.
    grad, = autograd.grad(
        outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True
    )
    path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1))

    # Exponential moving average of the path length and the penalty around it.
    path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length)
    path_penalty = (path_lengths - path_mean).pow(2).mean()

    return path_penalty, path_mean.detach(), path_lengths
[((126, 174), 'math.sqrt', 'math.sqrt', (['(fake_img.shape[2] * fake_img.shape[3])'], {}), '(fake_img.shape[2] * fake_img.shape[3])\n', (135, 174), False, 'import math\n')]
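Each apis entry appears to bundle the call span inside content, the resolved API name, the name as written, the argument sources, the argument span, an alias flag, and the import statement that binds the name; these field meanings are inferred from the rows in this dump rather than taken from a published schema. A small sketch that unpacks the single record shown above:

import ast

apis_field = (
    "[((126, 174), 'math.sqrt', 'math.sqrt', "
    "(['(fake_img.shape[2] * fake_img.shape[3])'], {}), "
    "'(fake_img.shape[2] * fake_img.shape[3])\\n', (135, 174), False, 'import math\\n')]"
)

for record in ast.literal_eval(apis_field):
    (call_span, qualified_name, written_name,
     (pos_args, kw_args), arg_source, arg_span, aliased, import_stmt) = record
    # call_span / arg_span look like character offsets into the 'content' column;
    # 'aliased' seems to flag "import X as Y" style imports (inferred, not documented).
    print(qualified_name, call_span, import_stmt.strip())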
DIS-SIN/FlaskShell
app/configs/development_settings.py
5f6d0cfeac8bea0b274d16a497e3a20cd00b155a
######################################################## FLASK SETTINGS ##############################################################
# Variable used to securely sign cookies
## THIS IS SET IN THE DEV ENVIRONMENT FOR CONVENIENCE BUT SHOULD BE SET AS AN ENVIRONMENT VARIABLE IN PROD
SECRET_KEY = "dev"

######################################################## DATABASE SETTINGS ####################################################
# Neo4j database URI used by the neomodel OGM
## THIS SHOULD BE SET AS AN ENVIRONMENT VARIABLE IN PRODUCTION ##
DATABASE_URI = "bolt://test:test@localhost:7687"
[]
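Both comments in the row above insist that these values come from environment variables in production. A minimal sketch of what such a production counterpart could look like; the file name and the fallback URI are hypothetical, only the variable names SECRET_KEY and DATABASE_URI come from the row:

# app/configs/production_settings.py (hypothetical counterpart to the development settings above)
import os

# Fail fast if the cookie-signing key is not provided by the environment.
SECRET_KEY = os.environ["SECRET_KEY"]

# Neo4j bolt URI consumed by the neomodel OGM; fall back to a local instance if unset.
DATABASE_URI = os.environ.get("DATABASE_URI", "bolt://neo4j:neo4j@localhost:7687")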
caoxiaoyue/PyAutoArray
autoarray/structures/grids/two_d/grid_2d_util.py
e10d3d6a5b8dd031f2ad277486bd539bd5858b2a
import numpy as np from typing import Tuple, Union, Optional from autoarray.structures.arrays.two_d import array_2d_util from autoarray.geometry import geometry_util from autoarray import numba_util from autoarray.mask import mask_2d_util @numba_util.jit() def grid_2d_centre_from(grid_2d_slim: np.ndarray) -> Tuple[float, float]: """ Returns the centre of a grid from a 1D grid. Parameters ---------- grid_2d_slim The 1D grid of values which are mapped to a 2D array. Returns ------- (float, float) The (y,x) central coordinates of the grid. """ centre_y = (np.max(grid_2d_slim[:, 0]) + np.min(grid_2d_slim[:, 0])) / 2.0 centre_x = (np.max(grid_2d_slim[:, 1]) + np.min(grid_2d_slim[:, 1])) / 2.0 return centre_y, centre_x @numba_util.jit() def grid_2d_slim_via_mask_from( mask_2d: np.ndarray, pixel_scales: Union[float, Tuple[float, float]], sub_size: int, origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x) scaled coordinates a the centre of every sub-pixel defined by this 2D mask array. The sub-grid is returned on an array of shape (total_unmasked_pixels*sub_size**2, 2). y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Masked coordinates are therefore removed and not included in the slimmed grid. Grid2D are defined from the top-left corner, where the first unmasked sub-pixel corresponds to index 0. Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth. Parameters ---------- mask_2d A 2D array of bools, where `False` values are unmasked and therefore included as part of the calculated sub-grid. pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. origin : (float, flloat) The (y,x) origin of the 2D array, which the sub-grid is shifted around. Returns ------- ndarray A slimmed sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask array. The sub grid array has dimensions (total_unmasked_pixels*sub_size**2, 2). 
Examples -------- mask = np.array([[True, False, True], [False, False, False] [True, False, True]]) grid_slim = grid_2d_slim_via_mask_from(mask=mask, pixel_scales=(0.5, 0.5), sub_size=1, origin=(0.0, 0.0)) """ total_sub_pixels = mask_2d_util.total_sub_pixels_2d_from(mask_2d, sub_size) grid_slim = np.zeros(shape=(total_sub_pixels, 2)) centres_scaled = geometry_util.central_scaled_coordinate_2d_from( shape_native=mask_2d.shape, pixel_scales=pixel_scales, origin=origin ) sub_index = 0 y_sub_half = pixel_scales[0] / 2 y_sub_step = pixel_scales[0] / (sub_size) x_sub_half = pixel_scales[1] / 2 x_sub_step = pixel_scales[1] / (sub_size) for y in range(mask_2d.shape[0]): for x in range(mask_2d.shape[1]): if not mask_2d[y, x]: y_scaled = (y - centres_scaled[0]) * pixel_scales[0] x_scaled = (x - centres_scaled[1]) * pixel_scales[1] for y1 in range(sub_size): for x1 in range(sub_size): grid_slim[sub_index, 0] = -( y_scaled - y_sub_half + y1 * y_sub_step + (y_sub_step / 2.0) ) grid_slim[sub_index, 1] = ( x_scaled - x_sub_half + x1 * x_sub_step + (x_sub_step / 2.0) ) sub_index += 1 return grid_slim def grid_2d_via_mask_from( mask_2d: np.ndarray, pixel_scales: Union[float, Tuple[float, float]], sub_size: int, origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x) scaled coordinates at the centre of every sub-pixel defined by this 2D mask array. The sub-grid is returned in its native dimensions with shape (total_y_pixels*sub_size, total_x_pixels*sub_size). y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Masked pixels are given values (0.0, 0.0). Grids are defined from the top-left corner, where the first unmasked sub-pixel corresponds to index 0. Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth. Parameters ---------- mask_2d A 2D array of bools, where `False` values are unmasked and therefore included as part of the calculated sub-grid. pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. origin : (float, flloat) The (y,x) origin of the 2D array, which the sub-grid is shifted around. Returns ------- ndarray A sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask array. The sub grid array has dimensions (total_y_pixels*sub_size, total_x_pixels*sub_size). 
Examples -------- mask = np.array([[True, False, True], [False, False, False] [True, False, True]]) grid_2d = grid_2d_via_mask_from(mask=mask, pixel_scales=(0.5, 0.5), sub_size=1, origin=(0.0, 0.0)) """ grid_2d_slim = grid_2d_slim_via_mask_from( mask_2d=mask_2d, pixel_scales=pixel_scales, sub_size=sub_size, origin=origin ) return grid_2d_native_from( grid_2d_slim=grid_2d_slim, mask_2d=mask_2d, sub_size=sub_size ) def grid_2d_slim_via_shape_native_from( shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], sub_size: int, origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x) scaled coordinates at the centre of every sub-pixel defined by this 2D mask array. The sub-grid is returned in its slimmed dimensions with shape (total_pixels**2*sub_size**2, 2). y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Grid2D are defined from the top-left corner, where the first sub-pixel corresponds to index [0,0]. Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth. Parameters ---------- shape_native The (y,x) shape of the 2D array the sub-grid of coordinates is computed for. pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. origin The (y,x) origin of the 2D array, which the sub-grid is shifted around. Returns ------- ndarray A sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask array. The sub grid is slimmed and has dimensions (total_unmasked_pixels*sub_size**2, 2). Examples -------- mask = np.array([[True, False, True], [False, False, False] [True, False, True]]) grid_2d_slim = grid_2d_slim_via_shape_native_from(shape_native=(3,3), pixel_scales=(0.5, 0.5), sub_size=2, origin=(0.0, 0.0)) """ return grid_2d_slim_via_mask_from( mask_2d=np.full(fill_value=False, shape=shape_native), pixel_scales=pixel_scales, sub_size=sub_size, origin=origin, ) def grid_2d_via_shape_native_from( shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], sub_size: int, origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x) scaled coordinates at the centre of every sub-pixel defined by this 2D mask array. The sub-grid is returned in its native dimensions with shape (total_y_pixels*sub_size, total_x_pixels*sub_size). y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Grids are defined from the top-left corner, where the first sub-pixel corresponds to index [0,0]. Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth. Parameters ---------- shape_native The (y,x) shape of the 2D array the sub-grid of coordinates is computed for. 
pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. origin : (float, flloat) The (y,x) origin of the 2D array, which the sub-grid is shifted around. Returns ------- ndarray A sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask array. The sub grid array has dimensions (total_y_pixels*sub_size, total_x_pixels*sub_size). Examples -------- grid_2d = grid_2d_via_shape_native_from(shape_native=(3, 3), pixel_scales=(1.0, 1.0), sub_size=2, origin=(0.0, 0.0)) """ return grid_2d_via_mask_from( mask_2d=np.full(fill_value=False, shape=shape_native), pixel_scales=pixel_scales, sub_size=sub_size, origin=origin, ) @numba_util.jit() def grid_scaled_2d_slim_radial_projected_from( extent: np.ndarray, centre: Tuple[float, float], pixel_scales: Union[float, Tuple[float, float]], sub_size: int, shape_slim: Optional[int] = 0, ) -> np.ndarray: """ Determine a projected radial grid of points from a 2D region of coordinates defined by an extent [xmin, xmax, ymin, ymax] and with a (y,x) centre. This functions operates as follows: 1) Given the region defined by the extent [xmin, xmax, ymin, ymax], the algorithm finds the longest 1D distance of the 4 paths from the (y,x) centre to the edge of the region (e.g. following the positive / negative y and x axes). 2) Use the pixel-scale corresponding to the direction chosen (e.g. if the positive x-axis was the longest, the pixel_scale in the x dimension is used). 3) Determine the number of pixels between the centre and the edge of the region using the longest path between the two chosen above. 4) Create a (y,x) grid of radial points where all points are at the centre's y value = 0.0 and the x values iterate from the centre in increasing steps of the pixel-scale. 5) Rotate these radial coordinates by the input `angle` clockwise. A schematric is shown below: ------------------- | | |<- - - - ->x | x = centre | | <-> = longest radial path from centre to extent edge | | ------------------- Using the centre x above, this function finds the longest radial path to the edge of the extent window. The returned `grid_radii` represents a radial set of points that in 1D sample the 2D grid outwards from its centre. This grid stores the radial coordinates as (y,x) values (where all y values are the same) as opposed to a 1D data structure so that it can be used in functions which require that a 2D grid structure is input. Parameters ---------- extent The extent of the grid the radii grid is computed using, with format [xmin, xmax, ymin, ymax] centre : (float, flloat) The (y,x) central coordinate which the radial grid is traced outwards from. pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. shape_slim Manually choose the shape of the 1D projected grid that is returned. If 0, the border based on the 2D grid is used (due to numba None cannot be used as a default value). Returns ------- ndarray A radial set of points sampling the longest distance from the centre to the edge of the extent in along the positive x-axis. 
""" distance_to_positive_x = extent[1] - centre[1] distance_to_positive_y = extent[3] - centre[0] distance_to_negative_x = centre[1] - extent[0] distance_to_negative_y = centre[0] - extent[2] scaled_distance = max( [ distance_to_positive_x, distance_to_positive_y, distance_to_negative_x, distance_to_negative_y, ] ) if (scaled_distance == distance_to_positive_y) or ( scaled_distance == distance_to_negative_y ): pixel_scale = pixel_scales[0] else: pixel_scale = pixel_scales[1] if shape_slim == 0: shape_slim = sub_size * int((scaled_distance / pixel_scale)) + 1 grid_scaled_2d_slim_radii = np.zeros((shape_slim, 2)) grid_scaled_2d_slim_radii[:, 0] += centre[0] radii = centre[1] for slim_index in range(shape_slim): grid_scaled_2d_slim_radii[slim_index, 1] = radii radii += pixel_scale / sub_size return grid_scaled_2d_slim_radii @numba_util.jit() def grid_pixels_2d_slim_from( grid_scaled_2d_slim: np.ndarray, shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ Convert a slimmed grid of 2d (y,x) scaled coordinates to a slimmed grid of 2d (y,x) pixel coordinate values. Pixel coordinates are returned as floats such that they include the decimal offset from each pixel's top-left corner relative to the input scaled coordinate. The input and output grids are both slimmed and therefore shape (total_pixels, 2). The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the gird. The scaled grid is defined by an origin and coordinates are shifted to this origin before computing their 1D grid pixel coordinate values. Parameters ---------- grid_scaled_2d_slim: np.ndarray The slimmed grid of 2D (y,x) coordinates in scaled units which are converted to pixel value coordinates. shape_native The (y,x) shape of the original 2D array the scaled coordinates were computed on. pixel_scales The (y,x) scaled units to pixel units conversion factor of the original 2D array. origin : (float, flloat) The (y,x) origin of the grid, which the scaled grid is shifted to. Returns ------- ndarray A slimmed grid of 2D (y,x) pixel-value coordinates with dimensions (total_pixels, 2). Examples -------- grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]]) grid_pixels_2d_slim = grid_scaled_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim, shape=(2,2), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0)) """ grid_pixels_2d_slim = np.zeros((grid_scaled_2d_slim.shape[0], 2)) centres_scaled = geometry_util.central_scaled_coordinate_2d_from( shape_native=shape_native, pixel_scales=pixel_scales, origin=origin ) for slim_index in range(grid_scaled_2d_slim.shape[0]): grid_pixels_2d_slim[slim_index, 0] = ( (-grid_scaled_2d_slim[slim_index, 0] / pixel_scales[0]) + centres_scaled[0] + 0.5 ) grid_pixels_2d_slim[slim_index, 1] = ( (grid_scaled_2d_slim[slim_index, 1] / pixel_scales[1]) + centres_scaled[1] + 0.5 ) return grid_pixels_2d_slim @numba_util.jit() def grid_pixel_centres_2d_slim_from( grid_scaled_2d_slim: np.ndarray, shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ Convert a slimmed grid of 2D (y,x) scaled coordinates to a slimmed grid of 2D (y,x) pixel values. Pixel coordinates are returned as integers such that they map directly to the pixel they are contained within. 
The input and output grids are both slimmed and therefore shape (total_pixels, 2). The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the gird. The scaled coordinate grid is defined by the class attribute origin, and coordinates are shifted to this origin before computing their 1D grid pixel indexes. Parameters ---------- grid_scaled_2d_slim: np.ndarray The slimmed grid of 2D (y,x) coordinates in scaled units which is converted to pixel indexes. shape_native The (y,x) shape of the original 2D array the scaled coordinates were computed on. pixel_scales The (y,x) scaled units to pixel units conversion factor of the original 2D array. origin : (float, flloat) The (y,x) origin of the grid, which the scaled grid is shifted Returns ------- ndarray A slimmed grid of 2D (y,x) pixel indexes with dimensions (total_pixels, 2). Examples -------- grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]]) grid_pixels_2d_slim = grid_scaled_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim, shape=(2,2), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0)) """ grid_pixels_2d_slim = np.zeros((grid_scaled_2d_slim.shape[0], 2)) centres_scaled = geometry_util.central_scaled_coordinate_2d_from( shape_native=shape_native, pixel_scales=pixel_scales, origin=origin ) for slim_index in range(grid_scaled_2d_slim.shape[0]): grid_pixels_2d_slim[slim_index, 0] = int( (-grid_scaled_2d_slim[slim_index, 0] / pixel_scales[0]) + centres_scaled[0] + 0.5 ) grid_pixels_2d_slim[slim_index, 1] = int( (grid_scaled_2d_slim[slim_index, 1] / pixel_scales[1]) + centres_scaled[1] + 0.5 ) return grid_pixels_2d_slim @numba_util.jit() def grid_pixel_indexes_2d_slim_from( grid_scaled_2d_slim: np.ndarray, shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ Convert a slimmed grid of 2D (y,x) scaled coordinates to a slimmed grid of pixel indexes. Pixel coordinates are returned as integers such that they are the pixel from the top-left of the 2D grid going rights and then downwards. The input and output grids are both slimmed and have shapes (total_pixels, 2) and (total_pixels,). For example: The pixel at the top-left, whose native index is [0,0], corresponds to slimmed pixel index 0. The fifth pixel on the top row, whose native index is [0,5], corresponds to slimmed pixel index 4. The first pixel on the second row, whose native index is [0,1], has slimmed pixel index 10 if a row has 10 pixels. The scaled coordinate grid is defined by the class attribute origin, and coordinates are shifted to this origin before computing their 1D grid pixel indexes. The input and output grids are both of shape (total_pixels, 2). Parameters ---------- grid_scaled_2d_slim: np.ndarray The slimmed grid of 2D (y,x) coordinates in scaled units which is converted to slimmed pixel indexes. shape_native The (y,x) shape of the original 2D array the scaled coordinates were computed on. pixel_scales The (y,x) scaled units to pixel units conversion factor of the original 2D array. origin : (float, flloat) The (y,x) origin of the grid, which the scaled grid is shifted. Returns ------- ndarray A grid of slimmed pixel indexes with dimensions (total_pixels,). 
Examples -------- grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]]) grid_pixel_indexes_2d_slim = grid_pixel_indexes_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim, shape=(2,2), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0)) """ grid_pixels_2d_slim = grid_pixel_centres_2d_slim_from( grid_scaled_2d_slim=grid_scaled_2d_slim, shape_native=shape_native, pixel_scales=pixel_scales, origin=origin, ) grid_pixel_indexes_2d_slim = np.zeros(grid_pixels_2d_slim.shape[0]) for slim_index in range(grid_pixels_2d_slim.shape[0]): grid_pixel_indexes_2d_slim[slim_index] = int( grid_pixels_2d_slim[slim_index, 0] * shape_native[1] + grid_pixels_2d_slim[slim_index, 1] ) return grid_pixel_indexes_2d_slim @numba_util.jit() def grid_scaled_2d_slim_from( grid_pixels_2d_slim: np.ndarray, shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ Convert a slimmed grid of 2D (y,x) pixel coordinates to a slimmed grid of 2D (y,x) scaled values. The input and output grids are both slimmed and therefore shape (total_pixels, 2). The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the gird. The scaled coordinate origin is defined by the class attribute origin, and coordinates are shifted to this origin after computing their values from the 1D grid pixel indexes. Parameters ---------- grid_pixels_2d_slim: np.ndarray The slimmed grid of (y,x) coordinates in pixel values which is converted to scaled coordinates. shape_native The (y,x) shape of the original 2D array the scaled coordinates were computed on. pixel_scales The (y,x) scaled units to pixel units conversion factor of the original 2D array. origin : (float, flloat) The (y,x) origin of the grid, which the scaled grid is shifted. Returns ------- ndarray A slimmed grid of 2d scaled coordinates with dimensions (total_pixels, 2). Examples -------- grid_pixels_2d_slim = np.array([[0,0], [0,1], [1,0], [1,1]) grid_pixels_2d_slim = grid_scaled_2d_slim_from(grid_pixels_2d_slim=grid_pixels_2d_slim, shape=(2,2), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0)) """ grid_scaled_2d_slim = np.zeros((grid_pixels_2d_slim.shape[0], 2)) centres_scaled = geometry_util.central_scaled_coordinate_2d_from( shape_native=shape_native, pixel_scales=pixel_scales, origin=origin ) for slim_index in range(grid_scaled_2d_slim.shape[0]): grid_scaled_2d_slim[slim_index, 0] = ( -(grid_pixels_2d_slim[slim_index, 0] - centres_scaled[0] - 0.5) * pixel_scales[0] ) grid_scaled_2d_slim[slim_index, 1] = ( grid_pixels_2d_slim[slim_index, 1] - centres_scaled[1] - 0.5 ) * pixel_scales[1] return grid_scaled_2d_slim @numba_util.jit() def grid_pixel_centres_2d_from( grid_scaled_2d: np.ndarray, shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ Convert a native grid of 2D (y,x) scaled coordinates to a native grid of 2D (y,x) pixel values. Pixel coordinates are returned as integers such that they map directly to the pixel they are contained within. The input and output grids are both native resolution and therefore have shape (y_pixels, x_pixels, 2). The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the gird. 
The scaled coordinate grid is defined by the class attribute origin, and coordinates are shifted to this origin before computing their 1D grid pixel indexes. Parameters ---------- grid_scaled_2d: np.ndarray The native grid of 2D (y,x) coordinates in scaled units which is converted to pixel indexes. shape_native The (y,x) shape of the original 2D array the scaled coordinates were computed on. pixel_scales The (y,x) scaled units to pixel units conversion factor of the original 2D array. origin : (float, flloat) The (y,x) origin of the grid, which the scaled grid is shifted Returns ------- ndarray A native grid of 2D (y,x) pixel indexes with dimensions (y_pixels, x_pixels, 2). Examples -------- grid_scaled_2d = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]]) grid_pixel_centres_2d = grid_pixel_centres_2d_from(grid_scaled_2d=grid_scaled_2d, shape=(2,2), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0)) """ grid_pixels_2d = np.zeros((grid_scaled_2d.shape[0], grid_scaled_2d.shape[1], 2)) centres_scaled = geometry_util.central_scaled_coordinate_2d_from( shape_native=shape_native, pixel_scales=pixel_scales, origin=origin ) for y in range(grid_scaled_2d.shape[0]): for x in range(grid_scaled_2d.shape[1]): grid_pixels_2d[y, x, 0] = int( (-grid_scaled_2d[y, x, 0] / pixel_scales[0]) + centres_scaled[0] + 0.5 ) grid_pixels_2d[y, x, 1] = int( (grid_scaled_2d[y, x, 1] / pixel_scales[1]) + centres_scaled[1] + 0.5 ) return grid_pixels_2d @numba_util.jit() def relocated_grid_via_jit_from(grid, border_grid): """ Relocate the coordinates of a grid to its border if they are outside the border, where the border is defined as all pixels at the edge of the grid's mask (see *mask._border_1d_indexes*). This is performed as follows: 1: Use the mean value of the grid's y and x coordinates to determine the origin of the grid. 2: Compute the radial distance of every grid coordinate from the origin. 3: For every coordinate, find its nearest pixel in the border. 4: Determine if it is outside the border, by comparing its radial distance from the origin to its paired border pixel's radial distance. 5: If its radial distance is larger, use the ratio of radial distances to move the coordinate to the border (if its inside the border, do nothing). The method can be used on uniform or irregular grids, however for irregular grids the border of the 'image-plane' mask is used to define border pixels. Parameters ---------- grid : Grid2D The grid (uniform or irregular) whose pixels are to be relocated to the border edge if outside it. border_grid : Grid2D The grid of border (y,x) coordinates. 
""" grid_relocated = np.zeros(grid.shape) grid_relocated[:, :] = grid[:, :] border_origin = np.zeros(2) border_origin[0] = np.mean(border_grid[:, 0]) border_origin[1] = np.mean(border_grid[:, 1]) border_grid_radii = np.sqrt( np.add( np.square(np.subtract(border_grid[:, 0], border_origin[0])), np.square(np.subtract(border_grid[:, 1], border_origin[1])), ) ) border_min_radii = np.min(border_grid_radii) grid_radii = np.sqrt( np.add( np.square(np.subtract(grid[:, 0], border_origin[0])), np.square(np.subtract(grid[:, 1], border_origin[1])), ) ) for pixel_index in range(grid.shape[0]): if grid_radii[pixel_index] > border_min_radii: closest_pixel_index = np.argmin( np.square(grid[pixel_index, 0] - border_grid[:, 0]) + np.square(grid[pixel_index, 1] - border_grid[:, 1]) ) move_factor = ( border_grid_radii[closest_pixel_index] / grid_radii[pixel_index] ) if move_factor < 1.0: grid_relocated[pixel_index, :] = ( move_factor * (grid[pixel_index, :] - border_origin[:]) + border_origin[:] ) return grid_relocated @numba_util.jit() def furthest_grid_2d_slim_index_from( grid_2d_slim: np.ndarray, slim_indexes: np.ndarray, coordinate: Tuple[float, float] ) -> int: distance_to_centre = 0.0 for slim_index in slim_indexes: y = grid_2d_slim[slim_index, 0] x = grid_2d_slim[slim_index, 1] distance_to_centre_new = (x - coordinate[1]) ** 2 + (y - coordinate[0]) ** 2 if distance_to_centre_new >= distance_to_centre: distance_to_centre = distance_to_centre_new furthest_grid_2d_slim_index = slim_index return furthest_grid_2d_slim_index def grid_2d_slim_from( grid_2d_native: np.ndarray, mask: np.ndarray, sub_size: int ) -> np.ndarray: """ For a native 2D grid and mask of shape [total_y_pixels, total_x_pixels, 2], map the values of all unmasked pixels to a slimmed grid of shape [total_unmasked_pixels, 2]. The pixel coordinate origin is at the top left corner of the native grid and goes right-wards and downwards, such that for an grid of shape (3,3) where all pixels are unmasked: - pixel [0,0] of the 2D grid will correspond to index 0 of the 1D grid. - pixel [0,1] of the 2D grid will correspond to index 1 of the 1D grid. - pixel [1,0] of the 2D grid will correspond to index 4 of the 1D grid. Parameters ---------- grid_2d_native : ndarray The native grid of (y,x) values which are mapped to the slimmed grid. mask_2d A 2D array of bools, where `False` values mean unmasked and are included in the mapping. sub_size The size (sub_size x sub_size) of each unmasked pixels sub-array. Returns ------- ndarray A 1D grid of values mapped from the 2D grid with dimensions (total_unmasked_pixels). """ grid_1d_slim_y = array_2d_util.array_2d_slim_from( array_2d_native=grid_2d_native[:, :, 0], mask_2d=mask, sub_size=sub_size ) grid_1d_slim_x = array_2d_util.array_2d_slim_from( array_2d_native=grid_2d_native[:, :, 1], mask_2d=mask, sub_size=sub_size ) return np.stack((grid_1d_slim_y, grid_1d_slim_x), axis=-1) def grid_2d_native_from( grid_2d_slim: np.ndarray, mask_2d: np.ndarray, sub_size: int ) -> np.ndarray: """ For a slimmed 2D grid of shape [total_unmasked_pixels, 2], that was computed by extracting the unmasked values from a native 2D grid of shape [total_y_pixels, total_x_pixels, 2], map the slimmed grid's coordinates back to the native 2D grid where masked values are set to zero. This uses a 1D array 'slim_to_native' where each index gives the 2D pixel indexes of the grid's native unmasked pixels, for example: - If slim_to_native[0] = [0,0], the first value of the 1D array maps to the pixels [0,0,:] of the native 2D grid. 
- If slim_to_native[1] = [0,1], the second value of the 1D array maps to the pixels [0,1,:] of the native 2D grid. - If slim_to_native[4] = [1,1], the fifth value of the 1D array maps to the pixels [1,1,:] of the native 2D grid. Parameters ---------- grid_2d_slim The (y,x) values of the slimmed 2D grid which are mapped to the native 2D grid. mask_2d A 2D array of bools, where `False` values mean unmasked and are included in the mapping. sub_size The size (sub_size x sub_size) of each unmasked pixels sub-array. Returns ------- ndarray A NumPy array of shape [total_y_pixels, total_x_pixels, 2] corresponding to the (y,x) values of the native 2D mapped from the slimmed grid. """ grid_2d_native_y = array_2d_util.array_2d_native_from( array_2d_slim=grid_2d_slim[:, 0], mask_2d=mask_2d, sub_size=sub_size ) grid_2d_native_x = array_2d_util.array_2d_native_from( array_2d_slim=grid_2d_slim[:, 1], mask_2d=mask_2d, sub_size=sub_size ) return np.stack((grid_2d_native_y, grid_2d_native_x), axis=-1) @numba_util.jit() def grid_2d_slim_upscaled_from( grid_slim: np.ndarray, upscale_factor: int, pixel_scales: Union[float, Tuple[float, float]], ) -> np.ndarray: """ From an input slimmed 2D grid, return an upscaled slimmed 2D grid where (y,x) coordinates are added at an upscaled resolution to each grid coordinate, analogous to a sub-grid. Parameters ---------- grid_slim The slimmed grid of (y,x) coordinates over which a square uniform grid is overlaid. upscale_factor The upscaled resolution at which the new grid coordinates are computed. pixel_scales The pixel scale of the uniform grid that laid over the irregular grid of (y,x) coordinates. """ grid_2d_slim_upscaled = np.zeros( shape=(grid_slim.shape[0] * upscale_factor ** 2, 2) ) upscale_index = 0 y_upscale_half = pixel_scales[0] / 2 y_upscale_step = pixel_scales[0] / upscale_factor x_upscale_half = pixel_scales[1] / 2 x_upscale_step = pixel_scales[1] / upscale_factor for slim_index in range(grid_slim.shape[0]): y_grid = grid_slim[slim_index, 0] x_grid = grid_slim[slim_index, 1] for y in range(upscale_factor): for x in range(upscale_factor): grid_2d_slim_upscaled[upscale_index, 0] = ( y_grid + y_upscale_half - y * y_upscale_step - (y_upscale_step / 2.0) ) grid_2d_slim_upscaled[upscale_index, 1] = ( x_grid - x_upscale_half + x * x_upscale_step + (x_upscale_step / 2.0) ) upscale_index += 1 return grid_2d_slim_upscaled def grid_2d_of_points_within_radius( radius: float, centre: Tuple[float, float], grid_2d: np.ndarray ): y_inside = [] x_inside = [] for i in range(len(grid_2d[:, 0])): if (grid_2d[i, 0] - centre[0]) ** 2 + ( grid_2d[i, 1] - centre[1] ) ** 2 > radius ** 2: y_inside.append(grid_2d[i, 0]) x_inside.append(grid_2d[i, 1]) return np.asarray(y_inside, x_inside) def compute_polygon_area(points): x = points[:, 1] y = points[:, 0] return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
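# ---------------------------------------------------------------------------
# Added usage sketch (not part of the original module). It only exercises the
# functions defined above; the import path is an assumption inferred from the
# sibling `array_2d_util` import and may differ in an actual autoarray install.
# ---------------------------------------------------------------------------
import numpy as np
from autoarray.structures.grids.two_d import grid_2d_util  # assumed path

mask_2d = np.array([[True, False, True],
                    [False, False, False],
                    [True, False, True]])  # False = unmasked

# Slimmed (y,x) scaled coordinates of every unmasked sub-pixel, shape (5, 2).
grid_slim = grid_2d_util.grid_2d_slim_via_mask_from(
    mask_2d=mask_2d, pixel_scales=(0.5, 0.5), sub_size=1, origin=(0.0, 0.0)
)

# Map the scaled coordinates back to the integer pixel centres they fall in.
pixel_centres = grid_2d_util.grid_pixel_centres_2d_slim_from(
    grid_scaled_2d_slim=grid_slim,
    shape_native=mask_2d.shape,
    pixel_scales=(0.5, 0.5),
    origin=(0.0, 0.0),
)
print(grid_slim.shape, pixel_centres.shape)  # (5, 2) (5, 2)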
[((252, 268), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (266, 268), False, 'from autoarray import numba_util\n'), ((824, 840), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (838, 840), False, 'from autoarray import numba_util\n'), ((11036, 11052), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (11050, 11052), False, 'from autoarray import numba_util\n'), ((14926, 14942), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (14940, 14942), False, 'from autoarray import numba_util\n'), ((17600, 17616), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (17614, 17616), False, 'from autoarray import numba_util\n'), ((20218, 20234), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (20232, 20234), False, 'from autoarray import numba_util\n'), ((22955, 22971), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (22969, 22971), False, 'from autoarray import numba_util\n'), ((25401, 25417), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (25415, 25417), False, 'from autoarray import numba_util\n'), ((27988, 28004), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (28002, 28004), False, 'from autoarray import numba_util\n'), ((30644, 30660), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (30658, 30660), False, 'from autoarray import numba_util\n'), ((34662, 34678), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (34676, 34678), False, 'from autoarray import numba_util\n'), ((3015, 3071), 'autoarray.mask.mask_2d_util.total_sub_pixels_2d_from', 'mask_2d_util.total_sub_pixels_2d_from', (['mask_2d', 'sub_size'], {}), '(mask_2d, sub_size)\n', (3052, 3071), False, 'from autoarray.mask import mask_2d_util\n'), ((3091, 3128), 'numpy.zeros', 'np.zeros', ([], {'shape': '(total_sub_pixels, 2)'}), '(shape=(total_sub_pixels, 2))\n', (3099, 3128), True, 'import numpy as np\n'), ((3153, 3274), 'autoarray.geometry.geometry_util.central_scaled_coordinate_2d_from', 'geometry_util.central_scaled_coordinate_2d_from', ([], {'shape_native': 'mask_2d.shape', 'pixel_scales': 'pixel_scales', 'origin': 'origin'}), '(shape_native=mask_2d.shape,\n pixel_scales=pixel_scales, origin=origin)\n', (3200, 3274), False, 'from autoarray.geometry import geometry_util\n'), ((14632, 14657), 'numpy.zeros', 'np.zeros', (['(shape_slim, 2)'], {}), '((shape_slim, 2))\n', (14640, 14657), True, 'import numpy as np\n'), ((16936, 16979), 'numpy.zeros', 'np.zeros', (['(grid_scaled_2d_slim.shape[0], 2)'], {}), '((grid_scaled_2d_slim.shape[0], 2))\n', (16944, 16979), True, 'import numpy as np\n'), ((17004, 17124), 'autoarray.geometry.geometry_util.central_scaled_coordinate_2d_from', 'geometry_util.central_scaled_coordinate_2d_from', ([], {'shape_native': 'shape_native', 'pixel_scales': 'pixel_scales', 'origin': 'origin'}), '(shape_native=shape_native,\n pixel_scales=pixel_scales, origin=origin)\n', (17051, 17124), False, 'from autoarray.geometry import geometry_util\n'), ((19548, 19591), 'numpy.zeros', 'np.zeros', (['(grid_scaled_2d_slim.shape[0], 2)'], {}), '((grid_scaled_2d_slim.shape[0], 2))\n', (19556, 19591), True, 'import numpy as np\n'), ((19616, 19736), 'autoarray.geometry.geometry_util.central_scaled_coordinate_2d_from', 'geometry_util.central_scaled_coordinate_2d_from', ([], {'shape_native': 'shape_native', 'pixel_scales': 'pixel_scales', 'origin': 'origin'}), '(shape_native=shape_native,\n pixel_scales=pixel_scales, origin=origin)\n', (19663, 19736), False, 'from 
autoarray.geometry import geometry_util\n'), ((22623, 22661), 'numpy.zeros', 'np.zeros', (['grid_pixels_2d_slim.shape[0]'], {}), '(grid_pixels_2d_slim.shape[0])\n', (22631, 22661), True, 'import numpy as np\n'), ((24778, 24821), 'numpy.zeros', 'np.zeros', (['(grid_pixels_2d_slim.shape[0], 2)'], {}), '((grid_pixels_2d_slim.shape[0], 2))\n', (24786, 24821), True, 'import numpy as np\n'), ((24846, 24966), 'autoarray.geometry.geometry_util.central_scaled_coordinate_2d_from', 'geometry_util.central_scaled_coordinate_2d_from', ([], {'shape_native': 'shape_native', 'pixel_scales': 'pixel_scales', 'origin': 'origin'}), '(shape_native=shape_native,\n pixel_scales=pixel_scales, origin=origin)\n', (24893, 24966), False, 'from autoarray.geometry import geometry_util\n'), ((27341, 27404), 'numpy.zeros', 'np.zeros', (['(grid_scaled_2d.shape[0], grid_scaled_2d.shape[1], 2)'], {}), '((grid_scaled_2d.shape[0], grid_scaled_2d.shape[1], 2))\n', (27349, 27404), True, 'import numpy as np\n'), ((27429, 27549), 'autoarray.geometry.geometry_util.central_scaled_coordinate_2d_from', 'geometry_util.central_scaled_coordinate_2d_from', ([], {'shape_native': 'shape_native', 'pixel_scales': 'pixel_scales', 'origin': 'origin'}), '(shape_native=shape_native,\n pixel_scales=pixel_scales, origin=origin)\n', (27476, 27549), False, 'from autoarray.geometry import geometry_util\n'), ((29283, 29303), 'numpy.zeros', 'np.zeros', (['grid.shape'], {}), '(grid.shape)\n', (29291, 29303), True, 'import numpy as np\n'), ((29366, 29377), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (29374, 29377), True, 'import numpy as np\n'), ((29402, 29430), 'numpy.mean', 'np.mean', (['border_grid[:, (0)]'], {}), '(border_grid[:, (0)])\n', (29409, 29430), True, 'import numpy as np\n'), ((29453, 29481), 'numpy.mean', 'np.mean', (['border_grid[:, (1)]'], {}), '(border_grid[:, (1)])\n', (29460, 29481), True, 'import numpy as np\n'), ((29721, 29746), 'numpy.min', 'np.min', (['border_grid_radii'], {}), '(border_grid_radii)\n', (29727, 29746), True, 'import numpy as np\n'), ((32486, 32598), 'autoarray.structures.arrays.two_d.array_2d_util.array_2d_slim_from', 'array_2d_util.array_2d_slim_from', ([], {'array_2d_native': 'grid_2d_native[:, :, (0)]', 'mask_2d': 'mask', 'sub_size': 'sub_size'}), '(array_2d_native=grid_2d_native[:, :, (0)],\n mask_2d=mask, sub_size=sub_size)\n', (32518, 32598), False, 'from autoarray.structures.arrays.two_d import array_2d_util\n'), ((32633, 32745), 'autoarray.structures.arrays.two_d.array_2d_util.array_2d_slim_from', 'array_2d_util.array_2d_slim_from', ([], {'array_2d_native': 'grid_2d_native[:, :, (1)]', 'mask_2d': 'mask', 'sub_size': 'sub_size'}), '(array_2d_native=grid_2d_native[:, :, (1)],\n mask_2d=mask, sub_size=sub_size)\n', (32665, 32745), False, 'from autoarray.structures.arrays.two_d import array_2d_util\n'), ((32770, 32821), 'numpy.stack', 'np.stack', (['(grid_1d_slim_y, grid_1d_slim_x)'], {'axis': '(-1)'}), '((grid_1d_slim_y, grid_1d_slim_x), axis=-1)\n', (32778, 32821), True, 'import numpy as np\n'), ((34318, 34428), 'autoarray.structures.arrays.two_d.array_2d_util.array_2d_native_from', 'array_2d_util.array_2d_native_from', ([], {'array_2d_slim': 'grid_2d_slim[:, (0)]', 'mask_2d': 'mask_2d', 'sub_size': 'sub_size'}), '(array_2d_slim=grid_2d_slim[:, (0)],\n mask_2d=mask_2d, sub_size=sub_size)\n', (34352, 34428), False, 'from autoarray.structures.arrays.two_d import array_2d_util\n'), ((34465, 34575), 'autoarray.structures.arrays.two_d.array_2d_util.array_2d_native_from', 'array_2d_util.array_2d_native_from', 
([], {'array_2d_slim': 'grid_2d_slim[:, (1)]', 'mask_2d': 'mask_2d', 'sub_size': 'sub_size'}), '(array_2d_slim=grid_2d_slim[:, (1)],\n mask_2d=mask_2d, sub_size=sub_size)\n', (34499, 34575), False, 'from autoarray.structures.arrays.two_d import array_2d_util\n'), ((34600, 34655), 'numpy.stack', 'np.stack', (['(grid_2d_native_y, grid_2d_native_x)'], {'axis': '(-1)'}), '((grid_2d_native_y, grid_2d_native_x), axis=-1)\n', (34608, 34655), True, 'import numpy as np\n'), ((35435, 35496), 'numpy.zeros', 'np.zeros', ([], {'shape': '(grid_slim.shape[0] * upscale_factor ** 2, 2)'}), '(shape=(grid_slim.shape[0] * upscale_factor ** 2, 2))\n', (35443, 35496), True, 'import numpy as np\n'), ((36925, 36955), 'numpy.asarray', 'np.asarray', (['y_inside', 'x_inside'], {}), '(y_inside, x_inside)\n', (36935, 36955), True, 'import numpy as np\n'), ((644, 672), 'numpy.max', 'np.max', (['grid_2d_slim[:, (0)]'], {}), '(grid_2d_slim[:, (0)])\n', (650, 672), True, 'import numpy as np\n'), ((673, 701), 'numpy.min', 'np.min', (['grid_2d_slim[:, (0)]'], {}), '(grid_2d_slim[:, (0)])\n', (679, 701), True, 'import numpy as np\n'), ((724, 752), 'numpy.max', 'np.max', (['grid_2d_slim[:, (1)]'], {}), '(grid_2d_slim[:, (1)])\n', (730, 752), True, 'import numpy as np\n'), ((753, 781), 'numpy.min', 'np.min', (['grid_2d_slim[:, (1)]'], {}), '(grid_2d_slim[:, (1)])\n', (759, 781), True, 'import numpy as np\n'), ((8748, 8793), 'numpy.full', 'np.full', ([], {'fill_value': '(False)', 'shape': 'shape_native'}), '(fill_value=False, shape=shape_native)\n', (8755, 8793), True, 'import numpy as np\n'), ((10888, 10933), 'numpy.full', 'np.full', ([], {'fill_value': '(False)', 'shape': 'shape_native'}), '(fill_value=False, shape=shape_native)\n', (10895, 10933), True, 'import numpy as np\n'), ((29554, 29604), 'numpy.subtract', 'np.subtract', (['border_grid[:, (0)]', 'border_origin[0]'], {}), '(border_grid[:, (0)], border_origin[0])\n', (29565, 29604), True, 'import numpy as np\n'), ((29628, 29678), 'numpy.subtract', 'np.subtract', (['border_grid[:, (1)]', 'border_origin[1]'], {}), '(border_grid[:, (1)], border_origin[1])\n', (29639, 29678), True, 'import numpy as np\n'), ((29816, 29859), 'numpy.subtract', 'np.subtract', (['grid[:, (0)]', 'border_origin[0]'], {}), '(grid[:, (0)], border_origin[0])\n', (29827, 29859), True, 'import numpy as np\n'), ((29883, 29926), 'numpy.subtract', 'np.subtract', (['grid[:, (1)]', 'border_origin[1]'], {}), '(grid[:, (1)], border_origin[1])\n', (29894, 29926), True, 'import numpy as np\n'), ((30116, 30169), 'numpy.square', 'np.square', (['(grid[pixel_index, 0] - border_grid[:, (0)])'], {}), '(grid[pixel_index, 0] - border_grid[:, (0)])\n', (30125, 30169), True, 'import numpy as np\n'), ((30187, 30240), 'numpy.square', 'np.square', (['(grid[pixel_index, 1] - border_grid[:, (1)])'], {}), '(grid[pixel_index, 1] - border_grid[:, (1)])\n', (30196, 30240), True, 'import numpy as np\n'), ((37078, 37091), 'numpy.roll', 'np.roll', (['y', '(1)'], {}), '(y, 1)\n', (37085, 37091), True, 'import numpy as np\n'), ((37105, 37118), 'numpy.roll', 'np.roll', (['x', '(1)'], {}), '(x, 1)\n', (37112, 37118), True, 'import numpy as np\n')]
crown-prince/proxies
Proxies/Proxies.py
a3342d414675dbc89cdf1b953b46ea518f451166
# coding: utf-8
import requests, math
import gevent
from gevent.queue import Queue
from gevent import monkey; monkey.patch_all()
from pyquery import PyQuery


class Proxies():
    def __init__(self):
        self.domestic_gn_url = 'http://www.kuaidaili.com/free/inha/{0}/'
        self.domestic_pt_url = 'http://www.kuaidaili.com/free/intr/{0}/'
        self.abroad_gn_url = 'http://www.kuaidaili.com/free/outha/{0}/'
        self.abroad_pt_url = 'http://www.kuaidaili.com/free/outtr/{0}/'
        self.result_arr = []
        self.s = requests.Session()
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
            'Referer': 'http://www.kuaidaili.com/'
        }

    def fetch_urls(self, queue, quantity):
        while not queue.empty():
            url = queue.get()
            html = self.s.get(url, headers=self.headers).text
            pq = PyQuery(html)
            size = pq.find('tbody tr').size()
            for index in range(size):
                item = pq.find('tbody tr').eq(index)
                ip = item.find('td').eq(0).text()
                port = item.find('td').eq(1).text()
                _type = item.find('td').eq(3).text()
                self.result_arr.append({
                    str(_type).lower(): '{0}://{1}:{2}'.format(str(_type).lower(), ip, port)
                })
                if len(self.result_arr) >= quantity:
                    break

    def get_proxies(self, quantity, type):
        '''
        quantity: number of proxies to collect
        type: proxy category
              1. domestic high-anonymity proxies
              2. domestic regular proxies
              3. foreign high-anonymity proxies
              4. foreign regular proxies
        '''
        url_queue = Queue()
        need_pages = int(math.ceil(quantity / 15))
        # pick the listing URL that matches the requested category
        if type == 1:
            # domestic high-anonymity proxies
            base_url = self.domestic_gn_url
        elif type == 2:
            # domestic regular proxies
            base_url = self.domestic_pt_url
        elif type == 3:
            # foreign high-anonymity proxies
            base_url = self.abroad_gn_url
        elif type == 4:
            # foreign regular proxies
            base_url = self.abroad_pt_url
        # build the page URLs that need to be fetched
        for index in range(need_pages):
            url = base_url.format(index + 1)
            url_queue.put(url)
        # work through every URL with 2 greenlets
        gevent_list = []
        for index in range(2):
            gevent_list.append(
                gevent.spawn(self.fetch_urls, url_queue, quantity)
            )
        gevent.joinall(gevent_list)

    def get_result(self):
        return self.result_arr


if __name__ == '__main__':
    p = Proxies()
    p.get_proxies(20, 1)
    result = p.get_result()
    print(result)
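# ---------------------------------------------------------------------------
# Added follow-up sketch (not part of the original file). Each entry returned
# by get_result() is a dict such as {'http': 'http://1.2.3.4:8080'}, which is
# exactly the shape of the `proxies` argument accepted by requests. The test
# URL below is only an example endpoint.
# ---------------------------------------------------------------------------
import requests

p = Proxies()
p.get_proxies(5, 1)  # five domestic high-anonymity proxies
for proxy in p.get_result():
    try:
        r = requests.get('http://httpbin.org/ip', proxies=proxy, timeout=5)
        print(proxy, r.status_code)
    except requests.RequestException:
        print(proxy, 'failed')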
[((111, 129), 'gevent.monkey.patch_all', 'monkey.patch_all', ([], {}), '()\n', (127, 129), False, 'from gevent import monkey\n'), ((536, 554), 'requests.Session', 'requests.Session', ([], {}), '()\n', (552, 554), False, 'import requests, math\n'), ((1726, 1733), 'gevent.queue.Queue', 'Queue', ([], {}), '()\n', (1731, 1733), False, 'from gevent.queue import Queue\n'), ((2486, 2513), 'gevent.joinall', 'gevent.joinall', (['gevent_list'], {}), '(gevent_list)\n', (2500, 2513), False, 'import gevent\n'), ((978, 991), 'pyquery.PyQuery', 'PyQuery', (['html'], {}), '(html)\n', (985, 991), False, 'from pyquery import PyQuery\n'), ((1759, 1783), 'math.ceil', 'math.ceil', (['(quantity / 15)'], {}), '(quantity / 15)\n', (1768, 1783), False, 'import requests, math\n'), ((2413, 2463), 'gevent.spawn', 'gevent.spawn', (['self.fetch_urls', 'url_queue', 'quantity'], {}), '(self.fetch_urls, url_queue, quantity)\n', (2425, 2463), False, 'import gevent\n')]
Oaklight/parallelformers
parallelformers/policies/base/auto.py
57fc36f81734c29aaf814e092ce13681d3c28ede
# Copyright 2021 TUNiB inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from contextlib import suppress from typing import List, Union from torch import nn from parallelformers.policies.base import Policy class AutoPolicy: """Class for finds automatically appropriate policies for the current model""" def __init__(self): self.builtin_policies = {} with suppress(Exception): from transformers.models.gpt_neo.modeling_gpt_neo import ( GPTNeoPreTrainedModel, ) from parallelformers.policies.gpt_neo import GPTNeoPolicy self.builtin_policies[GPTNeoPreTrainedModel] = [ GPTNeoPolicy, ] with suppress(Exception): from transformers.models.bert.modeling_bert import ( BertPreTrainedModel, ) from parallelformers.policies.bert import BertPolicy self.builtin_policies[BertPreTrainedModel] = [ BertPolicy, ] with suppress(Exception): from transformers.models.bart.modeling_bart import ( BartPretrainedModel, ) from parallelformers.policies.bart import ( BartDecoderPolicy, BartEncoderPolicy, ) self.builtin_policies[BartPretrainedModel] = [ BartEncoderPolicy, BartDecoderPolicy, ] with suppress(Exception): from transformers.models.blenderbot.modeling_blenderbot import ( BlenderbotPreTrainedModel, ) from parallelformers.policies.blenderbot import ( BlenderbotDecoderPolicy, BlenderbotEncoderPolicy, ) self.builtin_policies[BlenderbotPreTrainedModel] = [ BlenderbotEncoderPolicy, BlenderbotDecoderPolicy, ] with suppress(Exception): from transformers.models.deberta.modeling_deberta import ( DebertaPreTrainedModel, ) from parallelformers.policies.deberta import DebertaPolicy self.builtin_policies[DebertaPreTrainedModel] = [ DebertaPolicy, ] with suppress(Exception): from transformers.models.transfo_xl.modeling_transfo_xl import ( TransfoXLPreTrainedModel, ) from parallelformers.policies.transfo_xl import TransfoXLPolicy self.builtin_policies[TransfoXLPreTrainedModel] = [ TransfoXLPolicy, ] with suppress(Exception): from transformers.models.roberta.modeling_roberta import ( RobertaPreTrainedModel, ) from parallelformers.policies.roberta import RobertaPolicy self.builtin_policies[RobertaPreTrainedModel] = [ RobertaPolicy, ] with suppress(Exception): from transformers.models.albert.modeling_albert import ( AlbertPreTrainedModel, ) from parallelformers.policies.albert import AlbertPolicy self.builtin_policies[AlbertPreTrainedModel] = [ AlbertPolicy, ] with suppress(Exception): from transformers.models.gpt2.modeling_gpt2 import ( GPT2PreTrainedModel, ) from parallelformers.policies.gpt2 import GPT2Policy self.builtin_policies[GPT2PreTrainedModel] = [ GPT2Policy, ] with suppress(Exception): from transformers.models.ctrl.modeling_ctrl import ( CTRLPreTrainedModel, ) from parallelformers.policies.ctrl import CTRLPolicy self.builtin_policies[CTRLPreTrainedModel] = [ CTRLPolicy, ] with suppress(Exception): from transformers.models.deberta_v2.modeling_deberta_v2 import ( DebertaV2PreTrainedModel, ) from parallelformers.policies.deberta_v2 import DebertaV2Policy self.builtin_policies[DebertaV2PreTrainedModel] = [ DebertaV2Policy, ] with 
suppress(Exception): from transformers.models.openai.modeling_openai import ( OpenAIGPTPreTrainedModel, ) from parallelformers.policies.openai import OpenAIGPTPolicy self.builtin_policies[OpenAIGPTPreTrainedModel] = [ OpenAIGPTPolicy, ] with suppress(Exception): from transformers.models.electra.modeling_electra import ( ElectraPreTrainedModel, ) from parallelformers.policies.electra import ElectraPolicy self.builtin_policies[ElectraPreTrainedModel] = [ ElectraPolicy, ] with suppress(Exception): from transformers.models.blenderbot_small.modeling_blenderbot_small import ( BlenderbotSmallPreTrainedModel, ) from parallelformers.policies.blenderbot_small import ( BlenderbotSmallDecoderPolicy, BlenderbotSmallEncoderPolicy, ) self.builtin_policies[BlenderbotSmallPreTrainedModel] = [ BlenderbotSmallEncoderPolicy, BlenderbotSmallDecoderPolicy, ] with suppress(Exception): from transformers.models.distilbert.modeling_distilbert import ( DistilBertPreTrainedModel, ) from parallelformers.policies.distil_bert import DistilBertPolicy self.builtin_policies[DistilBertPreTrainedModel] = [ DistilBertPolicy, ] with suppress(Exception): from transformers.models.convbert.modeling_convbert import ( ConvBertPreTrainedModel, ) from parallelformers.policies.convbert import ConvBertPolicy self.builtin_policies[ConvBertPreTrainedModel] = [ ConvBertPolicy, ] with suppress(Exception): from transformers.models.bert_generation.modeling_bert_generation import ( BertGenerationPreTrainedModel, ) from parallelformers.policies.bert import BertPolicy self.builtin_policies[BertGenerationPreTrainedModel] = [ BertPolicy, ] with suppress(Exception): from transformers.models.big_bird.modeling_big_bird import ( BigBirdPreTrainedModel, ) from parallelformers.policies.bigbird import BigBirdPolicy self.builtin_policies[BigBirdPreTrainedModel] = [ BigBirdPolicy, ] with suppress(Exception): from transformers.models.bigbird_pegasus.modeling_bigbird_pegasus import ( BigBirdPegasusPreTrainedModel, ) from parallelformers.policies.bigbird_pegasus import ( BigBirdPegasusDecoderPolicy, BigBirdPegasusEncoderPolicy, ) self.builtin_policies[BigBirdPegasusPreTrainedModel] = [ BigBirdPegasusEncoderPolicy, BigBirdPegasusDecoderPolicy, ] with suppress(Exception): from transformers.models.vit.modeling_vit import ViTPreTrainedModel from parallelformers.policies.vit import ViTPolicy self.builtin_policies[ViTPreTrainedModel] = [ ViTPolicy, ] with suppress(Exception): from transformers.models.deit.modeling_deit import ( DeiTPreTrainedModel, ) from parallelformers.policies.deit import DeiTPolicy self.builtin_policies[DeiTPreTrainedModel] = [DeiTPolicy] with suppress(Exception): from transformers.models.mbart.modeling_mbart import ( MBartPreTrainedModel, ) from parallelformers.policies.mbart import ( MBartDecoderPolicy, MBartEncoderPolicy, ) self.builtin_policies[MBartPreTrainedModel] = [ MBartEncoderPolicy, MBartDecoderPolicy, ] with suppress(Exception): from transformers.models.t5.modeling_t5 import T5PreTrainedModel from parallelformers.policies.t5 import T5Policy self.builtin_policies[T5PreTrainedModel] = [ T5Policy, ] with suppress(Exception): from transformers.models.pegasus.modeling_pegasus import ( PegasusPreTrainedModel, ) from parallelformers.policies.pegasus import ( PegasusDecoderPolicy, PegasusEncoderPolicy, ) self.builtin_policies[PegasusPreTrainedModel] = [ PegasusEncoderPolicy, PegasusDecoderPolicy, ] with suppress(Exception): from transformers.models.fsmt.modeling_fsmt import ( PretrainedFSMTModel, ) from parallelformers.policies.fsmt import 
( FSMTDecoderPolicy, FSMTEncoderPolicy, ) self.builtin_policies[PretrainedFSMTModel] = [ FSMTEncoderPolicy, FSMTDecoderPolicy, ] with suppress(Exception): from transformers.models.xlm.modeling_xlm import XLMPreTrainedModel from parallelformers.policies.xlm import ( XLMAttentionPolicy, XLMMLPPolicy, ) self.builtin_policies[XLMPreTrainedModel] = [ XLMAttentionPolicy, XLMMLPPolicy, ] with suppress(Exception): from transformers.models.m2m_100.modeling_m2m_100 import ( M2M100PreTrainedModel, ) from parallelformers.policies.m2m_100 import ( M2M100DecoderPolicy, M2M100EncoderPolicy, ) self.builtin_policies[M2M100PreTrainedModel] = [ M2M100EncoderPolicy, M2M100DecoderPolicy, ] with suppress(Exception): from transformers.models.marian.modeling_marian import ( MarianPreTrainedModel, ) from parallelformers.policies.marian import ( MarianDecoderPolicy, MarianEncoderPolicy, ) self.builtin_policies[MarianPreTrainedModel] = [ MarianEncoderPolicy, MarianDecoderPolicy, ] with suppress(Exception): from transformers.models.mobilebert.modeling_mobilebert import ( MobileBertPreTrainedModel, ) from parallelformers.policies.mobilebert import MobileBertPolicy self.builtin_policies[MobileBertPreTrainedModel] = [ MobileBertPolicy, ] with suppress(Exception): from transformers.models.mpnet.modeling_mpnet import ( MPNetPreTrainedModel, ) from parallelformers.policies.mpnet import ( MPNetEncoderPolicy, MPNetLayerPolicy, ) self.builtin_policies[MPNetPreTrainedModel] = [ MPNetEncoderPolicy, MPNetLayerPolicy, ] with suppress(Exception): from transformers.models.luke.modeling_luke import ( LukePreTrainedModel, ) from parallelformers.policies.luke import LukePolicy self.builtin_policies[LukePreTrainedModel] = [ LukePolicy, ] with suppress(Exception): from transformers.models.dpr.modeling_dpr import ( DPRPretrainedContextEncoder, DPRPretrainedQuestionEncoder, DPRPretrainedReader, ) self.builtin_policies[DPRPretrainedReader] = [ BertPolicy, ] self.builtin_policies[DPRPretrainedQuestionEncoder] = [ BertPolicy, ] self.builtin_policies[DPRPretrainedContextEncoder] = [ BertPolicy, ] with suppress(Exception): from transformers.models.lxmert.modeling_lxmert import ( LxmertPreTrainedModel, ) from parallelformers.policies.lxmert import LxmertPolicy self.builtin_policies[LxmertPreTrainedModel] = [ LxmertPolicy, ] with suppress(Exception): from transformers.models.hubert.modeling_hubert import ( HubertPreTrainedModel, ) from parallelformers.policies.hubert import HubertPolicy self.builtin_policies[HubertPreTrainedModel] = [ HubertPolicy, ] with suppress(Exception): from transformers.models.wav2vec2.modeling_wav2vec2 import ( Wav2Vec2PreTrainedModel, ) from parallelformers.policies.wav2vec import Wav2VecPolicy self.builtin_policies[Wav2Vec2PreTrainedModel] = [ Wav2VecPolicy, ] with suppress(Exception): from transformers.models.xlnet.modeling_xlnet import ( XLNetPreTrainedModel, ) from parallelformers.policies.xlnet import XLNetPolicy self.builtin_policies[XLNetPreTrainedModel] = [ XLNetPolicy, ] with suppress(Exception): from transformers.models.retribert.modeling_retribert import ( RetriBertPreTrainedModel, ) self.builtin_policies[RetriBertPreTrainedModel] = [ BertPolicy, ] with suppress(Exception): from transformers.models.clip.modeling_clip import ( CLIPPreTrainedModel, ) from parallelformers.policies.clip import ( CLIPLayerPolicy, CLIPTextPolicy, CLIPVisionPolicy, ) self.builtin_policies[CLIPPreTrainedModel] = [ CLIPLayerPolicy, CLIPTextPolicy, CLIPVisionPolicy, ] with suppress(Exception): from 
transformers.models.detr.modeling_detr import ( DetrPreTrainedModel, ) from parallelformers.policies.detr import ( DetrDecoderPolicy, DetrEncoderPolicy, ) self.builtin_policies[DetrPreTrainedModel] = [ DetrEncoderPolicy, DetrDecoderPolicy, ] with suppress(Exception): from transformers.models.reformer.modeling_reformer import ( ReformerPreTrainedModel, ) from parallelformers.policies.reformer import ReformerPolicy self.builtin_policies[ReformerPreTrainedModel] = [ ReformerPolicy, ] with suppress(Exception): from transformers.models.longformer.modeling_longformer import ( LongformerPreTrainedModel, ) from parallelformers.policies.longformer import LongformerPolicy self.builtin_policies[LongformerPreTrainedModel] = [ LongformerPolicy, ] with suppress(Exception): from transformers.models.roformer.modeling_roformer import ( RoFormerPreTrainedModel, ) from parallelformers.policies.roformer import RoformerPolicy self.builtin_policies[RoFormerPreTrainedModel] = [ RoformerPolicy, ] with suppress(Exception): from transformers.models.ibert.modeling_ibert import ( IBertPreTrainedModel, ) from parallelformers.policies.ibert import IBertPolicy self.builtin_policies[IBertPreTrainedModel] = [ IBertPolicy, ] with suppress(Exception): from transformers.models.tapas.modeling_tapas import ( TapasPreTrainedModel, ) from parallelformers.policies.tapas import TapasPolicy self.builtin_policies[TapasPreTrainedModel] = [ TapasPolicy, ] with suppress(Exception): from transformers.models.funnel.modeling_funnel import ( FunnelPreTrainedModel, ) from parallelformers.policies.funnel import FunnelPolicy self.builtin_policies[FunnelPreTrainedModel] = [ FunnelPolicy, ] with suppress(Exception): from transformers.models.layoutlm.modeling_layoutlm import ( LayoutLMPreTrainedModel, ) from parallelformers.policies.layoutlm import LayoutLMPolicy self.builtin_policies[LayoutLMPreTrainedModel] = [ LayoutLMPolicy, ] with suppress(Exception): from transformers.models.led.modeling_led import LEDPreTrainedModel from parallelformers.policies.led import ( LEDDecoderPolicy, LEDEncoderPolicy, ) self.builtin_policies[LEDPreTrainedModel] = [ LEDEncoderPolicy, LEDDecoderPolicy, ] with suppress(Exception): from transformers.models.prophetnet.modeling_prophetnet import ( ProphetNetPreTrainedModel, ) from parallelformers.policies.prophetnet import ( ProphetNetDecoderPolicy, ProphetNetEncoderPolicy, ) self.builtin_policies[ProphetNetPreTrainedModel] = [ ProphetNetEncoderPolicy, ProphetNetDecoderPolicy, ] with suppress(Exception): from transformers.models.visual_bert.modeling_visual_bert import ( VisualBertPreTrainedModel, ) from parallelformers.policies.visual_bert import VisualBertPolicy self.builtin_policies[VisualBertPreTrainedModel] = [ VisualBertPolicy, ] with suppress(Exception): from transformers.models.speech_to_text.modeling_speech_to_text import ( Speech2TextPreTrainedModel, ) from parallelformers.policies.speech_to_text import ( Speech2TextDecoderPolicy, Speech2TextEncoderPolicy, ) self.builtin_policies[Speech2TextPreTrainedModel] = [ Speech2TextEncoderPolicy, Speech2TextDecoderPolicy, ] with suppress(Exception): from transformers.models.gptj.modeling_gptj import ( GPTJPreTrainedModel, ) from parallelformers.policies.gptj import GPTJPolicy self.builtin_policies[GPTJPreTrainedModel] = [ GPTJPolicy, ] with suppress(Exception): from transformers.models.megatron_bert import ( MegatronBertPreTrainedModel, ) from parallelformers.policies.megtron_bert import ( MegatronBertPolicy, ) self.builtin_policies[MegatronBertPreTrainedModel] = [ 
MegatronBertPolicy, ] def get_policy(self, model: nn.Module) -> Union[List[Policy], None]: """ Find appropriate policies for the current model Args: model (nn.Module): model to parallelize Returns: Union[List[Policy], None]: appropriate policies or none """ for k, v in self.available().items(): if isinstance(model, k): return v return None def available(self): """Dictionary of available models and policies""" return self.builtin_policies
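# ---------------------------------------------------------------------------
# Added usage sketch (not part of the original file). AutoPolicy only maps a
# model instance to the policy classes registered above; the checkpoint name
# here is an arbitrary example and requires `transformers` to be installed.
# ---------------------------------------------------------------------------
from transformers import AutoModel

model = AutoModel.from_pretrained("bert-base-uncased")  # example checkpoint
policies = AutoPolicy().get_policy(model)
if policies is None:
    print("no parallelization policy registered for", type(model).__name__)
else:
    print("policies:", [policy.__name__ for policy in policies])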
[((883, 902), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (891, 902), False, 'from contextlib import suppress\n'), ((1219, 1238), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (1227, 1238), False, 'from contextlib import suppress\n'), ((1538, 1557), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (1546, 1557), False, 'from contextlib import suppress\n'), ((1974, 1993), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (1982, 1993), False, 'from contextlib import suppress\n'), ((2464, 2483), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (2472, 2483), False, 'from contextlib import suppress\n'), ((2804, 2823), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (2812, 2823), False, 'from contextlib import suppress\n'), ((3161, 3180), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (3169, 3180), False, 'from contextlib import suppress\n'), ((3501, 3520), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (3509, 3520), False, 'from contextlib import suppress\n'), ((3834, 3853), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (3842, 3853), False, 'from contextlib import suppress\n'), ((4153, 4172), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (4161, 4172), False, 'from contextlib import suppress\n'), ((4472, 4491), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (4480, 4491), False, 'from contextlib import suppress\n'), ((4829, 4848), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (4837, 4848), False, 'from contextlib import suppress\n'), ((5174, 5193), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (5182, 5193), False, 'from contextlib import suppress\n'), ((5514, 5533), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (5522, 5533), False, 'from contextlib import suppress\n'), ((6052, 6071), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (6060, 6071), False, 'from contextlib import suppress\n'), ((6414, 6433), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (6422, 6433), False, 'from contextlib import suppress\n'), ((6761, 6780), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (6769, 6780), False, 'from contextlib import suppress\n'), ((7122, 7141), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (7130, 7141), False, 'from contextlib import suppress\n'), ((7464, 7483), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (7472, 7483), False, 'from contextlib import suppress\n'), ((7993, 8012), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (8001, 8012), False, 'from contextlib import suppress\n'), ((8272, 8291), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (8280, 8291), False, 'from contextlib import suppress\n'), ((8560, 8579), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (8568, 8579), False, 'from contextlib import suppress\n'), ((9005, 9024), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (9013, 9024), False, 'from contextlib import suppress\n'), ((9277, 9296), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (9285, 9296), False, 'from contextlib import suppress\n'), ((9740, 9759), 
'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (9748, 9759), False, 'from contextlib import suppress\n'), ((10176, 10195), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (10184, 10195), False, 'from contextlib import suppress\n'), ((10566, 10585), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (10574, 10585), False, 'from contextlib import suppress\n'), ((11023, 11042), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (11031, 11042), False, 'from contextlib import suppress\n'), ((11477, 11496), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (11485, 11496), False, 'from contextlib import suppress\n'), ((11838, 11857), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (11846, 11857), False, 'from contextlib import suppress\n'), ((12279, 12298), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (12287, 12298), False, 'from contextlib import suppress\n'), ((12598, 12617), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (12606, 12617), False, 'from contextlib import suppress\n'), ((13161, 13180), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (13169, 13180), False, 'from contextlib import suppress\n'), ((13494, 13513), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (13502, 13513), False, 'from contextlib import suppress\n'), ((13827, 13846), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (13835, 13846), False, 'from contextlib import suppress\n'), ((14171, 14190), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (14179, 14190), False, 'from contextlib import suppress\n'), ((14497, 14516), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (14505, 14516), False, 'from contextlib import suppress\n'), ((14770, 14789), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (14778, 14789), False, 'from contextlib import suppress\n'), ((15264, 15283), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (15272, 15283), False, 'from contextlib import suppress\n'), ((15700, 15719), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (15708, 15719), False, 'from contextlib import suppress\n'), ((16047, 16066), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (16055, 16066), False, 'from contextlib import suppress\n'), ((16408, 16427), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (16416, 16427), False, 'from contextlib import suppress\n'), ((16755, 16774), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (16763, 16774), False, 'from contextlib import suppress\n'), ((17081, 17100), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (17089, 17100), False, 'from contextlib import suppress\n'), ((17407, 17426), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (17415, 17426), False, 'from contextlib import suppress\n'), ((17740, 17759), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (17748, 17759), False, 'from contextlib import suppress\n'), ((18087, 18106), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (18095, 18106), False, 'from contextlib import suppress\n'), ((18481, 18500), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (18489, 
18500), False, 'from contextlib import suppress\n'), ((18971, 18990), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (18979, 18990), False, 'from contextlib import suppress\n'), ((19335, 19354), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (19343, 19354), False, 'from contextlib import suppress\n'), ((19843, 19862), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (19851, 19862), False, 'from contextlib import suppress\n'), ((20162, 20181), 'contextlib.suppress', 'suppress', (['Exception'], {}), '(Exception)\n', (20170, 20181), False, 'from contextlib import suppress\n')]
RyosukeDTomita/gcmPlot
main/upper_air_humidity.py
430f8af353daf464b5c5566f1c163d5bef63f584
# coding: utf-8
"""
Name: upper_air_humidity.py
Make upper level weather chart.
Usage: python3 upper_air_humidity.py --file <ncfile>
Author: Ryosuke Tomita
Date: 2022/01/07
"""
import argparse
from ncmagics import fetchtime, japanmap, meteotool


def parse_args() -> dict:
    """parse_args.
    set file path.

    Args:

    Returns:
        dict:
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--file", help="set ncfile.", type=str)
    p = parser.parse_args()
    args = {"file": p.file}
    return args


def output_name(ncfile: str, isobaric_surface: int) -> str:
    """output_name.

    Args:
        ncfile (str): ncfile
        isobaric_surface (int): isobaric_surface

    Returns:
        str:
    """
    date_time = fetchtime.fetch_time(ncfile)
    outname = (date_time + "_" + str(isobaric_surface))
    return outname


def main():
    """main.
    """
    args = parse_args()
    meteo_tool = meteotool.MeteoTools(args["file"])
    lat, lon = meteo_tool.get_lat_lon()
    isobaric_surface = (850, 500, 300)
    #label_upper = (30, 0)
    #lebel_min = (-30, -60)

    for i, pressure in enumerate(isobaric_surface):
        # get parameter
        temp_c = meteo_tool.get_parameter('t', isobaric_surface=pressure) - 273.15
        rh = meteo_tool.get_parameter('r', isobaric_surface=pressure)
        height_gpm = meteo_tool.get_parameter('gh', isobaric_surface=pressure)
        u_wind = meteo_tool.get_parameter('u', isobaric_surface=pressure)
        v_wind = meteo_tool.get_parameter('v', isobaric_surface=pressure)

        jp_map = japanmap.JpMap()
        jp_map.contour_plot(lon, lat, height_gpm)
        #jp_map.shade_plot(lon, lat, temp_c,
        #                  label="2m temperature ($^\circ$C)",
        #                  color_bar_label_max=label_upper[i],
        #                  color_bar_label_min=lebel_min[i],
        #                  color_map_type="temperature",
        #                  double_color_bar=True,)
        jp_map.shade_plot(lon, lat, rh,
                          label="relative humidity (%)",
                          color_bar_label_max=100,
                          color_bar_label_min=0,
                          color_map_type="gray",
                          double_color_bar=False,)
        jp_map.vector_plot(lon, lat, u_wind, v_wind,
                           vector_interval=5, vector_scale=10, mode="wind")
        #jp_map.gray_shade(lon, lat, rh,
        #                  label="relative humidity (%)",
        #                  color_bar_label_max=100,
        #                  color_bar_label_min=0,
        #                  )
        if pressure == 850:
            jp_map.color_line(lon, lat, temp_c, line_value=-6, color='#0000ff')
        if pressure == 500:
            jp_map.color_line(lon, lat, temp_c, line_value=-36, color='#b22222')
        outname = output_name(args["file"], pressure)
        print(outname)
        jp_map.save_fig(outname, str(pressure) + "hPa")


if __name__ == "__main__":
    main()
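# ---------------------------------------------------------------------------
# Added variant sketch (not part of the original script). It reuses only the
# ncmagics calls that already appear in main() to chart a single isobaric
# surface. It assumes it is appended to the module above, so the imports and
# output_name() are in scope; the ncmagics API is taken on trust from the
# code in this file.
# ---------------------------------------------------------------------------
def plot_single_level(ncfile: str, pressure: int = 500):
    meteo_tool = meteotool.MeteoTools(ncfile)
    lat, lon = meteo_tool.get_lat_lon()
    rh = meteo_tool.get_parameter('r', isobaric_surface=pressure)
    height_gpm = meteo_tool.get_parameter('gh', isobaric_surface=pressure)

    jp_map = japanmap.JpMap()
    jp_map.contour_plot(lon, lat, height_gpm)
    jp_map.shade_plot(lon, lat, rh,
                      label="relative humidity (%)",
                      color_bar_label_max=100,
                      color_bar_label_min=0,
                      color_map_type="gray",
                      double_color_bar=False,)
    jp_map.save_fig(output_name(ncfile, pressure), str(pressure) + "hPa")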
[((374, 399), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (397, 399), False, 'import argparse\n'), ((764, 792), 'ncmagics.fetchtime.fetch_time', 'fetchtime.fetch_time', (['ncfile'], {}), '(ncfile)\n', (784, 792), False, 'from ncmagics import fetchtime, japanmap, meteotool\n'), ((945, 979), 'ncmagics.meteotool.MeteoTools', 'meteotool.MeteoTools', (["args['file']"], {}), "(args['file'])\n", (965, 979), False, 'from ncmagics import fetchtime, japanmap, meteotool\n'), ((1589, 1605), 'ncmagics.japanmap.JpMap', 'japanmap.JpMap', ([], {}), '()\n', (1603, 1605), False, 'from ncmagics import fetchtime, japanmap, meteotool\n')]
amCharlie/hive
serde/src/gen/thrift/gen-py/megastruct/ttypes.py
e1870c190188a3b706849059969c8bec2220b6d2
# # Autogenerated by Thrift Compiler (0.13.0) # # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING # # options string: py # from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException from thrift.protocol.TProtocol import TProtocolException from thrift.TRecursive import fix_spec import sys from thrift.transport import TTransport all_structs = [] class MyEnum(object): LLAMA = 1 ALPACA = 2 _VALUES_TO_NAMES = { 1: "LLAMA", 2: "ALPACA", } _NAMES_TO_VALUES = { "LLAMA": 1, "ALPACA": 2, } class MiniStruct(object): """ Attributes: - my_string - my_enum """ def __init__(self, my_string=None, my_enum=None,): self.my_string = my_string self.my_enum = my_enum def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.my_string = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.I32: self.my_enum = iprot.readI32() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('MiniStruct') if self.my_string is not None: oprot.writeFieldBegin('my_string', TType.STRING, 1) oprot.writeString(self.my_string.encode('utf-8') if sys.version_info[0] == 2 else self.my_string) oprot.writeFieldEnd() if self.my_enum is not None: oprot.writeFieldBegin('my_enum', TType.I32, 2) oprot.writeI32(self.my_enum) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class MegaStruct(object): """ Attributes: - my_bool - my_byte - my_16bit_int - my_32bit_int - my_64bit_int - my_double - my_string - my_binary - my_string_string_map - my_string_enum_map - my_enum_string_map - my_enum_struct_map - my_enum_stringlist_map - my_enum_structlist_map - my_stringlist - my_structlist - my_enumlist - my_stringset - my_enumset - my_structset """ def __init__(self, my_bool=None, my_byte=None, my_16bit_int=None, my_32bit_int=None, my_64bit_int=None, my_double=None, my_string=None, my_binary=None, my_string_string_map=None, my_string_enum_map=None, my_enum_string_map=None, my_enum_struct_map=None, my_enum_stringlist_map=None, my_enum_structlist_map=None, my_stringlist=None, my_structlist=None, my_enumlist=None, my_stringset=None, my_enumset=None, my_structset=None,): self.my_bool = my_bool self.my_byte = my_byte self.my_16bit_int = my_16bit_int self.my_32bit_int = my_32bit_int self.my_64bit_int = my_64bit_int self.my_double = my_double self.my_string = my_string self.my_binary = my_binary self.my_string_string_map = my_string_string_map self.my_string_enum_map = my_string_enum_map self.my_enum_string_map = my_enum_string_map self.my_enum_struct_map = my_enum_struct_map self.my_enum_stringlist_map = my_enum_stringlist_map 
self.my_enum_structlist_map = my_enum_structlist_map self.my_stringlist = my_stringlist self.my_structlist = my_structlist self.my_enumlist = my_enumlist self.my_stringset = my_stringset self.my_enumset = my_enumset self.my_structset = my_structset def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.BOOL: self.my_bool = iprot.readBool() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.BYTE: self.my_byte = iprot.readByte() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.I16: self.my_16bit_int = iprot.readI16() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.I32: self.my_32bit_int = iprot.readI32() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.I64: self.my_64bit_int = iprot.readI64() else: iprot.skip(ftype) elif fid == 6: if ftype == TType.DOUBLE: self.my_double = iprot.readDouble() else: iprot.skip(ftype) elif fid == 7: if ftype == TType.STRING: self.my_string = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif fid == 8: if ftype == TType.STRING: self.my_binary = iprot.readBinary() else: iprot.skip(ftype) elif fid == 9: if ftype == TType.MAP: self.my_string_string_map = {} (_ktype1, _vtype2, _size0) = iprot.readMapBegin() for _i4 in range(_size0): _key5 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() _val6 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() self.my_string_string_map[_key5] = _val6 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 10: if ftype == TType.MAP: self.my_string_enum_map = {} (_ktype8, _vtype9, _size7) = iprot.readMapBegin() for _i11 in range(_size7): _key12 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() _val13 = iprot.readI32() self.my_string_enum_map[_key12] = _val13 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 11: if ftype == TType.MAP: self.my_enum_string_map = {} (_ktype15, _vtype16, _size14) = iprot.readMapBegin() for _i18 in range(_size14): _key19 = iprot.readI32() _val20 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() self.my_enum_string_map[_key19] = _val20 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 12: if ftype == TType.MAP: self.my_enum_struct_map = {} (_ktype22, _vtype23, _size21) = iprot.readMapBegin() for _i25 in range(_size21): _key26 = iprot.readI32() _val27 = MiniStruct() _val27.read(iprot) self.my_enum_struct_map[_key26] = _val27 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 13: if ftype == TType.MAP: self.my_enum_stringlist_map = {} (_ktype29, _vtype30, _size28) = iprot.readMapBegin() for _i32 in range(_size28): _key33 = iprot.readI32() _val34 = [] (_etype38, _size35) = iprot.readListBegin() for _i39 in range(_size35): _elem40 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() _val34.append(_elem40) iprot.readListEnd() self.my_enum_stringlist_map[_key33] = _val34 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 14: if ftype == TType.MAP: self.my_enum_structlist_map = {} (_ktype42, _vtype43, _size41) = iprot.readMapBegin() for _i45 in range(_size41): _key46 = iprot.readI32() _val47 = [] (_etype51, 
_size48) = iprot.readListBegin() for _i52 in range(_size48): _elem53 = MiniStruct() _elem53.read(iprot) _val47.append(_elem53) iprot.readListEnd() self.my_enum_structlist_map[_key46] = _val47 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 15: if ftype == TType.LIST: self.my_stringlist = [] (_etype57, _size54) = iprot.readListBegin() for _i58 in range(_size54): _elem59 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() self.my_stringlist.append(_elem59) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 16: if ftype == TType.LIST: self.my_structlist = [] (_etype63, _size60) = iprot.readListBegin() for _i64 in range(_size60): _elem65 = MiniStruct() _elem65.read(iprot) self.my_structlist.append(_elem65) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 17: if ftype == TType.LIST: self.my_enumlist = [] (_etype69, _size66) = iprot.readListBegin() for _i70 in range(_size66): _elem71 = iprot.readI32() self.my_enumlist.append(_elem71) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 18: if ftype == TType.SET: self.my_stringset = set() (_etype75, _size72) = iprot.readSetBegin() for _i76 in range(_size72): _elem77 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString() self.my_stringset.add(_elem77) iprot.readSetEnd() else: iprot.skip(ftype) elif fid == 19: if ftype == TType.SET: self.my_enumset = set() (_etype81, _size78) = iprot.readSetBegin() for _i82 in range(_size78): _elem83 = iprot.readI32() self.my_enumset.add(_elem83) iprot.readSetEnd() else: iprot.skip(ftype) elif fid == 20: if ftype == TType.SET: self.my_structset = set() (_etype87, _size84) = iprot.readSetBegin() for _i88 in range(_size84): _elem89 = MiniStruct() _elem89.read(iprot) self.my_structset.add(_elem89) iprot.readSetEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('MegaStruct') if self.my_bool is not None: oprot.writeFieldBegin('my_bool', TType.BOOL, 1) oprot.writeBool(self.my_bool) oprot.writeFieldEnd() if self.my_byte is not None: oprot.writeFieldBegin('my_byte', TType.BYTE, 2) oprot.writeByte(self.my_byte) oprot.writeFieldEnd() if self.my_16bit_int is not None: oprot.writeFieldBegin('my_16bit_int', TType.I16, 3) oprot.writeI16(self.my_16bit_int) oprot.writeFieldEnd() if self.my_32bit_int is not None: oprot.writeFieldBegin('my_32bit_int', TType.I32, 4) oprot.writeI32(self.my_32bit_int) oprot.writeFieldEnd() if self.my_64bit_int is not None: oprot.writeFieldBegin('my_64bit_int', TType.I64, 5) oprot.writeI64(self.my_64bit_int) oprot.writeFieldEnd() if self.my_double is not None: oprot.writeFieldBegin('my_double', TType.DOUBLE, 6) oprot.writeDouble(self.my_double) oprot.writeFieldEnd() if self.my_string is not None: oprot.writeFieldBegin('my_string', TType.STRING, 7) oprot.writeString(self.my_string.encode('utf-8') if sys.version_info[0] == 2 else self.my_string) oprot.writeFieldEnd() if self.my_binary is not None: oprot.writeFieldBegin('my_binary', TType.STRING, 8) oprot.writeBinary(self.my_binary) oprot.writeFieldEnd() if self.my_string_string_map is not None: oprot.writeFieldBegin('my_string_string_map', TType.MAP, 9) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.my_string_string_map)) for kiter90, viter91 in self.my_string_string_map.items(): 
oprot.writeString(kiter90.encode('utf-8') if sys.version_info[0] == 2 else kiter90) oprot.writeString(viter91.encode('utf-8') if sys.version_info[0] == 2 else viter91) oprot.writeMapEnd() oprot.writeFieldEnd() if self.my_string_enum_map is not None: oprot.writeFieldBegin('my_string_enum_map', TType.MAP, 10) oprot.writeMapBegin(TType.STRING, TType.I32, len(self.my_string_enum_map)) for kiter92, viter93 in self.my_string_enum_map.items(): oprot.writeString(kiter92.encode('utf-8') if sys.version_info[0] == 2 else kiter92) oprot.writeI32(viter93) oprot.writeMapEnd() oprot.writeFieldEnd() if self.my_enum_string_map is not None: oprot.writeFieldBegin('my_enum_string_map', TType.MAP, 11) oprot.writeMapBegin(TType.I32, TType.STRING, len(self.my_enum_string_map)) for kiter94, viter95 in self.my_enum_string_map.items(): oprot.writeI32(kiter94) oprot.writeString(viter95.encode('utf-8') if sys.version_info[0] == 2 else viter95) oprot.writeMapEnd() oprot.writeFieldEnd() if self.my_enum_struct_map is not None: oprot.writeFieldBegin('my_enum_struct_map', TType.MAP, 12) oprot.writeMapBegin(TType.I32, TType.STRUCT, len(self.my_enum_struct_map)) for kiter96, viter97 in self.my_enum_struct_map.items(): oprot.writeI32(kiter96) viter97.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.my_enum_stringlist_map is not None: oprot.writeFieldBegin('my_enum_stringlist_map', TType.MAP, 13) oprot.writeMapBegin(TType.I32, TType.LIST, len(self.my_enum_stringlist_map)) for kiter98, viter99 in self.my_enum_stringlist_map.items(): oprot.writeI32(kiter98) oprot.writeListBegin(TType.STRING, len(viter99)) for iter100 in viter99: oprot.writeString(iter100.encode('utf-8') if sys.version_info[0] == 2 else iter100) oprot.writeListEnd() oprot.writeMapEnd() oprot.writeFieldEnd() if self.my_enum_structlist_map is not None: oprot.writeFieldBegin('my_enum_structlist_map', TType.MAP, 14) oprot.writeMapBegin(TType.I32, TType.LIST, len(self.my_enum_structlist_map)) for kiter101, viter102 in self.my_enum_structlist_map.items(): oprot.writeI32(kiter101) oprot.writeListBegin(TType.STRUCT, len(viter102)) for iter103 in viter102: iter103.write(oprot) oprot.writeListEnd() oprot.writeMapEnd() oprot.writeFieldEnd() if self.my_stringlist is not None: oprot.writeFieldBegin('my_stringlist', TType.LIST, 15) oprot.writeListBegin(TType.STRING, len(self.my_stringlist)) for iter104 in self.my_stringlist: oprot.writeString(iter104.encode('utf-8') if sys.version_info[0] == 2 else iter104) oprot.writeListEnd() oprot.writeFieldEnd() if self.my_structlist is not None: oprot.writeFieldBegin('my_structlist', TType.LIST, 16) oprot.writeListBegin(TType.STRUCT, len(self.my_structlist)) for iter105 in self.my_structlist: iter105.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.my_enumlist is not None: oprot.writeFieldBegin('my_enumlist', TType.LIST, 17) oprot.writeListBegin(TType.I32, len(self.my_enumlist)) for iter106 in self.my_enumlist: oprot.writeI32(iter106) oprot.writeListEnd() oprot.writeFieldEnd() if self.my_stringset is not None: oprot.writeFieldBegin('my_stringset', TType.SET, 18) oprot.writeSetBegin(TType.STRING, len(self.my_stringset)) for iter107 in self.my_stringset: oprot.writeString(iter107.encode('utf-8') if sys.version_info[0] == 2 else iter107) oprot.writeSetEnd() oprot.writeFieldEnd() if self.my_enumset is not None: oprot.writeFieldBegin('my_enumset', TType.SET, 19) oprot.writeSetBegin(TType.I32, len(self.my_enumset)) for iter108 in self.my_enumset: oprot.writeI32(iter108) oprot.writeSetEnd() oprot.writeFieldEnd() if 
self.my_structset is not None: oprot.writeFieldBegin('my_structset', TType.SET, 20) oprot.writeSetBegin(TType.STRUCT, len(self.my_structset)) for iter109 in self.my_structset: iter109.write(oprot) oprot.writeSetEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) all_structs.append(MiniStruct) MiniStruct.thrift_spec = ( None, # 0 (1, TType.STRING, 'my_string', 'UTF8', None, ), # 1 (2, TType.I32, 'my_enum', None, None, ), # 2 ) all_structs.append(MegaStruct) MegaStruct.thrift_spec = ( None, # 0 (1, TType.BOOL, 'my_bool', None, None, ), # 1 (2, TType.BYTE, 'my_byte', None, None, ), # 2 (3, TType.I16, 'my_16bit_int', None, None, ), # 3 (4, TType.I32, 'my_32bit_int', None, None, ), # 4 (5, TType.I64, 'my_64bit_int', None, None, ), # 5 (6, TType.DOUBLE, 'my_double', None, None, ), # 6 (7, TType.STRING, 'my_string', 'UTF8', None, ), # 7 (8, TType.STRING, 'my_binary', 'BINARY', None, ), # 8 (9, TType.MAP, 'my_string_string_map', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 9 (10, TType.MAP, 'my_string_enum_map', (TType.STRING, 'UTF8', TType.I32, None, False), None, ), # 10 (11, TType.MAP, 'my_enum_string_map', (TType.I32, None, TType.STRING, 'UTF8', False), None, ), # 11 (12, TType.MAP, 'my_enum_struct_map', (TType.I32, None, TType.STRUCT, [MiniStruct, None], False), None, ), # 12 (13, TType.MAP, 'my_enum_stringlist_map', (TType.I32, None, TType.LIST, (TType.STRING, 'UTF8', False), False), None, ), # 13 (14, TType.MAP, 'my_enum_structlist_map', (TType.I32, None, TType.LIST, (TType.STRUCT, [MiniStruct, None], False), False), None, ), # 14 (15, TType.LIST, 'my_stringlist', (TType.STRING, 'UTF8', False), None, ), # 15 (16, TType.LIST, 'my_structlist', (TType.STRUCT, [MiniStruct, None], False), None, ), # 16 (17, TType.LIST, 'my_enumlist', (TType.I32, None, False), None, ), # 17 (18, TType.SET, 'my_stringset', (TType.STRING, 'UTF8', False), None, ), # 18 (19, TType.SET, 'my_enumset', (TType.I32, None, False), None, ), # 19 (20, TType.SET, 'my_structset', (TType.STRUCT, [MiniStruct, None], False), None, ), # 20 ) fix_spec(all_structs) del all_structs
[((22938, 22959), 'thrift.TRecursive.fix_spec', 'fix_spec', (['all_structs'], {}), '(all_structs)\n', (22946, 22959), False, 'from thrift.TRecursive import fix_spec\n')]
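As an aside, the generated classes above are ordinary Thrift structs; a minimal round-trip sketch, assuming the standard Thrift Python runtime the module already imports (TMemoryBuffer and TBinaryProtocol are standard runtime classes, the field values are made up for illustration):

# Sketch: serialize and deserialize a MiniStruct with Thrift's binary protocol.
from thrift.protocol import TBinaryProtocol
from thrift.transport import TTransport

out_buf = TTransport.TMemoryBuffer()
MiniStruct(my_string='llama', my_enum=MyEnum.LLAMA).write(
    TBinaryProtocol.TBinaryProtocol(out_buf))

# Feed the serialized bytes back through read() and check the fields survived.
in_buf = TTransport.TMemoryBuffer(out_buf.getvalue())
decoded = MiniStruct()
decoded.read(TBinaryProtocol.TBinaryProtocol(in_buf))
assert decoded.my_string == 'llama' and decoded.my_enum == MyEnum.LLAMA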
Tylarb/gpdb
gpMgmt/bin/gpload_test/gpload2/TEST.py
15e1341cfbac7f70d2086a9a1d46149a82765b5e
#!/usr/bin/env python import unittest import sys import os import string import time import socket import fileinput import platform import re try: import subprocess32 as subprocess except: import subprocess import pg def get_port_from_conf(): file = os.environ.get('MASTER_DATA_DIRECTORY')+'/postgresql.conf' if os.path.isfile(file): with open(file) as f: for line in f.xreadlines(): match = re.search('port=\d+',line) if match: match1 = re.search('\d+', match.group()) if match1: return match1.group() def get_port(): port = os.environ['PGPORT'] if not port: port = get_port_from_conf() return port if port else 5432 def get_ip(hostname=None): if hostname is None: hostname = socket.gethostname() else: hostname = hostname hostinfo = socket.getaddrinfo(hostname, None) ipaddrlist = list(set([(ai[4][0]) for ai in hostinfo])) for myip in ipaddrlist: if myip.find(":") > 0: ipv6 = myip return ipv6 elif myip.find(".") > 0: ipv4 = myip return ipv4 def getPortMasterOnly(host = 'localhost',master_value = None, user = os.environ.get('USER'),gphome = os.environ['GPHOME'], mdd=os.environ['MASTER_DATA_DIRECTORY'],port = os.environ['PGPORT']): master_pattern = "Context:\s*-1\s*Value:\s*\d+" command = "gpconfig -s %s" % ( "port" ) cmd = "source %s/greenplum_path.sh; export MASTER_DATA_DIRECTORY=%s; export PGPORT=%s; %s" \ % (gphome, mdd, port, command) (ok,out) = run(cmd) if not ok: raise Exception("Unable to connect to segment server %s as user %s" % (host, user)) for line in out: out = line.split('\n') for line in out: if re.search(master_pattern, line): master_value = int(line.split()[3].strip()) if master_value is None: error_msg = "".join(out) raise Exception(error_msg) return str(master_value) """ Global Values """ MYD = os.path.abspath(os.path.dirname(__file__)) mkpath = lambda *x: os.path.join(MYD, *x) UPD = os.path.abspath(mkpath('..')) if UPD not in sys.path: sys.path.append(UPD) DBNAME = "postgres" USER = os.environ.get( "LOGNAME" ) HOST = socket.gethostname() GPHOME = os.getenv("GPHOME") PGPORT = get_port() PGUSER = os.environ.get("PGUSER") if PGUSER is None: PGUSER = USER PGHOST = os.environ.get("PGHOST") if PGHOST is None: PGHOST = HOST d = mkpath('config') if not os.path.exists(d): os.mkdir(d) def write_config_file(mode='insert', reuse_flag='',columns_flag='0',mapping='0',portNum='8081',database='reuse_gptest',host='localhost',formatOpts='text',file='data/external_file_01.txt',table='texttable',format='text',delimiter="'|'",escape='',quote='',truncate='False',log_errors=None, error_limit='0',error_table=None,externalSchema=None,staging_table=None,fast_match='false', encoding=None, preload=True, fill=False): f = open(mkpath('config/config_file'),'w') f.write("VERSION: 1.0.0.1") if database: f.write("\nDATABASE: "+database) f.write("\nUSER: "+os.environ.get('USER')) f.write("\nHOST: "+hostNameAddrs) f.write("\nPORT: "+masterPort) f.write("\nGPLOAD:") f.write("\n INPUT:") f.write("\n - SOURCE:") f.write("\n LOCAL_HOSTNAME:") f.write("\n - "+hostNameAddrs) if portNum: f.write("\n PORT: "+portNum) f.write("\n FILE:") f.write("\n - "+mkpath(file)) if columns_flag=='1': f.write("\n - COLUMNS:") f.write("\n - s_s1: text") f.write("\n - s_s2: text") f.write("\n - s_dt: timestamp") f.write("\n - s_s3: text") f.write("\n - s_n1: smallint") f.write("\n - s_n2: integer") f.write("\n - s_n3: bigint") f.write("\n - s_n4: decimal") f.write("\n - s_n5: numeric") f.write("\n - s_n6: real") f.write("\n - s_n7: double precision") f.write("\n - s_n8: text") f.write("\n - s_n9: text") if format: f.write("\n - FORMAT: 
"+format) if log_errors: f.write("\n - LOG_ERRORS: true") f.write("\n - ERROR_LIMIT: " + error_limit) if error_table: f.write("\n - ERROR_TABLE: " + error_table) f.write("\n - ERROR_LIMIT: " + error_limit) if delimiter: f.write("\n - DELIMITER: "+delimiter) if encoding: f.write("\n - ENCODING: "+encoding) if escape: f.write("\n - ESCAPE: "+escape) if quote: f.write("\n - QUOTE: "+quote) if fill: f.write("\n - FILL_MISSING_FIELDS: true") f.write("\n OUTPUT:") f.write("\n - TABLE: "+table) if mode: if mode == 'insert': f.write("\n - MODE: "+'insert') if mode == 'update': f.write("\n - MODE: "+'update') if mode == 'merge': f.write("\n - MODE: "+'merge') f.write("\n - UPDATE_COLUMNS:") f.write("\n - n2") f.write("\n - MATCH_COLUMNS:") f.write("\n - n1") f.write("\n - s1") f.write("\n - s2") if mapping=='1': f.write("\n - MAPPING:") f.write("\n s1: s_s1") f.write("\n s2: s_s2") f.write("\n dt: s_dt") f.write("\n s3: s_s3") f.write("\n n1: s_n1") f.write("\n n2: s_n2") f.write("\n n3: s_n3") f.write("\n n4: s_n4") f.write("\n n5: s_n5") f.write("\n n6: s_n6") f.write("\n n7: s_n7") f.write("\n n8: s_n8") f.write("\n n9: s_n9") if externalSchema: f.write("\n EXTERNAL:") f.write("\n - SCHEMA: "+externalSchema) if preload: f.write("\n PRELOAD:") f.write("\n - REUSE_TABLES: "+reuse_flag) f.write("\n - FAST_MATCH: "+fast_match) if staging_table: f.write("\n - STAGING_TABLE: "+staging_table) f.write("\n") f.close() def runfile(ifile, flag='', dbname=None, outputPath="", outputFile="", username=None, PGOPTIONS=None, host = None, port = None): if len(outputFile) == 0: (ok, out) = psql_run(ifile = ifile,ofile = outFile(ifile, outputPath),flag = flag, dbname=dbname , username=username, PGOPTIONS=PGOPTIONS, host = host, port = port) else: (ok,out) = psql_run(ifile =ifile, ofile =outFile(outputFile, outputPath), flag =flag, dbname= dbname, username= username, PGOPTIONS= PGOPTIONS, host = host, port = port) return (ok, out) def psql_run(ifile = None, ofile = None, cmd = None, flag = '-e',dbname = None, username = None, PGOPTIONS = None, host = None, port = None): ''' Run a command or file against psql. Return True if OK. @param dbname: database name @param ifile: input file @param cmd: command line @param flag: -e Run SQL with no comments (default) -a Run SQL with comments and psql notice @param username: psql user @param host : to connect to a different host @param port : port where gpdb is running @param PGOPTIONS: connects to postgres via utility mode ''' if dbname is None: dbname = DBNAME if username is None: username = PGUSER # Use the default login user if PGOPTIONS is None: PGOPTIONS = "" else: PGOPTIONS = "PGOPTIONS='%s'" % PGOPTIONS if host is None: host = "-h %s" % PGHOST else: host = "-h %s" % host if port is None: port = "" else: port = "-p %s" % port if cmd: arg = '-c "%s"' % cmd elif ifile: arg = ' < ' + ifile if not (flag == '-q'): # Don't echo commands sent to server arg = '-e < ' + ifile if flag == '-a': arg = '-f ' + ifile else: raise PSQLError('missing cmd and ifile') if ofile == '-': ofile = '2>&1' elif not ofile: ofile = '> /dev/null 2>&1' else: ofile = '> %s 2>&1' % ofile return run('%s psql -d %s %s %s -U %s %s %s %s' % (PGOPTIONS, dbname, host, port, username, flag, arg, ofile)) def run(cmd): """ Run a shell command. Return (True, [result]) if OK, or (False, []) otherwise. @params cmd: The command to run at the shell. oFile: an optional output file. mode: What to do if the output file already exists: 'a' = append; 'w' = write. 
Defaults to append (so that the function is backwards compatible). Yes, this is passed to the open() function, so you can theoretically pass any value that is valid for the second parameter of open(). """ p = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE) out = p.communicate()[0] ret = [] ret.append(out) rc = False if p.wait() else True return (rc,ret) def outFile(fname,outputPath = ''): return changeExtFile(fname, ".out", outputPath) def diffFile( fname, outputPath = "" ): return changeExtFile( fname, ".diff", outputPath ) def changeExtFile( fname, ext = ".diff", outputPath = "" ): if len( outputPath ) == 0: return os.path.splitext( fname )[0] + ext else: filename = fname.split( "/" ) fname = os.path.splitext( filename[len( filename ) - 1] )[0] return outputPath + "/" + fname + ext def gpdbAnsFile(fname): ext = '.ans' return os.path.splitext(fname)[0] + ext def isFileEqual( f1, f2, optionalFlags = "", outputPath = "", myinitfile = ""): LMYD = os.path.abspath(os.path.dirname(__file__)) if not os.access( f1, os.R_OK ): raise Exception( 'Error: cannot find file %s' % f1 ) if not os.access( f2, os.R_OK ): raise Exception( 'Error: cannot find file %s' % f2 ) dfile = diffFile( f1, outputPath = outputPath ) # Gets the suitePath name to add init_file suitePath = f1[0:f1.rindex( "/" )] if os.path.exists(suitePath + "/init_file"): (ok, out) = run('../gpdiff.pl -w ' + optionalFlags + \ ' -I NOTICE: -I HINT: -I CONTEXT: -I GP_IGNORE: --gp_init_file=%s/global_init_file --gp_init_file=%s/init_file ' '%s %s > %s 2>&1' % (LMYD, suitePath, f1, f2, dfile)) else: if os.path.exists(myinitfile): (ok, out) = run('../gpdiff.pl -w ' + optionalFlags + \ ' -I NOTICE: -I HINT: -I CONTEXT: -I GP_IGNORE: --gp_init_file=%s/global_init_file --gp_init_file=%s ' '%s %s > %s 2>&1' % (LMYD, myinitfile, f1, f2, dfile)) else: (ok, out) = run( '../gpdiff.pl -w ' + optionalFlags + \ ' -I NOTICE: -I HINT: -I CONTEXT: -I GP_IGNORE: --gp_init_file=%s/global_init_file ' '%s %s > %s 2>&1' % ( LMYD, f1, f2, dfile ) ) if ok: os.unlink( dfile ) return ok def read_diff(ifile, outputPath): """ Opens the diff file that is assocated with the given input file and returns its contents as a string. 
""" dfile = diffFile(ifile, outputPath) with open(dfile, 'r') as diff: return diff.read() def modify_sql_file(num): file = mkpath('query%d.sql' % num) user = os.environ.get('USER') if not user: user = os.environ.get('USER') if os.path.isfile(file): for line in fileinput.FileInput(file,inplace=1): line = line.replace("gpload.py ","gpload ") print str(re.sub('\n','',line)) def copy_data(source='',target=''): cmd = 'cp '+ mkpath('data/' + source) + ' ' + mkpath(target) p = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE) return p.communicate() hostNameAddrs = get_ip(HOST) masterPort = getPortMasterOnly() def get_table_name(): try: db = pg.DB(dbname='reuse_gptest' ,host='localhost' ,port=int(PGPORT) ) except Exception,e: errorMessage = str(e) print 'could not connect to database: ' + errorMessage queryString = """SELECT relname from pg_class WHERE relname like 'ext_gpload_reusable%' OR relname like 'staging_gpload_reusable%';""" resultList = db.query(queryString.encode('utf-8')).getresult() return resultList def drop_tables(): try: db = pg.DB(dbname='reuse_gptest' ,host='localhost' ,port=int(PGPORT) ) except Exception,e: errorMessage = str(e) print 'could not connect to database: ' + errorMessage list = get_table_name() for i in list: name = i[0] match = re.search('ext_gpload',name) if match: queryString = "DROP EXTERNAL TABLE %s" % name db.query(queryString.encode('utf-8')) else: queryString = "DROP TABLE %s" % name db.query(queryString.encode('utf-8')) class PSQLError(Exception): ''' PSQLError is the base class for exceptions in this module http://docs.python.org/tutorial/errors.html We want to raise an error and not a failure. The reason for an error might be program error, file not found, etc. Failure is define as test case failures, when the output is different from the expected result. ''' pass class GPLoad_FormatOpts_TestCase(unittest.TestCase): def check_result(self,ifile, optionalFlags = "-U3", outputPath = ""): """ PURPOSE: compare the actual and expected output files and report an error if they don't match. PARAMETERS: ifile: the name of the .sql file whose actual and expected outputs we want to compare. You may include the path as well as the filename. This function will process this file name to figure out the proper names of the .out and .ans files. optionalFlags: command-line options (if any) for diff. For example, pass " -B " (with the blank spaces) to ignore blank lines. By default, diffs are unified with 3 lines of context (i.e. optionalFlags is "-U3"). """ f1 = gpdbAnsFile(ifile) f2 = outFile(ifile, outputPath=outputPath) result = isFileEqual(f1, f2, optionalFlags, outputPath=outputPath) diff = None if result else read_diff(ifile, outputPath) self.assertTrue(result, "query resulted in diff:\n{}".format(diff)) return True def doTest(self, num): file = mkpath('query%d.diff' % num) if os.path.isfile(file): run("rm -f" + " " + file) modify_sql_file(num) file = mkpath('query%d.sql' % num) runfile(file) self.check_result(file) def test_00_gpload_formatOpts_setup(self): "0 gpload setup" for num in range(1,40): f = open(mkpath('query%d.sql' % num),'w') f.write("\! gpload -f "+mkpath('config/config_file')+ " -d reuse_gptest\n"+"\! 
gpload -f "+mkpath('config/config_file')+ " -d reuse_gptest\n") f.close() file = mkpath('setup.sql') runfile(file) self.check_result(file) def test_01_gpload_formatOpts_delimiter(self): "1 gpload formatOpts delimiter '|' with reuse " copy_data('external_file_01.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="'|'") self.doTest(1) def test_02_gpload_formatOpts_delimiter(self): "2 gpload formatOpts delimiter '\t' with reuse" copy_data('external_file_02.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="'\t'") self.doTest(2) def test_03_gpload_formatOpts_delimiter(self): "3 gpload formatOpts delimiter E'\t' with reuse" copy_data('external_file_02.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="E'\\t'") self.doTest(3) def test_04_gpload_formatOpts_delimiter(self): "4 gpload formatOpts delimiter E'\u0009' with reuse" copy_data('external_file_02.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="E'\u0009'") self.doTest(4) def test_05_gpload_formatOpts_delimiter(self): "5 gpload formatOpts delimiter E'\\'' with reuse" copy_data('external_file_03.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="E'\''") self.doTest(5) def test_06_gpload_formatOpts_delimiter(self): "6 gpload formatOpts delimiter \"'\" with reuse" copy_data('external_file_03.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="\"'\"") self.doTest(6) def test_07_gpload_reuse_table_insert_mode_without_reuse(self): "7 gpload insert mode without reuse" runfile(mkpath('setup.sql')) f = open(mkpath('query7.sql'),'a') f.write("\! psql -d reuse_gptest -c 'select count(*) from texttable;'") f.close() write_config_file(mode='insert',reuse_flag='false') self.doTest(7) def test_08_gpload_reuse_table_update_mode_with_reuse(self): "8 gpload update mode with reuse" drop_tables() copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='update',reuse_flag='true',file='data_file.txt') self.doTest(8) def test_09_gpload_reuse_table_update_mode_without_reuse(self): "9 gpload update mode without reuse" f = open(mkpath('query9.sql'),'a') f.write("\! psql -d reuse_gptest -c 'select count(*) from texttable;'\n"+"\! 
psql -d reuse_gptest -c 'select * from texttable where n2=222;'") f.close() copy_data('external_file_05.txt','data_file.txt') write_config_file(mode='update',reuse_flag='false',file='data_file.txt') self.doTest(9) def test_10_gpload_reuse_table_merge_mode_with_reuse(self): "10 gpload merge mode with reuse " drop_tables() copy_data('external_file_06.txt','data_file.txt') write_config_file('merge','true',file='data_file.txt') self.doTest(10) def test_11_gpload_reuse_table_merge_mode_without_reuse(self): "11 gpload merge mode without reuse " copy_data('external_file_07.txt','data_file.txt') write_config_file('merge','false',file='data_file.txt') self.doTest(11) def test_12_gpload_reuse_table_merge_mode_with_different_columns_number_in_file(self): "12 gpload merge mode with reuse (RERUN with different columns number in file) " psql_run(cmd="ALTER TABLE texttable ADD column n8 text",dbname='reuse_gptest') copy_data('external_file_08.txt','data_file.txt') write_config_file('merge','true',file='data_file.txt') self.doTest(12) def test_13_gpload_reuse_table_merge_mode_with_different_columns_number_in_DB(self): "13 gpload merge mode with reuse (RERUN with different columns number in DB table) " preTest = mkpath('pre_test_13.sql') psql_run(preTest, dbname='reuse_gptest') copy_data('external_file_09.txt','data_file.txt') write_config_file('merge','true',file='data_file.txt') self.doTest(13) def test_14_gpload_reuse_table_update_mode_with_reuse_RERUN(self): "14 gpload update mode with reuse (RERUN) " write_config_file('update','true',file='data_file.txt') self.doTest(14) def test_15_gpload_reuse_table_merge_mode_with_different_columns_order(self): "15 gpload merge mode with different columns' order " copy_data('external_file_10.txt','data/data_file.tbl') write_config_file('merge','true',file='data/data_file.tbl',columns_flag='1',mapping='1') self.doTest(15) def test_16_gpload_formatOpts_quote(self): "16 gpload formatOpts quote unspecified in CSV with reuse " copy_data('external_file_11.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','") self.doTest(16) def test_17_gpload_formatOpts_quote(self): "17 gpload formatOpts quote '\\x26'(&) with reuse" copy_data('external_file_12.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",quote="'\x26'") self.doTest(17) def test_18_gpload_formatOpts_quote(self): "18 gpload formatOpts quote E'\\x26'(&) with reuse" copy_data('external_file_12.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",quote="E'\x26'") self.doTest(18) def test_19_gpload_formatOpts_escape(self): "19 gpload formatOpts escape '\\' with reuse" copy_data('external_file_01.txt','data_file.txt') file = mkpath('setup.sql') runfile(file) write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',escape='\\') self.doTest(19) def test_20_gpload_formatOpts_escape(self): "20 gpload formatOpts escape '\\' with reuse" copy_data('external_file_01.txt','data_file.txt') write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',escape= '\x5C') self.doTest(20) def test_21_gpload_formatOpts_escape(self): "21 gpload formatOpts escape E'\\\\' with reuse" copy_data('external_file_01.txt','data_file.txt') 
write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',escape="E'\\\\'") self.doTest(21) # case 22 is flaky on concourse. It may report: Fatal Python error: GC object already tracked during testing. # This is seldom issue. we can't reproduce it locally, so we disable it, in order to not blocking others #def test_22_gpload_error_count(self): # "22 gpload error count" # f = open(mkpath('query22.sql'),'a') # f.write("\! psql -d reuse_gptest -c 'select count(*) from csvtable;'") # f.close() # f = open(mkpath('data/large_file.csv'),'w') # for i in range(0, 10000): # if i % 2 == 0: # f.write('1997,Ford,E350,"ac, abs, moon",3000.00,a\n') # else: # f.write('1997,Ford,E350,"ac, abs, moon",3000.00\n') # f.close() # copy_data('large_file.csv','data_file.csv') # write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",log_errors=True,error_limit='90000000') # self.doTest(22) def test_23_gpload_error_count(self): "23 gpload error_table" file = mkpath('setup.sql') runfile(file) f = open(mkpath('query23.sql'),'a') f.write("\! psql -d reuse_gptest -c 'select count(*) from csvtable;'") f.close() f = open(mkpath('data/large_file.csv'),'w') for i in range(0, 10000): if i % 2 == 0: f.write('1997,Ford,E350,"ac, abs, moon",3000.00,a\n') else: f.write('1997,Ford,E350,"ac, abs, moon",3000.00\n') f.close() copy_data('large_file.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",error_table="err_table",error_limit='90000000') self.doTest(23) def test_24_gpload_error_count(self): "24 gpload error count with ext schema" file = mkpath('setup.sql') runfile(file) f = open(mkpath('query24.sql'),'a') f.write("\! psql -d reuse_gptest -c 'select count(*) from csvtable;'") f.close() f = open(mkpath('data/large_file.csv'),'w') for i in range(0, 10000): if i % 2 == 0: f.write('1997,Ford,E350,"ac, abs, moon",3000.00,a\n') else: f.write('1997,Ford,E350,"ac, abs, moon",3000.00\n') f.close() copy_data('large_file.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",log_errors=True,error_limit='90000000',externalSchema='test') self.doTest(24) def test_25_gpload_ext_staging_table(self): "25 gpload reuse ext_staging_table if it is configured" file = mkpath('setup.sql') runfile(file) f = open(mkpath('query25.sql'),'a') f.write("\! psql -d reuse_gptest -c 'select count(*) from csvtable;'") f.close() copy_data('external_file_13.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",log_errors=True,error_limit='10',staging_table='staging_table') self.doTest(25) def test_26_gpload_ext_staging_table_with_externalschema(self): "26 gpload reuse ext_staging_table if it is configured with externalschema" file = mkpath('setup.sql') runfile(file) f = open(mkpath('query26.sql'),'a') f.write("\! 
psql -d reuse_gptest -c 'select count(*) from csvtable;'") f.close() copy_data('external_file_13.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",log_errors=True,error_limit='10',staging_table='staging_table',externalSchema='test') self.doTest(26) def test_27_gpload_ext_staging_table_with_externalschema(self): "27 gpload reuse ext_staging_table if it is configured with externalschema" file = mkpath('setup.sql') runfile(file) f = open(mkpath('query27.sql'),'a') f.write("\! psql -d reuse_gptest -c 'select count(*) from test.csvtable;'") f.close() copy_data('external_file_13.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='test.csvtable',format='csv',delimiter="','",log_errors=True,error_limit='10',staging_table='staging_table',externalSchema="'%'") self.doTest(27) def test_28_gpload_ext_staging_table_with_dot(self): "28 gpload reuse ext_staging_table if it is configured with dot" file = mkpath('setup.sql') runfile(file) f = open(mkpath('query28.sql'),'a') f.write("\! psql -d reuse_gptest -c 'select count(*) from test.csvtable;'") f.close() copy_data('external_file_13.csv','data_file.csv') write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='test.csvtable',format='csv',delimiter="','",log_errors=True,error_limit='10',staging_table='t.staging_table') self.doTest(28) def test_29_gpload_reuse_table_insert_mode_with_reuse_and_null(self): "29 gpload insert mode with reuse and null" runfile(mkpath('setup.sql')) f = open(mkpath('query29.sql'),'a') f.write("\! psql -d reuse_gptest -c 'select count(*) from texttable where n2 is null;'") f.close() copy_data('external_file_14.txt','data_file.txt') write_config_file(mode='insert',reuse_flag='true',file='data_file.txt',log_errors=True, error_limit='100') self.doTest(29) def test_30_gpload_reuse_table_update_mode_with_fast_match(self): "30 gpload update mode with fast match" drop_tables() copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='update',reuse_flag='true',fast_match='true',file='data_file.txt') self.doTest(30) def test_31_gpload_reuse_table_update_mode_with_fast_match_and_different_columns_number(self): "31 gpload update mode with fast match and differenct columns number) " psql_run(cmd="ALTER TABLE texttable ADD column n8 text",dbname='reuse_gptest') copy_data('external_file_08.txt','data_file.txt') write_config_file(mode='update',reuse_flag='true',fast_match='true',file='data_file.txt') self.doTest(31) def test_32_gpload_update_mode_without_reuse_table_with_fast_match(self): "32 gpload update mode when reuse table is false and fast match is true" drop_tables() copy_data('external_file_08.txt','data_file.txt') write_config_file(mode='update',reuse_flag='false',fast_match='true',file='data_file.txt') self.doTest(32) def test_33_gpload_reuse_table_merge_mode_with_fast_match_and_external_schema(self): "33 gpload update mode with fast match and external schema" file = mkpath('setup.sql') runfile(file) copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='merge',reuse_flag='true',fast_match='true',file='data_file.txt',externalSchema='test') self.doTest(33) def test_34_gpload_reuse_table_merge_mode_with_fast_match_and_encoding(self): "34 gpload merge mode with fast match and encoding GBK" file = mkpath('setup.sql') runfile(file) copy_data('external_file_04.txt','data_file.txt') 
write_config_file(mode='merge',reuse_flag='true',fast_match='true',file='data_file.txt',encoding='GBK') self.doTest(34) def test_35_gpload_reuse_table_merge_mode_with_fast_match_default_encoding(self): "35 gpload does not reuse table when encoding is setted from GBK to empty" write_config_file(mode='merge',reuse_flag='true',fast_match='true',file='data_file.txt') self.doTest(35) def test_36_gpload_reuse_table_merge_mode_default_encoding(self): "36 gpload merge mode with encoding GBK" file = mkpath('setup.sql') runfile(file) copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='merge',reuse_flag='true',fast_match='false',file='data_file.txt',encoding='GBK') self.doTest(36) def test_37_gpload_reuse_table_merge_mode_invalid_encoding(self): "37 gpload merge mode with invalid encoding" file = mkpath('setup.sql') runfile(file) copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='merge',reuse_flag='true',fast_match='false',file='data_file.txt',encoding='xxxx') self.doTest(37) def test_38_gpload_without_preload(self): "38 gpload insert mode without preload" file = mkpath('setup.sql') runfile(file) copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='insert',reuse_flag='true',fast_match='false',file='data_file.txt',error_table="err_table",error_limit='1000',preload=False) self.doTest(38) def test_39_gpload_fill_missing_fields(self): "39 gpload fill missing fields" file = mkpath('setup.sql') runfile(file) copy_data('external_file_04.txt','data_file.txt') write_config_file(mode='insert',reuse_flag='false',fast_match='false',file='data_file.txt',table='texttable1', error_limit='1000', fill=True) self.doTest(39) if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(GPLoad_FormatOpts_TestCase) runner = unittest.TextTestRunner(verbosity=2) ret = not runner.run(suite).wasSuccessful() sys.exit(ret)
[]
dylanlee101/leetcode
code_week19_831_96/biao_shi_shu_zi.py
b059afdadb83d504e62afd1227107de0b59557af
''' Implement a function to determine whether a string represents a numeric value (including integers and decimals). For example, the strings "+100", "5e2", "-123", "3.1416", "-1E-16" and "0123" all represent numeric values, while "12e", "1a3.14", "1.2.3", "+-5" and "12e+5.4" do not. Source: LeetCode Link: https://leetcode-cn.com/problems/biao-shi-shu-zhi-de-zi-fu-chuan-lcof ''' class Solution: def isNumber(self, s: str) -> bool: states = [ { ' ': 0, 's': 1, 'd': 2, '.': 4 }, # 0. start with 'blank' { 'd': 2, '.': 4 } , # 1. 'sign' before 'e' { 'd': 2, '.': 3, 'e': 5, ' ': 8 }, # 2. 'digit' before 'dot' { 'd': 3, 'e': 5, ' ': 8 }, # 3. 'digit' after 'dot' { 'd': 3 }, # 4. 'digit' after 'dot' ('blank' before 'dot') { 's': 6, 'd': 7 }, # 5. 'e' { 'd': 7 }, # 6. 'sign' after 'e' { 'd': 7, ' ': 8 }, # 7. 'digit' after 'e' { ' ': 8 } # 8. end with 'blank' ] p = 0 # start with state 0 for c in s: if '0' <= c <= '9': t = 'd' # digit elif c in "+-": t = 's' # sign elif c in "eE": t = 'e' # e or E elif c in ". ": t = c # dot, blank else: t = '?' # unknown if t not in states[p]: return False p = states[p][t] return p in (2, 3, 7, 8)
[]
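A quick sanity check of the state machine above, using only the examples already listed in the problem statement (nothing outside the docstring is assumed):

# Exercise Solution.isNumber() against the documented examples.
s = Solution()
valid = ["+100", "5e2", "-123", "3.1416", "-1E-16", "0123"]
invalid = ["12e", "1a3.14", "1.2.3", "+-5", "12e+5.4"]
assert all(s.isNumber(x) for x in valid)
assert not any(s.isNumber(x) for x in invalid)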
rackerlabs/teeth-overlord
teeth_overlord/tests/unit/networks/neutron.py
d76f6a03853d964b556aa1aa0f7011b4d1a6f208
""" Copyright 2013 Rackspace, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import collections from teeth_overlord import config from teeth_overlord.networks import neutron from teeth_overlord import tests from keystoneclient.apiclient import exceptions as keystone_exceptions from keystoneclient.v2_0 import client as keystone_client from neutronclient.common import exceptions as neutron_exceptions from neutronclient.neutron import client as neutron_client NETWORK1_RESPONSE = { u'status': u'ACTIVE', u'subnets': [u'SUBNET1'], u'name': u'private', u'provider:physical_network': None, u'admin_state_up': True, u'tenant_id': u'TENANTID', u'provider:network_type': u'local', u'router:external': False, u'shared': False, u'id': u'NETWORK1', u'provider:segmentation_id': None } NETWORK2_RESPONSE = { u'status': u'ACTIVE', u'subnets': [u'SUBNET2'], u'name': u'public', u'provider:physical_network': None, u'admin_state_up': True, u'tenant_id': u'TENANTID', u'provider:network_type': u'local', u'router:external': True, u'shared': False, u'id': u'NETWORK2', u'provider:segmentation_id': None } PORT1_RESPONSE = { u'status': u'ACTIVE', u'binding:host_id': u'precise64', u'name': u'', u'allowed_address_pairs': [], u'admin_state_up': True, u'network_id': u'NETWORK1', u'tenant_id': u'TENANTID', u'extra_dhcp_opts': [], u'binding:vif_type': u'ovs', u'device_owner': u'network:dhcp', u'binding:capabilities': {u'port_filter': True}, u'mac_address': u'fa:16:3e:e0:d4:63', u'fixed_ips': [ { u'subnet_id': u'SUBNET1', u'ip_address': u'10.0.0.3' } ], u'id': u'PORT1', u'security_groups': [], u'device_id': u'' } PORT2_RESPONSE = { u'status': u'DOWN', u'binding:host_id': u'', u'name': u'', u'allowed_address_pairs': [], u'admin_state_up': True, u'network_id': u'NETWORK2', u'tenant_id': u'TENANTID', u'extra_dhcp_opts': [], u'binding:vif_type': u'unbound', u'device_owner': u'', u'binding:capabilities': {u'port_filter': False}, u'mac_address': u'00:09:7b:3e:18:ca', u'fixed_ips': [ { u'subnet_id': u'SUBNET2', u'ip_address': u'192.168.27.3' } ], u'id': u'PORT2', u'security_groups': [u'SECGRP'], u'device_id': u'' } SUBNET1_RESPONSE = { u'name': u'private-subnet', u'enable_dhcp': True, u'network_id': u'NETWORK1', u'tenant_id': u'TENANTID', u'dns_nameservers': [], u'allocation_pools': [ { u'start': u'10.0.0.2', u'end': u'10.0.0.254' } ], u'host_routes': [], u'ip_version': 4, u'gateway_ip': u'10.0.0.1', u'cidr': u'10.0.0.0/24', u'id': u'SUBNET1' } SUBNET2_RESPONSE = { u'name': u'public-subnet', u'enable_dhcp': False, u'network_id': u'NETWORK2', u'tenant_id': u'TENANTID', u'dns_nameservers': [], u'allocation_pools': [ { u'start': u'192.168.27.1', u'end': u'192.168.27.1' }, { u'start': u'192.168.27.3', u'end': u'192.168.27.254' } ], u'host_routes': [], u'ip_version': 4, u'gateway_ip': u'192.168.27.2', u'cidr': u'192.168.27.0/24', u'id': u'SUBNET2' } SERIALIZED_NETWORK1 = collections.OrderedDict([ ('id', u'NETWORK1'), ('name', u'private'), ('status', u'ACTIVE'), ('subnets', [ collections.OrderedDict([ ('id', u'SUBNET1'), ('name', u'private-subnet'), ('ip_version', 4), 
('gateway_ip', u'10.0.0.1'), ('cidr', u'10.0.0.0/24'), ('enable_dhcp', True) ]) ]) ]) SERIALIZED_NETWORK2 = collections.OrderedDict([ ('id', u'NETWORK2'), ('name', u'public'), ('status', u'ACTIVE'), ('subnets', [ collections.OrderedDict([ ('id', u'SUBNET2'), ('name', u'public-subnet'), ('ip_version', 4), ('gateway_ip', u'192.168.27.2'), ('cidr', u'192.168.27.0/24'), ('enable_dhcp', False) ]) ]) ]) SERIALIZED_PORT1 = collections.OrderedDict([ ('id', u'PORT1'), ('name', u''), ('status', u'ACTIVE'), ('mac_address', u'fa:16:3e:e0:d4:63'), ('fixed_ips', [ { u'subnet_id': u'SUBNET1', u'ip_address': u'10.0.0.3' } ]), ('network', SERIALIZED_NETWORK1) ]) class TestNeutronProvider(tests.TeethMockTestUtilities): def setUp(self): super(TestNeutronProvider, self).setUp() self.config = config.LazyConfig(config={ 'KEYSTONE_USER': 'user', 'KEYSTONE_PASS': 'pass', 'KEYSTONE_TENANT_ID': 'tenant', 'KEYSTONE_AUTH_URL': 'auth_url', 'NEUTRON_VERSION': '2.0', 'NEUTRON_URL': 'neutron_url', 'NEUTRON_PUBLIC_NETWORK': 'd6b32008-1432-4299-81c7-cbe3128ba13f', 'NEUTRON_PRIVATE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10', 'NEUTRON_SERVICE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10', }) self.neutron_client_mock = self.add_mock(neutron_client, 'Client') self.neutron_mock = self.neutron_client_mock.return_value self.keystone_client_mock = self.add_mock(keystone_client, 'Client') self.keystone_client_mock.return_value.auth_token = 'auth_token' self.provider = neutron.NeutronProvider(self.config) def test_get_auth_token(self): t = self.provider._get_auth_token() self.assertEqual(t, 'auth_token') self.keystone_client_mock.assert_called_with( username='user', password='pass', tenant_id='tenant', auth_url='auth_url' ) def test_get_auth_token_client_exception(self): exc = keystone_exceptions.ClientException self.keystone_client_mock.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider._get_auth_token) def test_get_neutron_client(self): self.provider._get_neutron_client() self.neutron_client_mock.assert_called_with( '2.0', endpoint_url='neutron_url', token='auth_token' ) def test_get_neutron_client_exception(self): exc = neutron_exceptions.NeutronException() self.neutron_client_mock.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider._get_neutron_client) def test_list_networks(self): networks = {'networks': [NETWORK1_RESPONSE, NETWORK2_RESPONSE]} self.neutron_mock.list_networks.return_value = networks self.neutron_mock.show_subnet.side_effect = [ {'subnet': SUBNET1_RESPONSE}, {'subnet': SUBNET2_RESPONSE} ] networks = self.provider.list_networks() results = [ SERIALIZED_NETWORK1, SERIALIZED_NETWORK2 ] self.assertEqual([n.serialize() for n in networks], results) def test_list_networks_empty(self): self.neutron_mock.list_networks.return_value = {'networks': []} networks = self.provider.list_networks() self.neutron_mock.list_networks.assert_called() self.assertEqual(networks, []) def test_list_networks_client_exception(self): exc = neutron_exceptions.NeutronException() self.neutron_mock.list_networks.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider.list_networks) def test_get_network_info(self): network = {'network': NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value = network self.neutron_mock.show_subnet.side_effect = [ {'subnet': SUBNET1_RESPONSE} ] network = self.provider.get_network_info('NETWORK1') self.assertEqual(network.serialize(), SERIALIZED_NETWORK1) 
self.neutron_mock.show_network.assert_called_with('NETWORK1') def test_get_network_info_does_not_exist(self): exc = neutron_exceptions.NeutronException() exc.message = '404 Not Found' self.neutron_mock.show_network.side_effect = exc self.assertRaises(self.provider.NetworkDoesNotExist, self.provider.get_network_info, 'NETWORK1') def test_get_network_info_client_exception(self): exc = neutron_exceptions.NeutronException() self.neutron_mock.show_network.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider.get_network_info, 'NETWORK1') def test_list_ports(self): ports = {'ports': [PORT1_RESPONSE]} self.neutron_mock.list_ports.return_value = ports network = {'network': NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value = network subnet = {'subnet': SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet ports = self.provider.list_ports('a:b:c:d') self.assertEqual([p.serialize() for p in ports], [SERIALIZED_PORT1]) self.neutron_mock.list_ports.assert_called_with(mac_address='a:b:c:d') def test_attach(self): port = {'port': PORT1_RESPONSE} self.neutron_mock.create_port.return_value = port network = {'network': NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value = network subnet = {'subnet': SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet port = self.provider.attach('a:b:c:d', 'network_id') self.neutron_mock.create_port.assert_called_with({ 'port': { 'network_id': 'network_id', 'admin_state_up': True, 'mac_address': 'a:b:c:d' } }) self.assertEqual(port.serialize(), SERIALIZED_PORT1) def test_attach_client_exception(self): exc = neutron_exceptions.NeutronException() self.neutron_mock.create_port.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider.attach, 'mac_address', 'network_id') def test_detatch(self): ports = {'ports': [PORT1_RESPONSE]} self.neutron_mock.list_ports.return_value = ports network = {'network': NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value = network subnet = {'subnet': SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet self.provider.detach('a:b:c:d') self.neutron_mock.delete_port.assert_called_with(PORT1_RESPONSE['id']) self.neutron_mock.list_ports.assert_called_with(mac_address='a:b:c:d') def test_detach_specific_network(self): ports = {'ports': [PORT1_RESPONSE]} self.neutron_mock.list_ports.return_value = ports network = {'network': NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value = network subnet = {'subnet': SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet self.provider.detach('a:b:c:d', 'network_id') self.neutron_mock.delete_port.assert_called_with(PORT1_RESPONSE['id']) self.neutron_mock.list_ports.assert_called_with( mac_address='a:b:c:d', network_id='network_id') def test_detach_client_exception(self): ports = {'ports': [PORT1_RESPONSE]} self.neutron_mock.list_ports.return_value = ports network = {'network': NETWORK1_RESPONSE} self.neutron_mock.show_network.return_value = network subnet = {'subnet': SUBNET1_RESPONSE} self.neutron_mock.show_subnet.return_value = subnet exc = neutron_exceptions.NeutronException() self.neutron_mock.delete_port.side_effect = exc self.assertRaises(self.provider.NetworkProviderException, self.provider.detach, 'a:b:c:d') def test_get_default_networks(self): network_ids = self.provider.get_default_networks() self.assertEqual(network_ids, [self.config.NEUTRON_PUBLIC_NETWORK, self.config.NEUTRON_PRIVATE_NETWORK]) def test_get_service_network(self): 
network_id = self.provider.get_service_network() self.assertEqual(network_id, self.config.NEUTRON_SERVICE_NETWORK)
[((4728, 4968), 'collections.OrderedDict', 'collections.OrderedDict', (["[('id', u'PORT1'), ('name', u''), ('status', u'ACTIVE'), ('mac_address',\n u'fa:16:3e:e0:d4:63'), ('fixed_ips', [{u'subnet_id': u'SUBNET1',\n u'ip_address': u'10.0.0.3'}]), ('network', SERIALIZED_NETWORK1)]"], {}), "([('id', u'PORT1'), ('name', u''), ('status',\n u'ACTIVE'), ('mac_address', u'fa:16:3e:e0:d4:63'), ('fixed_ips', [{\n u'subnet_id': u'SUBNET1', u'ip_address': u'10.0.0.3'}]), ('network',\n SERIALIZED_NETWORK1)])\n", (4751, 4968), False, 'import collections\n'), ((5183, 5600), 'teeth_overlord.config.LazyConfig', 'config.LazyConfig', ([], {'config': "{'KEYSTONE_USER': 'user', 'KEYSTONE_PASS': 'pass', 'KEYSTONE_TENANT_ID':\n 'tenant', 'KEYSTONE_AUTH_URL': 'auth_url', 'NEUTRON_VERSION': '2.0',\n 'NEUTRON_URL': 'neutron_url', 'NEUTRON_PUBLIC_NETWORK':\n 'd6b32008-1432-4299-81c7-cbe3128ba13f', 'NEUTRON_PRIVATE_NETWORK':\n '2afa16d6-7b84-484f-a642-af243b0e5b10', 'NEUTRON_SERVICE_NETWORK':\n '2afa16d6-7b84-484f-a642-af243b0e5b10'}"}), "(config={'KEYSTONE_USER': 'user', 'KEYSTONE_PASS': 'pass',\n 'KEYSTONE_TENANT_ID': 'tenant', 'KEYSTONE_AUTH_URL': 'auth_url',\n 'NEUTRON_VERSION': '2.0', 'NEUTRON_URL': 'neutron_url',\n 'NEUTRON_PUBLIC_NETWORK': 'd6b32008-1432-4299-81c7-cbe3128ba13f',\n 'NEUTRON_PRIVATE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10',\n 'NEUTRON_SERVICE_NETWORK': '2afa16d6-7b84-484f-a642-af243b0e5b10'})\n", (5200, 5600), False, 'from teeth_overlord import config\n'), ((6018, 6054), 'teeth_overlord.networks.neutron.NeutronProvider', 'neutron.NeutronProvider', (['self.config'], {}), '(self.config)\n', (6041, 6054), False, 'from teeth_overlord.networks import neutron\n'), ((6946, 6983), 'neutronclient.common.exceptions.NeutronException', 'neutron_exceptions.NeutronException', ([], {}), '()\n', (6981, 6983), True, 'from neutronclient.common import exceptions as neutron_exceptions\n'), ((8055, 8092), 'neutronclient.common.exceptions.NeutronException', 'neutron_exceptions.NeutronException', ([], {}), '()\n', (8090, 8092), True, 'from neutronclient.common import exceptions as neutron_exceptions\n'), ((8794, 8831), 'neutronclient.common.exceptions.NeutronException', 'neutron_exceptions.NeutronException', ([], {}), '()\n', (8829, 8831), True, 'from neutronclient.common import exceptions as neutron_exceptions\n'), ((9154, 9191), 'neutronclient.common.exceptions.NeutronException', 'neutron_exceptions.NeutronException', ([], {}), '()\n', (9189, 9191), True, 'from neutronclient.common import exceptions as neutron_exceptions\n'), ((10730, 10767), 'neutronclient.common.exceptions.NeutronException', 'neutron_exceptions.NeutronException', ([], {}), '()\n', (10765, 10767), True, 'from neutronclient.common import exceptions as neutron_exceptions\n'), ((12536, 12573), 'neutronclient.common.exceptions.NeutronException', 'neutron_exceptions.NeutronException', ([], {}), '()\n', (12571, 12573), True, 'from neutronclient.common import exceptions as neutron_exceptions\n'), ((4020, 4200), 'collections.OrderedDict', 'collections.OrderedDict', (["[('id', u'SUBNET1'), ('name', u'private-subnet'), ('ip_version', 4), (\n 'gateway_ip', u'10.0.0.1'), ('cidr', u'10.0.0.0/24'), ('enable_dhcp', True)\n ]"], {}), "([('id', u'SUBNET1'), ('name', u'private-subnet'), (\n 'ip_version', 4), ('gateway_ip', u'10.0.0.1'), ('cidr', u'10.0.0.0/24'),\n ('enable_dhcp', True)])\n", (4043, 4200), False, 'import collections\n'), ((4436, 4624), 'collections.OrderedDict', 'collections.OrderedDict', (["[('id', u'SUBNET2'), ('name', u'public-subnet'), 
('ip_version', 4), (\n 'gateway_ip', u'192.168.27.2'), ('cidr', u'192.168.27.0/24'), (\n 'enable_dhcp', False)]"], {}), "([('id', u'SUBNET2'), ('name', u'public-subnet'), (\n 'ip_version', 4), ('gateway_ip', u'192.168.27.2'), ('cidr',\n u'192.168.27.0/24'), ('enable_dhcp', False)])\n", (4459, 4624), False, 'import collections\n')]
johnyburd/glucometer
classes/settings.py
075a48cff38e0570960fc2b8968bcb8b5ddd647f
def init():
    global brightness
    global calibration_mode
    brightness = 500
    calibration_mode = False
[]
Phoenix-sy/typeidea
typeidea/blog/views.py
e913218872c7f4e9afc290eb42b4ca8c8e4523be
from datetime import date from django.core.cache import cache from django.db.models import Q, F from django.shortcuts import render from django.shortcuts import get_object_or_404 from django.views.generic import ListView, DetailView #from silk.profiling.profiler import silk_profile from config.models import SideBar from .models import Post, Tag, Category from comment.models import Comment class CommonViewMinxin: def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context.update({ 'sidebars': self.get_sidebars(), }) context.update(self.get_navs()) return context def get_sidebars(self): return SideBar.objects.filter(status=SideBar.STATUS_SHOW) def get_navs(self): categories = Category.objects.filter(status=Category.STATUS_NORMAL) nav_categories = [] normal_categories = [] for cate in categories: if cate.is_nav: nav_categories.append(cate) else: normal_categories.append(cate) return { 'navs': nav_categories, 'categories': normal_categories, } class IndexView(CommonViewMinxin, ListView): queryset = Post.latest_posts() paginate_by = 5 context_object_name = 'post_list' template_name = 'blog/list.html' class CategoryView(IndexView): def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) category_id = self.kwargs.get('category_id') category = get_object_or_404(Category, pk=category_id) context.update({ 'category': category, }) return context def get_queryset(self): '''重写queryset,根据分类过滤''' queryset = super().get_queryset() category_id = self.kwargs.get('category_id') return queryset.filter(category_id=category_id) class TagView(IndexView): def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) tag_id = self.kwargs.get('tag_id') tag = get_object_or_404(Tag, pk=tag_id) context.update({ 'tag': tag, }) return context def get_queryset(self): '''重写queryset,根据标签过滤''' queryset = super().get_queryset() tag_id = self.kwargs.get('tag_id') return queryset.filter(tag__id=tag_id) class PostDetailView(CommonViewMinxin, DetailView): queryset = Post.latest_posts() template_name = 'blog/detail.html' context_object_name = 'post' pk_url_kwarg = 'post_id' def get(self, request, *args, **kwargs): response = super().get(request, *args, **kwargs) self.handle_visited() return response def handle_visited(self): increase_pv = False increase_uv = False uid = self.request.uid pv_key = 'pv:%s:%s' % (uid, self.request.path) uv_key = 'uv:%s:%s:%s' % (uid, str(date.today()), self.request.path) if not cache.get(pv_key): increase_pv = True cache.set(pv_key, 1, 1*60) #1分钟有效 if not cache.get(uv_key): increase_uv = True cache.set(uv_key, 1, 24*60*60) if increase_pv and increase_uv: Post.objects.filter(pk=self.object.id).update(pv=F('pv') + 1, uv=F('uv') + 1) elif increase_pv: Post.objects.filter(pk=self.object.id).update(pv=F('pv') + 1) elif increase_pv: Post.objects.filter(pk=self.object.id).update(pv=F('uv') + 1) class SearchView(IndexView): def get_context_data(self): context = super().get_context_data() context.update({ 'keyword': self.request.GET.get('keyword', '') }) return context def get_queryset(self): queryset = super().get_queryset() keyword = self.request.GET.get('keyword') if not keyword: return queryset return queryset.filter(Q(title__icontains=keyword) | Q(desc__icontains =keyword)) class AuthorView(IndexView): def get_queryset(self): queryset = super().get_queryset() author_id = self.kwargs.get('owner_id') return queryset.filter(owner_id=author_id) ''' def post_list(request, category_id=None, tag_id=None): tag = None category = None 
if tag_id: post_list, tag = Post.get_by_tag(tag_id) elif category_id: post_list, category=Post.get_by_category(category_id) else: post_list = Post.latest_posts() context = { 'category': category, 'tag': tag, 'post_list': post_list, 'sidebars': SideBar.get_all(), } context.update(Category.get_navs()) return render(request, 'blog/list.html', context=context) def post_detail(request, post_id=None): try: post = Post.objects.get(id=post_id) except Post.DoesNotExist: raise Http404('Post does not exist!') context={ 'post': post, 'sidebars': SideBar.get_all(), } context.update(Category.get_navs()) return render(request, 'blog/detail.html', context=context) '''
[((653, 703), 'config.models.SideBar.objects.filter', 'SideBar.objects.filter', ([], {'status': 'SideBar.STATUS_SHOW'}), '(status=SideBar.STATUS_SHOW)\n', (675, 703), False, 'from config.models import SideBar\n'), ((1387, 1430), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Category'], {'pk': 'category_id'}), '(Category, pk=category_id)\n', (1404, 1430), False, 'from django.shortcuts import get_object_or_404\n'), ((1843, 1876), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Tag'], {'pk': 'tag_id'}), '(Tag, pk=tag_id)\n', (1860, 1876), False, 'from django.shortcuts import get_object_or_404\n'), ((2642, 2659), 'django.core.cache.cache.get', 'cache.get', (['pv_key'], {}), '(pv_key)\n', (2651, 2659), False, 'from django.core.cache import cache\n'), ((2686, 2714), 'django.core.cache.cache.set', 'cache.set', (['pv_key', '(1)', '(1 * 60)'], {}), '(pv_key, 1, 1 * 60)\n', (2695, 2714), False, 'from django.core.cache import cache\n'), ((2733, 2750), 'django.core.cache.cache.get', 'cache.get', (['uv_key'], {}), '(uv_key)\n', (2742, 2750), False, 'from django.core.cache import cache\n'), ((2777, 2811), 'django.core.cache.cache.set', 'cache.set', (['uv_key', '(1)', '(24 * 60 * 60)'], {}), '(uv_key, 1, 24 * 60 * 60)\n', (2786, 2811), False, 'from django.core.cache import cache\n'), ((3457, 3484), 'django.db.models.Q', 'Q', ([], {'title__icontains': 'keyword'}), '(title__icontains=keyword)\n', (3458, 3484), False, 'from django.db.models import Q, F\n'), ((3487, 3513), 'django.db.models.Q', 'Q', ([], {'desc__icontains': 'keyword'}), '(desc__icontains=keyword)\n', (3488, 3513), False, 'from django.db.models import Q, F\n'), ((2599, 2611), 'datetime.date.today', 'date.today', ([], {}), '()\n', (2609, 2611), False, 'from datetime import date\n'), ((2895, 2902), 'django.db.models.F', 'F', (['"""pv"""'], {}), "('pv')\n", (2896, 2902), False, 'from django.db.models import Q, F\n'), ((2915, 2922), 'django.db.models.F', 'F', (['"""uv"""'], {}), "('uv')\n", (2916, 2922), False, 'from django.db.models import Q, F\n'), ((3000, 3007), 'django.db.models.F', 'F', (['"""pv"""'], {}), "('pv')\n", (3001, 3007), False, 'from django.db.models import Q, F\n'), ((3085, 3092), 'django.db.models.F', 'F', (['"""uv"""'], {}), "('uv')\n", (3086, 3092), False, 'from django.db.models import Q, F\n')]
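The PostDetailView.handle_visited method in the file above counts page views (pv) and unique visits (uv) by setting short- and long-lived cache keys per visitor. A minimal sketch of that pattern, using the same Django cache API; the helper name and key layout are illustrative, not part of the original code:

from django.core.cache import cache

def count_visit(uid, path):
    # Hypothetical helper illustrating the pv/uv pattern from handle_visited.
    pv_key = 'pv:%s:%s' % (uid, path)        # short TTL -> counts repeated views
    uv_key = 'uv:%s:%s' % (uid, path)        # day-long TTL -> counts unique visits
    increase_pv = not cache.get(pv_key)
    increase_uv = not cache.get(uv_key)
    if increase_pv:
        cache.set(pv_key, 1, 60)             # 1 minute
    if increase_uv:
        cache.set(uv_key, 1, 24 * 60 * 60)   # 24 hours
    return increase_pv, increase_uv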
thomasbarillot/DAQ
VMI/VMItest.py
20126655f74194757d25380680af9429ff27784e
# -*- coding: utf-8 -*-
"""
Created on Sat May 7 11:38:18 2016

@author: thomasbarillot

VMI control
"""

from ctypes import cdll
#slib="VMIcrtl_ext.dll"
#hlib=cdll('VMIcrtl.dll')

import VMIcrtl_ext

test = VMIcrtl_ext.VMIcrtl()

#%%
print test.GetFilename()

#%%
test.setFilename('20161115_1841.dat')
print test.GetFilename()

#%%
test.StartAcquisitionPrev()

#%%
test.StopAcquisition()

#%%
img = test.RecallImagePrev()

#%%
import numpy as np
print np.shape(img)
a = np.array(img)
print a

#%%
from matplotlib import pyplot as plt

#%%
b = np.reshape(a, [400, 400])
print b
plt.figure()
plt.pcolor(np.reshape(a, [400, 400]))
[]
vitodb/spack
var/spack/repos/builtin/packages/openssl/package.py
b9ab1de4c5f7b21d9f9cb88b7251820a48e82d27
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) import llnl.util.tty as tty from spack import * import spack.architecture import os class Openssl(Package): # Uses Fake Autotools, should subclass Package """OpenSSL is an open source project that provides a robust, commercial-grade, and full-featured toolkit for the Transport Layer Security (TLS) and Secure Sockets Layer (SSL) protocols. It is also a general-purpose cryptography library.""" homepage = "http://www.openssl.org" # URL must remain http:// so Spack can bootstrap curl url = "http://www.openssl.org/source/openssl-1.1.1d.tar.gz" list_url = "http://www.openssl.org/source/old/" list_depth = 1 # The latest stable version is the 1.1.1 series. This is also our Long Term # Support (LTS) version, supported until 11th September 2023. version('1.1.1g', sha256='ddb04774f1e32f0c49751e21b67216ac87852ceb056b75209af2443400636d46') version('1.1.1f', sha256='186c6bfe6ecfba7a5b48c47f8a1673d0f3b0e5ba2e25602dd23b629975da3f35') version('1.1.1e', sha256='694f61ac11cb51c9bf73f54e771ff6022b0327a43bbdfa1b2f19de1662a6dcbe') version('1.1.1d', sha256='1e3a91bc1f9dfce01af26026f856e064eab4c8ee0a8f457b5ae30b40b8b711f2') version('1.1.1c', sha256='f6fb3079ad15076154eda9413fed42877d668e7069d9b87396d0804fdb3f4c90') version('1.1.1b', sha256='5c557b023230413dfb0756f3137a13e6d726838ccd1430888ad15bfb2b43ea4b') version('1.1.1a', sha256='fc20130f8b7cbd2fb918b2f14e2f429e109c31ddd0fb38fc5d71d9ffed3f9f41') version('1.1.1', sha256='2836875a0f89c03d0fdf483941512613a50cfb421d6fd94b9f41d7279d586a3d') # The 1.1.0 series is out of support and should not be used. version('1.1.0l', sha256='74a2f756c64fd7386a29184dc0344f4831192d61dc2481a93a4c5dd727f41148') version('1.1.0k', sha256='efa4965f4f773574d6cbda1cf874dbbe455ab1c0d4f906115f867d30444470b1') version('1.1.0j', sha256='31bec6c203ce1a8e93d5994f4ed304c63ccf07676118b6634edded12ad1b3246') version('1.1.0i', sha256='ebbfc844a8c8cc0ea5dc10b86c9ce97f401837f3fa08c17b2cdadc118253cf99') version('1.1.0g', sha256='de4d501267da39310905cb6dc8c6121f7a2cad45a7707f76df828fe1b85073af') version('1.1.0e', sha256='57be8618979d80c910728cfc99369bf97b2a1abd8f366ab6ebdee8975ad3874c') version('1.1.0d', sha256='7d5ebb9e89756545c156ff9c13cf2aa6214193b010a468a3bc789c3c28fe60df') version('1.1.0c', sha256='fc436441a2e05752d31b4e46115eb89709a28aef96d4fe786abe92409b2fd6f5') # The 1.0.2 series is out of support and should not be used. 
version('1.0.2u', sha256='ecd0c6ffb493dd06707d38b14bb4d8c2288bb7033735606569d8f90f89669d16') version('1.0.2t', sha256='14cb464efe7ac6b54799b34456bd69558a749a4931ecfd9cf9f71d7881cac7bc') version('1.0.2s', sha256='cabd5c9492825ce5bd23f3c3aeed6a97f8142f606d893df216411f07d1abab96') version('1.0.2r', sha256='ae51d08bba8a83958e894946f15303ff894d75c2b8bbd44a852b64e3fe11d0d6') version('1.0.2p', sha256='50a98e07b1a89eb8f6a99477f262df71c6fa7bef77df4dc83025a2845c827d00') version('1.0.2o', sha256='ec3f5c9714ba0fd45cb4e087301eb1336c317e0d20b575a125050470e8089e4d') version('1.0.2n', sha256='370babb75f278c39e0c50e8c4e7493bc0f18db6867478341a832a982fd15a8fe') version('1.0.2m', sha256='8c6ff15ec6b319b50788f42c7abc2890c08ba5a1cdcd3810eb9092deada37b0f') version('1.0.2k', sha256='6b3977c61f2aedf0f96367dcfb5c6e578cf37e7b8d913b4ecb6643c3cb88d8c0') version('1.0.2j', sha256='e7aff292be21c259c6af26469c7a9b3ba26e9abaaffd325e3dccc9785256c431') version('1.0.2i', sha256='9287487d11c9545b6efb287cdb70535d4e9b284dd10d51441d9b9963d000de6f') version('1.0.2h', sha256='1d4007e53aad94a5b2002fe045ee7bb0b3d98f1a47f8b2bc851dcd1c74332919') version('1.0.2g', sha256='b784b1b3907ce39abf4098702dade6365522a253ad1552e267a9a0e89594aa33') version('1.0.2f', sha256='932b4ee4def2b434f85435d9e3e19ca8ba99ce9a065a61524b429a9d5e9b2e9c') version('1.0.2e', sha256='e23ccafdb75cfcde782da0151731aa2185195ac745eea3846133f2e05c0e0bff') version('1.0.2d', sha256='671c36487785628a703374c652ad2cebea45fa920ae5681515df25d9f2c9a8c8') # The 1.0.1 version is out of support and should not be used. version('1.0.1u', sha256='4312b4ca1215b6f2c97007503d80db80d5157f76f8f7d3febbe6b4c56ff26739') version('1.0.1t', sha256='4a6ee491a2fdb22e519c76fdc2a628bb3cec12762cd456861d207996c8a07088') version('1.0.1r', sha256='784bd8d355ed01ce98b812f873f8b2313da61df7c7b5677fcf2e57b0863a3346') version('1.0.1h', sha256='9d1c8a9836aa63e2c6adb684186cbd4371c9e9dcc01d6e3bb447abf2d4d3d093') version('1.0.1e', sha256='f74f15e8c8ff11aa3d5bb5f276d202ec18d7246e95f961db76054199c69c1ae3') variant('systemcerts', default=True, description='Use system certificates') depends_on('zlib') depends_on('[email protected]:', type=('build', 'test')) parallel = False @property def libs(self): return find_libraries(['libssl', 'libcrypto'], root=self.prefix.lib) def handle_fetch_error(self, error): tty.warn("Fetching OpenSSL failed. This may indicate that OpenSSL has " "been updated, and the version in your instance of Spack is " "insecure. Consider updating to the latest OpenSSL version.") def install(self, spec, prefix): # OpenSSL uses a variable APPS in its Makefile. If it happens to be set # in the environment, then this will override what is set in the # Makefile, leading to build errors. env.pop('APPS', None) if str(spec.target.family) in ('x86_64', 'ppc64'): # This needs to be done for all 64-bit architectures (except Linux, # where it happens automatically?) env['KERNEL_BITS'] = '64' options = ['zlib', 'shared'] if spec.satisfies('@1.0'): options.append('no-krb5') # clang does not support the .arch directive in assembly files. if 'clang' in self.compiler.cc and \ 'aarch64' in spack.architecture.sys_type(): options.append('no-asm') config = Executable('./config') config('--prefix=%s' % prefix, '--openssldir=%s' % join_path(prefix, 'etc', 'openssl'), '-I{0}'.format(self.spec['zlib'].prefix.include), '-L{0}'.format(self.spec['zlib'].prefix.lib), *options) # Remove non-standard compiler options if present. These options are # present e.g. on Darwin. They are non-standard, i.e. most compilers # (e.g. 
gcc) will not accept them. filter_file(r'-arch x86_64', '', 'Makefile') make() if self.run_tests: make('test') # 'VERBOSE=1' make('install') @run_after('install') def link_system_certs(self): if '+systemcerts' not in self.spec: return system_dirs = [ # CentOS, Fedora, RHEL '/etc/pki/tls', # Ubuntu '/usr/lib/ssl', # OpenSUSE '/etc/ssl' ] pkg_dir = join_path(self.prefix, 'etc', 'openssl') for directory in system_dirs: sys_cert = join_path(directory, 'cert.pem') pkg_cert = join_path(pkg_dir, 'cert.pem') # If a bundle exists, use it. This is the preferred way on Fedora, # where the certs directory does not work. if os.path.exists(sys_cert) and not os.path.exists(pkg_cert): os.symlink(sys_cert, pkg_cert) sys_certs = join_path(directory, 'certs') pkg_certs = join_path(pkg_dir, 'certs') # If the certs directory exists, symlink it into the package. # We symlink the whole directory instead of all files because # the directory contents might change without Spack noticing. if os.path.isdir(sys_certs) and not os.path.islink(pkg_certs): os.rmdir(pkg_certs) os.symlink(sys_certs, pkg_certs)
[((5138, 5337), 'llnl.util.tty.warn', 'tty.warn', (['"""Fetching OpenSSL failed. This may indicate that OpenSSL has been updated, and the version in your instance of Spack is insecure. Consider updating to the latest OpenSSL version."""'], {}), "(\n 'Fetching OpenSSL failed. This may indicate that OpenSSL has been updated, and the version in your instance of Spack is insecure. Consider updating to the latest OpenSSL version.'\n )\n", (5146, 5337), True, 'import llnl.util.tty as tty\n'), ((7524, 7548), 'os.path.exists', 'os.path.exists', (['sys_cert'], {}), '(sys_cert)\n', (7538, 7548), False, 'import os\n'), ((7599, 7629), 'os.symlink', 'os.symlink', (['sys_cert', 'pkg_cert'], {}), '(sys_cert, pkg_cert)\n', (7609, 7629), False, 'import os\n'), ((7974, 7998), 'os.path.isdir', 'os.path.isdir', (['sys_certs'], {}), '(sys_certs)\n', (7987, 7998), False, 'import os\n'), ((8050, 8069), 'os.rmdir', 'os.rmdir', (['pkg_certs'], {}), '(pkg_certs)\n', (8058, 8069), False, 'import os\n'), ((8086, 8118), 'os.symlink', 'os.symlink', (['sys_certs', 'pkg_certs'], {}), '(sys_certs, pkg_certs)\n', (8096, 8118), False, 'import os\n'), ((7557, 7581), 'os.path.exists', 'os.path.exists', (['pkg_cert'], {}), '(pkg_cert)\n', (7571, 7581), False, 'import os\n'), ((8007, 8032), 'os.path.islink', 'os.path.islink', (['pkg_certs'], {}), '(pkg_certs)\n', (8021, 8032), False, 'import os\n')]
izaid/vispy
vispy/util/profiler.py
402cf95bfef88d70c9c45bb27c532ed72944e14a
# -*- coding: utf-8 -*- # Copyright (c) 2014, Vispy Development Team. # Distributed under the (new) BSD License. See LICENSE.txt for more info. # Adapted from PyQtGraph import sys from . import ptime from .. import config class Profiler(object): """Simple profiler allowing directed, hierarchical measurement of time intervals. By default, profilers are disabled. To enable profiling, set the environment variable `VISPYPROFILE` to a comma-separated list of fully-qualified names of profiled functions. Calling a profiler registers a message (defaulting to an increasing counter) that contains the time elapsed since the last call. When the profiler is about to be garbage-collected, the messages are passed to the outer profiler if one is running, or printed to stdout otherwise. If `delayed` is set to False, messages are immediately printed instead. Example: def function(...): profiler = Profiler() ... do stuff ... profiler('did stuff') ... do other stuff ... profiler('did other stuff') # profiler is garbage-collected and flushed at function end If this function is a method of class C, setting `VISPYPROFILE` to "C.function" (without the module name) will enable this profiler. For regular functions, use the qualified name of the function, stripping only the initial "vispy.." prefix from the module. """ _profilers = (config['profile'].split(",") if config['profile'] is not None else []) _depth = 0 _msgs = [] # set this flag to disable all or individual profilers at runtime disable = False class DisabledProfiler(object): def __init__(self, *args, **kwds): pass def __call__(self, *args): pass def finish(self): pass def mark(self, msg=None): pass _disabled_profiler = DisabledProfiler() def __new__(cls, msg=None, disabled='env', delayed=True): """Optionally create a new profiler based on caller's qualname. """ if (disabled is True or (disabled == 'env' and len(cls._profilers) == 0)): return cls._disabled_profiler # determine the qualified name of the caller function caller_frame = sys._getframe(1) try: caller_object_type = type(caller_frame.f_locals["self"]) except KeyError: # we are in a regular function qualifier = caller_frame.f_globals["__name__"].split(".", 1)[1] else: # we are in a method qualifier = caller_object_type.__name__ func_qualname = qualifier + "." + caller_frame.f_code.co_name if (disabled == 'env' and func_qualname not in cls._profilers and 'all' not in cls._profilers): # don't do anything return cls._disabled_profiler # create an actual profiling object cls._depth += 1 obj = super(Profiler, cls).__new__(cls) obj._name = msg or func_qualname obj._delayed = delayed obj._mark_count = 0 obj._finished = False obj._firstTime = obj._last_time = ptime.time() obj._new_msg("> Entering " + obj._name) return obj def __call__(self, msg=None, *args): """Register or print a new message with timing information. """ if self.disable: return if msg is None: msg = str(self._mark_count) self._mark_count += 1 new_time = ptime.time() elapsed = (new_time - self._last_time) * 1000 self._new_msg(" " + msg + ": %0.4f ms", *(args + (elapsed,))) self._last_time = new_time def mark(self, msg=None): self(msg) def _new_msg(self, msg, *args): msg = " " * (self._depth - 1) + msg if self._delayed: self._msgs.append((msg, args)) else: self.flush() print(msg % args) def __del__(self): self.finish() def finish(self, msg=None): """Add a final message; flush the message list if no parent profiler. 
""" if self._finished or self.disable: return self._finished = True if msg is not None: self(msg) self._new_msg("< Exiting %s, total time: %0.4f ms", self._name, (ptime.time() - self._firstTime) * 1000) type(self)._depth -= 1 if self._depth < 1: self.flush() def flush(self): if self._msgs: print("\n".join([m[0] % m[1] for m in self._msgs])) type(self)._msgs = []
[((2435, 2451), 'sys._getframe', 'sys._getframe', (['(1)'], {}), '(1)\n', (2448, 2451), False, 'import sys\n')]
vijithv/djangosaml2idp
tests/test_processor.py
8a238063da55bf4823bdc2192168171767c4e056
from django.contrib.auth import get_user_model

from djangosaml2idp.processors import BaseProcessor

User = get_user_model()


class TestBaseProcessor:
    def test_extract_user_id_configure_by_user_class(self):
        user = User()
        user.USERNAME_FIELD = 'email'
        user.email = 'test_email'

        assert BaseProcessor('entity-id').get_user_id(user) == 'test_email'

    def test_extract_user_id_configure_by_settings(self, settings):
        """Should use `settings.SAML_IDP_DJANGO_USERNAME_FIELD` to determine the user id field"""
        settings.SAML_IDP_DJANGO_USERNAME_FIELD = 'first_name'
        user = User()
        user.first_name = 'test_first_name'

        assert BaseProcessor('entity-id').get_user_id(user) == 'test_first_name'
[((107, 123), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (121, 123), False, 'from django.contrib.auth import get_user_model\n'), ((323, 349), 'djangosaml2idp.processors.BaseProcessor', 'BaseProcessor', (['"""entity-id"""'], {}), "('entity-id')\n", (336, 349), False, 'from djangosaml2idp.processors import BaseProcessor\n'), ((698, 724), 'djangosaml2idp.processors.BaseProcessor', 'BaseProcessor', (['"""entity-id"""'], {}), "('entity-id')\n", (711, 724), False, 'from djangosaml2idp.processors import BaseProcessor\n')]
sasikrishna/python-programs
com/ds/SingleLinkedList.py
937002f37c86efc5c876b37c7b42634ca629fffc
class Node:
    def __init__(self, data):
        self.data = data
        self.prev = None
        self.next = None


class SingleLinkedList:
    def __init__(self):
        self.head = None

    def add(self, ele):
        new_node = Node(ele)
        if self.head is None:
            self.head = new_node
            return
        temp_head = self.head
        while temp_head.next is not None:
            temp_head = temp_head.next
        temp_head.next = new_node

    def contains(self, ele):
        temp_head = self.head
        while temp_head is not None:
            if temp_head.data == ele:
                return True
            temp_head = temp_head.next
        return False

    def remove(self, ele):
        if self.head is None:
            return False
        if self.head.data == ele:
            self.head = self.head.next
            return True
        # Start prev_node at the head so the node right after it can be unlinked.
        prev_node = self.head
        temp_head = self.head.next
        is_node_deleted = False
        while temp_head is not None:
            if temp_head.data == ele:
                is_node_deleted = True
                prev_node.next = temp_head.next
                break
            prev_node = temp_head
            temp_head = temp_head.next
        return is_node_deleted

    def print_list(self):
        temp_head = self.head
        while temp_head is not None:
            print(temp_head.data)
            temp_head = temp_head.next


if __name__ == '__main__':
    linked_list = SingleLinkedList()
    linked_list.add(5)
    linked_list.add(4)
    linked_list.add(12)
    linked_list.add(13)
    linked_list.add(19)
    linked_list.print_list()
    print("List contains element 4", linked_list.contains(4))
    print("List contains element 6", linked_list.contains(6))
    print("Removing element 13", linked_list.remove(13))
    linked_list.print_list()
    print("List contains element 13", linked_list.contains(13))
[]
data-stories/chart-experiment
src/data_setup/__init__.py
f4d7c86c32edca8bcb474cce5f6312138acf5cc9
__all__ = ["data_setup", "chart_params", "base_params"]
[]
rudaporto/aiocomcrawl
src/aiocomcrawl/models.py
9f76097d9f82c5790f968d26a6f1c3908084569b
from datetime import datetime
from typing import Any, List, Optional, Union

from pydantic import BaseModel, Field, HttpUrl, validator
from pydantic.dataclasses import dataclass


class Index(BaseModel):
    id: str
    name: str
    time_gate: HttpUrl = Field(alias="timegate")
    cdx_api: HttpUrl = Field(alias="cdx-api")


@dataclass(frozen=True)
class ResultBody:
    mime_detected: Optional[str]
    data: Optional[str]
    text: Optional[List[str]]


@dataclass(frozen=True)
class ResultMeta:
    # todo: these are still raw strings
    warc_request_meta: Optional[str]
    response_header: Optional[str]


class Result(BaseModel):
    url_key: str = Field(alias="urlkey")
    timestamp: datetime
    url: str
    mime: str
    mime_detected: str = Field(alias="mime-detected")
    status: int
    digest: str
    length: int
    offset: int
    filename: str
    languages: Optional[str]
    encoding: Optional[str]
    index_id: Optional[str]
    body: Optional[ResultBody]
    meta: Optional[ResultMeta]

    @validator("timestamp", pre=True)
    def parse_timestamp(cls, value: Any) -> Union[datetime, Any]:
        if isinstance(value, str):
            datetime_value = datetime.strptime(value, "%Y%m%d%H%M%S")
            return datetime_value
        return value


class SearchPagesRequest(BaseModel):
    """Request existing pages on one index for a given url."""

    index: Index
    url: str
    show_num_pages: str = Field(alias="showNumPages", default="true", const=True)
    output: str = "json"


class SearchPagesResponse(BaseModel):
    """Response with the total number of pages in this index for a given url."""

    index: Index
    url: str
    pages: int


class SearchIndexRequest(BaseModel):
    """One page that contains records to be fetched."""

    index: Index
    url: str
    page: int
    output: str = "json"
[((328, 350), 'pydantic.dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (337, 350), False, 'from pydantic.dataclasses import dataclass\n'), ((459, 481), 'pydantic.dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (468, 481), False, 'from pydantic.dataclasses import dataclass\n'), ((255, 278), 'pydantic.Field', 'Field', ([], {'alias': '"""timegate"""'}), "(alias='timegate')\n", (260, 278), False, 'from pydantic import BaseModel, Field, HttpUrl, validator\n'), ((302, 324), 'pydantic.Field', 'Field', ([], {'alias': '"""cdx-api"""'}), "(alias='cdx-api')\n", (307, 324), False, 'from pydantic import BaseModel, Field, HttpUrl, validator\n'), ((658, 679), 'pydantic.Field', 'Field', ([], {'alias': '"""urlkey"""'}), "(alias='urlkey')\n", (663, 679), False, 'from pydantic import BaseModel, Field, HttpUrl, validator\n'), ((756, 784), 'pydantic.Field', 'Field', ([], {'alias': '"""mime-detected"""'}), "(alias='mime-detected')\n", (761, 784), False, 'from pydantic import BaseModel, Field, HttpUrl, validator\n'), ((1020, 1052), 'pydantic.validator', 'validator', (['"""timestamp"""'], {'pre': '(True)'}), "('timestamp', pre=True)\n", (1029, 1052), False, 'from pydantic import BaseModel, Field, HttpUrl, validator\n'), ((1438, 1493), 'pydantic.Field', 'Field', ([], {'alias': '"""showNumPages"""', 'default': '"""true"""', 'const': '(True)'}), "(alias='showNumPages', default='true', const=True)\n", (1443, 1493), False, 'from pydantic import BaseModel, Field, HttpUrl, validator\n'), ((1183, 1223), 'datetime.datetime.strptime', 'datetime.strptime', (['value', '"""%Y%m%d%H%M%S"""'], {}), "(value, '%Y%m%d%H%M%S')\n", (1200, 1223), False, 'from datetime import datetime\n')]
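The models above rely on pydantic field aliases ("timegate", "cdx-api", "urlkey") and a pre-validator that parses the 14-digit timestamp. A minimal usage sketch, assuming pydantic v1 semantics (input matched by alias); the URLs and record values are made up, not taken from the repository:

index = Index.parse_obj({
    "id": "CC-MAIN-2023-06",
    "name": "January 2023 index",
    "timegate": "https://index.commoncrawl.org/CC-MAIN-2023-06/",
    "cdx-api": "https://index.commoncrawl.org/CC-MAIN-2023-06-index",
})

result = Result.parse_obj({
    "urlkey": "org,example)/",
    "timestamp": "20230115120000",
    "url": "https://example.org/",
    "mime": "text/html",
    "mime-detected": "text/html",
    "status": 200,
    "digest": "HYPOTHETICALDIGEST",
    "length": 1234,
    "offset": 0,
    "filename": "crawl-data/CC-MAIN-2023-06/warc/file0.warc.gz",
})
assert result.timestamp.year == 2023  # string parsed by the pre-validator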
EnjoyLifeFund/macHighSierra-py36-pkgs
fs/error_tools.py
5668b5785296b314ea1321057420bcd077dba9ea
"""Tools for managing OS errors. """ from __future__ import print_function from __future__ import unicode_literals import errno from contextlib import contextmanager import sys import platform from . import errors from six import reraise _WINDOWS_PLATFORM = platform.system() == 'Windows' class _ConvertOSErrors(object): """Context manager to convert OSErrors in to FS Errors. """ FILE_ERRORS = { 64: errors.RemoteConnectionError, # ENONET errno.EACCES: errors.PermissionDenied, errno.ENOENT: errors.ResourceNotFound, errno.EFAULT: errors.ResourceNotFound, errno.ESRCH: errors.ResourceNotFound, errno.ENOTEMPTY: errors.DirectoryNotEmpty, errno.EEXIST: errors.FileExists, 183: errors.DirectoryExists, #errno.ENOTDIR: errors.DirectoryExpected, errno.ENOTDIR: errors.ResourceNotFound, errno.EISDIR: errors.FileExpected, errno.EINVAL: errors.FileExpected, errno.ENOSPC: errors.InsufficientStorage, errno.EPERM: errors.PermissionDenied, errno.ENETDOWN: errors.RemoteConnectionError, errno.ECONNRESET: errors.RemoteConnectionError, errno.ENAMETOOLONG: errors.PathError, errno.EOPNOTSUPP: errors.Unsupported, errno.ENOSYS: errors.Unsupported, } DIR_ERRORS = FILE_ERRORS.copy() DIR_ERRORS[errno.ENOTDIR] = errors.DirectoryExpected DIR_ERRORS[errno.EEXIST] = errors.DirectoryExists DIR_ERRORS[errno.EINVAL] = errors.DirectoryExpected if _WINDOWS_PLATFORM: # pragma: no cover DIR_ERRORS[13] = errors.DirectoryExpected DIR_ERRORS[267] = errors.DirectoryExpected FILE_ERRORS[13] = errors.FileExpected def __init__(self, opname, path, directory=False): self._opname = opname self._path = path self._directory = directory def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): os_errors = ( self.DIR_ERRORS if self._directory else self.FILE_ERRORS ) if exc_type and isinstance(exc_value, EnvironmentError): _errno = exc_value.errno fserror = os_errors.get(_errno, errors.OperationFailed) if _errno == errno.EACCES and sys.platform == "win32": if getattr(exc_value, 'args', None) == 32: # pragma: no cover fserror = errors.ResourceLocked reraise( fserror, fserror( self._path, exc=exc_value ), traceback ) # Stops linter complaining about invalid class name convert_os_errors = _ConvertOSErrors @contextmanager def unwrap_errors(path_replace): """Get a context to map OS errors to their `fs.errors` counterpart. The context will re-write the paths in resource exceptions to be in the same context as the wrapped filesystem. The only parameter may be the path from the parent, if only one path is to be unwrapped. Or it may be a dictionary that maps wrapped paths on to unwrapped paths. """ try: yield except errors.ResourceError as e: if hasattr(e, 'path'): if isinstance(path_replace, dict): e.path = path_replace.get(e.path, e.path) else: e.path = path_replace reraise(type(e), e)
[((264, 281), 'platform.system', 'platform.system', ([], {}), '()\n', (279, 281), False, 'import platform\n')]
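convert_os_errors above is a context manager that maps OSError/errno values onto fs.errors exception types. A small sketch of how such a wrapper is typically driven; the operation name and path are illustrative, and the import assumes the module path shown for this file:

import os
from fs.error_tools import convert_os_errors

path = "/no/such/resource"
with convert_os_errors("getinfo", path):
    # An ENOENT raised here is re-raised as errors.ResourceNotFound(path).
    os.stat(path)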
gaberger/pysdn
samples/samplenetconf/demos/vr_demo3.py
67442e1c259d8ca8620ada95b95977e3852463c5
#!/usr/bin/python # Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from this # software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF # THE POSSIBILITY OF SUCH DAMAGE. """ @authors: Sergei Garbuzov @status: Development @version: 1.1.0 """ import time import json from pysdn.controller.controller import Controller from pysdn.netconfdev.vrouter.vrouter5600 import VRouter5600 from pysdn.common.status import STATUS from pysdn.common.utils import load_dict_from_file def vr_demo_3(): f = "cfg4.yml" d = {} if(load_dict_from_file(f, d) is False): print("Config file '%s' read error: " % f) exit() try: ctrlIpAddr = d['ctrlIpAddr'] ctrlPortNum = d['ctrlPortNum'] ctrlUname = d['ctrlUname'] ctrlPswd = d['ctrlPswd'] nodeName = d['nodeName'] nodeIpAddr = d['nodeIpAddr'] nodePortNum = d['nodePortNum'] nodeUname = d['nodeUname'] nodePswd = d['nodePswd'] rundelay = d['rundelay'] except: print ("Failed to get Controller device attributes") exit(0) print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<") print ("<<< Demo Start") print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<") print ("\n") ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd) vrouter = VRouter5600(ctrl, nodeName, nodeIpAddr, nodePortNum, nodeUname, nodePswd) print ("<<< 'Controller': %s, '%s': %s" % (ctrlIpAddr, nodeName, nodeIpAddr)) print ("\n") time.sleep(rundelay) node_configured = False result = ctrl.check_node_config_status(nodeName) status = result.get_status() if(status.eq(STATUS.NODE_CONFIGURED)): node_configured = True print ("<<< '%s' is configured on the Controller" % nodeName) elif(status.eq(STATUS.DATA_NOT_FOUND)): node_configured = False else: print ("\n") print "Failed to get configuration status for the '%s'" % nodeName print ("!!!Demo terminated, reason: %s" % status.detailed()) exit(0) if node_configured is False: result = ctrl.add_netconf_node(vrouter) status = result.get_status() if(status.eq(STATUS.OK)): print ("<<< '%s' added to the Controller" % nodeName) else: print ("\n") print ("!!!Demo terminated, reason: %s" % status.detailed()) exit(0) print ("\n") time.sleep(rundelay) result = 
ctrl.check_node_conn_status(nodeName) status = result.get_status() if(status.eq(STATUS.NODE_CONNECTED)): print ("<<< '%s' is connected to the Controller" % nodeName) else: print ("\n") print ("!!!Demo terminated, reason: %s" % status.brief().lower()) exit(0) print("\n") print ("<<< Show configuration of the '%s'" % nodeName) time.sleep(rundelay) result = vrouter.get_cfg() status = result.get_status() if(status.eq(STATUS.OK)): print ("'%s' configuration:" % nodeName) cfg = result.get_data() data = json.loads(cfg) print json.dumps(data, indent=4) else: print ("\n") print ("!!!Demo terminated, reason: %s" % status.brief().lower()) exit(0) print "\n" print (">>> Remove '%s' NETCONF node from the Controller" % nodeName) time.sleep(rundelay) result = ctrl.delete_netconf_node(vrouter) status = result.get_status() if(status.eq(STATUS.OK)): print ("'%s' NETCONF node was successfully removed " "from the Controller" % nodeName) else: print ("\n") print ("!!!Demo terminated, reason: %s" % status.brief()) exit(0) print ("\n") print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>") print (">>> Demo End") print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>") if __name__ == "__main__": vr_demo_3()
[]
stijnvanhulle/EscapeGame
python/index.py
ae3e35334d64394a0f696149bfd56c1fd7a97681
# @Author: Stijn Van Hulle <stijnvanhulle> # @Date: 2016-11-28T13:51:38+01:00 # @Email: [email protected] # @Last modified by: stijnvanhulle # @Last modified time: 2016-12-20T12:51:07+01:00 # @License: stijnvanhulle.be #!/usr/bin/env python import time import datetime import math import sys import json import paho.mqtt.client as mqtt import paho.mqtt.publish as publish import lib.faceDetection as faceDetection import lib.levelCalculation as levelCalculation MQTT_BROKER="localhost" client = mqtt.Client() #classes def on_connect(client, userdata, rc): print("Connected to MQTT-broker on " + MQTT_BROKER ) client.subscribe("online") client.subscribe("message") client.subscribe("detection_find") client.subscribe("detection_found") client.subscribe("recalculate_start") client.subscribe("recalculate_done") def on_message(client, userdata, msg): try: parsed_json=json.loads(convertJson(msg.payload)) if msg.topic=="detection_find": print(parsed_json) _image1 =parsed_json['image1'] _image2 =parsed_json['image2'] _read=parsed_json['read'] if _read: if _image1 is not None and _image2 is not None: percent=faceDetection.getDifference(_image1,_image2) print('Detection:' + str(percent)) client.publish("detection_found", makeJsonObject_detection(percent,_image1,_image2,_read)) if msg.topic=="recalculate_start": print(parsed_json) _data =parsed_json['data'] _file=parsed_json['file'] if _data is not None: calcObj=levelCalculation.calculate(_data,_file) print('CalculatedOBJ:' + str(calcObj)) client.publish("recalculate_done", makeJsonObject_levelCalculate(calcObj['data'],calcObj['score'])) except Exception as error: print('Error:',error) def convertJson(data): data=data.decode() if data.startswith("'") and data.endswith("'"): data = data[1:-1] print(data) return data def makeJsonOnlineObject(device=''): item=json.dumps({"device":device}) return str(item) def init(): client.on_connect = on_connect client.on_message = on_message client.connect_async(MQTT_BROKER, 1883, 60) client.loop_start() time.sleep(0.2) client.publish("online", makeJsonOnlineObject('FaceDetection')) def makeJsonObject(value=None,port=None,type=None,read=False): item=json.dumps({"port":port, "type":type,"value":value,"read":read}) return str(item) def makeJsonObject_detection(value=None,image1=None,image2=None,read=False): item=json.dumps({"value":value, "image1":image1,"image2":image2, "read":read}) return str(item) def makeJsonObject_levelCalculate(data=None,score=0): item=json.dumps({"data":data,"score":score}) return str(item) def main(): init() while True: time.sleep(0.1) data = input("Code:") if data is not None: try: if data=='exit': exit() sys.exit(0) else: parsed_json=json.loads(convertJson(msg.payload)) _type =parsed_json['type'] _port=parsed_json['port'] _read=parsed_json['read'] if _type is not None and _port is not None and _read is not None: item=str(json.dumps(parsed_json)) print(item) #client.publish("message",item) client.publish("detection",item) else: throw('Not correct data') except Exception as error: print('Error:',error) if __name__ == '__main__': try: if len(sys.argv)>1: MQTT_BROKER=sys.argv[1] else: input_text = input("Ip of MQTT-broker: ") if input_text: MQTT_BROKER=input_text #executor = ProcessPoolExecutor(2) #loop = trollius.get_event_loop() #_main = trollius.async(loop.run_in_executor(executor, main)) main() except (TypeError) as ex: error="Error: " + str(ex) #print(error) except (KeyboardInterrupt): exit() print("\nIOT is afgesloten\n") sys.exit(0) except (SystemExit): print("\nIOT is geforceert afgelosten\n")
[((508, 521), 'paho.mqtt.client.Client', 'mqtt.Client', ([], {}), '()\n', (519, 521), True, 'import paho.mqtt.client as mqtt\n'), ((1932, 1962), 'json.dumps', 'json.dumps', (["{'device': device}"], {}), "({'device': device})\n", (1942, 1962), False, 'import json\n'), ((2124, 2139), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (2134, 2139), False, 'import time\n'), ((2275, 2345), 'json.dumps', 'json.dumps', (["{'port': port, 'type': type, 'value': value, 'read': read}"], {}), "({'port': port, 'type': type, 'value': value, 'read': read})\n", (2285, 2345), False, 'import json\n'), ((2442, 2520), 'json.dumps', 'json.dumps', (["{'value': value, 'image1': image1, 'image2': image2, 'read': read}"], {}), "({'value': value, 'image1': image1, 'image2': image2, 'read': read})\n", (2452, 2520), False, 'import json\n'), ((2596, 2638), 'json.dumps', 'json.dumps', (["{'data': data, 'score': score}"], {}), "({'data': data, 'score': score})\n", (2606, 2638), False, 'import json\n'), ((2692, 2707), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2702, 2707), False, 'import time\n'), ((3751, 3762), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3759, 3762), False, 'import sys\n'), ((1503, 1543), 'lib.levelCalculation.calculate', 'levelCalculation.calculate', (['_data', '_file'], {}), '(_data, _file)\n', (1529, 1543), True, 'import lib.levelCalculation as levelCalculation\n'), ((1165, 1210), 'lib.faceDetection.getDifference', 'faceDetection.getDifference', (['_image1', '_image2'], {}), '(_image1, _image2)\n', (1192, 1210), True, 'import lib.faceDetection as faceDetection\n'), ((2801, 2812), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2809, 2812), False, 'import sys\n'), ((3058, 3081), 'json.dumps', 'json.dumps', (['parsed_json'], {}), '(parsed_json)\n', (3068, 3081), False, 'import json\n')]
ajeet1308/code_problems
Codility/python/tape_equilibrium.py
5d99839b6319295c6d81dd86775c46a536e7a1ca
def solution(A):
    total = sum(A)
    m = float('inf')
    left_sum = 0
    for n in A[:-1]:
        left_sum += n
        v = abs(total - 2*left_sum)
        if v < m:
            m = v
    return m
[]
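solution(A) above scans the tape once, keeping a running left-hand sum and minimising |(total - left) - left| = |total - 2*left|. A small worked check; the input values are chosen by hand, not taken from the original repository:

assert solution([3, 1, 2, 4, 3]) == 1   # split after index 2: |6 - 7| = 1
assert solution([1, 2]) == 1            # only possible split: |1 - 2| = 1
assert solution([-10, 10]) == 20        # |-10 - 10| = 20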
idjaw/pythondotorg
peps/converters.py
8e4babbc7ad15ed52b4f66fdd4ab43c2dd3bd649
import re import os from bs4 import BeautifulSoup from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.core.files import File from pages.models import Page, Image PEP_TEMPLATE = 'pages/pep-page.html' pep_url = lambda num: 'dev/peps/pep-{}/'.format(num) def check_paths(): """ Checks to ensure our PEP_REPO_PATH is setup correctly """ if not hasattr(settings, 'PEP_REPO_PATH'): raise ImproperlyConfigured("No PEP_REPO_PATH in settings") if not os.path.exists(settings.PEP_REPO_PATH): raise ImproperlyConfigured("PEP_REPO_PATH in settings does not exist") def convert_pep0(): """ Take existing generated pep-0000.html and convert to something suitable for a Python.org Page returns the core body HTML necessary only """ check_paths() pep0_path = os.path.join(settings.PEP_REPO_PATH, 'pep-0000.html') pep0_content = open(pep0_path).read() soup = BeautifulSoup(pep0_content) body_children = list(soup.body.children) # Grab header and PEP body header = body_children[3] pep_content = body_children[7] # Fix PEP links body_links = pep_content.find_all("a") pep_href_re = re.compile(r'pep-(\d+)\.html') for b in body_links: m = pep_href_re.search(b.attrs['href']) # Skip anything not matching 'pep-XXXX.html' if not m: continue b.attrs['href'] = '/dev/peps/pep-{}/'.format(m.group(1)) # Remove Version from header header_rows = header.find_all('th') for t in header_rows: if 'Version:' in t.text and 'N/A' in t.next_sibling.text: t.parent.extract() return ''.join([header.prettify(), pep_content.prettify()]) def get_pep0_page(commit=True): """ Using convert_pep0 above, create a CMS ready pep0 page and return it pep0 is used as the directory index, but it's also an actual pep, so we return both Page objects. """ pep0_content = convert_pep0() pep0_page, _ = Page.objects.get_or_create(path='dev/peps/') pep0000_page, _ = Page.objects.get_or_create(path='dev/peps/pep-0000/') for page in [pep0_page, pep0000_page]: page.content = pep0_content page.content_markup_type = 'html' page.title = "PEP 0 -- Index of Python Enhancement Proposals (PEPs)" page.template_name = PEP_TEMPLATE if commit: page.save() return pep0_page, pep0000_page def fix_headers(soup, data): """ Remove empty or unwanted headers and find our title """ header_rows = soup.find_all('th') for t in header_rows: if 'Version:' in t.text: if t.next_sibling.text == '$Revision$': t.parent.extract() if t.next_sibling.text == '': t.parent.extract() if 'Last-Modified:' in t.text: if '$Date$'in t.next_sibling.text: t.parent.extract() if t.next_sibling.text == '': t.parent.extract() if t.text == 'Title:': data['title'] = t.next_sibling.text if t.text == 'Content-Type:': t.parent.extract() if 'Version:' in t.text and 'N/A' in t.next_sibling.text: t.parent.extract() return soup, data def convert_pep_page(pep_number, content): """ Handle different formats that pep2html.py outputs """ check_paths() data = { 'title': None, } if '<html>' in content: soup = BeautifulSoup(content) data['title'] = soup.title.text if not re.search(r'PEP \d+', data['title']): data['title'] = 'PEP {} -- {}'.format( pep_number, soup.title.text, ) header = soup.body.find('div', class_="header") header, data = fix_headers(header, data) data['header'] = header.prettify() main_content = soup.body.find('div', class_="content") data['main_content'] = main_content.prettify() data['content'] = ''.join([ data['header'], data['main_content'] ]) else: soup = BeautifulSoup(content) soup, data = fix_headers(soup, data) if not data['title']: data['title'] = "PEP {} -- 
".format(pep_number) else: if not re.search(r'PEP \d+', data['title']): data['title'] = "PEP {} -- {}".format( pep_number, data['title'], ) data['content'] = soup.prettify() # Fix PEP links pep_content = BeautifulSoup(data['content']) body_links = pep_content.find_all("a") pep_href_re = re.compile(r'pep-(\d+)\.html') for b in body_links: m = pep_href_re.search(b.attrs['href']) # Skip anything not matching 'pep-XXXX.html' if not m: continue b.attrs['href'] = '/dev/peps/pep-{}/'.format(m.group(1)) data['content'] = pep_content.prettify() hg_link = "https://hg.python.org/peps/file/tip/pep-{0}.txt".format(pep_number) data['content'] += """Source: <a href="{0}">{0}</a>""".format(hg_link) return data def get_pep_page(pep_number, commit=True): """ Given a pep_number retrieve original PEP source text, rst, or html. Get or create the associated Page and return it """ pep_path = os.path.join(settings.PEP_REPO_PATH, 'pep-{}.html'.format(pep_number)) if not os.path.exists(pep_path): print("PEP Path '{}' does not exist, skipping".format(pep_path)) pep_content = convert_pep_page(pep_number, open(pep_path).read()) pep_page, _ = Page.objects.get_or_create(path=pep_url(pep_number)) # Remove leading zeros from PEP number for display purposes pep_number_string = str(pep_number) pep_number_string = re.sub(r'^0+', '', pep_number_string) pep_page.title = pep_content['title'] pep_page.content = pep_content['content'] pep_page.content_markup_type = 'html' pep_page.template_name = PEP_TEMPLATE if commit: pep_page.save() return pep_page def add_pep_image(pep_number, path): image_path = os.path.join(settings.PEP_REPO_PATH, path) if not os.path.exists(image_path): print("Image Path '{}' does not exist, skipping".format(image_path)) try: page = Page.objects.get(path=pep_url(pep_number)) except Page.DoesNotExist: print("Could not find backing PEP {}".format(pep_number)) return # Find existing images, we have to loop here as we can't use the ORM # to query against image__path existing_images = Image.objects.filter(page=page) MISSING = False FOUND = False for image in existing_images: image_root_path = os.path.join(settings.MEDIA_ROOT, page.path, path) if image.image.path.endswith(path): FOUND = True # File is missing on disk, recreate if not os.path.exists(image_root_path): MISSING = image break if not FOUND or MISSING: image = None if MISSING: image = MISSING else: image = Image(page=page) with open(image_path, 'rb') as image_obj: image.image.save(path, File(image_obj)) image.save() # Old images used to live alongside html, but now they're in different # places, so update the page accordingly. soup = BeautifulSoup(page.content.raw) for img_tag in soup.findAll('img'): if img_tag['src'] == path: img_tag['src'] = os.path.join(settings.MEDIA_URL, page.path, path) page.content.raw = soup.prettify() page.save() return image def get_peps_rss(): rss_feed = os.path.join(settings.PEP_REPO_PATH, 'peps.rss') if not os.path.exists(rss_feed): return page, _ = Page.objects.get_or_create( path="dev/peps/peps.rss", template_name="pages/raw.html", ) with open(rss_feed, "r") as rss_content: content = rss_content.read() page.content = content page.is_published = True page.content_type = "application/rss+xml" page.save() return page
[((854, 907), 'os.path.join', 'os.path.join', (['settings.PEP_REPO_PATH', '"""pep-0000.html"""'], {}), "(settings.PEP_REPO_PATH, 'pep-0000.html')\n", (866, 907), False, 'import os\n'), ((962, 989), 'bs4.BeautifulSoup', 'BeautifulSoup', (['pep0_content'], {}), '(pep0_content)\n', (975, 989), False, 'from bs4 import BeautifulSoup\n'), ((1216, 1247), 're.compile', 're.compile', (['"""pep-(\\\\d+)\\\\.html"""'], {}), "('pep-(\\\\d+)\\\\.html')\n", (1226, 1247), False, 'import re\n'), ((2025, 2069), 'pages.models.Page.objects.get_or_create', 'Page.objects.get_or_create', ([], {'path': '"""dev/peps/"""'}), "(path='dev/peps/')\n", (2051, 2069), False, 'from pages.models import Page, Image\n'), ((2092, 2145), 'pages.models.Page.objects.get_or_create', 'Page.objects.get_or_create', ([], {'path': '"""dev/peps/pep-0000/"""'}), "(path='dev/peps/pep-0000/')\n", (2118, 2145), False, 'from pages.models import Page, Image\n'), ((4605, 4635), 'bs4.BeautifulSoup', 'BeautifulSoup', (["data['content']"], {}), "(data['content'])\n", (4618, 4635), False, 'from bs4 import BeautifulSoup\n'), ((4698, 4729), 're.compile', 're.compile', (['"""pep-(\\\\d+)\\\\.html"""'], {}), "('pep-(\\\\d+)\\\\.html')\n", (4708, 4729), False, 'import re\n'), ((5836, 5872), 're.sub', 're.sub', (['"""^0+"""', '""""""', 'pep_number_string'], {}), "('^0+', '', pep_number_string)\n", (5842, 5872), False, 'import re\n'), ((6165, 6207), 'os.path.join', 'os.path.join', (['settings.PEP_REPO_PATH', 'path'], {}), '(settings.PEP_REPO_PATH, path)\n', (6177, 6207), False, 'import os\n'), ((6634, 6665), 'pages.models.Image.objects.filter', 'Image.objects.filter', ([], {'page': 'page'}), '(page=page)\n', (6654, 6665), False, 'from pages.models import Page, Image\n'), ((7444, 7475), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page.content.raw'], {}), '(page.content.raw)\n', (7457, 7475), False, 'from bs4 import BeautifulSoup\n'), ((7741, 7789), 'os.path.join', 'os.path.join', (['settings.PEP_REPO_PATH', '"""peps.rss"""'], {}), "(settings.PEP_REPO_PATH, 'peps.rss')\n", (7753, 7789), False, 'import os\n'), ((7857, 7946), 'pages.models.Page.objects.get_or_create', 'Page.objects.get_or_create', ([], {'path': '"""dev/peps/peps.rss"""', 'template_name': '"""pages/raw.html"""'}), "(path='dev/peps/peps.rss', template_name=\n 'pages/raw.html')\n", (7883, 7946), False, 'from pages.models import Page, Image\n'), ((453, 505), 'django.core.exceptions.ImproperlyConfigured', 'ImproperlyConfigured', (['"""No PEP_REPO_PATH in settings"""'], {}), "('No PEP_REPO_PATH in settings')\n", (473, 505), False, 'from django.core.exceptions import ImproperlyConfigured\n'), ((518, 556), 'os.path.exists', 'os.path.exists', (['settings.PEP_REPO_PATH'], {}), '(settings.PEP_REPO_PATH)\n', (532, 556), False, 'import os\n'), ((572, 636), 'django.core.exceptions.ImproperlyConfigured', 'ImproperlyConfigured', (['"""PEP_REPO_PATH in settings does not exist"""'], {}), "('PEP_REPO_PATH in settings does not exist')\n", (592, 636), False, 'from django.core.exceptions import ImproperlyConfigured\n'), ((3507, 3529), 'bs4.BeautifulSoup', 'BeautifulSoup', (['content'], {}), '(content)\n', (3520, 3529), False, 'from bs4 import BeautifulSoup\n'), ((4153, 4175), 'bs4.BeautifulSoup', 'BeautifulSoup', (['content'], {}), '(content)\n', (4166, 4175), False, 'from bs4 import BeautifulSoup\n'), ((5465, 5489), 'os.path.exists', 'os.path.exists', (['pep_path'], {}), '(pep_path)\n', (5479, 5489), False, 'import os\n'), ((6219, 6245), 'os.path.exists', 'os.path.exists', (['image_path'], {}), '(image_path)\n', 
(6233, 6245), False, 'import os\n'), ((6765, 6815), 'os.path.join', 'os.path.join', (['settings.MEDIA_ROOT', 'page.path', 'path'], {}), '(settings.MEDIA_ROOT, page.path, path)\n', (6777, 6815), False, 'import os\n'), ((7801, 7825), 'os.path.exists', 'os.path.exists', (['rss_feed'], {}), '(rss_feed)\n', (7815, 7825), False, 'import os\n'), ((3586, 3622), 're.search', 're.search', (['"""PEP \\\\d+"""', "data['title']"], {}), "('PEP \\\\d+', data['title'])\n", (3595, 3622), False, 'import re\n'), ((7170, 7186), 'pages.models.Image', 'Image', ([], {'page': 'page'}), '(page=page)\n', (7175, 7186), False, 'from pages.models import Page, Image\n'), ((7580, 7629), 'os.path.join', 'os.path.join', (['settings.MEDIA_URL', 'page.path', 'path'], {}), '(settings.MEDIA_URL, page.path, path)\n', (7592, 7629), False, 'import os\n'), ((4345, 4381), 're.search', 're.search', (['"""PEP \\\\d+"""', "data['title']"], {}), "('PEP \\\\d+', data['title'])\n", (4354, 4381), False, 'import re\n'), ((6953, 6984), 'os.path.exists', 'os.path.exists', (['image_root_path'], {}), '(image_root_path)\n', (6967, 6984), False, 'import os\n'), ((7273, 7288), 'django.core.files.File', 'File', (['image_obj'], {}), '(image_obj)\n', (7277, 7288), False, 'from django.core.files import File\n')]
ajayiagbebaku/NFL-Model
venv/Lib/site-packages/toolz/sandbox/__init__.py
afcc67a85ca7138c58c3334d45988ada2da158ed
from .core import EqualityHashKey, unzip
from .parallel import fold
[]
caglorithm/accel
interface/app/__init__.py
7fe5c13ea9559565c599633bdb3318c8fbc57088
from flask import Flask

app = Flask(__name__, static_folder='static')

from app import routes
[((31, 70), 'flask.Flask', 'Flask', (['__name__'], {'static_folder': '"""static"""'}), "(__name__, static_folder='static')\n", (36, 70), False, 'from flask import Flask\n')]
sebastien-riou/SATL
implementations/python3/tests/CAPDU.py
b95d0e784d2e8e1384381d4d5b8b448d3d1798cf
import os

import pysatl
from pysatl import CAPDU

if __name__ == "__main__":

    def check(hexstr, expected):
        capdu = CAPDU.from_hexstr(hexstr)
        if capdu != expected:
            raise Exception("Mismatch for input '"+hexstr+"'\nActual: "+str(capdu)+"\nExpected: "+str(expected))

    def gencase(*, LC, LE):
        assert(LC < 0x10000)
        assert(LE <= 0x10000)
        data = os.getrandom(LC)
        hexstr = "00112233"
        case4 = LC > 0 and LE > 0
        case4e = case4 and (LC > 0xFF or LE > 0x100)
        if LC > 0:
            if LC > 0xFF or case4e:
                hexstr += "00%04X" % LC
            else:
                hexstr += "%02X" % LC
            hexstr += pysatl.Utils.hexstr(data, separator="")
        if LE > 0:
            if case4e:
                if LE == 0x10000:
                    hexstr += "0000"
                else:
                    hexstr += "%04X" % LE
            elif LE == 0x10000:
                hexstr += "000000"
            elif LE > 0x100:
                hexstr += "00%04X" % LE
            elif LE == 0x100:
                hexstr += "00"
            else:
                hexstr += "%02X" % LE
        expected = hexstr
        capdu = CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33, DATA=data, LE=LE)
        hexstr = capdu.to_hexstr()
        if hexstr != expected:
            raise Exception("Mismatch for LC=%d, LE=%d" % (LC, LE)+"\nActual: "+hexstr+"\nExpected: "+expected)
        b = capdu.to_bytes()
        assert(type(b) is bytes)
        return (hexstr, capdu)

    # check __repr__
    expected = "pysatl.CAPDU.from_hexstr('00112233015502')"
    capdu = None
    exec("capdu="+expected)
    assert(expected == repr(capdu))

    # check well formed inputs
    check("00112233", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))
    check("00 11 22 33", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))
    check("0x00,0x11,0x22,0x33", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))

    # check we tolerate less well formed inputs
    check("00-11,22_33", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))
    check("""0x00 0x11 0x22 0x33""", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))
    check("1 2 304", CAPDU(CLA=0x01, INS=0x02, P1=0x03, P2=0x04))

    LC_cases = [0, 1, 2, 254, 255, 256, 257, 65534, 65535]
    LE_cases = LC_cases + [65536]

    for LC in LC_cases:
        for LE in LE_cases:
            print(LC, LE)
            check(*gencase(LC=LC, LE=LE))
[((129, 154), 'pysatl.CAPDU.from_hexstr', 'CAPDU.from_hexstr', (['hexstr'], {}), '(hexstr)\n', (146, 154), False, 'from pysatl import CAPDU\n'), ((405, 421), 'os.getrandom', 'os.getrandom', (['LC'], {}), '(LC)\n', (417, 421), False, 'import os\n'), ((1200, 1252), 'pysatl.CAPDU', 'CAPDU', ([], {'CLA': '(0)', 'INS': '(17)', 'P1': '(34)', 'P2': '(51)', 'DATA': 'data', 'LE': 'LE'}), '(CLA=0, INS=17, P1=34, P2=51, DATA=data, LE=LE)\n', (1205, 1252), False, 'from pysatl import CAPDU\n'), ((1744, 1778), 'pysatl.CAPDU', 'CAPDU', ([], {'CLA': '(0)', 'INS': '(17)', 'P1': '(34)', 'P2': '(51)'}), '(CLA=0, INS=17, P1=34, P2=51)\n', (1749, 1778), False, 'from pysatl import CAPDU\n'), ((1814, 1848), 'pysatl.CAPDU', 'CAPDU', ([], {'CLA': '(0)', 'INS': '(17)', 'P1': '(34)', 'P2': '(51)'}), '(CLA=0, INS=17, P1=34, P2=51)\n', (1819, 1848), False, 'from pysatl import CAPDU\n'), ((1892, 1926), 'pysatl.CAPDU', 'CAPDU', ([], {'CLA': '(0)', 'INS': '(17)', 'P1': '(34)', 'P2': '(51)'}), '(CLA=0, INS=17, P1=34, P2=51)\n', (1897, 1926), False, 'from pysatl import CAPDU\n'), ((2010, 2044), 'pysatl.CAPDU', 'CAPDU', ([], {'CLA': '(0)', 'INS': '(17)', 'P1': '(34)', 'P2': '(51)'}), '(CLA=0, INS=17, P1=34, P2=51)\n', (2015, 2044), False, 'from pysatl import CAPDU\n'), ((2105, 2139), 'pysatl.CAPDU', 'CAPDU', ([], {'CLA': '(0)', 'INS': '(17)', 'P1': '(34)', 'P2': '(51)'}), '(CLA=0, INS=17, P1=34, P2=51)\n', (2110, 2139), False, 'from pysatl import CAPDU\n'), ((2171, 2202), 'pysatl.CAPDU', 'CAPDU', ([], {'CLA': '(1)', 'INS': '(2)', 'P1': '(3)', 'P2': '(4)'}), '(CLA=1, INS=2, P1=3, P2=4)\n', (2176, 2202), False, 'from pysatl import CAPDU\n'), ((696, 735), 'pysatl.Utils.hexstr', 'pysatl.Utils.hexstr', (['data'], {'separator': '""""""'}), "(data, separator='')\n", (715, 735), False, 'import pysatl\n')]
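The test above exercises pysatl's CAPDU both from parsed hex strings and from keyword construction. A minimal round-trip sketch using only the calls the test itself relies on; the APDU header and data bytes are an arbitrary case-3 example, not taken from the test:

from pysatl import CAPDU

capdu = CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33, DATA=bytes.fromhex("AABB"), LE=0)
hexstr = capdu.to_hexstr()                # short case-3 encoding: "0011223302AABB"
assert CAPDU.from_hexstr(hexstr) == capdu  # parses back to an equal object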
rosich/mgls
src/mgls_bootstrapping.py
64c924f59adba2dddf44bb70a84868173f0b7120
#!/usr/bin/python from math import sin, cos, tan, atan, pi, acos, sqrt, exp, log10 import sys, os import copy import random import numpy as np import multiprocessing as mp import ConfigParser sys.path.append('./bin') import mGLS, mMGLS sys.path.append('./src') from EnvGlobals import Globals import mgls_io import mgls_mc from mgls_lib import * #definitions and constants to_radians = pi/180.0 to_deg = 1.0/to_radians #------------------------- def _gls_instance_Ndim_bootstrapping(n_runs): """executes n_runs instances of MGLS for with previous data shuffle """ cpu_periodogram = list() for iter in range(n_runs): """ #shuffle RV's and their errors. Repetition is not allowed comb_rv_err = zip(Globals.rv, Globals.rv_err) random.shuffle(comb_rv_err) Globals.rv[:], Globals.rv_err[:] = zip(*comb_rv_err) """ #allowing repetition rv = [0.0]*len(Globals.time) rv_err = [0.0]*len(Globals.time) for i in range(len(Globals.time)): index = int(random.uniform(0,len(Globals.time))) rv[i] = Globals.rv[index] rv_err[i] = Globals.rv_err[index] Globals.rv = rv Globals.rv_err = rv_err opt_state = mgls_mc.optimal(Globals.ndim, msgs = False, temp_steps=20, n_iter=1000) pwr_opt, fitting_coeffs, A = mgls(opt_state) cpu_periodogram.append(pwr_opt) #save the best period determination (highest power) return cpu_periodogram def fap(bootstrapping_stats, pwr): """returns FAP for a given pwr. i.e. how many realizations overcome a given power, over unit. """ return float(sum(i > pwr for i in bootstrapping_stats))/len(bootstrapping_stats) def fap_levels(bootstrapping_stats): """determines which power a FAP of 1, 0.1, 0.01 % is reached """ FAPs = [1.0, 0.1, 0.01, 0.001] #FAPS to compute in % n_bs = len(bootstrapping_stats) #sort bootstrapping_stats vector ascendently sorted_pwr = sorted(bootstrapping_stats) return [np.percentile(sorted_pwr,100-FAPs[i]) for i in range(len(FAPs))] def parallel_Mdim_bootstrapping(n_bootstrapping): """ """ n_runs = [n_bootstrapping/Globals.ncpus for i in range(Globals.ncpus)] pool = mp.Pool(Globals.ncpus) #ncpus available #run parallell execution try: out = pool.map_async(_gls_instance_Ndim_bootstrapping, n_runs).get(1./.0001) pool.terminate() except KeyboardInterrupt: pool.terminate() sys.exit() """ except ZeroDivisionError: print "Error: Zero division error. Restarted parallel bootstapping" """ #join the output bunches out_spectra = list() for cpu in range(len(n_runs)): out_spectra.extend(out[cpu]) bootstrapping_stats = list() for j in range(len(out_spectra)): bootstrapping_stats.append(out_spectra[j]) return bootstrapping_stats def parallel_bootstrapping(n_bootstrapping): """ """ n_runs = [n_bootstrapping/Globals.ncpus for i in range(Globals.ncpus)] pool = mp.Pool(Globals.ncpus) #ncpus available #run parallell execution try: out = pool.map_async(_gls_instance_bootstrapping, n_runs).get(1./.00001) pool.terminate() except KeyboardInterrupt: pool.terminate() sys.exit() #join the output bunches out_spectra = list() for cpu in range(len(n_runs)): out_spectra.extend(out[cpu]) bootstrapping_stats = list() for j in range(len(out_spectra)): bootstrapping_stats.append(out_spectra[j]) return bootstrapping_stats def Mdim_bootstrapping(max_pow): """ """ #n_bootstrapping = 500 #iterations bootstrapping_stats = parallel_Mdim_bootstrapping(Globals.n_bootstrapping) print "\n//BOOTSTRAPPING:// {1.0, 0.1, 0.01, 0.001}%" print "FAP Levels:", fap_levels(bootstrapping_stats) print "Total bootstapping samples: ", len(bootstrapping_stats) return bootstrapping_stats
[]
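The `fap` and `fap_levels` helpers in src/mgls_bootstrapping.py above reduce to simple order statistics over the bootstrap powers. A minimal sketch of the same computation on made-up numbers (nothing below comes from the module or its globals):

import numpy as np

# Hypothetical bootstrap powers; in the module these come from MGLS runs on shuffled RVs.
bootstrapping_stats = [0.12, 0.25, 0.31, 0.18, 0.40, 0.22, 0.35, 0.28, 0.15, 0.45]

# FAP of a candidate power = fraction of bootstrap realizations that exceed it.
pwr = 0.30
fap = float(sum(p > pwr for p in bootstrapping_stats)) / len(bootstrapping_stats)
print(fap)  # 0.4 -> 4 of the 10 shuffled realizations beat this power

# Power thresholds for FAP levels of 1, 0.1, 0.01 and 0.001 per cent are high percentiles.
FAPs = [1.0, 0.1, 0.01, 0.001]
levels = [np.percentile(sorted(bootstrapping_stats), 100 - f) for f in FAPs]
print(levels)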
pcaruana/sombrio
mgmt/src/constants.py
3b669fc83e0227a69b673b5555d88e15b55c397c
#! /usr/bin/env python3
"""
constants.py - Contains all constants used by the device manager

Author:
    - Pablo Caruana (pablo dot caruana at gmail dot com)
Date: 12/3/2016
"""

number_of_rows = 3 # total number rows of Index Servers
number_of_links = 5 # number of links to be sent to Crawler
number_of_chunks = 5 # number of chunks to be sent to Index Builder
number_of_comps = 10 # number of components managed by each watchdog
[]
STomoya/sketchify
XDoG/XDoG.py
93c068042f02172505457cc15cb0bef673666be3
import cv2
import numpy as np


def DoG(image, size, sigma, k=1.6, gamma=1.):
    g1 = cv2.GaussianBlur(image, (size, size), sigma)
    g2 = cv2.GaussianBlur(image, (size, size), sigma*k)
    return g1 - gamma * g2

def XDoG(image, size, sigma, eps, phi, k=1.6, gamma=1.):
    eps /= 255
    d = DoG(image, size, sigma, k, gamma)
    d /= d.max()
    e = 1 + np.tanh(phi * (d - eps))
    e[e >= 1] = 1
    return e * 255

# This config is found by the author
# modify if not the desired output
XDoG_config = dict(
    size=0,
    sigma=0.6,
    eps=-15,
    phi=10e8,
    k=2.5,
    gamma=0.97
)

def gen_xdog_image(src, dst):
    gray = cv2.imread(src, cv2.IMREAD_GRAYSCALE)

    # I wanted the gamma between [0.97, 0.98]
    # but it depends on the image so I made it move randomly
    # comment out if this is not needed
    XDoG_config['gamma'] += 0.01 * np.random.rand(1)

    dogged = XDoG(gray, **XDoG_config)
    cv2.imwrite(dst, dogged)

if __name__ == "__main__":
    gen_xdog_image('sample.jpg', 'dog.jpg')
[((87, 131), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['image', '(size, size)', 'sigma'], {}), '(image, (size, size), sigma)\n', (103, 131), False, 'import cv2\n'), ((141, 189), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['image', '(size, size)', '(sigma * k)'], {}), '(image, (size, size), sigma * k)\n', (157, 189), False, 'import cv2\n'), ((638, 675), 'cv2.imread', 'cv2.imread', (['src', 'cv2.IMREAD_GRAYSCALE'], {}), '(src, cv2.IMREAD_GRAYSCALE)\n', (648, 675), False, 'import cv2\n'), ((919, 943), 'cv2.imwrite', 'cv2.imwrite', (['dst', 'dogged'], {}), '(dst, dogged)\n', (930, 943), False, 'import cv2\n'), ((359, 383), 'numpy.tanh', 'np.tanh', (['(phi * (d - eps))'], {}), '(phi * (d - eps))\n', (366, 383), True, 'import numpy as np\n'), ((858, 875), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (872, 875), True, 'import numpy as np\n')]
ericlin8545/grover
lm/validate.py
3ac6e506f2e1a859d98cc2c3fb57ba251be31484
# Original work Copyright 2018 The Google AI Language Team Authors. # Modified work Copyright 2019 Rowan Zellers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from lm.modeling import model_fn_builder, GroverConfig import tensorflow as tf from lm.dataloader import input_fn_builder import numpy as np import tempfile import h5py from google.cloud import storage flags = tf.flags FLAGS = flags.FLAGS ## Required parameters flags.DEFINE_string( "config_file", 'configs/base.json', "The config json file corresponding to the pre-trained news model. " "This specifies the model architecture.") flags.DEFINE_string( "input_file", None, "Input TF example files (can be a glob or comma separated).") flags.DEFINE_string( "output_dir", None, "The output directory where the model checkpoints will be written.") flags.DEFINE_string( "validation_name", 'preds.h5', "Name to use") ## Other parameters flags.DEFINE_string( "init_checkpoint", None, "Initial checkpoint (usually from a pre-trained model).") flags.DEFINE_integer( "max_seq_length", 1024, "The maximum total input sequence length after WordPiece tokenization. " "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded. Must match data generation.") flags.DEFINE_integer("iterations_per_loop", 1000, "How many steps to make in each estimator call.") flags.DEFINE_integer("batch_size", 32, "Batch size used for eval") flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") flags.DEFINE_string( "tpu_name", None, "The Cloud TPU to use for training. This should be either the name " "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") flags.DEFINE_string( "gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not " "specified, we will attempt to automatically detect the GCE project from " "metadata.") flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. 
Total number of TPU cores to use.") # This is a handy little utility so that we can save the perplexities to TPU class gcloudwriter(): def __init__(self, gcloud_name): assert gcloud_name.startswith('gs://') self.gcloud_name = gcloud_name bucket_name, blob_name = gcloud_name.split('gs://')[1].split('/', 1) bucket = storage.Client().get_bucket(bucket_name) self.blob = bucket.blob(blob_name) def __enter__(self): self.tempfile = tempfile.NamedTemporaryFile() return self.tempfile def __exit__(self, *args): self.tempfile.flush() print("UPLOADING TO {}".format(self.gcloud_name), flush=True) self.blob.upload_from_filename(self.tempfile.name) self.tempfile.close() def ind_where(array: np.ndarray, target, return_first_match=True, default_value=-1): """ :param array: Single dimension array :param target: target to search for :param return_first_match: If true, return the first index that matches, otherwise, return the last one :param default_value: Index to return if there was no match :return: index of the first match, or -1 if nothing """ assert array.ndim == 1 matching_inds = np.where(array == target)[0] if len(matching_inds) > 0: if return_first_match: return int(matching_inds[0]) else: return int(matching_inds[-1]) return default_value def main(_): tf.logging.set_verbosity(tf.logging.INFO) news_config = GroverConfig.from_json_file(FLAGS.config_file) tf.gfile.MakeDirs(FLAGS.output_dir) input_files = [] for input_pattern in FLAGS.input_file.split(","): input_files.extend(tf.gfile.Glob(input_pattern)) tf.logging.info("*** Input Files ***") for input_file in input_files: tf.logging.info(" %s" % input_file) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver( FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = tf.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.iterations_per_loop, keep_checkpoint_max=None, tpu_config=tf.contrib.tpu.TPUConfig( iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host)) model_fn = model_fn_builder(news_config, init_checkpoint=FLAGS.init_checkpoint, learning_rate=1e-4, num_train_steps=0, num_warmup_steps=0, use_tpu=FLAGS.use_tpu, ) # If TPU is not available, this will fall back to normal Estimator on CPU # or GPU. 
estimator = tf.contrib.tpu.TPUEstimator( use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.batch_size, eval_batch_size=FLAGS.batch_size, predict_batch_size=FLAGS.batch_size, params={'model_dir': FLAGS.output_dir} ) eval_input_fn = input_fn_builder( input_files=input_files, seq_length=FLAGS.max_seq_length, evaluate_for_fixed_number_of_steps=False, num_cpu_threads=1, is_training=False) result = [x for x in estimator.predict(input_fn=eval_input_fn, yield_single_examples=True)] cats = sorted(result[0].keys()) result_stack = {cat: np.stack([x[cat] for x in result]) for cat in cats} with gcloudwriter(os.path.join(FLAGS.output_dir, FLAGS.validation_name)) as tempfile_name: with h5py.File(tempfile_name, 'w') as h5: for cat, data in result_stack.items(): dtype2use = np.float16 if cat.endswith(('logprobs', 'top_p_required')) else np.uint16 h5.create_dataset(cat, data=data.astype(dtype2use)) h5.create_dataset('model', data=FLAGS.config_file) h5.create_dataset('ckpt', data=FLAGS.init_checkpoint) h5.create_dataset('input_file', data=FLAGS.input_file) # This gives the perplexity of the entire article. if you want to replicate the results of the paper you # might need to do something different to extract the ppl of just the body in particular. ppl_ex = [] for logprobs_i, ids_i in zip(result_stack['gt_logprobs'], result_stack['labels']): # Omit the first token. Keep in mind input_ids is shifted by 1 start_ind = ind_where(ids_i, target=50265, default_value=0) end_ind = ind_where(ids_i, target=50266, default_value=ids_i.shape[0] - 1) ppl_ex.append(logprobs_i[start_ind:end_ind]) ppl_ex = np.concatenate(ppl_ex, 0) print("Article perplexity is {:.3f}".format(np.exp(-np.mean(ppl_ex))), flush=True) if __name__ == "__main__": flags.mark_flag_as_required("input_file") flags.mark_flag_as_required("output_dir") tf.app.run()
[((4327, 4368), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (4351, 4368), True, 'import tensorflow as tf\n'), ((4388, 4434), 'lm.modeling.GroverConfig.from_json_file', 'GroverConfig.from_json_file', (['FLAGS.config_file'], {}), '(FLAGS.config_file)\n', (4415, 4434), False, 'from lm.modeling import model_fn_builder, GroverConfig\n'), ((4440, 4475), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['FLAGS.output_dir'], {}), '(FLAGS.output_dir)\n', (4457, 4475), True, 'import tensorflow as tf\n'), ((4614, 4652), 'tensorflow.logging.info', 'tf.logging.info', (['"""*** Input Files ***"""'], {}), "('*** Input Files ***')\n", (4629, 4652), True, 'import tensorflow as tf\n'), ((5484, 5645), 'lm.modeling.model_fn_builder', 'model_fn_builder', (['news_config'], {'init_checkpoint': 'FLAGS.init_checkpoint', 'learning_rate': '(0.0001)', 'num_train_steps': '(0)', 'num_warmup_steps': '(0)', 'use_tpu': 'FLAGS.use_tpu'}), '(news_config, init_checkpoint=FLAGS.init_checkpoint,\n learning_rate=0.0001, num_train_steps=0, num_warmup_steps=0, use_tpu=\n FLAGS.use_tpu)\n', (5500, 5645), False, 'from lm.modeling import model_fn_builder, GroverConfig\n'), ((5938, 6186), 'tensorflow.contrib.tpu.TPUEstimator', 'tf.contrib.tpu.TPUEstimator', ([], {'use_tpu': 'FLAGS.use_tpu', 'model_fn': 'model_fn', 'config': 'run_config', 'train_batch_size': 'FLAGS.batch_size', 'eval_batch_size': 'FLAGS.batch_size', 'predict_batch_size': 'FLAGS.batch_size', 'params': "{'model_dir': FLAGS.output_dir}"}), "(use_tpu=FLAGS.use_tpu, model_fn=model_fn,\n config=run_config, train_batch_size=FLAGS.batch_size, eval_batch_size=\n FLAGS.batch_size, predict_batch_size=FLAGS.batch_size, params={\n 'model_dir': FLAGS.output_dir})\n", (5965, 6186), True, 'import tensorflow as tf\n'), ((6256, 6418), 'lm.dataloader.input_fn_builder', 'input_fn_builder', ([], {'input_files': 'input_files', 'seq_length': 'FLAGS.max_seq_length', 'evaluate_for_fixed_number_of_steps': '(False)', 'num_cpu_threads': '(1)', 'is_training': '(False)'}), '(input_files=input_files, seq_length=FLAGS.max_seq_length,\n evaluate_for_fixed_number_of_steps=False, num_cpu_threads=1,\n is_training=False)\n', (6272, 6418), False, 'from lm.dataloader import input_fn_builder\n'), ((7819, 7844), 'numpy.concatenate', 'np.concatenate', (['ppl_ex', '(0)'], {}), '(ppl_ex, 0)\n', (7833, 7844), True, 'import numpy as np\n'), ((8057, 8069), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (8067, 8069), True, 'import tensorflow as tf\n'), ((3356, 3385), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (3383, 3385), False, 'import tempfile\n'), ((4095, 4120), 'numpy.where', 'np.where', (['(array == target)'], {}), '(array == target)\n', (4103, 4120), True, 'import numpy as np\n'), ((4696, 4732), 'tensorflow.logging.info', 'tf.logging.info', (["(' %s' % input_file)"], {}), "(' %s' % input_file)\n", (4711, 4732), True, 'import tensorflow as tf\n'), ((4838, 4953), 'tensorflow.contrib.cluster_resolver.TPUClusterResolver', 'tf.contrib.cluster_resolver.TPUClusterResolver', (['FLAGS.tpu_name'], {'zone': 'FLAGS.tpu_zone', 'project': 'FLAGS.gcp_project'}), '(FLAGS.tpu_name, zone=FLAGS.\n tpu_zone, project=FLAGS.gcp_project)\n', (4884, 4953), True, 'import tensorflow as tf\n'), ((6609, 6643), 'numpy.stack', 'np.stack', (['[x[cat] for x in result]'], {}), '([x[cat] for x in result])\n', (6617, 6643), True, 'import numpy as np\n'), ((4579, 4607), 'tensorflow.gfile.Glob', 'tf.gfile.Glob', 
(['input_pattern'], {}), '(input_pattern)\n', (4592, 4607), True, 'import tensorflow as tf\n'), ((5285, 5433), 'tensorflow.contrib.tpu.TPUConfig', 'tf.contrib.tpu.TPUConfig', ([], {'iterations_per_loop': 'FLAGS.iterations_per_loop', 'num_shards': 'FLAGS.num_tpu_cores', 'per_host_input_for_training': 'is_per_host'}), '(iterations_per_loop=FLAGS.iterations_per_loop,\n num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host)\n', (5309, 5433), True, 'import tensorflow as tf\n'), ((6684, 6737), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', 'FLAGS.validation_name'], {}), '(FLAGS.output_dir, FLAGS.validation_name)\n', (6696, 6737), False, 'import os\n'), ((6770, 6799), 'h5py.File', 'h5py.File', (['tempfile_name', '"""w"""'], {}), "(tempfile_name, 'w')\n", (6779, 6799), False, 'import h5py\n'), ((3222, 3238), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (3236, 3238), False, 'from google.cloud import storage\n'), ((7901, 7916), 'numpy.mean', 'np.mean', (['ppl_ex'], {}), '(ppl_ex)\n', (7908, 7916), True, 'import numpy as np\n')]
fuhuifang/RoBo
robo/fmin/entropy_search.py
036bbaa0e59032577e2611d8ba304384b397c7f6
import logging import george import numpy as np from robo.priors.default_priors import DefaultPrior from robo.models.gaussian_process import GaussianProcess from robo.models.gaussian_process_mcmc import GaussianProcessMCMC from robo.maximizers.random_sampling import RandomSampling from robo.maximizers.scipy_optimizer import SciPyOptimizer from robo.maximizers.differential_evolution import DifferentialEvolution from robo.solver.bayesian_optimization import BayesianOptimization from robo.acquisition_functions.information_gain import InformationGain from robo.acquisition_functions.ei import EI from robo.acquisition_functions.marginalization import MarginalizationGPMCMC from robo.initial_design import init_latin_hypercube_sampling logger = logging.getLogger(__name__) def entropy_search(objective_function, lower, upper, num_iterations=30, maximizer="random", model="gp_mcmc", n_init=3, output_path=None, rng=None): """ Entropy search for global black box optimization problems. This is a reimplemenation of the entropy search algorithm by Henning and Schuler[1]. [1] Entropy search for information-efficient global optimization. P. Hennig and C. Schuler. JMLR, (1), 2012. Parameters ---------- objective_function: function The objective function that is minimized. This function gets a numpy array (D,) as input and returns the function value (scalar) lower: np.ndarray (D,) The lower bound of the search space upper: np.ndarray (D,) The upper bound of the search space num_iterations: int The number of iterations (initial design + BO) maximizer: {"random", "scipy", "differential_evolution"} Defines how the acquisition function is maximized. model: {"gp", "gp_mcmc"} The model for the objective function. n_init: int Number of points for the initial design. Make sure that it is <= num_iterations. output_path: string Specifies the path where the intermediate output after each iteration will be saved. If None no output will be saved to disk. rng: numpy.random.RandomState Random number generator Returns ------- dict with all results """ assert upper.shape[0] == lower.shape[0], "Dimension miss match" assert np.all(lower < upper), "Lower bound >= upper bound" assert n_init <= num_iterations, "Number of initial design point has to be <= than the number of iterations" if rng is None: rng = np.random.RandomState(np.random.randint(0, 10000)) cov_amp = 2 n_dims = lower.shape[0] initial_ls = np.ones([n_dims]) exp_kernel = george.kernels.Matern52Kernel(initial_ls, ndim=n_dims) kernel = cov_amp * exp_kernel prior = DefaultPrior(len(kernel) + 1) n_hypers = 3 * len(kernel) if n_hypers % 2 == 1: n_hypers += 1 if model == "gp": gp = GaussianProcess(kernel, prior=prior, rng=rng, normalize_output=False, normalize_input=True, lower=lower, upper=upper) elif model == "gp_mcmc": gp = GaussianProcessMCMC(kernel, prior=prior, n_hypers=n_hypers, chain_length=200, burnin_steps=100, normalize_input=True, normalize_output=False, rng=rng, lower=lower, upper=upper) else: print("ERROR: %s is not a valid model!" 
% model) return a = InformationGain(gp, lower=lower, upper=upper, sampling_acquisition=EI) if model == "gp": acquisition_func = a elif model == "gp_mcmc": acquisition_func = MarginalizationGPMCMC(a) if maximizer == "random": max_func = RandomSampling(acquisition_func, lower, upper, rng=rng) elif maximizer == "scipy": max_func = SciPyOptimizer(acquisition_func, lower, upper, rng=rng) elif maximizer == "differential_evolution": max_func = DifferentialEvolution(acquisition_func, lower, upper, rng=rng) else: print("ERROR: %s is not a valid function to maximize the acquisition function!" % maximizer) return bo = BayesianOptimization(objective_function, lower, upper, acquisition_func, gp, max_func, initial_design=init_latin_hypercube_sampling, initial_points=n_init, rng=rng, output_path=output_path) x_best, f_min = bo.run(num_iterations) results = dict() results["x_opt"] = x_best results["f_opt"] = f_min results["incumbents"] = [inc for inc in bo.incumbents] results["incumbent_values"] = [val for val in bo.incumbents_values] results["runtime"] = bo.runtime results["overhead"] = bo.time_overhead results["X"] = [x.tolist() for x in bo.X] results["y"] = [y for y in bo.y] return results
[((748, 775), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (765, 775), False, 'import logging\n'), ((2358, 2379), 'numpy.all', 'np.all', (['(lower < upper)'], {}), '(lower < upper)\n', (2364, 2379), True, 'import numpy as np\n'), ((2672, 2689), 'numpy.ones', 'np.ones', (['[n_dims]'], {}), '([n_dims])\n', (2679, 2689), True, 'import numpy as np\n'), ((2707, 2761), 'george.kernels.Matern52Kernel', 'george.kernels.Matern52Kernel', (['initial_ls'], {'ndim': 'n_dims'}), '(initial_ls, ndim=n_dims)\n', (2736, 2761), False, 'import george\n'), ((3686, 3756), 'robo.acquisition_functions.information_gain.InformationGain', 'InformationGain', (['gp'], {'lower': 'lower', 'upper': 'upper', 'sampling_acquisition': 'EI'}), '(gp, lower=lower, upper=upper, sampling_acquisition=EI)\n', (3701, 3756), False, 'from robo.acquisition_functions.information_gain import InformationGain\n'), ((4368, 4566), 'robo.solver.bayesian_optimization.BayesianOptimization', 'BayesianOptimization', (['objective_function', 'lower', 'upper', 'acquisition_func', 'gp', 'max_func'], {'initial_design': 'init_latin_hypercube_sampling', 'initial_points': 'n_init', 'rng': 'rng', 'output_path': 'output_path'}), '(objective_function, lower, upper, acquisition_func, gp,\n max_func, initial_design=init_latin_hypercube_sampling, initial_points=\n n_init, rng=rng, output_path=output_path)\n', (4388, 4566), False, 'from robo.solver.bayesian_optimization import BayesianOptimization\n'), ((3002, 3123), 'robo.models.gaussian_process.GaussianProcess', 'GaussianProcess', (['kernel'], {'prior': 'prior', 'rng': 'rng', 'normalize_output': '(False)', 'normalize_input': '(True)', 'lower': 'lower', 'upper': 'upper'}), '(kernel, prior=prior, rng=rng, normalize_output=False,\n normalize_input=True, lower=lower, upper=upper)\n', (3017, 3123), False, 'from robo.models.gaussian_process import GaussianProcess\n'), ((3940, 3995), 'robo.maximizers.random_sampling.RandomSampling', 'RandomSampling', (['acquisition_func', 'lower', 'upper'], {'rng': 'rng'}), '(acquisition_func, lower, upper, rng=rng)\n', (3954, 3995), False, 'from robo.maximizers.random_sampling import RandomSampling\n'), ((2580, 2607), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10000)'], {}), '(0, 10000)\n', (2597, 2607), True, 'import numpy as np\n'), ((3220, 3405), 'robo.models.gaussian_process_mcmc.GaussianProcessMCMC', 'GaussianProcessMCMC', (['kernel'], {'prior': 'prior', 'n_hypers': 'n_hypers', 'chain_length': '(200)', 'burnin_steps': '(100)', 'normalize_input': '(True)', 'normalize_output': '(False)', 'rng': 'rng', 'lower': 'lower', 'upper': 'upper'}), '(kernel, prior=prior, n_hypers=n_hypers, chain_length=\n 200, burnin_steps=100, normalize_input=True, normalize_output=False,\n rng=rng, lower=lower, upper=upper)\n', (3239, 3405), False, 'from robo.models.gaussian_process_mcmc import GaussianProcessMCMC\n'), ((3865, 3889), 'robo.acquisition_functions.marginalization.MarginalizationGPMCMC', 'MarginalizationGPMCMC', (['a'], {}), '(a)\n', (3886, 3889), False, 'from robo.acquisition_functions.marginalization import MarginalizationGPMCMC\n'), ((4046, 4101), 'robo.maximizers.scipy_optimizer.SciPyOptimizer', 'SciPyOptimizer', (['acquisition_func', 'lower', 'upper'], {'rng': 'rng'}), '(acquisition_func, lower, upper, rng=rng)\n', (4060, 4101), False, 'from robo.maximizers.scipy_optimizer import SciPyOptimizer\n'), ((4169, 4231), 'robo.maximizers.differential_evolution.DifferentialEvolution', 'DifferentialEvolution', (['acquisition_func', 'lower', 
'upper'], {'rng': 'rng'}), '(acquisition_func, lower, upper, rng=rng)\n', (4190, 4231), False, 'from robo.maximizers.differential_evolution import DifferentialEvolution\n')]
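The `entropy_search` docstring above spells out its calling convention; a small sketch of a call on a toy one-dimensional objective (the objective, bounds and budget are invented, and RoBO plus its george/emcee dependencies must be installed for this to run):

import numpy as np
from robo.fmin.entropy_search import entropy_search

def objective(x):
    # entropy_search passes a numpy array of shape (D,); return a scalar to minimize.
    return float(np.sin(3 * x[0]) + 0.1 * x[0] ** 2)

lower = np.array([-3.0])
upper = np.array([3.0])

# Hypothetical budget: 3 random initial points, 20 evaluations in total.
results = entropy_search(objective, lower, upper, num_iterations=20, n_init=3,
                         model="gp", maximizer="random")
print(results["x_opt"], results["f_opt"])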
ck-tm/biserici-inlemnite
biserici_inlemnite/app/migrations/0096_bisericapage_datare_an.py
c9d12127b92f25d3ab2fcc7b4c386419fe308a4e
# Generated by Django 3.1.13 on 2021-10-29 11:07

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('app', '0095_bisericapage_utitle'),
    ]

    operations = [
        migrations.AddField(
            model_name='bisericapage',
            name='datare_an',
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
[((341, 383), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (360, 383), False, 'from django.db import migrations, models\n')]
bcgov/mds
services/core-api/app/api/mms_now_submissions/models/surface_bulk_sample_activity.py
6c427a66a5edb4196222607291adef8fd6677038
from app.api.utils.models_mixins import Base
from app.extensions import db


class MMSSurfaceBulkSampleActivity(Base):
    __tablename__ = "surface_bulk_sample_activity"
    __table_args__ = {"schema": "mms_now_submissions"}
    id = db.Column(db.Integer, primary_key=True)
    messageid = db.Column(db.Integer, db.ForeignKey('mms_now_submissions.application.messageid'))
    mms_cid = db.Column(db.Integer)
    type = db.Column(db.String)
    disturbedarea = db.Column(db.Numeric(14, 2))
    timbervolume = db.Column(db.Numeric(14, 2))
    quantity = db.Column(db.Integer)

    def __repr__(self):
        return '<MMSSurfaceBulkSampleActivity %r>' % self.id
[((234, 273), 'app.extensions.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (243, 273), False, 'from app.extensions import db\n'), ((386, 407), 'app.extensions.db.Column', 'db.Column', (['db.Integer'], {}), '(db.Integer)\n', (395, 407), False, 'from app.extensions import db\n'), ((419, 439), 'app.extensions.db.Column', 'db.Column', (['db.String'], {}), '(db.String)\n', (428, 439), False, 'from app.extensions import db\n'), ((552, 573), 'app.extensions.db.Column', 'db.Column', (['db.Integer'], {}), '(db.Integer)\n', (561, 573), False, 'from app.extensions import db\n'), ((312, 370), 'app.extensions.db.ForeignKey', 'db.ForeignKey', (['"""mms_now_submissions.application.messageid"""'], {}), "('mms_now_submissions.application.messageid')\n", (325, 370), False, 'from app.extensions import db\n'), ((470, 487), 'app.extensions.db.Numeric', 'db.Numeric', (['(14)', '(2)'], {}), '(14, 2)\n', (480, 487), False, 'from app.extensions import db\n'), ((518, 535), 'app.extensions.db.Numeric', 'db.Numeric', (['(14)', '(2)'], {}), '(14, 2)\n', (528, 535), False, 'from app.extensions import db\n')]
davo22/lgtv_rs232
lgtv_rs232/commands/remote_control/remote_control_lock.py
40562cddf7acdf6fa95124029595e3838dd9e7b0
from enum import Enum


class RemoteControlLock(Enum):
    OFF = 0
    ON = 1


def map_to_state(data: int):
    return RemoteControlLock(data)


class RemoteControlLockCommands(object):
    _command = "km"

    def __init__(self, send_command):
        self._send_command = send_command

    async def get_state(self):
        return map_to_state(await self._send_command(self._command, 255))

    async def set_state(self, state: RemoteControlLock):
        return map_to_state(await self._send_command(self._command, state.value))

    def on(self):
        return self.set_state(RemoteControlLock.ON)

    def off(self):
        return self.set_state(RemoteControlLock.OFF)
[]
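`RemoteControlLockCommands` above only needs an awaitable `send_command(command, value)` callable; a minimal sketch with a fake transport standing in for the real RS-232 writer (the echo logic and import path below are assumptions for illustration):

import asyncio
from lgtv_rs232.commands.remote_control.remote_control_lock import (
    RemoteControlLock, RemoteControlLockCommands)

async def fake_send_command(command, value):
    # Pretend the TV acknowledged: report "locked" for the ON request and for the status poll (255).
    return 1 if value in (1, 255) else 0

async def main():
    commands = RemoteControlLockCommands(fake_send_command)
    print(await commands.on())         # RemoteControlLock.ON
    print(await commands.get_state())  # RemoteControlLock.ON

asyncio.run(main())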
aashishogale/FunctionalPrograms-Python-
com/bridgelabz/programs/powerof2.py
d297bdb78112ef03274a10a58efc90da27f51b14
import sys
from com.bridgelabz.utility.Utility import Utility
class PowerOf2:
    def start(self):
        number=int(sys.argv[1])
        print(number)
        for i in Utility().powerof2(number):
            print(i)
        return
PowerOf2().start()
[((170, 179), 'com.bridgelabz.utility.Utility.Utility', 'Utility', ([], {}), '()\n', (177, 179), False, 'from com.bridgelabz.utility.Utility import Utility\n')]
MichaelLeeman/Job_Web_Scraper
app/main.py
29205d84f1190830a77174ce8272f4f79bb3468b
# This program scraps data from job postings on the website workinstartups.com and appends it to an excel worksheet.
import os
from datetime import datetime, timedelta
from selenium import webdriver
from app import web_scraper
from app import excel

job_list, last_date = [], None
file_path = os.path.abspath("main.py").rstrip('/app/main.py') + '//Workbooks' + "//Job_Openings.xlsx"
print("-" * 75, "-" * 75, "\n\t\t\t\t\t\t\t JOB WEB SCRAPER", "-" * 75, "-" * 75, sep="\n")
print("\n")

# If the Job_Openings workbook already exists then append the jobs not already in the worksheet
# by checking the date of the first job in excel, since the last time the site was scraped.
if os.path.isfile(file_path):
    print("Job_Opening excel file already exists. Loading workbook.", "-" * 75, sep="\n")
    workbook, worksheet = excel.load_xlsx(file_path)
    last_scrape_date = excel.get_first_job_date(worksheet)
    last_scrape_date = datetime.strptime(last_scrape_date, "%d-%b-%Y")

# If not, create a new workbook and append all of the jobs posted within the month
else:
    print("Creating new Excel workbook.", "-" * 75, sep="\n")
    current_date = datetime.today()
    date_month_ago = current_date - timedelta(weeks=4.348) # Average amount of weeks in a month
    last_scrape_date = date_month_ago.replace(hour=0, minute=0, second=0, microsecond=0) # default to midnight
    workbook, worksheet = excel.init_xlsx(worksheet_title="Job Openings")

# Open webdriver to workinstartups.com and create soup
print("Creating soup and opening Chrome webdriver", "-"*75, sep="\n")
URL = "https://workinstartups.com/job-board/jobs-in/london"
soup = web_scraper.soup_creator(URL, max_retry=1, sleep_time=0)
driver = webdriver.Chrome('./chromedriver')
driver.get(URL)
driver.find_element_by_link_text('Close').click()

# Scrap the jobs from workinstartups.com and update the worksheet with the found jobs
print("Scraping jobs from workinstartups.com. Please wait.", "-" * 75, sep="\n")
job_list = web_scraper.search_for_jobs(soup, last_scrape_date, driver)
print("Scraping finished. Updating and saving Excel workbook.", "-" * 75, sep="\n")
driver.close()
excel.update_xlsx(worksheet, job_list)
excel.save_xlsx(workbook, file_path)
print("Finished!", sep="\n")
[((680, 705), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (694, 705), False, 'import os\n'), ((1643, 1699), 'app.web_scraper.soup_creator', 'web_scraper.soup_creator', (['URL'], {'max_retry': '(1)', 'sleep_time': '(0)'}), '(URL, max_retry=1, sleep_time=0)\n', (1667, 1699), False, 'from app import web_scraper\n'), ((1710, 1744), 'selenium.webdriver.Chrome', 'webdriver.Chrome', (['"""./chromedriver"""'], {}), "('./chromedriver')\n", (1726, 1744), False, 'from selenium import webdriver\n'), ((1990, 2049), 'app.web_scraper.search_for_jobs', 'web_scraper.search_for_jobs', (['soup', 'last_scrape_date', 'driver'], {}), '(soup, last_scrape_date, driver)\n', (2017, 2049), False, 'from app import web_scraper\n'), ((2150, 2188), 'app.excel.update_xlsx', 'excel.update_xlsx', (['worksheet', 'job_list'], {}), '(worksheet, job_list)\n', (2167, 2188), False, 'from app import excel\n'), ((2189, 2225), 'app.excel.save_xlsx', 'excel.save_xlsx', (['workbook', 'file_path'], {}), '(workbook, file_path)\n', (2204, 2225), False, 'from app import excel\n'), ((823, 849), 'app.excel.load_xlsx', 'excel.load_xlsx', (['file_path'], {}), '(file_path)\n', (838, 849), False, 'from app import excel\n'), ((873, 908), 'app.excel.get_first_job_date', 'excel.get_first_job_date', (['worksheet'], {}), '(worksheet)\n', (897, 908), False, 'from app import excel\n'), ((932, 979), 'datetime.datetime.strptime', 'datetime.strptime', (['last_scrape_date', '"""%d-%b-%Y"""'], {}), "(last_scrape_date, '%d-%b-%Y')\n", (949, 979), False, 'from datetime import datetime, timedelta\n'), ((1150, 1166), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (1164, 1166), False, 'from datetime import datetime, timedelta\n'), ((1402, 1449), 'app.excel.init_xlsx', 'excel.init_xlsx', ([], {'worksheet_title': '"""Job Openings"""'}), "(worksheet_title='Job Openings')\n", (1417, 1449), False, 'from app import excel\n'), ((1203, 1225), 'datetime.timedelta', 'timedelta', ([], {'weeks': '(4.348)'}), '(weeks=4.348)\n', (1212, 1225), False, 'from datetime import datetime, timedelta\n'), ((294, 320), 'os.path.abspath', 'os.path.abspath', (['"""main.py"""'], {}), "('main.py')\n", (309, 320), False, 'import os\n')]
kapkic/native_client
src/trusted/validator_arm/dgen_output.py
51c8bc8c249d55606232ae011bdfc8b4cab3d794
#!/usr/bin/python2 # # Copyright (c) 2012 The Native Client Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # """ Some common boilerplates and helper functions for source code generation in files dgen_test_output.py and dgen_decode_output.py. """ HEADER_BOILERPLATE ="""/* * Copyright 2013 The Native Client Authors. All rights reserved. * Use of this source code is governed by a BSD-style license that can * be found in the LICENSE file. */ // DO NOT EDIT: GENERATED CODE """ NOT_TCB_BOILERPLATE="""#ifndef NACL_TRUSTED_BUT_NOT_TCB #error This file is not meant for use in the TCB #endif """ NEWLINE_STR=""" """ COMMENTED_NEWLINE_STR=""" //""" """Adds comment '// ' string after newlines.""" def commented_string(str, indent=''): sep = NEWLINE_STR + indent + '//' str = str.replace(NEWLINE_STR, sep) # This second line is a hack to fix that sometimes newlines are # represented as '\n'. # TODO(karl) Find the cause of this hack, and fix it. return str.replace('\\n', sep) def ifdef_name(filename): """ Generates the ifdef name to use for the given filename""" return filename.replace("/", "_").replace(".", "_").upper() + "_" def GetNumberCodeBlocks(separators): """Gets the number of code blocks to break classes into.""" num_blocks = len(separators) + 1 assert num_blocks >= 2 return num_blocks def FindBlockIndex(filename, format, num_blocks): """Returns true if the filename matches the format with an index in the range [1, num_blocks].""" for block in range(1, num_blocks+1): suffix = format % block if filename.endswith(suffix): return block raise Exception("Can't find block index: %s" % filename) def GetDecodersBlock(n, separators, decoders, name_fcn): """Returns the (sorted) list of decoders to include in block n, assuming decoders are split using the list of separators.""" num_blocks = GetNumberCodeBlocks(separators) assert n > 0 and n <= num_blocks return [decoder for decoder in decoders if ((n == 1 or IsPrefixLeDecoder(separators[n-2], decoder, name_fcn)) and (n == num_blocks or not IsPrefixLeDecoder(separators[n-1], decoder, name_fcn)))] def IsPrefixLeDecoder(prefix, decoder, name_fcn): """Returns true if the prefix is less than or equal to the corresponding prefix length of the decoder name.""" decoder_name = name_fcn(decoder) prefix_len = len(prefix) decoder_len = len(decoder_name) decoder_prefix = (decoder_name[0:prefix_len] if prefix_len < decoder_len else decoder_name) return prefix <= decoder_prefix
[]
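The block-splitting helpers in dgen_output.py above partition decoders into code blocks by name prefix; a small sketch with hypothetical decoder names and separators (it assumes the file's directory is on the Python path so it can be imported as `dgen_output`):

from dgen_output import GetNumberCodeBlocks, GetDecodersBlock

decoders = ['Add', 'Branch', 'Load', 'Store', 'Vector']  # hypothetical decoder names
separators = ['L', 'S']                                  # prefix boundaries between blocks

num_blocks = GetNumberCodeBlocks(separators)  # 2 separators -> 3 blocks
for n in range(1, num_blocks + 1):
    # Block 1: names before 'L'; block 2: from 'L' up to (not including) 'S'; block 3: 'S' onwards.
    print(n, GetDecodersBlock(n, separators, decoders, lambda d: d))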
ChristopherBrix/Debona
src/data_loader/input_data_loader.py
f000f3d483b2cc592233d0ba2a1a0327210562c8
""" Functions for loading input data. Author: Patrick Henriksen <[email protected]> """ import os import numpy as np def load_img(path: str, img_nums: list, shape: tuple) -> np.array: """ Loads a image in the human-readable format. Args: path: The path to the to the folder with mnist images. img_nums: A list with the numbers of the images we want to load. shape: The shape of a single image. Returns: The images as a MxCx28x28 numpy array. """ images = np.zeros((len(img_nums), *shape), dtype=float) for idx, i in enumerate(img_nums): file = os.path.join(path, "image" + str(i)) with open(file, "r") as f: data = [float(pixel) for pixel in f.readlines()[0].split(",")[:-1]] images[idx, :, :] = np.array(data).reshape(*shape) return images def load_mnist_human_readable(path: str, img_nums: list) -> np.array: """ Loads a mnist image from the neurify dataset. Args: path: The path to the to the folder with mnist images. img_nums: A list with the numbers of the images we want to load. Returns: The images as a Mx28x28 numpy array. """ return load_img(path, img_nums, (28, 28)) def load_cifar10_human_readable(path: str, img_nums: list) -> np.array: """ Loads the Cifar10 images in human readable format. Args: path: The path to the to the folder with mnist images. img_nums: A list with the numbers of the images we want to load. Returns: The images as a Mx3x32x32 numpy array. """ return load_img(path, img_nums, (3, 32, 32)) def load_images_eran(img_csv: str = "../../resources/images/cifar10_test.csv", num_images: int = 100, image_shape: tuple = (3, 32, 32)) -> tuple: """ Loads the images from the eran csv. Args: The csv path Returns: images, targets """ num_images = 100 images_array = np.zeros((num_images, np.prod(image_shape)), dtype=np.float32) targets_array = np.zeros(num_images, dtype=int) with open(img_csv, "r") as file: for j in range(num_images): line_arr = file.readline().split(",") targets_array[j] = int(line_arr[0]) images_array[j] = [float(pixel) for pixel in line_arr[1:]] return images_array.reshape((num_images, *image_shape)), targets_array
[((2156, 2187), 'numpy.zeros', 'np.zeros', (['num_images'], {'dtype': 'int'}), '(num_images, dtype=int)\n', (2164, 2187), True, 'import numpy as np\n'), ((2095, 2115), 'numpy.prod', 'np.prod', (['image_shape'], {}), '(image_shape)\n', (2102, 2115), True, 'import numpy as np\n'), ((845, 859), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (853, 859), True, 'import numpy as np\n')]
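The loaders in input_data_loader.py above expect one comma-separated pixel file per image, named `image<N>`; a small sketch that writes two fake 28x28 images to a temporary directory and reads them back (file contents are random, and the import assumes the repository's `src` tree is importable):

import os
import tempfile
import numpy as np
from src.data_loader.input_data_loader import load_mnist_human_readable

tmp_dir = tempfile.mkdtemp()
for i in range(2):
    # 28*28 comma-separated floats with a trailing comma, matching the split(",")[:-1] parsing in load_img.
    pixels = np.random.rand(28 * 28)
    with open(os.path.join(tmp_dir, "image" + str(i)), "w") as f:
        f.write(",".join(str(p) for p in pixels) + ",")

images = load_mnist_human_readable(tmp_dir, [0, 1])
print(images.shape)  # (2, 28, 28)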
hirokiyaginuma/scriptspinner-software
ui_splash_screen.py
87185f237f76feeee33a2b74a4d05be088bde011
# -*- coding: utf-8 -*- ################################################################################ ## Form generated from reading UI file 'splash_screen.ui' ## ## Created by: Qt User Interface Compiler version 5.15.1 ## ## WARNING! All changes made in this file will be lost when recompiling UI file! ################################################################################ from PySide2.QtCore import * from PySide2.QtGui import * from PySide2.QtWidgets import * class Ui_Splash_Screen(object): def setupUi(self, Splash_Screen): if not Splash_Screen.objectName(): Splash_Screen.setObjectName(u"Splash_Screen") Splash_Screen.resize(720, 425) self.centralwidget = QWidget(Splash_Screen) self.centralwidget.setObjectName(u"centralwidget") self.verticalLayout = QVBoxLayout(self.centralwidget) self.verticalLayout.setSpacing(0) self.verticalLayout.setObjectName(u"verticalLayout") self.verticalLayout.setContentsMargins(0, 0, 0, 0) self.frame = QFrame(self.centralwidget) self.frame.setObjectName(u"frame") self.frame.setFrameShape(QFrame.StyledPanel) self.frame.setFrameShadow(QFrame.Raised) self.frame.setLineWidth(0) self.label = QLabel(self.frame) self.label.setObjectName(u"label") self.label.setGeometry(QRect(0, 0, 720, 425)) self.label.setLineWidth(0) self.label.setPixmap(QPixmap(u"img/SS_logo.jpg")) self.label.setIndent(0) self.progressBar = QProgressBar(self.frame) self.progressBar.setObjectName(u"progressBar") self.progressBar.setGeometry(QRect(70, 330, 591, 41)) self.progressBar.setStyleSheet(u"QProgressBar {\n" " background-color:rgb(149, 165, 166);\n" " border-style: none;\n" " border-radius: 10px;\n" " text-align: center;\n" "}\n" "QProgressBar::chunk {\n" " border-radius: 10px;\n" " background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0 rgba(210, 157, 255, 255), stop:1 rgba(156, 69, 255, 255));\n" "}") self.progressBar.setValue(24) self.verticalLayout.addWidget(self.frame) Splash_Screen.setCentralWidget(self.centralwidget) self.retranslateUi(Splash_Screen) QMetaObject.connectSlotsByName(Splash_Screen) # setupUi def retranslateUi(self, Splash_Screen): Splash_Screen.setWindowTitle(QCoreApplication.translate("Splash_Screen", u"MainWindow", None)) self.label.setText("") # retranslateUi
[]
tedye/leetcode
tools/leetcode.112.Path Sum/leetcode.112.Path Sum.submission10.py
975d7e3b8cb9b6be9e80e07febf4bcf6414acd46
# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None

class Solution:
    # @param {TreeNode} root
    # @param {integer} sum
    # @return {boolean}
    def hasPathSum(self, root, sum):
        if not root:
            return False
        if not root.right and not root.left:
            return sum == root.val
        r = False
        l = False
        if root.right:
            r = self.hasPathSum(root.right,sum-root.val)
        if root.left:
            l = self.hasPathSum(root.left,sum-root.val)
        return r or l
[]
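A tiny driver for the `hasPathSum` solution above, meant to be pasted into the same file as the `Solution` class; `TreeNode` is only sketched in the commented header, so it is redefined here and the tree values are arbitrary:

class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

# Tree:  5      root-to-leaf sums: 5+4+11 = 20 and 5+8 = 13
#       / \
#      4   8
#     /
#    11
root = TreeNode(5)
root.left = TreeNode(4)
root.right = TreeNode(8)
root.left.left = TreeNode(11)

solver = Solution()
print(solver.hasPathSum(root, 20))  # True
print(solver.hasPathSum(root, 13))  # True
print(solver.hasPathSum(root, 9))   # False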
danbirken/pandas
pandas/io/sql.py
fa8a5ca1dd27c4169727070ddbdcb248002fddb4
""" Collection of query wrappers / abstractions to both facilitate data retrieval and to reduce dependency on DB-specific API. """ from __future__ import print_function, division from datetime import datetime, date, timedelta import warnings import traceback import itertools import re import numpy as np import pandas.core.common as com from pandas.compat import lzip, map, zip, raise_with_traceback, string_types from pandas.core.api import DataFrame, Series from pandas.core.base import PandasObject from pandas.tseries.tools import to_datetime class SQLAlchemyRequired(ImportError): pass class DatabaseError(IOError): pass #------------------------------------------------------------------------------ # Helper functions def _convert_params(sql, params): """convert sql and params args to DBAPI2.0 compliant format""" args = [sql] if params is not None: if hasattr(params, 'keys'): # test if params is a mapping args += [params] else: args += [list(params)] return args def _handle_date_column(col, format=None): if isinstance(format, dict): return to_datetime(col, **format) else: if format in ['D', 's', 'ms', 'us', 'ns']: return to_datetime(col, coerce=True, unit=format) elif issubclass(col.dtype.type, np.floating) or issubclass(col.dtype.type, np.integer): # parse dates as timestamp format = 's' if format is None else format return to_datetime(col, coerce=True, unit=format) else: return to_datetime(col, coerce=True, format=format) def _parse_date_columns(data_frame, parse_dates): """ Force non-datetime columns to be read as such. Supports both string formatted and integer timestamp columns """ # handle non-list entries for parse_dates gracefully if parse_dates is True or parse_dates is None or parse_dates is False: parse_dates = [] if not hasattr(parse_dates, '__iter__'): parse_dates = [parse_dates] for col_name in parse_dates: df_col = data_frame[col_name] try: fmt = parse_dates[col_name] except TypeError: fmt = None data_frame[col_name] = _handle_date_column(df_col, format=fmt) return data_frame def execute(sql, con, cur=None, params=None): """ Execute the given SQL query using the provided connection object. Parameters ---------- sql : string Query to be executed con : SQLAlchemy engine or sqlite3 DBAPI2 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. cur : depreciated, cursor is obtained from connection params : list or tuple, optional List of parameters to pass to execute method. Returns ------- Results Iterable """ if cur is None: pandas_sql = pandasSQL_builder(con) else: pandas_sql = pandasSQL_builder(cur, is_cursor=True) args = _convert_params(sql, params) return pandas_sql.execute(*args) #------------------------------------------------------------------------------ #--- Deprecated tquery and uquery def _safe_fetch(cur): try: result = cur.fetchall() if not isinstance(result, list): result = list(result) return result except Exception as e: # pragma: no cover excName = e.__class__.__name__ if excName == 'OperationalError': return [] def tquery(sql, con=None, cur=None, retry=True): """ DEPRECATED. Returns list of tuples corresponding to each row in given sql query. If only one column selected, then plain list is returned. 
To obtain the same result in the future, you can use the following: >>> execute(sql, con, params).fetchall() Parameters ---------- sql: string SQL query to be executed con: DBAPI2 connection cur: depreciated, cursor is obtained from connection Returns ------- Results Iterable """ warnings.warn( "tquery is depreciated, and will be removed in future versions. " "You can use ``execute(...).fetchall()`` instead.", FutureWarning) cur = execute(sql, con, cur=cur) result = _safe_fetch(cur) if con is not None: try: cur.close() con.commit() except Exception as e: excName = e.__class__.__name__ if excName == 'OperationalError': # pragma: no cover print('Failed to commit, may need to restart interpreter') else: raise traceback.print_exc() if retry: return tquery(sql, con=con, retry=False) if result and len(result[0]) == 1: # python 3 compat result = list(lzip(*result)[0]) elif result is None: # pragma: no cover result = [] return result def uquery(sql, con=None, cur=None, retry=True, params=None): """ DEPRECATED. Does the same thing as tquery, but instead of returning results, it returns the number of rows affected. Good for update queries. To obtain the same result in the future, you can use the following: >>> execute(sql, con).rowcount Parameters ---------- sql: string SQL query to be executed con: DBAPI2 connection cur: depreciated, cursor is obtained from connection params: list or tuple, optional List of parameters to pass to execute method. Returns ------- Number of affected rows """ warnings.warn( "uquery is depreciated, and will be removed in future versions. " "You can use ``execute(...).rowcount`` instead.", FutureWarning) cur = execute(sql, con, cur=cur, params=params) result = cur.rowcount try: con.commit() except Exception as e: excName = e.__class__.__name__ if excName != 'OperationalError': raise traceback.print_exc() if retry: print('Looks like your connection failed, reconnecting...') return uquery(sql, con, retry=False) return result #------------------------------------------------------------------------------ #--- Read and write to DataFrames def read_sql_table(table_name, con, index_col=None, coerce_float=True, parse_dates=None, columns=None): """Read SQL database table into a DataFrame. Given a table name and an SQLAlchemy engine, returns a DataFrame. This function does not support DBAPI connections. Parameters ---------- table_name : string Name of SQL table in database con : SQLAlchemy engine Sqlite DBAPI conncection mode not supported index_col : string, optional Column to set as index coerce_float : boolean, default True Attempt to convert values to non-string, non-numeric objects (like decimal.Decimal) to floating point. Can result in loss of Precision. parse_dates : list or dict - List of column names to parse as dates - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times or is one of (D, s, ns, ms, us) in case of parsing integer timestamps - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite columns : list List of column names to select from sql table Returns ------- DataFrame See also -------- read_sql_query : Read SQL query into a DataFrame. 
read_sql """ pandas_sql = PandasSQLAlchemy(con) table = pandas_sql.read_table( table_name, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, columns=columns) if table is not None: return table else: raise ValueError("Table %s not found" % table_name, con) def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None): """Read SQL query into a DataFrame. Returns a DataFrame corresponding to the result set of the query string. Optionally provide an `index_col` parameter to use one of the columns as the index, otherwise default integer index will be used. Parameters ---------- sql : string SQL query to be executed con : SQLAlchemy engine or sqlite3 DBAPI2 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. index_col : string, optional Column name to use as index for the returned DataFrame object. coerce_float : boolean, default True Attempt to convert values to non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets params : list, tuple or dict, optional List of parameters to pass to execute method. parse_dates : list or dict - List of column names to parse as dates - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times or is one of (D, s, ns, ms, us) in case of parsing integer timestamps - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite Returns ------- DataFrame See also -------- read_sql_table : Read SQL database table into a DataFrame read_sql """ pandas_sql = pandasSQL_builder(con) return pandas_sql.read_sql( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates) def read_sql(sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, columns=None): """ Read SQL query or database table into a DataFrame. Parameters ---------- sql : string SQL query to be executed or database table name. con : SQLAlchemy engine or DBAPI2 connection (legacy mode) Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. index_col : string, optional column name to use as index for the returned DataFrame object. coerce_float : boolean, default True Attempt to convert values to non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets params : list, tuple or dict, optional List of parameters to pass to execute method. parse_dates : list or dict - List of column names to parse as dates - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times or is one of (D, s, ns, ms, us) in case of parsing integer timestamps - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite columns : list List of column names to select from sql table (only used when reading a table). Returns ------- DataFrame Notes ----- This function is a convenience wrapper around ``read_sql_table`` and ``read_sql_query`` (and for backward compatibility) and will delegate to the specific function depending on the provided input (database table name or sql query). 
See also -------- read_sql_table : Read SQL database table into a DataFrame read_sql_query : Read SQL query into a DataFrame """ pandas_sql = pandasSQL_builder(con) if isinstance(pandas_sql, PandasSQLLegacy): return pandas_sql.read_sql( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates) if pandas_sql.has_table(sql): return pandas_sql.read_table( sql, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, columns=columns) else: return pandas_sql.read_sql( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates) def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True, index_label=None): """ Write records stored in a DataFrame to a SQL database. Parameters ---------- frame : DataFrame name : string Name of SQL table con : SQLAlchemy engine or sqlite3 DBAPI2 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. flavor : {'sqlite', 'mysql'}, default 'sqlite' The flavor of SQL to use. Ignored when using SQLAlchemy engine. 'mysql' is deprecated and will be removed in future versions, but it will be further supported through SQLAlchemy engines. if_exists : {'fail', 'replace', 'append'}, default 'fail' - fail: If table exists, do nothing. - replace: If table exists, drop it, recreate it, and insert data. - append: If table exists, insert data. Create if does not exist. index : boolean, default True Write DataFrame index as a column index_label : string or sequence, default None Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. """ if if_exists not in ('fail', 'replace', 'append'): raise ValueError("'{0}' is not valid for if_exists".format(if_exists)) pandas_sql = pandasSQL_builder(con, flavor=flavor) if isinstance(frame, Series): frame = frame.to_frame() elif not isinstance(frame, DataFrame): raise NotImplementedError pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index, index_label=index_label) def has_table(table_name, con, flavor='sqlite'): """ Check if DataBase has named table. Parameters ---------- table_name: string Name of SQL table con: SQLAlchemy engine or sqlite3 DBAPI2 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. flavor: {'sqlite', 'mysql'}, default 'sqlite' The flavor of SQL to use. Ignored when using SQLAlchemy engine. 'mysql' is deprecated and will be removed in future versions, but it will be further supported through SQLAlchemy engines. Returns ------- boolean """ pandas_sql = pandasSQL_builder(con, flavor=flavor) return pandas_sql.has_table(table_name) table_exists = has_table _MYSQL_WARNING = ("The 'mysql' flavor with DBAPI connection is deprecated " "and will be removed in future versions. " "MySQL will be further supported with SQLAlchemy engines.") def pandasSQL_builder(con, flavor=None, meta=None, is_cursor=False): """ Convenience function to return the correct PandasSQL subclass based on the provided parameters """ # When support for DBAPI connections is removed, # is_cursor should not be necessary. 
try: import sqlalchemy if isinstance(con, sqlalchemy.engine.Engine): return PandasSQLAlchemy(con, meta=meta) else: if flavor == 'mysql': warnings.warn(_MYSQL_WARNING, FutureWarning) return PandasSQLLegacy(con, flavor, is_cursor=is_cursor) except ImportError: if flavor == 'mysql': warnings.warn(_MYSQL_WARNING, FutureWarning) return PandasSQLLegacy(con, flavor, is_cursor=is_cursor) class PandasSQLTable(PandasObject): """ For mapping Pandas tables to SQL tables. Uses fact that table is reflected by SQLAlchemy to do better type convertions. Also holds various flags needed to avoid having to pass them between functions all the time. """ # TODO: support for multiIndex def __init__(self, name, pandas_sql_engine, frame=None, index=True, if_exists='fail', prefix='pandas', index_label=None): self.name = name self.pd_sql = pandas_sql_engine self.prefix = prefix self.frame = frame self.index = self._index_name(index, index_label) if frame is not None: # We want to write a frame if self.pd_sql.has_table(self.name): if if_exists == 'fail': raise ValueError("Table '%s' already exists." % name) elif if_exists == 'replace': self.pd_sql.drop_table(self.name) self.table = self._create_table_statement() self.create() elif if_exists == 'append': self.table = self.pd_sql.get_table(self.name) if self.table is None: self.table = self._create_table_statement() else: raise ValueError( "'{0}' is not valid for if_exists".format(if_exists)) else: self.table = self._create_table_statement() self.create() else: # no data provided, read-only mode self.table = self.pd_sql.get_table(self.name) if self.table is None: raise ValueError("Could not init table '%s'" % name) def exists(self): return self.pd_sql.has_table(self.name) def sql_schema(self): from sqlalchemy.schema import CreateTable return str(CreateTable(self.table)) def create(self): self.table.create() def insert_statement(self): return self.table.insert() def maybe_asscalar(self, i): try: return np.asscalar(i) except AttributeError: return i def insert_data(self): if self.index is not None: temp = self.frame.copy() temp.index.names = self.index try: temp.reset_index(inplace=True) except ValueError as err: raise ValueError( "duplicate name in index/columns: {0}".format(err)) else: temp = self.frame return temp def insert(self): ins = self.insert_statement() data_list = [] temp = self.insert_data() keys = temp.columns for t in temp.itertuples(): data = dict((k, self.maybe_asscalar(v)) for k, v in zip(keys, t[1:])) data_list.append(data) self.pd_sql.execute(ins, data_list) def read(self, coerce_float=True, parse_dates=None, columns=None): if columns is not None and len(columns) > 0: from sqlalchemy import select cols = [self.table.c[n] for n in columns] if self.index is not None: [cols.insert(0, self.table.c[idx]) for idx in self.index[::-1]] sql_select = select(cols) else: sql_select = self.table.select() result = self.pd_sql.execute(sql_select) data = result.fetchall() column_names = result.keys() self.frame = DataFrame.from_records( data, columns=column_names, coerce_float=coerce_float) self._harmonize_columns(parse_dates=parse_dates) if self.index is not None: self.frame.set_index(self.index, inplace=True) return self.frame def _index_name(self, index, index_label): # for writing: index=True to include index in sql table if index is True: nlevels = self.frame.index.nlevels # if index_label is specified, set this as index name(s) if index_label is not None: if not isinstance(index_label, list): index_label = [index_label] if len(index_label) != nlevels: raise 
ValueError( "Length of 'index_label' should match number of " "levels, which is {0}".format(nlevels)) else: return index_label # return the used column labels for the index columns if nlevels == 1 and 'index' not in self.frame.columns and self.frame.index.name is None: return ['index'] else: return [l if l is not None else "level_{0}".format(i) for i, l in enumerate(self.frame.index.names)] # for reading: index=(list of) string to specify column to set as index elif isinstance(index, string_types): return [index] elif isinstance(index, list): return index else: return None def _create_table_statement(self): from sqlalchemy import Table, Column columns = list(map(str, self.frame.columns)) column_types = map(self._sqlalchemy_type, self.frame.dtypes) columns = [Column(name, typ) for name, typ in zip(columns, column_types)] if self.index is not None: for i, idx_label in enumerate(self.index[::-1]): idx_type = self._sqlalchemy_type( self.frame.index.get_level_values(i)) columns.insert(0, Column(idx_label, idx_type, index=True)) return Table(self.name, self.pd_sql.meta, *columns) def _harmonize_columns(self, parse_dates=None): """ Make a data_frame's column type align with an sql_table column types Need to work around limited NA value support. Floats are always fine, ints must always be floats if there are Null values. Booleans are hard because converting bool column with None replaces all Nones with false. Therefore only convert bool if there are no NA values. Datetimes should already be converted to np.datetime if supported, but here we also force conversion if required """ # handle non-list entries for parse_dates gracefully if parse_dates is True or parse_dates is None or parse_dates is False: parse_dates = [] if not hasattr(parse_dates, '__iter__'): parse_dates = [parse_dates] for sql_col in self.table.columns: col_name = sql_col.name try: df_col = self.frame[col_name] # the type the dataframe column should have col_type = self._numpy_type(sql_col.type) if col_type is datetime or col_type is date: if not issubclass(df_col.dtype.type, np.datetime64): self.frame[col_name] = _handle_date_column(df_col) elif col_type is float: # floats support NA, can always convert! self.frame[col_name].astype(col_type, copy=False) elif len(df_col) == df_col.count(): # No NA values, can convert ints and bools if col_type is int or col_type is bool: self.frame[col_name].astype(col_type, copy=False) # Handle date parsing if col_name in parse_dates: try: fmt = parse_dates[col_name] except TypeError: fmt = None self.frame[col_name] = _handle_date_column( df_col, format=fmt) except KeyError: pass # this column not in results def _sqlalchemy_type(self, arr_or_dtype): from sqlalchemy.types import Integer, Float, Text, Boolean, DateTime, Date, Interval if arr_or_dtype is date: return Date if com.is_datetime64_dtype(arr_or_dtype): try: tz = arr_or_dtype.tzinfo return DateTime(timezone=True) except: return DateTime if com.is_timedelta64_dtype(arr_or_dtype): warnings.warn("the 'timedelta' type is not supported, and will be " "written as integer values (ns frequency) to the " "database.", UserWarning) return Integer elif com.is_float_dtype(arr_or_dtype): return Float elif com.is_integer_dtype(arr_or_dtype): # TODO: Refine integer size. return Integer elif com.is_bool(arr_or_dtype): return Boolean return Text def _numpy_type(self, sqltype): from sqlalchemy.types import Integer, Float, Boolean, DateTime, Date if isinstance(sqltype, Float): return float if isinstance(sqltype, Integer): # TODO: Refine integer size. 
return int if isinstance(sqltype, DateTime): # Caution: np.datetime64 is also a subclass of np.number. return datetime if isinstance(sqltype, Date): return date if isinstance(sqltype, Boolean): return bool return object class PandasSQL(PandasObject): """ Subclasses Should define read_sql and to_sql """ def read_sql(self, *args, **kwargs): raise ValueError( "PandasSQL must be created with an SQLAlchemy engine or connection+sql flavor") def to_sql(self, *args, **kwargs): raise ValueError( "PandasSQL must be created with an SQLAlchemy engine or connection+sql flavor") class PandasSQLAlchemy(PandasSQL): """ This class enables convertion between DataFrame and SQL databases using SQLAlchemy to handle DataBase abstraction """ def __init__(self, engine, meta=None): self.engine = engine if not meta: from sqlalchemy.schema import MetaData meta = MetaData(self.engine) meta.reflect(self.engine) self.meta = meta def execute(self, *args, **kwargs): """Simple passthrough to SQLAlchemy engine""" return self.engine.execute(*args, **kwargs) def read_table(self, table_name, index_col=None, coerce_float=True, parse_dates=None, columns=None): table = PandasSQLTable(table_name, self, index=index_col) return table.read(coerce_float=coerce_float, parse_dates=parse_dates, columns=columns) def read_sql(self, sql, index_col=None, coerce_float=True, parse_dates=None, params=None): args = _convert_params(sql, params) result = self.execute(*args) data = result.fetchall() columns = result.keys() data_frame = DataFrame.from_records( data, columns=columns, coerce_float=coerce_float) _parse_date_columns(data_frame, parse_dates) if index_col is not None: data_frame.set_index(index_col, inplace=True) return data_frame def to_sql(self, frame, name, if_exists='fail', index=True, index_label=None): table = PandasSQLTable( name, self, frame=frame, index=index, if_exists=if_exists, index_label=index_label) table.insert() @property def tables(self): return self.meta.tables def has_table(self, name): if self.meta.tables.get(name) is not None: return True else: return False def get_table(self, table_name): return self.meta.tables.get(table_name) def drop_table(self, table_name): if self.engine.has_table(table_name): self.get_table(table_name).drop() self.meta.clear() self.meta.reflect() def _create_sql_schema(self, frame, table_name): table = PandasSQLTable(table_name, self, frame=frame) return str(table.sql_schema()) # ---- SQL without SQLAlchemy --- # Flavour specific sql strings and handler class for access to DBs without # SQLAlchemy installed # SQL type convertions for each DB _SQL_TYPES = { 'text': { 'mysql': 'VARCHAR (63)', 'sqlite': 'TEXT', }, 'float': { 'mysql': 'FLOAT', 'sqlite': 'REAL', }, 'int': { 'mysql': 'BIGINT', 'sqlite': 'INTEGER', }, 'datetime': { 'mysql': 'DATETIME', 'sqlite': 'TIMESTAMP', }, 'date': { 'mysql': 'DATE', 'sqlite': 'TIMESTAMP', }, 'bool': { 'mysql': 'BOOLEAN', 'sqlite': 'INTEGER', } } # SQL enquote and wildcard symbols _SQL_SYMB = { 'mysql': { 'br_l': '`', 'br_r': '`', 'wld': '%s' }, 'sqlite': { 'br_l': '[', 'br_r': ']', 'wld': '?' } } _SAFE_NAMES_WARNING = ("The spaces in these column names will not be changed. " "In pandas versions < 0.14, spaces were converted to " "underscores.") class PandasSQLTableLegacy(PandasSQLTable): """Patch the PandasSQLTable for legacy support. 
Instead of a table variable just use the Create Table statement""" def sql_schema(self): return str(self.table) def create(self): self.pd_sql.execute(self.table) def insert_statement(self): names = list(map(str, self.frame.columns)) flv = self.pd_sql.flavor br_l = _SQL_SYMB[flv]['br_l'] # left val quote char br_r = _SQL_SYMB[flv]['br_r'] # right val quote char wld = _SQL_SYMB[flv]['wld'] # wildcard char if self.index is not None: [names.insert(0, idx) for idx in self.index[::-1]] bracketed_names = [br_l + column + br_r for column in names] col_names = ','.join(bracketed_names) wildcards = ','.join([wld] * len(names)) insert_statement = 'INSERT INTO %s (%s) VALUES (%s)' % ( self.name, col_names, wildcards) return insert_statement def insert(self): ins = self.insert_statement() temp = self.insert_data() data_list = [] for t in temp.itertuples(): data = tuple((self.maybe_asscalar(v) for v in t[1:])) data_list.append(data) cur = self.pd_sql.con.cursor() cur.executemany(ins, data_list) cur.close() self.pd_sql.con.commit() def _create_table_statement(self): "Return a CREATE TABLE statement to suit the contents of a DataFrame." columns = list(map(str, self.frame.columns)) pat = re.compile('\s+') if any(map(pat.search, columns)): warnings.warn(_SAFE_NAMES_WARNING) column_types = [self._sql_type_name(typ) for typ in self.frame.dtypes] if self.index is not None: for i, idx_label in enumerate(self.index[::-1]): columns.insert(0, idx_label) column_types.insert(0, self._sql_type_name(self.frame.index.get_level_values(i).dtype)) flv = self.pd_sql.flavor br_l = _SQL_SYMB[flv]['br_l'] # left val quote char br_r = _SQL_SYMB[flv]['br_r'] # right val quote char col_template = br_l + '%s' + br_r + ' %s' columns = ',\n '.join(col_template % x for x in zip(columns, column_types)) template = """CREATE TABLE %(name)s ( %(columns)s )""" create_statement = template % {'name': self.name, 'columns': columns} return create_statement def _sql_type_name(self, dtype): pytype = dtype.type pytype_name = "text" if issubclass(pytype, np.floating): pytype_name = "float" elif com.is_timedelta64_dtype(pytype): warnings.warn("the 'timedelta' type is not supported, and will be " "written as integer values (ns frequency) to the " "database.", UserWarning) pytype_name = "int" elif issubclass(pytype, np.integer): pytype_name = "int" elif issubclass(pytype, np.datetime64) or pytype is datetime: # Caution: np.datetime64 is also a subclass of np.number. 
pytype_name = "datetime" elif pytype is datetime.date: pytype_name = "date" elif issubclass(pytype, np.bool_): pytype_name = "bool" return _SQL_TYPES[pytype_name][self.pd_sql.flavor] class PandasSQLLegacy(PandasSQL): def __init__(self, con, flavor, is_cursor=False): self.is_cursor = is_cursor self.con = con if flavor is None: flavor = 'sqlite' if flavor not in ['sqlite', 'mysql']: raise NotImplementedError else: self.flavor = flavor def execute(self, *args, **kwargs): if self.is_cursor: cur = self.con else: cur = self.con.cursor() try: if kwargs: cur.execute(*args, **kwargs) else: cur.execute(*args) return cur except Exception as e: try: self.con.rollback() except Exception: # pragma: no cover ex = DatabaseError( "Execution failed on sql: %s\n%s\nunable to rollback" % (args[0], e)) raise_with_traceback(ex) ex = DatabaseError("Execution failed on sql: %s" % args[0]) raise_with_traceback(ex) def read_sql(self, sql, index_col=None, coerce_float=True, params=None, parse_dates=None): args = _convert_params(sql, params) cursor = self.execute(*args) columns = [col_desc[0] for col_desc in cursor.description] data = self._fetchall_as_list(cursor) cursor.close() data_frame = DataFrame.from_records( data, columns=columns, coerce_float=coerce_float) _parse_date_columns(data_frame, parse_dates) if index_col is not None: data_frame.set_index(index_col, inplace=True) return data_frame def _fetchall_as_list(self, cur): result = cur.fetchall() if not isinstance(result, list): result = list(result) return result def to_sql(self, frame, name, if_exists='fail', index=True, index_label=None): """ Write records stored in a DataFrame to a SQL database. Parameters ---------- frame: DataFrame name: name of SQL table flavor: {'sqlite', 'mysql'}, default 'sqlite' if_exists: {'fail', 'replace', 'append'}, default 'fail' fail: If table exists, do nothing. replace: If table exists, drop it, recreate it, and insert data. append: If table exists, insert data. Create if does not exist. """ table = PandasSQLTableLegacy( name, self, frame=frame, index=index, if_exists=if_exists, index_label=index_label) table.insert() def has_table(self, name): flavor_map = { 'sqlite': ("SELECT name FROM sqlite_master " "WHERE type='table' AND name='%s';") % name, 'mysql': "SHOW TABLES LIKE '%s'" % name} query = flavor_map.get(self.flavor) return len(self.execute(query).fetchall()) > 0 def get_table(self, table_name): return None # not supported in Legacy mode def drop_table(self, name): drop_sql = "DROP TABLE %s" % name self.execute(drop_sql) def _create_sql_schema(self, frame, table_name): table = PandasSQLTableLegacy(table_name, self, frame=frame) return str(table.sql_schema()) def get_schema(frame, name, flavor='sqlite', keys=None, con=None): """ Get the SQL db table schema for the given frame. Parameters ---------- frame : DataFrame name : string name of SQL table flavor : {'sqlite', 'mysql'}, default 'sqlite' The flavor of SQL to use. Ignored when using SQLAlchemy engine. 'mysql' is deprecated and will be removed in future versions, but it will be further supported through SQLAlchemy engines. keys : string or sequence columns to use a primary key con: an open SQL database connection object or an SQLAlchemy engine Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. 
""" if con is None: if flavor == 'mysql': warnings.warn(_MYSQL_WARNING, FutureWarning) return _get_schema_legacy(frame, name, flavor, keys) pandas_sql = pandasSQL_builder(con=con, flavor=flavor) return pandas_sql._create_sql_schema(frame, name) def _get_schema_legacy(frame, name, flavor, keys=None): """Old function from 0.13.1. To keep backwards compatibility. When mysql legacy support is dropped, it should be possible to remove this code """ def get_sqltype(dtype, flavor): pytype = dtype.type pytype_name = "text" if issubclass(pytype, np.floating): pytype_name = "float" elif issubclass(pytype, np.integer): pytype_name = "int" elif issubclass(pytype, np.datetime64) or pytype is datetime: # Caution: np.datetime64 is also a subclass of np.number. pytype_name = "datetime" elif pytype is datetime.date: pytype_name = "date" elif issubclass(pytype, np.bool_): pytype_name = "bool" return _SQL_TYPES[pytype_name][flavor] lookup_type = lambda dtype: get_sqltype(dtype, flavor) column_types = lzip(frame.dtypes.index, map(lookup_type, frame.dtypes)) if flavor == 'sqlite': columns = ',\n '.join('[%s] %s' % x for x in column_types) else: columns = ',\n '.join('`%s` %s' % x for x in column_types) keystr = '' if keys is not None: if isinstance(keys, string_types): keys = (keys,) keystr = ', PRIMARY KEY (%s)' % ','.join(keys) template = """CREATE TABLE %(name)s ( %(columns)s %(keystr)s );""" create_statement = template % {'name': name, 'columns': columns, 'keystr': keystr} return create_statement # legacy names, with depreciation warnings and copied docs def read_frame(*args, **kwargs): """DEPRECIATED - use read_sql """ warnings.warn("read_frame is depreciated, use read_sql", FutureWarning) return read_sql(*args, **kwargs) def frame_query(*args, **kwargs): """DEPRECIATED - use read_sql """ warnings.warn("frame_query is depreciated, use read_sql", FutureWarning) return read_sql(*args, **kwargs) def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs): """DEPRECIATED - use to_sql Write records stored in a DataFrame to a SQL database. Parameters ---------- frame : DataFrame name : string con : DBAPI2 connection flavor : {'sqlite', 'mysql'}, default 'sqlite' The flavor of SQL to use. if_exists : {'fail', 'replace', 'append'}, default 'fail' - fail: If table exists, do nothing. - replace: If table exists, drop it, recreate it, and insert data. - append: If table exists, insert data. Create if does not exist. index : boolean, default False Write DataFrame index as a column Notes ----- This function is deprecated in favor of ``to_sql``. There are however two differences: - With ``to_sql`` the index is written to the sql database by default. To keep the behaviour this function you need to specify ``index=False``. - The new ``to_sql`` function supports sqlalchemy engines to work with different sql flavors. See also -------- pandas.DataFrame.to_sql """ warnings.warn("write_frame is depreciated, use to_sql", FutureWarning) # for backwards compatibility, set index=False when not specified index = kwargs.pop('index', False) return to_sql(frame, name, con, flavor=flavor, if_exists=if_exists, index=index, **kwargs) # Append wrapped function docstrings read_frame.__doc__ += read_sql.__doc__ frame_query.__doc__ += read_sql.__doc__
[((4112, 4265), 'warnings.warn', 'warnings.warn', (['"""tquery is depreciated, and will be removed in future versions. You can use ``execute(...).fetchall()`` instead."""', 'FutureWarning'], {}), "(\n 'tquery is depreciated, and will be removed in future versions. You can use ``execute(...).fetchall()`` instead.'\n , FutureWarning)\n", (4125, 4265), False, 'import warnings\n'), ((5648, 5799), 'warnings.warn', 'warnings.warn', (['"""uquery is depreciated, and will be removed in future versions. You can use ``execute(...).rowcount`` instead."""', 'FutureWarning'], {}), "(\n 'uquery is depreciated, and will be removed in future versions. You can use ``execute(...).rowcount`` instead.'\n , FutureWarning)\n", (5661, 5799), False, 'import warnings\n'), ((39194, 39265), 'warnings.warn', 'warnings.warn', (['"""read_frame is depreciated, use read_sql"""', 'FutureWarning'], {}), "('read_frame is depreciated, use read_sql', FutureWarning)\n", (39207, 39265), False, 'import warnings\n'), ((39385, 39457), 'warnings.warn', 'warnings.warn', (['"""frame_query is depreciated, use read_sql"""', 'FutureWarning'], {}), "('frame_query is depreciated, use read_sql', FutureWarning)\n", (39398, 39457), False, 'import warnings\n'), ((40629, 40699), 'warnings.warn', 'warnings.warn', (['"""write_frame is depreciated, use to_sql"""', 'FutureWarning'], {}), "('write_frame is depreciated, use to_sql', FutureWarning)\n", (40642, 40699), False, 'import warnings\n'), ((1142, 1168), 'pandas.tseries.tools.to_datetime', 'to_datetime', (['col'], {}), '(col, **format)\n', (1153, 1168), False, 'from pandas.tseries.tools import to_datetime\n'), ((19824, 19901), 'pandas.core.api.DataFrame.from_records', 'DataFrame.from_records', (['data'], {'columns': 'column_names', 'coerce_float': 'coerce_float'}), '(data, columns=column_names, coerce_float=coerce_float)\n', (19846, 19901), False, 'from pandas.core.api import DataFrame, Series\n'), ((21552, 21597), 'pandas.compat.map', 'map', (['self._sqlalchemy_type', 'self.frame.dtypes'], {}), '(self._sqlalchemy_type, self.frame.dtypes)\n', (21555, 21597), False, 'from pandas.compat import lzip, map, zip, raise_with_traceback, string_types\n'), ((21996, 22040), 'sqlalchemy.Table', 'Table', (['self.name', 'self.pd_sql.meta', '*columns'], {}), '(self.name, self.pd_sql.meta, *columns)\n', (22001, 22040), False, 'from sqlalchemy import Table, Column\n'), ((24471, 24508), 'pandas.core.common.is_datetime64_dtype', 'com.is_datetime64_dtype', (['arr_or_dtype'], {}), '(arr_or_dtype)\n', (24494, 24508), True, 'import pandas.core.common as com\n'), ((24678, 24716), 'pandas.core.common.is_timedelta64_dtype', 'com.is_timedelta64_dtype', (['arr_or_dtype'], {}), '(arr_or_dtype)\n', (24702, 24716), True, 'import pandas.core.common as com\n'), ((27386, 27458), 'pandas.core.api.DataFrame.from_records', 'DataFrame.from_records', (['data'], {'columns': 'columns', 'coerce_float': 'coerce_float'}), '(data, columns=columns, coerce_float=coerce_float)\n', (27408, 27458), False, 'from pandas.core.api import DataFrame, Series\n'), ((31206, 31224), 're.compile', 're.compile', (['"""\\\\s+"""'], {}), "('\\\\s+')\n", (31216, 31224), False, 'import re\n'), ((34505, 34577), 'pandas.core.api.DataFrame.from_records', 'DataFrame.from_records', (['data'], {'columns': 'columns', 'coerce_float': 'coerce_float'}), '(data, columns=columns, coerce_float=coerce_float)\n', (34527, 34577), False, 'from pandas.core.api import DataFrame, Series\n'), ((38406, 38436), 'pandas.compat.map', 'map', (['lookup_type', 'frame.dtypes'], {}), 
'(lookup_type, frame.dtypes)\n', (38409, 38436), False, 'from pandas.compat import lzip, map, zip, raise_with_traceback, string_types\n'), ((1249, 1291), 'pandas.tseries.tools.to_datetime', 'to_datetime', (['col'], {'coerce': '(True)', 'unit': 'format'}), '(col, coerce=True, unit=format)\n', (1260, 1291), False, 'from pandas.tseries.tools import to_datetime\n'), ((6063, 6084), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (6082, 6084), False, 'import traceback\n'), ((18183, 18206), 'sqlalchemy.schema.CreateTable', 'CreateTable', (['self.table'], {}), '(self.table)\n', (18194, 18206), False, 'from sqlalchemy.schema import CreateTable\n'), ((18393, 18407), 'numpy.asscalar', 'np.asscalar', (['i'], {}), '(i)\n', (18404, 18407), True, 'import numpy as np\n'), ((19610, 19622), 'sqlalchemy.select', 'select', (['cols'], {}), '(cols)\n', (19616, 19622), False, 'from sqlalchemy import select\n'), ((21499, 21527), 'pandas.compat.map', 'map', (['str', 'self.frame.columns'], {}), '(str, self.frame.columns)\n', (21502, 21527), False, 'from pandas.compat import lzip, map, zip, raise_with_traceback, string_types\n'), ((21618, 21635), 'sqlalchemy.Column', 'Column', (['name', 'typ'], {}), '(name, typ)\n', (21624, 21635), False, 'from sqlalchemy import Table, Column\n'), ((24730, 24878), 'warnings.warn', 'warnings.warn', (['"""the \'timedelta\' type is not supported, and will be written as integer values (ns frequency) to the database."""', 'UserWarning'], {}), '(\n "the \'timedelta\' type is not supported, and will be written as integer values (ns frequency) to the database."\n , UserWarning)\n', (24743, 24878), False, 'import warnings\n'), ((24967, 24999), 'pandas.core.common.is_float_dtype', 'com.is_float_dtype', (['arr_or_dtype'], {}), '(arr_or_dtype)\n', (24985, 24999), True, 'import pandas.core.common as com\n'), ((26558, 26579), 'sqlalchemy.schema.MetaData', 'MetaData', (['self.engine'], {}), '(self.engine)\n', (26566, 26579), False, 'from sqlalchemy.schema import MetaData\n'), ((29985, 30013), 'pandas.compat.map', 'map', (['str', 'self.frame.columns'], {}), '(str, self.frame.columns)\n', (29988, 30013), False, 'from pandas.compat import lzip, map, zip, raise_with_traceback, string_types\n'), ((31162, 31190), 'pandas.compat.map', 'map', (['str', 'self.frame.columns'], {}), '(str, self.frame.columns)\n', (31165, 31190), False, 'from pandas.compat import lzip, map, zip, raise_with_traceback, string_types\n'), ((31239, 31263), 'pandas.compat.map', 'map', (['pat.search', 'columns'], {}), '(pat.search, columns)\n', (31242, 31263), False, 'from pandas.compat import lzip, map, zip, raise_with_traceback, string_types\n'), ((31278, 31312), 'warnings.warn', 'warnings.warn', (['_SAFE_NAMES_WARNING'], {}), '(_SAFE_NAMES_WARNING)\n', (31291, 31312), False, 'import warnings\n'), ((32367, 32399), 'pandas.core.common.is_timedelta64_dtype', 'com.is_timedelta64_dtype', (['pytype'], {}), '(pytype)\n', (32391, 32399), True, 'import pandas.core.common as com\n'), ((37240, 37284), 'warnings.warn', 'warnings.warn', (['_MYSQL_WARNING', 'FutureWarning'], {}), '(_MYSQL_WARNING, FutureWarning)\n', (37253, 37284), False, 'import warnings\n'), ((1501, 1543), 'pandas.tseries.tools.to_datetime', 'to_datetime', (['col'], {'coerce': '(True)', 'unit': 'format'}), '(col, coerce=True, unit=format)\n', (1512, 1543), False, 'from pandas.tseries.tools import to_datetime\n'), ((1577, 1621), 'pandas.tseries.tools.to_datetime', 'to_datetime', (['col'], {'coerce': '(True)', 'format': 'format'}), '(col, coerce=True, 
format=format)\n', (1588, 1621), False, 'from pandas.tseries.tools import to_datetime\n'), ((4706, 4727), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (4725, 4727), False, 'import traceback\n'), ((4895, 4908), 'pandas.compat.lzip', 'lzip', (['*result'], {}), '(*result)\n', (4899, 4908), False, 'from pandas.compat import lzip, map, zip, raise_with_traceback, string_types\n'), ((15968, 16012), 'warnings.warn', 'warnings.warn', (['_MYSQL_WARNING', 'FutureWarning'], {}), '(_MYSQL_WARNING, FutureWarning)\n', (15981, 16012), False, 'import warnings\n'), ((16149, 16193), 'warnings.warn', 'warnings.warn', (['_MYSQL_WARNING', 'FutureWarning'], {}), '(_MYSQL_WARNING, FutureWarning)\n', (16162, 16193), False, 'import warnings\n'), ((21672, 21698), 'pandas.compat.zip', 'zip', (['columns', 'column_types'], {}), '(columns, column_types)\n', (21675, 21698), False, 'from pandas.compat import lzip, map, zip, raise_with_traceback, string_types\n'), ((24591, 24614), 'sqlalchemy.types.DateTime', 'DateTime', ([], {'timezone': '(True)'}), '(timezone=True)\n', (24599, 24614), False, 'from sqlalchemy.types import Integer, Float, Boolean, DateTime, Date\n'), ((25039, 25073), 'pandas.core.common.is_integer_dtype', 'com.is_integer_dtype', (['arr_or_dtype'], {}), '(arr_or_dtype)\n', (25059, 25073), True, 'import pandas.core.common as com\n'), ((32413, 32561), 'warnings.warn', 'warnings.warn', (['"""the \'timedelta\' type is not supported, and will be written as integer values (ns frequency) to the database."""', 'UserWarning'], {}), '(\n "the \'timedelta\' type is not supported, and will be written as integer values (ns frequency) to the database."\n , UserWarning)\n', (32426, 32561), False, 'import warnings\n'), ((34128, 34152), 'pandas.compat.raise_with_traceback', 'raise_with_traceback', (['ex'], {}), '(ex)\n', (34148, 34152), False, 'from pandas.compat import lzip, map, zip, raise_with_traceback, string_types\n'), ((21939, 21978), 'sqlalchemy.Column', 'Column', (['idx_label', 'idx_type'], {'index': '(True)'}), '(idx_label, idx_type, index=True)\n', (21945, 21978), False, 'from sqlalchemy import Table, Column\n'), ((25156, 25181), 'pandas.core.common.is_bool', 'com.is_bool', (['arr_or_dtype'], {}), '(arr_or_dtype)\n', (25167, 25181), True, 'import pandas.core.common as com\n'), ((31936, 31962), 'pandas.compat.zip', 'zip', (['columns', 'column_types'], {}), '(columns, column_types)\n', (31939, 31962), False, 'from pandas.compat import lzip, map, zip, raise_with_traceback, string_types\n'), ((19146, 19162), 'pandas.compat.zip', 'zip', (['keys', 't[1:]'], {}), '(keys, t[1:])\n', (19149, 19162), False, 'from pandas.compat import lzip, map, zip, raise_with_traceback, string_types\n'), ((34018, 34042), 'pandas.compat.raise_with_traceback', 'raise_with_traceback', (['ex'], {}), '(ex)\n', (34038, 34042), False, 'from pandas.compat import lzip, map, zip, raise_with_traceback, string_types\n')]
kkcookies99/UAST
Dataset/Leetcode/train/58/28.py
fff81885aa07901786141a71e5600a08d7cb4868
class Solution:
    def XXX(self, s):
        """
        :type s: str
        :rtype: int
        """
        cnt, tail = 0, len(s) - 1
        while tail >= 0 and s[tail] == ' ':
            tail -= 1
        while tail >= 0 and s[tail] != ' ':
            cnt += 1
            tail -= 1
        return cnt
[]
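An illustrative check for the solution above (the method name is anonymised as XXX in this dataset): it skips trailing spaces and returns the length of the last word.

# Illustrative only: length of the last word.
assert Solution().XXX("Hello World") == 5
assert Solution().XXX("   fly me   to   the moon  ") == 4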
ZytroCode/Systerm
Systerm/meta.py
688b1a9eab51ec2d2fcc8e921d57ae4ae585a1b7
"""Meta is a module contains objects that will customize the behavior of python.""" from abc import ABC from abc import ABCMeta from abc import abstractmethod from typing import Any from typing import Callable import Systerm # Metaclass class Metaclass(ABCMeta): """A metaclass to customize the behavior of all classes.""" def __new__(self, name: str, bases: tuple[type, ...], attrs: dict[str, Any], **keys: Any) -> type: """The static constructor for the Metaclass. Parameters: name - str The name of the class bases - tuple[type, ...] A tuple of classes to inherit attrs - dict[str, Any] A dictionary of attributes **keys - Any Keyword arguments to pass in """ # Creating a new class cls = super().__new__(self, name, bases, dict(attrs), **keys) cls.__setattr__ = self.setattr # Custom magic methods cls.__namespaces__ = {} cls.__magics__ = {} cls.__attributes__ = {} cls.__publics__ = {} cls.__privates__ = {} cls.__protecteds__ = {} # Setting objects for name in dir(cls): value = getattr(cls, name) # Adds attributes to __magics__ if name.startswith("__") and name.endswith("__"): cls.__magics__[name] = value # Adds attributes to other namespace else: # Adds attributes to __privates__ if name.startswith("__"): cls.__privates__[name] = value # Adds attributes to __protecteds__ elif name.startswith("_"): cls.__protecteds__[name] = value # Adds attributes to __publics__ else: cls.__publics__[name] = value cls.__attributes__[name] = value # Adds attributes to namespace cls.__namespaces__[name] = value return cls def setattr(self, name: str, value: object) -> None: # Adds attributes to __magics__ if name.startswith("__") and name.endswith("__"): self.__magics__[name] = value # Adds attributes to other namespace else: # Adds attributes to __privates__ if name.startswith("__"): self.__privates__[name] = value # Adds attributes to __protecteds__ elif name.startswith("_"): self.__protecteds__[name] = value # Adds attributes to __publics__ else: self.__publics__[name] = value self.__attributes__[name] = value # Adds attributes to namespace self.__namespaces__[name] = value # Object class class Object(object, metaclass=Metaclass): pass # List class class List(list, metaclass=Metaclass): pass # Dictionary class class Dictionary(dict, metaclass=Metaclass): def __getattr__(self, name: str) -> None: try: return self[name] except KeyError as e: try: return super().__getattr__(name) except AttributeError: raise e def __setattr__(self, name: str, value: object) -> None: self[name] = value # Recreating ABC ABC = Metaclass(ABC.__name__, ABC.__bases__, {name: getattr(ABC, name) for name in dir(ABC)}) def get_namespaces(object: Object) -> Dictionary: """Gets the namespaces of an object.""" return object.__namespaces__ def get_magics(object: Object) -> Dictionary: """Gets the magic methods of an object.""" return object.__magics__ def get_attributes(object: Object) -> Dictionary: """Gets the attributes of an object.""" return object.__attributes__ def get_publics(object: Object) -> Dictionary: """Gets the public namespaces of an object.""" return object.__publics__ def get_privates(object: Object) -> Dictionary: """Gets the private namespaces of an object.""" return object.__privates__ def get_protecteds(object: Object) -> Dictionary: """Gets the protected namespaces of an object.""" return object.__protecteds__ # Initializing Systerm.module from Systerm._setup import init_module module = init_module() # MetaMod class class MetaMod(module.Module): pass module.modules[__name__].__class__ = MetaMod
[((4295, 4308), 'Systerm._setup.init_module', 'init_module', ([], {}), '()\n', (4306, 4308), False, 'from Systerm._setup import init_module\n')]
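An illustrative sketch of the bookkeeping done by Metaclass above and of the attribute-style reads offered by Dictionary; the Config class and its attribute names are hypothetical, not part of Systerm.

# Hypothetical example class, not part of Systerm itself.
class Config(Object):
    visible = 1      # no leading underscore     -> recorded in __publics__
    _internal = 2    # single leading underscore -> recorded in __protecteds__

assert "visible" in get_publics(Config)
assert "_internal" in get_protecteds(Config)

# Dictionary allows attribute-style reads of existing keys via __getattr__.
d = Dictionary(answer=42)
assert d.answer == 42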
iqsarv/CCF
samples/apps/txregulator/tests/txregulatorclient.py
5cc33a1f0e06eb2a25dc1ebd0e2153881962b889
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the Apache 2.0 License. import infra.e2e_args import infra.ccf import infra.jsonrpc import logging from time import gmtime, strftime import csv import random from loguru import logger as LOG class AppUser: def __init__(self, network, name, country, curve): self.name = name self.country = country primary, _ = network.find_primary() network.create_users([self.name], curve) network.consortium.add_users(primary, [self.name]) with primary.user_client(user_id=self.name) as client: self.ccf_id = client.rpc("whoAmI", {}).result["caller_id"] def __str__(self): return f"{self.ccf_id} ({self.name})" def run(args): hosts = ["localhost"] with infra.ccf.network( hosts, args.build_dir, args.debug_nodes, args.perf_nodes, pdb=args.pdb ) as network: check = infra.checker.Checker() network.start_and_join(args) primary, others = network.find_nodes() script = "if tonumber(amt) > 200000 then return true else return false end" if args.lua_script is not None: data = [] with open(args.lua_script, "r") as f: data = f.readlines() script = "".join(data) manager = AppUser(network, "manager", "GB", args.default_curve) regulator = AppUser(network, "auditor", "GB", args.default_curve) banks = [ AppUser(network, f"bank{country}", country, args.default_curve) for country in ("US", "GB", "GR", "FR") ] transactions = [] with open(args.datafile, newline="") as f: datafile = csv.DictReader(f) for i, row in enumerate(datafile): # read first 10 lines if i > 10: break json_tx = { "src": row["origin"], "dst": row["destination"], "amt": row["amount"], "type": row["type"], "timestamp": strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()), "src_country": row["src_country"], "dst_country": row["dst_country"], } transactions.append(json_tx) # Manager is granted special privileges by members, which is later read by app to enforce access restrictions proposal_result, error = network.consortium.propose( 0, primary, f""" return Calls:call( "set_user_data", {{ user_id = {manager.ccf_id}, user_data = {{ privileges = {{ REGISTER_REGULATORS = true, REGISTER_BANKS = true, }} }} }} ) """, ) network.consortium.vote_using_majority(primary, proposal_result["id"]) # Check permissions are enforced with primary.user_client(user_id=regulator.name) as c: check( c.rpc("REG_register", {}), error=lambda e: e is not None and e["code"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) check( c.rpc("BK_register", {}), error=lambda e: e is not None and e["code"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) with primary.user_client(user_id=banks[0].name) as c: check( c.rpc("REG_register", {}), error=lambda e: e is not None and e["code"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) check( c.rpc("BK_register", {}), error=lambda e: e is not None and e["code"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) # As permissioned manager, register regulator and banks with primary.node_client() as mc: check_commit = infra.checker.Checker(mc) with primary.user_client(format="msgpack", user_id=manager.name) as c: check( c.rpc( "REG_register", { "regulator_id": regulator.ccf_id, "country": regulator.country, "script": script, }, ), result=regulator.ccf_id, ) check( c.rpc("REG_get", {"id": regulator.ccf_id}), result=[regulator.country, script], ) check( c.rpc( "BK_register", {"bank_id": regulator.ccf_id, "country": regulator.country}, ), error=lambda e: e is not None and e["code"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) LOG.debug(f"User {regulator} successfully 
registered as regulator") for bank in banks: check( c.rpc( "BK_register", {"bank_id": bank.ccf_id, "country": bank.country}, ), result=bank.ccf_id, ) check(c.rpc("BK_get", {"id": bank.ccf_id}), result=bank.country) check( c.rpc( "REG_register", {"regulator_id": bank.ccf_id, "country": bank.country}, ), error=lambda e: e is not None and e["code"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) LOG.debug(f"User {bank} successfully registered as bank") LOG.success(f"{1} regulator and {len(banks)} bank(s) successfully setup") tx_id = 0 # Tracks how many transactions have been issued # tracks flagged/non flagged and revealed/non revealed transactions for validation flagged_txs = {} revealed_tx_ids = [] flagged_ids = [] non_flagged_ids = [] flagged_amt = 200000 for i, bank in enumerate(banks): with primary.user_client(format="msgpack", user_id=bank.name) as c: # Destination account is the next one in the list of banks for transaction in transactions: print(transaction) amount = transaction["amt"] check(c.rpc("TX_record", transaction), result=tx_id) check( c.rpc("TX_get", {"tx_id": tx_id}), result={ "amt": amount, "bank_id": bank.ccf_id, "dst": transaction["dst"], "dst_country": transaction["dst_country"], "src": transaction["src"], "src_country": transaction["src_country"], "timestamp": transaction["timestamp"], "type": transaction["type"], }, ) if float(amount) > flagged_amt: check( c.rpc("FLAGGED_TX_get", {"tx_id": tx_id}), result=[regulator.ccf_id, False, transaction["timestamp"]], ) flagged_tx = { "amt": amount, "bank_id": bank.ccf_id, "dst": transaction["dst"], "dst_country": transaction["dst_country"], "src": transaction["src"], "src_country": transaction["src_country"], "timestamp": transaction["timestamp"], "tx_id": tx_id, "type": transaction["type"], } flagged_ids.append(tx_id) flagged_txs[tx_id] = flagged_tx else: check( c.rpc("FLAGGED_TX_get", {"tx_id": tx_id}), error=lambda e: e is not None and e["code"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) non_flagged_ids.append(tx_id) tx_id += 1 LOG.success(f"{tx_id} transactions have been successfully issued") # bank that issued first flagged transaction with primary.user_client(format="msgpack", user_id=bank.name) as c: # try to poll flagged but fail as you are not a regulator check( c.rpc("REG_poll_flagged", {}), error=lambda e: e is not None and e["code"] == infra.jsonrpc.ErrorCode.INVALID_CALLER_ID.value, ) # bank reveal some transactions that were flagged for i, tx_id in enumerate(flagged_ids): if i % 2 == 0: check(c.rpc("TX_reveal", {"tx_id": tx_id}), result=True) revealed_tx_ids.append(tx_id) # bank try to reveal non flagged txs for tx_id in non_flagged_ids: check( c.rpc("TX_reveal", {"tx_id": tx_id}), error=lambda e: e is not None and e["code"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) # regulator poll for transactions that are flagged with primary.node_client() as mc: with primary.user_client(format="msgpack", user_id=regulator.name) as c: # assert that the flagged txs that we poll for are correct resp = c.rpc("REG_poll_flagged", {}) poll_flagged_ids = [] for poll_flagged in resp.result: # poll flagged is a list [tx_id, regulator_id] poll_flagged_ids.append(poll_flagged[0]) poll_flagged_ids.sort() assert poll_flagged_ids == flagged_ids for tx_id in flagged_ids: # get from flagged txs, try to get the flagged one that was not revealed if tx_id not in revealed_tx_ids: check( c.rpc("REG_get_revealed", {"tx_id": tx_id}), error=lambda e: e is not None and e["code"] == infra.jsonrpc.ErrorCode.INVALID_PARAMS.value, ) # 
get from flagged txs, try to get the flagged ones that were revealed for tx_id in revealed_tx_ids: check( c.rpc("REG_get_revealed", {"tx_id": tx_id}), result=flagged_txs[tx_id], ) if __name__ == "__main__": def add(parser): parser.add_argument( "--lua-script", help="Regulator checker loaded as lua script file", type=str ) parser.add_argument( "--datafile", help="Load an existing scenario file (csv)", type=str ) args = infra.e2e_args.cli_args(add) args.package = args.app_script and "libluageneric" or "liblogging" run(args)
[((8939, 9005), 'loguru.logger.success', 'LOG.success', (['f"""{tx_id} transactions have been successfully issued"""'], {}), "(f'{tx_id} transactions have been successfully issued')\n", (8950, 9005), True, 'from loguru import logger as LOG\n'), ((1723, 1740), 'csv.DictReader', 'csv.DictReader', (['f'], {}), '(f)\n', (1737, 1740), False, 'import csv\n'), ((5218, 5285), 'loguru.logger.debug', 'LOG.debug', (['f"""User {regulator} successfully registered as regulator"""'], {}), "(f'User {regulator} successfully registered as regulator')\n", (5227, 5285), True, 'from loguru import logger as LOG\n'), ((6077, 6134), 'loguru.logger.debug', 'LOG.debug', (['f"""User {bank} successfully registered as bank"""'], {}), "(f'User {bank} successfully registered as bank')\n", (6086, 6134), True, 'from loguru import logger as LOG\n'), ((2152, 2160), 'time.gmtime', 'gmtime', ([], {}), '()\n', (2158, 2160), False, 'from time import gmtime, strftime\n')]
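A small, self-contained sketch of the flagging rule the test above exercises: the default lua checker flags any transaction whose amount exceeds 200000, and the test partitions transaction ids accordingly; the sample amounts below are made up.

# Illustrative only: mirrors `if tonumber(amt) > 200000 then return true ...`.
flagged_amt = 200000
sample_amounts = ["150000.0", "250000.5", "200000.0"]
flagged_ids = [i for i, amt in enumerate(sample_amounts) if float(amt) > flagged_amt]
assert flagged_ids == [1]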
davidcortesortuno/finmag
src/finmag/sim/hysteresis.py
9ac0268d2c0e45faf1284cee52a73525aa589e2b
import os
import re
import glob
import logging
import textwrap
import fileinput
import numpy as np

from finmag.energies import Zeeman
from finmag.util.helpers import norm

log = logging.getLogger(name="finmag")


def hysteresis(sim, H_ext_list, fun=None, **kwargs):
    """
    Set the applied field to the first value in `H_ext_list` (which should
    be a list of external field vectors) and then call the relax() method.
    When convergence is reached, the field is changed to the next one in
    H_ext_list, and so on until all values in H_ext_list are exhausted.

    Note: The fields in H_ext_list are applied *in addition to* any Zeeman
    interactions that are already present in the simulation. In particular,
    if only one external field should be present then do not add any Zeeman
    interactions before calling this method.

    If you would like to perform a certain action (e.g. save a VTK snapshot
    of the magnetisation) at the end of each relaxation stage, use the
    sim.schedule() command with the directive 'at_end=True' as in the
    following example:

        sim.schedule('save_vtk', at_end=True, ...)
        sim.hysteresis(...)

    *Arguments*

        H_ext_list: list of 3-vectors

            List of external fields, where each field can have any of the
            forms accepted by Zeeman.__init__() (see its docstring for more
            details).

        fun: callable

            The user can pass a function here (which should accept the
            Simulation object as its only argument); this function is called
            after each relaxation and determines the return value (see
            below). For example, if

                fun = (lambda sim: sim.m_average[0])

            then the return value is a list of values representing the
            average x-component of the magnetisation at the end of each
            relaxation.

    All other keyword arguments are passed on to the relax() method.
    See its documentation for details.

    *Return value*

    If `fun` is not None then the return value is a list containing an
    accumulation of all the return values of `fun` after each stage.
    Otherwise the return value is None.
    """
    if H_ext_list == []:
        return

    # Add a new Zeeman interaction, initialised to zero.
    H = Zeeman((0, 0, 0))
    sim.add(H)

    # We keep track of the current stage of the hysteresis loop.
    cur_stage = 0
    num_stages = len(H_ext_list)

    res = []

    try:
        while True:
            H_cur = H_ext_list[cur_stage]
            log.info(
                "Entering hysteresis stage #{} ({} out of {}). Current field: "
                "{}".format(cur_stage, cur_stage + 1, num_stages, H_cur))
            H.set_value(H_cur)
            sim.relax(**kwargs)
            cur_stage += 1
            if fun is not None:
                retval = fun(sim)
                res.append(retval)
                log.debug("hysteresis callback function '{}' returned "
                          "value: {}".format(fun.__name__, retval))
    except IndexError:
        log.info("Hysteresis is finished.")

    log.info("Removing the applied field used for hysteresis.")
    sim.remove_interaction(H.name)

    return res or None


def hysteresis_loop(sim, H_max, direction, N, **kwargs):
    """
    Compute a hysteresis loop. This is a specialised convenience version of
    the more general `hysteresis` method. It computes a hysteresis loop
    where the external field is applied along a single axis and changes
    magnitude from +H_max to -H_max and back (using N steps in each
    direction).

    The return value is a pair (H_vals, m_vals), where H_vals is the list
    of field strengths at which a relaxation is performed and m_vals is a
    list of scalar values containing, for each field value, the averaged
    value of the magnetisation along the axis `direction` (after relaxation
    has been reached). Thus the command plot(H_vals, m_vals) could be used
    to plot the hysteresis loop.

    direction -- a vector indicating the direction of the external field
                 (will be normalised automatically)

    H_max -- maximum field strength

    N -- number of data points to compute in each direction
         (thus the total number of data points for the entire loop
         will be 2*N-1)

    kwargs -- any keyword argument accepted by the hysteresis() method
    """
    d = np.array(direction)
    H_dir = d / norm(d)

    H_norms = list(np.linspace(H_max, -H_max, N)) + \
        list(np.linspace(-H_max, H_max, N))
    H_vals = [h * H_dir for h in H_norms]

    m_avg = hysteresis(sim, H_vals, fun=lambda sim: sim.m_average, **kwargs)

    # projected lengths of the averaged magnetisation values along the axis
    # `H_dir`
    m_vals = [np.dot(m, H_dir) for m in m_avg]

    return (H_norms, m_vals)
[((178, 210), 'logging.getLogger', 'logging.getLogger', ([], {'name': '"""finmag"""'}), "(name='finmag')\n", (195, 210), False, 'import logging\n'), ((2339, 2356), 'finmag.energies.Zeeman', 'Zeeman', (['(0, 0, 0)'], {}), '((0, 0, 0))\n', (2345, 2356), False, 'from finmag.energies import Zeeman\n'), ((4498, 4517), 'numpy.array', 'np.array', (['direction'], {}), '(direction)\n', (4506, 4517), True, 'import numpy as np\n'), ((4534, 4541), 'finmag.util.helpers.norm', 'norm', (['d'], {}), '(d)\n', (4538, 4541), False, 'from finmag.util.helpers import norm\n'), ((4863, 4879), 'numpy.dot', 'np.dot', (['m', 'H_dir'], {}), '(m, H_dir)\n', (4869, 4879), True, 'import numpy as np\n'), ((4561, 4590), 'numpy.linspace', 'np.linspace', (['H_max', '(-H_max)', 'N'], {}), '(H_max, -H_max, N)\n', (4572, 4590), True, 'import numpy as np\n'), ((4609, 4638), 'numpy.linspace', 'np.linspace', (['(-H_max)', 'H_max', 'N'], {}), '(-H_max, H_max, N)\n', (4620, 4638), True, 'import numpy as np\n')]
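A standalone sketch of the field sweep that hysteresis_loop constructs above, using numpy's norm in place of finmag.util.helpers.norm so it runs without a Simulation object; the numbers are illustrative.

# Illustrative only: +H_max -> -H_max -> +H_max sweep along a normalised axis.
import numpy as np

H_max, N = 1e6, 5
direction = np.array([1.0, 1.0, 0.0])
H_dir = direction / np.linalg.norm(direction)
H_norms = list(np.linspace(H_max, -H_max, N)) + list(np.linspace(-H_max, H_max, N))
H_vals = [h * H_dir for h in H_norms]
# -H_max appears twice (end of the first leg, start of the second), so the
# loop visits 2*N - 1 distinct field values.
assert len(H_vals) == 2 * N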
smokedpirate/Encryption-hash-generator
uiSetup.py
47bf3f1f6b6b24ca3e9078fefe46b1e6409d59e5
from PyQt5 import QtCore, QtGui, QtWidgets from PyQt5 import QtGui, QtCore class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName("MainWindow") MainWindow.resize(577, 341) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(120, 120, 120)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) MainWindow.setPalette(palette) MainWindow.setAutoFillBackground(False) MainWindow.setStyleSheet("background-color: rgb(84, 84, 84);") self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName("centralwidget") self.Algorithms = QtWidgets.QComboBox(self.centralwidget) self.Algorithms.setGeometry(QtCore.QRect(190, 60, 191, 41)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) 
gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) 
gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Algorithms.setPalette(palette) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.Algorithms.setFont(font) self.Algorithms.setStyleSheet("QComboBox {\n" " color: #333;\n" "\n" " \n" " border-style: outset;\n" " background: qradialgradient(\n" " cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n" " radius: 1.35, stop: 0 #fff, stop: 1 #888\n" " );\n" " padding: 5px;\n" " \n" " }\n" "\n" "\n" "QComboBox:hover {\n" " background: qradialgradient(\n" " cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n" " radius: 1.35, stop: 0 #fff, stop: 1 #bbb\n" " );\n" " }\n" "\n" "QComboBox:pressed {\n" " border-style: inset;\n" " background: qradialgradient(\n" " cx: 0.4, cy: -0.1, fx: 0.4, fy: -0.1,\n" " radius: 1.35, stop: 0 #fff, stop: 1 #ddd\n" " );\n" " }\n" "\n" "\n" "\n" "\n" "\n" "\n" "") self.Algorithms.setObjectName("Algorithms") self.Algorithms.addItem("") self.Algorithms.addItem("") self.Algorithms.addItem("") self.Algorithms.addItem("") self.Algorithms.addItem("") self.Algorithms.addItem("") self.Generate = QtWidgets.QPushButton(self.centralwidget) self.Generate.setGeometry(QtCore.QRect(190, 120, 191, 41)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, 
QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) 
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Generate.setPalette(palette) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.Generate.setFont(font) self.Generate.setStyleSheet("QPushButton {\n" " color: #333;\n" "\n" " border-radius: 20px;\n" " border-style: outset;\n" " background: qradialgradient(\n" " cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n" " radius: 1.35, stop: 0 #fff, stop: 1 #888\n" " );\n" " padding: 5px;\n" " }\n" "\n" "QPushButton:hover {\n" " background: qradialgradient(\n" " cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n" " radius: 1.35, stop: 0 #fff, stop: 1 #bbb\n" " );\n" " }\n" "\n" "QPushButton:pressed {\n" " border-style: inset;\n" " background: qradialgradient(\n" " cx: 0.4, cy: -0.1, fx: 0.4, fy: -0.1,\n" " radius: 1.35, stop: 0 #fff, stop: 1 #ddd\n" " );\n" " }") self.Generate.setObjectName("Generate") self.UserInput = QtWidgets.QLineEdit(self.centralwidget) self.UserInput.setGeometry(QtCore.QRect(190, 20, 191, 31)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(120, 120, 120)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(120, 120, 120)) brush.setStyle(QtCore.Qt.SolidPattern) 
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.UserInput.setPalette(palette) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.UserInput.setFont(font) self.UserInput.setObjectName("UserInput") self.Password = QtWidgets.QLineEdit(self.centralwidget) self.Password.setGeometry(QtCore.QRect(200, 210, 141, 31)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(120, 120, 120)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(120, 120, 120)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) 
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Password.setPalette(palette) font = QtGui.QFont() font.setBold(True) font.setWeight(75) self.Password.setFont(font) self.Password.setText("") self.Password.setEchoMode(QtWidgets.QLineEdit.Password) self.Password.setReadOnly(True) self.Password.setObjectName("Password") self.HideShow = QtWidgets.QPushButton(self.centralwidget) self.HideShow.setGeometry(QtCore.QRect(350, 210, 31, 31)) self.HideShow.setStyleSheet("QPushButton {\n" " color: #333;\n" "\n" " border-radius: 7px;\n" " border-style: outset;\n" " background: qradialgradient(\n" " cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n" " radius: 1.35, stop: 0 #fff, stop: 1 #888\n" " );\n" " padding: 5px;\n" " }\n" "\n" "QPushButton:hover {\n" " background: qradialgradient(\n" " cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n" " radius: 1.35, stop: 0 #fff, stop: 1 #bbb\n" " );\n" " }\n" "\n" "QPushButton:pressed {\n" " border-style: inset;\n" " background: qradialgradient(\n" " cx: 0.4, cy: -0.1, fx: 0.4, fy: -0.1,\n" " radius: 1.35, stop: 0 #fff, stop: 1 #ddd\n" " );\n" " }") self.HideShow.setText("") icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap("../../Desktop/EYECLOSE.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.HideShow.setIcon(icon) self.HideShow.setIconSize(QtCore.QSize(30, 30)) self.HideShow.setObjectName("HideShow") self.Copy = QtWidgets.QPushButton(self.centralwidget) self.Copy.setGeometry(QtCore.QRect(190, 250, 201, 31)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) 
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) gradient = QtGui.QRadialGradient(0.3, -0.4, 1.35, 0.3, -0.4) gradient.setSpread(QtGui.QGradient.PadSpread) gradient.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode) gradient.setColorAt(0.0, QtGui.QColor(255, 255, 255)) gradient.setColorAt(1.0, QtGui.QColor(136, 136, 136)) brush = QtGui.QBrush(gradient) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(51, 51, 51, 128)) brush.setStyle(QtCore.Qt.NoBrush) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush) self.Copy.setPalette(palette) font = QtGui.QFont() 
font.setBold(True) font.setWeight(75) self.Copy.setFont(font) self.Copy.setStyleSheet("QPushButton {\n" " color: #333;\n" " \n" " border-radius: 13px;\n" " border-style: outset;\n" " background: qradialgradient(\n" " cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n" " radius: 1.35, stop: 0 #fff, stop: 1 #888\n" " );\n" " padding: 5px;\n" " }\n" "\n" "QPushButton:hover {\n" " background: qradialgradient(\n" " cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n" " radius: 1.35, stop: 0 #fff, stop: 1 #bbb\n" " );\n" " }\n" "\n" "QPushButton:pressed {\n" " border-style: inset;\n" " background: qradialgradient(\n" " cx: 0.4, cy: -0.1, fx: 0.4, fy: -0.1,\n" " radius: 1.35, stop: 0 #fff, stop: 1 #ddd\n" " );\n" " }") self.Copy.setObjectName("Copy") self.hexify = QtWidgets.QCheckBox(self.centralwidget) self.hexify.setGeometry(QtCore.QRect(250, 180, 81, 21)) palette = QtGui.QPalette() brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush) brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.PlaceholderText, brush) brush = QtGui.QBrush(QtGui.QColor(120, 120, 120)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush) brush = QtGui.QBrush(QtGui.QColor(120, 120, 120)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush) brush = QtGui.QBrush(QtGui.QColor(84, 84, 84)) brush.setStyle(QtCore.Qt.SolidPattern) 
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
        brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.PlaceholderText, brush)
        self.hexify.setPalette(palette)
        font = QtGui.QFont()
        font.setPointSize(12)
        font.setBold(True)
        font.setWeight(75)
        self.hexify.setFont(font)
        self.hexify.setObjectName("hexify")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 577, 21))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.Algorithms.setCurrentText(_translate("MainWindow", "Select encryption algorithm"))
        self.Algorithms.setItemText(0, _translate("MainWindow", "Select encryption algorithm"))
        self.Algorithms.setItemText(1, _translate("MainWindow", "sha256"))
        self.Algorithms.setItemText(2, _translate("MainWindow", "md5"))
        self.Algorithms.setItemText(3, _translate("MainWindow", "sha224"))
        self.Algorithms.setItemText(4, _translate("MainWindow", "sha1"))
        self.Algorithms.setItemText(5, _translate("MainWindow", "sha512"))
        self.Generate.setText(_translate("MainWindow", "GENERATE"))
        self.Copy.setText(_translate("MainWindow", "COPY TO CLIPBOARD"))
        self.hexify.setText(_translate("MainWindow", "Hexify?"))
        self.HideShow.setIcon(QtGui.QIcon("Assets//EYECLOSE.png"))


if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
[((39511, 39543), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (39533, 39543), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((39562, 39585), 'PyQt5.QtWidgets.QMainWindow', 'QtWidgets.QMainWindow', ([], {}), '()\n', (39583, 39585), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((252, 268), 'PyQt5.QtGui.QPalette', 'QtGui.QPalette', ([], {}), '()\n', (266, 268), False, 'from PyQt5 import QtGui, QtCore\n'), ((2687, 2716), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['MainWindow'], {}), '(MainWindow)\n', (2704, 2716), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2803, 2842), 'PyQt5.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2822, 2842), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2931, 2947), 'PyQt5.QtGui.QPalette', 'QtGui.QPalette', ([], {}), '()\n', (2945, 2947), False, 'from PyQt5 import QtGui, QtCore\n'), ((3155, 3204), 'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(0.3)', '(-0.4)', '(1.35)', '(0.3)', '(-0.4)'], {}), '(0.3, -0.4, 1.35, 0.3, -0.4)\n', (3176, 3204), False, 'from PyQt5 import QtGui, QtCore\n'), ((3475, 3497), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['gradient'], {}), '(gradient)\n', (3487, 3497), False, 'from PyQt5 import QtGui, QtCore\n'), ((3965, 4014), 'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(0.3)', '(-0.4)', '(1.35)', '(0.3)', '(-0.4)'], {}), '(0.3, -0.4, 1.35, 0.3, -0.4)\n', (3986, 4014), False, 'from PyQt5 import QtGui, QtCore\n'), ((4285, 4307), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['gradient'], {}), '(gradient)\n', (4297, 4307), False, 'from PyQt5 import QtGui, QtCore\n'), ((4405, 4454), 'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(0.3)', '(-0.4)', '(1.35)', '(0.3)', '(-0.4)'], {}), '(0.3, -0.4, 1.35, 0.3, -0.4)\n', (4426, 4454), False, 'from PyQt5 import QtGui, QtCore\n'), ((4725, 4747), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['gradient'], {}), '(gradient)\n', (4737, 4747), False, 'from PyQt5 import QtGui, QtCore\n'), ((5236, 5285), 'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(0.3)', '(-0.4)', '(1.35)', '(0.3)', '(-0.4)'], {}), '(0.3, -0.4, 1.35, 0.3, -0.4)\n', (5257, 5285), False, 'from PyQt5 import QtGui, QtCore\n'), ((5556, 5578), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['gradient'], {}), '(gradient)\n', (5568, 5578), False, 'from PyQt5 import QtGui, QtCore\n'), ((6052, 6101), 'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(0.3)', '(-0.4)', '(1.35)', '(0.3)', '(-0.4)'], {}), '(0.3, -0.4, 1.35, 0.3, -0.4)\n', (6073, 6101), False, 'from PyQt5 import QtGui, QtCore\n'), ((6372, 6394), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['gradient'], {}), '(gradient)\n', (6384, 6394), False, 'from PyQt5 import QtGui, QtCore\n'), ((6494, 6543), 'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(0.3)', '(-0.4)', '(1.35)', '(0.3)', '(-0.4)'], {}), '(0.3, -0.4, 1.35, 0.3, -0.4)\n', (6515, 6543), False, 'from PyQt5 import QtGui, QtCore\n'), ((6814, 6836), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['gradient'], {}), '(gradient)\n', (6826, 6836), False, 'from PyQt5 import QtGui, QtCore\n'), ((7329, 7378), 'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(0.3)', '(-0.4)', '(1.35)', '(0.3)', '(-0.4)'], {}), '(0.3, -0.4, 1.35, 0.3, -0.4)\n', (7350, 7378), False, 'from PyQt5 import QtGui, QtCore\n'), ((7649, 7671), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['gradient'], {}), '(gradient)\n', (7661, 7671), False, 'from PyQt5 import QtGui, QtCore\n'), 
((8145, 8194), 'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(0.3)', '(-0.4)', '(1.35)', '(0.3)', '(-0.4)'], {}), '(0.3, -0.4, 1.35, 0.3, -0.4)\n', (8166, 8194), False, 'from PyQt5 import QtGui, QtCore\n'), ((8465, 8487), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['gradient'], {}), '(gradient)\n', (8477, 8487), False, 'from PyQt5 import QtGui, QtCore\n'), ((8587, 8636), 'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(0.3)', '(-0.4)', '(1.35)', '(0.3)', '(-0.4)'], {}), '(0.3, -0.4, 1.35, 0.3, -0.4)\n', (8608, 8636), False, 'from PyQt5 import QtGui, QtCore\n'), ((8907, 8929), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['gradient'], {}), '(gradient)\n', (8919, 8929), False, 'from PyQt5 import QtGui, QtCore\n'), ((9271, 9284), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (9282, 9284), False, 'from PyQt5 import QtGui, QtCore\n'), ((10483, 10524), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (10504, 10524), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10612, 10628), 'PyQt5.QtGui.QPalette', 'QtGui.QPalette', ([], {}), '()\n', (10626, 10628), False, 'from PyQt5 import QtGui, QtCore\n'), ((10836, 10885), 'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(0.3)', '(-0.4)', '(1.35)', '(0.3)', '(-0.4)'], {}), '(0.3, -0.4, 1.35, 0.3, -0.4)\n', (10857, 10885), False, 'from PyQt5 import QtGui, QtCore\n'), ((11156, 11178), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['gradient'], {}), '(gradient)\n', (11168, 11178), False, 'from PyQt5 import QtGui, QtCore\n'), ((11646, 11695), 'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(0.3)', '(-0.4)', '(1.35)', '(0.3)', '(-0.4)'], {}), '(0.3, -0.4, 1.35, 0.3, -0.4)\n', (11667, 11695), False, 'from PyQt5 import QtGui, QtCore\n'), ((11966, 11988), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['gradient'], {}), '(gradient)\n', (11978, 11988), False, 'from PyQt5 import QtGui, QtCore\n'), ((12086, 12135), 'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(0.3)', '(-0.4)', '(1.35)', '(0.3)', '(-0.4)'], {}), '(0.3, -0.4, 1.35, 0.3, -0.4)\n', (12107, 12135), False, 'from PyQt5 import QtGui, QtCore\n'), ((12406, 12428), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['gradient'], {}), '(gradient)\n', (12418, 12428), False, 'from PyQt5 import QtGui, QtCore\n'), ((12909, 12958), 'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(0.3)', '(-0.4)', '(1.35)', '(0.3)', '(-0.4)'], {}), '(0.3, -0.4, 1.35, 0.3, -0.4)\n', (12930, 12958), False, 'from PyQt5 import QtGui, QtCore\n'), ((13229, 13251), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['gradient'], {}), '(gradient)\n', (13241, 13251), False, 'from PyQt5 import QtGui, QtCore\n'), ((13725, 13774), 'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(0.3)', '(-0.4)', '(1.35)', '(0.3)', '(-0.4)'], {}), '(0.3, -0.4, 1.35, 0.3, -0.4)\n', (13746, 13774), False, 'from PyQt5 import QtGui, QtCore\n'), ((14045, 14067), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['gradient'], {}), '(gradient)\n', (14057, 14067), False, 'from PyQt5 import QtGui, QtCore\n'), ((14167, 14216), 'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(0.3)', '(-0.4)', '(1.35)', '(0.3)', '(-0.4)'], {}), '(0.3, -0.4, 1.35, 0.3, -0.4)\n', (14188, 14216), False, 'from PyQt5 import QtGui, QtCore\n'), ((14487, 14509), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['gradient'], {}), '(gradient)\n', (14499, 14509), False, 'from PyQt5 import QtGui, QtCore\n'), ((14994, 15043), 'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(0.3)', '(-0.4)', 
'(1.35)', '(0.3)', '(-0.4)'], {}), '(0.3, -0.4, 1.35, 0.3, -0.4)\n', (15015, 15043), False, 'from PyQt5 import QtGui, QtCore\n'), ((15314, 15336), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['gradient'], {}), '(gradient)\n', (15326, 15336), False, 'from PyQt5 import QtGui, QtCore\n'), ((15810, 15859), 'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(0.3)', '(-0.4)', '(1.35)', '(0.3)', '(-0.4)'], {}), '(0.3, -0.4, 1.35, 0.3, -0.4)\n', (15831, 15859), False, 'from PyQt5 import QtGui, QtCore\n'), ((16130, 16152), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['gradient'], {}), '(gradient)\n', (16142, 16152), False, 'from PyQt5 import QtGui, QtCore\n'), ((16252, 16301), 'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(0.3)', '(-0.4)', '(1.35)', '(0.3)', '(-0.4)'], {}), '(0.3, -0.4, 1.35, 0.3, -0.4)\n', (16273, 16301), False, 'from PyQt5 import QtGui, QtCore\n'), ((16572, 16594), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['gradient'], {}), '(gradient)\n', (16584, 16594), False, 'from PyQt5 import QtGui, QtCore\n'), ((16929, 16942), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (16940, 16942), False, 'from PyQt5 import QtGui, QtCore\n'), ((17881, 17920), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.centralwidget'], {}), '(self.centralwidget)\n', (17900, 17920), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((18008, 18024), 'PyQt5.QtGui.QPalette', 'QtGui.QPalette', ([], {}), '()\n', (18022, 18024), False, 'from PyQt5 import QtGui, QtCore\n'), ((21466, 21479), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (21477, 21479), False, 'from PyQt5 import QtGui, QtCore\n'), ((21650, 21689), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.centralwidget'], {}), '(self.centralwidget)\n', (21669, 21689), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((21777, 21793), 'PyQt5.QtGui.QPalette', 'QtGui.QPalette', ([], {}), '()\n', (21791, 21793), False, 'from PyQt5 import QtGui, QtCore\n'), ((25234, 25247), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (25245, 25247), False, 'from PyQt5 import QtGui, QtCore\n'), ((25556, 25597), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (25577, 25597), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((26485, 26498), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (26496, 26498), False, 'from PyQt5 import QtGui, QtCore\n'), ((26769, 26810), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (26790, 26810), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((26894, 26910), 'PyQt5.QtGui.QPalette', 'QtGui.QPalette', ([], {}), '()\n', (26908, 26910), False, 'from PyQt5 import QtGui, QtCore\n'), ((27118, 27167), 'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(0.3)', '(-0.4)', '(1.35)', '(0.3)', '(-0.4)'], {}), '(0.3, -0.4, 1.35, 0.3, -0.4)\n', (27139, 27167), False, 'from PyQt5 import QtGui, QtCore\n'), ((27438, 27460), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['gradient'], {}), '(gradient)\n', (27450, 27460), False, 'from PyQt5 import QtGui, QtCore\n'), ((27928, 27977), 'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(0.3)', '(-0.4)', '(1.35)', '(0.3)', '(-0.4)'], {}), '(0.3, -0.4, 1.35, 0.3, -0.4)\n', (27949, 27977), False, 'from PyQt5 import QtGui, QtCore\n'), ((28248, 28270), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['gradient'], {}), '(gradient)\n', (28260, 28270), False, 'from PyQt5 import QtGui, QtCore\n'), ((28368, 28417), 
'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(0.3)', '(-0.4)', '(1.35)', '(0.3)', '(-0.4)'], {}), '(0.3, -0.4, 1.35, 0.3, -0.4)\n', (28389, 28417), False, 'from PyQt5 import QtGui, QtCore\n'), ((28688, 28710), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['gradient'], {}), '(gradient)\n', (28700, 28710), False, 'from PyQt5 import QtGui, QtCore\n'), ((29191, 29240), 'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(0.3)', '(-0.4)', '(1.35)', '(0.3)', '(-0.4)'], {}), '(0.3, -0.4, 1.35, 0.3, -0.4)\n', (29212, 29240), False, 'from PyQt5 import QtGui, QtCore\n'), ((29511, 29533), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['gradient'], {}), '(gradient)\n', (29523, 29533), False, 'from PyQt5 import QtGui, QtCore\n'), ((30007, 30056), 'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(0.3)', '(-0.4)', '(1.35)', '(0.3)', '(-0.4)'], {}), '(0.3, -0.4, 1.35, 0.3, -0.4)\n', (30028, 30056), False, 'from PyQt5 import QtGui, QtCore\n'), ((30327, 30349), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['gradient'], {}), '(gradient)\n', (30339, 30349), False, 'from PyQt5 import QtGui, QtCore\n'), ((30449, 30498), 'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(0.3)', '(-0.4)', '(1.35)', '(0.3)', '(-0.4)'], {}), '(0.3, -0.4, 1.35, 0.3, -0.4)\n', (30470, 30498), False, 'from PyQt5 import QtGui, QtCore\n'), ((30769, 30791), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['gradient'], {}), '(gradient)\n', (30781, 30791), False, 'from PyQt5 import QtGui, QtCore\n'), ((31276, 31325), 'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(0.3)', '(-0.4)', '(1.35)', '(0.3)', '(-0.4)'], {}), '(0.3, -0.4, 1.35, 0.3, -0.4)\n', (31297, 31325), False, 'from PyQt5 import QtGui, QtCore\n'), ((31596, 31618), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['gradient'], {}), '(gradient)\n', (31608, 31618), False, 'from PyQt5 import QtGui, QtCore\n'), ((32092, 32141), 'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(0.3)', '(-0.4)', '(1.35)', '(0.3)', '(-0.4)'], {}), '(0.3, -0.4, 1.35, 0.3, -0.4)\n', (32113, 32141), False, 'from PyQt5 import QtGui, QtCore\n'), ((32412, 32434), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['gradient'], {}), '(gradient)\n', (32424, 32434), False, 'from PyQt5 import QtGui, QtCore\n'), ((32534, 32583), 'PyQt5.QtGui.QRadialGradient', 'QtGui.QRadialGradient', (['(0.3)', '(-0.4)', '(1.35)', '(0.3)', '(-0.4)'], {}), '(0.3, -0.4, 1.35, 0.3, -0.4)\n', (32555, 32583), False, 'from PyQt5 import QtGui, QtCore\n'), ((32854, 32876), 'PyQt5.QtGui.QBrush', 'QtGui.QBrush', (['gradient'], {}), '(gradient)\n', (32866, 32876), False, 'from PyQt5 import QtGui, QtCore\n'), ((33207, 33220), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (33218, 33220), False, 'from PyQt5 import QtGui, QtCore\n'), ((34142, 34181), 'PyQt5.QtWidgets.QCheckBox', 'QtWidgets.QCheckBox', (['self.centralwidget'], {}), '(self.centralwidget)\n', (34161, 34181), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((34266, 34282), 'PyQt5.QtGui.QPalette', 'QtGui.QPalette', ([], {}), '()\n', (34280, 34282), False, 'from PyQt5 import QtGui, QtCore\n'), ((37721, 37734), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (37732, 37734), False, 'from PyQt5 import QtGui, QtCore\n'), ((37983, 38013), 'PyQt5.QtWidgets.QMenuBar', 'QtWidgets.QMenuBar', (['MainWindow'], {}), '(MainWindow)\n', (38001, 38013), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((38195, 38227), 'PyQt5.QtWidgets.QStatusBar', 'QtWidgets.QStatusBar', (['MainWindow'], {}), '(MainWindow)\n', (38215, 38227), False, 'from PyQt5 import QtCore, 
QtGui, QtWidgets\n'), ((38381, 38430), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['MainWindow'], {}), '(MainWindow)\n', (38418, 38430), False, 'from PyQt5 import QtGui, QtCore\n'), ((299, 326), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (311, 326), False, 'from PyQt5 import QtGui, QtCore\n'), ((489, 513), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (501, 513), False, 'from PyQt5 import QtGui, QtCore\n'), ((672, 696), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (684, 696), False, 'from PyQt5 import QtGui, QtCore\n'), ((853, 877), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (865, 877), False, 'from PyQt5 import QtGui, QtCore\n'), ((1036, 1063), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (1048, 1063), False, 'from PyQt5 import QtGui, QtCore\n'), ((1228, 1252), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (1240, 1252), False, 'from PyQt5 import QtGui, QtCore\n'), ((1413, 1437), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (1425, 1437), False, 'from PyQt5 import QtGui, QtCore\n'), ((1596, 1620), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (1608, 1620), False, 'from PyQt5 import QtGui, QtCore\n'), ((1781, 1808), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(120)', '(120)', '(120)'], {}), '(120, 120, 120)\n', (1793, 1808), False, 'from PyQt5 import QtGui, QtCore\n'), ((1973, 1997), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (1985, 1997), False, 'from PyQt5 import QtGui, QtCore\n'), ((2158, 2182), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (2170, 2182), False, 'from PyQt5 import QtGui, QtCore\n'), ((2341, 2365), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (2353, 2365), False, 'from PyQt5 import QtGui, QtCore\n'), ((2880, 2910), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(190)', '(60)', '(191)', '(41)'], {}), '(190, 60, 191, 41)\n', (2892, 2910), False, 'from PyQt5 import QtGui, QtCore\n'), ((2978, 3002), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)'], {}), '(51, 51, 51)\n', (2990, 3002), False, 'from PyQt5 import QtGui, QtCore\n'), ((3366, 3393), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (3378, 3393), False, 'from PyQt5 import QtGui, QtCore\n'), ((3429, 3456), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(136)', '(136)', '(136)'], {}), '(136, 136, 136)\n', (3441, 3456), False, 'from PyQt5 import QtGui, QtCore\n'), ((3607, 3631), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)'], {}), '(51, 51, 51)\n', (3619, 3631), False, 'from PyQt5 import QtGui, QtCore\n'), ((3788, 3812), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)'], {}), '(51, 51, 51)\n', (3800, 3812), False, 'from PyQt5 import QtGui, QtCore\n'), ((4176, 4203), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (4188, 4203), False, 'from PyQt5 import QtGui, QtCore\n'), ((4239, 4266), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(136)', '(136)', '(136)'], {}), '(136, 136, 136)\n', (4251, 4266), False, 'from PyQt5 import QtGui, QtCore\n'), ((4616, 4643), 'PyQt5.QtGui.QColor', 
'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (4628, 4643), False, 'from PyQt5 import QtGui, QtCore\n'), ((4679, 4706), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(136)', '(136)', '(136)'], {}), '(136, 136, 136)\n', (4691, 4706), False, 'from PyQt5 import QtGui, QtCore\n'), ((4857, 4889), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)', '(128)'], {}), '(255, 255, 255, 128)\n', (4869, 4889), False, 'from PyQt5 import QtGui, QtCore\n'), ((5057, 5081), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)'], {}), '(51, 51, 51)\n', (5069, 5081), False, 'from PyQt5 import QtGui, QtCore\n'), ((5447, 5474), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (5459, 5474), False, 'from PyQt5 import QtGui, QtCore\n'), ((5510, 5537), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(136)', '(136)', '(136)'], {}), '(136, 136, 136)\n', (5522, 5537), False, 'from PyQt5 import QtGui, QtCore\n'), ((5690, 5714), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)'], {}), '(51, 51, 51)\n', (5702, 5714), False, 'from PyQt5 import QtGui, QtCore\n'), ((5873, 5897), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)'], {}), '(51, 51, 51)\n', (5885, 5897), False, 'from PyQt5 import QtGui, QtCore\n'), ((6263, 6290), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (6275, 6290), False, 'from PyQt5 import QtGui, QtCore\n'), ((6326, 6353), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(136)', '(136)', '(136)'], {}), '(136, 136, 136)\n', (6338, 6353), False, 'from PyQt5 import QtGui, QtCore\n'), ((6705, 6732), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (6717, 6732), False, 'from PyQt5 import QtGui, QtCore\n'), ((6768, 6795), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(136)', '(136)', '(136)'], {}), '(136, 136, 136)\n', (6780, 6795), False, 'from PyQt5 import QtGui, QtCore\n'), ((6948, 6980), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)', '(128)'], {}), '(255, 255, 255, 128)\n', (6960, 6980), False, 'from PyQt5 import QtGui, QtCore\n'), ((7150, 7174), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)'], {}), '(51, 51, 51)\n', (7162, 7174), False, 'from PyQt5 import QtGui, QtCore\n'), ((7540, 7567), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (7552, 7567), False, 'from PyQt5 import QtGui, QtCore\n'), ((7603, 7630), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(136)', '(136)', '(136)'], {}), '(136, 136, 136)\n', (7615, 7630), False, 'from PyQt5 import QtGui, QtCore\n'), ((7783, 7807), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)'], {}), '(51, 51, 51)\n', (7795, 7807), False, 'from PyQt5 import QtGui, QtCore\n'), ((7966, 7990), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)'], {}), '(51, 51, 51)\n', (7978, 7990), False, 'from PyQt5 import QtGui, QtCore\n'), ((8356, 8383), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (8368, 8383), False, 'from PyQt5 import QtGui, QtCore\n'), ((8419, 8446), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(136)', '(136)', '(136)'], {}), '(136, 136, 136)\n', (8431, 8446), False, 'from PyQt5 import QtGui, QtCore\n'), ((8798, 8825), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (8810, 8825), False, 'from PyQt5 import QtGui, QtCore\n'), ((8861, 8888), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(136)', 
'(136)', '(136)'], {}), '(136, 136, 136)\n', (8873, 8888), False, 'from PyQt5 import QtGui, QtCore\n'), ((9041, 9070), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)', '(128)'], {}), '(51, 51, 51, 128)\n', (9053, 9070), False, 'from PyQt5 import QtGui, QtCore\n'), ((10560, 10591), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(190)', '(120)', '(191)', '(41)'], {}), '(190, 120, 191, 41)\n', (10572, 10591), False, 'from PyQt5 import QtGui, QtCore\n'), ((10659, 10683), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)'], {}), '(51, 51, 51)\n', (10671, 10683), False, 'from PyQt5 import QtGui, QtCore\n'), ((11047, 11074), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (11059, 11074), False, 'from PyQt5 import QtGui, QtCore\n'), ((11110, 11137), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(136)', '(136)', '(136)'], {}), '(136, 136, 136)\n', (11122, 11137), False, 'from PyQt5 import QtGui, QtCore\n'), ((11288, 11312), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)'], {}), '(51, 51, 51)\n', (11300, 11312), False, 'from PyQt5 import QtGui, QtCore\n'), ((11469, 11493), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)'], {}), '(51, 51, 51)\n', (11481, 11493), False, 'from PyQt5 import QtGui, QtCore\n'), ((11857, 11884), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (11869, 11884), False, 'from PyQt5 import QtGui, QtCore\n'), ((11920, 11947), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(136)', '(136)', '(136)'], {}), '(136, 136, 136)\n', (11932, 11947), False, 'from PyQt5 import QtGui, QtCore\n'), ((12297, 12324), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (12309, 12324), False, 'from PyQt5 import QtGui, QtCore\n'), ((12360, 12387), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(136)', '(136)', '(136)'], {}), '(136, 136, 136)\n', (12372, 12387), False, 'from PyQt5 import QtGui, QtCore\n'), ((12538, 12567), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)', '(128)'], {}), '(51, 51, 51, 128)\n', (12550, 12567), False, 'from PyQt5 import QtGui, QtCore\n'), ((12730, 12754), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)'], {}), '(51, 51, 51)\n', (12742, 12754), False, 'from PyQt5 import QtGui, QtCore\n'), ((13120, 13147), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (13132, 13147), False, 'from PyQt5 import QtGui, QtCore\n'), ((13183, 13210), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(136)', '(136)', '(136)'], {}), '(136, 136, 136)\n', (13195, 13210), False, 'from PyQt5 import QtGui, QtCore\n'), ((13363, 13387), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)'], {}), '(51, 51, 51)\n', (13375, 13387), False, 'from PyQt5 import QtGui, QtCore\n'), ((13546, 13570), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)'], {}), '(51, 51, 51)\n', (13558, 13570), False, 'from PyQt5 import QtGui, QtCore\n'), ((13936, 13963), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (13948, 13963), False, 'from PyQt5 import QtGui, QtCore\n'), ((13999, 14026), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(136)', '(136)', '(136)'], {}), '(136, 136, 136)\n', (14011, 14026), False, 'from PyQt5 import QtGui, QtCore\n'), ((14378, 14405), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (14390, 14405), False, 'from PyQt5 import QtGui, QtCore\n'), ((14441, 14468), 
'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(136)', '(136)', '(136)'], {}), '(136, 136, 136)\n', (14453, 14468), False, 'from PyQt5 import QtGui, QtCore\n'), ((14621, 14650), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)', '(128)'], {}), '(51, 51, 51, 128)\n', (14633, 14650), False, 'from PyQt5 import QtGui, QtCore\n'), ((14815, 14839), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)'], {}), '(51, 51, 51)\n', (14827, 14839), False, 'from PyQt5 import QtGui, QtCore\n'), ((15205, 15232), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (15217, 15232), False, 'from PyQt5 import QtGui, QtCore\n'), ((15268, 15295), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(136)', '(136)', '(136)'], {}), '(136, 136, 136)\n', (15280, 15295), False, 'from PyQt5 import QtGui, QtCore\n'), ((15448, 15472), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)'], {}), '(51, 51, 51)\n', (15460, 15472), False, 'from PyQt5 import QtGui, QtCore\n'), ((15631, 15655), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)'], {}), '(51, 51, 51)\n', (15643, 15655), False, 'from PyQt5 import QtGui, QtCore\n'), ((16021, 16048), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (16033, 16048), False, 'from PyQt5 import QtGui, QtCore\n'), ((16084, 16111), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(136)', '(136)', '(136)'], {}), '(136, 136, 136)\n', (16096, 16111), False, 'from PyQt5 import QtGui, QtCore\n'), ((16463, 16490), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (16475, 16490), False, 'from PyQt5 import QtGui, QtCore\n'), ((16526, 16553), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(136)', '(136)', '(136)'], {}), '(136, 136, 136)\n', (16538, 16553), False, 'from PyQt5 import QtGui, QtCore\n'), ((16706, 16735), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)', '(128)'], {}), '(51, 51, 51, 128)\n', (16718, 16735), False, 'from PyQt5 import QtGui, QtCore\n'), ((17957, 17987), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(190)', '(20)', '(191)', '(31)'], {}), '(190, 20, 191, 31)\n', (17969, 17987), False, 'from PyQt5 import QtGui, QtCore\n'), ((18055, 18082), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (18067, 18082), False, 'from PyQt5 import QtGui, QtCore\n'), ((18245, 18269), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (18257, 18269), False, 'from PyQt5 import QtGui, QtCore\n'), ((18428, 18455), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (18440, 18455), False, 'from PyQt5 import QtGui, QtCore\n'), ((18612, 18636), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (18624, 18636), False, 'from PyQt5 import QtGui, QtCore\n'), ((18793, 18817), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (18805, 18817), False, 'from PyQt5 import QtGui, QtCore\n'), ((18976, 19008), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)', '(128)'], {}), '(255, 255, 255, 128)\n', (18988, 19008), False, 'from PyQt5 import QtGui, QtCore\n'), ((19176, 19203), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (19188, 19203), False, 'from PyQt5 import QtGui, QtCore\n'), ((19368, 19392), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (19380, 19392), False, 
'from PyQt5 import QtGui, QtCore\n'), ((19553, 19580), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (19565, 19580), False, 'from PyQt5 import QtGui, QtCore\n'), ((19739, 19763), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (19751, 19763), False, 'from PyQt5 import QtGui, QtCore\n'), ((19922, 19946), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (19934, 19946), False, 'from PyQt5 import QtGui, QtCore\n'), ((20107, 20139), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)', '(128)'], {}), '(255, 255, 255, 128)\n', (20119, 20139), False, 'from PyQt5 import QtGui, QtCore\n'), ((20309, 20336), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(120)', '(120)', '(120)'], {}), '(120, 120, 120)\n', (20321, 20336), False, 'from PyQt5 import QtGui, QtCore\n'), ((20501, 20525), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (20513, 20525), False, 'from PyQt5 import QtGui, QtCore\n'), ((20686, 20713), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(120)', '(120)', '(120)'], {}), '(120, 120, 120)\n', (20698, 20713), False, 'from PyQt5 import QtGui, QtCore\n'), ((20872, 20896), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (20884, 20896), False, 'from PyQt5 import QtGui, QtCore\n'), ((21055, 21079), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (21067, 21079), False, 'from PyQt5 import QtGui, QtCore\n'), ((21240, 21266), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(0)', '(0)', '(0)', '(128)'], {}), '(0, 0, 0, 128)\n', (21252, 21266), False, 'from PyQt5 import QtGui, QtCore\n'), ((21725, 21756), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(200)', '(210)', '(141)', '(31)'], {}), '(200, 210, 141, 31)\n', (21737, 21756), False, 'from PyQt5 import QtGui, QtCore\n'), ((21824, 21851), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (21836, 21851), False, 'from PyQt5 import QtGui, QtCore\n'), ((22014, 22038), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (22026, 22038), False, 'from PyQt5 import QtGui, QtCore\n'), ((22197, 22224), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (22209, 22224), False, 'from PyQt5 import QtGui, QtCore\n'), ((22381, 22405), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (22393, 22405), False, 'from PyQt5 import QtGui, QtCore\n'), ((22562, 22586), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (22574, 22586), False, 'from PyQt5 import QtGui, QtCore\n'), ((22745, 22777), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)', '(128)'], {}), '(255, 255, 255, 128)\n', (22757, 22777), False, 'from PyQt5 import QtGui, QtCore\n'), ((22945, 22972), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (22957, 22972), False, 'from PyQt5 import QtGui, QtCore\n'), ((23137, 23161), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (23149, 23161), False, 'from PyQt5 import QtGui, QtCore\n'), ((23322, 23349), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (23334, 23349), False, 'from PyQt5 import QtGui, QtCore\n'), ((23508, 23532), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), 
'(84, 84, 84)\n', (23520, 23532), False, 'from PyQt5 import QtGui, QtCore\n'), ((23691, 23715), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (23703, 23715), False, 'from PyQt5 import QtGui, QtCore\n'), ((23876, 23908), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)', '(128)'], {}), '(255, 255, 255, 128)\n', (23888, 23908), False, 'from PyQt5 import QtGui, QtCore\n'), ((24078, 24105), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(120)', '(120)', '(120)'], {}), '(120, 120, 120)\n', (24090, 24105), False, 'from PyQt5 import QtGui, QtCore\n'), ((24270, 24294), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (24282, 24294), False, 'from PyQt5 import QtGui, QtCore\n'), ((24455, 24482), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(120)', '(120)', '(120)'], {}), '(120, 120, 120)\n', (24467, 24482), False, 'from PyQt5 import QtGui, QtCore\n'), ((24641, 24665), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (24653, 24665), False, 'from PyQt5 import QtGui, QtCore\n'), ((24824, 24848), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (24836, 24848), False, 'from PyQt5 import QtGui, QtCore\n'), ((25009, 25035), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(0)', '(0)', '(0)', '(128)'], {}), '(0, 0, 0, 128)\n', (25021, 25035), False, 'from PyQt5 import QtGui, QtCore\n'), ((25633, 25663), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(350)', '(210)', '(31)', '(31)'], {}), '(350, 210, 31, 31)\n', (25645, 25663), False, 'from PyQt5 import QtGui, QtCore\n'), ((26523, 26566), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""../../Desktop/EYECLOSE.png"""'], {}), "('../../Desktop/EYECLOSE.png')\n", (26536, 26566), False, 'from PyQt5 import QtGui, QtCore\n'), ((26677, 26697), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(30)', '(30)'], {}), '(30, 30)\n', (26689, 26697), False, 'from PyQt5 import QtGui, QtCore\n'), ((26842, 26873), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(190)', '(250)', '(201)', '(31)'], {}), '(190, 250, 201, 31)\n', (26854, 26873), False, 'from PyQt5 import QtGui, QtCore\n'), ((26941, 26965), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)'], {}), '(51, 51, 51)\n', (26953, 26965), False, 'from PyQt5 import QtGui, QtCore\n'), ((27329, 27356), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (27341, 27356), False, 'from PyQt5 import QtGui, QtCore\n'), ((27392, 27419), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(136)', '(136)', '(136)'], {}), '(136, 136, 136)\n', (27404, 27419), False, 'from PyQt5 import QtGui, QtCore\n'), ((27570, 27594), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)'], {}), '(51, 51, 51)\n', (27582, 27594), False, 'from PyQt5 import QtGui, QtCore\n'), ((27751, 27775), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)'], {}), '(51, 51, 51)\n', (27763, 27775), False, 'from PyQt5 import QtGui, QtCore\n'), ((28139, 28166), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (28151, 28166), False, 'from PyQt5 import QtGui, QtCore\n'), ((28202, 28229), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(136)', '(136)', '(136)'], {}), '(136, 136, 136)\n', (28214, 28229), False, 'from PyQt5 import QtGui, QtCore\n'), ((28579, 28606), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (28591, 28606), False, 'from PyQt5 import QtGui, QtCore\n'), ((28642, 28669), 
'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(136)', '(136)', '(136)'], {}), '(136, 136, 136)\n', (28654, 28669), False, 'from PyQt5 import QtGui, QtCore\n'), ((28820, 28849), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)', '(128)'], {}), '(51, 51, 51, 128)\n', (28832, 28849), False, 'from PyQt5 import QtGui, QtCore\n'), ((29012, 29036), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)'], {}), '(51, 51, 51)\n', (29024, 29036), False, 'from PyQt5 import QtGui, QtCore\n'), ((29402, 29429), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (29414, 29429), False, 'from PyQt5 import QtGui, QtCore\n'), ((29465, 29492), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(136)', '(136)', '(136)'], {}), '(136, 136, 136)\n', (29477, 29492), False, 'from PyQt5 import QtGui, QtCore\n'), ((29645, 29669), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)'], {}), '(51, 51, 51)\n', (29657, 29669), False, 'from PyQt5 import QtGui, QtCore\n'), ((29828, 29852), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)'], {}), '(51, 51, 51)\n', (29840, 29852), False, 'from PyQt5 import QtGui, QtCore\n'), ((30218, 30245), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (30230, 30245), False, 'from PyQt5 import QtGui, QtCore\n'), ((30281, 30308), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(136)', '(136)', '(136)'], {}), '(136, 136, 136)\n', (30293, 30308), False, 'from PyQt5 import QtGui, QtCore\n'), ((30660, 30687), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (30672, 30687), False, 'from PyQt5 import QtGui, QtCore\n'), ((30723, 30750), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(136)', '(136)', '(136)'], {}), '(136, 136, 136)\n', (30735, 30750), False, 'from PyQt5 import QtGui, QtCore\n'), ((30903, 30932), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)', '(128)'], {}), '(51, 51, 51, 128)\n', (30915, 30932), False, 'from PyQt5 import QtGui, QtCore\n'), ((31097, 31121), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)'], {}), '(51, 51, 51)\n', (31109, 31121), False, 'from PyQt5 import QtGui, QtCore\n'), ((31487, 31514), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (31499, 31514), False, 'from PyQt5 import QtGui, QtCore\n'), ((31550, 31577), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(136)', '(136)', '(136)'], {}), '(136, 136, 136)\n', (31562, 31577), False, 'from PyQt5 import QtGui, QtCore\n'), ((31730, 31754), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)'], {}), '(51, 51, 51)\n', (31742, 31754), False, 'from PyQt5 import QtGui, QtCore\n'), ((31913, 31937), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)'], {}), '(51, 51, 51)\n', (31925, 31937), False, 'from PyQt5 import QtGui, QtCore\n'), ((32303, 32330), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (32315, 32330), False, 'from PyQt5 import QtGui, QtCore\n'), ((32366, 32393), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(136)', '(136)', '(136)'], {}), '(136, 136, 136)\n', (32378, 32393), False, 'from PyQt5 import QtGui, QtCore\n'), ((32745, 32772), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (32757, 32772), False, 'from PyQt5 import QtGui, QtCore\n'), ((32808, 32835), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(136)', '(136)', '(136)'], {}), '(136, 136, 136)\n', (32820, 32835), False, 'from PyQt5 
import QtGui, QtCore\n'), ((32988, 33017), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(51)', '(51)', '(51)', '(128)'], {}), '(51, 51, 51, 128)\n', (33000, 33017), False, 'from PyQt5 import QtGui, QtCore\n'), ((34215, 34245), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(250)', '(180)', '(81)', '(21)'], {}), '(250, 180, 81, 21)\n', (34227, 34245), False, 'from PyQt5 import QtGui, QtCore\n'), ((34313, 34340), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (34325, 34340), False, 'from PyQt5 import QtGui, QtCore\n'), ((34503, 34527), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (34515, 34527), False, 'from PyQt5 import QtGui, QtCore\n'), ((34686, 34713), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (34698, 34713), False, 'from PyQt5 import QtGui, QtCore\n'), ((34870, 34894), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (34882, 34894), False, 'from PyQt5 import QtGui, QtCore\n'), ((35051, 35075), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (35063, 35075), False, 'from PyQt5 import QtGui, QtCore\n'), ((35234, 35266), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)', '(128)'], {}), '(255, 255, 255, 128)\n', (35246, 35266), False, 'from PyQt5 import QtGui, QtCore\n'), ((35434, 35461), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (35446, 35461), False, 'from PyQt5 import QtGui, QtCore\n'), ((35626, 35650), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (35638, 35650), False, 'from PyQt5 import QtGui, QtCore\n'), ((35811, 35838), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (35823, 35838), False, 'from PyQt5 import QtGui, QtCore\n'), ((35997, 36021), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (36009, 36021), False, 'from PyQt5 import QtGui, QtCore\n'), ((36180, 36204), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (36192, 36204), False, 'from PyQt5 import QtGui, QtCore\n'), ((36365, 36397), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)', '(128)'], {}), '(255, 255, 255, 128)\n', (36377, 36397), False, 'from PyQt5 import QtGui, QtCore\n'), ((36567, 36594), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(120)', '(120)', '(120)'], {}), '(120, 120, 120)\n', (36579, 36594), False, 'from PyQt5 import QtGui, QtCore\n'), ((36759, 36783), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (36771, 36783), False, 'from PyQt5 import QtGui, QtCore\n'), ((36944, 36971), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(120)', '(120)', '(120)'], {}), '(120, 120, 120)\n', (36956, 36971), False, 'from PyQt5 import QtGui, QtCore\n'), ((37130, 37154), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (37142, 37154), False, 'from PyQt5 import QtGui, QtCore\n'), ((37313, 37337), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(84)', '(84)', '(84)'], {}), '(84, 84, 84)\n', (37325, 37337), False, 'from PyQt5 import QtGui, QtCore\n'), ((37498, 37524), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(0)', '(0)', '(0)', '(128)'], {}), '(0, 0, 0, 128)\n', (37510, 37524), False, 'from PyQt5 import QtGui, QtCore\n'), ((38048, 38075), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(0)', '(0)', '(577)', '(21)'], {}), 
'(0, 0, 577, 21)\n', (38060, 38075), False, 'from PyQt5 import QtGui, QtCore\n'), ((39415, 39450), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', (['"""Assets//EYECLOSE.png"""'], {}), "('Assets//EYECLOSE.png')\n", (39426, 39450), False, 'from PyQt5 import QtGui, QtCore\n')]
wjh112233/yxtx
yxtx/myApp/migrations/0017_chat.py
f118c2b9983ca48b099f2c328487e23f5430303f
# Generated by Django 3.0.2 on 2020-03-17 08:44 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('myApp', '0016_usergroup_buyer'), ] operations = [ migrations.CreateModel( name='Chat', fields=[ ('id', models.CharField(max_length=31, primary_key=True, serialize=False)), ('chatinfo', models.CharField(max_length=20000)), ('shopid', models.CharField(max_length=30)), ('user1', models.CharField(max_length=50)), ('user2', models.CharField(max_length=50)), ('name1', models.CharField(max_length=50)), ('name2', models.CharField(max_length=50)), ], ), ]
[((323, 389), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(31)', 'primary_key': '(True)', 'serialize': '(False)'}), '(max_length=31, primary_key=True, serialize=False)\n', (339, 389), False, 'from django.db import migrations, models\n'), ((421, 455), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20000)'}), '(max_length=20000)\n', (437, 455), False, 'from django.db import migrations, models\n'), ((485, 516), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (501, 516), False, 'from django.db import migrations, models\n'), ((545, 576), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (561, 576), False, 'from django.db import migrations, models\n'), ((605, 636), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (621, 636), False, 'from django.db import migrations, models\n'), ((665, 696), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (681, 696), False, 'from django.db import migrations, models\n'), ((725, 756), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (741, 756), False, 'from django.db import migrations, models\n')]
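The migration above only records the schema change. For reference, a minimal sketch of the Chat model that such a migration would correspond to is given below; the field names and max_length values are taken directly from the CreateModel operation, while the module location (the app's models.py) and everything else about the class are assumptions, not part of this record.

# Hypothetical reconstruction of the model behind the 0017_chat migration.
# Field names and max_length values come from the CreateModel operation above;
# this class would live in myApp/models.py inside a configured Django project.
from django.db import models

class Chat(models.Model):
    id = models.CharField(max_length=31, primary_key=True, serialize=False)  # primary key as declared in the migration
    chatinfo = models.CharField(max_length=20000)
    shopid = models.CharField(max_length=30)
    user1 = models.CharField(max_length=50)
    user2 = models.CharField(max_length=50)
    name1 = models.CharField(max_length=50)
    name2 = models.CharField(max_length=50)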
willingc/oh-missions-oppia-beta
core/controllers/services.py
3d97903a5155ec67f135b1aa2c02f3bb39eb02e7
# Copyright 2012 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Controllers for miscellaneous services.""" __author__ = 'Tarashish Mishra' import base64 import json from core.controllers import base class FileReadHandler(base.BaseHandler): """Returns a base64-encoded ascii string with uploaded file's content.""" def post(self): raw_file_content = self.request.get('file') encoded_content = base64.b64encode(raw_file_content) self.response.headers['Content-Type'] = 'application/json' response = { 'base64_file_content': encoded_content, } self.response.out.write(json.dumps(response))
[((960, 994), 'base64.b64encode', 'base64.b64encode', (['raw_file_content'], {}), '(raw_file_content)\n', (976, 994), False, 'import base64\n'), ((1178, 1198), 'json.dumps', 'json.dumps', (['response'], {}), '(response)\n', (1188, 1198), False, 'import json\n')]
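FileReadHandler responds with a JSON body whose base64_file_content key holds the uploaded file encoded by base64.b64encode. A minimal client-side sketch for decoding such a response is shown below; the response_text value is a made-up example standing in for the real HTTP response body.

# Sketch: decoding the JSON payload produced by FileReadHandler.
# `response_text` is a hypothetical example; in practice it comes from the HTTP response.
import base64
import json

response_text = '{"base64_file_content": "aGVsbG8gd29ybGQ="}'  # stand-in handler output
payload = json.loads(response_text)
file_bytes = base64.b64decode(payload['base64_file_content'])
print(file_bytes)  # b'hello world'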
hebinhuang/batch-shipyard
convoy/crypto.py
f87d94850380bee273eb51c5c35381952a5722b8
# Copyright (c) Microsoft Corporation # # All rights reserved. # # MIT License # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # compat imports from __future__ import ( absolute_import, division, print_function, unicode_literals ) from builtins import ( # noqa bytes, dict, int, list, object, range, str, ascii, chr, hex, input, next, oct, open, pow, round, super, filter, map, zip) # stdlib imports import base64 import collections import getpass import logging import os try: import pathlib2 as pathlib except ImportError: import pathlib import tempfile import stat import subprocess # local imports from . import settings from . import util # create logger logger = logging.getLogger(__name__) util.setup_logger(logger) # global defines _SSH_KEY_PREFIX = 'id_rsa_shipyard' _REMOTEFS_SSH_KEY_PREFIX = '{}_remotefs'.format(_SSH_KEY_PREFIX) # named tuples PfxSettings = collections.namedtuple( 'PfxSettings', ['filename', 'passphrase', 'sha1']) def get_ssh_key_prefix(): # type: (None) -> str """Get SSH key prefix :rtype: str :return: ssh key prefix """ return _SSH_KEY_PREFIX def get_remotefs_ssh_key_prefix(): # type: (None) -> str """Get remote fs SSH key prefix :rtype: str :return: ssh key prefix for remote fs """ return _REMOTEFS_SSH_KEY_PREFIX def generate_rdp_password(): # type: (None) -> str """Generate an RDP password :rtype: str :return: rdp password """ return base64.b64encode(os.urandom(8)) def generate_ssh_keypair(export_path, prefix=None): # type: (str, str) -> tuple """Generate an ssh keypair for use with user logins :param str export_path: keypair export path :param str prefix: key prefix :rtype: tuple :return: (private key filename, public key filename) """ if util.is_none_or_empty(prefix): prefix = _SSH_KEY_PREFIX privkey = pathlib.Path(export_path, prefix) pubkey = pathlib.Path(export_path, prefix + '.pub') if privkey.exists(): old = pathlib.Path(export_path, prefix + '.old') if old.exists(): old.unlink() privkey.rename(old) if pubkey.exists(): old = pathlib.Path(export_path, prefix + '.pub.old') if old.exists(): old.unlink() pubkey.rename(old) logger.info('generating ssh key pair to path: {}'.format(export_path)) subprocess.check_call( ['ssh-keygen', '-f', str(privkey), '-t', 'rsa', '-N', '''''']) return (privkey, pubkey) def check_ssh_private_key_filemode(ssh_private_key): # type: (pathlib.Path) -> bool """Check SSH private key filemode :param pathlib.Path ssh_private_key: SSH private key :rtype: bool :return: private key filemode is ok """ def _mode_check(fstat, flag): return bool(fstat & flag) 
if util.on_windows(): return True fstat = ssh_private_key.stat().st_mode modes = frozenset((stat.S_IRWXG, stat.S_IRWXO)) return not any([_mode_check(fstat, x) for x in modes]) def connect_or_exec_ssh_command( remote_ip, remote_port, ssh_private_key, username, sync=True, shell=False, tty=False, ssh_args=None, command=None): # type: (str, int, pathlib.Path, str, bool, bool, tuple, tuple) -> bool """Connect to node via SSH or execute SSH command :param str remote_ip: remote ip address :param int remote_port: remote port :param pathlib.Path ssh_private_key: SSH private key :param str username: username :param bool sync: synchronous execution :param bool shell: execute with shell :param bool tty: allocate pseudo-tty :param tuple ssh_args: ssh args :param tuple command: command :rtype: int or subprocess.Process :return: return code or subprocess handle """ if not ssh_private_key.exists(): raise RuntimeError('SSH private key file not found at: {}'.format( ssh_private_key)) # ensure file mode is set properly for the private key if not check_ssh_private_key_filemode(ssh_private_key): logger.warning( 'SSH private key filemode is too permissive: {}'.format( ssh_private_key)) # execute SSH command ssh_cmd = [ 'ssh', '-o', 'StrictHostKeyChecking=no', '-o', 'UserKnownHostsFile={}'.format(os.devnull), '-i', str(ssh_private_key), '-p', str(remote_port), ] if tty: ssh_cmd.append('-t') if util.is_not_empty(ssh_args): ssh_cmd.extend(ssh_args) ssh_cmd.append('{}@{}'.format(username, remote_ip)) if util.is_not_empty(command): ssh_cmd.extend(command) logger.info('{} node {}:{} with key {}'.format( 'connecting to' if util.is_none_or_empty(command) else 'executing command on', remote_ip, remote_port, ssh_private_key)) if sync: return util.subprocess_with_output(ssh_cmd, shell=shell) else: return util.subprocess_nowait_pipe_stdout( ssh_cmd, shell=shell, pipe_stderr=True) def derive_private_key_pem_from_pfx(pfxfile, passphrase=None, pemfile=None): # type: (str, str, str) -> str """Derive a private key pem file from a pfx :param str pfxfile: pfx file :param str passphrase: passphrase for pfx :param str pemfile: path of pem file to write to :rtype: str :return: path of pem file """ if pfxfile is None: raise ValueError('pfx file is invalid') if passphrase is None: passphrase = getpass.getpass('Enter password for PFX: ') # convert pfx to pem if pemfile is None: f = tempfile.NamedTemporaryFile(mode='wb', delete=False) f.close() pemfile = f.name try: # create pem from pfx subprocess.check_call( ['openssl', 'pkcs12', '-nodes', '-in', pfxfile, '-out', pemfile, '-password', 'pass:' + passphrase] ) except Exception: fp = pathlib.Path(pemfile) if fp.exists(): fp.unlink() pemfile = None return pemfile def derive_public_key_pem_from_pfx(pfxfile, passphrase=None, pemfile=None): # type: (str, str, str) -> str """Derive a public key pem file from a pfx :param str pfxfile: pfx file :param str passphrase: passphrase for pfx :param str pemfile: path of pem file to write to :rtype: str :return: path of pem file """ if pfxfile is None: raise ValueError('pfx file is invalid') if passphrase is None: passphrase = getpass.getpass('Enter password for PFX: ') # convert pfx to pem if pemfile is None: f = tempfile.NamedTemporaryFile(mode='wb', delete=False) f.close() pemfile = f.name try: # create pem from pfx subprocess.check_call( ['openssl', 'pkcs12', '-nodes', '-in', pfxfile, '-out', pemfile, '-password', 'pass:' + passphrase] ) # extract public key from private key subprocess.check_call( ['openssl', 'rsa', '-in', pemfile, '-pubout', 
'-outform', 'PEM', '-out', pemfile] ) except Exception: fp = pathlib.Path(pemfile) if fp.exists(): fp.unlink() pemfile = None return pemfile def _parse_sha1_thumbprint_openssl(output): # type: (str) -> str """Get SHA1 thumbprint from buffer :param str buffer: buffer to parse :rtype: str :return: sha1 thumbprint of buffer """ # return just thumbprint (without colons) from the above openssl command # in lowercase. Expected openssl output is in the form: # SHA1 Fingerprint=<thumbprint> return ''.join(util.decode_string( output).strip().split('=')[1].split(':')).lower() def get_sha1_thumbprint_pfx(pfxfile, passphrase): # type: (str, str) -> str """Get SHA1 thumbprint of PFX :param str pfxfile: name of the pfx file to export :param str passphrase: passphrase for pfx :rtype: str :return: sha1 thumbprint of pfx """ if pfxfile is None: raise ValueError('pfxfile is invalid') if passphrase is None: passphrase = getpass.getpass('Enter password for PFX: ') # compute sha1 thumbprint of pfx pfxdump = subprocess.check_output( ['openssl', 'pkcs12', '-in', pfxfile, '-nodes', '-passin', 'pass:' + passphrase] ) proc = subprocess.Popen( ['openssl', 'x509', '-noout', '-fingerprint'], stdin=subprocess.PIPE, stdout=subprocess.PIPE ) return _parse_sha1_thumbprint_openssl(proc.communicate(input=pfxdump)[0]) def get_sha1_thumbprint_pem(pemfile): # type: (str) -> str """Get SHA1 thumbprint of PEM :param str pfxfile: name of the pfx file to export :rtype: str :return: sha1 thumbprint of pem """ proc = subprocess.Popen( ['openssl', 'x509', '-noout', '-fingerprint', '-in', pemfile], stdout=subprocess.PIPE ) return _parse_sha1_thumbprint_openssl(proc.communicate()[0]) def generate_pem_pfx_certificates(config): # type: (dict) -> str """Generate a pem and a derived pfx file :param dict config: configuration dict :rtype: str :return: sha1 thumbprint of pfx """ # gather input pemfile = settings.batch_shipyard_encryption_public_key_pem(config) pfxfile = settings.batch_shipyard_encryption_pfx_filename(config) passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config) if pemfile is None: pemfile = util.get_input('Enter public key PEM filename to create: ') if pfxfile is None: pfxfile = util.get_input('Enter PFX filename to create: ') if passphrase is None: while util.is_none_or_empty(passphrase): passphrase = getpass.getpass('Enter password for PFX: ') if len(passphrase) == 0: print('passphrase cannot be empty') privatekey = pemfile + '.key' # generate pem file with private key and no password f = tempfile.NamedTemporaryFile(mode='wb', delete=False) f.close() try: subprocess.check_call( ['openssl', 'req', '-new', '-nodes', '-x509', '-newkey', 'rsa:2048', '-keyout', privatekey, '-out', f.name, '-days', '730', '-subj', '/C=US/ST=None/L=None/O=None/CN=BatchShipyard'] ) # extract public key from private key subprocess.check_call( ['openssl', 'rsa', '-in', privatekey, '-pubout', '-outform', 'PEM', '-out', pemfile] ) logger.debug('created public key PEM file: {}'.format(pemfile)) # convert pem to pfx for Azure Batch service subprocess.check_call( ['openssl', 'pkcs12', '-export', '-out', pfxfile, '-inkey', privatekey, '-in', f.name, '-certfile', f.name, '-passin', 'pass:', '-passout', 'pass:' + passphrase] ) logger.debug('created PFX file: {}'.format(pfxfile)) finally: # remove rsa private key file fp = pathlib.Path(privatekey) if fp.exists(): fp.unlink() # remove temp cert pem fp = pathlib.Path(f.name) if fp.exists(): fp.unlink() # get sha1 thumbprint of pfx return get_sha1_thumbprint_pfx(pfxfile, passphrase) def 
get_encryption_pfx_settings(config): # type: (dict) -> tuple """Get PFX encryption settings from configuration :param dict config: configuration settings :rtype: tuple :return: pfxfile, passphrase, sha1 tp """ pfxfile = settings.batch_shipyard_encryption_pfx_filename(config) pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config) sha1_cert_tp = settings.batch_shipyard_encryption_pfx_sha1_thumbprint( config) # manually get thumbprint of pfx if not exists in config if util.is_none_or_empty(sha1_cert_tp): if pfx_passphrase is None: pfx_passphrase = getpass.getpass('Enter password for PFX: ') sha1_cert_tp = get_sha1_thumbprint_pfx(pfxfile, pfx_passphrase) settings.set_batch_shipyard_encryption_pfx_sha1_thumbprint( config, sha1_cert_tp) return PfxSettings( filename=pfxfile, passphrase=pfx_passphrase, sha1=sha1_cert_tp) def _rsa_encrypt_string(data, config): # type: (str, dict) -> str """RSA encrypt a string :param str data: clear text data to encrypt :param dict config: configuration dict :rtype: str :return: base64-encoded cipher text """ if util.is_none_or_empty(data): raise ValueError('invalid data to encrypt') inkey = settings.batch_shipyard_encryption_public_key_pem(config) derived = False if inkey is None: # derive pem from pfx derived = True pfxfile = settings.batch_shipyard_encryption_pfx_filename(config) pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase( config) inkey = derive_public_key_pem_from_pfx(pfxfile, pfx_passphrase, None) try: if inkey is None: raise RuntimeError('public encryption key is invalid') proc = subprocess.Popen( ['openssl', 'rsautl', '-encrypt', '-pubin', '-inkey', inkey], stdin=subprocess.PIPE, stdout=subprocess.PIPE) ciphertext = util.base64_encode_string( proc.communicate(input=util.encode_string(data))[0]) if proc.returncode != 0: raise RuntimeError( 'openssl encryption failed with returncode: {}'.format( proc.returncode)) return ciphertext finally: if derived: fp = pathlib.Path(inkey) if fp.exists(): fp.unlink() def _rsa_decrypt_string_with_pfx(ciphertext, config): # type: (str, dict) -> str """RSA decrypt a string :param str ciphertext: cipher text in base64 :param dict config: configuration dict :rtype: str :return: decrypted cipher text """ if util.is_none_or_empty(ciphertext): raise ValueError('invalid ciphertext to decrypt') pfxfile = settings.batch_shipyard_encryption_pfx_filename(config) pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config) pemfile = derive_private_key_pem_from_pfx(pfxfile, pfx_passphrase, None) if pemfile is None: raise RuntimeError('cannot decrypt without valid private key') cleartext = None try: data = util.base64_decode_string(ciphertext) proc = subprocess.Popen( ['openssl', 'rsautl', '-decrypt', '-inkey', pemfile], stdin=subprocess.PIPE, stdout=subprocess.PIPE) cleartext = proc.communicate(input=data)[0] finally: fp = pathlib.Path(pemfile) if fp.exists(): fp.unlink() return cleartext def encrypt_string(enabled, string, config): # type: (bool, str, dict) -> str """Encrypt a string :param bool enabled: if encryption is enabled :param str string: string to encrypt :param dict config: configuration dict :rtype: str :return: encrypted string if enabled """ if enabled: return _rsa_encrypt_string(string, config) else: return string
[((1701, 1728), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1718, 1728), False, 'import logging\n'), ((1902, 1975), 'collections.namedtuple', 'collections.namedtuple', (['"""PfxSettings"""', "['filename', 'passphrase', 'sha1']"], {}), "('PfxSettings', ['filename', 'passphrase', 'sha1'])\n", (1924, 1975), False, 'import collections\n'), ((2915, 2948), 'pathlib.Path', 'pathlib.Path', (['export_path', 'prefix'], {}), '(export_path, prefix)\n', (2927, 2948), False, 'import pathlib\n'), ((2962, 3004), 'pathlib.Path', 'pathlib.Path', (['export_path', "(prefix + '.pub')"], {}), "(export_path, prefix + '.pub')\n", (2974, 3004), False, 'import pathlib\n'), ((9231, 9340), 'subprocess.check_output', 'subprocess.check_output', (["['openssl', 'pkcs12', '-in', pfxfile, '-nodes', '-passin', 'pass:' + passphrase\n ]"], {}), "(['openssl', 'pkcs12', '-in', pfxfile, '-nodes',\n '-passin', 'pass:' + passphrase])\n", (9254, 9340), False, 'import subprocess\n'), ((9371, 9486), 'subprocess.Popen', 'subprocess.Popen', (["['openssl', 'x509', '-noout', '-fingerprint']"], {'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE'}), "(['openssl', 'x509', '-noout', '-fingerprint'], stdin=\n subprocess.PIPE, stdout=subprocess.PIPE)\n", (9387, 9486), False, 'import subprocess\n'), ((9807, 9914), 'subprocess.Popen', 'subprocess.Popen', (["['openssl', 'x509', '-noout', '-fingerprint', '-in', pemfile]"], {'stdout': 'subprocess.PIPE'}), "(['openssl', 'x509', '-noout', '-fingerprint', '-in',\n pemfile], stdout=subprocess.PIPE)\n", (9823, 9914), False, 'import subprocess\n'), ((10979, 11031), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""wb"""', 'delete': '(False)'}), "(mode='wb', delete=False)\n", (11006, 11031), False, 'import tempfile\n'), ((2508, 2521), 'os.urandom', 'os.urandom', (['(8)'], {}), '(8)\n', (2518, 2521), False, 'import os\n'), ((3044, 3086), 'pathlib.Path', 'pathlib.Path', (['export_path', "(prefix + '.old')"], {}), "(export_path, prefix + '.old')\n", (3056, 3086), False, 'import pathlib\n'), ((3203, 3249), 'pathlib.Path', 'pathlib.Path', (['export_path', "(prefix + '.pub.old')"], {}), "(export_path, prefix + '.pub.old')\n", (3215, 3249), False, 'import pathlib\n'), ((5359, 5379), 'builtins.str', 'str', (['ssh_private_key'], {}), '(ssh_private_key)\n', (5362, 5379), False, 'from builtins import bytes, dict, int, list, object, range, str, ascii, chr, hex, input, next, oct, open, pow, round, super, filter, map, zip\n'), ((5387, 5403), 'builtins.str', 'str', (['remote_port'], {}), '(remote_port)\n', (5390, 5403), False, 'from builtins import bytes, dict, int, list, object, range, str, ascii, chr, hex, input, next, oct, open, pow, round, super, filter, map, zip\n'), ((6492, 6535), 'getpass.getpass', 'getpass.getpass', (['"""Enter password for PFX: """'], {}), "('Enter password for PFX: ')\n", (6507, 6535), False, 'import getpass\n'), ((6597, 6649), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""wb"""', 'delete': '(False)'}), "(mode='wb', delete=False)\n", (6624, 6649), False, 'import tempfile\n'), ((6740, 6866), 'subprocess.check_call', 'subprocess.check_call', (["['openssl', 'pkcs12', '-nodes', '-in', pfxfile, '-out', pemfile,\n '-password', 'pass:' + passphrase]"], {}), "(['openssl', 'pkcs12', '-nodes', '-in', pfxfile,\n '-out', pemfile, '-password', 'pass:' + passphrase])\n", (6761, 6866), False, 'import subprocess\n'), ((7511, 7554), 'getpass.getpass', 'getpass.getpass', (['"""Enter password for PFX: """'], 
{}), "('Enter password for PFX: ')\n", (7526, 7554), False, 'import getpass\n'), ((7616, 7668), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""wb"""', 'delete': '(False)'}), "(mode='wb', delete=False)\n", (7643, 7668), False, 'import tempfile\n'), ((7759, 7885), 'subprocess.check_call', 'subprocess.check_call', (["['openssl', 'pkcs12', '-nodes', '-in', pfxfile, '-out', pemfile,\n '-password', 'pass:' + passphrase]"], {}), "(['openssl', 'pkcs12', '-nodes', '-in', pfxfile,\n '-out', pemfile, '-password', 'pass:' + passphrase])\n", (7780, 7885), False, 'import subprocess\n'), ((7971, 8079), 'subprocess.check_call', 'subprocess.check_call', (["['openssl', 'rsa', '-in', pemfile, '-pubout', '-outform', 'PEM', '-out',\n pemfile]"], {}), "(['openssl', 'rsa', '-in', pemfile, '-pubout',\n '-outform', 'PEM', '-out', pemfile])\n", (7992, 8079), False, 'import subprocess\n'), ((9136, 9179), 'getpass.getpass', 'getpass.getpass', (['"""Enter password for PFX: """'], {}), "('Enter password for PFX: ')\n", (9151, 9179), False, 'import getpass\n'), ((11063, 11274), 'subprocess.check_call', 'subprocess.check_call', (["['openssl', 'req', '-new', '-nodes', '-x509', '-newkey', 'rsa:2048',\n '-keyout', privatekey, '-out', f.name, '-days', '730', '-subj',\n '/C=US/ST=None/L=None/O=None/CN=BatchShipyard']"], {}), "(['openssl', 'req', '-new', '-nodes', '-x509',\n '-newkey', 'rsa:2048', '-keyout', privatekey, '-out', f.name, '-days',\n '730', '-subj', '/C=US/ST=None/L=None/O=None/CN=BatchShipyard'])\n", (11084, 11274), False, 'import subprocess\n'), ((11369, 11480), 'subprocess.check_call', 'subprocess.check_call', (["['openssl', 'rsa', '-in', privatekey, '-pubout', '-outform', 'PEM', '-out',\n pemfile]"], {}), "(['openssl', 'rsa', '-in', privatekey, '-pubout',\n '-outform', 'PEM', '-out', pemfile])\n", (11390, 11480), False, 'import subprocess\n'), ((11645, 11837), 'subprocess.check_call', 'subprocess.check_call', (["['openssl', 'pkcs12', '-export', '-out', pfxfile, '-inkey', privatekey,\n '-in', f.name, '-certfile', f.name, '-passin', 'pass:', '-passout', \n 'pass:' + passphrase]"], {}), "(['openssl', 'pkcs12', '-export', '-out', pfxfile,\n '-inkey', privatekey, '-in', f.name, '-certfile', f.name, '-passin',\n 'pass:', '-passout', 'pass:' + passphrase])\n", (11666, 11837), False, 'import subprocess\n'), ((12003, 12027), 'pathlib.Path', 'pathlib.Path', (['privatekey'], {}), '(privatekey)\n', (12015, 12027), False, 'import pathlib\n'), ((12120, 12140), 'pathlib.Path', 'pathlib.Path', (['f.name'], {}), '(f.name)\n', (12132, 12140), False, 'import pathlib\n'), ((14114, 14243), 'subprocess.Popen', 'subprocess.Popen', (["['openssl', 'rsautl', '-encrypt', '-pubin', '-inkey', inkey]"], {'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE'}), "(['openssl', 'rsautl', '-encrypt', '-pubin', '-inkey',\n inkey], stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n", (14130, 14243), False, 'import subprocess\n'), ((15490, 15611), 'subprocess.Popen', 'subprocess.Popen', (["['openssl', 'rsautl', '-decrypt', '-inkey', pemfile]"], {'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE'}), "(['openssl', 'rsautl', '-decrypt', '-inkey', pemfile],\n stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n", (15506, 15611), False, 'import subprocess\n'), ((15711, 15732), 'pathlib.Path', 'pathlib.Path', (['pemfile'], {}), '(pemfile)\n', (15723, 15732), False, 'import pathlib\n'), ((3458, 3470), 'builtins.str', 'str', (['privkey'], {}), '(privkey)\n', (3461, 3470), False, 'from builtins import bytes, dict, int, 
list, object, range, str, ascii, chr, hex, input, next, oct, open, pow, round, super, filter, map, zip\n'), ((6933, 6954), 'pathlib.Path', 'pathlib.Path', (['pemfile'], {}), '(pemfile)\n', (6945, 6954), False, 'import pathlib\n'), ((8146, 8167), 'pathlib.Path', 'pathlib.Path', (['pemfile'], {}), '(pemfile)\n', (8158, 8167), False, 'import pathlib\n'), ((10747, 10790), 'getpass.getpass', 'getpass.getpass', (['"""Enter password for PFX: """'], {}), "('Enter password for PFX: ')\n", (10762, 10790), False, 'import getpass\n'), ((12927, 12970), 'getpass.getpass', 'getpass.getpass', (['"""Enter password for PFX: """'], {}), "('Enter password for PFX: ')\n", (12942, 12970), False, 'import getpass\n'), ((14629, 14648), 'pathlib.Path', 'pathlib.Path', (['inkey'], {}), '(inkey)\n', (14641, 14648), False, 'import pathlib\n')]
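The _rsa_encrypt_string and _rsa_decrypt_string_with_pfx helpers above shell out to openssl rsautl with a PEM key pair derived from the PFX. A standalone sketch of the same round trip, stripped of the config plumbing, is below; the key file names are placeholders and an existing RSA key pair on disk is assumed.

# Sketch: RSA encrypt/decrypt round trip via openssl, mirroring the helpers above.
# 'pub.pem' and 'priv.pem' are hypothetical paths to an existing RSA key pair.
import base64
import subprocess

def rsa_encrypt(cleartext, public_pem='pub.pem'):
    # openssl reads the plaintext from stdin and writes raw ciphertext to stdout
    proc = subprocess.Popen(
        ['openssl', 'rsautl', '-encrypt', '-pubin', '-inkey', public_pem],
        stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    ciphertext, _ = proc.communicate(input=cleartext.encode('utf8'))
    return base64.b64encode(ciphertext)

def rsa_decrypt(b64_ciphertext, private_pem='priv.pem'):
    proc = subprocess.Popen(
        ['openssl', 'rsautl', '-decrypt', '-inkey', private_pem],
        stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    cleartext, _ = proc.communicate(input=base64.b64decode(b64_ciphertext))
    return cleartext

# Usage, assuming the key pair exists:
# token = rsa_encrypt('some secret')
# print(rsa_decrypt(token))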
lj-ecjtu/Cascade_FPN_Tensorflow-master
libs/configs/COCO/cfgs_res50_1x_coco_v3.py
40fcd2c10f057b3f015ca1380d7db102e967391f
# -*- coding: utf-8 -*- from __future__ import division, print_function, absolute_import import os import tensorflow as tf ''' gluoncv backbone + multi_gpu ''' # ------------------------------------------------ VERSION = 'Cascade_FPN_Res50_COCO_1x_20190421_v3' NET_NAME = 'resnet50_v1d' ADD_BOX_IN_TENSORBOARD = True # ---------------------------------------- System_config ROOT_PATH = os.path.abspath('../') print(20*"++--") print(ROOT_PATH) GPU_GROUP = "0,1,2,3,4,5,6,7" NUM_GPU = len(GPU_GROUP.strip().split(',')) SHOW_TRAIN_INFO_INTE = 20 SMRY_ITER = 200 SAVE_WEIGHTS_INTE = 80000 SUMMARY_PATH = ROOT_PATH + '/output/summary' TEST_SAVE_PATH = ROOT_PATH + '/tools/test_result' INFERENCE_IMAGE_PATH = ROOT_PATH + '/tools/inference_image' INFERENCE_SAVE_PATH = ROOT_PATH + '/tools/inference_results' if NET_NAME.startswith("resnet"): weights_name = NET_NAME elif NET_NAME.startswith("MobilenetV2"): weights_name = "mobilenet/mobilenet_v2_1.0_224" else: raise NotImplementedError PRETRAINED_CKPT = ROOT_PATH + '/data/pretrained_weights/' + weights_name + '.ckpt' TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights') EVALUATE_DIR = ROOT_PATH + '/output/evaluate_result_pickle/' # ------------------------------------------ Train config RESTORE_FROM_RPN = False IS_FILTER_OUTSIDE_BOXES = False FIXED_BLOCKS = 0 # allow 0~3 FREEZE_BLOCKS = [True, False, False, False, False] # for gluoncv backbone USE_07_METRIC = True CUDA9 = True EVAL_THRESHOLD = 0.5 RPN_LOCATION_LOSS_WEIGHT = 1. RPN_CLASSIFICATION_LOSS_WEIGHT = 1.0 FAST_RCNN_LOCATION_LOSS_WEIGHT = 1.0 FAST_RCNN_CLASSIFICATION_LOSS_WEIGHT = 1.0 RPN_SIGMA = 3.0 FASTRCNN_SIGMA = 1.0 MUTILPY_BIAS_GRADIENT = None # 2.0 # if None, will not multipy GRADIENT_CLIPPING_BY_NORM = None # 10.0 if None, will not clip EPSILON = 1e-5 MOMENTUM = 0.9 BATCH_SIZE = 1 WARM_SETP = int(0.25 * SAVE_WEIGHTS_INTE) LR = 5e-4 * 2 * 1.25 * NUM_GPU * BATCH_SIZE DECAY_STEP = [11*SAVE_WEIGHTS_INTE, 16*SAVE_WEIGHTS_INTE, 20*SAVE_WEIGHTS_INTE] # 50000, 70000 MAX_ITERATION = 20*SAVE_WEIGHTS_INTE # -------------------------------------------- Data_preprocess_config DATASET_NAME = 'coco' # 'pascal', 'coco' PIXEL_MEAN = [123.68, 116.779, 103.939] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR PIXEL_MEAN_ = [0.485, 0.456, 0.406] PIXEL_STD = [0.229, 0.224, 0.225] # R, G, B. In tf, channel is RGB. 
In openCV, channel is BGR IMG_SHORT_SIDE_LEN = 800 IMG_MAX_LENGTH = 1333 CLASS_NUM = 80 # --------------------------------------------- Network_config INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01) BBOX_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.001) WEIGHT_DECAY = 0.00004 if NET_NAME.startswith('Mobilenet') else 0.0001 IS_ASSIGN = True # ---------------------------------------------Anchor config USE_CENTER_OFFSET = True LEVLES = ['P2', 'P3', 'P4', 'P5', 'P6'] BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512] ANCHOR_STRIDE_LIST = [4, 8, 16, 32, 64] ANCHOR_SCALES = [1.0] ANCHOR_RATIOS = [0.5, 1., 2.0] ROI_SCALE_FACTORS = [[10., 10., 5.0, 5.0], [20., 20., 10.0, 10.0], [40., 40., 20.0, 20.0]] ANCHOR_SCALE_FACTORS = [10., 10., 5.0, 5.0] # --------------------------------------------FPN config SHARE_HEADS = True KERNEL_SIZE = 3 RPN_IOU_POSITIVE_THRESHOLD = 0.7 RPN_IOU_NEGATIVE_THRESHOLD = 0.3 TRAIN_RPN_CLOOBER_POSITIVES = False RPN_MINIBATCH_SIZE = 256 RPN_POSITIVE_RATE = 0.5 RPN_NMS_IOU_THRESHOLD = 0.7 RPN_TOP_K_NMS_TRAIN = 12000 RPN_MAXIMUM_PROPOSAL_TARIN = 2000 RPN_TOP_K_NMS_TEST = 6000 RPN_MAXIMUM_PROPOSAL_TEST = 1000 # -------------------------------------------Fast-RCNN config ROI_SIZE = 14 ROI_POOL_KERNEL_SIZE = 2 USE_DROPOUT = False KEEP_PROB = 1.0 SHOW_SCORE_THRSHOLD = 0.6 # only show in tensorboard FAST_RCNN_NMS_IOU_THRESHOLD = 0.5 # 0.6 FAST_RCNN_NMS_MAX_BOXES_PER_CLASS = 100 FAST_RCNN_IOU_POSITIVE_THRESHOLD = 0.5 FAST_RCNN_IOU_NEGATIVE_THRESHOLD = 0.0 # 0.1 < IOU < 0.5 is negative FAST_RCNN_MINIBATCH_SIZE = 512 # if is -1, that is train with OHEM FAST_RCNN_POSITIVE_RATE = 0.25 ADD_GTBOXES_TO_TRAIN = False
[((389, 411), 'os.path.abspath', 'os.path.abspath', (['"""../"""'], {}), "('../')\n", (404, 411), False, 'import os\n'), ((1096, 1145), 'os.path.join', 'os.path.join', (['ROOT_PATH', '"""output/trained_weights"""'], {}), "(ROOT_PATH, 'output/trained_weights')\n", (1108, 1145), False, 'import os\n'), ((2554, 2605), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(0.01)'}), '(mean=0.0, stddev=0.01)\n', (2582, 2605), True, 'import tensorflow as tf\n'), ((2625, 2677), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(0.001)'}), '(mean=0.0, stddev=0.001)\n', (2653, 2677), True, 'import tensorflow as tf\n')]
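The anchor constants above (BASE_ANCHOR_SIZE_LIST, ANCHOR_SCALES, ANCHOR_RATIOS) determine the per-level anchor shapes. A generic sketch of how such widths and heights are commonly derived is given below; the repository's own anchor generator lives elsewhere and may differ in detail, so this is an illustration of the convention, not the project's code.

# Sketch: per-level anchor (height, width) pairs implied by the config constants,
# using the common convention area = (base * scale)**2 with height/width = ratio.
import numpy as np

BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]
ANCHOR_SCALES = [1.0]
ANCHOR_RATIOS = [0.5, 1., 2.0]

for level, base in zip(['P2', 'P3', 'P4', 'P5', 'P6'], BASE_ANCHOR_SIZE_LIST):
    shapes = []
    for scale in ANCHOR_SCALES:
        for ratio in ANCHOR_RATIOS:
            size = base * scale
            w = size / np.sqrt(ratio)  # width shrinks as ratio (h/w) grows
            h = size * np.sqrt(ratio)
            shapes.append((round(h, 1), round(w, 1)))
    print(level, shapes)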
vibhaska/delta
python/delta/tests/test_exceptions.py
0e16356ff46520404e2376d048f002ca74f6dc0c
# # Copyright (2020) The Delta Lake Project Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import unittest import delta.exceptions as exceptions from delta.testing.utils import DeltaTestCase class DeltaExceptionTests(DeltaTestCase): def _raise_concurrent_exception(self, exception_type): e = exception_type("") self.spark.sparkContext._jvm.scala.util.Failure(e).get() def test_capture_concurrent_write_exception(self): e = self.spark._jvm.io.delta.exceptions.ConcurrentWriteException self.assertRaises(exceptions.ConcurrentWriteException, lambda: self._raise_concurrent_exception(e)) def test_capture_metadata_changed_exception(self): e = self.spark._jvm.io.delta.exceptions.MetadataChangedException self.assertRaises(exceptions.MetadataChangedException, lambda: self._raise_concurrent_exception(e)) def test_capture_protocol_changed_exception(self): e = self.spark._jvm.io.delta.exceptions.ProtocolChangedException self.assertRaises(exceptions.ProtocolChangedException, lambda: self._raise_concurrent_exception(e)) def test_capture_concurrent_append_exception(self): e = self.spark._jvm.io.delta.exceptions.ConcurrentAppendException self.assertRaises(exceptions.ConcurrentAppendException, lambda: self._raise_concurrent_exception(e)) def test_capture_concurrent_delete_read_exception(self): e = self.spark._jvm.io.delta.exceptions.ConcurrentDeleteReadException self.assertRaises(exceptions.ConcurrentDeleteReadException, lambda: self._raise_concurrent_exception(e)) def test_capture_concurrent_delete_delete_exception(self): e = self.spark._jvm.io.delta.exceptions.ConcurrentDeleteDeleteException self.assertRaises(exceptions.ConcurrentDeleteDeleteException, lambda: self._raise_concurrent_exception(e)) def test_capture_concurrent_transaction_exception(self): e = self.spark._jvm.io.delta.exceptions.ConcurrentTransactionException self.assertRaises(exceptions.ConcurrentTransactionException, lambda: self._raise_concurrent_exception(e)) if __name__ == "__main__": try: import xmlrunner testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=4) except ImportError: testRunner = None unittest.main(testRunner=testRunner, verbosity=4)
[((3005, 3054), 'unittest.main', 'unittest.main', ([], {'testRunner': 'testRunner', 'verbosity': '(4)'}), '(testRunner=testRunner, verbosity=4)\n', (3018, 3054), False, 'import unittest\n'), ((2884, 2950), 'xmlrunner.XMLTestRunner', 'xmlrunner.XMLTestRunner', ([], {'output': '"""target/test-reports"""', 'verbosity': '(4)'}), "(output='target/test-reports', verbosity=4)\n", (2907, 2950), False, 'import xmlrunner\n')]
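The tests above only verify that the JVM exceptions surface as their Python counterparts; in application code those same classes are what you catch around a conflicting Delta write. A hedged sketch follows; the table path and the single-retry policy are made up, and a normal Spark DataFrame `df` with Delta configured is assumed.

# Sketch: catching a Delta concurrency conflict in user code.
# `df` and '/tmp/delta/events' are hypothetical; only the exception class
# comes from the delta.exceptions module exercised above.
from delta.exceptions import ConcurrentAppendException

def append_with_one_retry(df, path='/tmp/delta/events'):
    try:
        df.write.format('delta').mode('append').save(path)
    except ConcurrentAppendException:
        # Another transaction added files this query would have read; retry once.
        df.write.format('delta').mode('append').save(path)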
nikhilsamninan/python-files
day10/samematrix.py
15198459081097058a939b40b5e8ef754e578fe0
def matrix_form():
    r = int(input("Enter the no of rows"))
    c = int(input("Enter the no of columns"))
    matrix = []
    print("Enter the entries")
    for i in range(r):
        a = []
        for j in range(c):
            a.append(int(input()))
        matrix.append(a)
    return matrix

def check_matrix(first_matrix, sec_matrix):
    if first_matrix == sec_matrix:
        print("same")
    else:
        print("not same")

print("Enter the 1st matrix")
first_matrix = matrix_form()
print(first_matrix)
print("Enter the 2nd matrix")
sec_matrix = matrix_form()
print(sec_matrix)
check_matrix(first_matrix, sec_matrix)
[]
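check_matrix relies on Python's list equality, which already compares nested lists element by element. A small non-interactive sketch of the same check with hard-coded matrices is below, useful for seeing the expected output without typing entries by hand; the example values are arbitrary.

# Sketch: the same comparison as check_matrix, with hard-coded example matrices.
a = [[1, 2], [3, 4]]
b = [[1, 2], [3, 4]]
c = [[1, 2], [3, 5]]

print("same" if a == b else "not same")  # same
print("same" if a == c else "not same")  # not same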
PatrickJReed/Longboard
extractFeatures.py
f6ca4a6e51c91296894aee2e02b86f83b38c080a
#!/home/ubuntu/miniconda2/bin/python from __future__ import division import sys import glob, os, gc import uuid import os.path import csv import numpy as np from time import time from subprocess import (call, Popen, PIPE) from itertools import product import shutil import re import pickle from boto3.session import Session import boto3 import h5py import umap import hdbscan from keras.models import load_model from keras.models import Model from keras import backend as K from keras.utils import multi_gpu_model ##Path to Data basepath = "/home/ubuntu/" subject = sys.argv[1] with open("config.txt") as f: config = [line.rstrip() for line in f] print config[0] print config[1] session = Session(aws_access_key_id=config[0],aws_secret_access_key=config[1]) s3 = session.resource('s3') s3 = boto3.client ('s3') s3.download_file('for-ndar',os.path.join("metadata/", subject + ".txt"),os.path.join(basepath,subject + ".txt")) with open(subject + ".txt") as f: Cells = [line.rstrip() for line in f] session = Session(aws_access_key_id=config[0],aws_secret_access_key=config[1]) s3 = session.resource('s3') s3.meta.client.download_file('bsmn-data',os.path.join('Inception_Transfer_Model.h5'),os.path.join(basepath,'Inception_Transfer_Model.h5')) feat_extractor = load_model(os.path.join(basepath,'Inception_Transfer_Model.h5')) parallel_model = multi_gpu_model(feat_extractor, gpus=2) count = 0 for cell in Cells: print(cell) cell_size=0 cell_ids = [] s3.meta.client.download_file('bsmn-data',os.path.join(subject, cell+'_IDs.h5'),os.path.join(basepath,cell+'_IDs.h5')) f = h5py.File(os.path.join(basepath,cell+'_IDs.h5'), 'r') cell_ids = f['ID'] for cid in cell_ids: cid = cid.decode('utf-8') s3.meta.client.download_file('bsmn-data',os.path.join(subject, cell+'_'+cid+'.h5'),os.path.join(basepath,cell+'_'+cid+'.h5')) xyz = h5py.File(os.path.join(basepath,cell+'_'+cid+'.h5'), 'r') os.remove(os.path.join(basepath,cell+'_'+cid+'.h5')) if count == 0: X = xyz['X'] Y = xyz['Y'] Z = parallel_model.predict(X, batch_size = 128) count+=1 length = len(Y) U = [cid] * length else: X = xyz['X'] Y = np.append(Y,xyz['Y'], axis=0) z = feat_extractor.predict(X, batch_size = 128) Z = np.append(Z,z, axis=0) length = len(xyz['Y']) U = U + ([cid] * length) print(Z.shape) hf = h5py.File(subject+'_ef.h5', 'w') hf.create_dataset('Y', data=Y) hf.create_dataset('Z', data=Z) hf.close() session = Session(aws_access_key_id=config[0],aws_secret_access_key=config[1]) s3 = session.resource('s3') s3.meta.client.upload_file(os.path.join(subject+'_ef.h5'),'bsmn-data',os.path.join(subject, subject+'_ef.h5')) call(['sudo', 'shutdown', '-h', 'now'])
[]
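The script above writes its extracted features to <subject>_ef.h5 with two datasets, 'Y' and 'Z', before shutting the instance down (the upload to S3 is prepared but the file is written locally first). A short sketch of reading that file back with h5py is below; the file name is a placeholder for the real <subject>_ef.h5.

# Sketch: reading back the feature file written by the script above.
# 'SUBJECT_ef.h5' is a placeholder for the actual <subject>_ef.h5 name.
import h5py

with h5py.File('SUBJECT_ef.h5', 'r') as hf:
    Y = hf['Y'][:]  # the 'Y' arrays copied from the per-cell input files
    Z = hf['Z'][:]  # feature vectors produced by the Inception transfer model
    print(Y.shape, Z.shape)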
mdbernard/astrodynamics
kepler.py
cf98df6cd17086e3675c1f7c2fce342d5322ee51
import numpy as np from stumpff import C, S from CelestialBody import BODIES from numerical import newton, laguerre from lagrange import calc_f, calc_fd, calc_g, calc_gd def kepler_chi(chi, alpha, r0, vr0, mu, dt): ''' Kepler's Equation of the universal anomaly, modified for use in numerical solvers. ''' z = alpha*chi**2 return (r0*vr0/np.sqrt(mu))*chi**2*C(z) + \ (1 - alpha*r0)*chi**3*S(z) + \ r0*chi - np.sqrt(mu)*dt def dkepler_dchi(chi, alpha, r0, vr0, mu, dt): ''' Derivative of Kepler's Equation of the universal anomaly, modified for use in numerical solvers. ''' z = alpha*chi**2 return (r0*vr0/np.sqrt(mu))*chi*(1 - alpha*chi**2*S(z)) + \ (1 - alpha*r0)*chi**2*C(z) + r0 def d2kepler_dchi2(chi, alpha, r0, vr0, mu, dt): ''' Second derivative of Kepler's Equation of the universal anomaly, modified for use in numerical solvers. ''' z = alpha*chi**2 S_ = S(z) return (r0*vr0/np.sqrt(mu))*(1 - 3*z*S_ + z*(C(z) - 3*S_)) + \ chi*(1 - z*S_)*(1 - alpha*r0) def solve_kepler_chi(r_0, v_0, dt, body=BODIES['Earth'], method='laguerre', tol=1e-7, max_iters=100): ''' Solve Kepler's Equation of the universal anomaly chi using the specified numerical method. Applies Algorithm 3.4 from Orbital Mechanics for Engineering Students, 4 ed, Curtis. :param r_0: `iterable` (km) initial position 3-vector :param v_0: `iterable` (km/s) initial velocity 3-vector :param dt: `float` (s) time after initial state to solve for r, v as 3-vectors :param body: `CelestialBody` (--) the celestial body to use for orbital parameters :param method: `str` (--) which numerical method to use to solve Kepler's Equation :param tol: `float` (--) decimal tolerance for numerical method (default 1e-7 is IEEE 745 single precision) :param max_iters: `int` (--) maximum number of iterations in numerical method before breaking :return: (km) final position 3-vector, (km/s) final velocity 3-vector ''' VALID_METHODS = ('laguerre', 'newton') mu = body.mu # (km**3/s**2) gravitational parameter of the specified primary body r0 = np.linalg.norm(r_0) # (km) initial position magnitude v0 = np.linalg.norm(v_0) # (km/s) initial velocity magnitude vr0 = np.dot(v_0, r_0)/r0 # (km/s) initial radial velocity magnitude alpha = 2/r0 - v0**2/mu # (1/km) inverse of semi-major axis chi0 = np.sqrt(mu)*np.abs(alpha)*dt if method not in VALID_METHODS: print(f'Method \'{method}\' is not valid, must be one of {VALID_METHODS}.\nDefaulting to laguerre method.') chi, _, _ = laguerre(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu, dt) elif method == 'newton': chi, _, _ = newton(chi0, kepler_chi, dkepler_dchi, alpha, r0, vr0, mu, dt) else: # method == 'laguerre' chi, _, _ = laguerre(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu, dt) f = calc_f(chi, r0, alpha) g = calc_g(dt, mu, chi, alpha) r_1 = f*r_0 + g*v_0 r1 = np.linalg.norm(r_1) fd = calc_fd(mu, r1, r0, alpha, chi) gd = calc_gd(chi, r1, alpha) v_1 = fd*r_0 + gd*v_0 return r_1, v_1 def solve_kepler_E(e, Me, tol=1e-7, max_iters=100): ''' Solve Kepler's Equation in the form containing Eccentric Anomaly (E), eccentricity (e), and Mean Anomaly of Ellipse (Me). Uses Algorithm 3.1 from Orbital Mechanics for Engineering Students, 4 ed, Curtis. 
''' # TODO: have this function make use of one of the numerical methods in numerical.py def f(E, e, Me): return E - e*np.sin(E) - Me def fp(E, e): return 1 - e*np.cos(E) E = Me + e/2 if Me < np.pi else Me - e/2 ratio = f(E, e, Me)/fp(E, e) iters = 0 while abs(ratio) > tol and iters < max_iters: E -= ratio ratio = f(E, e, Me)/fp(E, e) iters += 1 E -= ratio converged = np.abs(ratio) <= tol return E, iters, converged def test(): ''' Test the functionality of solve_kepler_chi and solve_kepler_laguerre using Problem 3.20 from Orbital Mechanics for Engineering Students, 4 ed, Curtis. ''' # given starting information Earth = BODIES['Earth'] # `CelestialBody` (--) Earth and all the Earth things r_0 = np.array([20000, -105000, -19000]) # (km) initial position vector v_0 = np.array([0.9, -3.4, -1.5]) # (km/s) initial velocity vector dt = 2*60*60 # (s) time of interest after initial time # given correct answer from textbook correct_r_1 = np.array([26338, -128750, -29656]) # (km) final position vector correct_v_1 = np.array([0.86280, -3.2116, -1.4613]) # (km/s) final velocity vector # solve using above methods r_n, v_n = solve_kepler_chi(r_0, v_0, dt, Earth, method='newton') r_l, v_l = solve_kepler_chi(r_0, v_0, dt, Earth, method='laguerre') # check correctness # tolerance based on significant figures of given answers newton_valid = np.allclose(r_n, correct_r_1, atol=1) and np.allclose(v_n, correct_v_1, atol=1e-4) laguerre_valid = np.allclose(r_l, correct_r_1, atol=1) and np.allclose(v_l, correct_v_1, atol=1e-4) return all([newton_valid, laguerre_valid]) if __name__ == '__main__': print(test())
[((953, 957), 'stumpff.S', 'S', (['z'], {}), '(z)\n', (954, 957), False, 'from stumpff import C, S\n'), ((2171, 2190), 'numpy.linalg.norm', 'np.linalg.norm', (['r_0'], {}), '(r_0)\n', (2185, 2190), True, 'import numpy as np\n'), ((2235, 2254), 'numpy.linalg.norm', 'np.linalg.norm', (['v_0'], {}), '(v_0)\n', (2249, 2254), True, 'import numpy as np\n'), ((2982, 3004), 'lagrange.calc_f', 'calc_f', (['chi', 'r0', 'alpha'], {}), '(chi, r0, alpha)\n', (2988, 3004), False, 'from lagrange import calc_f, calc_fd, calc_g, calc_gd\n'), ((3013, 3039), 'lagrange.calc_g', 'calc_g', (['dt', 'mu', 'chi', 'alpha'], {}), '(dt, mu, chi, alpha)\n', (3019, 3039), False, 'from lagrange import calc_f, calc_fd, calc_g, calc_gd\n'), ((3073, 3092), 'numpy.linalg.norm', 'np.linalg.norm', (['r_1'], {}), '(r_1)\n', (3087, 3092), True, 'import numpy as np\n'), ((3103, 3134), 'lagrange.calc_fd', 'calc_fd', (['mu', 'r1', 'r0', 'alpha', 'chi'], {}), '(mu, r1, r0, alpha, chi)\n', (3110, 3134), False, 'from lagrange import calc_f, calc_fd, calc_g, calc_gd\n'), ((3144, 3167), 'lagrange.calc_gd', 'calc_gd', (['chi', 'r1', 'alpha'], {}), '(chi, r1, alpha)\n', (3151, 3167), False, 'from lagrange import calc_f, calc_fd, calc_g, calc_gd\n'), ((4309, 4343), 'numpy.array', 'np.array', (['[20000, -105000, -19000]'], {}), '([20000, -105000, -19000])\n', (4317, 4343), True, 'import numpy as np\n'), ((4386, 4413), 'numpy.array', 'np.array', (['[0.9, -3.4, -1.5]'], {}), '([0.9, -3.4, -1.5])\n', (4394, 4413), True, 'import numpy as np\n'), ((4568, 4602), 'numpy.array', 'np.array', (['[26338, -128750, -29656]'], {}), '([26338, -128750, -29656])\n', (4576, 4602), True, 'import numpy as np\n'), ((4651, 4687), 'numpy.array', 'np.array', (['[0.8628, -3.2116, -1.4613]'], {}), '([0.8628, -3.2116, -1.4613])\n', (4659, 4687), True, 'import numpy as np\n'), ((2302, 2318), 'numpy.dot', 'np.dot', (['v_0', 'r_0'], {}), '(v_0, r_0)\n', (2308, 2318), True, 'import numpy as np\n'), ((2645, 2730), 'numerical.laguerre', 'laguerre', (['chi0', 'kepler_chi', 'dkepler_dchi', 'd2kepler_dchi2', 'alpha', 'r0', 'vr0', 'mu', 'dt'], {}), '(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu, dt\n )\n', (2653, 2730), False, 'from numerical import newton, laguerre\n'), ((3940, 3953), 'numpy.abs', 'np.abs', (['ratio'], {}), '(ratio)\n', (3946, 3953), True, 'import numpy as np\n'), ((5002, 5039), 'numpy.allclose', 'np.allclose', (['r_n', 'correct_r_1'], {'atol': '(1)'}), '(r_n, correct_r_1, atol=1)\n', (5013, 5039), True, 'import numpy as np\n'), ((5044, 5086), 'numpy.allclose', 'np.allclose', (['v_n', 'correct_v_1'], {'atol': '(0.0001)'}), '(v_n, correct_v_1, atol=0.0001)\n', (5055, 5086), True, 'import numpy as np\n'), ((5106, 5143), 'numpy.allclose', 'np.allclose', (['r_l', 'correct_r_1'], {'atol': '(1)'}), '(r_l, correct_r_1, atol=1)\n', (5117, 5143), True, 'import numpy as np\n'), ((5148, 5190), 'numpy.allclose', 'np.allclose', (['v_l', 'correct_v_1'], {'atol': '(0.0001)'}), '(v_l, correct_v_1, atol=0.0001)\n', (5159, 5190), True, 'import numpy as np\n'), ((447, 458), 'numpy.sqrt', 'np.sqrt', (['mu'], {}), '(mu)\n', (454, 458), True, 'import numpy as np\n'), ((2443, 2454), 'numpy.sqrt', 'np.sqrt', (['mu'], {}), '(mu)\n', (2450, 2454), True, 'import numpy as np\n'), ((2455, 2468), 'numpy.abs', 'np.abs', (['alpha'], {}), '(alpha)\n', (2461, 2468), True, 'import numpy as np\n'), ((2775, 2837), 'numerical.newton', 'newton', (['chi0', 'kepler_chi', 'dkepler_dchi', 'alpha', 'r0', 'vr0', 'mu', 'dt'], {}), '(chi0, kepler_chi, dkepler_dchi, alpha, r0, vr0, 
mu, dt)\n', (2781, 2837), False, 'from numerical import newton, laguerre\n'), ((2892, 2977), 'numerical.laguerre', 'laguerre', (['chi0', 'kepler_chi', 'dkepler_dchi', 'd2kepler_dchi2', 'alpha', 'r0', 'vr0', 'mu', 'dt'], {}), '(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu, dt\n )\n', (2900, 2977), False, 'from numerical import newton, laguerre\n'), ((742, 746), 'stumpff.C', 'C', (['z'], {}), '(z)\n', (743, 746), False, 'from stumpff import C, S\n'), ((977, 988), 'numpy.sqrt', 'np.sqrt', (['mu'], {}), '(mu)\n', (984, 988), True, 'import numpy as np\n'), ((3680, 3689), 'numpy.cos', 'np.cos', (['E'], {}), '(E)\n', (3686, 3689), True, 'import numpy as np\n'), ((376, 380), 'stumpff.C', 'C', (['z'], {}), '(z)\n', (377, 380), False, 'from stumpff import C, S\n'), ((418, 422), 'stumpff.S', 'S', (['z'], {}), '(z)\n', (419, 422), False, 'from stumpff import C, S\n'), ((3625, 3634), 'numpy.sin', 'np.sin', (['E'], {}), '(E)\n', (3631, 3634), True, 'import numpy as np\n'), ((664, 675), 'numpy.sqrt', 'np.sqrt', (['mu'], {}), '(mu)\n', (671, 675), True, 'import numpy as np\n'), ((699, 703), 'stumpff.S', 'S', (['z'], {}), '(z)\n', (700, 703), False, 'from stumpff import C, S\n'), ((1007, 1011), 'stumpff.C', 'C', (['z'], {}), '(z)\n', (1008, 1011), False, 'from stumpff import C, S\n'), ((356, 367), 'numpy.sqrt', 'np.sqrt', (['mu'], {}), '(mu)\n', (363, 367), True, 'import numpy as np\n')]
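solve_kepler_E in the file above iterates E - e*sin(E) = Me with a Newton step, following Algorithm 3.1. A standalone sketch of that iteration with hypothetical orbit values (e = 0.4, Me = 1.2 rad) is shown below; it mirrors the structure of the function but is not a substitute for it.

# Sketch: Newton iteration for Kepler's equation E - e*sin(E) = Me,
# with made-up example values e = 0.4 and Me = 1.2 rad.
import numpy as np

e, Me = 0.4, 1.2
E = Me + e/2 if Me < np.pi else Me - e/2  # same starting guess as solve_kepler_E
for _ in range(100):
    ratio = (E - e*np.sin(E) - Me) / (1 - e*np.cos(E))  # f(E)/f'(E)
    E -= ratio
    if abs(ratio) <= 1e-7:
        break

print(E)                      # eccentric anomaly in radians
print(E - e*np.sin(E) - Me)   # residual, should be close to zero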
jkrueger1/nicos
nicos_demo/vpgaa/setups/pgai.py
5f4ce66c312dedd78995f9d91e8a6e3c891b262b
description = 'PGAA setup with XYZOmega sample table' group = 'basic' sysconfig = dict( datasinks = ['mcasink', 'chnsink', 'csvsink', 'livesink'] ) includes = [ 'system', 'reactor', 'nl4b', 'pressure', 'sampletable', 'pilz', 'detector', 'collimation', ] devices = dict( mcasink = device('nicos_mlz.pgaa.devices.MCASink', settypes = {'point'}, detectors = ['_60p', 'LEGe'], ), chnsink = device('nicos_mlz.pgaa.devices.CHNSink', settypes = {'point'}, detectors = ['_60p', 'LEGe'], ), csvsink = device('nicos_mlz.pgaa.devices.CSVDataSink', settypes = {'point'}, ), ) startupcode = """ SetDetectors('_60p', 'LEGe') SetEnvironment(chamber_pressure) printinfo("============================================================") printinfo("Welcome to the NICOS PGAI demo setup.") printinfo("============================================================") """
[]
ravikumarvc/incubator-tvm
tests/python/relay/test_op_level2.py
9826947ffce0ed40e9d47a0db2abb033e394279e
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Support level2 operator test cases. """ import numpy as np import tvm from tvm import autotvm from tvm import relay from tvm.relay import transform from tvm.relay.testing import ctx_list, run_infer_type from tvm.contrib import util import topi.testing def test_conv1d_infer_type(): # symbolic in batch dimension n, c, w = tvm.var("n"), 10, 224 x = relay.var("x", relay.ty.TensorType((n, c, w), "float32")) w = relay.var("w") y = relay.nn.conv1d(x, w, kernel_size=3, padding=(1, 1), channels=2) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 2, 224), "float32") assert yy.args[1].checked_type == relay.TensorType( (2, 10, 3), "float32") # infer by shape of w, mixed precision n, c, w = tvm.var("n"), 10, 224 x = relay.var("x", relay.TensorType((n, c, w), "int8")) w = relay.var("w", relay.TensorType((2, 10, 3), "int8")) y = relay.nn.conv1d(x, w, out_dtype="int32") assert "out_dtype=\"int32\"" in y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 2, 222), "int32") # infer shape in case of different dtypes for input and weight. 
n, c, w = tvm.var("n"), 10, 224 x = relay.var("x", relay.TensorType((n, c, w), "uint8")) w = relay.var("w", relay.TensorType((2, 10, 3), "int8")) y = relay.nn.conv1d(x, w, out_dtype="int32") assert "out_dtype=\"int32\"" in y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 2, 222), "int32") # Infer with NWC n, c, w = 4, 32, 224 x = relay.var("x", relay.TensorType((n, w, c), "int8")) wt = relay.var("w") y = relay.nn.conv1d(x, wt, kernel_size=3, padding=(1, 1), channels=16, data_layout="NWC", out_dtype="int32") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, w, 16), "int32") def test_conv1d_run(): def run_test_conv1d(dtype, out_dtype, scale, dshape, kshape, padding=(1, 1), fref=None, dilation=1, except_targets=None, **attrs): if except_targets is None: except_targets = [] x = relay.var("x", shape=dshape, dtype=dtype) w = relay.var("w", dtype=dtype) y = relay.nn.conv1d(x, w, padding=padding, dilation=dilation, **attrs) func = relay.Function([x, w], y) data = np.random.uniform(-scale, scale, size=dshape).astype(dtype) kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype) ref_res = topi.testing.conv1d_ncw_python( data.astype(out_dtype), kernel.astype(out_dtype), 1, padding, dilation) for target, ctx in ctx_list(): if target in except_targets: continue intrp1 = relay.create_executor("graph", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(data, kernel) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) # normal conv1d dshape = (1, 3, 224) kshape = (10, 3, 3) run_test_conv1d("float32", "float32", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=3) # mixed precision run_test_conv1d("int8", "int32", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=3) # dilated conv2d dshape = (1, 3, 18) kshape = (10, 3, 3) run_test_conv1d("float32", "float32", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=3, dilation=3) def test_conv2d_infer_type(): # symbolic in batch dimension n, c, h, w = tvm.size_var("n"), 10, 224, 224 x = relay.var("x", relay.ty.TensorType((n, c, h, w), "float32")) w = relay.var("w") y = relay.nn.conv2d(x, w, kernel_size=(3, 3), padding=(1, 1), channels=2) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 2, 224, 224), "float32") assert yy.args[1].checked_type == relay.TensorType( (2, 10, 3, 3), "float32") # infer by shape of w, mixed precision n, c, h, w = tvm.size_var("n"), 10, 224, 224 x = relay.var("x", relay.TensorType((n, c, h, w), "int8")) w = relay.var("w", relay.TensorType((2, 10, 3, 3), "int8")) y = relay.nn.conv2d(x, w, out_dtype="int32") assert "out_dtype=\"int32\"" in y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 2, 222, 222), "int32") # infer shape in case of different dtypes for input and weight. 
n, c, h, w = tvm.size_var("n"), 10, 224, 224 x = relay.var("x", relay.TensorType((n, c, h, w), "uint8")) w = relay.var("w", relay.TensorType((2, 10, 3, 3), "int8")) y = relay.nn.conv2d(x, w, out_dtype="int32") assert "out_dtype=\"int32\"" in y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 2, 222, 222), "int32") # Infer with a different layout n, c, h, w = 4, 32, 224, 224 x = relay.var("x", relay.TensorType((n//4, c//4, h, w, 4, 4), "int8")) wt = relay.var("w") y = relay.nn.conv2d(x, wt, kernel_size=(3, 3), padding=(1, 1), channels=16, data_layout="NCHW4n4c", kernel_layout="OIHW4o4i", out_dtype="int32") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (1, 4, 224, 224, 4, 4), "int32") assert yy.args[1].checked_type == relay.TensorType( (4, 8, 3, 3, 4, 4), "int8") # Infer with NHWC n, c, h, w = 4, 32, 224, 224 x = relay.var("x", relay.TensorType((n, h, w, c), "int8")) wt = relay.var("w") y = relay.nn.conv2d(x, wt, kernel_size=(3, 3), padding=(1, 1), channels=16, data_layout="NHWC", out_dtype="int32") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, h, w, 16), "int32") def test_conv2d_run(): def run_test_conv2d(dtype, out_dtype, scale, dshape, kshape, padding=(1, 1), fref=None, groups=1, dilation=(1, 1), except_targets=None, **attrs): if except_targets is None: except_targets = [] x = relay.var("x", shape=dshape, dtype=dtype) w = relay.var("w", dtype=dtype) y = relay.nn.conv2d(x, w, padding=padding, dilation=dilation, groups=groups, **attrs) func = relay.Function([x, w], y) data = np.random.uniform(-scale, scale, size=dshape).astype(dtype) kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype) dkernel = topi.testing.dilate_python(kernel, (1, 1) + dilation) if fref is None: ref_res = topi.testing.conv2d_nchw_python( data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding, groups=groups) else: ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype)) for target, ctx in ctx_list(): if target in except_targets: continue intrp1 = relay.create_executor("graph", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(data, kernel) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def compile_test_conv2d_arm_cpu(dtype, out_dtype, scale, dshape, kshape, padding=(1, 1), groups=1, dilation=(1, 1), **attrs): x = relay.var("x", shape=dshape, dtype=dtype) w = relay.var("w", dtype=dtype) y = relay.nn.conv2d(x, w, padding=padding, dilation=dilation, groups=groups, **attrs) func = relay.Function([x, w], y) mod = tvm.relay.Module() mod["main"] = func test_schedule='{"i": ["llvm -device=arm_cpu", "topi_nn_depthwise_conv2d_nchw", \ [["TENSOR", [1, 512, 32, 32], "float32"], \ ["TENSOR", [512, 1, 3, 3], "float32"], \ [1, 1], [1, 1], [1, 1], "float32"], {}, \ ["depthwise_conv2d_nchw", [1, 512, 32, 32, "float32"], \ [512, 1, 3, 3, "float32"], [1, 1], [1, 1], [1, 1], "float32"], \ {"i": 743640, "t": "contrib_spatial_pack", "c": null, \ "e": [["tile_co", "sp", [32, 16]], ["tile_oh", "sp", [8, 1]], \ ["tile_ow", "sp", [1, 8]], \ ["reorder_0", "re", [0, 1, 2, 3, 4, 5, 8, 6, 7]], \ ["reorder_1", "re", [0, 1, 2, 3, 6, 4, 5]], \ ["ann_reduce", "an", ["unroll", "none"]], \ ["ann_spatial", "an", ["unroll", "unroll", "vec"]], \ ["data_pad_inline", "ot", 4], ["data_vec_inline", "ot", 1], \ ["conv_inline", "ot", 0]]}], "r": [[0.0002933163], \ 0, 3.1976189613342285, 1570811630.6058347], "v": 0.1}' temp = util.tempdir() with open(temp.relpath("temp.log"), "w") as log_file: log_file.write(test_schedule) 
with autotvm.apply_history_best(temp.relpath("temp.log")): with relay.build_config(opt_level=3): print('Compiling...') graph_json, mod, params = tvm.relay.build(mod, target="llvm -device=arm_cpu") # depthwise conv2d dshape = (1, 32, 18, 18) kshape = (32, 1, 3, 3) run_test_conv2d("float32", "float32", 1, dshape, kshape, padding=(1, 1), channels=32, groups=32, kernel_size=(3 ,3), fref=lambda x, w: topi.testing.depthwise_conv2d_python_nchw( x, w, (1, 1), "SAME")) # depthwise conv2d for arm_cpu dshape = (1, 512, 32, 32) kshape = (512, 1, 3, 3) compile_test_conv2d_arm_cpu("float32", "float32", 1, dshape, kshape, padding=(1, 1), channels=512, groups=512, kernel_size=(3 ,3)) # CUDA is disabled for 'direct' schedule: # https://github.com/apache/incubator-tvm/pull/3070#issuecomment-486597553 # group conv2d dshape = (1, 32, 18, 18) kshape = (32, 4, 3, 3) run_test_conv2d("float32", "float32", 1, dshape, kshape, padding=(1, 1), channels=32, groups=8, kernel_size=(3 ,3), except_targets=['cuda']) # also group conv2d dshape = (1, 32, 18, 18) kshape = (64, 1, 3, 3) run_test_conv2d("float32", "float32", 1, dshape, kshape, padding=(1, 1), channels=64, groups=32, kernel_size=(3 ,3), except_targets=['cuda']) # normal conv2d dshape = (1, 3, 224, 224) kshape = (10, 3, 3, 3) run_test_conv2d("float32", "float32", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(3 ,3)) # mixed precision run_test_conv2d("int8", "int32", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(3 ,3)) kshape = (10, 3, 1, 3) # mixed precision. run_test_conv2d("int8", "int32", 1, dshape, kshape, padding=(0, 1), channels=10, kernel_size=(1 ,3)) # dilated conv2d dshape = (1, 3, 18, 18) kshape = (10, 3, 3, 3) run_test_conv2d("float32", "float32", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(3 ,3), dilation=(3, 3)) def test_conv2d_winograd(): class WinogradFallback(autotvm.FallbackContext): def _query_inside(self, target, workload): key = (target, workload) if key in self.memory: return self.memory[key] cfg = autotvm.task.space.FallbackConfigEntity() cfg.template_key = 'winograd' cfg.is_fallback = False cfg['tile_b'] = autotvm.task.space.SplitEntity([-1, 1, 1, 1]) cfg['tile_y'] = autotvm.task.space.SplitEntity([-1, 1, 1, 1]) cfg['tile_x'] = autotvm.task.space.SplitEntity([-1, 1, 1, 1]) cfg['tile_rc'] = autotvm.task.space.SplitEntity([-1, 1]) cfg['auto_unroll_max_setp'] = autotvm.task.space.OtherOptionEntity(1500) cfg['unroll_explicit'] = autotvm.task.space.OtherOptionEntity(1) self.memory[key] = cfg return cfg def run_test_conv2d_cuda(dtype, out_dtype, scale, dshape, kshape, padding=(1, 1), groups=1, dilation=(1, 1), **attrs): x = relay.var("x", shape=dshape, dtype=dtype) w = relay.var("w", shape=kshape, dtype=dtype) y = relay.nn.conv2d(x, w, padding=padding, dilation=dilation, groups=groups, **attrs) func = relay.Function([x, w], y) mod = relay.Module() mod['main'] = func mod = relay.transform.InferType()(mod) data = np.random.uniform(-scale, scale, size=dshape).astype(dtype) kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype) ref_res = topi.testing.conv2d_nchw_python( data.astype(out_dtype), kernel.astype(out_dtype), 1, padding, groups=groups) with WinogradFallback(), relay.build_config(opt_level=3): for target, ctx in ctx_list(): if target != 'cuda': continue params = {'w': tvm.nd.array(kernel)} graph, lib, params = relay.build_module.build(mod, target=target, params=params) module = tvm.contrib.graph_runtime.create(graph, lib, ctx) module.set_input('x', tvm.nd.array(data)) 
module.set_input(**params) module.run() op_res1 = module.get_output(0) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-3, atol=1e-3) # normal winograd: stride 1, padding 1, kernel 3x3 dshape = (1, 80, 73, 73) kshape = (192, 80, 3, 3) run_test_conv2d_cuda("float32", "float32", 1, dshape, kshape, padding=(1, 1), channels=192, kernel_size=(3, 3)) # extended winograd: stride 1, padding N, kernel 3x3 run_test_conv2d_cuda("float32", "float32", 1, dshape, kshape, padding=(0, 0), channels=192, kernel_size=(3, 3)) run_test_conv2d_cuda("float32", "float32", 1, dshape, kshape, padding=(2, 2), channels=192, kernel_size=(3, 3)) # extended winograd: stride 1, padding N, kernel NxN kshape = (192, 80, 7, 7) run_test_conv2d_cuda("float32", "float32", 1, dshape, kshape, padding=(2, 2), channels=192, kernel_size=(7, 7)) def test_conv3d_infer_type(): # symbolic in batch dimension n, c, d, h, w = tvm.size_var("n"), 10, 224, 224, 224 x = relay.var("x", relay.ty.TensorType((n, c, d, h, w), "float32")) w = relay.var("w") y = relay.nn.conv3d(x, w, kernel_size=(3, 3, 3), padding=(1, 1, 1), channels=2) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 2, 224, 224, 224), "float32") assert yy.args[1].checked_type == relay.TensorType( (2, 10, 3, 3, 3), "float32") # infer by shape of w, mixed precision n, c, d, h, w = tvm.size_var("n"), 10, 224, 224, 224 x = relay.var("x", relay.TensorType((n, c, d, h, w), "int8")) w = relay.var("w", relay.TensorType((2, 10, 3, 3, 3), "int8")) y = relay.nn.conv3d(x, w, out_dtype="int32") assert "out_dtype=\"int32\"" in y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 2, 222, 222, 222), "int32") # infer shape in case of different dtypes for input and weight. n, c, d, h, w = tvm.size_var("n"), 10, 224, 224, 224 x = relay.var("x", relay.TensorType((n, c, d, h, w), "uint8")) w = relay.var("w", relay.TensorType((2, 10, 3, 3, 3), "int8")) y = relay.nn.conv3d(x, w, out_dtype="int32") assert "out_dtype=\"int32\"" in y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 2, 222, 222, 222), "int32") # Infer with NDHWC n, c, d, h, w = 4, 32, 224, 224, 224 x = relay.var("x", relay.TensorType((n, d, h, w, c), "int8")) wt = relay.var("w") y = relay.nn.conv3d(x, wt, kernel_size=(3, 3, 3), padding=(1, 1, 1), channels=16, data_layout="NDHWC", out_dtype="int32") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, d, h, w, 16), "int32") def test_conv3d_run(): def run_test_conv3d(dtype, out_dtype, scale, dshape, kshape, padding=(1, 1, 1), fref=None, groups=1, dilation=(1, 1, 1), except_targets=None, **attrs): if except_targets is None: except_targets = [] x = relay.var("x", shape=dshape, dtype=dtype) w = relay.var("w", dtype=dtype) y = relay.nn.conv3d(x, w, padding=padding, dilation=dilation, groups=groups, **attrs) func = relay.Function([x, w], y) data = np.random.uniform(-scale, scale, size=dshape).astype(dtype) kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype) dkernel = topi.testing.dilate_python(kernel, (1, 1) + dilation) if fref is None: ref_res = topi.testing.conv3d_ncdhw_python( data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding, groups=groups) else: ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype)) for target, ctx in ctx_list(): if target in except_targets: continue intrp1 = relay.create_executor("graph", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(data, kernel) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, 
atol=1e-5) # normal conv3d dshape = (1, 3, 5, 224, 224) kshape = (10, 3, 3, 3, 3) run_test_conv3d("float32", "float32", 1, dshape, kshape, padding=(1, 1, 1), channels=10, kernel_size=(3, 3 ,3)) def test_conv3d_ndhwc_run(): def run_test_conv3d(dtype, out_dtype, scale, dshape, kshape, padding=(1, 1, 1), fref=None, groups=1, dilation=(1, 1, 1), except_targets=None, **attrs): if except_targets is None: except_targets = [] x = relay.var("x", shape=dshape, dtype=dtype) w = relay.var("w", dtype=dtype) y = relay.nn.conv3d(x, w, padding=padding, dilation=dilation, groups=groups, data_layout="NDHWC", kernel_layout="DHWIO", **attrs) func = relay.Function([x, w], y) data = np.random.uniform(-scale, scale, size=dshape).astype(dtype) kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype) dkernel = topi.testing.dilate_python(kernel, (1, 1) + dilation) if fref is None: ref_res = topi.testing.conv3d_ndhwc_python( data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding) else: ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype)) for target, ctx in ctx_list(): if target in except_targets: continue intrp1 = relay.create_executor("graph", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(data, kernel) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) # normal conv3d dshape = (1, 5, 224, 224, 6) kshape = (3, 3, 3, 6, 10) run_test_conv3d("float32", "float32", 1, dshape, kshape, padding=(1, 1, 1), channels=10, kernel_size=(3, 3 ,3), except_targets=["cuda"]) def test_conv2d_transpose_infer_type(): # symbolic in batch dimension n, c, h, w = tvm.size_var("n"), 10, 10, 12 x = relay.var("x", relay.TensorType((n, c, h, w), "float32")) w = relay.var("w", relay.IncompleteType()) y = relay.nn.conv2d_transpose(x, w, kernel_size=(3, 3), padding=(1, 1), channels=15) assert "channels=15" in y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 15, 10, 12), "float32") assert yy.args[1].checked_type == relay.TensorType( (10, 15, 3, 3), "float32") # infer by shape of w, mixed precision n, h, w, c = tvm.size_var("n"), 10, 10, 12 x = relay.var("x", relay.TensorType((n, h, w, c), "float32")) w = relay.var("w", relay.TensorType((12, 11, 5, 5), "float32")) y = relay.nn.conv2d_transpose(x, w, output_padding=(1, 1), channels=11, data_layout="NHWC") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 15, 15, 11), "float32") def test_conv2d_transpose_nchw_run(): dshape = (1, 3, 18, 18) kshape = (3, 10, 3, 3) oshape = (1, 10, 37, 37) x = relay.var("x", shape=dshape) w = relay.var("w") y = relay.nn.conv2d_transpose(x, w, channels=10, kernel_size=(3,3), strides=(2,2), padding=(1,1), output_padding=(2, 2)) func = relay.Function([x, w], y) dtype = "float32" data = np.random.uniform(size=dshape).astype(dtype) kernel = np.random.uniform(size=kshape).astype(dtype) c_np = topi.testing.conv2d_transpose_nchw_python( data, kernel, 2, 1) d_np = np.zeros(shape=oshape) d_np[:,:,0:c_np.shape[2],0:c_np.shape[3]] = c_np ref_res = d_np for target, ctx in ctx_list(): intrp1 = relay.create_executor("graph", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(data, kernel) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def test_conv2d_transpose_nhwc_run(): dshape_nhwc = (1, 18, 18, 3) kshape_hwoi = (3, 3, 10, 3) oshape_nhwc = (1, 37, 37, 10) x = relay.var("x", shape=dshape_nhwc) w = relay.var("w") # kshape and kernel_layout should have swapped IO. 
# kshape is HWOI and kernel_layout is HWIO y = relay.nn.conv2d_transpose(x, w, channels=10, kernel_size=(3, 3), strides=(2, 2), padding=(1, 1), output_padding=(2, 2), data_layout="NHWC", kernel_layout="HWIO") func = relay.Function([x, w], y) dtype = "float32" data = np.random.uniform(size=dshape_nhwc).astype(dtype) kernel = np.random.uniform(size=kshape_hwoi).astype(dtype) # use true kshape layout here - HWOI c_np = topi.testing.conv2d_transpose_nhwc_python(data, kernel, 'HWOI', 2, 1) d_np = np.zeros(shape=oshape_nhwc) d_np[:,0:c_np.shape[1],0:c_np.shape[2],:] = c_np def test_conv1d_transpose_ncw_run(): dshape = (1, 3, 18) kshape = (3, 10, 3) oshape = (1, 10, 37) x = relay.var("x", shape=dshape) w = relay.var("w") y = relay.nn.conv1d_transpose(x, w, channels=10, kernel_size=(3,), strides=(2,), padding=(1,), output_padding=(2,)) func = relay.Function([x, w], y) dtype = "float32" data = np.random.uniform(size=dshape).astype(dtype) kernel = np.random.uniform(size=kshape).astype(dtype) c_np = topi.testing.conv1d_transpose_ncw_python( data, kernel, 2, 1) d_np = np.zeros(shape=oshape) d_np[:,:,0:c_np.shape[2]] = c_np ref_res = d_np for target, ctx in ctx_list(): intrp1 = relay.create_executor("graph", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(data, kernel) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def test_upsampling_infer_type(): n, c , h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w") scale = tvm.const(2.0, "float64") x = relay.var("x", relay.TensorType((n, c, h, w), "float32")) y = relay.nn.upsampling(x, scale_h=2, scale_w=2, layout="NCHW", method="bilinear") "method=\"BINLINEAR\"" in y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, tvm.expr.Cast("int32", tvm.round(h*scale)), tvm.expr.Cast("int32", tvm.round(w*scale))), "float32") n, c = tvm.size_var("n"), tvm.size_var("c") x = relay.var("x", relay.TensorType((n, c, 100, 200), "float32")) y = relay.nn.upsampling(x, scale_h=2, scale_w=2, layout="NCHW", method="bilinear") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, 200, 400), "float32") def test_upsampling3d_infer_type(): n, c, d, h, w = tvm.size_var("n"), tvm.size_var("c"),\ tvm.size_var("d"), tvm.size_var("h"), tvm.size_var("w") scale = tvm.const(2.0, "float64") x = relay.var("x", relay.TensorType((n, c, d, h, w), "float32")) y = relay.nn.upsampling3d(x, scale_d=2, scale_h=2, scale_w=2, layout="NCDHW", method="trilinear") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, tvm.expr.Cast("int32", tvm.round(d*scale)), tvm.expr.Cast("int32", tvm.round(h*scale)), tvm.expr.Cast("int32", tvm.round(w*scale))), "float32") n, c = tvm.size_var("n"), tvm.size_var("c") x = relay.var("x", relay.TensorType((n, c, 100, 100, 200), "float32")) y = relay.nn.upsampling3d(x, scale_d=2, scale_h=2, scale_w=2, layout="NCDHW", method="trilinear") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, 200, 200, 400), "float32") def _test_pool2d(opfunc, reffunc): n, c, h, w = tvm.size_var("n"), 10, 224, 224 x = relay.var("x", relay.TensorType((n, c, h, w), "float32")) y = opfunc(x, pool_size=(1, 1)) assert "pool_size=" in y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, 10, 224, 224), "float32") # test execution dtype = "float32" dshape = (1, 3, 28, 28) x = relay.var("x", shape=dshape) y = opfunc(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0)) func = relay.Function([x], y) data = 
np.random.uniform(size=dshape).astype(dtype) ref_res = reffunc(data.reshape(1, 3, 14, 2, 14, 2), axis=(3, 5)) for target, ctx in ctx_list(): intrp1 = relay.create_executor("graph", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def _test_pool2d_int(opfunc, reffunc, dtype): n, c, h, w = tvm.size_var("n"), 10, 224, 224 x = relay.var("x", relay.TensorType((n, c, h, w), dtype)) y = opfunc(x, pool_size=(1, 1)) assert "pool_size=" in y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, 10, 224, 224), dtype) # test execution dtype = "int32" dshape = (1, 3, 28, 28) x = relay.var("x", shape=dshape, dtype=dtype) y = opfunc(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0)) func = relay.Function([x], y) data = np.random.random_integers(low=-128, high=128, size=dshape) ref_res = reffunc(data.reshape(1,3,14,2,14,2), axis=(3,5)).astype(dtype) for target, ctx in ctx_list(): intrp1 = relay.create_executor("graph", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def _test_global_pool2d(opfunc, reffunc): n, c, h, w = tvm.size_var("n"), tvm.size_var("c"), 224, 224 x = relay.var("x", relay.TensorType((n, h, w, c), "float32")) y = opfunc(x, layout="NHWC") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, 1, 1, c), "float32") n, c, h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w") x = relay.var("x", relay.TensorType((n, c, h, w), "float32")) y = opfunc(x) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, 1, 1), "float32") # test execution dtype = "float32" dshape = (1, 1024, 7, 7) x = relay.var("x", shape=dshape) y = opfunc(x) func = relay.Function([x], y) data = np.random.uniform(size=dshape).astype(dtype) ref_res = reffunc(data, axis=(2,3), keepdims=True) for target, ctx in ctx_list(): intrp1 = relay.create_executor("graph", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def test_pool2d(): _test_pool2d(relay.nn.max_pool2d, np.max) _test_pool2d(relay.nn.avg_pool2d, np.mean) _test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'int32') _test_pool2d_int(relay.nn.avg_pool2d, np.mean, 'uint16') _test_global_pool2d(relay.nn.global_max_pool2d, np.max) _test_global_pool2d(relay.nn.global_avg_pool2d, np.mean) def test_pool1d(): def _test_pool1d(opfunc): n, c, w = tvm.var("n"), 10, 224 x = relay.var("x", relay.TensorType((n, c, w), "float32")) y = opfunc(x, pool_size=(1,)) assert "pool_size=" in y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, 10, 224), "float32") # test execution dtype = "float32" dshape = (1, 3, 32) x = relay.var("x", shape=dshape) pool_type = 'max' if 'max' in str(opfunc) else 'avg' y = opfunc(x, pool_size=(2,), strides=(2,), padding=(0, 0)) func = relay.Function([x], y) data = np.random.uniform(size=dshape).astype(dtype) ref_res = topi.testing.pool1d_ncw_python(data, (2,), (2,), (0, 0), (1, 3, 16), pool_type, False) for target, ctx in ctx_list(): intrp1 = relay.create_executor("graph", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) _test_pool1d(relay.nn.max_pool1d) _test_pool1d(relay.nn.avg_pool1d) def test_pool3d(): def _test_pool3d(opfunc, padding=(0, 0, 0, 0, 0, 0), out_shape=(1, 3, 16, 16, 16)): n, c, 
d, h, w = tvm.size_var("n"), 10, 5, 224, 224 x = relay.var("x", relay.TensorType((n, c, d, h, w), "float32")) y = opfunc(x, pool_size=(1, 1, 1)) assert "pool_size=" in y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, 10, 5, 224, 224), "float32") # test execution dtype = "float32" dshape = (1, 3, 32, 32, 32) x = relay.var("x", shape=dshape) pool_type = 'max' if 'max' in str(opfunc) else 'avg' y = opfunc(x, pool_size=(2, 2, 2), strides=(2, 2, 2), padding=padding) func = relay.Function([x], y) # check output shape f_out_shape = tuple(map(lambda x: int(x), run_infer_type(func).ret_type.shape)) assert out_shape == f_out_shape, \ "Output shape mismatch. expected {}, actual {}".format(out_shape, f_out_shape) data = np.random.uniform(size=dshape).astype(dtype) ref_res = topi.testing.pool3d_ncdhw_python(data, (2, 2, 2), (2, 2, 2), padding, out_shape, pool_type, False) for target, ctx in ctx_list(): intrp1 = relay.create_executor("graph", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) _test_pool3d(relay.nn.max_pool3d) _test_pool3d(relay.nn.max_pool3d, padding=(2, 0, 0, 2, 0, 0), out_shape=(1, 3, 18, 16, 16)) _test_pool3d(relay.nn.max_pool3d, padding=(0, 3, 0, 0, 3, 0), out_shape=(1, 3, 16, 19, 16)) _test_pool3d(relay.nn.max_pool3d, padding=(0, 0, 4, 0, 0, 4), out_shape=(1, 3, 16, 16, 20)) _test_pool3d(relay.nn.avg_pool3d) _test_pool3d(relay.nn.avg_pool3d, padding=(2, 0, 0, 2, 0, 0), out_shape=(1, 3, 18, 16, 16)) _test_pool3d(relay.nn.avg_pool3d, padding=(0, 3, 0, 0, 3, 0), out_shape=(1, 3, 16, 19, 16)) _test_pool3d(relay.nn.avg_pool3d, padding=(0, 0, 4, 0, 0, 4), out_shape=(1, 3, 16, 16, 20)) def test_avg_pool2d_no_count_pad(): kh, kw = (4, 4) sh, sw = (2, 2) ph, pw = (2, 2) n = 1 (ic, ih, iw) = (3, 28, 28) (oc, oh, ow) = (3, 15, 15) dshape = (n, ic, ih, iw) x = relay.var("x", shape=dshape) y = relay.nn.avg_pool2d(x, pool_size=(kh, kw), strides=(sw, sw), padding=(ph, pw), count_include_pad=False) func = relay.Function([x], y) dtype = "float32" a_np = np.random.uniform(low=0.001, size=(n, ic, ih, iw)).astype(dtype) pad_np = np.zeros(shape=(n, ic, ih+2*ph, iw+2*pw)).astype(dtype) no_zero = (range(n), range(ic), (range(ph, ih+ph)), (range(pw, iw+pw))) pad_np[np.ix_(*no_zero)] = a_np b_np = np.zeros(shape=(n, oc, oh, ow)).astype(dtype) for i in range(oh): for j in range(ow): pad_count = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw] > 0, axis=(2,3)) b_np[:,:,i,j] = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw], axis=(2,3)) / np.maximum(pad_count, 1) ref_res = np.maximum(b_np, 0.0) data = a_np for target, ctx in ctx_list(): intrp1 = relay.create_executor("graph", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def test_flatten_infer_type(): d1, d2, d3, d4 = tvm.size_var("d1"), tvm.size_var("d2"), tvm.size_var("d3"), tvm.size_var("d4") x = relay.var("x", relay.TensorType((d1, d2, d3, d4), "float32")) y = relay.nn.batch_flatten(x) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((d1, ((d2*d3)*d4)), "float32") x = relay.var("x", relay.TensorType((3, 2, 4, 3), "float32")) y = relay.nn.batch_flatten(x) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((3, 24), "float32") x = relay.var("x", relay.TensorType((d1, 2, d3, 3), "float32")) y = relay.nn.batch_flatten(x) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((d1, ((2*d3)*3)), "float32") 
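    # Execution part of the flatten test: a concrete (1, 5, 10, 10) input is flattened
    # to (1, 500) and compared against a plain NumPy reshape on both the "graph" and
    # "debug" executors.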
shape = (1, 5, 10, 10) o_shape = (1, 500) dtype = "float32" x = relay.var("x", relay.TensorType(shape, dtype)) z = relay.nn.batch_flatten(x) yy = run_infer_type(z) assert yy.checked_type == relay.TensorType(o_shape, dtype) func = relay.Function([x], z) x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype) ref_res = x_data.flatten().reshape(o_shape) for target, ctx in ctx_list(): intrp1 = relay.create_executor("graph", ctx=ctx, target=target) intrp2 = relay.create_executor("debug", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(x_data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5) op_res2 = intrp2.evaluate(func)(x_data) tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) def test_pad_infer_type(): # entirely concrete case n, c, h, w = 1, 2, 3, 4 t = relay.var("t", relay.TensorType((n, c, h, w), "float32")) y = relay.nn.pad(t, ((1, 1), (2, 2), (3, 3), (4, 4))) "pad_width=" in y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((3, 6, 9, 12), "float32") # some symbolic values n, c, h, w = tvm.size_var("n"), 2, 3, tvm.size_var("w") t = relay.var("t", relay.TensorType((n, c, h, w), "float32")) y = relay.nn.pad(t, ((1, 1), (2, 2), (3, 3), (4, 4))) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n + 2, 6, 9, w + 8), "float32") def test_pad_run(): def _test_run(dtype): dshape = (4, 10, 7, 7) x = relay.var("x", shape=dshape) y = relay.nn.pad(x, ((1, 1), (2, 2), (3, 3), (4, 4))) func = relay.Function([x], y) data = np.random.uniform(size=dshape).astype(dtype) ref_res = np.pad(data, ((1, 1), (2, 2), (3, 3), (4, 4)), 'constant') for target, ctx in ctx_list(): intrp1 = relay.create_executor("graph", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) _test_run('float32') _test_run('int32') def test_lrn(): n, c , h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w") x = relay.var("x", shape=(n, c , h, w)) y = relay.nn.lrn(x, size=10, axis=2, bias=0.5, alpha=.00001, beta=0.75) "alpha=" in y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c , h, w)) shape = (1, 5, 10, 10) dtype = "float32" x = relay.var("x", relay.TensorType(shape, dtype)) size=5 axis=1 bias=0.5 alpha=.00001 beta=0.75 z = relay.nn.lrn(x, size=size, axis=axis, bias=bias, alpha=alpha, beta=beta) yy = run_infer_type(z) assert yy.checked_type == relay.TensorType(shape, dtype) func = relay.Function([x], z) x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype) ref_res = topi.testing.lrn_python(x_data, size, axis, bias, alpha, beta) for target, ctx in ctx_list(): intrp1 = relay.create_executor("graph", ctx=ctx, target=target) intrp2 = relay.create_executor("debug", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(x_data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5) op_res2 = intrp2.evaluate(func)(x_data) tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) def test_l2_normalize(): n, c , h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w") x = relay.var("x", shape=(n, c , h, w)) y = relay.nn.l2_normalize(x, eps=0.001, axis=[1]) "axis=" in y.astext() yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c , h, w)) shape = (1, 5, 10, 10) dtype = "float32" x = relay.var("x", relay.TensorType(shape, dtype)) eps=0.001 axis=1 z = relay.nn.l2_normalize(x, eps=0.001, axis=[axis]) yy = run_infer_type(z) 
assert yy.checked_type == relay.TensorType(shape, dtype) func = relay.Function([x], z) x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype) ref_res = topi.testing.l2_normalize_python(x_data, eps, axis) for target, ctx in ctx_list(): intrp1 = relay.create_executor("graph", ctx=ctx, target=target) intrp2 = relay.create_executor("debug", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(x_data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5) op_res2 = intrp2.evaluate(func)(x_data) tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) def batch_flatten(data): shape = data.shape target_dim = 1 for i in range(len(shape) - 1): target_dim = target_dim * shape[i + 1] return np.reshape(data, (shape[0], target_dim)) def test_batch_flatten(): t1 = relay.TensorType((5, 10, 5)) x = relay.Var("x", t1) func = relay.Function([x], relay.nn.batch_flatten(x)) data = np.random.rand(5, 10, 5).astype(t1.dtype) ref_res = batch_flatten(data) for target, ctx in ctx_list(): intrp = relay.create_executor("graph", ctx=ctx, target=target) op_res = intrp.evaluate(func)(data) np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01) def _test_upsampling(layout, method, align_corners=False): n, c, h, w = tvm.size_var("n"), 16, 32, 32 scale_h = 2.0 scale_w = 2.0 dtype = "float32" def get_shape(): if layout == "NCHW": return (c, h, w), (c, int(round(h*scale_h)), int(round(w*scale_w))) else: return (h, w, c), (int(round(h*scale_h)), int(round(w*scale_w)), c) ishape, oshape = get_shape() x = relay.var("x", relay.TensorType((n,) + ishape, dtype)) y = relay.nn.upsampling(x, scale_h=scale_h, scale_w=scale_w, layout=layout, method=method, align_corners=align_corners) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n,) + oshape, dtype) dshape = (1,) + ishape x = relay.var("x", shape=dshape) y = relay.nn.upsampling(x, scale_h=scale_h, scale_w=scale_w, layout=layout, method=method, align_corners=align_corners) func = relay.Function([x], y) data = np.random.uniform(size=dshape).astype(dtype) if method == "nearest_neighbor": ref = topi.testing.upsampling_python(data, (scale_h, scale_w), layout) else: ref = topi.testing.bilinear_resize_python(data, (int(round(h*scale_h)), int(round(w*scale_w))), layout) for target, ctx in ctx_list(): executor = relay.create_executor("graph", ctx=ctx, target=target) out = executor.evaluate(func)(data) tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5, atol=1e-5) def test_upsampling(): _test_upsampling("NCHW", "nearest_neighbor") _test_upsampling("NCHW", "bilinear", True) _test_upsampling("NHWC", "nearest_neighbor") _test_upsampling("NHWC", "bilinear", True) def _test_upsampling3d(layout, method, coordinate_transformation_mode="half_pixel"): n, c, d, h, w = tvm.size_var("n"), 8, 16, 16, 16 scale_d = 2.0 scale_h = 2.0 scale_w = 2.0 dtype = "float32" def get_shape(): if layout == "NCDHW": return (c, d, h, w), (c, int(round(d*scale_d)), int(round(h*scale_h)),\ int(round(w*scale_w))) else: return (d, h, w, c), (int(round(d*scale_d)), int(round(h*scale_h)),\ int(round(w*scale_w)), c) ishape, oshape = get_shape() x = relay.var("x", relay.TensorType((n,) + ishape, dtype)) y = relay.nn.upsampling3d(x, scale_d=scale_d, scale_h=scale_h, scale_w=scale_w,\ layout=layout, method=method,\ coordinate_transformation_mode=coordinate_transformation_mode) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n,) + oshape, dtype) dshape = (1,) + ishape x = relay.var("x", shape=dshape) y = relay.nn.upsampling3d(x, 
scale_d=scale_d, scale_h=scale_h, scale_w=scale_w,\ layout=layout, method=method,\ coordinate_transformation_mode=coordinate_transformation_mode) func = relay.Function([x], y) data = np.random.uniform(size=dshape).astype(dtype) if method == "nearest_neighbor": ref = topi.testing.upsampling3d_python(data, (scale_d, scale_h, scale_w), layout) else: ref = topi.testing.trilinear_resize3d_python(data, (int(round(d*scale_d)),\ int(round(h*scale_h)),\ int(round(w*scale_w))), layout) for target, ctx in ctx_list(): executor = relay.create_executor("graph", ctx=ctx, target=target) out = executor.evaluate(func)(data) tvm.testing.assert_allclose(out.asnumpy(), ref, rtol=1e-5, atol=1e-5) def test_upsampling3d(): _test_upsampling3d("NCDHW", "nearest_neighbor") _test_upsampling3d("NCDHW", "trilinear", "align_corners") _test_upsampling3d("NDHWC", "nearest_neighbor") _test_upsampling3d("NDHWC", "trilinear", "align_corners") def test_conv2d_int8_intrinsics(): def _compile(ic, oc, target, data_layout, kernel_layout, dtypes): input_dtype, weight_dtype, output_dtype = dtypes n, h, w, ch, cw = 1, 64, 64, 3, 3 if data_layout == 'NCHW': data_shape = (n, ic, h, w) x = relay.var("x", relay.TensorType(data_shape, input_dtype)) elif data_layout == 'NHWC': data_shape = (n, h, w, ic) x = relay.var("x", relay.TensorType(data_shape, input_dtype)) else: raise ValueError('Not supported') if kernel_layout == 'OIHW': kernel_shape = (oc, ic, ch, cw) elif kernel_layout == 'HWIO': kernel_shape = (ch, cw, ic, oc) else: raise ValueError('Not supported') weight = relay.var("weight", relay.TensorType(kernel_shape, weight_dtype)) y = relay.nn.conv2d(x, weight, kernel_size=(ch, cw), channels=oc, padding=(1, 1), dilation=(1, 1), data_layout=data_layout, kernel_layout=kernel_layout, out_dtype=output_dtype) func = relay.Function([x, weight], y) wdata = np.random.rand(*kernel_shape) * 10 parameters = {"weight": tvm.nd.array(wdata.astype(weight_dtype))} with relay.build_config(opt_level=3): graph, lib, params = relay.build(func, target, params=parameters) assembly = lib.get_source("asm") return assembly def _has_fast_int8_instructions(asm, target): if 'skylake-avx512' in target: return "pmaddubs" in asm elif 'cascadelake' in target: return "vpdpbusd" in asm else: assert False, "Target should be Skylake or Cascadelake" # compile conv2d for x86 (skylake, cascadelake) and test assembly contains *pmadd* instructions targets = ["llvm -mcpu=skylake-avx512", "llvm -mcpu=cascadelake"] llvm_version = tvm.codegen.llvm_version_major() for target in targets: if llvm_version >= 8: dtypes = ('uint8', 'int8', 'int32') # Sweep the input channels to check int8 robustness # Input channels should be a multiple of 4 internally. for ic in [1, 4, 6]: asm = _compile(ic=ic, oc=16, target=target, data_layout="NCHW", kernel_layout='OIHW', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) for ic in [1, 4, 6]: asm = _compile(ic=ic, oc=16, target=target, data_layout="NHWC", kernel_layout='HWIO', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) # Sweep the output channels to check int8 robustness # Output channels should be a multiple of 16 internally. 
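            # oc values of 4 and 20 are deliberately not multiples of 16, so this sweep
            # also exercises the internal padding/legalization path while still expecting
            # the fast int8 instructions to show up in the generated assembly.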
for oc in [4, 16, 20]: asm = _compile(ic=8, oc=oc, target=target, data_layout="NCHW", kernel_layout='OIHW', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) for oc in [4, 16, 20]: asm = _compile(ic=8, oc=oc, target=target, data_layout="NHWC", kernel_layout='HWIO', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) # Check that both non-divisible oc and ic work asm = _compile(ic=17, oc=29, target=target, data_layout="NCHW", kernel_layout='OIHW', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) asm = _compile(ic=17, oc=29, target=target, data_layout="NHWC", kernel_layout='HWIO', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) # Check that int8 x int8 goes through legalization so that fast instructions can be picked up. for target in targets: if llvm_version >= 8: dtypes = (('int8', 'int8', 'int32')) # Check that both non-divisible oc and ic work asm = _compile(ic=17, oc=29, target=target, data_layout="NCHW", kernel_layout='OIHW', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) asm = _compile(ic=17, oc=29, target=target, data_layout="NHWC", kernel_layout='HWIO', dtypes=dtypes) assert _has_fast_int8_instructions(asm, target) # Ensure that code is generated when datatypes are not HW supported. dtypes = ('uint8', 'uint8', 'int32') asm = _compile(ic=16, oc=32, target=target, data_layout="NHWC", kernel_layout='HWIO', dtypes=dtypes) # Check that intrinisic is not present in the assembly. assert not _has_fast_int8_instructions(asm, target) # Check that a vectorized instruction is generated for older Intel # generations, because we default to NCHWc layout. target = "llvm -mcpu=core-avx2" fast_int8_dtypes = ('uint8', 'int8', 'int32') asm = _compile(ic=16, oc=32, target=target, data_layout="NCHW", kernel_layout='OIHW', dtypes=fast_int8_dtypes) # Check that vector int mult and add instructions are generated. assert "vpmulld" in asm and "vpadd" in asm def test_depthwise_conv2d_int8(): input_dtype = 'uint8' weight_dtype = 'int8' output_dtype = 'int32' data_shape = (1, 64, 56, 56) x = relay.var("x", relay.TensorType(data_shape, input_dtype)) kernel_shape = (64, 1, 3, 3) weight = relay.var("weight", relay.TensorType(kernel_shape, weight_dtype)) y = relay.nn.conv2d(x, weight, kernel_size=(3, 3), groups=64, padding=(1, 1), dilation=(1, 1), out_dtype=output_dtype) func = relay.Function([x, weight], y) wdata = np.random.rand(*kernel_shape) * 10 parameters = {"weight": tvm.nd.array(wdata.astype(weight_dtype))} targets = ["llvm -mcpu=skylake-avx512", "llvm -mcpu=cascadelake"] llvm_version = tvm.codegen.llvm_version_major() for target in targets: if llvm_version >= 8: with relay.build_config(opt_level=3): graph, lib, params = relay.build(func, target, params=parameters) def test_bitserial_conv2d_infer_type(): # Basic shape test with ambiguous batch. n, c, h, w = tvm.size_var("n"), 32, 224, 224 x = relay.var("x", relay.ty.TensorType((n, c, h, w), "int16")) w = relay.var("w", relay.ty.TensorType((32, 32, 3, 3), "int16")) y = relay.nn.bitserial_conv2d( x, w, kernel_size=(3, 3), padding=(0, 0), channels=32) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType( (n, 32, 222, 222), "int16") def test_bitpack_infer_type(): # Test axis packing shape inference. 
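    # With pack_axis=1, pack_type='uint16' and bits=1, the 32 int16 channels are packed
    # 16-per-uint16 element (32 -> 2) and a new bit axis of length 1 is inserted at
    # position 4, giving the (32, 2, 128, 128, 1) "uint16" type asserted below.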
    o, i, h, w = 32, 32, 128, 128
    x = relay.var("x", relay.ty.TensorType((o, i, h, w), "int16"))
    y = relay.nn.bitpack(x, bit_axis=4, pack_axis=1, pack_type='uint16', bits=1)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType(
        (32, 2, 128, 128, 1), "uint16")


if __name__ == "__main__":
    test_pool1d()
    test_pool2d()
    test_pool3d()
    test_avg_pool2d_no_count_pad()
    test_lrn()
    test_l2_normalize()
    test_conv1d_infer_type()
    test_conv2d_infer_type()
    test_conv3d_infer_type()
    test_bitpack_infer_type()
    test_upsampling_infer_type()
    test_upsampling3d_infer_type()
    test_flatten_infer_type()
    test_pad_infer_type()
    test_pad_run()
    test_conv2d_transpose_infer_type()
    test_conv2d_transpose_nchw_run()
    test_conv2d_transpose_nhwc_run()
    test_conv1d_transpose_ncw_run()
    test_conv1d_run()
    test_conv2d_run()
    test_conv2d_winograd()
    test_conv3d_run()
    test_conv3d_ndhwc_run()
    test_bitserial_conv2d_infer_type()
    test_batch_flatten()
    test_upsampling()
    test_upsampling3d()
    test_conv2d_int8_intrinsics()
    test_depthwise_conv2d_int8()
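# A minimal sketch (not part of the test suite) of the infer-then-assert pattern used
# throughout this file, assuming the same environment (tvm, relay and run_infer_type
# from tvm.relay.testing already imported):
#
#   x = relay.var("x", relay.TensorType((1, 3, 224, 224), "float32"))
#   w = relay.var("w")
#   y = relay.nn.conv2d(x, w, kernel_size=(3, 3), padding=(1, 1), channels=10)
#   yy = run_infer_type(y)
#   assert yy.checked_type == relay.TensorType((1, 10, 224, 224), "float32")
#   assert yy.args[1].checked_type == relay.TensorType((10, 3, 3, 3), "float32")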
[((1217, 1231), 'tvm.relay.var', 'relay.var', (['"""w"""'], {}), "('w')\n", (1226, 1231), False, 'from tvm import relay\n'), ((1240, 1304), 'tvm.relay.nn.conv1d', 'relay.nn.conv1d', (['x', 'w'], {'kernel_size': '(3)', 'padding': '(1, 1)', 'channels': '(2)'}), '(x, w, kernel_size=3, padding=(1, 1), channels=2)\n', (1255, 1304), False, 'from tvm import relay\n'), ((1386, 1403), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (1400, 1403), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((1781, 1821), 'tvm.relay.nn.conv1d', 'relay.nn.conv1d', (['x', 'w'], {'out_dtype': '"""int32"""'}), "(x, w, out_dtype='int32')\n", (1796, 1821), False, 'from tvm import relay\n'), ((1878, 1895), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (1892, 1895), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((2210, 2250), 'tvm.relay.nn.conv1d', 'relay.nn.conv1d', (['x', 'w'], {'out_dtype': '"""int32"""'}), "(x, w, out_dtype='int32')\n", (2225, 2250), False, 'from tvm import relay\n'), ((2307, 2324), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (2321, 2324), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((2520, 2534), 'tvm.relay.var', 'relay.var', (['"""w"""'], {}), "('w')\n", (2529, 2534), False, 'from tvm import relay\n'), ((2543, 2651), 'tvm.relay.nn.conv1d', 'relay.nn.conv1d', (['x', 'wt'], {'kernel_size': '(3)', 'padding': '(1, 1)', 'channels': '(16)', 'data_layout': '"""NWC"""', 'out_dtype': '"""int32"""'}), "(x, wt, kernel_size=3, padding=(1, 1), channels=16,\n data_layout='NWC', out_dtype='int32')\n", (2558, 2651), False, 'from tvm import relay\n'), ((2777, 2794), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (2791, 2794), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((4870, 4884), 'tvm.relay.var', 'relay.var', (['"""w"""'], {}), "('w')\n", (4879, 4884), False, 'from tvm import relay\n'), ((4893, 4962), 'tvm.relay.nn.conv2d', 'relay.nn.conv2d', (['x', 'w'], {'kernel_size': '(3, 3)', 'padding': '(1, 1)', 'channels': '(2)'}), '(x, w, kernel_size=(3, 3), padding=(1, 1), channels=2)\n', (4908, 4962), False, 'from tvm import relay\n'), ((5044, 5061), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (5058, 5061), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((5466, 5506), 'tvm.relay.nn.conv2d', 'relay.nn.conv2d', (['x', 'w'], {'out_dtype': '"""int32"""'}), "(x, w, out_dtype='int32')\n", (5481, 5506), False, 'from tvm import relay\n'), ((5563, 5580), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (5577, 5580), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((5919, 5959), 'tvm.relay.nn.conv2d', 'relay.nn.conv2d', (['x', 'w'], {'out_dtype': '"""int32"""'}), "(x, w, out_dtype='int32')\n", (5934, 5959), False, 'from tvm import relay\n'), ((6016, 6033), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (6030, 6033), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((6272, 6286), 'tvm.relay.var', 'relay.var', (['"""w"""'], {}), "('w')\n", (6281, 6286), False, 'from tvm import relay\n'), ((6295, 6439), 'tvm.relay.nn.conv2d', 'relay.nn.conv2d', (['x', 'wt'], {'kernel_size': '(3, 3)', 'padding': '(1, 1)', 'channels': '(16)', 'data_layout': '"""NCHW4n4c"""', 'kernel_layout': '"""OIHW4o4i"""', 'out_dtype': '"""int32"""'}), "(x, wt, kernel_size=(3, 3), padding=(1, 1), channels=16,\n 
data_layout='NCHW4n4c', kernel_layout='OIHW4o4i', out_dtype='int32')\n", (6310, 6439), False, 'from tvm import relay\n'), ((6589, 6606), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (6603, 6606), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((6917, 6931), 'tvm.relay.var', 'relay.var', (['"""w"""'], {}), "('w')\n", (6926, 6931), False, 'from tvm import relay\n'), ((6940, 7054), 'tvm.relay.nn.conv2d', 'relay.nn.conv2d', (['x', 'wt'], {'kernel_size': '(3, 3)', 'padding': '(1, 1)', 'channels': '(16)', 'data_layout': '"""NHWC"""', 'out_dtype': '"""int32"""'}), "(x, wt, kernel_size=(3, 3), padding=(1, 1), channels=16,\n data_layout='NHWC', out_dtype='int32')\n", (6955, 7054), False, 'from tvm import relay\n'), ((7180, 7197), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (7194, 7197), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((16711, 16725), 'tvm.relay.var', 'relay.var', (['"""w"""'], {}), "('w')\n", (16720, 16725), False, 'from tvm import relay\n'), ((16734, 16809), 'tvm.relay.nn.conv3d', 'relay.nn.conv3d', (['x', 'w'], {'kernel_size': '(3, 3, 3)', 'padding': '(1, 1, 1)', 'channels': '(2)'}), '(x, w, kernel_size=(3, 3, 3), padding=(1, 1, 1), channels=2)\n', (16749, 16809), False, 'from tvm import relay\n'), ((16891, 16908), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (16905, 16908), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((17335, 17375), 'tvm.relay.nn.conv3d', 'relay.nn.conv3d', (['x', 'w'], {'out_dtype': '"""int32"""'}), "(x, w, out_dtype='int32')\n", (17350, 17375), False, 'from tvm import relay\n'), ((17432, 17449), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (17446, 17449), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((17807, 17847), 'tvm.relay.nn.conv3d', 'relay.nn.conv3d', (['x', 'w'], {'out_dtype': '"""int32"""'}), "(x, w, out_dtype='int32')\n", (17822, 17847), False, 'from tvm import relay\n'), ((17904, 17921), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (17918, 17921), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((18151, 18165), 'tvm.relay.var', 'relay.var', (['"""w"""'], {}), "('w')\n", (18160, 18165), False, 'from tvm import relay\n'), ((18174, 18296), 'tvm.relay.nn.conv3d', 'relay.nn.conv3d', (['x', 'wt'], {'kernel_size': '(3, 3, 3)', 'padding': '(1, 1, 1)', 'channels': '(16)', 'data_layout': '"""NDHWC"""', 'out_dtype': '"""int32"""'}), "(x, wt, kernel_size=(3, 3, 3), padding=(1, 1, 1), channels=\n 16, data_layout='NDHWC', out_dtype='int32')\n", (18189, 18296), False, 'from tvm import relay\n'), ((18421, 18438), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (18435, 18438), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((22408, 22493), 'tvm.relay.nn.conv2d_transpose', 'relay.nn.conv2d_transpose', (['x', 'w'], {'kernel_size': '(3, 3)', 'padding': '(1, 1)', 'channels': '(15)'}), '(x, w, kernel_size=(3, 3), padding=(1, 1), channels=15\n )\n', (22433, 22493), False, 'from tvm import relay\n'), ((22639, 22656), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (22653, 22656), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((23065, 23156), 'tvm.relay.nn.conv2d_transpose', 'relay.nn.conv2d_transpose', (['x', 'w'], {'output_padding': '(1, 1)', 'channels': '(11)', 'data_layout': '"""NHWC"""'}), "(x, w, 
output_padding=(1, 1), channels=11,\n data_layout='NHWC')\n", (23090, 23156), False, 'from tvm import relay\n'), ((23264, 23281), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (23278, 23281), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((23498, 23526), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': 'dshape'}), "('x', shape=dshape)\n", (23507, 23526), False, 'from tvm import relay\n'), ((23535, 23549), 'tvm.relay.var', 'relay.var', (['"""w"""'], {}), "('w')\n", (23544, 23549), False, 'from tvm import relay\n'), ((23558, 23681), 'tvm.relay.nn.conv2d_transpose', 'relay.nn.conv2d_transpose', (['x', 'w'], {'channels': '(10)', 'kernel_size': '(3, 3)', 'strides': '(2, 2)', 'padding': '(1, 1)', 'output_padding': '(2, 2)'}), '(x, w, channels=10, kernel_size=(3, 3), strides=(2,\n 2), padding=(1, 1), output_padding=(2, 2))\n', (23583, 23681), False, 'from tvm import relay\n'), ((23754, 23779), 'tvm.relay.Function', 'relay.Function', (['[x, w]', 'y'], {}), '([x, w], y)\n', (23768, 23779), False, 'from tvm import relay\n'), ((24009, 24031), 'numpy.zeros', 'np.zeros', ([], {'shape': 'oshape'}), '(shape=oshape)\n', (24017, 24031), True, 'import numpy as np\n'), ((24128, 24138), 'tvm.relay.testing.ctx_list', 'ctx_list', ([], {}), '()\n', (24136, 24138), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((24499, 24532), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': 'dshape_nhwc'}), "('x', shape=dshape_nhwc)\n", (24508, 24532), False, 'from tvm import relay\n'), ((24541, 24555), 'tvm.relay.var', 'relay.var', (['"""w"""'], {}), "('w')\n", (24550, 24555), False, 'from tvm import relay\n'), ((24666, 24835), 'tvm.relay.nn.conv2d_transpose', 'relay.nn.conv2d_transpose', (['x', 'w'], {'channels': '(10)', 'kernel_size': '(3, 3)', 'strides': '(2, 2)', 'padding': '(1, 1)', 'output_padding': '(2, 2)', 'data_layout': '"""NHWC"""', 'kernel_layout': '"""HWIO"""'}), "(x, w, channels=10, kernel_size=(3, 3), strides=(2,\n 2), padding=(1, 1), output_padding=(2, 2), data_layout='NHWC',\n kernel_layout='HWIO')\n", (24691, 24835), False, 'from tvm import relay\n'), ((24941, 24966), 'tvm.relay.Function', 'relay.Function', (['[x, w]', 'y'], {}), '([x, w], y)\n', (24955, 24966), False, 'from tvm import relay\n'), ((25246, 25273), 'numpy.zeros', 'np.zeros', ([], {'shape': 'oshape_nhwc'}), '(shape=oshape_nhwc)\n', (25254, 25273), True, 'import numpy as np\n'), ((25447, 25475), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': 'dshape'}), "('x', shape=dshape)\n", (25456, 25475), False, 'from tvm import relay\n'), ((25484, 25498), 'tvm.relay.var', 'relay.var', (['"""w"""'], {}), "('w')\n", (25493, 25498), False, 'from tvm import relay\n'), ((25507, 25622), 'tvm.relay.nn.conv1d_transpose', 'relay.nn.conv1d_transpose', (['x', 'w'], {'channels': '(10)', 'kernel_size': '(3,)', 'strides': '(2,)', 'padding': '(1,)', 'output_padding': '(2,)'}), '(x, w, channels=10, kernel_size=(3,), strides=(2,),\n padding=(1,), output_padding=(2,))\n', (25532, 25622), False, 'from tvm import relay\n'), ((25698, 25723), 'tvm.relay.Function', 'relay.Function', (['[x, w]', 'y'], {}), '([x, w], y)\n', (25712, 25723), False, 'from tvm import relay\n'), ((25952, 25974), 'numpy.zeros', 'np.zeros', ([], {'shape': 'oshape'}), '(shape=oshape)\n', (25960, 25974), True, 'import numpy as np\n'), ((26055, 26065), 'tvm.relay.testing.ctx_list', 'ctx_list', ([], {}), '()\n', (26063, 26065), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((26420, 26445), 
'tvm.const', 'tvm.const', (['(2.0)', '"""float64"""'], {}), "(2.0, 'float64')\n", (26429, 26445), False, 'import tvm\n'), ((26520, 26598), 'tvm.relay.nn.upsampling', 'relay.nn.upsampling', (['x'], {'scale_h': '(2)', 'scale_w': '(2)', 'layout': '"""NCHW"""', 'method': '"""bilinear"""'}), "(x, scale_h=2, scale_w=2, layout='NCHW', method='bilinear')\n", (26539, 26598), False, 'from tvm import relay\n'), ((26649, 26666), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (26663, 26666), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((27043, 27121), 'tvm.relay.nn.upsampling', 'relay.nn.upsampling', (['x'], {'scale_h': '(2)', 'scale_w': '(2)', 'layout': '"""NCHW"""', 'method': '"""bilinear"""'}), "(x, scale_h=2, scale_w=2, layout='NCHW', method='bilinear')\n", (27062, 27121), False, 'from tvm import relay\n'), ((27131, 27148), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (27145, 27148), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((27409, 27434), 'tvm.const', 'tvm.const', (['(2.0)', '"""float64"""'], {}), "(2.0, 'float64')\n", (27418, 27434), False, 'import tvm\n'), ((27512, 27609), 'tvm.relay.nn.upsampling3d', 'relay.nn.upsampling3d', (['x'], {'scale_d': '(2)', 'scale_h': '(2)', 'scale_w': '(2)', 'layout': '"""NCDHW"""', 'method': '"""trilinear"""'}), "(x, scale_d=2, scale_h=2, scale_w=2, layout='NCDHW',\n method='trilinear')\n", (27533, 27609), False, 'from tvm import relay\n'), ((27616, 27633), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (27630, 27633), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((28107, 28204), 'tvm.relay.nn.upsampling3d', 'relay.nn.upsampling3d', (['x'], {'scale_d': '(2)', 'scale_h': '(2)', 'scale_w': '(2)', 'layout': '"""NCDHW"""', 'method': '"""trilinear"""'}), "(x, scale_d=2, scale_h=2, scale_w=2, layout='NCDHW',\n method='trilinear')\n", (28128, 28204), False, 'from tvm import relay\n'), ((28210, 28227), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (28224, 28227), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((28543, 28560), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (28557, 28560), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((28717, 28745), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': 'dshape'}), "('x', shape=dshape)\n", (28726, 28745), False, 'from tvm import relay\n'), ((28825, 28847), 'tvm.relay.Function', 'relay.Function', (['[x]', 'y'], {}), '([x], y)\n', (28839, 28847), False, 'from tvm import relay\n'), ((28996, 29006), 'tvm.relay.testing.ctx_list', 'ctx_list', ([], {}), '()\n', (29004, 29006), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((29453, 29470), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (29467, 29470), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((29621, 29662), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': 'dshape', 'dtype': 'dtype'}), "('x', shape=dshape, dtype=dtype)\n", (29630, 29662), False, 'from tvm import relay\n'), ((29742, 29764), 'tvm.relay.Function', 'relay.Function', (['[x]', 'y'], {}), '([x], y)\n', (29756, 29764), False, 'from tvm import relay\n'), ((29776, 29834), 'numpy.random.random_integers', 'np.random.random_integers', ([], {'low': '(-128)', 'high': '(128)', 'size': 'dshape'}), '(low=-128, high=128, size=dshape)\n', (29801, 29834), True, 'import numpy as np\n'), 
((29935, 29945), 'tvm.relay.testing.ctx_list', 'ctx_list', ([], {}), '()\n', (29943, 29945), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((30366, 30383), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (30380, 30383), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((30642, 30659), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (30656, 30659), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((30812, 30840), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': 'dshape'}), "('x', shape=dshape)\n", (30821, 30840), False, 'from tvm import relay\n'), ((30870, 30892), 'tvm.relay.Function', 'relay.Function', (['[x]', 'y'], {}), '([x], y)\n', (30884, 30892), False, 'from tvm import relay\n'), ((31027, 31037), 'tvm.relay.testing.ctx_list', 'ctx_list', ([], {}), '()\n', (31035, 31037), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((35120, 35148), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': 'dshape'}), "('x', shape=dshape)\n", (35129, 35148), False, 'from tvm import relay\n'), ((35157, 35264), 'tvm.relay.nn.avg_pool2d', 'relay.nn.avg_pool2d', (['x'], {'pool_size': '(kh, kw)', 'strides': '(sw, sw)', 'padding': '(ph, pw)', 'count_include_pad': '(False)'}), '(x, pool_size=(kh, kw), strides=(sw, sw), padding=(ph,\n pw), count_include_pad=False)\n', (35176, 35264), False, 'from tvm import relay\n'), ((35384, 35406), 'tvm.relay.Function', 'relay.Function', (['[x]', 'y'], {}), '([x], y)\n', (35398, 35406), False, 'from tvm import relay\n'), ((36049, 36070), 'numpy.maximum', 'np.maximum', (['b_np', '(0.0)'], {}), '(b_np, 0.0)\n', (36059, 36070), True, 'import numpy as np\n'), ((36111, 36121), 'tvm.relay.testing.ctx_list', 'ctx_list', ([], {}), '()\n', (36119, 36121), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((36537, 36562), 'tvm.relay.nn.batch_flatten', 'relay.nn.batch_flatten', (['x'], {}), '(x)\n', (36559, 36562), False, 'from tvm import relay\n'), ((36572, 36589), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (36586, 36589), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((36743, 36768), 'tvm.relay.nn.batch_flatten', 'relay.nn.batch_flatten', (['x'], {}), '(x)\n', (36765, 36768), False, 'from tvm import relay\n'), ((36778, 36795), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (36792, 36795), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((36940, 36965), 'tvm.relay.nn.batch_flatten', 'relay.nn.batch_flatten', (['x'], {}), '(x)\n', (36962, 36965), False, 'from tvm import relay\n'), ((36975, 36992), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (36989, 36992), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((37205, 37230), 'tvm.relay.nn.batch_flatten', 'relay.nn.batch_flatten', (['x'], {}), '(x)\n', (37227, 37230), False, 'from tvm import relay\n'), ((37240, 37257), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['z'], {}), '(z)\n', (37254, 37257), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((37332, 37354), 'tvm.relay.Function', 'relay.Function', (['[x]', 'z'], {}), '([x], z)\n', (37346, 37354), False, 'from tvm import relay\n'), ((37500, 37510), 'tvm.relay.testing.ctx_list', 'ctx_list', ([], {}), '()\n', (37508, 37510), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((38061, 38110), 'tvm.relay.nn.pad', 'relay.nn.pad', (['t', 
'((1, 1), (2, 2), (3, 3), (4, 4))'], {}), '(t, ((1, 1), (2, 2), (3, 3), (4, 4)))\n', (38073, 38110), False, 'from tvm import relay\n'), ((38151, 38168), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (38165, 38168), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((38404, 38453), 'tvm.relay.nn.pad', 'relay.nn.pad', (['t', '((1, 1), (2, 2), (3, 3), (4, 4))'], {}), '(t, ((1, 1), (2, 2), (3, 3), (4, 4)))\n', (38416, 38453), False, 'from tvm import relay\n'), ((38463, 38480), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (38477, 38480), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((39339, 39373), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': '(n, c, h, w)'}), "('x', shape=(n, c, h, w))\n", (39348, 39373), False, 'from tvm import relay\n'), ((39383, 39449), 'tvm.relay.nn.lrn', 'relay.nn.lrn', (['x'], {'size': '(10)', 'axis': '(2)', 'bias': '(0.5)', 'alpha': '(1e-05)', 'beta': '(0.75)'}), '(x, size=10, axis=2, bias=0.5, alpha=1e-05, beta=0.75)\n', (39395, 39449), False, 'from tvm import relay\n'), ((39487, 39504), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (39501, 39504), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((39746, 39818), 'tvm.relay.nn.lrn', 'relay.nn.lrn', (['x'], {'size': 'size', 'axis': 'axis', 'bias': 'bias', 'alpha': 'alpha', 'beta': 'beta'}), '(x, size=size, axis=axis, bias=bias, alpha=alpha, beta=beta)\n', (39758, 39818), False, 'from tvm import relay\n'), ((39828, 39845), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['z'], {}), '(z)\n', (39842, 39845), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((39918, 39940), 'tvm.relay.Function', 'relay.Function', (['[x]', 'z'], {}), '([x], z)\n', (39932, 39940), False, 'from tvm import relay\n'), ((40115, 40125), 'tvm.relay.testing.ctx_list', 'ctx_list', ([], {}), '()\n', (40123, 40125), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((40644, 40678), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': '(n, c, h, w)'}), "('x', shape=(n, c, h, w))\n", (40653, 40678), False, 'from tvm import relay\n'), ((40688, 40733), 'tvm.relay.nn.l2_normalize', 'relay.nn.l2_normalize', (['x'], {'eps': '(0.001)', 'axis': '[1]'}), '(x, eps=0.001, axis=[1])\n', (40709, 40733), False, 'from tvm import relay\n'), ((40769, 40786), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (40783, 40786), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((40987, 41035), 'tvm.relay.nn.l2_normalize', 'relay.nn.l2_normalize', (['x'], {'eps': '(0.001)', 'axis': '[axis]'}), '(x, eps=0.001, axis=[axis])\n', (41008, 41035), False, 'from tvm import relay\n'), ((41045, 41062), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['z'], {}), '(z)\n', (41059, 41062), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((41135, 41157), 'tvm.relay.Function', 'relay.Function', (['[x]', 'z'], {}), '([x], z)\n', (41149, 41157), False, 'from tvm import relay\n'), ((41321, 41331), 'tvm.relay.testing.ctx_list', 'ctx_list', ([], {}), '()\n', (41329, 41331), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((41886, 41926), 'numpy.reshape', 'np.reshape', (['data', '(shape[0], target_dim)'], {}), '(data, (shape[0], target_dim))\n', (41896, 41926), True, 'import numpy as np\n'), ((41964, 41992), 'tvm.relay.TensorType', 'relay.TensorType', (['(5, 10, 5)'], {}), '((5, 10, 5))\n', (41980, 41992), 
False, 'from tvm import relay\n'), ((42001, 42019), 'tvm.relay.Var', 'relay.Var', (['"""x"""', 't1'], {}), "('x', t1)\n", (42010, 42019), False, 'from tvm import relay\n'), ((42189, 42199), 'tvm.relay.testing.ctx_list', 'ctx_list', ([], {}), '()\n', (42197, 42199), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((42883, 43002), 'tvm.relay.nn.upsampling', 'relay.nn.upsampling', (['x'], {'scale_h': 'scale_h', 'scale_w': 'scale_w', 'layout': 'layout', 'method': 'method', 'align_corners': 'align_corners'}), '(x, scale_h=scale_h, scale_w=scale_w, layout=layout,\n method=method, align_corners=align_corners)\n', (42902, 43002), False, 'from tvm import relay\n'), ((43036, 43053), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (43050, 43053), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((43158, 43186), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': 'dshape'}), "('x', shape=dshape)\n", (43167, 43186), False, 'from tvm import relay\n'), ((43195, 43314), 'tvm.relay.nn.upsampling', 'relay.nn.upsampling', (['x'], {'scale_h': 'scale_h', 'scale_w': 'scale_w', 'layout': 'layout', 'method': 'method', 'align_corners': 'align_corners'}), '(x, scale_h=scale_h, scale_w=scale_w, layout=layout,\n method=method, align_corners=align_corners)\n', (43214, 43314), False, 'from tvm import relay\n'), ((43350, 43372), 'tvm.relay.Function', 'relay.Function', (['[x]', 'y'], {}), '([x], y)\n', (43364, 43372), False, 'from tvm import relay\n'), ((43740, 43750), 'tvm.relay.testing.ctx_list', 'ctx_list', ([], {}), '()\n', (43748, 43750), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((44831, 45008), 'tvm.relay.nn.upsampling3d', 'relay.nn.upsampling3d', (['x'], {'scale_d': 'scale_d', 'scale_h': 'scale_h', 'scale_w': 'scale_w', 'layout': 'layout', 'method': 'method', 'coordinate_transformation_mode': 'coordinate_transformation_mode'}), '(x, scale_d=scale_d, scale_h=scale_h, scale_w=scale_w,\n layout=layout, method=method, coordinate_transformation_mode=\n coordinate_transformation_mode)\n', (44852, 45008), False, 'from tvm import relay\n'), ((45072, 45089), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (45086, 45089), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((45194, 45222), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': 'dshape'}), "('x', shape=dshape)\n", (45203, 45222), False, 'from tvm import relay\n'), ((45231, 45408), 'tvm.relay.nn.upsampling3d', 'relay.nn.upsampling3d', (['x'], {'scale_d': 'scale_d', 'scale_h': 'scale_h', 'scale_w': 'scale_w', 'layout': 'layout', 'method': 'method', 'coordinate_transformation_mode': 'coordinate_transformation_mode'}), '(x, scale_d=scale_d, scale_h=scale_h, scale_w=scale_w,\n layout=layout, method=method, coordinate_transformation_mode=\n coordinate_transformation_mode)\n', (45252, 45408), False, 'from tvm import relay\n'), ((45469, 45491), 'tvm.relay.Function', 'relay.Function', (['[x]', 'y'], {}), '([x], y)\n', (45483, 45491), False, 'from tvm import relay\n'), ((45954, 45964), 'tvm.relay.testing.ctx_list', 'ctx_list', ([], {}), '()\n', (45962, 45964), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((48502, 48534), 'tvm.codegen.llvm_version_major', 'tvm.codegen.llvm_version_major', ([], {}), '()\n', (48532, 48534), False, 'import tvm\n'), ((52296, 52414), 'tvm.relay.nn.conv2d', 'relay.nn.conv2d', (['x', 'weight'], {'kernel_size': '(3, 3)', 'groups': '(64)', 'padding': '(1, 1)', 'dilation': '(1, 1)', 
'out_dtype': 'output_dtype'}), '(x, weight, kernel_size=(3, 3), groups=64, padding=(1, 1),\n dilation=(1, 1), out_dtype=output_dtype)\n', (52311, 52414), False, 'from tvm import relay\n'), ((52542, 52572), 'tvm.relay.Function', 'relay.Function', (['[x, weight]', 'y'], {}), '([x, weight], y)\n', (52556, 52572), False, 'from tvm import relay\n'), ((52780, 52812), 'tvm.codegen.llvm_version_major', 'tvm.codegen.llvm_version_major', ([], {}), '()\n', (52810, 52812), False, 'import tvm\n'), ((53282, 53367), 'tvm.relay.nn.bitserial_conv2d', 'relay.nn.bitserial_conv2d', (['x', 'w'], {'kernel_size': '(3, 3)', 'padding': '(0, 0)', 'channels': '(32)'}), '(x, w, kernel_size=(3, 3), padding=(0, 0), channels=32\n )\n', (53307, 53367), False, 'from tvm import relay\n'), ((53381, 53398), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (53395, 53398), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((53667, 53739), 'tvm.relay.nn.bitpack', 'relay.nn.bitpack', (['x'], {'bit_axis': '(4)', 'pack_axis': '(1)', 'pack_type': '"""uint16"""', 'bits': '(1)'}), "(x, bit_axis=4, pack_axis=1, pack_type='uint16', bits=1)\n", (53683, 53739), False, 'from tvm import relay\n'), ((53749, 53766), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (53763, 53766), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((1121, 1133), 'tvm.var', 'tvm.var', (['"""n"""'], {}), "('n')\n", (1128, 1133), False, 'import tvm\n'), ((1166, 1207), 'tvm.relay.ty.TensorType', 'relay.ty.TensorType', (['(n, c, w)', '"""float32"""'], {}), "((n, c, w), 'float32')\n", (1185, 1207), False, 'from tvm import relay\n'), ((1435, 1475), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, 2, 224)', '"""float32"""'], {}), "((n, 2, 224), 'float32')\n", (1451, 1475), False, 'from tvm import relay\n'), ((1523, 1562), 'tvm.relay.TensorType', 'relay.TensorType', (['(2, 10, 3)', '"""float32"""'], {}), "((2, 10, 3), 'float32')\n", (1539, 1562), False, 'from tvm import relay\n'), ((1630, 1642), 'tvm.var', 'tvm.var', (['"""n"""'], {}), "('n')\n", (1637, 1642), False, 'import tvm\n'), ((1675, 1710), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, c, w)', '"""int8"""'], {}), "((n, c, w), 'int8')\n", (1691, 1710), False, 'from tvm import relay\n'), ((1735, 1771), 'tvm.relay.TensorType', 'relay.TensorType', (['(2, 10, 3)', '"""int8"""'], {}), "((2, 10, 3), 'int8')\n", (1751, 1771), False, 'from tvm import relay\n'), ((1927, 1965), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, 2, 222)', '"""int32"""'], {}), "((n, 2, 222), 'int32')\n", (1943, 1965), False, 'from tvm import relay\n'), ((2058, 2070), 'tvm.var', 'tvm.var', (['"""n"""'], {}), "('n')\n", (2065, 2070), False, 'import tvm\n'), ((2103, 2139), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, c, w)', '"""uint8"""'], {}), "((n, c, w), 'uint8')\n", (2119, 2139), False, 'from tvm import relay\n'), ((2164, 2200), 'tvm.relay.TensorType', 'relay.TensorType', (['(2, 10, 3)', '"""int8"""'], {}), "((2, 10, 3), 'int8')\n", (2180, 2200), False, 'from tvm import relay\n'), ((2356, 2394), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, 2, 222)', '"""int32"""'], {}), "((n, 2, 222), 'int32')\n", (2372, 2394), False, 'from tvm import relay\n'), ((2474, 2509), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, w, c)', '"""int8"""'], {}), "((n, w, c), 'int8')\n", (2490, 2509), False, 'from tvm import relay\n'), ((2826, 2863), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, w, 16)', '"""int32"""'], {}), "((n, w, 16), 'int32')\n", 
(2842, 2863), False, 'from tvm import relay\n'), ((3233, 3274), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': 'dshape', 'dtype': 'dtype'}), "('x', shape=dshape, dtype=dtype)\n", (3242, 3274), False, 'from tvm import relay\n'), ((3287, 3314), 'tvm.relay.var', 'relay.var', (['"""w"""'], {'dtype': 'dtype'}), "('w', dtype=dtype)\n", (3296, 3314), False, 'from tvm import relay\n'), ((3327, 3393), 'tvm.relay.nn.conv1d', 'relay.nn.conv1d', (['x', 'w'], {'padding': 'padding', 'dilation': 'dilation'}), '(x, w, padding=padding, dilation=dilation, **attrs)\n', (3342, 3393), False, 'from tvm import relay\n'), ((3493, 3518), 'tvm.relay.Function', 'relay.Function', (['[x, w]', 'y'], {}), '([x, w], y)\n', (3507, 3518), False, 'from tvm import relay\n'), ((3833, 3843), 'tvm.relay.testing.ctx_list', 'ctx_list', ([], {}), '()\n', (3841, 3843), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((4761, 4778), 'tvm.size_var', 'tvm.size_var', (['"""n"""'], {}), "('n')\n", (4773, 4778), False, 'import tvm\n'), ((4816, 4860), 'tvm.relay.ty.TensorType', 'relay.ty.TensorType', (['(n, c, h, w)', '"""float32"""'], {}), "((n, c, h, w), 'float32')\n", (4835, 4860), False, 'from tvm import relay\n'), ((5093, 5138), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, 2, 224, 224)', '"""float32"""'], {}), "((n, 2, 224, 224), 'float32')\n", (5109, 5138), False, 'from tvm import relay\n'), ((5186, 5228), 'tvm.relay.TensorType', 'relay.TensorType', (['(2, 10, 3, 3)', '"""float32"""'], {}), "((2, 10, 3, 3), 'float32')\n", (5202, 5228), False, 'from tvm import relay\n'), ((5299, 5316), 'tvm.size_var', 'tvm.size_var', (['"""n"""'], {}), "('n')\n", (5311, 5316), False, 'import tvm\n'), ((5354, 5392), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, c, h, w)', '"""int8"""'], {}), "((n, c, h, w), 'int8')\n", (5370, 5392), False, 'from tvm import relay\n'), ((5417, 5456), 'tvm.relay.TensorType', 'relay.TensorType', (['(2, 10, 3, 3)', '"""int8"""'], {}), "((2, 10, 3, 3), 'int8')\n", (5433, 5456), False, 'from tvm import relay\n'), ((5612, 5655), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, 2, 222, 222)', '"""int32"""'], {}), "((n, 2, 222, 222), 'int32')\n", (5628, 5655), False, 'from tvm import relay\n'), ((5751, 5768), 'tvm.size_var', 'tvm.size_var', (['"""n"""'], {}), "('n')\n", (5763, 5768), False, 'import tvm\n'), ((5806, 5845), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, c, h, w)', '"""uint8"""'], {}), "((n, c, h, w), 'uint8')\n", (5822, 5845), False, 'from tvm import relay\n'), ((5870, 5909), 'tvm.relay.TensorType', 'relay.TensorType', (['(2, 10, 3, 3)', '"""int8"""'], {}), "((2, 10, 3, 3), 'int8')\n", (5886, 5909), False, 'from tvm import relay\n'), ((6065, 6108), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, 2, 222, 222)', '"""int32"""'], {}), "((n, 2, 222, 222), 'int32')\n", (6081, 6108), False, 'from tvm import relay\n'), ((6211, 6265), 'tvm.relay.TensorType', 'relay.TensorType', (['(n // 4, c // 4, h, w, 4, 4)', '"""int8"""'], {}), "((n // 4, c // 4, h, w, 4, 4), 'int8')\n", (6227, 6265), False, 'from tvm import relay\n'), ((6638, 6687), 'tvm.relay.TensorType', 'relay.TensorType', (['(1, 4, 224, 224, 4, 4)', '"""int32"""'], {}), "((1, 4, 224, 224, 4, 4), 'int32')\n", (6654, 6687), False, 'from tvm import relay\n'), ((6735, 6779), 'tvm.relay.TensorType', 'relay.TensorType', (['(4, 8, 3, 3, 4, 4)', '"""int8"""'], {}), "((4, 8, 3, 3, 4, 4), 'int8')\n", (6751, 6779), False, 'from tvm import relay\n'), ((6868, 6906), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, h, w, c)', 
'"""int8"""'], {}), "((n, h, w, c), 'int8')\n", (6884, 6906), False, 'from tvm import relay\n'), ((7229, 7269), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, h, w, 16)', '"""int32"""'], {}), "((n, h, w, 16), 'int32')\n", (7245, 7269), False, 'from tvm import relay\n'), ((7678, 7719), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': 'dshape', 'dtype': 'dtype'}), "('x', shape=dshape, dtype=dtype)\n", (7687, 7719), False, 'from tvm import relay\n'), ((7732, 7759), 'tvm.relay.var', 'relay.var', (['"""w"""'], {'dtype': 'dtype'}), "('w', dtype=dtype)\n", (7741, 7759), False, 'from tvm import relay\n'), ((7772, 7858), 'tvm.relay.nn.conv2d', 'relay.nn.conv2d', (['x', 'w'], {'padding': 'padding', 'dilation': 'dilation', 'groups': 'groups'}), '(x, w, padding=padding, dilation=dilation, groups=groups, **\n attrs)\n', (7787, 7858), False, 'from tvm import relay\n'), ((7981, 8006), 'tvm.relay.Function', 'relay.Function', (['[x, w]', 'y'], {}), '([x, w], y)\n', (7995, 8006), False, 'from tvm import relay\n'), ((8542, 8552), 'tvm.relay.testing.ctx_list', 'ctx_list', ([], {}), '()\n', (8550, 8552), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((9083, 9124), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': 'dshape', 'dtype': 'dtype'}), "('x', shape=dshape, dtype=dtype)\n", (9092, 9124), False, 'from tvm import relay\n'), ((9137, 9164), 'tvm.relay.var', 'relay.var', (['"""w"""'], {'dtype': 'dtype'}), "('w', dtype=dtype)\n", (9146, 9164), False, 'from tvm import relay\n'), ((9177, 9263), 'tvm.relay.nn.conv2d', 'relay.nn.conv2d', (['x', 'w'], {'padding': 'padding', 'dilation': 'dilation', 'groups': 'groups'}), '(x, w, padding=padding, dilation=dilation, groups=groups, **\n attrs)\n', (9192, 9263), False, 'from tvm import relay\n'), ((9386, 9411), 'tvm.relay.Function', 'relay.Function', (['[x, w]', 'y'], {}), '([x, w], y)\n', (9400, 9411), False, 'from tvm import relay\n'), ((9426, 9444), 'tvm.relay.Module', 'tvm.relay.Module', ([], {}), '()\n', (9442, 9444), False, 'import tvm\n'), ((10701, 10715), 'tvm.contrib.util.tempdir', 'util.tempdir', ([], {}), '()\n', (10713, 10715), False, 'from tvm.contrib import util\n'), ((14264, 14305), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': 'dshape', 'dtype': 'dtype'}), "('x', shape=dshape, dtype=dtype)\n", (14273, 14305), False, 'from tvm import relay\n'), ((14318, 14359), 'tvm.relay.var', 'relay.var', (['"""w"""'], {'shape': 'kshape', 'dtype': 'dtype'}), "('w', shape=kshape, dtype=dtype)\n", (14327, 14359), False, 'from tvm import relay\n'), ((14372, 14458), 'tvm.relay.nn.conv2d', 'relay.nn.conv2d', (['x', 'w'], {'padding': 'padding', 'dilation': 'dilation', 'groups': 'groups'}), '(x, w, padding=padding, dilation=dilation, groups=groups, **\n attrs)\n', (14387, 14458), False, 'from tvm import relay\n'), ((14581, 14606), 'tvm.relay.Function', 'relay.Function', (['[x, w]', 'y'], {}), '([x, w], y)\n', (14595, 14606), False, 'from tvm import relay\n'), ((14621, 14635), 'tvm.relay.Module', 'relay.Module', ([], {}), '()\n', (14633, 14635), False, 'from tvm import relay\n'), ((16594, 16611), 'tvm.size_var', 'tvm.size_var', (['"""n"""'], {}), "('n')\n", (16606, 16611), False, 'import tvm\n'), ((16654, 16701), 'tvm.relay.ty.TensorType', 'relay.ty.TensorType', (['(n, c, d, h, w)', '"""float32"""'], {}), "((n, c, d, h, w), 'float32')\n", (16673, 16701), False, 'from tvm import relay\n'), ((16940, 16990), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, 2, 224, 224, 224)', '"""float32"""'], {}), "((n, 2, 224, 224, 224), 'float32')\n", 
(16956, 16990), False, 'from tvm import relay\n'), ((17038, 17083), 'tvm.relay.TensorType', 'relay.TensorType', (['(2, 10, 3, 3, 3)', '"""float32"""'], {}), "((2, 10, 3, 3, 3), 'float32')\n", (17054, 17083), False, 'from tvm import relay\n'), ((17157, 17174), 'tvm.size_var', 'tvm.size_var', (['"""n"""'], {}), "('n')\n", (17169, 17174), False, 'import tvm\n'), ((17217, 17258), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, c, d, h, w)', '"""int8"""'], {}), "((n, c, d, h, w), 'int8')\n", (17233, 17258), False, 'from tvm import relay\n'), ((17283, 17325), 'tvm.relay.TensorType', 'relay.TensorType', (['(2, 10, 3, 3, 3)', '"""int8"""'], {}), "((2, 10, 3, 3, 3), 'int8')\n", (17299, 17325), False, 'from tvm import relay\n'), ((17481, 17529), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, 2, 222, 222, 222)', '"""int32"""'], {}), "((n, 2, 222, 222, 222), 'int32')\n", (17497, 17529), False, 'from tvm import relay\n'), ((17628, 17645), 'tvm.size_var', 'tvm.size_var', (['"""n"""'], {}), "('n')\n", (17640, 17645), False, 'import tvm\n'), ((17688, 17730), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, c, d, h, w)', '"""uint8"""'], {}), "((n, c, d, h, w), 'uint8')\n", (17704, 17730), False, 'from tvm import relay\n'), ((17755, 17797), 'tvm.relay.TensorType', 'relay.TensorType', (['(2, 10, 3, 3, 3)', '"""int8"""'], {}), "((2, 10, 3, 3, 3), 'int8')\n", (17771, 17797), False, 'from tvm import relay\n'), ((17953, 18001), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, 2, 222, 222, 222)', '"""int32"""'], {}), "((n, 2, 222, 222, 222), 'int32')\n", (17969, 18001), False, 'from tvm import relay\n'), ((18099, 18140), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, d, h, w, c)', '"""int8"""'], {}), "((n, d, h, w, c), 'int8')\n", (18115, 18140), False, 'from tvm import relay\n'), ((18470, 18513), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, d, h, w, 16)', '"""int32"""'], {}), "((n, d, h, w, 16), 'int32')\n", (18486, 18513), False, 'from tvm import relay\n'), ((18928, 18969), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': 'dshape', 'dtype': 'dtype'}), "('x', shape=dshape, dtype=dtype)\n", (18937, 18969), False, 'from tvm import relay\n'), ((18982, 19009), 'tvm.relay.var', 'relay.var', (['"""w"""'], {'dtype': 'dtype'}), "('w', dtype=dtype)\n", (18991, 19009), False, 'from tvm import relay\n'), ((19022, 19108), 'tvm.relay.nn.conv3d', 'relay.nn.conv3d', (['x', 'w'], {'padding': 'padding', 'dilation': 'dilation', 'groups': 'groups'}), '(x, w, padding=padding, dilation=dilation, groups=groups, **\n attrs)\n', (19037, 19108), False, 'from tvm import relay\n'), ((19231, 19256), 'tvm.relay.Function', 'relay.Function', (['[x, w]', 'y'], {}), '([x, w], y)\n', (19245, 19256), False, 'from tvm import relay\n'), ((19793, 19803), 'tvm.relay.testing.ctx_list', 'ctx_list', ([], {}), '()\n', (19801, 19803), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((20718, 20759), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': 'dshape', 'dtype': 'dtype'}), "('x', shape=dshape, dtype=dtype)\n", (20727, 20759), False, 'from tvm import relay\n'), ((20772, 20799), 'tvm.relay.var', 'relay.var', (['"""w"""'], {'dtype': 'dtype'}), "('w', dtype=dtype)\n", (20781, 20799), False, 'from tvm import relay\n'), ((20812, 20941), 'tvm.relay.nn.conv3d', 'relay.nn.conv3d', (['x', 'w'], {'padding': 'padding', 'dilation': 'dilation', 'groups': 'groups', 'data_layout': '"""NDHWC"""', 'kernel_layout': '"""DHWIO"""'}), "(x, w, padding=padding, dilation=dilation, groups=groups,\n data_layout='NDHWC', 
kernel_layout='DHWIO', **attrs)\n", (20827, 20941), False, 'from tvm import relay\n'), ((21093, 21118), 'tvm.relay.Function', 'relay.Function', (['[x, w]', 'y'], {}), '([x, w], y)\n', (21107, 21118), False, 'from tvm import relay\n'), ((21624, 21634), 'tvm.relay.testing.ctx_list', 'ctx_list', ([], {}), '()\n', (21632, 21634), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((22257, 22274), 'tvm.size_var', 'tvm.size_var', (['"""n"""'], {}), "('n')\n", (22269, 22274), False, 'import tvm\n'), ((22310, 22351), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, c, h, w)', '"""float32"""'], {}), "((n, c, h, w), 'float32')\n", (22326, 22351), False, 'from tvm import relay\n'), ((22376, 22398), 'tvm.relay.IncompleteType', 'relay.IncompleteType', ([], {}), '()\n', (22396, 22398), False, 'from tvm import relay\n'), ((22687, 22731), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, 15, 10, 12)', '"""float32"""'], {}), "((n, 15, 10, 12), 'float32')\n", (22703, 22731), False, 'from tvm import relay\n'), ((22779, 22822), 'tvm.relay.TensorType', 'relay.TensorType', (['(10, 15, 3, 3)', '"""float32"""'], {}), "((10, 15, 3, 3), 'float32')\n", (22795, 22822), False, 'from tvm import relay\n'), ((22893, 22910), 'tvm.size_var', 'tvm.size_var', (['"""n"""'], {}), "('n')\n", (22905, 22910), False, 'import tvm\n'), ((22946, 22987), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, h, w, c)', '"""float32"""'], {}), "((n, h, w, c), 'float32')\n", (22962, 22987), False, 'from tvm import relay\n'), ((23012, 23055), 'tvm.relay.TensorType', 'relay.TensorType', (['(12, 11, 5, 5)', '"""float32"""'], {}), "((12, 11, 5, 5), 'float32')\n", (23028, 23055), False, 'from tvm import relay\n'), ((23312, 23356), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, 15, 15, 11)', '"""float32"""'], {}), "((n, 15, 15, 11), 'float32')\n", (23328, 23356), False, 'from tvm import relay\n'), ((24157, 24211), 'tvm.relay.create_executor', 'relay.create_executor', (['"""graph"""'], {'ctx': 'ctx', 'target': 'target'}), "('graph', ctx=ctx, target=target)\n", (24178, 24211), False, 'from tvm import relay\n'), ((26084, 26138), 'tvm.relay.create_executor', 'relay.create_executor', (['"""graph"""'], {'ctx': 'ctx', 'target': 'target'}), "('graph', ctx=ctx, target=target)\n", (26105, 26138), False, 'from tvm import relay\n'), ((26333, 26350), 'tvm.size_var', 'tvm.size_var', (['"""n"""'], {}), "('n')\n", (26345, 26350), False, 'import tvm\n'), ((26352, 26369), 'tvm.size_var', 'tvm.size_var', (['"""c"""'], {}), "('c')\n", (26364, 26369), False, 'import tvm\n'), ((26371, 26388), 'tvm.size_var', 'tvm.size_var', (['"""h"""'], {}), "('h')\n", (26383, 26388), False, 'import tvm\n'), ((26390, 26407), 'tvm.size_var', 'tvm.size_var', (['"""w"""'], {}), "('w')\n", (26402, 26407), False, 'import tvm\n'), ((26469, 26510), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, c, h, w)', '"""float32"""'], {}), "((n, c, h, w), 'float32')\n", (26485, 26510), False, 'from tvm import relay\n'), ((26928, 26945), 'tvm.size_var', 'tvm.size_var', (['"""n"""'], {}), "('n')\n", (26940, 26945), False, 'import tvm\n'), ((26947, 26964), 'tvm.size_var', 'tvm.size_var', (['"""c"""'], {}), "('c')\n", (26959, 26964), False, 'import tvm\n'), ((26988, 27033), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, c, 100, 200)', '"""float32"""'], {}), "((n, c, 100, 200), 'float32')\n", (27004, 27033), False, 'from tvm import relay\n'), ((27179, 27224), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, c, 200, 400)', '"""float32"""'], {}), "((n, c, 200, 400), 
'float32')\n", (27195, 27224), False, 'from tvm import relay\n'), ((27282, 27299), 'tvm.size_var', 'tvm.size_var', (['"""n"""'], {}), "('n')\n", (27294, 27299), False, 'import tvm\n'), ((27301, 27318), 'tvm.size_var', 'tvm.size_var', (['"""c"""'], {}), "('c')\n", (27313, 27318), False, 'import tvm\n'), ((27341, 27358), 'tvm.size_var', 'tvm.size_var', (['"""d"""'], {}), "('d')\n", (27353, 27358), False, 'import tvm\n'), ((27360, 27377), 'tvm.size_var', 'tvm.size_var', (['"""h"""'], {}), "('h')\n", (27372, 27377), False, 'import tvm\n'), ((27379, 27396), 'tvm.size_var', 'tvm.size_var', (['"""w"""'], {}), "('w')\n", (27391, 27396), False, 'import tvm\n'), ((27458, 27502), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, c, d, h, w)', '"""float32"""'], {}), "((n, c, d, h, w), 'float32')\n", (27474, 27502), False, 'from tvm import relay\n'), ((27987, 28004), 'tvm.size_var', 'tvm.size_var', (['"""n"""'], {}), "('n')\n", (27999, 28004), False, 'import tvm\n'), ((28006, 28023), 'tvm.size_var', 'tvm.size_var', (['"""c"""'], {}), "('c')\n", (28018, 28023), False, 'import tvm\n'), ((28047, 28097), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, c, 100, 100, 200)', '"""float32"""'], {}), "((n, c, 100, 100, 200), 'float32')\n", (28063, 28097), False, 'from tvm import relay\n'), ((28258, 28308), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, c, 200, 200, 400)', '"""float32"""'], {}), "((n, c, 200, 200, 400), 'float32')\n", (28274, 28308), False, 'from tvm import relay\n'), ((28362, 28379), 'tvm.size_var', 'tvm.size_var', (['"""n"""'], {}), "('n')\n", (28374, 28379), False, 'import tvm\n'), ((28417, 28458), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, c, h, w)', '"""float32"""'], {}), "((n, c, h, w), 'float32')\n", (28433, 28458), False, 'from tvm import relay\n'), ((28591, 28637), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, 10, 224, 224)', '"""float32"""'], {}), "((n, 10, 224, 224), 'float32')\n", (28607, 28637), False, 'from tvm import relay\n'), ((29025, 29079), 'tvm.relay.create_executor', 'relay.create_executor', (['"""graph"""'], {'ctx': 'ctx', 'target': 'target'}), "('graph', ctx=ctx, target=target)\n", (29046, 29079), False, 'from tvm import relay\n'), ((29276, 29293), 'tvm.size_var', 'tvm.size_var', (['"""n"""'], {}), "('n')\n", (29288, 29293), False, 'import tvm\n'), ((29331, 29368), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, c, h, w)', 'dtype'], {}), '((n, c, h, w), dtype)\n', (29347, 29368), False, 'from tvm import relay\n'), ((29501, 29543), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, 10, 224, 224)', 'dtype'], {}), '((n, 10, 224, 224), dtype)\n', (29517, 29543), False, 'from tvm import relay\n'), ((29964, 30018), 'tvm.relay.create_executor', 'relay.create_executor', (['"""graph"""'], {'ctx': 'ctx', 'target': 'target'}), "('graph', ctx=ctx, target=target)\n", (29985, 30018), False, 'from tvm import relay\n'), ((30211, 30228), 'tvm.size_var', 'tvm.size_var', (['"""n"""'], {}), "('n')\n", (30223, 30228), False, 'import tvm\n'), ((30230, 30247), 'tvm.size_var', 'tvm.size_var', (['"""c"""'], {}), "('c')\n", (30242, 30247), False, 'import tvm\n'), ((30281, 30322), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, h, w, c)', '"""float32"""'], {}), "((n, h, w, c), 'float32')\n", (30297, 30322), False, 'from tvm import relay\n'), ((30414, 30455), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, 1, 1, c)', '"""float32"""'], {}), "((n, 1, 1, c), 'float32')\n", (30430, 30455), False, 'from tvm import relay\n'), ((30474, 30491), 'tvm.size_var', 'tvm.size_var', 
(['"""n"""'], {}), "('n')\n", (30486, 30491), False, 'import tvm\n'), ((30493, 30510), 'tvm.size_var', 'tvm.size_var', (['"""c"""'], {}), "('c')\n", (30505, 30510), False, 'import tvm\n'), ((30512, 30529), 'tvm.size_var', 'tvm.size_var', (['"""h"""'], {}), "('h')\n", (30524, 30529), False, 'import tvm\n'), ((30531, 30548), 'tvm.size_var', 'tvm.size_var', (['"""w"""'], {}), "('w')\n", (30543, 30548), False, 'import tvm\n'), ((30572, 30613), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, c, h, w)', '"""float32"""'], {}), "((n, c, h, w), 'float32')\n", (30588, 30613), False, 'from tvm import relay\n'), ((30690, 30731), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, c, 1, 1)', '"""float32"""'], {}), "((n, c, 1, 1), 'float32')\n", (30706, 30731), False, 'from tvm import relay\n'), ((31056, 31110), 'tvm.relay.create_executor', 'relay.create_executor', (['"""graph"""'], {'ctx': 'ctx', 'target': 'target'}), "('graph', ctx=ctx, target=target)\n", (31077, 31110), False, 'from tvm import relay\n'), ((31851, 31868), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (31865, 31868), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((32036, 32064), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': 'dshape'}), "('x', shape=dshape)\n", (32045, 32064), False, 'from tvm import relay\n'), ((32209, 32231), 'tvm.relay.Function', 'relay.Function', (['[x]', 'y'], {}), '([x], y)\n', (32223, 32231), False, 'from tvm import relay\n'), ((32473, 32483), 'tvm.relay.testing.ctx_list', 'ctx_list', ([], {}), '()\n', (32481, 32483), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((33118, 33135), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['y'], {}), '(y)\n', (33132, 33135), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((33319, 33347), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': 'dshape'}), "('x', shape=dshape)\n", (33328, 33347), False, 'from tvm import relay\n'), ((33503, 33525), 'tvm.relay.Function', 'relay.Function', (['[x]', 'y'], {}), '([x], y)\n', (33517, 33525), False, 'from tvm import relay\n'), ((34032, 34042), 'tvm.relay.testing.ctx_list', 'ctx_list', ([], {}), '()\n', (34040, 34042), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((36140, 36194), 'tvm.relay.create_executor', 'relay.create_executor', (['"""graph"""'], {'ctx': 'ctx', 'target': 'target'}), "('graph', ctx=ctx, target=target)\n", (36161, 36194), False, 'from tvm import relay\n'), ((36380, 36398), 'tvm.size_var', 'tvm.size_var', (['"""d1"""'], {}), "('d1')\n", (36392, 36398), False, 'import tvm\n'), ((36400, 36418), 'tvm.size_var', 'tvm.size_var', (['"""d2"""'], {}), "('d2')\n", (36412, 36418), False, 'import tvm\n'), ((36420, 36438), 'tvm.size_var', 'tvm.size_var', (['"""d3"""'], {}), "('d3')\n", (36432, 36438), False, 'import tvm\n'), ((36440, 36458), 'tvm.size_var', 'tvm.size_var', (['"""d4"""'], {}), "('d4')\n", (36452, 36458), False, 'import tvm\n'), ((36482, 36527), 'tvm.relay.TensorType', 'relay.TensorType', (['(d1, d2, d3, d4)', '"""float32"""'], {}), "((d1, d2, d3, d4), 'float32')\n", (36498, 36527), False, 'from tvm import relay\n'), ((36620, 36667), 'tvm.relay.TensorType', 'relay.TensorType', (['(d1, d2 * d3 * d4)', '"""float32"""'], {}), "((d1, d2 * d3 * d4), 'float32')\n", (36636, 36667), False, 'from tvm import relay\n'), ((36692, 36733), 'tvm.relay.TensorType', 'relay.TensorType', (['(3, 2, 4, 3)', '"""float32"""'], {}), "((3, 2, 4, 3), 'float32')\n", (36708, 36733), False, 'from tvm import 
relay\n'), ((36826, 36862), 'tvm.relay.TensorType', 'relay.TensorType', (['(3, 24)', '"""float32"""'], {}), "((3, 24), 'float32')\n", (36842, 36862), False, 'from tvm import relay\n'), ((36887, 36930), 'tvm.relay.TensorType', 'relay.TensorType', (['(d1, 2, d3, 3)', '"""float32"""'], {}), "((d1, 2, d3, 3), 'float32')\n", (36903, 36930), False, 'from tvm import relay\n'), ((37023, 37068), 'tvm.relay.TensorType', 'relay.TensorType', (['(d1, 2 * d3 * 3)', '"""float32"""'], {}), "((d1, 2 * d3 * 3), 'float32')\n", (37039, 37068), False, 'from tvm import relay\n'), ((37165, 37195), 'tvm.relay.TensorType', 'relay.TensorType', (['shape', 'dtype'], {}), '(shape, dtype)\n', (37181, 37195), False, 'from tvm import relay\n'), ((37288, 37320), 'tvm.relay.TensorType', 'relay.TensorType', (['o_shape', 'dtype'], {}), '(o_shape, dtype)\n', (37304, 37320), False, 'from tvm import relay\n'), ((37529, 37583), 'tvm.relay.create_executor', 'relay.create_executor', (['"""graph"""'], {'ctx': 'ctx', 'target': 'target'}), "('graph', ctx=ctx, target=target)\n", (37550, 37583), False, 'from tvm import relay\n'), ((37601, 37655), 'tvm.relay.create_executor', 'relay.create_executor', (['"""debug"""'], {'ctx': 'ctx', 'target': 'target'}), "('debug', ctx=ctx, target=target)\n", (37622, 37655), False, 'from tvm import relay\n'), ((38010, 38051), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, c, h, w)', '"""float32"""'], {}), "((n, c, h, w), 'float32')\n", (38026, 38051), False, 'from tvm import relay\n'), ((38199, 38241), 'tvm.relay.TensorType', 'relay.TensorType', (['(3, 6, 9, 12)', '"""float32"""'], {}), "((3, 6, 9, 12), 'float32')\n", (38215, 38241), False, 'from tvm import relay\n'), ((38287, 38304), 'tvm.size_var', 'tvm.size_var', (['"""n"""'], {}), "('n')\n", (38299, 38304), False, 'import tvm\n'), ((38312, 38329), 'tvm.size_var', 'tvm.size_var', (['"""w"""'], {}), "('w')\n", (38324, 38329), False, 'import tvm\n'), ((38353, 38394), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, c, h, w)', '"""float32"""'], {}), "((n, c, h, w), 'float32')\n", (38369, 38394), False, 'from tvm import relay\n'), ((38511, 38560), 'tvm.relay.TensorType', 'relay.TensorType', (['(n + 2, 6, 9, w + 8)', '"""float32"""'], {}), "((n + 2, 6, 9, w + 8), 'float32')\n", (38527, 38560), False, 'from tvm import relay\n'), ((38651, 38679), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': 'dshape'}), "('x', shape=dshape)\n", (38660, 38679), False, 'from tvm import relay\n'), ((38692, 38741), 'tvm.relay.nn.pad', 'relay.nn.pad', (['x', '((1, 1), (2, 2), (3, 3), (4, 4))'], {}), '(x, ((1, 1), (2, 2), (3, 3), (4, 4)))\n', (38704, 38741), False, 'from tvm import relay\n'), ((38757, 38779), 'tvm.relay.Function', 'relay.Function', (['[x]', 'y'], {}), '([x], y)\n', (38771, 38779), False, 'from tvm import relay\n'), ((38858, 38916), 'numpy.pad', 'np.pad', (['data', '((1, 1), (2, 2), (3, 3), (4, 4))', '"""constant"""'], {}), "(data, ((1, 1), (2, 2), (3, 3), (4, 4)), 'constant')\n", (38864, 38916), True, 'import numpy as np\n'), ((38944, 38954), 'tvm.relay.testing.ctx_list', 'ctx_list', ([], {}), '()\n', (38952, 38954), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((39256, 39273), 'tvm.size_var', 'tvm.size_var', (['"""n"""'], {}), "('n')\n", (39268, 39273), False, 'import tvm\n'), ((39275, 39292), 'tvm.size_var', 'tvm.size_var', (['"""c"""'], {}), "('c')\n", (39287, 39292), False, 'import tvm\n'), ((39294, 39311), 'tvm.size_var', 'tvm.size_var', (['"""h"""'], {}), "('h')\n", (39306, 39311), False, 'import tvm\n'), ((39313, 
39330), 'tvm.size_var', 'tvm.size_var', (['"""w"""'], {}), "('w')\n", (39325, 39330), False, 'import tvm\n'), ((39535, 39565), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, c, h, w)'], {}), '((n, c, h, w))\n', (39551, 39565), False, 'from tvm import relay\n'), ((39640, 39670), 'tvm.relay.TensorType', 'relay.TensorType', (['shape', 'dtype'], {}), '(shape, dtype)\n', (39656, 39670), False, 'from tvm import relay\n'), ((39876, 39906), 'tvm.relay.TensorType', 'relay.TensorType', (['shape', 'dtype'], {}), '(shape, dtype)\n', (39892, 39906), False, 'from tvm import relay\n'), ((40144, 40198), 'tvm.relay.create_executor', 'relay.create_executor', (['"""graph"""'], {'ctx': 'ctx', 'target': 'target'}), "('graph', ctx=ctx, target=target)\n", (40165, 40198), False, 'from tvm import relay\n'), ((40216, 40270), 'tvm.relay.create_executor', 'relay.create_executor', (['"""debug"""'], {'ctx': 'ctx', 'target': 'target'}), "('debug', ctx=ctx, target=target)\n", (40237, 40270), False, 'from tvm import relay\n'), ((40561, 40578), 'tvm.size_var', 'tvm.size_var', (['"""n"""'], {}), "('n')\n", (40573, 40578), False, 'import tvm\n'), ((40580, 40597), 'tvm.size_var', 'tvm.size_var', (['"""c"""'], {}), "('c')\n", (40592, 40597), False, 'import tvm\n'), ((40599, 40616), 'tvm.size_var', 'tvm.size_var', (['"""h"""'], {}), "('h')\n", (40611, 40616), False, 'import tvm\n'), ((40618, 40635), 'tvm.size_var', 'tvm.size_var', (['"""w"""'], {}), "('w')\n", (40630, 40635), False, 'import tvm\n'), ((40817, 40847), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, c, h, w)'], {}), '((n, c, h, w))\n', (40833, 40847), False, 'from tvm import relay\n'), ((40922, 40952), 'tvm.relay.TensorType', 'relay.TensorType', (['shape', 'dtype'], {}), '(shape, dtype)\n', (40938, 40952), False, 'from tvm import relay\n'), ((41093, 41123), 'tvm.relay.TensorType', 'relay.TensorType', (['shape', 'dtype'], {}), '(shape, dtype)\n', (41109, 41123), False, 'from tvm import relay\n'), ((41350, 41404), 'tvm.relay.create_executor', 'relay.create_executor', (['"""graph"""'], {'ctx': 'ctx', 'target': 'target'}), "('graph', ctx=ctx, target=target)\n", (41371, 41404), False, 'from tvm import relay\n'), ((41422, 41476), 'tvm.relay.create_executor', 'relay.create_executor', (['"""debug"""'], {'ctx': 'ctx', 'target': 'target'}), "('debug', ctx=ctx, target=target)\n", (41443, 41476), False, 'from tvm import relay\n'), ((42051, 42076), 'tvm.relay.nn.batch_flatten', 'relay.nn.batch_flatten', (['x'], {}), '(x)\n', (42073, 42076), False, 'from tvm import relay\n'), ((42217, 42271), 'tvm.relay.create_executor', 'relay.create_executor', (['"""graph"""'], {'ctx': 'ctx', 'target': 'target'}), "('graph', ctx=ctx, target=target)\n", (42238, 42271), False, 'from tvm import relay\n'), ((42467, 42484), 'tvm.size_var', 'tvm.size_var', (['"""n"""'], {}), "('n')\n", (42479, 42484), False, 'import tvm\n'), ((42835, 42873), 'tvm.relay.TensorType', 'relay.TensorType', (['((n,) + ishape)', 'dtype'], {}), '((n,) + ishape, dtype)\n', (42851, 42873), False, 'from tvm import relay\n'), ((43084, 43122), 'tvm.relay.TensorType', 'relay.TensorType', (['((n,) + oshape)', 'dtype'], {}), '((n,) + oshape, dtype)\n', (43100, 43122), False, 'from tvm import relay\n'), ((43771, 43825), 'tvm.relay.create_executor', 'relay.create_executor', (['"""graph"""'], {'ctx': 'ctx', 'target': 'target'}), "('graph', ctx=ctx, target=target)\n", (43792, 43825), False, 'from tvm import relay\n'), ((44271, 44288), 'tvm.size_var', 'tvm.size_var', (['"""n"""'], {}), "('n')\n", (44283, 44288), False, 'import 
tvm\n'), ((44783, 44821), 'tvm.relay.TensorType', 'relay.TensorType', (['((n,) + ishape)', 'dtype'], {}), '((n,) + ishape, dtype)\n', (44799, 44821), False, 'from tvm import relay\n'), ((45120, 45158), 'tvm.relay.TensorType', 'relay.TensorType', (['((n,) + oshape)', 'dtype'], {}), '((n,) + oshape, dtype)\n', (45136, 45158), False, 'from tvm import relay\n'), ((45985, 46039), 'tvm.relay.create_executor', 'relay.create_executor', (['"""graph"""'], {'ctx': 'ctx', 'target': 'target'}), "('graph', ctx=ctx, target=target)\n", (46006, 46039), False, 'from tvm import relay\n'), ((47297, 47479), 'tvm.relay.nn.conv2d', 'relay.nn.conv2d', (['x', 'weight'], {'kernel_size': '(ch, cw)', 'channels': 'oc', 'padding': '(1, 1)', 'dilation': '(1, 1)', 'data_layout': 'data_layout', 'kernel_layout': 'kernel_layout', 'out_dtype': 'output_dtype'}), '(x, weight, kernel_size=(ch, cw), channels=oc, padding=(1, 1\n ), dilation=(1, 1), data_layout=data_layout, kernel_layout=\n kernel_layout, out_dtype=output_dtype)\n', (47312, 47479), False, 'from tvm import relay\n'), ((47681, 47711), 'tvm.relay.Function', 'relay.Function', (['[x, weight]', 'y'], {}), '([x, weight], y)\n', (47695, 47711), False, 'from tvm import relay\n'), ((52131, 52172), 'tvm.relay.TensorType', 'relay.TensorType', (['data_shape', 'input_dtype'], {}), '(data_shape, input_dtype)\n', (52147, 52172), False, 'from tvm import relay\n'), ((52241, 52285), 'tvm.relay.TensorType', 'relay.TensorType', (['kernel_shape', 'weight_dtype'], {}), '(kernel_shape, weight_dtype)\n', (52257, 52285), False, 'from tvm import relay\n'), ((52585, 52614), 'numpy.random.rand', 'np.random.rand', (['*kernel_shape'], {}), '(*kernel_shape)\n', (52599, 52614), True, 'import numpy as np\n'), ((53106, 53123), 'tvm.size_var', 'tvm.size_var', (['"""n"""'], {}), "('n')\n", (53118, 53123), False, 'import tvm\n'), ((53161, 53203), 'tvm.relay.ty.TensorType', 'relay.ty.TensorType', (['(n, c, h, w)', '"""int16"""'], {}), "((n, c, h, w), 'int16')\n", (53180, 53203), False, 'from tvm import relay\n'), ((53228, 53272), 'tvm.relay.ty.TensorType', 'relay.ty.TensorType', (['(32, 32, 3, 3)', '"""int16"""'], {}), "((32, 32, 3, 3), 'int16')\n", (53247, 53272), False, 'from tvm import relay\n'), ((53430, 53474), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, 32, 222, 222)', '"""int16"""'], {}), "((n, 32, 222, 222), 'int16')\n", (53446, 53474), False, 'from tvm import relay\n'), ((53615, 53657), 'tvm.relay.ty.TensorType', 'relay.ty.TensorType', (['(o, i, h, w)', '"""int16"""'], {}), "((o, i, h, w), 'int16')\n", (53634, 53657), False, 'from tvm import relay\n'), ((53798, 53846), 'tvm.relay.TensorType', 'relay.TensorType', (['(32, 2, 128, 128, 1)', '"""uint16"""'], {}), "((32, 2, 128, 128, 1), 'uint16')\n", (53814, 53846), False, 'from tvm import relay\n'), ((3932, 3986), 'tvm.relay.create_executor', 'relay.create_executor', (['"""graph"""'], {'ctx': 'ctx', 'target': 'target'}), "('graph', ctx=ctx, target=target)\n", (3953, 3986), False, 'from tvm import relay\n'), ((8641, 8695), 'tvm.relay.create_executor', 'relay.create_executor', (['"""graph"""'], {'ctx': 'ctx', 'target': 'target'}), "('graph', ctx=ctx, target=target)\n", (8662, 8695), False, 'from tvm import relay\n'), ((13380, 13421), 'tvm.autotvm.task.space.FallbackConfigEntity', 'autotvm.task.space.FallbackConfigEntity', ([], {}), '()\n', (13419, 13421), False, 'from tvm import autotvm\n'), ((13528, 13573), 'tvm.autotvm.task.space.SplitEntity', 'autotvm.task.space.SplitEntity', (['[-1, 1, 1, 1]'], {}), '([-1, 1, 1, 1])\n', (13558, 13573), 
False, 'from tvm import autotvm\n'), ((13602, 13647), 'tvm.autotvm.task.space.SplitEntity', 'autotvm.task.space.SplitEntity', (['[-1, 1, 1, 1]'], {}), '([-1, 1, 1, 1])\n', (13632, 13647), False, 'from tvm import autotvm\n'), ((13676, 13721), 'tvm.autotvm.task.space.SplitEntity', 'autotvm.task.space.SplitEntity', (['[-1, 1, 1, 1]'], {}), '([-1, 1, 1, 1])\n', (13706, 13721), False, 'from tvm import autotvm\n'), ((13751, 13790), 'tvm.autotvm.task.space.SplitEntity', 'autotvm.task.space.SplitEntity', (['[-1, 1]'], {}), '([-1, 1])\n', (13781, 13790), False, 'from tvm import autotvm\n'), ((13833, 13875), 'tvm.autotvm.task.space.OtherOptionEntity', 'autotvm.task.space.OtherOptionEntity', (['(1500)'], {}), '(1500)\n', (13869, 13875), False, 'from tvm import autotvm\n'), ((13913, 13952), 'tvm.autotvm.task.space.OtherOptionEntity', 'autotvm.task.space.OtherOptionEntity', (['(1)'], {}), '(1)\n', (13949, 13952), False, 'from tvm import autotvm\n'), ((14677, 14704), 'tvm.relay.transform.InferType', 'relay.transform.InferType', ([], {}), '()\n', (14702, 14704), False, 'from tvm import relay\n'), ((15049, 15080), 'tvm.relay.build_config', 'relay.build_config', ([], {'opt_level': '(3)'}), '(opt_level=3)\n', (15067, 15080), False, 'from tvm import relay\n'), ((15113, 15123), 'tvm.relay.testing.ctx_list', 'ctx_list', ([], {}), '()\n', (15121, 15123), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n'), ((19893, 19947), 'tvm.relay.create_executor', 'relay.create_executor', (['"""graph"""'], {'ctx': 'ctx', 'target': 'target'}), "('graph', ctx=ctx, target=target)\n", (19914, 19947), False, 'from tvm import relay\n'), ((21724, 21778), 'tvm.relay.create_executor', 'relay.create_executor', (['"""graph"""'], {'ctx': 'ctx', 'target': 'target'}), "('graph', ctx=ctx, target=target)\n", (21745, 21778), False, 'from tvm import relay\n'), ((23813, 23843), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'dshape'}), '(size=dshape)\n', (23830, 23843), True, 'import numpy as np\n'), ((23871, 23901), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'kshape'}), '(size=kshape)\n', (23888, 23901), True, 'import numpy as np\n'), ((25000, 25035), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'dshape_nhwc'}), '(size=dshape_nhwc)\n', (25017, 25035), True, 'import numpy as np\n'), ((25063, 25098), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'kshape_hwoi'}), '(size=kshape_hwoi)\n', (25080, 25098), True, 'import numpy as np\n'), ((25757, 25787), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'dshape'}), '(size=dshape)\n', (25774, 25787), True, 'import numpy as np\n'), ((25815, 25845), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'kshape'}), '(size=kshape)\n', (25832, 25845), True, 'import numpy as np\n'), ((28859, 28889), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'dshape'}), '(size=dshape)\n', (28876, 28889), True, 'import numpy as np\n'), ((30904, 30934), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'dshape'}), '(size=dshape)\n', (30921, 30934), True, 'import numpy as np\n'), ((31669, 31681), 'tvm.var', 'tvm.var', (['"""n"""'], {}), "('n')\n", (31676, 31681), False, 'import tvm\n'), ((31718, 31756), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, c, w)', '"""float32"""'], {}), "((n, c, w), 'float32')\n", (31734, 31756), False, 'from tvm import relay\n'), ((31903, 31944), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, 10, 224)', '"""float32"""'], {}), "((n, 10, 224), 'float32')\n", (31919, 31944), False, 
'from tvm import relay\n'), ((32506, 32560), 'tvm.relay.create_executor', 'relay.create_executor', (['"""graph"""'], {'ctx': 'ctx', 'target': 'target'}), "('graph', ctx=ctx, target=target)\n", (32527, 32560), False, 'from tvm import relay\n'), ((32912, 32929), 'tvm.size_var', 'tvm.size_var', (['"""n"""'], {}), "('n')\n", (32924, 32929), False, 'import tvm\n'), ((32974, 33018), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, c, d, h, w)', '"""float32"""'], {}), "((n, c, d, h, w), 'float32')\n", (32990, 33018), False, 'from tvm import relay\n'), ((33170, 33219), 'tvm.relay.TensorType', 'relay.TensorType', (['(n, 10, 5, 224, 224)', '"""float32"""'], {}), "((n, 10, 5, 224, 224), 'float32')\n", (33186, 33219), False, 'from tvm import relay\n'), ((34065, 34119), 'tvm.relay.create_executor', 'relay.create_executor', (['"""graph"""'], {'ctx': 'ctx', 'target': 'target'}), "('graph', ctx=ctx, target=target)\n", (34086, 34119), False, 'from tvm import relay\n'), ((35440, 35490), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.001)', 'size': '(n, ic, ih, iw)'}), '(low=0.001, size=(n, ic, ih, iw))\n', (35457, 35490), True, 'import numpy as np\n'), ((35518, 35567), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n, ic, ih + 2 * ph, iw + 2 * pw)'}), '(shape=(n, ic, ih + 2 * ph, iw + 2 * pw))\n', (35526, 35567), True, 'import numpy as np\n'), ((35661, 35677), 'numpy.ix_', 'np.ix_', (['*no_zero'], {}), '(*no_zero)\n', (35667, 35677), True, 'import numpy as np\n'), ((35697, 35728), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n, oc, oh, ow)'}), '(shape=(n, oc, oh, ow))\n', (35705, 35728), True, 'import numpy as np\n'), ((35819, 35896), 'numpy.sum', 'np.sum', (['(pad_np[:, :, i * sh:i * sh + kh, j * sw:j * sw + kw] > 0)'], {'axis': '(2, 3)'}), '(pad_np[:, :, i * sh:i * sh + kh, j * sw:j * sw + kw] > 0, axis=(2, 3))\n', (35825, 35896), True, 'import numpy as np\n'), ((37368, 37413), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': 'shape'}), '(low=-1, high=1, size=shape)\n', (37385, 37413), True, 'import numpy as np\n'), ((38977, 39031), 'tvm.relay.create_executor', 'relay.create_executor', (['"""graph"""'], {'ctx': 'ctx', 'target': 'target'}), "('graph', ctx=ctx, target=target)\n", (38998, 39031), False, 'from tvm import relay\n'), ((39954, 39999), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': 'shape'}), '(low=-1, high=1, size=shape)\n', (39971, 39999), True, 'import numpy as np\n'), ((41171, 41216), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': 'shape'}), '(low=-1, high=1, size=shape)\n', (41188, 41216), True, 'import numpy as np\n'), ((42090, 42114), 'numpy.random.rand', 'np.random.rand', (['(5)', '(10)', '(5)'], {}), '(5, 10, 5)\n', (42104, 42114), True, 'import numpy as np\n'), ((43384, 43414), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'dshape'}), '(size=dshape)\n', (43401, 43414), True, 'import numpy as np\n'), ((45503, 45533), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'dshape'}), '(size=dshape)\n', (45520, 45533), True, 'import numpy as np\n'), ((47239, 47283), 'tvm.relay.TensorType', 'relay.TensorType', (['kernel_shape', 'weight_dtype'], {}), '(kernel_shape, weight_dtype)\n', (47255, 47283), False, 'from tvm import relay\n'), ((47728, 47757), 'numpy.random.rand', 'np.random.rand', (['*kernel_shape'], {}), '(*kernel_shape)\n', (47742, 47757), True, 'import numpy as np\n'), ((47851, 47882), 'tvm.relay.build_config', 
'relay.build_config', ([], {'opt_level': '(3)'}), '(opt_level=3)\n', (47869, 47882), False, 'from tvm import relay\n'), ((47917, 47961), 'tvm.relay.build', 'relay.build', (['func', 'target'], {'params': 'parameters'}), '(func, target, params=parameters)\n', (47928, 47961), False, 'from tvm import relay\n'), ((3534, 3579), 'numpy.random.uniform', 'np.random.uniform', (['(-scale)', 'scale'], {'size': 'dshape'}), '(-scale, scale, size=dshape)\n', (3551, 3579), True, 'import numpy as np\n'), ((3611, 3656), 'numpy.random.uniform', 'np.random.uniform', (['(-scale)', 'scale'], {'size': 'kshape'}), '(-scale, scale, size=kshape)\n', (3628, 3656), True, 'import numpy as np\n'), ((8022, 8067), 'numpy.random.uniform', 'np.random.uniform', (['(-scale)', 'scale'], {'size': 'dshape'}), '(-scale, scale, size=dshape)\n', (8039, 8067), True, 'import numpy as np\n'), ((8099, 8144), 'numpy.random.uniform', 'np.random.uniform', (['(-scale)', 'scale'], {'size': 'kshape'}), '(-scale, scale, size=kshape)\n', (8116, 8144), True, 'import numpy as np\n'), ((10904, 10935), 'tvm.relay.build_config', 'relay.build_config', ([], {'opt_level': '(3)'}), '(opt_level=3)\n', (10922, 10935), False, 'from tvm import relay\n'), ((11017, 11068), 'tvm.relay.build', 'tvm.relay.build', (['mod'], {'target': '"""llvm -device=arm_cpu"""'}), "(mod, target='llvm -device=arm_cpu')\n", (11032, 11068), False, 'import tvm\n'), ((14726, 14771), 'numpy.random.uniform', 'np.random.uniform', (['(-scale)', 'scale'], {'size': 'dshape'}), '(-scale, scale, size=dshape)\n', (14743, 14771), True, 'import numpy as np\n'), ((14803, 14848), 'numpy.random.uniform', 'np.random.uniform', (['(-scale)', 'scale'], {'size': 'kshape'}), '(-scale, scale, size=kshape)\n', (14820, 14848), True, 'import numpy as np\n'), ((15281, 15340), 'tvm.relay.build_module.build', 'relay.build_module.build', (['mod'], {'target': 'target', 'params': 'params'}), '(mod, target=target, params=params)\n', (15305, 15340), False, 'from tvm import relay\n'), ((15366, 15415), 'tvm.contrib.graph_runtime.create', 'tvm.contrib.graph_runtime.create', (['graph', 'lib', 'ctx'], {}), '(graph, lib, ctx)\n', (15398, 15415), False, 'import tvm\n'), ((19272, 19317), 'numpy.random.uniform', 'np.random.uniform', (['(-scale)', 'scale'], {'size': 'dshape'}), '(-scale, scale, size=dshape)\n', (19289, 19317), True, 'import numpy as np\n'), ((19349, 19394), 'numpy.random.uniform', 'np.random.uniform', (['(-scale)', 'scale'], {'size': 'kshape'}), '(-scale, scale, size=kshape)\n', (19366, 19394), True, 'import numpy as np\n'), ((21134, 21179), 'numpy.random.uniform', 'np.random.uniform', (['(-scale)', 'scale'], {'size': 'dshape'}), '(-scale, scale, size=dshape)\n', (21151, 21179), True, 'import numpy as np\n'), ((21211, 21256), 'numpy.random.uniform', 'np.random.uniform', (['(-scale)', 'scale'], {'size': 'kshape'}), '(-scale, scale, size=kshape)\n', (21228, 21256), True, 'import numpy as np\n'), ((32247, 32277), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'dshape'}), '(size=dshape)\n', (32264, 32277), True, 'import numpy as np\n'), ((33792, 33822), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'dshape'}), '(size=dshape)\n', (33809, 33822), True, 'import numpy as np\n'), ((35912, 35985), 'numpy.sum', 'np.sum', (['pad_np[:, :, i * sh:i * sh + kh, j * sw:j * sw + kw]'], {'axis': '(2, 3)'}), '(pad_np[:, :, i * sh:i * sh + kh, j * sw:j * sw + kw], axis=(2, 3))\n', (35918, 35985), True, 'import numpy as np\n'), ((36010, 36034), 'numpy.maximum', 'np.maximum', (['pad_count', '(1)'], {}), 
'(pad_count, 1)\n', (36020, 36034), True, 'import numpy as np\n'), ((38795, 38825), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'dshape'}), '(size=dshape)\n', (38812, 38825), True, 'import numpy as np\n'), ((46726, 46767), 'tvm.relay.TensorType', 'relay.TensorType', (['data_shape', 'input_dtype'], {}), '(data_shape, input_dtype)\n', (46742, 46767), False, 'from tvm import relay\n'), ((52887, 52918), 'tvm.relay.build_config', 'relay.build_config', ([], {'opt_level': '(3)'}), '(opt_level=3)\n', (52905, 52918), False, 'from tvm import relay\n'), ((52957, 53001), 'tvm.relay.build', 'relay.build', (['func', 'target'], {'params': 'parameters'}), '(func, target, params=parameters)\n', (52968, 53001), False, 'from tvm import relay\n'), ((15222, 15242), 'tvm.nd.array', 'tvm.nd.array', (['kernel'], {}), '(kernel)\n', (15234, 15242), False, 'import tvm\n'), ((15454, 15472), 'tvm.nd.array', 'tvm.nd.array', (['data'], {}), '(data)\n', (15466, 15472), False, 'import tvm\n'), ((26744, 26764), 'tvm.round', 'tvm.round', (['(h * scale)'], {}), '(h * scale)\n', (26753, 26764), False, 'import tvm\n'), ((26836, 26856), 'tvm.round', 'tvm.round', (['(w * scale)'], {}), '(w * scale)\n', (26845, 26856), False, 'import tvm\n'), ((27711, 27731), 'tvm.round', 'tvm.round', (['(d * scale)'], {}), '(d * scale)\n', (27720, 27731), False, 'import tvm\n'), ((27803, 27823), 'tvm.round', 'tvm.round', (['(h * scale)'], {}), '(h * scale)\n', (27812, 27823), False, 'import tvm\n'), ((27895, 27915), 'tvm.round', 'tvm.round', (['(w * scale)'], {}), '(w * scale)\n', (27904, 27915), False, 'import tvm\n'), ((46875, 46916), 'tvm.relay.TensorType', 'relay.TensorType', (['data_shape', 'input_dtype'], {}), '(data_shape, input_dtype)\n', (46891, 46916), False, 'from tvm import relay\n'), ((33605, 33625), 'tvm.relay.testing.run_infer_type', 'run_infer_type', (['func'], {}), '(func)\n', (33619, 33625), False, 'from tvm.relay.testing import ctx_list, run_infer_type\n')]
hjkim-haga/TF-OD-API
official/nlp/transformer/utils/tokenizer_test.py
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Subtokenizer and string helper methods."""

import collections
import tempfile

import tensorflow as tf

from official.nlp.transformer.utils import tokenizer


class SubtokenizerTest(tf.test.TestCase):

  def _init_subtokenizer(self, vocab_list):
    temp_file = tempfile.NamedTemporaryFile(delete=False)
    with tf.io.gfile.GFile(temp_file.name, "w") as w:
      for subtoken in vocab_list:
        w.write("'%s'" % subtoken)
        w.write("\n")
    return tokenizer.Subtokenizer(temp_file.name, reserved_tokens=[])

  def test_encode(self):
    vocab_list = ["123_", "test", "ing_"]
    subtokenizer = self._init_subtokenizer(vocab_list)
    s = "testing 123"
    encoded_list = subtokenizer.encode(s)
    self.assertEqual([1, 2, 0], encoded_list)

  def test_decode(self):
    vocab_list = ["123_", "test", "ing_"]
    subtokenizer = self._init_subtokenizer(vocab_list)
    encoded_list = [1, 2, 0]  # testing 123
    decoded_str = subtokenizer.decode(encoded_list)
    self.assertEqual("testing 123", decoded_str)

  def test_subtoken_ids_to_tokens(self):
    vocab_list = ["123_", "test", "ing_"]
    subtokenizer = self._init_subtokenizer(vocab_list)
    encoded_list = [1, 2, 0]  # testing 123
    token_list = subtokenizer._subtoken_ids_to_tokens(encoded_list)
    self.assertEqual([u"testing", u"123"], token_list)


class StringHelperTest(tf.test.TestCase):

  def test_split_string_to_tokens(self):
    text = "test? testing 123."
    tokens = tokenizer._split_string_to_tokens(text,
                                              tokenizer._ALPHANUMERIC_CHAR_SET)
    self.assertEqual(["test", "? ", "testing", "123", "."], tokens)

  def test_join_tokens_to_string(self):
    tokens = ["test", "? ", "testing", "123", "."]
    s = tokenizer._join_tokens_to_string(tokens, tokenizer._ALPHANUMERIC_CHAR_SET)
    self.assertEqual("test? testing 123.", s)

  def test_escape_token(self):
    token = u"abc_\\4"
    alphabet = set("abc_\\u;")
    escaped_token = tokenizer._escape_token(token, alphabet)
    self.assertEqual("abc\\u\\\\\\52;_", escaped_token)

  def test_unescape_token(self):
    escaped_token = u"Underline: \\u, Backslash: \\\\, Unicode: \\52;"
    unescaped_token = tokenizer._unescape_token(escaped_token)
    self.assertEqual("Underline: _, Backslash: \\, Unicode: 4", unescaped_token)

  def test_list_to_index_dict(self):
    lst = ["test", "strings"]
    d = tokenizer._list_to_index_dict(lst)
    self.assertDictEqual({"test": 0, "strings": 1}, d)

  def test_split_token_to_subtokens(self):
    token = "abc"
    subtoken_dict = {"a": 0, "b": 1, "c": 2, "ab": 3}
    max_subtoken_length = 2
    subtokens = tokenizer._split_token_to_subtokens(token, subtoken_dict,
                                            max_subtoken_length)
    self.assertEqual(["ab", "c"], subtokens)

  def test_generate_alphabet_dict(self):
    s = ["testing", "123"]
    reserved_tokens = ["???"]
    alphabet = tokenizer._generate_alphabet_dict(s, reserved_tokens)
    self.assertIn("?", alphabet)
    self.assertIn("t", alphabet)
    self.assertIn("e", alphabet)
    self.assertIn("s", alphabet)
    self.assertIn("i", alphabet)
    self.assertIn("n", alphabet)
    self.assertIn("g", alphabet)
    self.assertIn("1", alphabet)
    self.assertIn("2", alphabet)
    self.assertIn("3", alphabet)

  def test_count_and_gen_subtokens(self):
    token_counts = {"abc": 5}
    alphabet = set("abc_")
    subtoken_dict = {"a": 0, "b": 1, "c": 2, "_": 3}
    max_subtoken_length = 2
    subtoken_counts = tokenizer._count_and_gen_subtokens(
        token_counts, alphabet, subtoken_dict, max_subtoken_length)
    self.assertIsInstance(subtoken_counts, collections.defaultdict)
    self.assertDictEqual(
        {
            "a": 5,
            "b": 5,
            "c": 5,
            "_": 5,
            "ab": 5,
            "bc": 5,
            "c_": 5,
            "abc": 5,
            "bc_": 5,
            "abc_": 5
        }, subtoken_counts)

  def test_filter_and_bucket_subtokens(self):
    subtoken_counts = collections.defaultdict(int, {
        "a": 2,
        "b": 4,
        "c": 1,
        "ab": 6,
        "ac": 3,
        "abbc": 5
    })
    min_count = 3
    subtoken_buckets = tokenizer._filter_and_bucket_subtokens(
        subtoken_counts, min_count)
    self.assertEqual(len(subtoken_buckets[0]), 0)
    self.assertEqual(set("b"), subtoken_buckets[1])
    self.assertEqual(set(["ab", "ac"]), subtoken_buckets[2])
    self.assertEqual(len(subtoken_buckets[3]), 0)
    self.assertEqual(set(["abbc"]), subtoken_buckets[4])

  def test_gen_new_subtoken_list(self):
    subtoken_counts = collections.defaultdict(int, {
        "translate": 10,
        "t": 40,
        "tr": 16,
        "tra": 12
    })
    min_count = 5
    alphabet = set("translate")
    reserved_tokens = ["reserved", "tokens"]
    subtoken_list, max_token_length = tokenizer._gen_new_subtoken_list(
        subtoken_counts, min_count, alphabet, reserved_tokens)

    # Check that "tra" isn't in the list (its count should be decremented to 2,
    # so it should not be added to the candidate list).
    self.assertNotIn("tra", subtoken_list)
    self.assertIn("tr", subtoken_list)
    self.assertIn("t", subtoken_list)
    self.assertEqual(len("translate"), max_token_length)

  def test_generate_subtokens(self):
    token_counts = {"ab": 1, "bc": 3, "abc": 5}
    alphabet = set("abc_")
    min_count = 100
    num_iterations = 1
    reserved_tokens = ["reserved", "tokens"]
    vocab_list = tokenizer._generate_subtokens(token_counts, alphabet, min_count,
                                              num_iterations, reserved_tokens)

    # Check that reserved tokens are at the front of the list
    self.assertEqual(vocab_list[:2], reserved_tokens)

    # Check that each character in alphabet is in the vocab list
    for c in alphabet:
      self.assertIn(c, vocab_list)


if __name__ == "__main__":
  tf.test.main()
[((6843, 6857), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (6855, 6857), True, 'import tensorflow as tf\n'), ((907, 948), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (934, 948), False, 'import tempfile\n'), ((1110, 1168), 'official.nlp.transformer.utils.tokenizer.Subtokenizer', 'tokenizer.Subtokenizer', (['temp_file.name'], {'reserved_tokens': '[]'}), '(temp_file.name, reserved_tokens=[])\n', (1132, 1168), False, 'from official.nlp.transformer.utils import tokenizer\n'), ((2137, 2210), 'official.nlp.transformer.utils.tokenizer._split_string_to_tokens', 'tokenizer._split_string_to_tokens', (['text', 'tokenizer._ALPHANUMERIC_CHAR_SET'], {}), '(text, tokenizer._ALPHANUMERIC_CHAR_SET)\n', (2170, 2210), False, 'from official.nlp.transformer.utils import tokenizer\n'), ((2434, 2508), 'official.nlp.transformer.utils.tokenizer._join_tokens_to_string', 'tokenizer._join_tokens_to_string', (['tokens', 'tokenizer._ALPHANUMERIC_CHAR_SET'], {}), '(tokens, tokenizer._ALPHANUMERIC_CHAR_SET)\n', (2466, 2508), False, 'from official.nlp.transformer.utils import tokenizer\n'), ((2711, 2751), 'official.nlp.transformer.utils.tokenizer._escape_token', 'tokenizer._escape_token', (['token', 'alphabet'], {}), '(token, alphabet)\n', (2734, 2751), False, 'from official.nlp.transformer.utils import tokenizer\n'), ((2942, 2982), 'official.nlp.transformer.utils.tokenizer._unescape_token', 'tokenizer._unescape_token', (['escaped_token'], {}), '(escaped_token)\n', (2967, 2982), False, 'from official.nlp.transformer.utils import tokenizer\n'), ((3147, 3181), 'official.nlp.transformer.utils.tokenizer._list_to_index_dict', 'tokenizer._list_to_index_dict', (['lst'], {}), '(lst)\n', (3176, 3181), False, 'from official.nlp.transformer.utils import tokenizer\n'), ((3406, 3484), 'official.nlp.transformer.utils.tokenizer._split_token_to_subtokens', 'tokenizer._split_token_to_subtokens', (['token', 'subtoken_dict', 'max_subtoken_length'], {}), '(token, subtoken_dict, max_subtoken_length)\n', (3441, 3484), False, 'from official.nlp.transformer.utils import tokenizer\n'), ((3705, 3758), 'official.nlp.transformer.utils.tokenizer._generate_alphabet_dict', 'tokenizer._generate_alphabet_dict', (['s', 'reserved_tokens'], {}), '(s, reserved_tokens)\n', (3738, 3758), False, 'from official.nlp.transformer.utils import tokenizer\n'), ((4311, 4409), 'official.nlp.transformer.utils.tokenizer._count_and_gen_subtokens', 'tokenizer._count_and_gen_subtokens', (['token_counts', 'alphabet', 'subtoken_dict', 'max_subtoken_length'], {}), '(token_counts, alphabet, subtoken_dict,\n max_subtoken_length)\n', (4345, 4409), False, 'from official.nlp.transformer.utils import tokenizer\n'), ((4845, 4932), 'collections.defaultdict', 'collections.defaultdict', (['int', "{'a': 2, 'b': 4, 'c': 1, 'ab': 6, 'ac': 3, 'abbc': 5}"], {}), "(int, {'a': 2, 'b': 4, 'c': 1, 'ab': 6, 'ac': 3,\n 'abbc': 5})\n", (4868, 4932), False, 'import collections\n'), ((5035, 5101), 'official.nlp.transformer.utils.tokenizer._filter_and_bucket_subtokens', 'tokenizer._filter_and_bucket_subtokens', (['subtoken_counts', 'min_count'], {}), '(subtoken_counts, min_count)\n', (5073, 5101), False, 'from official.nlp.transformer.utils import tokenizer\n'), ((5455, 5532), 'collections.defaultdict', 'collections.defaultdict', (['int', "{'translate': 10, 't': 40, 'tr': 16, 'tra': 12}"], {}), "(int, {'translate': 10, 't': 40, 'tr': 16, 'tra': 12})\n", (5478, 5532), False, 'import collections\n'), ((5715, 5806), 
'official.nlp.transformer.utils.tokenizer._gen_new_subtoken_list', 'tokenizer._gen_new_subtoken_list', (['subtoken_counts', 'min_count', 'alphabet', 'reserved_tokens'], {}), '(subtoken_counts, min_count, alphabet,\n reserved_tokens)\n', (5747, 5806), False, 'from official.nlp.transformer.utils import tokenizer\n'), ((6366, 6467), 'official.nlp.transformer.utils.tokenizer._generate_subtokens', 'tokenizer._generate_subtokens', (['token_counts', 'alphabet', 'min_count', 'num_iterations', 'reserved_tokens'], {}), '(token_counts, alphabet, min_count,\n num_iterations, reserved_tokens)\n', (6395, 6467), False, 'from official.nlp.transformer.utils import tokenizer\n'), ((959, 997), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['temp_file.name', '"""w"""'], {}), "(temp_file.name, 'w')\n", (976, 997), True, 'import tensorflow as tf\n')]
bcgov/court-of-appeal
api/api/form7_searching_utils/__init__.py
ef773b1baa80d3aff1ac807ed01f59266d885955
from .form7_search import Form7Search from .parse_form7 import Form7Parsing
[]
JackDan9/soil
soil/build/lib/soil/openstack/snapshot.py
ae612a4634634aace834491fbdefbc69e6167674
# Copyright 2020 Soil, Inc. from soil.openstack.base import DataBase from soil.openstack.base import SourceBase class SnapshotData(DataBase): """A class for openstack snapshot data""" def __init__(self, data): self.data = data['snapshot'] class Snapshot(SourceBase): """A class for openstack snapshot""" def __init__(self, plugin, source_id): super(Snapshot, self).__init__(plugin, source_id) self._snapshot_obj = None @property def snapshot_obj(self): if self._snapshot_obj is not None: return self._snapshot_obj self._snapshot_obj = SnapshotData(self.show()) return self._snapshot_obj def show(self): return self.plugin.cinder.show_snapshot(self.source_id) def delete(self): self.plugin.cinder.delete_snapshot(self.source_id) def is_created(self): snapshot_info = self.show() status = snapshot_info['snapshot']['status'] if status in ('available', ): return True self._check_failed_status(status) return False def is_delete(self): pass
[]
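The Snapshot wrapper above is driven entirely through its plugin's cinder client (show_snapshot / delete_snapshot). Below is a small illustrative sketch, not part of the original module, showing how a caller might poll is_created() until the snapshot becomes available; FakeCinderClient, FakePlugin and wait_until_created are hypothetical names invented for this example.

# Illustrative usage sketch only -- FakePlugin, FakeCinderClient and
# wait_until_created are hypothetical, not part of the soil codebase.
import time


class FakeCinderClient(object):
    # Mimics the two cinder calls Snapshot makes above.
    def show_snapshot(self, snapshot_id):
        return {'snapshot': {'id': snapshot_id, 'status': 'available'}}

    def delete_snapshot(self, snapshot_id):
        pass


class FakePlugin(object):
    cinder = FakeCinderClient()


def wait_until_created(snapshot, interval=2, retries=10):
    # Poll Snapshot.is_created(); it returns True once the backing
    # snapshot reaches the 'available' state.
    for _ in range(retries):
        if snapshot.is_created():
            return True
        time.sleep(interval)
    return False

# Assuming SourceBase simply stores the plugin and source_id, usage would look like:
# snapshot = Snapshot(FakePlugin(), 'example-snapshot-id')
# wait_until_created(snapshot)
# print(snapshot.snapshot_obj.data['status'])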
dfirpaul/Active-Directory-Exploitation-Cheat-Sheet-1
Z - Tool Box/LaZagne/Windows/lazagne/softwares/windows/ppypykatz.py
1dcf54522e9d20711ff1114550dc2893ed3e9ed0
# -*- coding: utf-8 -*- # Thanks to @skelsec for his awesome tool Pypykatz # Checks his project here: https://github.com/skelsec/pypykatz import codecs import traceback from lazagne.config.module_info import ModuleInfo from lazagne.config.constant import constant from pypykatz.pypykatz import pypykatz class Pypykatz(ModuleInfo): """ Pypykatz dumps all secrets from the lsass.exe memory It does not work if: - LSASS is running as a protected process - A security product blocks this access """ def __init__(self): ModuleInfo.__init__(self, 'pypykatz', 'windows', system_module=True) def run(self): mimi = None try: mimi = pypykatz.go_live() except Exception: self.debug(traceback.format_exc()) if mimi: results = {} logon_sessions = mimi.to_dict().get('logon_sessions', []) for logon_session in logon_sessions: # Right now kerberos_creds, dpapi_creds results are not used user = logon_sessions[logon_session] # Get cleartext password for i in ['credman_creds', 'ssp_creds', 'livessp_creds', 'tspkg_creds', 'wdigest_creds']: for data in user.get(i, []): if all((data['username'], data['password'])): login = data['username'] if login not in results: results[login] = {} results[login]['Type'] = i results[login]['Domain'] = data.get('domainname', 'N/A') results[login]['Password'] = data['password'] # msv_creds to get sha1 user hash for data in user.get('msv_creds', []): if data['username']: login = data['username'] else: login = user['username'] if login not in results: results[login] = {} if data['SHAHash']: results[login]['Shahash'] = codecs.encode(data['SHAHash'], 'hex') if data['LMHash']: results[login]['Lmhash'] = codecs.encode(data['LMHash'], 'hex') if data['NThash']: results[login]['Nthash'] = codecs.encode(data['NThash'], 'hex') constant.pypykatz_result = results pwd_found = [] for user in results: results[user]['Login'] = user pwd_found.append(results[user]) return pwd_found
[((579, 647), 'lazagne.config.module_info.ModuleInfo.__init__', 'ModuleInfo.__init__', (['self', '"""pypykatz"""', '"""windows"""'], {'system_module': '(True)'}), "(self, 'pypykatz', 'windows', system_module=True)\n", (598, 647), False, 'from lazagne.config.module_info import ModuleInfo\n'), ((725, 743), 'pypykatz.pypykatz.pypykatz.go_live', 'pypykatz.go_live', ([], {}), '()\n', (741, 743), False, 'from pypykatz.pypykatz import pypykatz\n'), ((795, 817), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (815, 817), False, 'import traceback\n'), ((2239, 2276), 'codecs.encode', 'codecs.encode', (["data['SHAHash']", '"""hex"""'], {}), "(data['SHAHash'], 'hex')\n", (2252, 2276), False, 'import codecs\n'), ((2369, 2405), 'codecs.encode', 'codecs.encode', (["data['LMHash']", '"""hex"""'], {}), "(data['LMHash'], 'hex')\n", (2382, 2405), False, 'import codecs\n'), ((2498, 2534), 'codecs.encode', 'codecs.encode', (["data['NThash']", '"""hex"""'], {}), "(data['NThash'], 'hex')\n", (2511, 2534), False, 'import codecs\n')]
mglukhovsky/beets
test/test_discogs.py
889e30c056a609cf71c8c8200259520230545222
# -*- coding: utf-8 -*- # This file is part of beets. # Copyright 2016, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. """Tests for discogs plugin. """ from __future__ import division, absolute_import, print_function import unittest from test import _common from test._common import Bag from test.helper import capture_log from beetsplug.discogs import DiscogsPlugin class DGAlbumInfoTest(_common.TestCase): def _make_release(self, tracks=None): """Returns a Bag that mimics a discogs_client.Release. The list of elements on the returned Bag is incomplete, including just those required for the tests on this class.""" data = { 'id': 'ALBUM ID', 'uri': 'ALBUM URI', 'title': 'ALBUM TITLE', 'year': '3001', 'artists': [{ 'name': 'ARTIST NAME', 'id': 'ARTIST ID', 'join': ',' }], 'formats': [{ 'descriptions': ['FORMAT DESC 1', 'FORMAT DESC 2'], 'name': 'FORMAT', 'qty': 1 }], 'styles': [ 'STYLE1', 'STYLE2' ], 'labels': [{ 'name': 'LABEL NAME', 'catno': 'CATALOG NUMBER', }], 'tracklist': [] } if tracks: for recording in tracks: data['tracklist'].append(recording) return Bag(data=data, # Make some fields available as properties, as they are # accessed by DiscogsPlugin methods. title=data['title'], artists=[Bag(data=d) for d in data['artists']]) def _make_track(self, title, position='', duration='', type_=None): track = { 'title': title, 'position': position, 'duration': duration } if type_ is not None: # Test samples on discogs_client do not have a 'type_' field, but # the API seems to return it. Values: 'track' for regular tracks, # 'heading' for descriptive texts (ie. not real tracks - 12.13.2). 
track['type_'] = type_ return track def _make_release_from_positions(self, positions): """Return a Bag that mimics a discogs_client.Release with a tracklist where tracks have the specified `positions`.""" tracks = [self._make_track('TITLE%s' % i, position) for (i, position) in enumerate(positions, start=1)] return self._make_release(tracks) def test_parse_media_for_tracks(self): tracks = [self._make_track('TITLE ONE', '1', '01:01'), self._make_track('TITLE TWO', '2', '02:02')] release = self._make_release(tracks=tracks) d = DiscogsPlugin().get_album_info(release) t = d.tracks self.assertEqual(d.media, 'FORMAT') self.assertEqual(t[0].media, d.media) self.assertEqual(t[1].media, d.media) def test_parse_medium_numbers_single_medium(self): release = self._make_release_from_positions(['1', '2']) d = DiscogsPlugin().get_album_info(release) t = d.tracks self.assertEqual(d.mediums, 1) self.assertEqual(t[0].medium, 1) self.assertEqual(t[0].medium_total, 2) self.assertEqual(t[1].medium, 1) self.assertEqual(t[0].medium_total, 2) def test_parse_medium_numbers_two_mediums(self): release = self._make_release_from_positions(['1-1', '2-1']) d = DiscogsPlugin().get_album_info(release) t = d.tracks self.assertEqual(d.mediums, 2) self.assertEqual(t[0].medium, 1) self.assertEqual(t[0].medium_total, 1) self.assertEqual(t[1].medium, 2) self.assertEqual(t[1].medium_total, 1) def test_parse_medium_numbers_two_mediums_two_sided(self): release = self._make_release_from_positions(['A1', 'B1', 'C1']) d = DiscogsPlugin().get_album_info(release) t = d.tracks self.assertEqual(d.mediums, 2) self.assertEqual(t[0].medium, 1) self.assertEqual(t[0].medium_total, 2) self.assertEqual(t[0].medium_index, 1) self.assertEqual(t[1].medium, 1) self.assertEqual(t[1].medium_total, 2) self.assertEqual(t[1].medium_index, 2) self.assertEqual(t[2].medium, 2) self.assertEqual(t[2].medium_total, 1) self.assertEqual(t[2].medium_index, 1) def test_parse_track_indices(self): release = self._make_release_from_positions(['1', '2']) d = DiscogsPlugin().get_album_info(release) t = d.tracks self.assertEqual(t[0].medium_index, 1) self.assertEqual(t[0].index, 1) self.assertEqual(t[0].medium_total, 2) self.assertEqual(t[1].medium_index, 2) self.assertEqual(t[1].index, 2) self.assertEqual(t[1].medium_total, 2) def test_parse_track_indices_several_media(self): release = self._make_release_from_positions(['1-1', '1-2', '2-1', '3-1']) d = DiscogsPlugin().get_album_info(release) t = d.tracks self.assertEqual(d.mediums, 3) self.assertEqual(t[0].medium_index, 1) self.assertEqual(t[0].index, 1) self.assertEqual(t[0].medium_total, 2) self.assertEqual(t[1].medium_index, 2) self.assertEqual(t[1].index, 2) self.assertEqual(t[1].medium_total, 2) self.assertEqual(t[2].medium_index, 1) self.assertEqual(t[2].index, 3) self.assertEqual(t[2].medium_total, 1) self.assertEqual(t[3].medium_index, 1) self.assertEqual(t[3].index, 4) self.assertEqual(t[3].medium_total, 1) def test_parse_position(self): """Test the conversion of discogs `position` to medium, medium_index and subtrack_index.""" # List of tuples (discogs_position, (medium, medium_index, subindex) positions = [('1', (None, '1', None)), ('A12', ('A', '12', None)), ('12-34', ('12-', '34', None)), ('CD1-1', ('CD1-', '1', None)), ('1.12', (None, '1', '12')), ('12.a', (None, '12', 'A')), ('12.34', (None, '12', '34')), ('1ab', (None, '1', 'AB')), # Non-standard ('IV', ('IV', None, None)), ] d = DiscogsPlugin() for position, expected in positions: self.assertEqual(d.get_track_index(position), expected) def 
test_parse_tracklist_without_sides(self): """Test standard Discogs position 12.2.9#1: "without sides".""" release = self._make_release_from_positions(['1', '2', '3']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 3) def test_parse_tracklist_with_sides(self): """Test standard Discogs position 12.2.9#2: "with sides".""" release = self._make_release_from_positions(['A1', 'A2', 'B1', 'B2']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) # 2 sides = 1 LP self.assertEqual(len(d.tracks), 4) def test_parse_tracklist_multiple_lp(self): """Test standard Discogs position 12.2.9#3: "multiple LP".""" release = self._make_release_from_positions(['A1', 'A2', 'B1', 'C1']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 2) # 3 sides = 1 LP + 1 LP self.assertEqual(len(d.tracks), 4) def test_parse_tracklist_multiple_cd(self): """Test standard Discogs position 12.2.9#4: "multiple CDs".""" release = self._make_release_from_positions(['1-1', '1-2', '2-1', '3-1']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 3) self.assertEqual(len(d.tracks), 4) def test_parse_tracklist_non_standard(self): """Test non standard Discogs position.""" release = self._make_release_from_positions(['I', 'II', 'III', 'IV']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 4) def test_parse_tracklist_subtracks_dot(self): """Test standard Discogs position 12.2.9#5: "sub tracks, dots".""" release = self._make_release_from_positions(['1', '2.1', '2.2', '3']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 3) release = self._make_release_from_positions(['A1', 'A2.1', 'A2.2', 'A3']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 3) def test_parse_tracklist_subtracks_letter(self): """Test standard Discogs position 12.2.9#5: "sub tracks, letter".""" release = self._make_release_from_positions(['A1', 'A2a', 'A2b', 'A3']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 3) release = self._make_release_from_positions(['A1', 'A2.a', 'A2.b', 'A3']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 3) def test_parse_tracklist_subtracks_extra_material(self): """Test standard Discogs position 12.2.9#6: "extra material".""" release = self._make_release_from_positions(['1', '2', 'Video 1']) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 2) self.assertEqual(len(d.tracks), 3) def test_parse_tracklist_subtracks_indices(self): """Test parsing of subtracks that include index tracks.""" release = self._make_release_from_positions(['', '', '1.1', '1.2']) # Track 1: Index track with medium title release.data['tracklist'][0]['title'] = 'MEDIUM TITLE' # Track 2: Index track with track group title release.data['tracklist'][1]['title'] = 'TRACK GROUP TITLE' d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(d.tracks[0].disctitle, 'MEDIUM TITLE') self.assertEqual(len(d.tracks), 1) self.assertEqual(d.tracks[0].title, 'TRACK GROUP TITLE') def test_parse_tracklist_subtracks_nested_logical(self): """Test parsing of subtracks defined inside a index track that are logical subtracks (ie. should be grouped together into a single track). 
""" release = self._make_release_from_positions(['1', '', '3']) # Track 2: Index track with track group title, and sub_tracks release.data['tracklist'][1]['title'] = 'TRACK GROUP TITLE' release.data['tracklist'][1]['sub_tracks'] = [ self._make_track('TITLE ONE', '2.1', '01:01'), self._make_track('TITLE TWO', '2.2', '02:02') ] d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 3) self.assertEqual(d.tracks[1].title, 'TRACK GROUP TITLE') def test_parse_tracklist_subtracks_nested_physical(self): """Test parsing of subtracks defined inside a index track that are physical subtracks (ie. should not be grouped together). """ release = self._make_release_from_positions(['1', '', '4']) # Track 2: Index track with track group title, and sub_tracks release.data['tracklist'][1]['title'] = 'TRACK GROUP TITLE' release.data['tracklist'][1]['sub_tracks'] = [ self._make_track('TITLE ONE', '2', '01:01'), self._make_track('TITLE TWO', '3', '02:02') ] d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 1) self.assertEqual(len(d.tracks), 4) self.assertEqual(d.tracks[1].title, 'TITLE ONE') self.assertEqual(d.tracks[2].title, 'TITLE TWO') def test_parse_tracklist_disctitles(self): """Test parsing of index tracks that act as disc titles.""" release = self._make_release_from_positions(['', '1-1', '1-2', '', '2-1']) # Track 1: Index track with medium title (Cd1) release.data['tracklist'][0]['title'] = 'MEDIUM TITLE CD1' # Track 4: Index track with medium title (Cd2) release.data['tracklist'][3]['title'] = 'MEDIUM TITLE CD2' d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.mediums, 2) self.assertEqual(d.tracks[0].disctitle, 'MEDIUM TITLE CD1') self.assertEqual(d.tracks[1].disctitle, 'MEDIUM TITLE CD1') self.assertEqual(d.tracks[2].disctitle, 'MEDIUM TITLE CD2') self.assertEqual(len(d.tracks), 3) def test_parse_minimal_release(self): """Test parsing of a release with the minimal amount of information.""" data = {'id': 123, 'tracklist': [self._make_track('A', '1', '01:01')], 'artists': [{'name': 'ARTIST NAME', 'id': 321, 'join': ''}], 'title': 'TITLE'} release = Bag(data=data, title=data['title'], artists=[Bag(data=d) for d in data['artists']]) d = DiscogsPlugin().get_album_info(release) self.assertEqual(d.artist, 'ARTIST NAME') self.assertEqual(d.album, 'TITLE') self.assertEqual(len(d.tracks), 1) def test_parse_release_without_required_fields(self): """Test parsing of a release that does not have the required fields.""" release = Bag(data={}, refresh=lambda *args: None) with capture_log() as logs: d = DiscogsPlugin().get_album_info(release) self.assertEqual(d, None) self.assertIn('Release does not contain the required fields', logs[0]) def suite(): return unittest.TestLoader().loadTestsFromName(__name__) if __name__ == '__main__': unittest.main(defaultTest='suite')
[((14946, 14980), 'unittest.main', 'unittest.main', ([], {'defaultTest': '"""suite"""'}), "(defaultTest='suite')\n", (14959, 14980), False, 'import unittest\n'), ((7139, 7154), 'beetsplug.discogs.DiscogsPlugin', 'DiscogsPlugin', ([], {}), '()\n', (7152, 7154), False, 'from beetsplug.discogs import DiscogsPlugin\n'), ((14591, 14631), 'test._common.Bag', 'Bag', ([], {'data': '{}', 'refresh': '(lambda *args: None)'}), '(data={}, refresh=lambda *args: None)\n', (14594, 14631), False, 'from test._common import Bag\n'), ((14645, 14658), 'test.helper.capture_log', 'capture_log', ([], {}), '()\n', (14656, 14658), False, 'from test.helper import capture_log\n'), ((14864, 14885), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (14883, 14885), False, 'import unittest\n'), ((3386, 3401), 'beetsplug.discogs.DiscogsPlugin', 'DiscogsPlugin', ([], {}), '()\n', (3399, 3401), False, 'from beetsplug.discogs import DiscogsPlugin\n'), ((3715, 3730), 'beetsplug.discogs.DiscogsPlugin', 'DiscogsPlugin', ([], {}), '()\n', (3728, 3730), False, 'from beetsplug.discogs import DiscogsPlugin\n'), ((4126, 4141), 'beetsplug.discogs.DiscogsPlugin', 'DiscogsPlugin', ([], {}), '()\n', (4139, 4141), False, 'from beetsplug.discogs import DiscogsPlugin\n'), ((4551, 4566), 'beetsplug.discogs.DiscogsPlugin', 'DiscogsPlugin', ([], {}), '()\n', (4564, 4566), False, 'from beetsplug.discogs import DiscogsPlugin\n'), ((5174, 5189), 'beetsplug.discogs.DiscogsPlugin', 'DiscogsPlugin', ([], {}), '()\n', (5187, 5189), False, 'from beetsplug.discogs import DiscogsPlugin\n'), ((5706, 5721), 'beetsplug.discogs.DiscogsPlugin', 'DiscogsPlugin', ([], {}), '()\n', (5719, 5721), False, 'from beetsplug.discogs import DiscogsPlugin\n'), ((7472, 7487), 'beetsplug.discogs.DiscogsPlugin', 'DiscogsPlugin', ([], {}), '()\n', (7485, 7487), False, 'from beetsplug.discogs import DiscogsPlugin\n'), ((7802, 7817), 'beetsplug.discogs.DiscogsPlugin', 'DiscogsPlugin', ([], {}), '()\n', (7815, 7817), False, 'from beetsplug.discogs import DiscogsPlugin\n'), ((8152, 8167), 'beetsplug.discogs.DiscogsPlugin', 'DiscogsPlugin', ([], {}), '()\n', (8165, 8167), False, 'from beetsplug.discogs import DiscogsPlugin\n'), ((8567, 8582), 'beetsplug.discogs.DiscogsPlugin', 'DiscogsPlugin', ([], {}), '()\n', (8580, 8582), False, 'from beetsplug.discogs import DiscogsPlugin\n'), ((8880, 8895), 'beetsplug.discogs.DiscogsPlugin', 'DiscogsPlugin', ([], {}), '()\n', (8893, 8895), False, 'from beetsplug.discogs import DiscogsPlugin\n'), ((9219, 9234), 'beetsplug.discogs.DiscogsPlugin', 'DiscogsPlugin', ([], {}), '()\n', (9232, 9234), False, 'from beetsplug.discogs import DiscogsPlugin\n'), ((9490, 9505), 'beetsplug.discogs.DiscogsPlugin', 'DiscogsPlugin', ([], {}), '()\n', (9503, 9505), False, 'from beetsplug.discogs import DiscogsPlugin\n'), ((9836, 9851), 'beetsplug.discogs.DiscogsPlugin', 'DiscogsPlugin', ([], {}), '()\n', (9849, 9851), False, 'from beetsplug.discogs import DiscogsPlugin\n'), ((10107, 10122), 'beetsplug.discogs.DiscogsPlugin', 'DiscogsPlugin', ([], {}), '()\n', (10120, 10122), False, 'from beetsplug.discogs import DiscogsPlugin\n'), ((10452, 10467), 'beetsplug.discogs.DiscogsPlugin', 'DiscogsPlugin', ([], {}), '()\n', (10465, 10467), False, 'from beetsplug.discogs import DiscogsPlugin\n'), ((11020, 11035), 'beetsplug.discogs.DiscogsPlugin', 'DiscogsPlugin', ([], {}), '()\n', (11033, 11035), False, 'from beetsplug.discogs import DiscogsPlugin\n'), ((11901, 11916), 'beetsplug.discogs.DiscogsPlugin', 'DiscogsPlugin', ([], {}), '()\n', (11914, 11916), 
False, 'from beetsplug.discogs import DiscogsPlugin\n'), ((12700, 12715), 'beetsplug.discogs.DiscogsPlugin', 'DiscogsPlugin', ([], {}), '()\n', (12713, 12715), False, 'from beetsplug.discogs import DiscogsPlugin\n'), ((13445, 13460), 'beetsplug.discogs.DiscogsPlugin', 'DiscogsPlugin', ([], {}), '()\n', (13458, 13460), False, 'from beetsplug.discogs import DiscogsPlugin\n'), ((14258, 14273), 'beetsplug.discogs.DiscogsPlugin', 'DiscogsPlugin', ([], {}), '()\n', (14271, 14273), False, 'from beetsplug.discogs import DiscogsPlugin\n'), ((2232, 2243), 'test._common.Bag', 'Bag', ([], {'data': 'd'}), '(data=d)\n', (2235, 2243), False, 'from test._common import Bag\n'), ((14207, 14218), 'test._common.Bag', 'Bag', ([], {'data': 'd'}), '(data=d)\n', (14210, 14218), False, 'from test._common import Bag\n'), ((14684, 14699), 'beetsplug.discogs.DiscogsPlugin', 'DiscogsPlugin', ([], {}), '()\n', (14697, 14699), False, 'from beetsplug.discogs import DiscogsPlugin\n')]
hank-chou/python
data_structures/queue/queue_on_pseudo_stack.py
a9f729fa263bce599d2774f3f6afb5a18bcc9862
"""Queue represented by a pseudo stack (represented by a list with pop and append)""" class Queue: def __init__(self): self.stack = [] self.length = 0 def __str__(self): printed = "<" + str(self.stack)[1:-1] + ">" return printed """Enqueues {@code item} @param item item to enqueue""" def put(self, item): self.stack.append(item) self.length = self.length + 1 """Dequeues {@code item} @requirement: |self.length| > 0 @return dequeued item that was dequeued""" def get(self): self.rotate(1) dequeued = self.stack[self.length - 1] self.stack = self.stack[:-1] self.rotate(self.length - 1) self.length = self.length - 1 return dequeued """Rotates the queue {@code rotation} times @param rotation number of times to rotate queue""" def rotate(self, rotation): for i in range(rotation): temp = self.stack[0] self.stack = self.stack[1:] self.put(temp) self.length = self.length - 1 """Reports item at the front of self @return item at front of self.stack""" def front(self): front = self.get() self.put(front) self.rotate(self.length - 1) return front """Returns the length of this.stack""" def size(self): return self.length
[]
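A short usage sketch (not in the original file) that exercises the Queue class above and shows its FIFO behaviour; it assumes the class definition above is in scope.

# Illustrative example session:
q = Queue()
for item in ("a", "b", "c"):
    q.put(item)

print(q.size())   # 3
print(q.front())  # 'a' -- front() peeks without removing
print(q.get())    # 'a' -- get() dequeues the oldest item
print(q.get())    # 'b'
print(q.size())   # 1
print(q)          # remaining queue rendered by __str__, e.g. <'c'>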
nihui/gen-ncnn-models
darknet2ncnn.py
18523f1920d9afc44ce3058087c07e09f28aa151
#! /usr/bin/env python # coding: utf-8 import configparser import numpy as np import re,sys,os from graph import MyGraph from collections import OrderedDict def unique_config_sections(config_file): """Convert all config sections to have unique names. Adds unique suffixes to config sections for compability with configparser. """ from collections import defaultdict import io section_counters = defaultdict(int) output_stream = io.StringIO() with open(config_file) as fin: for line in fin: if line.startswith('['): section = line.strip().strip('[]') _section = section + '_' + str(section_counters[section]) section_counters[section] += 1 line = line.replace(section, _section) output_stream.write(line) output_stream.seek(0) return output_stream def getFilters(mydict, name): #print('find filters for ', name) if hasattr(mydict[name], 'filters'): return mydict[name].filters else: assert len(mydict[name].input) >= 1 return getFilters(mydict, mydict[name].input[0]) def readfile(f, len, msg): print(" %s read %d bytes" % (msg, len)) return f.read(len) def buildGraph(config_path, weights_path): unique_config_file = unique_config_sections(config_path) cfg_parser = configparser.ConfigParser() cfg_parser.read_file(unique_config_file) weights_file = open(weights_path, 'rb') # read out major, minor, revision, net.seen readfile(weights_file, (4*4), 'head') mydict = OrderedDict() # record the output of the original layer mylist = [] count = 4 import queue for _section in cfg_parser.sections(): sec_q = queue.Queue(0) sec_q.put(cfg_parser[_section]) while not sec_q.empty(): sec = sec_q.get() section = sec.name print('Parsing section {}'.format(section)) # this section will can be a subsection if section.startswith('activation') or section.endswith('activation'): activation = sec.get('activation', fallback = 'logistic') if activation == 'linear': pass elif activation == 'linear' or activation == 'leaky' or activation == 'relu': node = MyGraph.MyNode() node.name = section node.op = 'Leaky' if activation == 'linear': node.slope = 1 elif activation == 'leaky': node.slope = 0.1 elif activation == 'relu': node.slope = 0 node.input = [prev_output] node.input_norm = node.input #node.attr = [] mydict[node.name] = node prev_output = node.name # prev_layer_filters no change else: raise ValueError( 'Unknown activation function `{}` in section {}'.format( activation, section)) if section.startswith('activation'): mylist.append(section) elif re.match(r'^(convolutional|depthwise|groupwise)_\d+$', section): if section.startswith('convolutional'): conv = 'conv' filters = sec.getint('filters', fallback = 1) groups = 1 op = 'Conv2D' elif section.startswith('depthwise'): conv = 'dconv' filters = prev_layer_filters multiplier = sec.getint('multiplier', fallback = 1) assert multiplier == 1 groups = filters op = 'DepthwiseConv2dNative' elif section.startswith('groupwise'): conv = 'gconv' filters = sec.getint('filters', fallback=1) groups = sec.getint('groups', fallback = 1) op = 'DepthwiseConv2dNative' size = sec.getint('size', fallback = 1) stride = sec.getint('stride', fallback = 1) pad = sec.getint('pad', fallback = 0) padding = sec.getint('padding', fallback = 0) activation = sec.get('activation', fallback = 'logistic') batch_normalize = sec.getint('batch_normalize', 0) # padding='same' is equivalent to Darknet pad=1 # padding = 'same' if pad == 1 else 'valid' if pad: padding = size//2 # Setting weights. 
# Darknet serializes convolutional weights as: # [bias/beta, [gamma, mean, variance], conv_weights] #prev_layer_shape = prev_layer.shape # TODO: This assumes channel last dim_ordering. if conv == 'conv': weights_shape = (size, size, prev_layer_filters, filters) idx_tf2darknet = [0, 1, 2, 3] elif conv == 'dconv': weights_shape = (size, size, filters) idx_tf2darknet = [0, 1, 2] elif conv == 'gconv': weights_shape = (size, size, prev_layer_filters//groups, filters//groups, groups) idx_tf2darknet = [0, 1, 2, 3, 4] idxmap = {x: i for i, x in enumerate(idx_tf2darknet)} idx_dartnet2tf = [idxmap[i] for i in range(len(idxmap))] weights_size = np.product(weights_shape) print(' ' + conv, 'bn' if batch_normalize else ' ', activation, weights_shape) conv_bias = np.ndarray( shape=(filters, ), dtype=np.float32, buffer=readfile(weights_file, (filters * 4), section+'-bias')) count += filters if batch_normalize: bn_weights = np.ndarray( shape=(3, filters), dtype=np.float32, buffer=readfile(weights_file, (filters * 12), section+'-batchnorm')) count += 3 * filters # TODO: Keras BatchNormalization mistakenly refers to var # as std. bn_weight_list = [ bn_weights[0], # scale gamma conv_bias, # shift beta bn_weights[1], # running mean bn_weights[2] # running var ] conv_weights = np.ndarray( shape=[weights_shape[i] for i in idx_tf2darknet], dtype=np.float32, buffer=readfile(weights_file, (weights_size * 4), section+'-weights')) count += weights_size # DarkNet conv_weights are serialized Caffe-style: # (out_dim, in_dim, height, width) # We would like to set these to Tensorflow order: # (height, width, in_dim, out_dim) # TODO: Add check for Theano dim ordering. #print("the darknet shape is ", conv_weights.shape) conv_weights = np.transpose(conv_weights, idx_dartnet2tf) #print("the tf shape is ", conv_weights.shape) conv_weights = [conv_weights] if batch_normalize else [ conv_weights, conv_bias ] # Create nodes #conv_layer = np.zeros([1, 1, filters], dtype = np.float32) node = MyGraph.MyNode() node.name = section node.op = op node.input = [prev_output] node.input_norm = node.input node.kernel = conv_weights[0] node.padding = padding node.strides = [1,stride,stride,1] node.groups = groups node.filters = filters mydict[node.name] = node prev_output = node.name prev_layer_filters = filters if batch_normalize: node = MyGraph.MyNode() node.name = section + '_batch_normalize' node.op = 'FusedBatchNorm' node.input = [prev_output] node.input_norm = node.input #node.attr = [] node.gamma = bn_weights[0] node.beta = conv_bias node.mean = bn_weights[1] node.variance = bn_weights[2] mydict[node.name] = node prev_output = node.name # prev_layer_filters no change else: node = MyGraph.MyNode() node.name = section + '_bias' node.op = 'BiasAdd' node.input = [prev_output] node.input_norm = node.input #node.attr = [] node.bias = conv_bias mydict[node.name] = node prev_output = node.name if activation == 'linear': mylist.append(prev_output) else: tmp_parser = configparser.ConfigParser() name = section + '_activation' tmp_parser.add_section(name) tmp_parser.set(name, 'activation', activation) sec_q.put(tmp_parser[name]) mylist.append(name) elif section.startswith('shuffle'): node = MyGraph.MyNode() node.name = section node.op = 'Shuffle' node.input = [prev_output] node.input_norm = node.input node.groups = int(cfg_parser[section]['groups']) mydict[node.name] = node prev_output = node.name mylist.append(section) elif re.match(r'^(pooling|maxpool|avgpool)_\d+$', section): node = MyGraph.MyNode() node.stride = sec.getint('stride', fallback = 
1) node.size = sec.getint('size', node.stride) node.padding = sec.getint('padding', fallback = (node.size-1)//2) if section.startswith('pooling'): node.mode = str(cfg_parser[section]['mode']) node.global_pooling = 0 elif section.startswith('maxpool'): node.mode = 'max' node.global_pooling = 0 elif section.startswith('avgpool'): node.mode = 'avg' node.global_pooling = 1 node.name = section node.op = 'Pooling' node.input = [prev_output] node.input_norm = node.input mydict[node.name] = node prev_output = node.name #print('pooling ', vars(node)) mylist.append(section) elif section.startswith('route'): ids = [int(i) for i in cfg_parser[section]['layers'].split(',')] node = MyGraph.MyNode() node.name = section node.op = 'NCNNConcat' node.input = [mylist[i] for i in ids] #print('mylist is ', mylist, 'the ids is ', ids, 'node input is ', node.input) node.input_norm = node.input node.axis = 0 node.filters = sum([getFilters(mydict, mylist[i]) for i in ids]) mydict[node.name] = node prev_output = node.name mylist.append(section) prev_layer_filters = node.filters elif section.startswith('reorg'): node = MyGraph.MyNode() node.name = section node.op = 'DarknetReorg' node.input = [prev_output] node.stride = sec.getint('stride', fallback = 1) node.input_norm = node.input node.filters = getFilters(mydict, node.input[0]) * node.stride * node.stride mydict[node.name] = node prev_output = node.name mylist.append(section) prev_layer_filters = node.filters elif re.match(r'^(shortcut)_\d+$', section): activation = sec.get('activation', fallback = 'logistic') from_ = sec.getint('from') node = MyGraph.MyNode() node.name = section node.op = 'BinaryOp' node.op_type = 0 node.input = [prev_output, mylist[from_]] #print('mylist is ', mylist, 'the from_ is ', from_, 'node input is ', node.input) node.input_norm = node.input mydict[node.name] = node prev_output = node.name if activation == 'linear': mylist.append(prev_output) else: tmp_parser = configparser.ConfigParser() name = section + '_activation' tmp_parser.add_section(name) tmp_parser.set(name, 'activation', activation) sec_q.put(tmp_parser[name]) # NOTE: this section has relative reference mylist.append(name) elif section.startswith('connected'): activation = sec.get('activation', fallback='linear') filters = sec.getint('output', 2) bias_data = np.ndarray( shape=[filters], dtype=np.float32, buffer=readfile(weights_file, (filters * 4), section+'-bias')) fc_data = np.ndarray( shape=[prev_layer_filters, filters], dtype=np.float32, buffer=readfile(weights_file, (prev_layer_filters * filters * 4), section+'-weight')) node = MyGraph.MyNode() node.name = section node.op = 'MatMul' node.input = [prev_output] node.input_norm = node.input node.multiplier = fc_data mydict[node.name] = node prev_output = node.name prev_layer_filters = filters node = MyGraph.MyNode() node.name = section + '_bias' node.op = 'BiasAdd' node.input = [prev_output] node.input_norm = node.input # node.attr = [] node.bias = bias_data mydict[node.name] = node prev_output = node.name if activation == 'linear': mylist.append(prev_output) else: tmp_parser = configparser.ConfigParser() name = section + '_activation' tmp_parser.add_section(name) tmp_parser.set(name, 'activation', activation) sec_q.put(tmp_parser[name]) mylist.append(name) elif section.startswith('net'): node = MyGraph.MyNode() node.name = section node.op = 'DarknetNet' node.input = [] node.input_norm = [] node.width = int(cfg_parser['net_0']['width']) node.height = int(cfg_parser['net_0']['height']) node.channels = 
int(cfg_parser['net_0']['channels']) node.filters = node.channels # print(vars(node)) # node.attr = [] mydict[node.name] = node # start here prev_output = node.name prev_layer_filters = node.channels mylist.append(section) elif section.startswith('region'): node = MyGraph.MyNode() node.name = section node.op = 'DarknetRegion' node.input = [prev_output] node.input_norm = node.input node.classes = int(cfg_parser[section]['classes']) node.num = int(cfg_parser[section]['num']) node.softmax = int(cfg_parser[section]['softmax']) node.anchors = [float(i) for i in re.split(r',', cfg_parser[section]['anchors'])] #print(vars(node)) #node.attr = [] mydict[node.name] = node prev_output = node.name mylist.append(section) elif section.startswith('softmax'): node = MyGraph.MyNode() node.name = section node.op = 'Softmax' node.input = [prev_output] node.input_norm = node.input mydict[node.name] = node prev_output = node.name mylist.append(section) pass elif section.startswith('cost'): pass # Configs not currently handled during model definition. else: raise ValueError( 'Unsupported section header type: {}'.format(section)) print(' out filters ', prev_layer_filters) print('loaded {} bytes in weights file'.format(count*4)) mygraph = MyGraph(mydict) mygraph.type = 'darknet' return mygraph if __name__ == '__main__': config_path = sys.argv[1] weights_path = sys.argv[2] mygraph = buildGraph(config_path, weights_path) # Define the output nodes, input nodes, and stop nodes required for the subgraph outputNodes = ['region_0', 'softmax_0'] stopNodes = [] inputNodes = ['darknet_0'] mygraph.extractSubGraph(inputNodes, outputNodes, stopNodes) mygraph.generateDot('YoloV2.dot') # Generate the source code corresponding to the subgraph mygraph.generateSource('YoloV2', os.path.split(config_path)[1]+'.ncnn', os.path.split(weights_path)[1] + '.ncnn')
[((422, 438), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (433, 438), False, 'from collections import defaultdict\n'), ((459, 472), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (470, 472), False, 'import io\n'), ((1364, 1391), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (1389, 1391), False, 'import configparser\n'), ((1586, 1599), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1597, 1599), False, 'from collections import OrderedDict\n'), ((16451, 16466), 'graph.MyGraph', 'MyGraph', (['mydict'], {}), '(mydict)\n', (16458, 16466), False, 'from graph import MyGraph\n'), ((1752, 1766), 'queue.Queue', 'queue.Queue', (['(0)'], {}), '(0)\n', (1763, 1766), False, 'import queue\n'), ((3181, 3244), 're.match', 're.match', (['"""^(convolutional|depthwise|groupwise)_\\\\d+$"""', 'section'], {}), "('^(convolutional|depthwise|groupwise)_\\\\d+$', section)\n", (3189, 3244), False, 'import re, sys, os\n'), ((16936, 16962), 'os.path.split', 'os.path.split', (['config_path'], {}), '(config_path)\n', (16949, 16962), False, 'import re, sys, os\n'), ((16975, 17002), 'os.path.split', 'os.path.split', (['weights_path'], {}), '(weights_path)\n', (16988, 17002), False, 'import re, sys, os\n'), ((5419, 5444), 'numpy.product', 'np.product', (['weights_shape'], {}), '(weights_shape)\n', (5429, 5444), True, 'import numpy as np\n'), ((7007, 7049), 'numpy.transpose', 'np.transpose', (['conv_weights', 'idx_dartnet2tf'], {}), '(conv_weights, idx_dartnet2tf)\n', (7019, 7049), True, 'import numpy as np\n'), ((7350, 7366), 'graph.MyGraph.MyNode', 'MyGraph.MyNode', ([], {}), '()\n', (7364, 7366), False, 'from graph import MyGraph\n'), ((2313, 2329), 'graph.MyGraph.MyNode', 'MyGraph.MyNode', ([], {}), '()\n', (2327, 2329), False, 'from graph import MyGraph\n'), ((7866, 7882), 'graph.MyGraph.MyNode', 'MyGraph.MyNode', ([], {}), '()\n', (7880, 7882), False, 'from graph import MyGraph\n'), ((8441, 8457), 'graph.MyGraph.MyNode', 'MyGraph.MyNode', ([], {}), '()\n', (8455, 8457), False, 'from graph import MyGraph\n'), ((8909, 8936), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (8934, 8936), False, 'import configparser\n'), ((9236, 9252), 'graph.MyGraph.MyNode', 'MyGraph.MyNode', ([], {}), '()\n', (9250, 9252), False, 'from graph import MyGraph\n'), ((9582, 9635), 're.match', 're.match', (['"""^(pooling|maxpool|avgpool)_\\\\d+$"""', 'section'], {}), "('^(pooling|maxpool|avgpool)_\\\\d+$', section)\n", (9590, 9635), False, 'import re, sys, os\n'), ((9656, 9672), 'graph.MyGraph.MyNode', 'MyGraph.MyNode', ([], {}), '()\n', (9670, 9672), False, 'from graph import MyGraph\n'), ((10706, 10722), 'graph.MyGraph.MyNode', 'MyGraph.MyNode', ([], {}), '()\n', (10720, 10722), False, 'from graph import MyGraph\n'), ((11291, 11307), 'graph.MyGraph.MyNode', 'MyGraph.MyNode', ([], {}), '()\n', (11305, 11307), False, 'from graph import MyGraph\n'), ((11775, 11813), 're.match', 're.match', (['"""^(shortcut)_\\\\d+$"""', 'section'], {}), "('^(shortcut)_\\\\d+$', section)\n", (11783, 11813), False, 'import re, sys, os\n'), ((11945, 11961), 'graph.MyGraph.MyNode', 'MyGraph.MyNode', ([], {}), '()\n', (11959, 11961), False, 'from graph import MyGraph\n'), ((12449, 12476), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (12474, 12476), False, 'import configparser\n'), ((13359, 13375), 'graph.MyGraph.MyNode', 'MyGraph.MyNode', ([], {}), '()\n', (13373, 13375), False, 'from graph import MyGraph\n'), ((13691, 13707), 
'graph.MyGraph.MyNode', 'MyGraph.MyNode', ([], {}), '()\n', (13705, 13707), False, 'from graph import MyGraph\n'), ((14129, 14156), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (14154, 14156), False, 'import configparser\n'), ((14452, 14468), 'graph.MyGraph.MyNode', 'MyGraph.MyNode', ([], {}), '()\n', (14466, 14468), False, 'from graph import MyGraph\n'), ((15128, 15144), 'graph.MyGraph.MyNode', 'MyGraph.MyNode', ([], {}), '()\n', (15142, 15144), False, 'from graph import MyGraph\n'), ((15803, 15819), 'graph.MyGraph.MyNode', 'MyGraph.MyNode', ([], {}), '()\n', (15817, 15819), False, 'from graph import MyGraph\n'), ((15523, 15568), 're.split', 're.split', (['""","""', "cfg_parser[section]['anchors']"], {}), "(',', cfg_parser[section]['anchors'])\n", (15531, 15568), False, 'import re, sys, os\n')]
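The converter above is normally run as a script with a Darknet .cfg and .weights file; its __main__ block hard-codes the YoloV2 node names. Below is a hedged sketch of the equivalent programmatic use -- the file names are placeholders, and the import assumes darknet2ncnn.py is on the Python path.

# Illustrative only -- 'yolov2.cfg' / 'yolov2.weights' are placeholder paths.
from darknet2ncnn import buildGraph

graph = buildGraph('yolov2.cfg', 'yolov2.weights')
# Cut out the subgraph between the input node and the detection outputs,
# mirroring the node names used in the script's __main__ block.
graph.extractSubGraph(['darknet_0'], ['region_0', 'softmax_0'], [])
graph.generateDot('YoloV2.dot')
graph.generateSource('YoloV2', 'yolov2.cfg.ncnn', 'yolov2.weights.ncnn')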
anirudha-bs/Django_music_app
music/models.py
1b80bd4299a35fb707c32307dd115074a8ecba9f
from django.contrib.auth.models import Permission, User from django.db import models class Album(models.Model): user = models.ForeignKey(User, default=1,on_delete=models.CASCADE) artist = models.CharField(max_length=250) album_title = models.CharField(max_length=500) genre = models.CharField(max_length=100) album_logo = models.FileField(default="avatar.jpg") album_visibility = models.CharField(max_length=100, default="private") is_favorite = models.BooleanField(default=False) def __str__(self): return self.album_title + '-' + self.artist + '-' + self.genre class Song(models.Model): user = models.ForeignKey(User, default=1,on_delete=models.CASCADE) album = models.ForeignKey(Album, on_delete=models.CASCADE, null=True) song_title = models.CharField(max_length=250) audio_file = models.FileField(default='') song_visibility = models.CharField(max_length=100, default="private") is_favorite = models.BooleanField(default=False) def __str__(self): return self.song_title
[((125, 185), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'default': '(1)', 'on_delete': 'models.CASCADE'}), '(User, default=1, on_delete=models.CASCADE)\n', (142, 185), False, 'from django.db import models\n'), ((198, 230), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(250)'}), '(max_length=250)\n', (214, 230), False, 'from django.db import models\n'), ((249, 281), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (265, 281), False, 'from django.db import models\n'), ((294, 326), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (310, 326), False, 'from django.db import models\n'), ((344, 382), 'django.db.models.FileField', 'models.FileField', ([], {'default': '"""avatar.jpg"""'}), "(default='avatar.jpg')\n", (360, 382), False, 'from django.db import models\n'), ((406, 457), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'default': '"""private"""'}), "(max_length=100, default='private')\n", (422, 457), False, 'from django.db import models\n'), ((476, 510), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (495, 510), False, 'from django.db import models\n'), ((646, 706), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'default': '(1)', 'on_delete': 'models.CASCADE'}), '(User, default=1, on_delete=models.CASCADE)\n', (663, 706), False, 'from django.db import models\n'), ((718, 779), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Album'], {'on_delete': 'models.CASCADE', 'null': '(True)'}), '(Album, on_delete=models.CASCADE, null=True)\n', (735, 779), False, 'from django.db import models\n'), ((797, 829), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(250)'}), '(max_length=250)\n', (813, 829), False, 'from django.db import models\n'), ((847, 875), 'django.db.models.FileField', 'models.FileField', ([], {'default': '""""""'}), "(default='')\n", (863, 875), False, 'from django.db import models\n'), ((898, 949), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'default': '"""private"""'}), "(max_length=100, default='private')\n", (914, 949), False, 'from django.db import models\n'), ((968, 1002), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (987, 1002), False, 'from django.db import models\n')]
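A small, illustrative sketch of how the Album and Song models above could be used from a Django shell; it assumes migrations have been applied and at least one User exists. All field values are placeholders.

# Illustrative ORM usage (e.g. inside `python manage.py shell`):
from django.contrib.auth.models import User
from music.models import Album, Song

user = User.objects.first()
album = Album.objects.create(
    user=user,
    artist="Example Artist",
    album_title="Example Album",
    genre="Rock",
    album_visibility="public",
)
song = Song.objects.create(
    user=user,
    album=album,
    song_title="Example Song",
    song_visibility="public",
)
print(album)             # "Example Album-Example Artist-Rock" via __str__
print(song.is_favorite)  # False (default)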
yihming/gdax-data
finex_history.py
7e562f314e9ef12eb6be2df3b97190af632c4530
import datetime import calendar import requests import pandas as pd import json import os.path import time import MySQLdb as M from gdax_history import timestamp_to_utcstr def connect_to_db(): config = json.load(open('dbconn.json'))["mysql"] db = M.connect(host = config["host"], user = config["user"], passwd = config["password"], db = config["database"]) return db def write_to_db(df, db): print "Write %d entries to database." % df.shape[0] cur = db.cursor() try: for row in df.itertuples(): ts = row.Time / 1000 cur.execute( """INSERT INTO finex_history (timestamp, open, close, high, low, volume, utc_datetime) VALUES (%s, %s, %s, %s, %s, %s, %s)""", [ts, row.Open, row.Close, row.High, row.Low, row.Volume, timestamp_to_utcstr(ts)]) db.commit() print "Write successfully!\n" except (M.Error, M.Warning) as e: print e db.rollback() def collect_data(start, end): starttime = datetime.datetime.strptime(start, '%m/%d/%Y') endtime = datetime.datetime.strptime(end, '%m/%d/%Y') start_unixtime = calendar.timegm(starttime.utctimetuple()) end_unixtime = calendar.timegm(endtime.utctimetuple()) track_time = time.time() #because bitstamp only allows 10 requests per minute. Take rest if we are faster than that count = 0 df = pd.DataFrame(data = [], columns = ['Time', 'Open', 'Close', 'High', 'Low', 'Volume']) while (start_unixtime < end_unixtime): cur_end_unixtime = start_unixtime + 60 * 999 #60*60*24*30 #30 days at a time if (cur_end_unixtime > end_unixtime): cur_end_unixtime = end_unixtime #if the time is in future. url = 'https://api.bitfinex.com/v2/candles/trade:1m:tBTCUSD/hist?start={}&end={}&limit=1000'.format(str(start_unixtime) + "000", str(cur_end_unixtime) + "000") #1 hour can be changed to any timeframe response = requests.get(url) data = response.json() df_tmp = pd.DataFrame(data) df_tmp.columns = ['Time', 'Open', 'Close', 'High', 'Low', 'Volume'] #df.set_index('Time') df = pd.concat([df, df_tmp]) start_unixtime = cur_end_unixtime + 60 #to prevent duplicates count = count + 1 if (count == 10): #if 10 requests are made count = 0 #reset it diff = time.time() - track_time if (diff <= 60): print('Sleeping for {} seconds'.format(str(60 - diff))) time.sleep(60 - diff) #sleep track_time = time.time() #bitstamp limits to 10 requests per minute df = df.sort_values(by = ['Time']) return df def main(): db = connect_to_db() df = collect_data(start = '09/24/2018', end = '09/26/2018') write_to_db(df, db) db.close() if __name__ == "__main__": main()
[]
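connect_to_db() above reads its credentials from a dbconn.json file, and write_to_db() inserts into a finex_history table that must already exist. The sketch below shows the JSON layout implied by the code; all values are placeholders.

# Example dbconn.json expected by connect_to_db() -- placeholder credentials:
#
# {
#     "mysql": {
#         "host": "localhost",
#         "user": "finex_user",
#         "password": "change-me",
#         "database": "market_data"
#     }
# }
#
# write_to_db() additionally assumes a table roughly like
#   finex_history(timestamp, open, close, high, low, volume, utc_datetime)
# with one row per one-minute candle returned by the exchange API.
import json

config = json.load(open('dbconn.json'))["mysql"]
assert {"host", "user", "password", "database"} <= set(config)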
cvelas31/public_transportation_streaming
src/producers/connector.py
903a1a147645e1b0783555db4bfc02098f7941ae
"""Configures a Kafka Connector for Postgres Station data""" import json import logging import requests from settings import Settings logger = logging.getLogger(__name__) KAFKA_CONNECT_URL = f"{Settings.URLs.KAFKA_CONNECT_URL}/connectors" CONNECTOR_NAME = "stations" def configure_connector(): """Starts and configures the Kafka Connect connector""" logging.debug("Creating or updating kafka connect connector...") resp = requests.get(f"{KAFKA_CONNECT_URL}/{CONNECTOR_NAME}") if resp.status_code == 200: logging.debug("Connector already created skipping recreation") return config = { "connector.class": "io.confluent.connect.jdbc.JdbcSourceConnector", "key.converter": "org.apache.kafka.connect.json.JsonConverter", "key.converter.schemas.enable": "false", "value.converter": "org.apache.kafka.connect.json.JsonConverter", "value.converter.schemas.enable": "false", "topic.prefix": "com.connect.transportation.", "connection.url": "jdbc:postgresql://postgres:5432/cta", "connection.user": "cta_admin", "connection.password": "chicago", "batch.max.rows": "500", "table.whitelist": "stations", "poll.interval.ms": "5000", # Poll every 5 seconds "mode": "incrementing", "incrementing.column.name": "stop_id", } # TODO: Complete the Kafka Connect Config below. # Directions: Use the JDBC Source Connector to connect to Postgres. Load the `stations` table # using incrementing mode, with `stop_id` as the incrementing column name. # Make sure to think about what an appropriate topic prefix would be, and how frequently Kafka # Connect should run this connector (hint: not very often!) data = json.dumps({"name": CONNECTOR_NAME, "config": config}) resp = requests.post( KAFKA_CONNECT_URL, headers={"Content-Type": "application/json"}, data=data, ) # Ensure a healthy response was given resp.raise_for_status() logging.info("-------Connector created successfully-------") if __name__ == "__main__": configure_connector()
[((145, 172), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (162, 172), False, 'import logging\n'), ((364, 428), 'logging.debug', 'logging.debug', (['"""Creating or updating kafka connect connector..."""'], {}), "('Creating or updating kafka connect connector...')\n", (377, 428), False, 'import logging\n'), ((441, 494), 'requests.get', 'requests.get', (['f"""{KAFKA_CONNECT_URL}/{CONNECTOR_NAME}"""'], {}), "(f'{KAFKA_CONNECT_URL}/{CONNECTOR_NAME}')\n", (453, 494), False, 'import requests\n'), ((1775, 1829), 'json.dumps', 'json.dumps', (["{'name': CONNECTOR_NAME, 'config': config}"], {}), "({'name': CONNECTOR_NAME, 'config': config})\n", (1785, 1829), False, 'import json\n'), ((1842, 1935), 'requests.post', 'requests.post', (['KAFKA_CONNECT_URL'], {'headers': "{'Content-Type': 'application/json'}", 'data': 'data'}), "(KAFKA_CONNECT_URL, headers={'Content-Type':\n 'application/json'}, data=data)\n", (1855, 1935), False, 'import requests\n'), ((2038, 2098), 'logging.info', 'logging.info', (['"""-------Connector created successfully-------"""'], {}), "('-------Connector created successfully-------')\n", (2050, 2098), False, 'import logging\n'), ((535, 597), 'logging.debug', 'logging.debug', (['"""Connector already created skipping recreation"""'], {}), "('Connector already created skipping recreation')\n", (548, 597), False, 'import logging\n')]
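A hedged sketch of how the connector registered above can be checked or recreated through the same Kafka Connect REST endpoints the module already uses (GET and DELETE on /connectors/{name}); the `from connector import ...` path is an assumption about how the module is imported.

# Illustrative helpers built on the endpoints used above (not in the original file).
import requests

from connector import KAFKA_CONNECT_URL, CONNECTOR_NAME, configure_connector  # assumed module name


def connector_exists():
    # 200 -> already registered, 404 -> not registered yet
    return requests.get(f"{KAFKA_CONNECT_URL}/{CONNECTOR_NAME}").status_code == 200


def recreate_connector():
    # Drop the connector if present (a 404 here is fine), then register it again.
    requests.delete(f"{KAFKA_CONNECT_URL}/{CONNECTOR_NAME}")
    configure_connector()


if __name__ == "__main__":
    recreate_connector()
    print("registered:", connector_exists())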
wasiahmad/GATE
liuetal2019/utils.py
1e48504a3641f00265a271a19eb6b6449fdc33bd
import io import logging import json import numpy import torch import numpy as np from tqdm import tqdm from clie.inputters import constant from clie.objects import Sentence from torch.utils.data import Dataset from torch.utils.data.sampler import Sampler logger = logging.getLogger(__name__) def load_word_embeddings(file): embeddings_index = {} fin = io.open(file, 'r', encoding='utf-8', newline='\n', errors='ignore') n, d = map(int, fin.readline().split()) for i, line in tqdm(enumerate(fin), total=n): tokens = line.rstrip().split(' ') v = numpy.array(tokens[1:], dtype=float) embeddings_index[tokens[0]] = v return embeddings_index # ------------------------------------------------------------------------------ # Data loading # ------------------------------------------------------------------------------ def load_data(filename, src_lang, tgt_lang, knn_file, knn_size, max_examples=-1): examples = [] wrong_subj_pos, wrong_obj_pos = 0, 0 with open(filename) as f: data = json.load(f) knn_dict = None if knn_file: with open(knn_file) as f: knn_dict = json.load(f) for idx, ex in enumerate(tqdm(data, total=len(data))): sentence = Sentence(ex['id']) sentence.language = src_lang sentence.words = ex['token'] sentence.pos = ex['stanford_pos'] sentence.ner = ex['stanford_ner'] sentence.deprel = ex['stanford_deprel'] sentence.head = [int(x) for x in ex['stanford_head']] sentence.subj_type = ex['subj_type'] sentence.obj_type = ex['obj_type'] sentence.relation = ex['relation'] if ex['subj_end'] - ex['subj_start'] < 0: # we swap the start and end index wrong_subj_pos += 1 sentence.subject = [ex['subj_end'], ex['subj_start']] else: sentence.subject = [ex['subj_start'], ex['subj_end']] if ex['obj_end'] - ex['obj_start'] < 0: # we swap the start and end index wrong_obj_pos += 1 sentence.object = [ex['obj_end'], ex['obj_start']] else: sentence.object = [ex['obj_start'], ex['obj_end']] # store KNN word info if knn_dict: sentence.tgt_lang = tgt_lang knn_words = [] for w in ex['token']: w = '!{}_{}'.format(src_lang, w) if w in knn_dict: assert len(knn_dict[w]) == knn_size knn_words.append(knn_dict[w]) else: knn_words.append([constant.UNK_WORD] * knn_size) sentence.knn_words = knn_words examples.append(sentence) if max_examples != -1 and len(examples) > max_examples: break if wrong_subj_pos > 0 or wrong_obj_pos > 0: logger.info('{} and {} wrong subject and object positions found!'.format( wrong_subj_pos, wrong_obj_pos)) return examples def vectorize(ex, model, iseval): """Torchify a single example.""" words = ['!{}_{}'.format(ex.language, w) for w in ex.words] words = [model.word_dict[w] for w in words] knn_word = None if ex.knn_words: knn_word = [[model.word_dict[w] for w in knn] for knn in ex.knn_words] knn_word = torch.LongTensor(knn_word) word = torch.LongTensor(words) pos = torch.LongTensor([model.pos_dict[p] for p in ex.pos]) ner = torch.LongTensor([model.ner_dict[n] for n in ex.ner]) deprel = torch.LongTensor([model.deprel_dict[d] for d in ex.deprel]) assert any([x == 0 for x in ex.head]) head = torch.LongTensor(ex.head) subj_position = torch.LongTensor(ex.subj_position) obj_position = torch.LongTensor(ex.obj_position) type = [0] * len(ex.words) ttype = model.type_dict[ex.subj_type] start, end = ex.subject type[start: end + 1] = [ttype] * (end - start + 1) atype = model.type_dict[ex.obj_type] start, end = ex.object type[start: end + 1] = [atype] * (end - start + 1) type = torch.LongTensor(type) return { 'id': ex.id, 'language': ex.language, 'word': word, 'pos': pos, 'ner': ner, 'deprel': deprel, 'type': 
type, 'head': head, 'subject': ex.subj_text, 'object': ex.obj_text, 'subject_pos': subj_position, 'object_pos': obj_position, 'relation': model.label_dict[ex.relation], 'knn_word': knn_word } def batchify(batch): """Gather a batch of individual examples into one batch.""" # batch is a list of vectorized examples batch_size = len(batch) ids = [ex['id'] for ex in batch] language = [ex['language'] for ex in batch] use_knn = batch[0]['knn_word'] is not None # NOTE. batch[0]['knn_word'] is a 2d list knn_size = len(batch[0]['knn_word'][0]) if use_knn else 0 # --------- Prepare Code tensors --------- max_len = max([ex['word'].size(0) for ex in batch]) # Batch Code Representations len_rep = torch.LongTensor(batch_size).fill_(constant.PAD) word_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) head_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) subject_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) object_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) ner_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) deprel_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) type_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD) labels = torch.LongTensor(batch_size) subject = [] object = [] knn_rep = None if use_knn: knn_rep = torch.LongTensor(batch_size, max_len, knn_size).fill_(constant.PAD) for i, ex in enumerate(batch): len_rep[i] = ex['word'].size(0) labels[i] = ex['relation'] word_rep[i, :len_rep[i]] = ex['word'] head_rep[i, :len_rep[i]] = ex['head'] subject_pos_rep[i, :len_rep[i]] = ex['subject_pos'] object_pos_rep[i, :len_rep[i]] = ex['object_pos'] pos_rep[i, :len_rep[i]] = ex['pos'] ner_rep[i, :len_rep[i]] = ex['ner'] deprel_rep[i, :len_rep[i]] = ex['deprel'] type_rep[i, :len_rep[i]] = ex['type'] subject.append(ex['subject']) object.append(ex['object']) if use_knn: knn_rep[i, :len_rep[i]] = ex['knn_word'] return { 'ids': ids, 'language': language, 'batch_size': batch_size, 'len_rep': len_rep, 'word_rep': word_rep, 'knn_rep': knn_rep, 'head_rep': head_rep, 'subject': subject, 'object': object, 'subject_pos_rep': subject_pos_rep, 'object_pos_rep': object_pos_rep, 'labels': labels, 'pos_rep': pos_rep, 'ner_rep': ner_rep, 'deprel_rep': deprel_rep, 'type_rep': type_rep } class ACE05Dataset(Dataset): def __init__(self, examples, model, evaluation=False): self.model = model self.examples = examples self.evaluation = evaluation def __len__(self): return len(self.examples) def __getitem__(self, index): return vectorize(self.examples[index], self.model, iseval=self.evaluation) def lengths(self): return [len(ex.words) for ex in self.examples] class SortedBatchSampler(Sampler): def __init__(self, lengths, batch_size, shuffle=True): self.lengths = lengths self.batch_size = batch_size self.shuffle = shuffle def __iter__(self): lengths = np.array( [(-l, np.random.random()) for l in self.lengths], dtype=[('l1', np.int_), ('rand', np.float_)] ) indices = np.argsort(lengths, order=('l1', 'rand')) batches = [indices[i:i + self.batch_size] for i in range(0, len(indices), self.batch_size)] if self.shuffle: np.random.shuffle(batches) return iter([i for batch in batches for i in batch]) def __len__(self): return len(self.lengths)
[((266, 293), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (283, 293), False, 'import logging\n'), ((364, 431), 'io.open', 'io.open', (['file', '"""r"""'], {'encoding': '"""utf-8"""', 'newline': '"""\n"""', 'errors': '"""ignore"""'}), "(file, 'r', encoding='utf-8', newline='\\n', errors='ignore')\n", (371, 431), False, 'import io\n'), ((3566, 3589), 'torch.LongTensor', 'torch.LongTensor', (['words'], {}), '(words)\n', (3582, 3589), False, 'import torch\n'), ((3600, 3653), 'torch.LongTensor', 'torch.LongTensor', (['[model.pos_dict[p] for p in ex.pos]'], {}), '([model.pos_dict[p] for p in ex.pos])\n', (3616, 3653), False, 'import torch\n'), ((3664, 3717), 'torch.LongTensor', 'torch.LongTensor', (['[model.ner_dict[n] for n in ex.ner]'], {}), '([model.ner_dict[n] for n in ex.ner])\n', (3680, 3717), False, 'import torch\n'), ((3731, 3790), 'torch.LongTensor', 'torch.LongTensor', (['[model.deprel_dict[d] for d in ex.deprel]'], {}), '([model.deprel_dict[d] for d in ex.deprel])\n', (3747, 3790), False, 'import torch\n'), ((3844, 3869), 'torch.LongTensor', 'torch.LongTensor', (['ex.head'], {}), '(ex.head)\n', (3860, 3869), False, 'import torch\n'), ((3890, 3924), 'torch.LongTensor', 'torch.LongTensor', (['ex.subj_position'], {}), '(ex.subj_position)\n', (3906, 3924), False, 'import torch\n'), ((3944, 3977), 'torch.LongTensor', 'torch.LongTensor', (['ex.obj_position'], {}), '(ex.obj_position)\n', (3960, 3977), False, 'import torch\n'), ((4269, 4291), 'torch.LongTensor', 'torch.LongTensor', (['type'], {}), '(type)\n', (4285, 4291), False, 'import torch\n'), ((5928, 5956), 'torch.LongTensor', 'torch.LongTensor', (['batch_size'], {}), '(batch_size)\n', (5944, 5956), False, 'import torch\n'), ((580, 616), 'numpy.array', 'numpy.array', (['tokens[1:]'], {'dtype': 'float'}), '(tokens[1:], dtype=float)\n', (591, 616), False, 'import numpy\n'), ((1067, 1079), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1076, 1079), False, 'import json\n'), ((3527, 3553), 'torch.LongTensor', 'torch.LongTensor', (['knn_word'], {}), '(knn_word)\n', (3543, 3553), False, 'import torch\n'), ((8131, 8172), 'numpy.argsort', 'np.argsort', (['lengths'], {'order': "('l1', 'rand')"}), "(lengths, order=('l1', 'rand'))\n", (8141, 8172), True, 'import numpy as np\n'), ((1289, 1307), 'clie.objects.Sentence', 'Sentence', (["ex['id']"], {}), "(ex['id'])\n", (1297, 1307), False, 'from clie.objects import Sentence\n'), ((5269, 5297), 'torch.LongTensor', 'torch.LongTensor', (['batch_size'], {}), '(batch_size)\n', (5285, 5297), False, 'import torch\n'), ((5333, 5370), 'torch.LongTensor', 'torch.LongTensor', (['batch_size', 'max_len'], {}), '(batch_size, max_len)\n', (5349, 5370), False, 'import torch\n'), ((5406, 5443), 'torch.LongTensor', 'torch.LongTensor', (['batch_size', 'max_len'], {}), '(batch_size, max_len)\n', (5422, 5443), False, 'import torch\n'), ((5486, 5523), 'torch.LongTensor', 'torch.LongTensor', (['batch_size', 'max_len'], {}), '(batch_size, max_len)\n', (5502, 5523), False, 'import torch\n'), ((5565, 5602), 'torch.LongTensor', 'torch.LongTensor', (['batch_size', 'max_len'], {}), '(batch_size, max_len)\n', (5581, 5602), False, 'import torch\n'), ((5637, 5674), 'torch.LongTensor', 'torch.LongTensor', (['batch_size', 'max_len'], {}), '(batch_size, max_len)\n', (5653, 5674), False, 'import torch\n'), ((5709, 5746), 'torch.LongTensor', 'torch.LongTensor', (['batch_size', 'max_len'], {}), '(batch_size, max_len)\n', (5725, 5746), False, 'import torch\n'), ((5784, 5821), 'torch.LongTensor', 
'torch.LongTensor', (['batch_size', 'max_len'], {}), '(batch_size, max_len)\n', (5800, 5821), False, 'import torch\n'), ((5857, 5894), 'torch.LongTensor', 'torch.LongTensor', (['batch_size', 'max_len'], {}), '(batch_size, max_len)\n', (5873, 5894), False, 'import torch\n'), ((8329, 8355), 'numpy.random.shuffle', 'np.random.shuffle', (['batches'], {}), '(batches)\n', (8346, 8355), True, 'import numpy as np\n'), ((1190, 1202), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1199, 1202), False, 'import json\n'), ((6044, 6091), 'torch.LongTensor', 'torch.LongTensor', (['batch_size', 'max_len', 'knn_size'], {}), '(batch_size, max_len, knn_size)\n', (6060, 6091), False, 'import torch\n'), ((8002, 8020), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (8018, 8020), True, 'import numpy as np\n')]
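The record above defines the vectorize/batchify pipeline for a relation-extraction corpus. As a minimal, hypothetical usage sketch (the `examples` and `model` objects are assumed to be built elsewhere and are not part of the record), the pieces are typically wired into a PyTorch DataLoader like this:

from torch.utils.data import DataLoader

train_dataset = ACE05Dataset(examples, model, evaluation=False)   # examples/model: assumed inputs
train_sampler = SortedBatchSampler(train_dataset.lengths(), batch_size=32, shuffle=True)
train_loader = DataLoader(train_dataset,
                          batch_size=32,
                          sampler=train_sampler,
                          collate_fn=batchify)   # batchify pads and stacks the vectorized examples

for batch in train_loader:
    word_rep = batch['word_rep']   # LongTensor of shape (batch_size, max_len)
    labels = batch['labels']       # LongTensor of relation label ids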
dnanexus/IndexTools
build.py
0392b3be92ff50b401290b59e9ca6c7767fa5a96
from distutils.extension import Extension

cmdclass = {}

try:
    # with Cython
    from Cython.Build import build_ext
    cmdclass["build_ext"] = build_ext
    module_src = "cgranges/python/cgranges.pyx"
except ImportError:
    # without Cython
    module_src = "cgranges/python/cgranges.c"


def build(setup_kwargs):
    """
    This function is mandatory in order to build the extensions.
    """
    setup_kwargs.update(
        {
            "ext_modules": [
                Extension(
                    "cgranges",
                    sources=[module_src, "cgranges/cgranges.c"],
                    depends=[
                        "cgranges/cgranges.h",
                        "cgranges/khash.h",
                        "cgranges/python/cgranges.pyx"
                    ],
                    include_dirs=["cgranges"]
                )
            ],
            "cmdclass": cmdclass
        }
    )
[((479, 666), 'distutils.extension.Extension', 'Extension', (['"""cgranges"""'], {'sources': "[module_src, 'cgranges/cgranges.c']", 'depends': "['cgranges/cgranges.h', 'cgranges/khash.h', 'cgranges/python/cgranges.pyx']", 'include_dirs': "['cgranges']"}), "('cgranges', sources=[module_src, 'cgranges/cgranges.c'], depends=\n ['cgranges/cgranges.h', 'cgranges/khash.h',\n 'cgranges/python/cgranges.pyx'], include_dirs=['cgranges'])\n", (488, 666), False, 'from distutils.extension import Extension\n')]
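This build.py follows the common Poetry/setuptools hook pattern: a build front end imports the module and calls build(setup_kwargs) so the Cython/C extension is attached before compilation. A hypothetical sketch of that call site (the package name and packages list are placeholders, not taken from the repository):

from setuptools import setup
from build import build   # the build.py shown above

setup_kwargs = {"name": "indextools", "packages": ["indextools"]}   # placeholder values
build(setup_kwargs)       # injects "ext_modules" and "cmdclass" into the kwargs in place
setup(**setup_kwargs)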
oascigil/icarus_edge_comp
icarus/models/service/__init__.py
b7bb9f9b8d0f27b4b01469dcba9cfc0c4949d64b
# -*- coding: utf-8 -*-
from .compSpot import *
[]
YvetteGuo/gluon-cv
gluoncv/data/kinetics400/classification.py
123af8cf9f15a879c16a5c7d12f01ce1471d85b6
# pylint: disable=line-too-long,too-many-lines,missing-docstring """Kinetics400 action classification dataset.""" import os import random import numpy as np from mxnet import nd from mxnet.gluon.data import dataset __all__ = ['Kinetics400'] class Kinetics400(dataset.Dataset): """Load the Kinetics400 action recognition dataset. Refer to :doc:`../build/examples_datasets/kinetics400` for the description of this dataset and how to prepare it. Parameters ---------- root : str, default '~/.mxnet/datasets/kinetics400' Path to the folder stored the dataset. setting : str, required Config file of the prepared dataset. train : bool, default True Whether to load the training or validation set. test_mode : bool, default False Whether to perform evaluation on the test set name_pattern : str, default None The naming pattern of the decoded video frames. For example, img_00012.jpg is_color : bool, default True Whether the loaded image is color or grayscale modality : str, default 'rgb' Input modalities, we support only rgb video frames for now. Will add support for rgb difference image and optical flow image later. num_segments : int, default 1 Number of segments to evenly divide the video into clips. A useful technique to obtain global video-level information. Limin Wang, etal, Temporal Segment Networks: Towards Good Practices for Deep Action Recognition, ECCV 2016 new_length : int, default 1 The length of input video clip. Default is a single image, but it can be multiple video frames. For example, new_length=16 means we will extract a video clip of consecutive 16 frames. new_width : int, default 340 Scale the width of loaded image to 'new_width' for later multiscale cropping and resizing. new_height : int, default 256 Scale the height of loaded image to 'new_height' for later multiscale cropping and resizing. target_width : int, default 224 Scale the width of transformed image to the same 'target_width' for batch forwarding. target_height : int, default 224 Scale the height of transformed image to the same 'target_height' for batch forwarding. transform : function, default None A function that takes data and label and transforms them. 
""" def __init__(self, setting=os.path.expanduser('~/.mxnet/datasets/kinetics400/kinetics400_train_list_rawframes.txt'), root=os.path.expanduser('~/.mxnet/datasets/kinetics400/rawframes_train'), train=True, test_mode=False, name_pattern=None, is_color=True, modality='rgb', num_segments=1, new_length=1, new_width=340, new_height=256, target_width=224, target_height=224, transform=None): super(Kinetics400, self).__init__() self.root = root self.setting = setting self.train = train self.test_mode = test_mode self.is_color = is_color self.modality = modality self.num_segments = num_segments self.new_height = new_height self.new_width = new_width self.target_height = target_height self.target_width = target_width self.new_length = new_length self.transform = transform self.classes, self.class_to_idx = self._find_classes(root) self.clips = self._make_dataset(root, setting) if len(self.clips) == 0: raise(RuntimeError("Found 0 video clips in subfolders of: " + root + "\n" "Check your data directory (opt.data-dir).")) if name_pattern: self.name_pattern = name_pattern else: if self.modality == "rgb": self.name_pattern = "img_%05d.jpg" elif self.modality == "flow": self.name_pattern = "flow_%s_%05d.jpg" def __getitem__(self, index): directory, duration, target = self.clips[index] average_duration = int(duration / self.num_segments) offsets = [] for seg_id in range(self.num_segments): if self.train and not self.test_mode: # training if average_duration >= self.new_length: offset = random.randint(0, average_duration - self.new_length) # No +1 because randint(a,b) return a random integer N such that a <= N <= b. offsets.append(offset + seg_id * average_duration) else: offsets.append(0) elif not self.train and not self.test_mode: # validation if average_duration >= self.new_length: offsets.append(int((average_duration - self.new_length + 1)/2 + seg_id * average_duration)) else: offsets.append(0) else: # test if average_duration >= self.new_length: offsets.append(int((average_duration - self.new_length + 1)/2 + seg_id * average_duration)) else: offsets.append(0) clip_input = self._TSN_RGB(directory, offsets, self.new_height, self.new_width, self.new_length, self.is_color, self.name_pattern) if self.transform is not None: clip_input = self.transform(clip_input) if self.num_segments > 1 and not self.test_mode: # For TSN training, reshape the input to B x 3 x H x W. Here, B = batch_size * num_segments clip_input = clip_input.reshape((-1, 3 * self.new_length, self.target_height, self.target_width)) return clip_input, target def __len__(self): return len(self.clips) def _find_classes(self, directory): classes = [d for d in os.listdir(directory) if os.path.isdir(os.path.join(directory, d))] classes.sort() class_to_idx = {classes[i]: i for i in range(len(classes))} return classes, class_to_idx def _make_dataset(self, directory, setting): if not os.path.exists(setting): raise(RuntimeError("Setting file %s doesn't exist. Check opt.train-list and opt.val-list. " % (setting))) clips = [] with open(setting) as split_f: data = split_f.readlines() for line in data: line_info = line.split() # line format: video_path, video_duration, video_label if len(line_info) < 3: print('Video input format is not correct, missing one or more element. 
%s' % line) continue clip_path = os.path.join(directory, line_info[0]) duration = int(line_info[1]) target = int(line_info[2]) item = (clip_path, duration, target) clips.append(item) return clips def _TSN_RGB(self, directory, offsets, new_height, new_width, new_length, is_color, name_pattern): from ...utils.filesystem import try_import_cv2 cv2 = try_import_cv2() if is_color: cv_read_flag = cv2.IMREAD_COLOR else: cv_read_flag = cv2.IMREAD_GRAYSCALE interpolation = cv2.INTER_LINEAR sampled_list = [] for _, offset in enumerate(offsets): for length_id in range(1, new_length+1): frame_name = name_pattern % (length_id + offset) frame_path = directory + "/" + frame_name cv_img_origin = cv2.imread(frame_path, cv_read_flag) if cv_img_origin is None: raise(RuntimeError("Could not load file %s. Check data path." % (frame_path))) if new_width > 0 and new_height > 0: cv_img = cv2.resize(cv_img_origin, (new_width, new_height), interpolation) else: cv_img = cv_img_origin cv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB) sampled_list.append(cv_img) # the shape of clip_input will be H x W x C, and C = num_segments * new_length * 3 clip_input = np.concatenate(sampled_list, axis=2) return nd.array(clip_input) class Kinetics400Attr(object): def __init__(self): self.num_class = 400 self.classes = ['abseiling', 'air_drumming', 'answering_questions', 'applauding', 'applying_cream', 'archery', 'arm_wrestling', 'arranging_flowers', 'assembling_computer', 'auctioning', 'baby_waking_up', 'baking_cookies', 'balloon_blowing', 'bandaging', 'barbequing', 'bartending', 'beatboxing', 'bee_keeping', 'belly_dancing', 'bench_pressing', 'bending_back', 'bending_metal', 'biking_through_snow', 'blasting_sand', 'blowing_glass', 'blowing_leaves', 'blowing_nose', 'blowing_out_candles', 'bobsledding', 'bookbinding', 'bouncing_on_trampoline', 'bowling', 'braiding_hair', 'breading_or_breadcrumbing', 'breakdancing', 'brush_painting', 'brushing_hair', 'brushing_teeth', 'building_cabinet', 'building_shed', 'bungee_jumping', 'busking', 'canoeing_or_kayaking', 'capoeira', 'carrying_baby', 'cartwheeling', 'carving_pumpkin', 'catching_fish', 'catching_or_throwing_baseball', 'catching_or_throwing_frisbee', 'catching_or_throwing_softball', 'celebrating', 'changing_oil', 'changing_wheel', 'checking_tires', 'cheerleading', 'chopping_wood', 'clapping', 'clay_pottery_making', 'clean_and_jerk', 'cleaning_floor', 'cleaning_gutters', 'cleaning_pool', 'cleaning_shoes', 'cleaning_toilet', 'cleaning_windows', 'climbing_a_rope', 'climbing_ladder', 'climbing_tree', 'contact_juggling', 'cooking_chicken', 'cooking_egg', 'cooking_on_campfire', 'cooking_sausages', 'counting_money', 'country_line_dancing', 'cracking_neck', 'crawling_baby', 'crossing_river', 'crying', 'curling_hair', 'cutting_nails', 'cutting_pineapple', 'cutting_watermelon', 'dancing_ballet', 'dancing_charleston', 'dancing_gangnam_style', 'dancing_macarena', 'deadlifting', 'decorating_the_christmas_tree', 'digging', 'dining', 'disc_golfing', 'diving_cliff', 'dodgeball', 'doing_aerobics', 'doing_laundry', 'doing_nails', 'drawing', 'dribbling_basketball', 'drinking', 'drinking_beer', 'drinking_shots', 'driving_car', 'driving_tractor', 'drop_kicking', 'drumming_fingers', 'dunking_basketball', 'dying_hair', 'eating_burger', 'eating_cake', 'eating_carrots', 'eating_chips', 'eating_doughnuts', 'eating_hotdog', 'eating_ice_cream', 'eating_spaghetti', 'eating_watermelon', 'egg_hunting', 'exercising_arm', 'exercising_with_an_exercise_ball', 'extinguishing_fire', 'faceplanting', 'feeding_birds', 
'feeding_fish', 'feeding_goats', 'filling_eyebrows', 'finger_snapping', 'fixing_hair', 'flipping_pancake', 'flying_kite', 'folding_clothes', 'folding_napkins', 'folding_paper', 'front_raises', 'frying_vegetables', 'garbage_collecting', 'gargling', 'getting_a_haircut', 'getting_a_tattoo', 'giving_or_receiving_award', 'golf_chipping', 'golf_driving', 'golf_putting', 'grinding_meat', 'grooming_dog', 'grooming_horse', 'gymnastics_tumbling', 'hammer_throw', 'headbanging', 'headbutting', 'high_jump', 'high_kick', 'hitting_baseball', 'hockey_stop', 'holding_snake', 'hopscotch', 'hoverboarding', 'hugging', 'hula_hooping', 'hurdling', 'hurling_-sport-', 'ice_climbing', 'ice_fishing', 'ice_skating', 'ironing', 'javelin_throw', 'jetskiing', 'jogging', 'juggling_balls', 'juggling_fire', 'juggling_soccer_ball', 'jumping_into_pool', 'jumpstyle_dancing', 'kicking_field_goal', 'kicking_soccer_ball', 'kissing', 'kitesurfing', 'knitting', 'krumping', 'laughing', 'laying_bricks', 'long_jump', 'lunge', 'making_a_cake', 'making_a_sandwich', 'making_bed', 'making_jewelry', 'making_pizza', 'making_snowman', 'making_sushi', 'making_tea', 'marching', 'massaging_back', 'massaging_feet', 'massaging_legs', "massaging_person's_head", 'milking_cow', 'mopping_floor', 'motorcycling', 'moving_furniture', 'mowing_lawn', 'news_anchoring', 'opening_bottle', 'opening_present', 'paragliding', 'parasailing', 'parkour', 'passing_American_football_-in_game-', 'passing_American_football_-not_in_game-', 'peeling_apples', 'peeling_potatoes', 'petting_animal_-not_cat-', 'petting_cat', 'picking_fruit', 'planting_trees', 'plastering', 'playing_accordion', 'playing_badminton', 'playing_bagpipes', 'playing_basketball', 'playing_bass_guitar', 'playing_cards', 'playing_cello', 'playing_chess', 'playing_clarinet', 'playing_controller', 'playing_cricket', 'playing_cymbals', 'playing_didgeridoo', 'playing_drums', 'playing_flute', 'playing_guitar', 'playing_harmonica', 'playing_harp', 'playing_ice_hockey', 'playing_keyboard', 'playing_kickball', 'playing_monopoly', 'playing_organ', 'playing_paintball', 'playing_piano', 'playing_poker', 'playing_recorder', 'playing_saxophone', 'playing_squash_or_racquetball', 'playing_tennis', 'playing_trombone', 'playing_trumpet', 'playing_ukulele', 'playing_violin', 'playing_volleyball', 'playing_xylophone', 'pole_vault', 'presenting_weather_forecast', 'pull_ups', 'pumping_fist', 'pumping_gas', 'punching_bag', 'punching_person_-boxing-', 'push_up', 'pushing_car', 'pushing_cart', 'pushing_wheelchair', 'reading_book', 'reading_newspaper', 'recording_music', 'riding_a_bike', 'riding_camel', 'riding_elephant', 'riding_mechanical_bull', 'riding_mountain_bike', 'riding_mule', 'riding_or_walking_with_horse', 'riding_scooter', 'riding_unicycle', 'ripping_paper', 'robot_dancing', 'rock_climbing', 'rock_scissors_paper', 'roller_skating', 'running_on_treadmill', 'sailing', 'salsa_dancing', 'sanding_floor', 'scrambling_eggs', 'scuba_diving', 'setting_table', 'shaking_hands', 'shaking_head', 'sharpening_knives', 'sharpening_pencil', 'shaving_head', 'shaving_legs', 'shearing_sheep', 'shining_shoes', 'shooting_basketball', 'shooting_goal_-soccer-', 'shot_put', 'shoveling_snow', 'shredding_paper', 'shuffling_cards', 'side_kick', 'sign_language_interpreting', 'singing', 'situp', 'skateboarding', 'ski_jumping', 'skiing_-not_slalom_or_crosscountry-', 'skiing_crosscountry', 'skiing_slalom', 'skipping_rope', 'skydiving', 'slacklining', 'slapping', 'sled_dog_racing', 'smoking', 'smoking_hookah', 'snatch_weight_lifting', 
'sneezing', 'sniffing', 'snorkeling', 'snowboarding', 'snowkiting', 'snowmobiling', 'somersaulting', 'spinning_poi', 'spray_painting', 'spraying', 'springboard_diving', 'squat', 'sticking_tongue_out', 'stomping_grapes', 'stretching_arm', 'stretching_leg', 'strumming_guitar', 'surfing_crowd', 'surfing_water', 'sweeping_floor', 'swimming_backstroke', 'swimming_breast_stroke', 'swimming_butterfly_stroke', 'swing_dancing', 'swinging_legs', 'swinging_on_something', 'sword_fighting', 'tai_chi', 'taking_a_shower', 'tango_dancing', 'tap_dancing', 'tapping_guitar', 'tapping_pen', 'tasting_beer', 'tasting_food', 'testifying', 'texting', 'throwing_axe', 'throwing_ball', 'throwing_discus', 'tickling', 'tobogganing', 'tossing_coin', 'tossing_salad', 'training_dog', 'trapezing', 'trimming_or_shaving_beard', 'trimming_trees', 'triple_jump', 'tying_bow_tie', 'tying_knot_-not_on_a_tie-', 'tying_tie', 'unboxing', 'unloading_truck', 'using_computer', 'using_remote_controller_-not_gaming-', 'using_segway', 'vault', 'waiting_in_line', 'walking_the_dog', 'washing_dishes', 'washing_feet', 'washing_hair', 'washing_hands', 'water_skiing', 'water_sliding', 'watering_plants', 'waxing_back', 'waxing_chest', 'waxing_eyebrows', 'waxing_legs', 'weaving_basket', 'welding', 'whistling', 'windsurfing', 'wrapping_present', 'wrestling', 'writing', 'yawning', 'yoga', 'zumba']
[((2448, 2541), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.mxnet/datasets/kinetics400/kinetics400_train_list_rawframes.txt"""'], {}), "(\n '~/.mxnet/datasets/kinetics400/kinetics400_train_list_rawframes.txt')\n", (2466, 2541), False, 'import os\n'), ((2560, 2627), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.mxnet/datasets/kinetics400/rawframes_train"""'], {}), "('~/.mxnet/datasets/kinetics400/rawframes_train')\n", (2578, 2627), False, 'import os\n'), ((8362, 8398), 'numpy.concatenate', 'np.concatenate', (['sampled_list'], {'axis': '(2)'}), '(sampled_list, axis=2)\n', (8376, 8398), True, 'import numpy as np\n'), ((8414, 8434), 'mxnet.nd.array', 'nd.array', (['clip_input'], {}), '(clip_input)\n', (8422, 8434), False, 'from mxnet import nd\n'), ((6294, 6317), 'os.path.exists', 'os.path.exists', (['setting'], {}), '(setting)\n', (6308, 6317), False, 'import os\n'), ((6032, 6053), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (6042, 6053), False, 'import os\n'), ((6875, 6912), 'os.path.join', 'os.path.join', (['directory', 'line_info[0]'], {}), '(directory, line_info[0])\n', (6887, 6912), False, 'import os\n'), ((4501, 4554), 'random.randint', 'random.randint', (['(0)', '(average_duration - self.new_length)'], {}), '(0, average_duration - self.new_length)\n', (4515, 4554), False, 'import random\n'), ((6071, 6097), 'os.path.join', 'os.path.join', (['directory', 'd'], {}), '(directory, d)\n', (6083, 6097), False, 'import os\n')]
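A minimal, hypothetical usage sketch for the dataset class above (paths follow the constructor defaults; num_segments is kept at 1 and no transform is applied, so a raw decoded frame is returned):

import os
from gluoncv.data import Kinetics400   # assumes the class is re-exported by the package

train_dataset = Kinetics400(
    setting=os.path.expanduser('~/.mxnet/datasets/kinetics400/kinetics400_train_list_rawframes.txt'),
    root=os.path.expanduser('~/.mxnet/datasets/kinetics400/rawframes_train'),
    train=True,
    num_segments=1,
    new_length=1,
    transform=None)

clip, label = train_dataset[0]
print(len(train_dataset), clip.shape, label)   # clip: raw H x W x 3 NDArray since no transform is applied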
webclinic017/qf-lib
qf_lib/containers/futures/future_contract.py
96463876719bba8a76c8269cef76addf3a2d836d
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from datetime import datetime

from qf_lib.common.tickers.tickers import Ticker
from qf_lib.containers.dataframe.prices_dataframe import PricesDataFrame


class FutureContract(object):
    """ Class representing a single future contract.

    The FutureContract is a simple class representing one futures contract. The FutureContract objects are used by
    the FuturesChain, in order to provide the contracts chaining possibilities. It requires 3 parameters: ticker,
    which is the symbol of the specific future contract (e.g. BloombergFutureTicker(“CTZ9 Comdty”)), expiration date
    of the contract and a PricesDataFrame, containing dates with price field values.

    Parameters
    ----------
    ticker: Ticker
        symbol of the future contract
    exp_date: datetime
        expiration date
    data: PricesDataFrame
        data frame containing dates with price fields values
    """

    def __init__(self, ticker: Ticker, exp_date: datetime, data: PricesDataFrame):
        self.ticker = ticker
        self.exp_date = exp_date
        self.data = data

    def __str__(self):
        return 'Contract: ticker: {}, expiration date: {}'.format(
            self.ticker, self.exp_date)

    def __eq__(self, other):
        if self is other:
            return True

        if not isinstance(other, FutureContract):
            return False

        return (self.ticker, self.exp_date, self.data) == (other.ticker, other.exp_date, other.data)

    def __hash__(self):
        return hash((self.ticker, self.exp_date, self.data))
[]
ajaytikoo/watcher
watcher/api/controllers/v1/action_plan.py
6dbac1f6ae7f3e10dfdcef5721fa4af7af54e159
# -*- encoding: utf-8 -*- # Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ An :ref:`Action Plan <action_plan_definition>` specifies a flow of :ref:`Actions <action_definition>` that should be executed in order to satisfy a given :ref:`Goal <goal_definition>`. It also contains an estimated :ref:`global efficacy <efficacy_definition>` alongside a set of :ref:`efficacy indicators <efficacy_indicator_definition>`. An :ref:`Action Plan <action_plan_definition>` is generated by Watcher when an :ref:`Audit <audit_definition>` is successful which implies that the :ref:`Strategy <strategy_definition>` which was used has found a :ref:`Solution <solution_definition>` to achieve the :ref:`Goal <goal_definition>` of this :ref:`Audit <audit_definition>`. In the default implementation of Watcher, an action plan is composed of a list of successive :ref:`Actions <action_definition>` (i.e., a Workflow of :ref:`Actions <action_definition>` belonging to a unique branch). However, Watcher provides abstract interfaces for many of its components, allowing other implementations to generate and handle more complex :ref:`Action Plan(s) <action_plan_definition>` composed of two types of Action Item(s): - simple :ref:`Actions <action_definition>`: atomic tasks, which means it can not be split into smaller tasks or commands from an OpenStack point of view. - composite Actions: which are composed of several simple :ref:`Actions <action_definition>` ordered in sequential and/or parallel flows. An :ref:`Action Plan <action_plan_definition>` may be described using standard workflow model description formats such as `Business Process Model and Notation 2.0 (BPMN 2.0) <http://www.omg.org/spec/BPMN/2.0/>`_ or `Unified Modeling Language (UML) <http://www.uml.org/>`_. To see the life-cycle and description of :ref:`Action Plan <action_plan_definition>` states, visit :ref:`the Action Plan state machine <action_plan_state_machine>`. """ import datetime from http import HTTPStatus from oslo_log import log import pecan from pecan import rest import wsme from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from watcher._i18n import _ from watcher.api.controllers import base from watcher.api.controllers import link from watcher.api.controllers.v1 import collection from watcher.api.controllers.v1 import efficacy_indicator as efficacyindicator from watcher.api.controllers.v1 import types from watcher.api.controllers.v1 import utils as api_utils from watcher.applier import rpcapi from watcher.common import exception from watcher.common import policy from watcher.common import utils from watcher import objects from watcher.objects import action_plan as ap_objects LOG = log.getLogger(__name__) def hide_fields_in_newer_versions(obj): """This method hides fields that were added in newer API versions. Certain node fields were introduced at certain API versions. 
These fields are only made available when the request's API version matches or exceeds the versions when these fields were introduced. """ pass class ActionPlanPatchType(types.JsonPatchType): @staticmethod def _validate_state(patch): serialized_patch = {'path': patch.path, 'op': patch.op} if patch.value is not wtypes.Unset: serialized_patch['value'] = patch.value # todo: use state machines to handle state transitions state_value = patch.value if state_value and not hasattr(ap_objects.State, state_value): msg = _("Invalid state: %(state)s") raise exception.PatchError( patch=serialized_patch, reason=msg % dict(state=state_value)) @staticmethod def validate(patch): if patch.path == "/state": ActionPlanPatchType._validate_state(patch) return types.JsonPatchType.validate(patch) @staticmethod def internal_attrs(): return types.JsonPatchType.internal_attrs() @staticmethod def mandatory_attrs(): return ["audit_id", "state"] class ActionPlan(base.APIBase): """API representation of a action plan. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of an action plan. """ _audit_uuid = None _strategy_uuid = None _strategy_name = None _efficacy_indicators = None def _get_audit_uuid(self): return self._audit_uuid def _set_audit_uuid(self, value): if value == wtypes.Unset: self._audit_uuid = wtypes.Unset elif value and self._audit_uuid != value: try: audit = objects.Audit.get(pecan.request.context, value) self._audit_uuid = audit.uuid self.audit_id = audit.id except exception.AuditNotFound: self._audit_uuid = None def _get_efficacy_indicators(self): if self._efficacy_indicators is None: self._set_efficacy_indicators(wtypes.Unset) return self._efficacy_indicators def _set_efficacy_indicators(self, value): efficacy_indicators = [] if value == wtypes.Unset and not self._efficacy_indicators: try: _efficacy_indicators = objects.EfficacyIndicator.list( pecan.request.context, filters={"action_plan_uuid": self.uuid}) for indicator in _efficacy_indicators: efficacy_indicator = efficacyindicator.EfficacyIndicator( context=pecan.request.context, name=indicator.name, description=indicator.description, unit=indicator.unit, value=float(indicator.value), ) efficacy_indicators.append(efficacy_indicator.as_dict()) self._efficacy_indicators = efficacy_indicators except exception.EfficacyIndicatorNotFound as exc: LOG.exception(exc) elif value and self._efficacy_indicators != value: self._efficacy_indicators = value def _get_strategy(self, value): if value == wtypes.Unset: return None strategy = None try: if utils.is_uuid_like(value) or utils.is_int_like(value): strategy = objects.Strategy.get( pecan.request.context, value) else: strategy = objects.Strategy.get_by_name( pecan.request.context, value) except exception.StrategyNotFound: pass if strategy: self.strategy_id = strategy.id return strategy def _get_strategy_uuid(self): return self._strategy_uuid def _set_strategy_uuid(self, value): if value and self._strategy_uuid != value: self._strategy_uuid = None strategy = self._get_strategy(value) if strategy: self._strategy_uuid = strategy.uuid def _get_strategy_name(self): return self._strategy_name def _set_strategy_name(self, value): if value and self._strategy_name != value: self._strategy_name = None strategy = self._get_strategy(value) if strategy: self._strategy_name = strategy.name uuid = wtypes.wsattr(types.uuid, readonly=True) """Unique UUID for this action plan""" audit_uuid = wtypes.wsproperty(types.uuid, _get_audit_uuid, 
_set_audit_uuid, mandatory=True) """The UUID of the audit this port belongs to""" strategy_uuid = wtypes.wsproperty( wtypes.text, _get_strategy_uuid, _set_strategy_uuid, mandatory=False) """Strategy UUID the action plan refers to""" strategy_name = wtypes.wsproperty( wtypes.text, _get_strategy_name, _set_strategy_name, mandatory=False) """The name of the strategy this action plan refers to""" efficacy_indicators = wtypes.wsproperty( types.jsontype, _get_efficacy_indicators, _set_efficacy_indicators, mandatory=True) """The list of efficacy indicators associated to this action plan""" global_efficacy = wtypes.wsattr(types.jsontype, readonly=True) """The global efficacy of this action plan""" state = wtypes.text """This action plan state""" links = wtypes.wsattr([link.Link], readonly=True) """A list containing a self link and associated action links""" hostname = wtypes.wsattr(wtypes.text, mandatory=False) """Hostname the actionplan is running on""" def __init__(self, **kwargs): super(ActionPlan, self).__init__() self.fields = [] fields = list(objects.ActionPlan.fields) for field in fields: # Skip fields we do not expose. if not hasattr(self, field): continue self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) self.fields.append('audit_uuid') self.fields.append('efficacy_indicators') setattr(self, 'audit_uuid', kwargs.get('audit_id', wtypes.Unset)) fields.append('strategy_uuid') setattr(self, 'strategy_uuid', kwargs.get('strategy_id', wtypes.Unset)) fields.append('strategy_name') setattr(self, 'strategy_name', kwargs.get('strategy_id', wtypes.Unset)) @staticmethod def _convert_with_links(action_plan, url, expand=True): if not expand: action_plan.unset_fields_except( ['uuid', 'state', 'efficacy_indicators', 'global_efficacy', 'updated_at', 'audit_uuid', 'strategy_uuid', 'strategy_name']) action_plan.links = [ link.Link.make_link( 'self', url, 'action_plans', action_plan.uuid), link.Link.make_link( 'bookmark', url, 'action_plans', action_plan.uuid, bookmark=True)] return action_plan @classmethod def convert_with_links(cls, rpc_action_plan, expand=True): action_plan = ActionPlan(**rpc_action_plan.as_dict()) hide_fields_in_newer_versions(action_plan) return cls._convert_with_links(action_plan, pecan.request.host_url, expand) @classmethod def sample(cls, expand=True): sample = cls(uuid='9ef4d84c-41e8-4418-9220-ce55be0436af', state='ONGOING', created_at=datetime.datetime.utcnow(), deleted_at=None, updated_at=datetime.datetime.utcnow()) sample._audit_uuid = 'abcee106-14d3-4515-b744-5a26885cf6f6' sample._efficacy_indicators = [{'description': 'Test indicator', 'name': 'test_indicator', 'unit': '%'}] sample._global_efficacy = {'description': 'Global efficacy', 'name': 'test_global_efficacy', 'unit': '%'} return cls._convert_with_links(sample, 'http://localhost:9322', expand) class ActionPlanCollection(collection.Collection): """API representation of a collection of action_plans.""" action_plans = [ActionPlan] """A list containing action_plans objects""" def __init__(self, **kwargs): self._type = 'action_plans' @staticmethod def convert_with_links(rpc_action_plans, limit, url=None, expand=False, **kwargs): ap_collection = ActionPlanCollection() ap_collection.action_plans = [ActionPlan.convert_with_links( p, expand) for p in rpc_action_plans] ap_collection.next = ap_collection.get_next(limit, url=url, **kwargs) return ap_collection @classmethod def sample(cls): sample = cls() sample.action_plans = [ActionPlan.sample(expand=False)] return sample class 
ActionPlansController(rest.RestController): """REST controller for Actions.""" def __init__(self): super(ActionPlansController, self).__init__() self.applier_client = rpcapi.ApplierAPI() from_actionsPlans = False """A flag to indicate if the requests to this controller are coming from the top-level resource ActionPlan.""" _custom_actions = { 'start': ['POST'], 'detail': ['GET'] } def _get_action_plans_collection(self, marker, limit, sort_key, sort_dir, expand=False, resource_url=None, audit_uuid=None, strategy=None): additional_fields = ['audit_uuid', 'strategy_uuid', 'strategy_name'] api_utils.validate_sort_key( sort_key, list(objects.ActionPlan.fields) + additional_fields) limit = api_utils.validate_limit(limit) api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.ActionPlan.get_by_uuid( pecan.request.context, marker) filters = {} if audit_uuid: filters['audit_uuid'] = audit_uuid if strategy: if utils.is_uuid_like(strategy): filters['strategy_uuid'] = strategy else: filters['strategy_name'] = strategy need_api_sort = api_utils.check_need_api_sort(sort_key, additional_fields) sort_db_key = (sort_key if not need_api_sort else None) action_plans = objects.ActionPlan.list( pecan.request.context, limit, marker_obj, sort_key=sort_db_key, sort_dir=sort_dir, filters=filters) action_plans_collection = ActionPlanCollection.convert_with_links( action_plans, limit, url=resource_url, expand=expand, sort_key=sort_key, sort_dir=sort_dir) if need_api_sort: api_utils.make_api_sort(action_plans_collection.action_plans, sort_key, sort_dir) return action_plans_collection @wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, int, wtypes.text, wtypes.text, types.uuid, wtypes.text) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc', audit_uuid=None, strategy=None): """Retrieve a list of action plans. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param audit_uuid: Optional UUID of an audit, to get only actions for that audit. :param strategy: strategy UUID or name to filter by """ context = pecan.request.context policy.enforce(context, 'action_plan:get_all', action='action_plan:get_all') return self._get_action_plans_collection( marker, limit, sort_key, sort_dir, audit_uuid=audit_uuid, strategy=strategy) @wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, int, wtypes.text, wtypes.text, types.uuid, wtypes.text) def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc', audit_uuid=None, strategy=None): """Retrieve a list of action_plans with detail. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param audit_uuid: Optional UUID of an audit, to get only actions for that audit. 
:param strategy: strategy UUID or name to filter by """ context = pecan.request.context policy.enforce(context, 'action_plan:detail', action='action_plan:detail') # NOTE(lucasagomes): /detail should only work agaist collections parent = pecan.request.path.split('/')[:-1][-1] if parent != "action_plans": raise exception.HTTPNotFound expand = True resource_url = '/'.join(['action_plans', 'detail']) return self._get_action_plans_collection( marker, limit, sort_key, sort_dir, expand, resource_url, audit_uuid=audit_uuid, strategy=strategy) @wsme_pecan.wsexpose(ActionPlan, types.uuid) def get_one(self, action_plan_uuid): """Retrieve information about the given action plan. :param action_plan_uuid: UUID of a action plan. """ if self.from_actionsPlans: raise exception.OperationNotPermitted context = pecan.request.context action_plan = api_utils.get_resource('ActionPlan', action_plan_uuid) policy.enforce( context, 'action_plan:get', action_plan, action='action_plan:get') return ActionPlan.convert_with_links(action_plan) @wsme_pecan.wsexpose(None, types.uuid, status_code=HTTPStatus.NO_CONTENT) def delete(self, action_plan_uuid): """Delete an action plan. :param action_plan_uuid: UUID of a action. """ context = pecan.request.context action_plan = api_utils.get_resource( 'ActionPlan', action_plan_uuid, eager=True) policy.enforce(context, 'action_plan:delete', action_plan, action='action_plan:delete') allowed_states = (ap_objects.State.SUCCEEDED, ap_objects.State.RECOMMENDED, ap_objects.State.FAILED, ap_objects.State.SUPERSEDED, ap_objects.State.CANCELLED) if action_plan.state not in allowed_states: raise exception.DeleteError( state=action_plan.state) action_plan.soft_delete() @wsme.validate(types.uuid, [ActionPlanPatchType]) @wsme_pecan.wsexpose(ActionPlan, types.uuid, body=[ActionPlanPatchType]) def patch(self, action_plan_uuid, patch): """Update an existing action plan. :param action_plan_uuid: UUID of a action plan. :param patch: a json PATCH document to apply to this action plan. 
""" if self.from_actionsPlans: raise exception.OperationNotPermitted context = pecan.request.context action_plan_to_update = api_utils.get_resource( 'ActionPlan', action_plan_uuid, eager=True) policy.enforce(context, 'action_plan:update', action_plan_to_update, action='action_plan:update') try: action_plan_dict = action_plan_to_update.as_dict() action_plan = ActionPlan(**api_utils.apply_jsonpatch( action_plan_dict, patch)) except api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch, reason=e) launch_action_plan = False cancel_action_plan = False # transitions that are allowed via PATCH allowed_patch_transitions = [ (ap_objects.State.RECOMMENDED, ap_objects.State.PENDING), (ap_objects.State.RECOMMENDED, ap_objects.State.CANCELLED), (ap_objects.State.ONGOING, ap_objects.State.CANCELLING), (ap_objects.State.PENDING, ap_objects.State.CANCELLED), ] # todo: improve this in blueprint watcher-api-validation if hasattr(action_plan, 'state'): transition = (action_plan_to_update.state, action_plan.state) if transition not in allowed_patch_transitions: error_message = _("State transition not allowed: " "(%(initial_state)s -> %(new_state)s)") raise exception.PatchError( patch=patch, reason=error_message % dict( initial_state=action_plan_to_update.state, new_state=action_plan.state)) if action_plan.state == ap_objects.State.PENDING: launch_action_plan = True if action_plan.state == ap_objects.State.CANCELLED: cancel_action_plan = True # Update only the fields that have changed for field in objects.ActionPlan.fields: try: patch_val = getattr(action_plan, field) except AttributeError: # Ignore fields that aren't exposed in the API continue if patch_val == wtypes.Unset: patch_val = None if action_plan_to_update[field] != patch_val: action_plan_to_update[field] = patch_val if (field == 'state' and patch_val == objects.action_plan.State.PENDING): launch_action_plan = True action_plan_to_update.save() # NOTE: if action plan is cancelled from pending or recommended # state update action state here only if cancel_action_plan: filters = {'action_plan_uuid': action_plan.uuid} actions = objects.Action.list(pecan.request.context, filters=filters, eager=True) for a in actions: a.state = objects.action.State.CANCELLED a.save() if launch_action_plan: self.applier_client.launch_action_plan(pecan.request.context, action_plan.uuid) action_plan_to_update = objects.ActionPlan.get_by_uuid( pecan.request.context, action_plan_uuid) return ActionPlan.convert_with_links(action_plan_to_update) @wsme_pecan.wsexpose(ActionPlan, types.uuid) def start(self, action_plan_uuid, **kwargs): """Start an action_plan :param action_plan_uuid: UUID of an action_plan. """ action_plan_to_start = api_utils.get_resource( 'ActionPlan', action_plan_uuid, eager=True) context = pecan.request.context policy.enforce(context, 'action_plan:start', action_plan_to_start, action='action_plan:start') if action_plan_to_start['state'] != \ objects.action_plan.State.RECOMMENDED: raise exception.StartError( state=action_plan_to_start.state) action_plan_to_start['state'] = objects.action_plan.State.PENDING action_plan_to_start.save() self.applier_client.launch_action_plan(pecan.request.context, action_plan_uuid) action_plan_to_start = objects.ActionPlan.get_by_uuid( pecan.request.context, action_plan_uuid) return ActionPlan.convert_with_links(action_plan_to_start)
[((3265, 3288), 'oslo_log.log.getLogger', 'log.getLogger', (['__name__'], {}), '(__name__)\n', (3278, 3288), False, 'from oslo_log import log\n'), ((7961, 8001), 'wsme.types.wsattr', 'wtypes.wsattr', (['types.uuid'], {'readonly': '(True)'}), '(types.uuid, readonly=True)\n', (7974, 8001), True, 'from wsme import types as wtypes\n'), ((8063, 8142), 'wsme.types.wsproperty', 'wtypes.wsproperty', (['types.uuid', '_get_audit_uuid', '_set_audit_uuid'], {'mandatory': '(True)'}), '(types.uuid, _get_audit_uuid, _set_audit_uuid, mandatory=True)\n', (8080, 8142), True, 'from wsme import types as wtypes\n'), ((8287, 8378), 'wsme.types.wsproperty', 'wtypes.wsproperty', (['wtypes.text', '_get_strategy_uuid', '_set_strategy_uuid'], {'mandatory': '(False)'}), '(wtypes.text, _get_strategy_uuid, _set_strategy_uuid,\n mandatory=False)\n', (8304, 8378), True, 'from wsme import types as wtypes\n'), ((8455, 8546), 'wsme.types.wsproperty', 'wtypes.wsproperty', (['wtypes.text', '_get_strategy_name', '_set_strategy_name'], {'mandatory': '(False)'}), '(wtypes.text, _get_strategy_name, _set_strategy_name,\n mandatory=False)\n', (8472, 8546), True, 'from wsme import types as wtypes\n'), ((8641, 8746), 'wsme.types.wsproperty', 'wtypes.wsproperty', (['types.jsontype', '_get_efficacy_indicators', '_set_efficacy_indicators'], {'mandatory': '(True)'}), '(types.jsontype, _get_efficacy_indicators,\n _set_efficacy_indicators, mandatory=True)\n', (8658, 8746), True, 'from wsme import types as wtypes\n'), ((8856, 8900), 'wsme.types.wsattr', 'wtypes.wsattr', (['types.jsontype'], {'readonly': '(True)'}), '(types.jsontype, readonly=True)\n', (8869, 8900), True, 'from wsme import types as wtypes\n'), ((9022, 9063), 'wsme.types.wsattr', 'wtypes.wsattr', (['[link.Link]'], {'readonly': '(True)'}), '([link.Link], readonly=True)\n', (9035, 9063), True, 'from wsme import types as wtypes\n'), ((9148, 9191), 'wsme.types.wsattr', 'wtypes.wsattr', (['wtypes.text'], {'mandatory': '(False)'}), '(wtypes.text, mandatory=False)\n', (9161, 9191), True, 'from wsme import types as wtypes\n'), ((14882, 14995), 'wsmeext.pecan.wsexpose', 'wsme_pecan.wsexpose', (['ActionPlanCollection', 'types.uuid', 'int', 'wtypes.text', 'wtypes.text', 'types.uuid', 'wtypes.text'], {}), '(ActionPlanCollection, types.uuid, int, wtypes.text,\n wtypes.text, types.uuid, wtypes.text)\n', (14901, 14995), True, 'import wsmeext.pecan as wsme_pecan\n'), ((15951, 16064), 'wsmeext.pecan.wsexpose', 'wsme_pecan.wsexpose', (['ActionPlanCollection', 'types.uuid', 'int', 'wtypes.text', 'wtypes.text', 'types.uuid', 'wtypes.text'], {}), '(ActionPlanCollection, types.uuid, int, wtypes.text,\n wtypes.text, types.uuid, wtypes.text)\n', (15970, 16064), True, 'import wsmeext.pecan as wsme_pecan\n'), ((17340, 17383), 'wsmeext.pecan.wsexpose', 'wsme_pecan.wsexpose', (['ActionPlan', 'types.uuid'], {}), '(ActionPlan, types.uuid)\n', (17359, 17383), True, 'import wsmeext.pecan as wsme_pecan\n'), ((17926, 17998), 'wsmeext.pecan.wsexpose', 'wsme_pecan.wsexpose', (['None', 'types.uuid'], {'status_code': 'HTTPStatus.NO_CONTENT'}), '(None, types.uuid, status_code=HTTPStatus.NO_CONTENT)\n', (17945, 17998), True, 'import wsmeext.pecan as wsme_pecan\n'), ((18844, 18892), 'wsme.validate', 'wsme.validate', (['types.uuid', '[ActionPlanPatchType]'], {}), '(types.uuid, [ActionPlanPatchType])\n', (18857, 18892), False, 'import wsme\n'), ((18898, 18969), 'wsmeext.pecan.wsexpose', 'wsme_pecan.wsexpose', (['ActionPlan', 'types.uuid'], {'body': '[ActionPlanPatchType]'}), '(ActionPlan, types.uuid, 
body=[ActionPlanPatchType])\n', (18917, 18969), True, 'import wsmeext.pecan as wsme_pecan\n'), ((22745, 22788), 'wsmeext.pecan.wsexpose', 'wsme_pecan.wsexpose', (['ActionPlan', 'types.uuid'], {}), '(ActionPlan, types.uuid)\n', (22764, 22788), True, 'import wsmeext.pecan as wsme_pecan\n'), ((4372, 4407), 'watcher.api.controllers.v1.types.JsonPatchType.validate', 'types.JsonPatchType.validate', (['patch'], {}), '(patch)\n', (4400, 4407), False, 'from watcher.api.controllers.v1 import types\n'), ((4468, 4504), 'watcher.api.controllers.v1.types.JsonPatchType.internal_attrs', 'types.JsonPatchType.internal_attrs', ([], {}), '()\n', (4502, 4504), False, 'from watcher.api.controllers.v1 import types\n'), ((12840, 12859), 'watcher.applier.rpcapi.ApplierAPI', 'rpcapi.ApplierAPI', ([], {}), '()\n', (12857, 12859), False, 'from watcher.applier import rpcapi\n'), ((13556, 13587), 'watcher.api.controllers.v1.utils.validate_limit', 'api_utils.validate_limit', (['limit'], {}), '(limit)\n', (13580, 13587), True, 'from watcher.api.controllers.v1 import utils as api_utils\n'), ((13596, 13633), 'watcher.api.controllers.v1.utils.validate_sort_dir', 'api_utils.validate_sort_dir', (['sort_dir'], {}), '(sort_dir)\n', (13623, 13633), True, 'from watcher.api.controllers.v1 import utils as api_utils\n'), ((14090, 14148), 'watcher.api.controllers.v1.utils.check_need_api_sort', 'api_utils.check_need_api_sort', (['sort_key', 'additional_fields'], {}), '(sort_key, additional_fields)\n', (14119, 14148), True, 'from watcher.api.controllers.v1 import utils as api_utils\n'), ((14314, 14442), 'watcher.objects.ActionPlan.list', 'objects.ActionPlan.list', (['pecan.request.context', 'limit', 'marker_obj'], {'sort_key': 'sort_db_key', 'sort_dir': 'sort_dir', 'filters': 'filters'}), '(pecan.request.context, limit, marker_obj, sort_key=\n sort_db_key, sort_dir=sort_dir, filters=filters)\n', (14337, 14442), False, 'from watcher import objects\n'), ((15693, 15769), 'watcher.common.policy.enforce', 'policy.enforce', (['context', '"""action_plan:get_all"""'], {'action': '"""action_plan:get_all"""'}), "(context, 'action_plan:get_all', action='action_plan:get_all')\n", (15707, 15769), False, 'from watcher.common import policy\n'), ((16772, 16846), 'watcher.common.policy.enforce', 'policy.enforce', (['context', '"""action_plan:detail"""'], {'action': '"""action_plan:detail"""'}), "(context, 'action_plan:detail', action='action_plan:detail')\n", (16786, 16846), False, 'from watcher.common import policy\n'), ((17703, 17757), 'watcher.api.controllers.v1.utils.get_resource', 'api_utils.get_resource', (['"""ActionPlan"""', 'action_plan_uuid'], {}), "('ActionPlan', action_plan_uuid)\n", (17725, 17757), True, 'from watcher.api.controllers.v1 import utils as api_utils\n'), ((17766, 17852), 'watcher.common.policy.enforce', 'policy.enforce', (['context', '"""action_plan:get"""', 'action_plan'], {'action': '"""action_plan:get"""'}), "(context, 'action_plan:get', action_plan, action=\n 'action_plan:get')\n", (17780, 17852), False, 'from watcher.common import policy\n'), ((18199, 18265), 'watcher.api.controllers.v1.utils.get_resource', 'api_utils.get_resource', (['"""ActionPlan"""', 'action_plan_uuid'], {'eager': '(True)'}), "('ActionPlan', action_plan_uuid, eager=True)\n", (18221, 18265), True, 'from watcher.api.controllers.v1 import utils as api_utils\n'), ((18287, 18379), 'watcher.common.policy.enforce', 'policy.enforce', (['context', '"""action_plan:delete"""', 'action_plan'], {'action': '"""action_plan:delete"""'}), "(context, 
'action_plan:delete', action_plan, action=\n 'action_plan:delete')\n", (18301, 18379), False, 'from watcher.common import policy\n'), ((19385, 19451), 'watcher.api.controllers.v1.utils.get_resource', 'api_utils.get_resource', (['"""ActionPlan"""', 'action_plan_uuid'], {'eager': '(True)'}), "('ActionPlan', action_plan_uuid, eager=True)\n", (19407, 19451), True, 'from watcher.api.controllers.v1 import utils as api_utils\n'), ((19473, 19575), 'watcher.common.policy.enforce', 'policy.enforce', (['context', '"""action_plan:update"""', 'action_plan_to_update'], {'action': '"""action_plan:update"""'}), "(context, 'action_plan:update', action_plan_to_update, action\n ='action_plan:update')\n", (19487, 19575), False, 'from watcher.common import policy\n'), ((22574, 22645), 'watcher.objects.ActionPlan.get_by_uuid', 'objects.ActionPlan.get_by_uuid', (['pecan.request.context', 'action_plan_uuid'], {}), '(pecan.request.context, action_plan_uuid)\n', (22604, 22645), False, 'from watcher import objects\n'), ((22972, 23038), 'watcher.api.controllers.v1.utils.get_resource', 'api_utils.get_resource', (['"""ActionPlan"""', 'action_plan_uuid'], {'eager': '(True)'}), "('ActionPlan', action_plan_uuid, eager=True)\n", (22994, 23038), True, 'from watcher.api.controllers.v1 import utils as api_utils\n'), ((23101, 23200), 'watcher.common.policy.enforce', 'policy.enforce', (['context', '"""action_plan:start"""', 'action_plan_to_start'], {'action': '"""action_plan:start"""'}), "(context, 'action_plan:start', action_plan_to_start, action=\n 'action_plan:start')\n", (23115, 23200), False, 'from watcher.common import policy\n'), ((23689, 23760), 'watcher.objects.ActionPlan.get_by_uuid', 'objects.ActionPlan.get_by_uuid', (['pecan.request.context', 'action_plan_uuid'], {}), '(pecan.request.context, action_plan_uuid)\n', (23719, 23760), False, 'from watcher import objects\n'), ((4075, 4104), 'watcher._i18n._', '_', (['"""Invalid state: %(state)s"""'], {}), "('Invalid state: %(state)s')\n", (4076, 4104), False, 'from watcher._i18n import _\n'), ((10386, 10452), 'watcher.api.controllers.link.Link.make_link', 'link.Link.make_link', (['"""self"""', 'url', '"""action_plans"""', 'action_plan.uuid'], {}), "('self', url, 'action_plans', action_plan.uuid)\n", (10405, 10452), False, 'from watcher.api.controllers import link\n'), ((10499, 10588), 'watcher.api.controllers.link.Link.make_link', 'link.Link.make_link', (['"""bookmark"""', 'url', '"""action_plans"""', 'action_plan.uuid'], {'bookmark': '(True)'}), "('bookmark', url, 'action_plans', action_plan.uuid,\n bookmark=True)\n", (10518, 10588), False, 'from watcher.api.controllers import link\n'), ((13705, 13766), 'watcher.objects.ActionPlan.get_by_uuid', 'objects.ActionPlan.get_by_uuid', (['pecan.request.context', 'marker'], {}), '(pecan.request.context, marker)\n', (13735, 13766), False, 'from watcher import objects\n'), ((13913, 13941), 'watcher.common.utils.is_uuid_like', 'utils.is_uuid_like', (['strategy'], {}), '(strategy)\n', (13931, 13941), False, 'from watcher.common import utils\n'), ((14718, 14803), 'watcher.api.controllers.v1.utils.make_api_sort', 'api_utils.make_api_sort', (['action_plans_collection.action_plans', 'sort_key', 'sort_dir'], {}), '(action_plans_collection.action_plans, sort_key,\n sort_dir)\n', (14741, 14803), True, 'from watcher.api.controllers.v1 import utils as api_utils\n'), ((18739, 18785), 'watcher.common.exception.DeleteError', 'exception.DeleteError', ([], {'state': 'action_plan.state'}), '(state=action_plan.state)\n', (18760, 18785), False, 'from 
watcher.common import exception\n'), ((22140, 22211), 'watcher.objects.Action.list', 'objects.Action.list', (['pecan.request.context'], {'filters': 'filters', 'eager': '(True)'}), '(pecan.request.context, filters=filters, eager=True)\n', (22159, 22211), False, 'from watcher import objects\n'), ((23339, 23393), 'watcher.common.exception.StartError', 'exception.StartError', ([], {'state': 'action_plan_to_start.state'}), '(state=action_plan_to_start.state)\n', (23359, 23393), False, 'from watcher.common import exception\n'), ((5825, 5924), 'watcher.objects.EfficacyIndicator.list', 'objects.EfficacyIndicator.list', (['pecan.request.context'], {'filters': "{'action_plan_uuid': self.uuid}"}), "(pecan.request.context, filters={\n 'action_plan_uuid': self.uuid})\n", (5855, 5924), False, 'from watcher import objects\n'), ((6866, 6891), 'watcher.common.utils.is_uuid_like', 'utils.is_uuid_like', (['value'], {}), '(value)\n', (6884, 6891), False, 'from watcher.common import utils\n'), ((6895, 6919), 'watcher.common.utils.is_int_like', 'utils.is_int_like', (['value'], {}), '(value)\n', (6912, 6919), False, 'from watcher.common import utils\n'), ((6948, 6998), 'watcher.objects.Strategy.get', 'objects.Strategy.get', (['pecan.request.context', 'value'], {}), '(pecan.request.context, value)\n', (6968, 6998), False, 'from watcher import objects\n'), ((7065, 7123), 'watcher.objects.Strategy.get_by_name', 'objects.Strategy.get_by_name', (['pecan.request.context', 'value'], {}), '(pecan.request.context, value)\n', (7093, 7123), False, 'from watcher import objects\n'), ((11167, 11193), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (11191, 11193), False, 'import datetime\n'), ((11265, 11291), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (11289, 11291), False, 'import datetime\n'), ((16961, 16990), 'pecan.request.path.split', 'pecan.request.path.split', (['"""/"""'], {}), "('/')\n", (16985, 16990), False, 'import pecan\n'), ((19849, 19892), 'watcher.common.exception.PatchError', 'exception.PatchError', ([], {'patch': 'patch', 'reason': 'e'}), '(patch=patch, reason=e)\n', (19869, 19892), False, 'from watcher.common import exception\n'), ((20667, 20738), 'watcher._i18n._', '_', (['"""State transition not allowed: (%(initial_state)s -> %(new_state)s)"""'], {}), "('State transition not allowed: (%(initial_state)s -> %(new_state)s)')\n", (20668, 20738), False, 'from watcher._i18n import _\n'), ((5217, 5264), 'watcher.objects.Audit.get', 'objects.Audit.get', (['pecan.request.context', 'value'], {}), '(pecan.request.context, value)\n', (5234, 5264), False, 'from watcher import objects\n'), ((19710, 19760), 'watcher.api.controllers.v1.utils.apply_jsonpatch', 'api_utils.apply_jsonpatch', (['action_plan_dict', 'patch'], {}), '(action_plan_dict, patch)\n', (19735, 19760), True, 'from watcher.api.controllers.v1 import utils as api_utils\n')]
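A hypothetical client-side sketch against the controller above, using the sample port (9322) and the 'action_plans' collection key that appear in the record; the endpoint host and the authentication token are placeholders, not values from the repository:

import requests

base = 'http://localhost:9322/v1'
headers = {'X-Auth-Token': 'placeholder-keystone-token'}

# list action plans (ActionPlansController.get_all)
plans = requests.get(base + '/action_plans', headers=headers).json()
plan_uuid = plans['action_plans'][0]['uuid']

# move a RECOMMENDED plan to PENDING via JSON PATCH, which makes the applier launch it
patch = [{'op': 'replace', 'path': '/state', 'value': 'PENDING'}]
requests.patch(base + '/action_plans/' + plan_uuid, json=patch, headers=headers)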
jeonginlee/groove_scheduler
controllers/albums.py
84e61834e940e2ff138ffeeea61fd301f3c2a244
from flask import *

albums = Blueprint('albums', __name__, template_folder='templates')


@albums.route('/albums/edit')
def albums_edit_route():
    options = {
        "edit": True
    }
    return render_template("albums.html", **options)


@albums.route('/albums')
def albums_route():
    options = {
        "edit": False
    }
    return render_template("albums.html", **options)
[]
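The blueprint above only becomes reachable once it is registered on an application object. A hypothetical wiring sketch (the application module and the debug flag are assumptions, not part of the record):

from flask import Flask
from controllers.albums import albums   # import path follows the repo_path above

app = Flask(__name__, template_folder='templates')
app.register_blueprint(albums)

if __name__ == '__main__':
    app.run(debug=True)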
sn0b4ll/Incident-Playbook
Incident-Response/Tools/dfirtrack/dfirtrack_main/views/division_views.py
cf519f58fcd4255674662b3620ea97c1091c1efb
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect, render
from django.urls import reverse
from django.views.generic import DetailView, ListView
from django.views.generic.edit import CreateView, UpdateView

from dfirtrack_main.forms import DivisionForm
from dfirtrack_main.logger.default_logger import debug_logger
from dfirtrack_main.models import Division


class DivisionList(LoginRequiredMixin, ListView):
    login_url = '/login'
    model = Division
    template_name = 'dfirtrack_main/division/division_list.html'
    context_object_name = 'division_list'

    def get_queryset(self):
        debug_logger(str(self.request.user), " DIVISION_LIST_ENTERED")
        return Division.objects.order_by('division_name')


class DivisionDetail(LoginRequiredMixin, DetailView):
    login_url = '/login'
    model = Division
    template_name = 'dfirtrack_main/division/division_detail.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        division = self.object
        division.logger(str(self.request.user), " DIVISION_DETAIL_ENTERED")
        return context


class DivisionCreate(LoginRequiredMixin, CreateView):
    login_url = '/login'
    model = Division
    form_class = DivisionForm
    template_name = 'dfirtrack_main/division/division_add.html'

    def get(self, request, *args, **kwargs):
        form = self.form_class()
        debug_logger(str(request.user), " DIVISION_ADD_ENTERED")
        return render(request, self.template_name, {'form': form})

    def post(self, request, *args, **kwargs):
        form = self.form_class(request.POST)
        if form.is_valid():
            division = form.save(commit=False)
            division.save()
            division.logger(str(request.user), " DIVISION_ADD_EXECUTED")
            messages.success(request, 'Division added')
            return redirect(reverse('division_detail', args=(division.division_id,)))
        else:
            return render(request, self.template_name, {'form': form})


class DivisionUpdate(LoginRequiredMixin, UpdateView):
    login_url = '/login'
    model = Division
    form_class = DivisionForm
    template_name = 'dfirtrack_main/division/division_edit.html'

    def get(self, request, *args, **kwargs):
        division = self.get_object()
        form = self.form_class(instance=division)
        division.logger(str(request.user), " DIVISION_EDIT_ENTERED")
        return render(request, self.template_name, {'form': form})

    def post(self, request, *args, **kwargs):
        division = self.get_object()
        form = self.form_class(request.POST, instance=division)
        if form.is_valid():
            division = form.save(commit=False)
            division.save()
            division.logger(str(request.user), " DIVISION_EDIT_EXECUTED")
            messages.success(request, 'Division edited')
            return redirect(reverse('division_detail', args=(division.division_id,)))
        else:
            return render(request, self.template_name, {'form': form})
[((757, 799), 'dfirtrack_main.models.Division.objects.order_by', 'Division.objects.order_by', (['"""division_name"""'], {}), "('division_name')\n", (782, 799), False, 'from dfirtrack_main.models import Division\n'), ((1548, 1599), 'django.shortcuts.render', 'render', (['request', 'self.template_name', "{'form': form}"], {}), "(request, self.template_name, {'form': form})\n", (1554, 1599), False, 'from django.shortcuts import redirect, render\n'), ((2508, 2559), 'django.shortcuts.render', 'render', (['request', 'self.template_name', "{'form': form}"], {}), "(request, self.template_name, {'form': form})\n", (2514, 2559), False, 'from django.shortcuts import redirect, render\n'), ((1880, 1923), 'django.contrib.messages.success', 'messages.success', (['request', '"""Division added"""'], {}), "(request, 'Division added')\n", (1896, 1923), False, 'from django.contrib import messages\n'), ((2043, 2094), 'django.shortcuts.render', 'render', (['request', 'self.template_name', "{'form': form}"], {}), "(request, self.template_name, {'form': form})\n", (2049, 2094), False, 'from django.shortcuts import redirect, render\n'), ((2897, 2941), 'django.contrib.messages.success', 'messages.success', (['request', '"""Division edited"""'], {}), "(request, 'Division edited')\n", (2913, 2941), False, 'from django.contrib import messages\n'), ((3061, 3112), 'django.shortcuts.render', 'render', (['request', 'self.template_name', "{'form': form}"], {}), "(request, self.template_name, {'form': form})\n", (3067, 3112), False, 'from django.shortcuts import redirect, render\n'), ((1952, 2008), 'django.urls.reverse', 'reverse', (['"""division_detail"""'], {'args': '(division.division_id,)'}), "('division_detail', args=(division.division_id,))\n", (1959, 2008), False, 'from django.urls import reverse\n'), ((2970, 3026), 'django.urls.reverse', 'reverse', (['"""division_detail"""'], {'args': '(division.division_id,)'}), "('division_detail', args=(division.division_id,))\n", (2977, 3026), False, 'from django.urls import reverse\n')]
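A hypothetical urls.py sketch for the class-based views above; the route names mirror the reverse('division_detail', ...) calls in the record, but the exact URL configuration is not part of this record:

from django.urls import path
from dfirtrack_main.views.division_views import (
    DivisionCreate, DivisionDetail, DivisionList, DivisionUpdate,
)

urlpatterns = [
    path('division/', DivisionList.as_view(), name='division_list'),
    path('division/<int:pk>/', DivisionDetail.as_view(), name='division_detail'),
    path('division/add/', DivisionCreate.as_view(), name='division_add'),
    path('division/<int:pk>/edit/', DivisionUpdate.as_view(), name='division_edit'),
]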
shark803/Torch_serve_example_NLP
run.py
7f7984a1668f21aced3a7a1e8ddac3c8e0ff0105
# coding: UTF-8
import time
import torch
import numpy as np
from train_eval import train, init_network
from importlib import import_module
import argparse

parser = argparse.ArgumentParser(description='Chinese Text Classification')
parser.add_argument('--model', type=str, required=True, help='choose a model: TextCNN')
parser.add_argument('--embedding', default='pre_trained', type=str, help='random or pre_trained')
parser.add_argument('--word', default=False, type=bool, help='True for word, False for char')
args = parser.parse_args()


if __name__ == '__main__':
    dataset = 'THUCNews'  # dataset

    # Sogou News: embedding_SougouNews.npz, Tencent: embedding_Tencent.npz, random initialization: random
    # embedding = 'random'
    model_name = args.model  # TextCNN

    from utils import build_dataset, build_iterator, get_time_dif

    x = import_module('models.' + model_name)
    from config import Config
    config = Config(dataset)
    np.random.seed(1)
    torch.manual_seed(1)
    torch.cuda.manual_seed_all(1)
    torch.backends.cudnn.deterministic = True  # ensure identical results on every run

    start_time = time.time()
    print("Loading data...")
    vocab, train_data, dev_data, test_data = build_dataset(config, args.word)
    train_iter = build_iterator(train_data, config)
    dev_iter = build_iterator(dev_data, config)
    test_iter = build_iterator(test_data, config)
    time_dif = get_time_dif(start_time)
    print("Time usage:", time_dif)

    # train
    config.n_vocab = len(vocab)
    model = x.Model().to(config.device)
    init_network(model)
    print(model.parameters)
    train(config, model, train_iter, dev_iter, test_iter)
[((165, 231), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Chinese Text Classification"""'}), "(description='Chinese Text Classification')\n", (188, 231), False, 'import argparse\n'), ((820, 857), 'importlib.import_module', 'import_module', (["('models.' + model_name)"], {}), "('models.' + model_name)\n", (833, 857), False, 'from importlib import import_module\n'), ((901, 916), 'config.Config', 'Config', (['dataset'], {}), '(dataset)\n', (907, 916), False, 'from config import Config\n'), ((921, 938), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (935, 938), True, 'import numpy as np\n'), ((943, 963), 'torch.manual_seed', 'torch.manual_seed', (['(1)'], {}), '(1)\n', (960, 963), False, 'import torch\n'), ((968, 997), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['(1)'], {}), '(1)\n', (994, 997), False, 'import torch\n'), ((1074, 1085), 'time.time', 'time.time', ([], {}), '()\n', (1083, 1085), False, 'import time\n'), ((1160, 1192), 'utils.build_dataset', 'build_dataset', (['config', 'args.word'], {}), '(config, args.word)\n', (1173, 1192), False, 'from utils import build_dataset, build_iterator, get_time_dif\n'), ((1210, 1244), 'utils.build_iterator', 'build_iterator', (['train_data', 'config'], {}), '(train_data, config)\n', (1224, 1244), False, 'from utils import build_dataset, build_iterator, get_time_dif\n'), ((1260, 1292), 'utils.build_iterator', 'build_iterator', (['dev_data', 'config'], {}), '(dev_data, config)\n', (1274, 1292), False, 'from utils import build_dataset, build_iterator, get_time_dif\n'), ((1309, 1342), 'utils.build_iterator', 'build_iterator', (['test_data', 'config'], {}), '(test_data, config)\n', (1323, 1342), False, 'from utils import build_dataset, build_iterator, get_time_dif\n'), ((1358, 1382), 'utils.get_time_dif', 'get_time_dif', (['start_time'], {}), '(start_time)\n', (1370, 1382), False, 'from utils import build_dataset, build_iterator, get_time_dif\n'), ((1507, 1526), 'train_eval.init_network', 'init_network', (['model'], {}), '(model)\n', (1519, 1526), False, 'from train_eval import train, init_network\n'), ((1559, 1612), 'train_eval.train', 'train', (['config', 'model', 'train_iter', 'dev_iter', 'test_iter'], {}), '(config, model, train_iter, dev_iter, test_iter)\n', (1564, 1612), False, 'from train_eval import train, init_network\n')]
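
One detail worth noting about the parser above: argparse's type=bool converts the raw string with bool(), so any non-empty value passed to --word parses as True. A small self-contained demonstration:

# Demonstrates that type=bool in argparse treats any non-empty string as True;
# only the absence of the flag yields the default False.
import argparse

p = argparse.ArgumentParser()
p.add_argument('--word', default=False, type=bool)
assert p.parse_args(['--word', 'False']).word is True   # 'False' is a non-empty string
assert p.parse_args([]).word is False                    # default used untouched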
xhub/pretalx
src/tests/cfp/views/test_cfp_user.py
33bd07ec98ddeb5b7ff35fe7e30c4d38bef57d7e
import pytest from django.conf import settings from django.core import mail as djmail from django.core.files.uploadedfile import SimpleUploadedFile from django.urls import reverse from django_scopes import scope from rest_framework.authtoken.models import Token from pretalx.submission.models import SubmissionStates @pytest.mark.django_db def test_can_see_submission_list(speaker_client, submission): response = speaker_client.get(submission.event.urls.user_submissions, follow=True) assert response.status_code == 200 assert submission.title in response.content.decode() @pytest.mark.django_db def test_can_see_submission(speaker_client, submission): response = speaker_client.get(submission.urls.user_base, follow=True) assert response.status_code == 200 assert submission.title in response.content.decode() @pytest.mark.django_db def test_cannot_see_other_submission(speaker_client, other_submission): response = speaker_client.get(other_submission.urls.user_base, follow=True) assert response.status_code == 404 @pytest.mark.django_db def test_can_confirm_submission(speaker_client, accepted_submission): response = speaker_client.get(accepted_submission.urls.confirm, follow=True) accepted_submission.refresh_from_db() assert response.status_code == 200 assert accepted_submission.state == SubmissionStates.ACCEPTED response = speaker_client.post(accepted_submission.urls.confirm, follow=True) accepted_submission.refresh_from_db() assert response.status_code == 200 assert accepted_submission.state == SubmissionStates.CONFIRMED @pytest.mark.django_db def test_can_reconfirm_submission(speaker_client, accepted_submission): accepted_submission.state = SubmissionStates.CONFIRMED accepted_submission.save() response = speaker_client.get(accepted_submission.urls.confirm, follow=True) accepted_submission.refresh_from_db() assert response.status_code == 200 assert accepted_submission.state == SubmissionStates.CONFIRMED @pytest.mark.django_db def test_cannot_confirm_rejected_submission(other_speaker_client, rejected_submission): rejected_submission.state = SubmissionStates.REJECTED rejected_submission.save() response = other_speaker_client.get(rejected_submission.urls.confirm, follow=True) rejected_submission.refresh_from_db() assert response.status_code == 200 assert rejected_submission.state == SubmissionStates.REJECTED @pytest.mark.django_db def test_can_withdraw_submission(speaker_client, submission): response = speaker_client.get(submission.urls.withdraw, follow=True) submission.refresh_from_db() assert response.status_code == 200 assert submission.state == SubmissionStates.SUBMITTED response = speaker_client.post(submission.urls.withdraw, follow=True) submission.refresh_from_db() assert response.status_code == 200 assert submission.state == SubmissionStates.WITHDRAWN @pytest.mark.django_db def test_cannot_withdraw_accepted_submission(speaker_client, accepted_submission): response = speaker_client.get(accepted_submission.urls.withdraw, follow=True) accepted_submission.refresh_from_db() assert response.status_code == 200 assert accepted_submission.state == SubmissionStates.ACCEPTED @pytest.mark.django_db def test_can_edit_submission(speaker_client, submission, resource, other_resource): with scope(event=submission.event): assert submission.resources.count() == 2 resource_one = submission.resources.first() resource_two = submission.resources.last() assert submission.title in str(resource_one) f = SimpleUploadedFile('testfile.txt', b'file_content') data = { 'title': 'Ein ganz neuer Titel', 'submission_type': 
submission.submission_type.pk, 'content_locale': submission.content_locale, 'description': submission.description, 'abstract': submission.abstract, 'notes': submission.notes, 'slot_count': submission.slot_count, 'resource-0-id': resource_one.id, 'resource-0-description': 'new resource name', 'resource-0-resource': resource_one.resource, 'resource-1-id': resource_two.id, 'resource-1-DELETE': True, 'resource-1-description': resource_two.description, 'resource-1-resource': resource_two.resource, 'resource-2-id': '', 'resource-2-description': 'new resource', 'resource-2-resource': f, 'resource-TOTAL_FORMS': 3, 'resource-INITIAL_FORMS': 2, 'resource-MIN_NUM_FORMS': 0, 'resource-MAX_NUM_FORMS': 1000, } response = speaker_client.post(submission.urls.user_base, follow=True, data=data) assert response.status_code == 200 with scope(event=submission.event): assert submission.resources.count() == 2 submission.refresh_from_db() resource_one.refresh_from_db() new_resource = submission.resources.exclude(pk=resource_one.pk).first() assert submission.title == 'Ein ganz neuer Titel', response.content.decode() assert submission.resources.count() == 2 assert new_resource.description == 'new resource' assert new_resource.resource.read() == b'file_content' assert not submission.resources.filter(pk=resource_two.pk).exists() @pytest.mark.django_db def test_can_edit_slot_count(speaker_client, submission): with scope(event=submission.event): submission.event.settings.present_multiple_times = True data = { 'title': 'Ein ganz neuer Titel', 'submission_type': submission.submission_type.pk, 'content_locale': submission.content_locale, 'description': submission.description, 'abstract': submission.abstract, 'notes': submission.notes, 'slot_count': 13, 'resource-TOTAL_FORMS': 0, 'resource-INITIAL_FORMS': 0, 'resource-MIN_NUM_FORMS': 0, 'resource-MAX_NUM_FORMS': 1000, } response = speaker_client.post(submission.urls.user_base, follow=True, data=data) assert response.status_code == 200 with scope(event=submission.event): submission.refresh_from_db() assert submission.slot_count == 13 @pytest.mark.django_db def test_cannot_edit_confirmed_slot_count(speaker_client, confirmed_submission): submission = confirmed_submission submission.event.settings.present_multiple_times = True with scope(event=submission.event): data = { 'title': 'Ein ganz neuer Titel', 'submission_type': submission.submission_type.pk, 'content_locale': submission.content_locale, 'description': submission.description, 'abstract': submission.abstract, 'notes': submission.notes, 'slot_count': 13, 'resource-TOTAL_FORMS': 0, 'resource-INITIAL_FORMS': 0, 'resource-MIN_NUM_FORMS': 0, 'resource-MAX_NUM_FORMS': 1000, } response = speaker_client.post(submission.urls.user_base, follow=True, data=data) assert response.status_code == 200 with scope(event=submission.event): submission.refresh_from_db() assert submission.slot_count != 13 @pytest.mark.django_db def test_cannot_edit_rejected_submission(other_speaker_client, rejected_submission): title = rejected_submission.title data = { 'title': 'Ein ganz neuer Titel', 'submission_type': rejected_submission.submission_type.pk, 'content_locale': rejected_submission.content_locale, 'description': rejected_submission.description, 'abstract': rejected_submission.abstract, 'notes': rejected_submission.notes, 'resource-TOTAL_FORMS': 0, 'resource-INITIAL_FORMS': 0, 'resource-MIN_NUM_FORMS': 0, 'resource-MAX_NUM_FORMS': 1000, } response = other_speaker_client.post( rejected_submission.urls.user_base, follow=True, data=data ) assert 
response.status_code == 200 rejected_submission.refresh_from_db() assert rejected_submission.title == title @pytest.mark.django_db def test_can_edit_submission_type(speaker_client, submission, event): with scope(event=submission.event): new_type = event.submission_types.create(name='Other', default_duration=13) data = { 'title': 'Ein ganz neuer Titel', 'submission_type': new_type.pk, 'content_locale': submission.content_locale, 'description': submission.description, 'abstract': submission.abstract, 'notes': submission.notes, 'resource-TOTAL_FORMS': 0, 'resource-INITIAL_FORMS': 0, 'resource-MIN_NUM_FORMS': 0, 'resource-MAX_NUM_FORMS': 1000, } response = speaker_client.post(submission.urls.user_base, follow=True, data=data) assert response.status_code == 200 with scope(event=submission.event): submission.refresh_from_db() assert submission.submission_type == new_type @pytest.mark.django_db def test_cannot_edit_submission_type_after_acceptance(speaker_client, submission, event): with scope(event=submission.event): submission.accept() new_type = event.submission_types.create(name='Other', default_duration=13) data = { 'title': 'Ein ganz neuer Titel', 'submission_type': new_type.pk, 'content_locale': submission.content_locale, 'description': submission.description, 'abstract': submission.abstract, 'notes': submission.notes, 'resource-TOTAL_FORMS': 0, 'resource-INITIAL_FORMS': 0, 'resource-MIN_NUM_FORMS': 0, 'resource-MAX_NUM_FORMS': 1000, } response = speaker_client.post(submission.urls.user_base, follow=True, data=data) assert response.status_code == 200 with scope(event=submission.event): submission.refresh_from_db() assert submission.submission_type != new_type @pytest.mark.django_db def test_can_edit_profile(speaker, event, speaker_client): response = speaker_client.post( event.urls.user, data={ 'name': 'Lady Imperator', 'biography': 'Ruling since forever.', 'form': 'profile', }, follow=True, ) assert response.status_code == 200 with scope(event=event): speaker.refresh_from_db() assert speaker.profiles.get(event=event).biography == 'Ruling since forever.' assert speaker.name == 'Lady Imperator' @pytest.mark.django_db def test_can_change_api_token(speaker, event, speaker_client): speaker.regenerate_token() old_token = Token.objects.filter(user=speaker).first().key response = speaker_client.post( event.urls.user, data={ 'form': 'token', }, follow=True, ) assert response.status_code == 200 new_token = Token.objects.filter(user=speaker).first().key assert new_token != old_token @pytest.mark.django_db def test_must_provide_availabilities(speaker, event, speaker_client): event.settings.cfp_require_availabilities = True response = speaker_client.post( event.urls.user, data={ 'name': 'Lady Imperator', 'biography': 'Ruling since forever.', 'form': 'profile', }, follow=True, ) assert response.status_code == 200 with scope(event=event): speaker.refresh_from_db() assert speaker.profiles.get(event=event).biography != 'Ruling since forever.' response = speaker_client.post( event.urls.user, data={ 'name': 'Lady Imperator', 'biography': 'Ruling since forever.', 'form': 'profile', 'availabilities': '{"availabilities": []}', }, follow=True, ) assert response.status_code == 200 with scope(event=event): speaker.refresh_from_db() assert speaker.profiles.get(event=event).biography != 'Ruling since forever.' 
@pytest.mark.django_db def test_can_edit_login_info(speaker, event, speaker_client): response = speaker_client.post( event.urls.user, data={ 'old_password': 'speakerpwd1!', 'email': '[email protected]', 'password': '', 'password_repeat': '', 'form': 'login', }, follow=True, ) assert response.status_code == 200 speaker.refresh_from_db() assert speaker.email == '[email protected]' @pytest.mark.django_db def test_can_edit_login_info_wrong_password(speaker, event, speaker_client): response = speaker_client.post( event.urls.user, data={ 'old_password': 'speakerpwd23!', 'email': '[email protected]', 'password': '', 'password_repeat': '', 'form': 'login', }, follow=True, ) assert response.status_code == 200 speaker.refresh_from_db() assert speaker.email != '[email protected]' @pytest.mark.django_db def test_can_edit_and_update_speaker_answers( speaker, event, speaker_question, speaker_boolean_question, speaker_client, speaker_text_question, speaker_file_question, ): with scope(event=event): answer = speaker.answers.filter(question_id=speaker_question.pk).first() assert not answer f = SimpleUploadedFile('testfile.txt', b'file_content') response = speaker_client.post( event.urls.user, data={ f'question_{speaker_question.id}': 'black as the night', f'question_{speaker_boolean_question.id}': 'True', f'question_{speaker_file_question.id}': f, f'question_{speaker_text_question.id}': 'Green is totally the best color.', 'form': 'questions', }, follow=True, ) assert response.status_code == 200 with scope(event=event): answer = speaker.answers.get(question_id=speaker_question.pk) assert answer.answer == 'black as the night' assert speaker.answers.get(question_id=speaker_boolean_question.pk).answer == 'True' assert ( speaker.answers.get(question_id=speaker_text_question.pk).answer == 'Green is totally the best color.' 
) file_answer = speaker.answers.get(question_id=speaker_file_question.pk) assert file_answer.answer.startswith('file://') assert file_answer.answer_file.read() == b'file_content' assert (settings.MEDIA_ROOT / file_answer.answer_file.name).exists() response = speaker_client.post( event.urls.user, data={ f'question_{speaker_question.id}': 'green as the sky', 'form': 'questions', }, follow=True, ) assert response.status_code == 200 with scope(event=event): answer.refresh_from_db() assert answer.answer == 'green as the sky' @pytest.mark.django_db def test_cannot_delete_profile_on_first_try(speaker, event, speaker_client): with scope(event=event): assert speaker.profiles.get(event=event).biography != '' response = speaker_client.post(event.urls.user_delete, follow=True) assert response.status_code == 200 with scope(event=event): speaker.refresh_from_db() assert speaker.profiles.get(event=event).biography != '' assert speaker.name != 'Deleted User' @pytest.mark.django_db def test_can_delete_profile(speaker, event, speaker_client): with scope(event=event): assert speaker.profiles.get(event=event).biography != '' response = speaker_client.post( event.urls.user_delete, data={'really': True}, follow=True ) assert response.status_code == 200 with scope(event=event): speaker.refresh_from_db() assert speaker.profiles.get(event=event).biography == '' assert speaker.name == 'Deleted User' assert speaker.email.startswith('deleted_user') assert speaker.email.endswith('@localhost') @pytest.mark.django_db def test_can_change_locale(multilingual_event, client): first_response = client.get(multilingual_event.cfp.urls.public, follow=True) assert 'submission' in first_response.content.decode() assert 'Einreichung' not in first_response.content.decode() second_response = client.get( reverse('cfp:locale.set', kwargs={'event': multilingual_event.slug}) + f'?locale=de&next=/{multilingual_event.slug}/', follow=True, ) assert 'Einreichung' in second_response.content.decode() @pytest.mark.django_db def test_persists_changed_locale(multilingual_event, orga_user, orga_client): assert orga_user.locale == 'en' response = orga_client.get( reverse('cfp:locale.set', kwargs={'event': multilingual_event.slug}) + f'?locale=de&next=/{multilingual_event.slug}/', follow=True, ) orga_user.refresh_from_db() assert response.status_code == 200 assert orga_user.locale == 'de' @pytest.mark.django_db def test_can_invite_speaker(speaker_client, submission): djmail.outbox = [] response = speaker_client.get( submission.urls.invite, follow=True, data={'email': 'invalidemail'} ) assert response.status_code == 200 data = { 'speaker': '[email protected]', 'subject': 'Please join!', 'text': 'C\'mon, it will be fun!', } response = speaker_client.post(submission.urls.invite, follow=True, data=data) assert response.status_code == 200 assert len(djmail.outbox) == 1 assert djmail.outbox[0].to == ['[email protected]'] @pytest.mark.django_db def test_can_accept_invitation(orga_client, submission): assert submission.speakers.count() == 1 response = orga_client.post(submission.urls.accept_invitation, follow=True) submission.refresh_from_db() assert response.status_code == 200 assert submission.speakers.count() == 2 @pytest.mark.django_db def test_wrong_acceptance_link(orga_client, submission): assert submission.speakers.count() == 1 response = orga_client.post( submission.urls.accept_invitation + 'olololol', follow=True ) submission.refresh_from_db() assert response.status_code == 404 assert submission.speakers.count() == 1 @pytest.mark.django_db 
@pytest.mark.parametrize('request_availability', (True, False)) def test_submission_accept(speaker_client, submission, request_availability): submission.event.settings.cfp_request_availabilities = request_availability submission.state = SubmissionStates.ACCEPTED submission.save() response = speaker_client.post(submission.urls.confirm, follow=True) submission.refresh_from_db() assert response.status_code == 200 assert submission.state == SubmissionStates.CONFIRMED @pytest.mark.django_db def test_submission_accept_with_missing_availability(speaker_client, submission): submission.event.settings.cfp_request_availabilities = True submission.event.settings.cfp_require_availabilities = True submission.state = SubmissionStates.ACCEPTED submission.save() response = speaker_client.post(submission.urls.confirm, follow=True) submission.refresh_from_db() assert response.status_code == 200 assert submission.state == SubmissionStates.ACCEPTED @pytest.mark.django_db def test_submission_accept_nologin(client, submission): submission.state = SubmissionStates.ACCEPTED submission.save() response = client.post(submission.urls.confirm, follow=True) submission.refresh_from_db() assert response.status_code == 200 assert response.redirect_chain[-1][1] == 302 assert 'login/?next=' in response.redirect_chain[-1][0] assert submission.state == SubmissionStates.ACCEPTED @pytest.mark.django_db def test_submission_accept_wrong_code(client, submission): submission.state = SubmissionStates.ACCEPTED submission.save() assert submission.code in submission.urls.confirm response = client.post( submission.urls.confirm.replace(submission.code, "foo"), follow=True ) assert response.status_code == 200 assert response.redirect_chain[-1][1] == 302 assert 'login/?next=' in response.redirect_chain[-1][0] @pytest.mark.django_db def test_submission_withdraw(speaker_client, submission): djmail.outbox = [] submission.state = SubmissionStates.SUBMITTED submission.save() response = speaker_client.post(submission.urls.withdraw, follow=True) assert response.status_code == 200 submission.refresh_from_db() assert submission.state == SubmissionStates.WITHDRAWN assert len(djmail.outbox) == 0 @pytest.mark.django_db def test_submission_withdraw_if_accepted(speaker_client, submission): djmail.outbox = [] with scope(event=submission.event): submission.accept() response = speaker_client.post(submission.urls.withdraw, follow=True) assert response.status_code == 200 with scope(event=submission.event): submission.refresh_from_db() assert submission.state == SubmissionStates.WITHDRAWN assert len(djmail.outbox) == 1 @pytest.mark.django_db def test_submission_withdraw_if_confirmed(speaker_client, submission): with scope(event=submission.event): submission.accept() submission.confirm() response = speaker_client.post(submission.urls.withdraw, follow=True) assert response.status_code == 200 with scope(event=submission.event): submission.refresh_from_db() assert submission.state != SubmissionStates.WITHDRAWN @pytest.mark.django_db def test_submission_withdraw_if_rejected(speaker_client, submission): with scope(event=submission.event): submission.reject() response = speaker_client.post(submission.urls.withdraw, follow=True) assert response.status_code == 200 with scope(event=submission.event): submission.refresh_from_db() assert submission.state != SubmissionStates.WITHDRAWN
[((18623, 18685), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""request_availability"""', '(True, False)'], {}), "('request_availability', (True, False))\n", (18646, 18685), False, 'import pytest\n'), ((13683, 13734), 'django.core.files.uploadedfile.SimpleUploadedFile', 'SimpleUploadedFile', (['"""testfile.txt"""', "b'file_content'"], {}), "('testfile.txt', b'file_content')\n", (13701, 13734), False, 'from django.core.files.uploadedfile import SimpleUploadedFile\n'), ((3410, 3439), 'django_scopes.scope', 'scope', ([], {'event': 'submission.event'}), '(event=submission.event)\n', (3415, 3439), False, 'from django_scopes import scope\n'), ((3658, 3709), 'django.core.files.uploadedfile.SimpleUploadedFile', 'SimpleUploadedFile', (['"""testfile.txt"""', "b'file_content'"], {}), "('testfile.txt', b'file_content')\n", (3676, 3709), False, 'from django.core.files.uploadedfile import SimpleUploadedFile\n'), ((4879, 4908), 'django_scopes.scope', 'scope', ([], {'event': 'submission.event'}), '(event=submission.event)\n', (4884, 4908), False, 'from django_scopes import scope\n'), ((5538, 5567), 'django_scopes.scope', 'scope', ([], {'event': 'submission.event'}), '(event=submission.event)\n', (5543, 5567), False, 'from django_scopes import scope\n'), ((6288, 6317), 'django_scopes.scope', 'scope', ([], {'event': 'submission.event'}), '(event=submission.event)\n', (6293, 6317), False, 'from django_scopes import scope\n'), ((6612, 6641), 'django_scopes.scope', 'scope', ([], {'event': 'submission.event'}), '(event=submission.event)\n', (6617, 6641), False, 'from django_scopes import scope\n'), ((7298, 7327), 'django_scopes.scope', 'scope', ([], {'event': 'submission.event'}), '(event=submission.event)\n', (7303, 7327), False, 'from django_scopes import scope\n'), ((8391, 8420), 'django_scopes.scope', 'scope', ([], {'event': 'submission.event'}), '(event=submission.event)\n', (8396, 8420), False, 'from django_scopes import scope\n'), ((9113, 9142), 'django_scopes.scope', 'scope', ([], {'event': 'submission.event'}), '(event=submission.event)\n', (9118, 9142), False, 'from django_scopes import scope\n'), ((9359, 9388), 'django_scopes.scope', 'scope', ([], {'event': 'submission.event'}), '(event=submission.event)\n', (9364, 9388), False, 'from django_scopes import scope\n'), ((10109, 10138), 'django_scopes.scope', 'scope', ([], {'event': 'submission.event'}), '(event=submission.event)\n', (10114, 10138), False, 'from django_scopes import scope\n'), ((10596, 10614), 'django_scopes.scope', 'scope', ([], {'event': 'event'}), '(event=event)\n', (10601, 10614), False, 'from django_scopes import scope\n'), ((11674, 11692), 'django_scopes.scope', 'scope', ([], {'event': 'event'}), '(event=event)\n', (11679, 11692), False, 'from django_scopes import scope\n'), ((12151, 12169), 'django_scopes.scope', 'scope', ([], {'event': 'event'}), '(event=event)\n', (12156, 12169), False, 'from django_scopes import scope\n'), ((13548, 13566), 'django_scopes.scope', 'scope', ([], {'event': 'event'}), '(event=event)\n', (13553, 13566), False, 'from django_scopes import scope\n'), ((14206, 14224), 'django_scopes.scope', 'scope', ([], {'event': 'event'}), '(event=event)\n', (14211, 14224), False, 'from django_scopes import scope\n'), ((15138, 15156), 'django_scopes.scope', 'scope', ([], {'event': 'event'}), '(event=event)\n', (15143, 15156), False, 'from django_scopes import scope\n'), ((15353, 15371), 'django_scopes.scope', 'scope', ([], {'event': 'event'}), '(event=event)\n', (15358, 15371), False, 'from django_scopes 
import scope\n'), ((15558, 15576), 'django_scopes.scope', 'scope', ([], {'event': 'event'}), '(event=event)\n', (15563, 15576), False, 'from django_scopes import scope\n'), ((15818, 15836), 'django_scopes.scope', 'scope', ([], {'event': 'event'}), '(event=event)\n', (15823, 15836), False, 'from django_scopes import scope\n'), ((16060, 16078), 'django_scopes.scope', 'scope', ([], {'event': 'event'}), '(event=event)\n', (16065, 16078), False, 'from django_scopes import scope\n'), ((21102, 21131), 'django_scopes.scope', 'scope', ([], {'event': 'submission.event'}), '(event=submission.event)\n', (21107, 21131), False, 'from django_scopes import scope\n'), ((21284, 21313), 'django_scopes.scope', 'scope', ([], {'event': 'submission.event'}), '(event=submission.event)\n', (21289, 21313), False, 'from django_scopes import scope\n'), ((21558, 21587), 'django_scopes.scope', 'scope', ([], {'event': 'submission.event'}), '(event=submission.event)\n', (21563, 21587), False, 'from django_scopes import scope\n'), ((21769, 21798), 'django_scopes.scope', 'scope', ([], {'event': 'submission.event'}), '(event=submission.event)\n', (21774, 21798), False, 'from django_scopes import scope\n'), ((22003, 22032), 'django_scopes.scope', 'scope', ([], {'event': 'submission.event'}), '(event=submission.event)\n', (22008, 22032), False, 'from django_scopes import scope\n'), ((22185, 22214), 'django_scopes.scope', 'scope', ([], {'event': 'submission.event'}), '(event=submission.event)\n', (22190, 22214), False, 'from django_scopes import scope\n'), ((16660, 16728), 'django.urls.reverse', 'reverse', (['"""cfp:locale.set"""'], {'kwargs': "{'event': multilingual_event.slug}"}), "('cfp:locale.set', kwargs={'event': multilingual_event.slug})\n", (16667, 16728), False, 'from django.urls import reverse\n'), ((17054, 17122), 'django.urls.reverse', 'reverse', (['"""cfp:locale.set"""'], {'kwargs': "{'event': multilingual_event.slug}"}), "('cfp:locale.set', kwargs={'event': multilingual_event.slug})\n", (17061, 17122), False, 'from django.urls import reverse\n'), ((10919, 10953), 'rest_framework.authtoken.models.Token.objects.filter', 'Token.objects.filter', ([], {'user': 'speaker'}), '(user=speaker)\n', (10939, 10953), False, 'from rest_framework.authtoken.models import Token\n'), ((11164, 11198), 'rest_framework.authtoken.models.Token.objects.filter', 'Token.objects.filter', ([], {'user': 'speaker'}), '(user=speaker)\n', (11184, 11198), False, 'from rest_framework.authtoken.models import Token\n')]
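
The file above is a standard pytest-django test module. Assuming a configured pretalx development environment (test settings, database) and the repository root as working directory, it can also be driven through pytest's Python entry point; the -k filter below is an arbitrary example.

# Sketch: running only this module via pytest's Python API. The file path is
# the repo_path of the record above; environment setup is assumed.
import pytest

pytest.main(['src/tests/cfp/views/test_cfp_user.py', '-k', 'withdraw', '-q'])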
vasilydenisenko/modbus_rtu_slave
tests/mb_util.py
8a531b776ab82c60b5d335f0565468f19a7801f5
# MIT License # Copyright (c) 2021 Vasily Denisenko, Sergey Kuznetsov # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import mb_bsp PDU_SIZE_REG = 0 CONFIG_REG = 1 SLAVE_ADDR_REG = 2 CS_REG = 3 MB_MAX_WRITE_REGNUM = 123 MB_MAX_READ_REGNUM = 125 MB_MAX_REG_ADDR = 65535 MB_MAX_REG_VAL = 65535 MB_MAX_SLAVE_ADDR = 247 MB_MIN_SLAVE_ADDR = 1 MB_MAX_PDU_SIZE = 253 MB_MIN_PDU_SIZE = 1 FCODE_0x3 = 0x3 FCODE_0x6 = 0x6 FCODE_0x10 = 0x10 def incr_err_count(): incr_err_count.count += 1 setattr(incr_err_count, 'count', 0) def wait_mb_master_status(status): mb_bsp.wait_master_status(status) # 'FSM status' or 'PDU status' if mb_bsp.alarm_cb.status_timeout == 1: print('*** Test FAILED: ', status , ' timeout ***') mb_bsp.alarm_cb.status_timeout = 0 incr_err_count() def config_modbus(modbus_role, slave_addr, pdu, config_val): wait_mb_master_status('FSM status') if modbus_role == 'Master': mb_bsp.write_mb_master_cs(CONFIG_REG, config_val) # Set configuration mb_bsp.write_mb_master_cs(SLAVE_ADDR_REG, slave_addr) # Set slave address mb_bsp.write_mb_master_cs(PDU_SIZE_REG, len(pdu)) # Set request PDU size mb_bsp.write_mb_master_pdu(pdu) # Set request PDU else: mb_bsp.write_mb_slave_cs(CONFIG_REG, config_val) # Set configuration mb_bsp.write_mb_slave_cs(SLAVE_ADDR_REG, slave_addr) # Set slave address def generate_0x03_pdu(addr, regnum): pdu = list() ref_pdu = list() pdu.append(0x3) ref_pdu.append(0x3) addr_h = (addr & 0xff00) >> 8 pdu.append(addr_h) addr_l = (addr & 0xff) pdu.append(addr_l) regnum_h = (regnum & 0xff00) >> 8 pdu.append(regnum_h) regnum_l = regnum & 0xff pdu.append(regnum_l) bytecount = regnum << 1 ref_pdu.append(bytecount) for i in range(bytecount): ref_pdu.append(0) return [pdu, ref_pdu] def generate_0x06_pdu(addr, regval): pdu = list() pdu.append(0x6) addr_h = (addr & 0xff00) >> 8 pdu.append(addr_h) addr_l = (addr & 0xff) pdu.append(addr_l) regval_h = (regval[0] & 0xff00) >> 8 pdu.append(regval_h) regval_l = regval[0] & 0xff pdu.append(regval_l) ref_pdu = pdu.copy() return [pdu, ref_pdu] def generate_0x10_pdu(addr, regnum, regval): pdu = list() pdu.append(0x10) addr_h = (addr & 0xff00) >> 8 pdu.append(addr_h) addr_l = (addr & 0xff) pdu.append(addr_l) regnum_h = (regnum & 0xff00) >> 8 pdu.append(regnum_h) regnum_l = regnum & 0xff pdu.append(regnum_l) ref_pdu = pdu.copy() bytecount = regnum_l << 1 pdu.append(bytecount) for i in range(regnum_l): regval_h = (regval[i] & 0xff00) >> 8 pdu.append(regval_h) regval_l = regval[i] & 0xff pdu.append(regval_l) return [pdu, ref_pdu] def print_test_result(result_ok): if 
result_ok: msg = '\tTest Successful' else: msg = '\tTest FAILED' print() print('***************************') print(msg) print('***************************') print() def get_total_error_count(modbus_role): count = 0 error_tuple = mb_bsp.get_error_count() if modbus_role == 'Both': for err_list in error_tuple: for i in err_list: count += i elif modbus_role == 'Master': for i in error_tuple[0]: count += i elif modbus_role == 'Slave': for i in error_tuple[1]: count += i return count def get_single_error_count(modbus_role, error_type): error_tuple = mb_bsp.get_error_count() count = 0 if modbus_role == 'Master': if error_type == 'parity': count = error_tuple[0][0] elif error_type == 'start bit': count = error_tuple[0][1] elif error_type == 'stop bit': count = error_tuple[0][2] elif error_type == 'address': count = error_tuple[0][3] elif error_type == 'crc': count = error_tuple[0][4] elif modbus_role == 'Slave': if error_type == 'parity': count = error_tuple[1][0] elif error_type == 'start bit': count = error_tuple[1][1] elif error_type == 'stop bit': count = error_tuple[1][2] elif error_type == 'address': count = error_tuple[1][3] elif error_type == 'crc': count = error_tuple[1][4] return count def print_error_count(): error_tuple = mb_bsp.get_error_count() print() print('master_parity_err_count = ', error_tuple[0][0]) print('master_start_bit_err_count = ', error_tuple[0][1]) print('master_stop_bit_err_count = ', error_tuple[0][2]) print('master_addr_err_count = ', error_tuple[0][3]) print('master_crc_err_count = ', error_tuple[0][4]) print('slave_parity_err_count = ', error_tuple[1][0]) print('slave_start_bit_err_count = ', error_tuple[1][1]) print('slave_stop_bit_err_count = ', error_tuple[1][2]) print('slave_addr_err_count = ', error_tuple[1][3]) print('slave_crc_err_count = ', error_tuple[1][4]) print('--------------------------------') print()
[((1630, 1663), 'mb_bsp.wait_master_status', 'mb_bsp.wait_master_status', (['status'], {}), '(status)\n', (1655, 1663), False, 'import mb_bsp\n'), ((4129, 4153), 'mb_bsp.get_error_count', 'mb_bsp.get_error_count', ([], {}), '()\n', (4151, 4153), False, 'import mb_bsp\n'), ((4498, 4522), 'mb_bsp.get_error_count', 'mb_bsp.get_error_count', ([], {}), '()\n', (4520, 4522), False, 'import mb_bsp\n'), ((5289, 5313), 'mb_bsp.get_error_count', 'mb_bsp.get_error_count', ([], {}), '()\n', (5311, 5313), False, 'import mb_bsp\n'), ((1994, 2043), 'mb_bsp.write_mb_master_cs', 'mb_bsp.write_mb_master_cs', (['CONFIG_REG', 'config_val'], {}), '(CONFIG_REG, config_val)\n', (2019, 2043), False, 'import mb_bsp\n'), ((2068, 2121), 'mb_bsp.write_mb_master_cs', 'mb_bsp.write_mb_master_cs', (['SLAVE_ADDR_REG', 'slave_addr'], {}), '(SLAVE_ADDR_REG, slave_addr)\n', (2093, 2121), False, 'import mb_bsp\n'), ((2227, 2258), 'mb_bsp.write_mb_master_pdu', 'mb_bsp.write_mb_master_pdu', (['pdu'], {}), '(pdu)\n', (2253, 2258), False, 'import mb_bsp\n'), ((2293, 2341), 'mb_bsp.write_mb_slave_cs', 'mb_bsp.write_mb_slave_cs', (['CONFIG_REG', 'config_val'], {}), '(CONFIG_REG, config_val)\n', (2317, 2341), False, 'import mb_bsp\n'), ((2366, 2418), 'mb_bsp.write_mb_slave_cs', 'mb_bsp.write_mb_slave_cs', (['SLAVE_ADDR_REG', 'slave_addr'], {}), '(SLAVE_ADDR_REG, slave_addr)\n', (2390, 2418), False, 'import mb_bsp\n')]
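
The PDU builders above are pure byte arithmetic, so their output can be checked without Modbus hardware. Note that importing mb_util pulls in mb_bsp as a side effect, so this sketch assumes that import succeeds in the test environment; the address and register values are arbitrary examples.

# Sketch: expected output of generate_0x10_pdu for a two-register write
# (function code 0x10) at address 0x0001 with values 0x1234 and 0x5678.
import mb_util  # importing this module also imports mb_bsp

pdu, ref_pdu = mb_util.generate_0x10_pdu(addr=0x0001, regnum=2, regval=[0x1234, 0x5678])
assert pdu == [0x10, 0x00, 0x01, 0x00, 0x02, 0x04, 0x12, 0x34, 0x56, 0x78]
assert ref_pdu == [0x10, 0x00, 0x01, 0x00, 0x02]  # header echoed without byte count and data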
tjsavage/polymer-dashboard
modules/stackoverflow/models.py
19bc467f1206613f8eec646b6f2bc43cc319ef75
import fix_path import json import datetime from google.appengine.ext import ndb # Taken from http://stackoverflow.com/questions/455580/json-datetime-between-python-and-javascript dthandler = lambda obj: ( obj.isoformat() if isinstance(obj, datetime.datetime) or isinstance(obj, datetime.date) else None ) class StackOverflowSnapshot(ndb.Model): """Example Model""" raw_timestamp = ndb.DateTimeProperty(required=True, auto_now_add=True) requested_time = ndb.DateTimeProperty(required=True) num_questions_by_tag = ndb.JsonProperty() num_tagged_questions = ndb.IntegerProperty() num_answered = ndb.IntegerProperty() num_unanswered = ndb.IntegerProperty() total_question_views = ndb.IntegerProperty() status = ndb.StringProperty() status_string = ndb.StringProperty() def as_dict(self): result = {} result['requested_time'] = dthandler(self.requested_time) result['num_tagged_questions'] = self.num_tagged_questions result['num_questions_by_tag'] = self.num_questions_by_tag result['num_answered'] = self.num_answered result['num_unanswered'] = self.num_unanswered result['total_question_views'] = self.total_question_views result['status'] = self.status result['status_string'] = self.status_string return result class StackOverflowQuestion(ndb.Model): first_seen = ndb.DateTimeProperty(required=True, auto_now_add=True) tags = ndb.StringProperty(repeated=True) is_answered = ndb.BooleanProperty() view_count = ndb.IntegerProperty() answer_count = ndb.IntegerProperty() url = ndb.StringProperty() title = ndb.StringProperty() creation_date = ndb.DateTimeProperty() question_id = ndb.IntegerProperty() def as_dict(self): result = {} result['first_seen'] = dthandler(self.first_seen) result['tags'] = [t for t in self.tags] result['is_answered'] = self.is_answered result['view_count'] = self.view_count result['answer_count'] = self.answer_count result['url'] = self.url result['title'] = self.title result['creation_date'] = dthandler(self.creation_date) result['question_id'] = self.question_id return result def update_to_stackexchange_question(self, stackexchange_question): updated = False if stackexchange_question.tags != self.tags: self.tags = stackexchange_question.tags updated = True if stackexchange_question.json['is_answered'] != self.is_answered: self.is_answered = stackexchange_question.json['is_answered'] updated = True if stackexchange_question.view_count != self.view_count: self.view_count = stackexchange_question.view_count updated = True if stackexchange_question.json['answer_count'] != self.answer_count: self.answer_count = stackexchange_question.json['answer_count'] updated = True if stackexchange_question.url != self.url: self.url = stackexchange_question.url updated = True if stackexchange_question.title != self.title: self.title = stackexchange_question.title updated = True if stackexchange_question.creation_date != self.creation_date: self.creation_date = stackexchange_question.creation_date updated = True if stackexchange_question.json['question_id'] != self.question_id: self.question_id = stackexchange_question.json['question_id'] updated = True return updated @classmethod def from_stackexchange_question(cls, stackexchange_question): result = cls( tags = [t for t in stackexchange_question.tags], is_answered = stackexchange_question.json['is_answered'], view_count = stackexchange_question.view_count, answer_count = stackexchange_question.json['answer_count'], url = stackexchange_question.url, title = stackexchange_question.title, creation_date = stackexchange_question.creation_date, question_id = 
stackexchange_question.json['question_id'] ) return result
[((409, 463), 'google.appengine.ext.ndb.DateTimeProperty', 'ndb.DateTimeProperty', ([], {'required': '(True)', 'auto_now_add': '(True)'}), '(required=True, auto_now_add=True)\n', (429, 463), False, 'from google.appengine.ext import ndb\n'), ((485, 520), 'google.appengine.ext.ndb.DateTimeProperty', 'ndb.DateTimeProperty', ([], {'required': '(True)'}), '(required=True)\n', (505, 520), False, 'from google.appengine.ext import ndb\n'), ((548, 566), 'google.appengine.ext.ndb.JsonProperty', 'ndb.JsonProperty', ([], {}), '()\n', (564, 566), False, 'from google.appengine.ext import ndb\n'), ((594, 615), 'google.appengine.ext.ndb.IntegerProperty', 'ndb.IntegerProperty', ([], {}), '()\n', (613, 615), False, 'from google.appengine.ext import ndb\n'), ((635, 656), 'google.appengine.ext.ndb.IntegerProperty', 'ndb.IntegerProperty', ([], {}), '()\n', (654, 656), False, 'from google.appengine.ext import ndb\n'), ((678, 699), 'google.appengine.ext.ndb.IntegerProperty', 'ndb.IntegerProperty', ([], {}), '()\n', (697, 699), False, 'from google.appengine.ext import ndb\n'), ((727, 748), 'google.appengine.ext.ndb.IntegerProperty', 'ndb.IntegerProperty', ([], {}), '()\n', (746, 748), False, 'from google.appengine.ext import ndb\n'), ((762, 782), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {}), '()\n', (780, 782), False, 'from google.appengine.ext import ndb\n'), ((803, 823), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {}), '()\n', (821, 823), False, 'from google.appengine.ext import ndb\n'), ((1414, 1468), 'google.appengine.ext.ndb.DateTimeProperty', 'ndb.DateTimeProperty', ([], {'required': '(True)', 'auto_now_add': '(True)'}), '(required=True, auto_now_add=True)\n', (1434, 1468), False, 'from google.appengine.ext import ndb\n'), ((1480, 1513), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {'repeated': '(True)'}), '(repeated=True)\n', (1498, 1513), False, 'from google.appengine.ext import ndb\n'), ((1532, 1553), 'google.appengine.ext.ndb.BooleanProperty', 'ndb.BooleanProperty', ([], {}), '()\n', (1551, 1553), False, 'from google.appengine.ext import ndb\n'), ((1571, 1592), 'google.appengine.ext.ndb.IntegerProperty', 'ndb.IntegerProperty', ([], {}), '()\n', (1590, 1592), False, 'from google.appengine.ext import ndb\n'), ((1612, 1633), 'google.appengine.ext.ndb.IntegerProperty', 'ndb.IntegerProperty', ([], {}), '()\n', (1631, 1633), False, 'from google.appengine.ext import ndb\n'), ((1644, 1664), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {}), '()\n', (1662, 1664), False, 'from google.appengine.ext import ndb\n'), ((1677, 1697), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {}), '()\n', (1695, 1697), False, 'from google.appengine.ext import ndb\n'), ((1718, 1740), 'google.appengine.ext.ndb.DateTimeProperty', 'ndb.DateTimeProperty', ([], {}), '()\n', (1738, 1740), False, 'from google.appengine.ext import ndb\n'), ((1759, 1780), 'google.appengine.ext.ndb.IntegerProperty', 'ndb.IntegerProperty', ([], {}), '()\n', (1778, 1780), False, 'from google.appengine.ext import ndb\n')]
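
The module relies on the dthandler lambda to turn datetimes into ISO strings before JSON serialization. The same pattern also works as json.dumps' default hook; a sketch that does not require the App Engine ndb runtime:

# Sketch: serializing a dict containing datetimes the way the module's
# dthandler lambda does, here passed as json.dumps' default hook.
import json
import datetime

dthandler = lambda obj: (
    obj.isoformat()
    if isinstance(obj, datetime.datetime) or isinstance(obj, datetime.date)
    else None
)

snapshot_like = {'requested_time': datetime.datetime(2015, 1, 2, 3, 4, 5), 'num_tagged_questions': 42}
print(json.dumps(snapshot_like, default=dthandler))
# {"requested_time": "2015-01-02T03:04:05", "num_tagged_questions": 42}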
sonymoon/algorithm
src/main/java/com/bailei/study/beautyOfCoding/cpu50.py
cc2a9e0125fc64bdbf6549034bad6482d2027ea2
#!/usr/bin/python3
# -*- coding: UTF-8 -*-

import time

busyTime = 10
idleTime = busyTime
while True:
    start = time.clock()
    while time.clock() - start < busyTime:
        pass
    time.sleep(busyTime / 1000)
[((117, 129), 'time.clock', 'time.clock', ([], {}), '()\n', (127, 129), False, 'import time\n'), ((190, 217), 'time.sleep', 'time.sleep', (['(busyTime / 1000)'], {}), '(busyTime / 1000)\n', (200, 217), False, 'import time\n'), ((140, 152), 'time.clock', 'time.clock', ([], {}), '()\n', (150, 152), False, 'import time\n')]
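
A portability note on the loop above: time.clock() was deprecated in Python 3.3 and removed in Python 3.8, and it returned seconds rather than the milliseconds the "/ 1000" arithmetic suggests. A hedged rewrite of the same "busy about 10 ms, sleep about 10 ms" idea for modern Python, interpreting busyTime/idleTime as milliseconds:

# Sketch: roughly 50% CPU duty cycle using time.perf_counter(), the usual
# replacement for the removed time.clock().
import time

busyTime = 10  # milliseconds
idleTime = busyTime

while True:
    start = time.perf_counter()
    while time.perf_counter() - start < busyTime / 1000:
        pass
    time.sleep(idleTime / 1000)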
danicarrion/carto-python
carto/maps.py
631b018f065960baa35473e2087ce598560b9e17
""" Module for working with named and anonymous maps .. module:: carto.maps :platform: Unix, Windows :synopsis: Module for working with named and anonymous maps .. moduleauthor:: Daniel Carrion <[email protected]> .. moduleauthor:: Alberto Romeu <[email protected]> """ try: from urllib.parse import urljoin except ImportError: from urlparse import urljoin from pyrestcli.resources import Manager, Resource from .exceptions import CartoException, CartoRateLimitException API_VERSION = "v1" NAMED_API_ENDPOINT = "api/{api_version}/map/named/" ANONYMOUS_API_ENDPOINT = "api/{api_version}/map/" class BaseMap(Resource): """ Base class for NamedMap and AnonymousMap """ def __init__(self, auth_client): """ Initializes a BaseMap instance :param auth_client: Auth client """ super(BaseMap, self).__init__(auth_client) def get_tile_url(self, x, y, z, layer_id=None, feature_id=None, filter=None, extension="png"): """ Prepares a URL to get data (raster or vector) from a NamedMap or AnonymousMap :param x: The x tile :param y: The y tile :param z: The zoom level :param layer_id: Can be a number (referring to the # layer of your \ map), all layers of your map, or a list of layers. To show just the basemap layer, enter the value 0 To show the first layer, enter the value 1 To show all layers, enter the value 'all' To show a list of layers, enter the comma separated \ layer value as '0,1,2' :param feature_id: The id of the feature :param filter: The filter to be applied to the layer :param extension: The format of the data to be retrieved: png, mvt, ... :type x: int :type y: int :type z: int :type layer_id: str :type feature_id: str :type filter: str :type extension: str :return: A URL to download data :rtype: str :raise: CartoException """ base_url = self.client.base_url + self.Meta.collection_endpoint template_id = self.template_id if hasattr(self, 'template_id') \ else self.layergroupid if layer_id is not None and feature_id is not None: url = urljoin(base_url, "{template_id}/{layer}/attributes/{feature_id}"). \ format(template_id=template_id, layer=layer_id, feature_id=feature_id) elif layer_id is not None and filter is not None: url = urljoin(base_url, "{template_id}/{filter}/{z}/{x}/{y}.{extension}"). \ format(template_id=template_id, filter=filter, z=z, x=x, y=y, extension=extension) elif layer_id is not None: url = urljoin(base_url, "{template_id}/{layer}/{z}/{x}/{y}.{extension}"). \ format(template_id=template_id, layer=layer_id, z=z, x=x, y=y, extension=extension) else: url = urljoin(base_url, "{template_id}/{z}/{x}/{y}.{extension}"). \ format( template_id=template_id, z=z, x=x, y=y, extension=extension) if hasattr(self, 'auth') and self.auth is not None \ and len(self.auth['valid_tokens']) > 0: url = urljoin(url, "?auth_token={auth_token}"). \ format(auth_token=self.auth['valid_tokens'][0]) return url class NamedMap(BaseMap): """ Equivalent to creating a named map in CARTO. 
""" class Meta: collection_endpoint = NAMED_API_ENDPOINT.format( api_version=API_VERSION) id_field = "template_id" name_field = "name" def __str__(self): try: return unicode(self.name).encode("utf-8") except AttributeError: return super(NamedMap, self).__repr__() def __init__(self, auth_client): """ Initializes a NamedMap instance :param auth_client: Auth client """ self.fields = ["version", "name", "auth", "placeholders", "layergroup", "view"] # Optional fields can be assigned by some responses create, instantiate, # but are not saved to the backend self.optional_fields = ["template_id", "layergroupid", "last_updated"] super(NamedMap, self).__init__(auth_client) def instantiate(self, params, auth=None): """ Allows you to fetch the map tiles of a created map :param params: The json with the styling info for the named map :param auth: The auth client :type params: dict :type auth: :class:`carto.auth.APIKeyAuthClient` :return: :raise: CartoException """ try: endpoint = (self.Meta.collection_endpoint + "{template_id}"). \ format(template_id=self.template_id) if (auth is not None): endpoint = (endpoint + "?auth_token={auth_token}"). \ format(auth_token=auth) self.send(endpoint, "POST", json=params) except CartoRateLimitException as e: raise e except Exception as e: raise CartoException(e) def update_from_dict(self, attribute_dict): """ Method overriden from the base class """ if 'template' in attribute_dict: self.update_from_dict(attribute_dict['template']) setattr(self, self.Meta.id_field, attribute_dict['template']['name']) return try: for k, v in attribute_dict.items(): if k in self.fields + self.optional_fields: setattr(self, k, v) except Exception: setattr(self, self.Meta.id_field, attribute_dict) class AnonymousMap(BaseMap): """ Equivalent to creating an anonymous map in CARTO. """ class Meta: collection_endpoint = ANONYMOUS_API_ENDPOINT.format( api_version=API_VERSION) def __init__(self, auth_client): """ Initializes an AnonymousMap instance :param auth_client: Auth client """ self.optional_fields = ['cdn_url', 'last_updated', 'layergroupid', 'metadata'] super(AnonymousMap, self).__init__(auth_client) def instantiate(self, params): """ Allows you to fetch the map tiles of a created map :param params: The json with the styling info for the named map :type params: dict :return: :raise: CartoException """ try: self.send(self.Meta.collection_endpoint, "POST", json=params) except CartoRateLimitException as e: raise e except Exception as e: raise CartoException(e) def update_from_dict(self, attribute_dict): for k, v in attribute_dict.items(): if k in self.fields + self.optional_fields: setattr(self, k, v) class NamedMapManager(Manager): """ Manager for the NamedMap class """ resource_class = NamedMap json_collection_attribute = "template_ids" def create(self, **kwargs): """ Creates a named map :param kwargs: Attributes for creating the named map. Specifically an attribute `template` must contain the JSON object defining the named map :type kwargs: kwargs :return: New named map object :rtype: NamedMap :raise: CartoException """ resource = self.resource_class(self.client) resource.update_from_dict(kwargs['template']) resource.save(force_create=True) return resource
[((2425, 2491), 'urlparse.urljoin', 'urljoin', (['base_url', '"""{template_id}/{layer}/attributes/{feature_id}"""'], {}), "(base_url, '{template_id}/{layer}/attributes/{feature_id}')\n", (2432, 2491), False, 'from urlparse import urljoin\n'), ((3836, 3876), 'urlparse.urljoin', 'urljoin', (['url', '"""?auth_token={auth_token}"""'], {}), "(url, '?auth_token={auth_token}')\n", (3843, 3876), False, 'from urlparse import urljoin\n'), ((2760, 2827), 'urlparse.urljoin', 'urljoin', (['base_url', '"""{template_id}/{filter}/{z}/{x}/{y}.{extension}"""'], {}), "(base_url, '{template_id}/{filter}/{z}/{x}/{y}.{extension}')\n", (2767, 2827), False, 'from urlparse import urljoin\n'), ((3118, 3184), 'urlparse.urljoin', 'urljoin', (['base_url', '"""{template_id}/{layer}/{z}/{x}/{y}.{extension}"""'], {}), "(base_url, '{template_id}/{layer}/{z}/{x}/{y}.{extension}')\n", (3125, 3184), False, 'from urlparse import urljoin\n'), ((3455, 3513), 'urlparse.urljoin', 'urljoin', (['base_url', '"""{template_id}/{z}/{x}/{y}.{extension}"""'], {}), "(base_url, '{template_id}/{z}/{x}/{y}.{extension}')\n", (3462, 3513), False, 'from urlparse import urljoin\n')]
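
Going by the create() and instantiate() signatures above, a typical round trip with this module would look roughly like the following. The base URL, API key and template payload are placeholders; carto.auth.APIKeyAuthClient is the auth client the docstrings themselves reference, and the template keys mirror the fields listed in NamedMap.__init__.

# Sketch: creating and instantiating a named map with the classes above.
# URL, API key and template contents are placeholder values.
from carto.auth import APIKeyAuthClient
from carto.maps import NamedMapManager

auth_client = APIKeyAuthClient(base_url='https://example.carto.com/', api_key='<api_key>')
named_map_manager = NamedMapManager(auth_client)

template = {
    'version': '0.0.1',
    'name': 'example_template',
    'auth': {'method': 'open'},
    'placeholders': {},
    'layergroup': {'layers': []},
    'view': {},
}
named_map = named_map_manager.create(template=template)
named_map.instantiate({'placeholder': 'value'})
print(named_map.get_tile_url(x=0, y=0, z=0))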
tlagore/kv_store
client_driver.py
e3f139eabaa14d0e001193e21baf7e5c96e0358d
from kv_client.kv_client import KVClient


def main():
    kvSlave = KVClient(1, "127.0.0.1", 3456)
    kvSlave.start()

if __name__ == "__main__":
    main()
[((68, 98), 'kv_client.kv_client.KVClient', 'KVClient', (['(1)', '"""127.0.0.1"""', '(3456)'], {}), "(1, '127.0.0.1', 3456)\n", (76, 98), False, 'from kv_client.kv_client import KVClient\n')]
AndreasKaratzas/stonne
pytorch-frontend/benchmarks/operator_benchmark/pt/embeddingbag_test.py
2915fcc46cc94196303d81abbd1d79a56d6dd4a9
import operator_benchmark as op_bench
import torch
import numpy
from . import configs

"""EmbeddingBag Operator Benchmark"""


class EmbeddingBagBenchmark(op_bench.TorchBenchmarkBase):
    def init(self, embeddingbags, dim, mode, input_size, offset, sparse, include_last_offset, device):
        self.embedding = torch.nn.EmbeddingBag(
            num_embeddings=embeddingbags,
            embedding_dim=dim,
            mode=mode,
            include_last_offset=include_last_offset,
            sparse=sparse).to(device=device)
        numpy.random.seed((1 << 32) - 1)
        self.input = torch.tensor(numpy.random.randint(0, embeddingbags, input_size), device=device).long()
        offsets = torch.LongTensor([offset], device=device)
        self.offset = torch.cat((offsets, torch.tensor([self.input.size(0)], dtype=torch.long)), 0)
        self.set_module_name('embeddingbag')

    def forward(self):
        return self.embedding(self.input, self.offset)


op_bench.generate_pt_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark)
op_bench.generate_pt_gradient_test(configs.embeddingbag_short_configs, EmbeddingBagBenchmark)

if __name__ == "__main__":
    op_bench.benchmark_runner.main()
[((963, 1051), 'operator_benchmark.generate_pt_test', 'op_bench.generate_pt_test', (['configs.embeddingbag_short_configs', 'EmbeddingBagBenchmark'], {}), '(configs.embeddingbag_short_configs,\n EmbeddingBagBenchmark)\n', (988, 1051), True, 'import operator_benchmark as op_bench\n'), ((1048, 1145), 'operator_benchmark.generate_pt_gradient_test', 'op_bench.generate_pt_gradient_test', (['configs.embeddingbag_short_configs', 'EmbeddingBagBenchmark'], {}), '(configs.embeddingbag_short_configs,\n EmbeddingBagBenchmark)\n', (1082, 1145), True, 'import operator_benchmark as op_bench\n'), ((1175, 1207), 'operator_benchmark.benchmark_runner.main', 'op_bench.benchmark_runner.main', ([], {}), '()\n', (1205, 1207), True, 'import operator_benchmark as op_bench\n'), ((537, 569), 'numpy.random.seed', 'numpy.random.seed', (['((1 << 32) - 1)'], {}), '((1 << 32) - 1)\n', (554, 569), False, 'import numpy\n'), ((696, 737), 'torch.LongTensor', 'torch.LongTensor', (['[offset]'], {'device': 'device'}), '([offset], device=device)\n', (712, 737), False, 'import torch\n'), ((312, 454), 'torch.nn.EmbeddingBag', 'torch.nn.EmbeddingBag', ([], {'num_embeddings': 'embeddingbags', 'embedding_dim': 'dim', 'mode': 'mode', 'include_last_offset': 'include_last_offset', 'sparse': 'sparse'}), '(num_embeddings=embeddingbags, embedding_dim=dim, mode\n =mode, include_last_offset=include_last_offset, sparse=sparse)\n', (333, 454), False, 'import torch\n'), ((604, 654), 'numpy.random.randint', 'numpy.random.randint', (['(0)', 'embeddingbags', 'input_size'], {}), '(0, embeddingbags, input_size)\n', (624, 654), False, 'import numpy\n')]
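
The benchmark's forward() is simply an EmbeddingBag lookup over a flat index tensor plus bag offsets. Stripped of the operator_benchmark harness, the same call pattern looks like this; the sizes are arbitrary examples.

# Sketch: the EmbeddingBag call pattern exercised by the benchmark above,
# without the operator_benchmark harness.
import torch

emb = torch.nn.EmbeddingBag(num_embeddings=1000, embedding_dim=64, mode='sum')
flat_indices = torch.randint(0, 1000, (8,), dtype=torch.long)
offsets = torch.tensor([0, 4], dtype=torch.long)  # two bags of four indices each
out = emb(flat_indices, offsets)
print(out.shape)  # torch.Size([2, 64])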
lfchener/dgl
python/dgl/geometry/capi.py
77f4287a4118db64c46f4f413a426e1419a09d53
"""Python interfaces to DGL farthest point sampler.""" from dgl._ffi.base import DGLError import numpy as np from .._ffi.function import _init_api from .. import backend as F from .. import ndarray as nd def _farthest_point_sampler(data, batch_size, sample_points, dist, start_idx, result): r"""Farthest Point Sampler Parameters ---------- data : tensor A tensor of shape (N, d) where N is the number of points and d is the dimension. batch_size : int The number of batches in the ``data``. N should be divisible by batch_size. sample_points : int The number of points to sample in each batch. dist : tensor Pre-allocated tensor of shape (N, ) for to-sample distance. start_idx : tensor of int Pre-allocated tensor of shape (batch_size, ) for the starting sample in each batch. result : tensor of int Pre-allocated tensor of shape (sample_points * batch_size, ) for the sampled index. Returns ------- No return value. The input variable ``result`` will be overwriten with sampled indices. """ assert F.shape(data)[0] >= sample_points * batch_size assert F.shape(data)[0] % batch_size == 0 _CAPI_FarthestPointSampler(F.zerocopy_to_dgl_ndarray(data), batch_size, sample_points, F.zerocopy_to_dgl_ndarray(dist), F.zerocopy_to_dgl_ndarray(start_idx), F.zerocopy_to_dgl_ndarray(result)) def _neighbor_matching(graph_idx, num_nodes, edge_weights=None, relabel_idx=True): """ Description ----------- The neighbor matching procedure of edge coarsening used in `Metis <http://cacs.usc.edu/education/cs653/Karypis-METIS-SIAMJSC98.pdf>`__ and `Graclus <https://www.cs.utexas.edu/users/inderjit/public_papers/multilevel_pami.pdf>`__ for homogeneous graph coarsening. This procedure keeps picking an unmarked vertex and matching it with one its unmarked neighbors (that maximizes its edge weight) until no match can be done. If no edge weight is given, this procedure will randomly pick neighbor for each vertex. The GPU implementation is based on `A GPU Algorithm for Greedy Graph Matching <http://www.staff.science.uu.nl/~bisse101/Articles/match12.pdf>`__ NOTE: The input graph must be bi-directed (undirected) graph. Call :obj:`dgl.to_bidirected` if you are not sure your graph is bi-directed. Parameters ---------- graph : HeteroGraphIndex The input homogeneous graph. num_nodes : int The number of nodes in this homogeneous graph. edge_weight : tensor, optional The edge weight tensor holding non-negative scalar weight for each edge. default: :obj:`None` relabel_idx : bool, optional If true, relabel resulting node labels to have consecutive node ids. default: :obj:`True` Returns ------- a 1-D tensor A vector with each element that indicates the cluster ID of a vertex. """ edge_weight_capi = nd.NULL["int64"] if edge_weights is not None: edge_weight_capi = F.zerocopy_to_dgl_ndarray(edge_weights) node_label = F.full_1d( num_nodes, -1, getattr(F, graph_idx.dtype), F.to_backend_ctx(graph_idx.ctx)) node_label_capi = F.zerocopy_to_dgl_ndarray_for_write(node_label) _CAPI_NeighborMatching(graph_idx, edge_weight_capi, node_label_capi) if F.reduce_sum(node_label < 0).item() != 0: raise DGLError("Find unmatched node") # reorder node id # TODO: actually we can add `return_inverse` option for `unique` # function in backend for efficiency. if relabel_idx: node_label_np = F.zerocopy_to_numpy(node_label) _, node_label_np = np.unique(node_label_np, return_inverse=True) return F.tensor(node_label_np) else: return node_label _init_api('dgl.geometry', __name__)
[((3538, 3569), 'dgl._ffi.base.DGLError', 'DGLError', (['"""Find unmatched node"""'], {}), "('Find unmatched node')\n", (3546, 3569), False, 'from dgl._ffi.base import DGLError\n'), ((3813, 3858), 'numpy.unique', 'np.unique', (['node_label_np'], {'return_inverse': '(True)'}), '(node_label_np, return_inverse=True)\n', (3822, 3858), True, 'import numpy as np\n')]
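For readers skimming this record: the file above only wraps a C kernel (_CAPI_FarthestPointSampler), so the sampling logic itself is not visible. Below is a minimal, self-contained NumPy sketch of the greedy farthest-point-sampling loop the docstring describes; the helper name and the fixed start index 0 are illustrative choices, not DGL's actual implementation.

import numpy as np

def farthest_point_sample_np(data, batch_size, sample_points):
    # data: (N, d) array with N divisible by batch_size; returns (batch_size * sample_points,) indices
    n_per_batch = data.shape[0] // batch_size
    result = []
    for b in range(batch_size):
        points = data[b * n_per_batch:(b + 1) * n_per_batch]
        dist = np.full(n_per_batch, np.inf)
        idx = 0  # start each batch from its first point (the real API takes start_idx)
        for _ in range(sample_points):
            result.append(b * n_per_batch + idx)
            # keep, for every point, its distance to the nearest already-chosen sample
            dist = np.minimum(dist, np.linalg.norm(points - points[idx], axis=1))
            idx = int(dist.argmax())  # next sample is the point farthest from the chosen set
    return np.array(result)

# e.g. farthest_point_sample_np(np.random.rand(8, 3), batch_size=2, sample_points=2)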
oxdc/sci.db
scidb/core/data.py
0a751a0e05e7ad4c83c350e32e32ea9ce5831cbb
import shutil
import hashlib
from pathlib import Path
from typing import TextIO, BinaryIO, IO, Union
from datetime import datetime
from os.path import getmtime
from .low import ObservableDict


class Data:
    def __init__(self, data_name: str, parent, bucket, protected_parent_methods: Union[None, dict] = None):
        self.__data_name__ = data_name
        self.__parent__ = parent
        self.__bucket__ = bucket
        self.__protected_parent_methods__ = protected_parent_methods
        self.__protected_parent_methods__['increase_data_count']()
        self.init_metadata()
        self.init_properties()

    @property
    def database(self):
        return self.__bucket__.db

    @property
    def db(self):
        return self.__bucket__.db

    @property
    def bucket(self):
        return self.__bucket__

    def init_metadata(self):
        if self.__data_name__ not in self.__parent__.metadata:
            self.__parent__.metadata[self.__data_name__] = dict()

    def init_properties(self):
        if self.__data_name__ not in self.__parent__.properties:
            self.__parent__.properties[self.__data_name__] = dict()

    def set_metadata(self, metadata: Union[None, dict], merge: bool = True):
        if metadata is None:
            return
        if merge:
            metadata = {**self.metadata, **metadata}
        self.__parent__.metadata[self.__data_name__] = metadata

    def set_properties(self, properties: Union[None, dict], merge: bool = True):
        if properties is None:
            return
        if merge:
            properties = {**self.properties, **properties}
        self.__parent__.properties[self.__data_name__] = properties

    @property
    def parent(self):
        return self.__parent__

    @property
    def path(self) -> Path:
        return self.__parent__.path / self.__data_name__

    @property
    def name(self) -> str:
        return self.__data_name__

    @property
    def metadata(self) -> ObservableDict:
        return self.__parent__.metadata[self.__data_name__]

    @property
    def properties(self) -> ObservableDict:
        return self.__parent__.properties[self.__data_name__]

    def rename(self, new_name: str):
        shutil.move(str(self.path), str(self.__parent__.path / new_name))
        self.__data_name__ = new_name

    def reader(self, binary: bool = False, **kwargs) -> [IO, BinaryIO, TextIO, None]:
        mode = 'r'
        mode += 'b' if binary else ''
        return open(str(self.path), mode=mode, **kwargs)

    def creator(self, binary: bool = False, confirm: bool = False, feedback: bool = False,
                **kwargs) -> [IO, BinaryIO, TextIO, None]:
        if confirm and not feedback:
            return None
        mode = 'x'
        mode += 'b' if binary else ''
        return open(str(self.path), mode=mode, **kwargs)

    def writer(self, binary: bool = False, append: bool = True, allow_overwrite: bool = False,
               confirm: bool = True, feedback: bool = False, **kwargs) -> [IO, BinaryIO, TextIO, None]:
        if not allow_overwrite and not append:
            raise PermissionError('Trying to overwrite existed data.')
        if confirm and not feedback:
            return
        mode = 'a' if append else 'w'
        mode += 'b' if binary else ''
        return open(str(self.path), mode=mode, **kwargs)

    def __repr__(self):
        return f"Data('{self.__data_name__}')"

    def import_file(self, src_path: [str, Path], allow_overwrite=False, confirm=True, feedback=False):
        if self.path.exists() and not allow_overwrite:
            return
        if confirm and not feedback:
            return
        shutil.copyfile(str(src_path), str(self.path))

    def export_file(self, dst_path: [str, Path], allow_overwrite=False):
        if Path(dst_path).exists() and not allow_overwrite:
            return
        shutil.copyfile(str(self.path), str(dst_path))

    def __calc_hash__(self, h, buffer_size: int = 131072):
        if not self.path.exists():
            return None
        with open(str(self.path), 'rb') as file_reader:
            while True:
                data = file_reader.read(buffer_size)
                if not data:
                    break
                h.update(data)
        return h.hexdigest()

    def md5(self, buffer_size: int = 131072, require_update: bool = False) -> [str, None]:
        if not self.path.exists():
            return None
        last_modified_time = getmtime(str(self.path))
        if require_update \
                or 'md5' not in self.metadata \
                or 'md5-timestamp' not in self.metadata \
                or self.metadata['md5-timestamp'] < last_modified_time:
            result = self.__calc_hash__(hashlib.md5(), buffer_size)
            self.metadata['md5'] = result
            self.metadata['md5-timestamp'] = datetime.now().timestamp()
            return result
        else:
            return self.metadata['md5']

    def sha1(self, buffer_size: int = 131072, require_update: bool = False) -> [str, None]:
        if not self.path.exists():
            return None
        last_modified_time = getmtime(str(self.path))
        if require_update \
                or 'sha1' not in self.metadata \
                or 'sha1-timestamp' not in self.metadata \
                or self.metadata['sha1-timestamp'] < last_modified_time:
            result = self.__calc_hash__(hashlib.sha1(), buffer_size)
            self.metadata['sha1'] = result
            self.metadata['sha1-timestamp'] = datetime.now().timestamp()
            return result
        else:
            return self.metadata['sha1']

    def sha256(self, buffer_size: int = 131072, require_update: bool = False) -> [str, None]:
        if not self.path.exists():
            return None
        last_modified_time = getmtime(str(self.path))
        if require_update \
                or 'sha256' not in self.metadata \
                or 'sha256-timestamp' not in self.metadata \
                or self.metadata['sha256-timestamp'] < last_modified_time:
            result = self.__calc_hash__(hashlib.sha256(), buffer_size)
            self.metadata['sha256'] = result
            self.metadata['sha256-timestamp'] = datetime.now().timestamp()
            return result
        else:
            return self.metadata['sha256']
[((4878, 4891), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (4889, 4891), False, 'import hashlib\n'), ((5555, 5569), 'hashlib.sha1', 'hashlib.sha1', ([], {}), '()\n', (5567, 5569), False, 'import hashlib\n'), ((6244, 6260), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (6258, 6260), False, 'import hashlib\n'), ((3937, 3951), 'pathlib.Path', 'Path', (['dst_path'], {}), '(dst_path)\n', (3941, 3951), False, 'from pathlib import Path\n'), ((4993, 5007), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5005, 5007), False, 'from datetime import datetime\n'), ((5673, 5687), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5685, 5687), False, 'from datetime import datetime\n'), ((6368, 6382), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6380, 6382), False, 'from datetime import datetime\n')]
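A standalone sketch of the mtime-cached hashing pattern that Data.md5()/sha1()/sha256() above implement; file_path and the cache dict are hypothetical stand-ins for the object's path and metadata store.

import hashlib
from os.path import getmtime

def cached_md5(file_path, cache, buffer_size=131072):
    last_modified = getmtime(file_path)
    if 'md5' in cache and cache.get('md5-timestamp', -1.0) >= last_modified:
        return cache['md5']  # file unchanged since the last hash, reuse the cached digest
    h = hashlib.md5()
    with open(file_path, 'rb') as f:
        while True:
            chunk = f.read(buffer_size)  # hash in fixed-size chunks to bound memory use
            if not chunk:
                break
            h.update(chunk)
    cache['md5'] = h.hexdigest()
    cache['md5-timestamp'] = last_modified
    return cache['md5']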
PaulWang1905/tensorflow
tensorflow/python/keras/optimizer_v2/optimizer_v2.py
ebf12d22b4801fb8dab5034cc94562bf7cc33fa0
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Version 2 of class Optimizer.""" # pylint: disable=g-bad-name from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import functools import six from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx from tensorflow.python.distribute import reduce_util as ds_reduce_util from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.keras import backend from tensorflow.python.keras import initializers from tensorflow.python.keras.engine import base_layer_utils from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule from tensorflow.python.keras.utils import tf_utils from tensorflow.python.ops import array_ops from tensorflow.python.ops import clip_ops from tensorflow.python.ops import gradients from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables as tf_variables from tensorflow.python.platform import tf_logging as logging from tensorflow.python.saved_model import revived_types from tensorflow.python.training.tracking import base as trackable from tensorflow.python.util import nest from tensorflow.python.util.tf_export import keras_export def _deduplicate_indexed_slices(values, indices): """Sums `values` associated with any non-unique `indices`. Args: values: A `Tensor` with rank >= 1. indices: A one-dimensional integer `Tensor`, indexing into the first dimension of `values` (as in an IndexedSlices object). Returns: A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a de-duplicated version of `indices` and `summed_values` contains the sum of `values` slices associated with each unique index. """ unique_indices, new_index_positions = array_ops.unique(indices) summed_values = math_ops.unsorted_segment_sum( values, new_index_positions, array_ops.shape(unique_indices)[0]) return (summed_values, unique_indices) @six.add_metaclass(abc.ABCMeta) @keras_export("keras.optimizers.Optimizer") class OptimizerV2(trackable.Trackable): """Updated base class for optimizers. This class defines the API to add Ops to train a model. You never use this class directly, but instead instantiate one of its subclasses such as `tf.keras.optimizers.SGD`, `tf.keras.optimizers.Adam`. ### Usage ```python # Create an optimizer with the desired parameters. opt = tf.keras.optimizers.SGD(learning_rate=0.1) # `loss` is a callable that takes no argument and returns the value # to minimize. loss = lambda: 3 * var1 * var1 + 2 * var2 * var2 # In graph mode, returns op that minimizes the loss by updating the listed # variables. 
opt_op = opt.minimize(loss, var_list=[var1, var2]) opt_op.run() # In eager mode, simply call minimize to update the list of variables. opt.minimize(loss, var_list=[var1, var2]) ``` ### Custom training loop with Keras models In Keras models, sometimes variables are created when the model is first called, instead of construction time. Examples include 1) sequential models without input shape pre-defined, or 2) subclassed models. Pass var_list as callable in these cases. Example: ```python opt = tf.keras.optimizers.SGD(learning_rate=0.1) model = tf.keras.Sequential() model.add(tf.keras.layers.Dense(num_hidden, activation='relu')) model.add(tf.keras.layers.Dense(num_classes, activation='sigmoid') loss_fn = lambda: tf.keras.losses.mse(model(input), output) var_list_fn = lambda: model.trainable_weights for input, output in data: opt.minimize(loss_fn, var_list_fn) ``` ### Processing gradients before applying them. Calling `minimize()` takes care of both computing the gradients and applying them to the variables. If you want to process the gradients before applying them you can instead use the optimizer in three steps: 1. Compute the gradients with `tf.GradientTape`. 2. Process the gradients as you wish. 3. Apply the processed gradients with `apply_gradients()`. Example: ```python # Create an optimizer. opt = tf.keras.optimizers.SGD(learning_rate=0.1) # Compute the gradients for a list of variables. with tf.GradientTape() as tape: loss = <call_loss_function> vars = <list_of_variables> grads = tape.gradient(loss, vars) processed_grads = [process_gradient(g) for g in grads] grads_and_vars = zip(processed_grads, var_list) # grads_and_vars is a list of tuples (gradient, variable). Do whatever you # need to the 'gradient' part, for example cap them, etc. capped_grads_and_vars = [(MyCapper(gv[0]), gv[1]) for gv in grads_and_vars] # Ask the optimizer to apply the capped gradients. opt.apply_gradients(capped_grads_and_vars) ``` ### Use with `tf.distribute.Strategy`. This optimizer class is `tf.distribute.Strategy` aware, which means it automatically sums gradients across all replicas. To average gradients, you divide your loss by the global batch size, which is done automatically if you use `tf.keras` built-in training or evaluation loops. See the `reduction` argument of your loss which should be set to `tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` for averaging or `tf.keras.losses.Reduction.SUM` for not. If you are not using these and you want to average gradients, you should use `tf.math.reduce_sum` to add up your per-example losses and then divide by the global batch size. Note that when using `tf.distribute.Strategy`, the first component of a tensor's shape is the *replica-local* batch size, which is off by a factor equal to the number of replicas being used to compute a single step. As a result, using `tf.math.reduce_mean` will give the wrong answer, resulting in gradients that can be many times too big. ### Variable Constraint All Keras optimizers respect variable constraints. If constraint function is passed to any variable, the constraint will be applied to the variable after the gradient has been applied to the variable. Important: If gradient is sparse tensor, variable constraint is not supported. ### Thread Compatibility The entire optimizer is currently thread compatible, not thread-safe. The user needs to perform synchronization if necessary. ### Slots Many optimizer subclasses, such as `Adam` and `Adagrad` allocate and manage additional variables associated with the variables to train. 
These are called <i>Slots</i>. Slots have names and you can ask the optimizer for the names of the slots that it uses. Once you have a slot name you can ask the optimizer for the variable it created to hold the slot value. This can be useful if you want to log debug a training algorithm, report stats about the slots, etc. ### Hyper parameters These are arguments passed to the optimizer subclass constructor (the `__init__` method), and then passed to `self._set_hyper()`. They can be either regular Python values (like 1.0), tensors, or callables. If they are callable, the callable will be called during `apply_gradients()` to get the value for the hyper parameter. Hyper parameters can be overwritten through user code: Example: ```python # Create an optimizer with the desired parameters. opt = tf.keras.optimizers.SGD(learning_rate=0.1) # `loss` is a callable that takes no argument and returns the value # to minimize. loss = lambda: 3 * var1 + 2 * var2 # In eager mode, simply call minimize to update the list of variables. opt.minimize(loss, var_list=[var1, var2]) # update learning rate opt.learning_rate = 0.05 opt.minimize(loss, var_list=[var1, var2]) ``` ### Write a customized optimizer. If you intend to create your own optimization algorithm, simply inherit from this class and override the following methods: - resource_apply_dense (update variable given gradient tensor is dense) - resource_apply_sparse (update variable given gradient tensor is sparse) - create_slots (if your optimizer algorithm requires additional variables) - get_config (serialization of the optimizer, include all hyper parameters) """ def __init__(self, name, **kwargs): """Create a new Optimizer. This must be called by the constructors of subclasses. Note that Optimizer instances should not bind to a single graph, and so shouldn't keep Tensors as member variables. Generally you should be able to use the _set_hyper()/state.get_hyper() facility instead. This class in stateful and thread-compatible. Args: name: A non-empty string. The name to use for accumulators created for the optimizer. **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip gradients by value, `decay` is included for backward compatibility to allow time inverse decay of learning rate. `lr` is included for backward compatibility, recommended to use `learning_rate` instead. Raises: ValueError: If name is malformed. RuntimeError: If _create_slots has been overridden instead of _create_vars. """ allowed_kwargs = {"clipnorm", "clipvalue", "lr", "decay"} for k in kwargs: if k not in allowed_kwargs: raise TypeError("Unexpected keyword argument " "passed to optimizer: " + str(k)) # checks that all keyword arguments are non-negative. if kwargs[k] < 0: raise ValueError("Expected {} >= 0, received: {}".format(k, kwargs[k])) self._use_locking = True self._name = name self._hyper = {} # dict: {variable name : {slot name : variable}} self._slots = {} self._slot_names = [] self._weights = [] self._iterations = None # For implementing Trackable. Stores information about how to restore # slot variables which have not yet been created # (trackable._CheckpointPosition objects). # {slot_name : # {_var_key(variable_to_train): [checkpoint_position, ... ], ... }, # ... 
} self._deferred_slot_restorations = {} decay = kwargs.pop("decay", 0.0) if decay < 0.: raise ValueError("decay cannot be less than 0: {}".format(decay)) self._initial_decay = decay if "clipnorm" in kwargs: self.clipnorm = kwargs.pop("clipnorm") if "clipvalue" in kwargs: self.clipvalue = kwargs.pop("clipvalue") self._hypers_created = False def minimize(self, loss, var_list, grad_loss=None, name=None): """Minimize `loss` by updating `var_list`. This method simply computes gradient using `tf.GradientTape` and calls `apply_gradients()`. If you want to process the gradient before applying then call `tf.GradientTape` and `apply_gradients()` explicitly instead of using this function. Args: loss: A callable taking no arguments which returns the value to minimize. var_list: list or tuple of `Variable` objects to update to minimize `loss`, or a callable returning the list or tuple of `Variable` objects. Use callable when the variable list would otherwise be incomplete before `minimize` since the variables are created at the first time `loss` is called. grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`. name: Optional name for the returned operation. Returns: An Operation that updates the variables in `var_list`. If `global_step` was not `None`, that operation also increments `global_step`. Raises: ValueError: If some of the variables are not `Variable` objects. """ grads_and_vars = self._compute_gradients( loss, var_list=var_list, grad_loss=grad_loss) return self.apply_gradients(grads_and_vars, name=name) def _compute_gradients(self, loss, var_list, grad_loss=None): """Compute gradients of `loss` for the variables in `var_list`. This is the first part of `minimize()`. It returns a list of (gradient, variable) pairs where "gradient" is the gradient for "variable". Note that "gradient" can be a `Tensor`, an `IndexedSlices`, or `None` if there is no gradient for the given variable. Args: loss: A callable taking no arguments which returns the value to minimize. var_list: list or tuple of `Variable` objects to update to minimize `loss`, or a callable returning the list or tuple of `Variable` objects. Use callable when the variable list would otherwise be incomplete before `minimize` and the variables are created at the first time when `loss` is called. grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`. Returns: A list of (gradient, variable) pairs. Variable is always present, but gradient can be `None`. Raises: TypeError: If `var_list` contains anything else than `Variable` objects. ValueError: If some arguments are invalid, or var_list is None. """ # TODO(josh11b): Test that we handle weight decay in a reasonable way. with backprop.GradientTape() as tape: if not callable(var_list): tape.watch(var_list) loss_value = loss() if callable(var_list): var_list = var_list() var_list = nest.flatten(var_list) grads = tape.gradient(loss_value, var_list, grad_loss) if hasattr(self, "clipnorm"): grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads] if hasattr(self, "clipvalue"): grads = [ clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue) for g in grads ] grads_and_vars = list(zip(grads, var_list)) self._assert_valid_dtypes([ v for g, v in grads_and_vars if g is not None and v.dtype != dtypes.resource ]) return grads_and_vars def get_gradients(self, loss, params): """Returns gradients of `loss` with respect to `params`. Arguments: loss: Loss tensor. params: List of variables. Returns: List of gradient tensors. 
Raises: ValueError: In case any gradient cannot be computed (e.g. if gradient function not implemented). """ params = nest.flatten(params) with backend.get_graph().as_default(): grads = gradients.gradients(loss, params) for grad, param in zip(grads, params): if grad is None: raise ValueError("Variable {} has `None` for gradient. " "Please make sure that all of your ops have a " "gradient defined (i.e. are differentiable). " "Common ops without gradient: " "K.argmax, K.round, K.eval.".format(param)) if hasattr(self, "clipnorm"): grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads] if hasattr(self, "clipvalue"): grads = [ clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue) for g in grads ] return grads def apply_gradients(self, grads_and_vars, name=None): """Apply gradients to variables. This is the second part of `minimize()`. It returns an `Operation` that applies gradients. Args: grads_and_vars: List of (gradient, variable) pairs. name: Optional name for the returned operation. Default to the name passed to the `Optimizer` constructor. Returns: An `Operation` that applies the specified gradients. If `global_step` was not None, that operation also increments `global_step`. Raises: TypeError: If `grads_and_vars` is malformed. ValueError: If none of the variables have gradients. """ grads_and_vars = _filter_grads(grads_and_vars) var_list = [v for (_, v) in grads_and_vars] # Create iteration if necessary. with ops.init_scope(): _ = self.iterations self._create_hypers() self._create_slots(var_list) self._prepare(var_list) return distribute_ctx.get_replica_context().merge_call( self._distributed_apply, args=(grads_and_vars,), kwargs={"name": name}) def _distributed_apply(self, distribution, grads_and_vars, name): """`apply_gradients` using a `DistributionStrategy`.""" reduced_grads = distribution.extended.batch_reduce_to( ds_reduce_util.ReduceOp.SUM, grads_and_vars) var_list = [v for _, v in grads_and_vars] grads_and_vars = zip(reduced_grads, var_list) def apply_grad_to_update_var(var, grad): """Apply gradient to variable.""" if isinstance(var, ops.Tensor): raise NotImplementedError("Trying to update a Tensor ", var) if isinstance(grad, ops.IndexedSlices): if var.constraint is not None: raise RuntimeError( "Cannot use a constraint function on a sparse variable.") return self._resource_apply_sparse_duplicate_indices( grad.values, var, grad.indices) update_op = self._resource_apply_dense(grad, var) if var.constraint is not None: with ops.control_dependencies([update_op]): return var.assign(var.constraint(var)) else: return update_op update_ops = [] with backend.name_scope(name or self._name): for grad, var in grads_and_vars: scope_name = ("" if ops.executing_eagerly_outside_functions() else "_" + var.op.name) with backend.name_scope("update" + scope_name): update_ops.extend( distribution.extended.update( var, apply_grad_to_update_var, args=(grad,), group=False)) any_symbolic = any(isinstance(i, ops.Operation) or tf_utils.is_symbolic_tensor(i) for i in update_ops) if not context.executing_eagerly() or any_symbolic: # If the current context is graph mode or any of the update ops are # symbolic then the step update should be carried out under a graph # context. 
(eager updates execute immediately) with ops._get_graph_from_inputs(update_ops).as_default(): # pylint: disable=protected-access with ops.control_dependencies(update_ops): return self._iterations.assign_add(1).op return self._iterations.assign_add(1) def get_updates(self, loss, params): grads = self.get_gradients(loss, params) grads_and_vars = list(zip(grads, params)) self._assert_valid_dtypes([ v for g, v in grads_and_vars if g is not None and v.dtype != dtypes.resource ]) return [self.apply_gradients(grads_and_vars)] def _set_hyper(self, name, value): """set hyper `name` to value. value can be callable, tensor, numeric.""" if isinstance(value, trackable.Trackable): self._track_trackable(value, name, overwrite=True) if name not in self._hyper: self._hyper[name] = value else: prev_value = self._hyper[name] if (callable(prev_value) or isinstance(prev_value, (ops.Tensor, int, float, learning_rate_schedule.LearningRateSchedule)) or isinstance(value, learning_rate_schedule.LearningRateSchedule)): self._hyper[name] = value else: backend.set_value(self._hyper[name], value) def _get_hyper(self, name, dtype=None): if not self._hypers_created: self._create_hypers() value = self._hyper[name] if isinstance(value, learning_rate_schedule.LearningRateSchedule): return value if callable(value): value = value() if dtype: return math_ops.cast(value, dtype) else: return value def __getattribute__(self, name): """Overridden to support hyperparameter access.""" try: return super(OptimizerV2, self).__getattribute__(name) except AttributeError as e: # Needed to avoid infinite recursion with __setattr__. if name == "_hyper": raise e # Backwards compatibility with Keras optimizers. if name == "lr": name = "learning_rate" if name in self._hyper: return self._get_hyper(name) raise e def __setattr__(self, name, value): """Override setattr to support dynamic hyperparameter setting.""" # Backwards compatibility with Keras optimizers. if name == "lr": name = "learning_rate" if hasattr(self, "_hyper") and name in self._hyper: self._set_hyper(name, value) else: super(OptimizerV2, self).__setattr__(name, value) def get_slot_names(self): """A list of names for this optimizer's slots.""" return self._slot_names def add_slot(self, var, slot_name, initializer="zeros"): """Add a new slot variable for `var`.""" if slot_name not in self._slot_names: self._slot_names.append(slot_name) var_key = _var_key(var) slot_dict = self._slots.setdefault(var_key, {}) weight = slot_dict.get(slot_name, None) if weight is None: if isinstance(initializer, six.string_types) or callable(initializer): initializer = initializers.get(initializer) initial_value = functools.partial( initializer, shape=var.shape, dtype=var.dtype) else: initial_value = initializer strategy = distribute_ctx.get_strategy() with strategy.extended.colocate_vars_with(var): weight = tf_variables.Variable( name="%s/%s" % (var._shared_name, slot_name), # pylint: disable=protected-access dtype=var.dtype, trainable=False, initial_value=initial_value) backend.track_variable(weight) slot_dict[slot_name] = weight self._restore_slot_variable( slot_name=slot_name, variable=var, slot_variable=weight) self._weights.append(weight) return weight def get_slot(self, var, slot_name): var_key = _var_key(var) slot_dict = self._slots[var_key] return slot_dict[slot_name] def _prepare(self, var_list): pass def _create_hypers(self): if self._hypers_created: return # Iterate hyper values deterministically. 
for name, value in sorted(self._hyper.items()): if isinstance(value, ops.Tensor) or callable(value): continue else: self._hyper[name] = self.add_weight( name, shape=[], trainable=False, initializer=value, aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA) self._hypers_created = True @property def iterations(self): """Variable. The number of training steps this Optimizer has run.""" if self._iterations is None: self._iterations = self.add_weight( "iter", shape=[], dtype=dtypes.int64, trainable=False, aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA) self._weights.append(self._iterations) return self._iterations @iterations.setter def iterations(self, variable): if self._iterations is not None: raise RuntimeError("Cannot set `iterations` to a new Variable after " "the Optimizer weights have been created") self._iterations = variable self._weights.append(self._iterations) def _decayed_lr(self, var_dtype): """Get decayed learning rate as a Tensor with dtype=var_dtype.""" lr_t = self._get_hyper("learning_rate", var_dtype) if isinstance(lr_t, learning_rate_schedule.LearningRateSchedule): local_step = math_ops.cast(self.iterations, var_dtype) lr_t = math_ops.cast(lr_t(local_step), var_dtype) if self._initial_decay > 0.: local_step = math_ops.cast(self.iterations, var_dtype) decay_t = self._get_hyper("decay", var_dtype) lr_t = lr_t / (1. + decay_t * local_step) return lr_t @abc.abstractmethod def get_config(self): """Returns the config of the optimimizer. An optimizer config is a Python dictionary (serializable) containing the configuration of an optimizer. The same optimizer can be reinstantiated later (without any saved state) from this configuration. Returns: Python dictionary. """ config = {"name": self._name} if hasattr(self, "clipnorm"): config["clipnorm"] = self.clipnorm if hasattr(self, "clipvalue"): config["clipvalue"] = self.clipvalue return config @classmethod def from_config(cls, config, custom_objects=None): """Creates an optimizer from its config. This method is the reverse of `get_config`, capable of instantiating the same optimizer from the config dictionary. Arguments: config: A Python dictionary, typically the output of get_config. custom_objects: A Python dictionary mapping names to additional Python objects used to create this optimizer, such as a function used for a hyperparameter. Returns: An optimizer instance. """ if "lr" in config: config["learning_rate"] = config.pop("lr") if "learning_rate" in config: if isinstance(config["learning_rate"], dict): config["learning_rate"] = learning_rate_schedule.deserialize( config["learning_rate"], custom_objects=custom_objects) return cls(**config) def _serialize_hyperparameter(self, hyperparameter_name): """Serialize a hyperparameter that can be a float, callable, or Tensor.""" value = self._hyper[hyperparameter_name] if isinstance(value, learning_rate_schedule.LearningRateSchedule): return learning_rate_schedule.serialize(value) if callable(value): return value() if tensor_util.is_tensor(value): return backend.get_value(value) return value def variables(self): """Returns variables of this Optimizer based on the order created.""" return self._weights @property def weights(self): """Returns variables of this Optimizer based on the order created.""" return self._weights def get_weights(self): params = self.weights return backend.batch_get_value(params) # TODO(tanzheny): Maybe share this logic with base_layer. 
def set_weights(self, weights): params = self.weights if len(params) != len(weights): raise ValueError( "You called `set_weights(weights)` on optimizer " + self._name + " with a weight list of length " + str(len(weights)) + ", but the optimizer was expecting " + str(len(params)) + " weights. Provided weights: " + str(weights)[:50] + "...") if not params: return weight_value_tuples = [] param_values = backend.batch_get_value(params) for pv, p, w in zip(param_values, params, weights): if pv.shape != w.shape: raise ValueError("Optimizer weight shape " + str(pv.shape) + " not compatible with " "provided weight shape " + str(w.shape)) weight_value_tuples.append((p, w)) backend.batch_set_value(weight_value_tuples) def add_weight(self, name, shape, dtype=None, initializer="zeros", trainable=None, synchronization=tf_variables.VariableSynchronization.AUTO, aggregation=tf_variables.VariableAggregation.NONE): if dtype is None: dtype = dtypes.float32 if isinstance(initializer, six.string_types) or callable(initializer): initializer = initializers.get(initializer) if synchronization == tf_variables.VariableSynchronization.ON_READ: if trainable: raise ValueError( "Synchronization value can be set to " "VariableSynchronization.ON_READ only for non-trainable variables. " "You have specified trainable=True and " "synchronization=VariableSynchronization.ON_READ.") else: # Set trainable to be false when variable is to be synced on read. trainable = False elif trainable is None: trainable = True variable = self._add_variable_with_custom_getter( name=name, shape=shape, getter=base_layer_utils.make_variable, overwrite=True, initializer=initializer, dtype=dtype, trainable=trainable, use_resource=True, synchronization=synchronization, aggregation=aggregation) backend.track_variable(variable) return variable def _assert_valid_dtypes(self, tensors): """Asserts tensors are all valid types (see `_valid_dtypes`). Args: tensors: Tensors to check. Raises: ValueError: If any tensor is not a valid type. """ valid_dtypes = self._valid_dtypes() for t in tensors: dtype = t.dtype.base_dtype if dtype not in valid_dtypes: raise ValueError("Invalid type %r for %s, expected: %s." % (dtype, t.name, [v for v in valid_dtypes])) def _valid_dtypes(self): """Valid types for loss, variables and gradients. Subclasses should override to allow other float types. Returns: Valid types for loss, variables and gradients. """ return set( [dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64]) def _call_if_callable(self, param): """Call the function if param is callable.""" return param() if callable(param) else param def _resource_apply_dense(self, grad, handle): """Add ops to apply dense gradients to the variable `handle`. Args: grad: a `Tensor` representing the gradient. handle: a `Tensor` of dtype `resource` which points to the variable to be updated. Returns: An `Operation` which updates the value of the variable. """ raise NotImplementedError() def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices): """Add ops to apply sparse gradients to `handle`, with repeated indices. Optimizers which override this method must deal with repeated indices. See the docstring of `_apply_sparse_duplicate_indices` for details. By default the correct behavior, to sum non-unique indices and their associated gradients, is enforced by first pre-processing `grad` and `indices` and passing them on to `_resource_apply_sparse`. 
Optimizers which deal correctly with duplicate indices may instead override this method to avoid the overhead of summing. Args: grad: a `Tensor` representing the gradient for the affected indices. handle: a `Tensor` of dtype `resource` which points to the variable to be updated. indices: a `Tensor` of integral type representing the indices for which the gradient is nonzero. Indices may be repeated. Returns: An `Operation` which updates the value of the variable. """ summed_grad, unique_indices = _deduplicate_indexed_slices( values=grad, indices=indices) return self._resource_apply_sparse(summed_grad, handle, unique_indices) def _resource_apply_sparse(self, grad, handle, indices): """Add ops to apply sparse gradients to the variable `handle`. Similar to `_apply_sparse`, the `indices` argument to this method has been de-duplicated. Optimizers which deal correctly with non-unique indices may instead override `_resource_apply_sparse_duplicate_indices` to avoid this overhead. Args: grad: a `Tensor` representing the gradient for the affected indices. handle: a `Tensor` of dtype `resource` which points to the variable to be updated. indices: a `Tensor` of integral type representing the indices for which the gradient is nonzero. Indices are unique. Returns: An `Operation` which updates the value of the variable. """ raise NotImplementedError() def _resource_scatter_add(self, x, i, v): with ops.control_dependencies( [resource_variable_ops.resource_scatter_add(x.handle, i, v)]): return x.value() def _resource_scatter_update(self, x, i, v): with ops.control_dependencies( [resource_variable_ops.resource_scatter_update(x.handle, i, v)]): return x.value() # --------------- # For implementing the trackable interface # --------------- def _restore_slot_variable(self, slot_name, variable, slot_variable): """Restore a newly created slot variable's value.""" variable_key = _var_key(variable) deferred_restorations = self._deferred_slot_restorations.get( slot_name, {}).pop(variable_key, []) # Iterate over restores, highest restore UID first to minimize the number # of assignments. deferred_restorations.sort(key=lambda position: position.restore_uid, reverse=True) for checkpoint_position in deferred_restorations: checkpoint_position.restore(slot_variable) def _create_or_restore_slot_variable( self, slot_variable_position, slot_name, variable): """Restore a slot variable's value, possibly creating it. Called when a variable which has an associated slot variable is created or restored. When executing eagerly, we create the slot variable with a restoring initializer. No new variables are created when graph building. Instead, _restore_slot_variable catches these after normal creation and adds restore ops to the graph. This method is nonetheless important when graph building for the case when a slot variable has already been created but `variable` has just been added to a dependency graph (causing us to realize that the slot variable needs to be restored). Args: slot_variable_position: A `trackable._CheckpointPosition` object indicating the slot variable `Trackable` object to be restored. slot_name: The name of this `Optimizer`'s slot to restore into. variable: The variable object this slot is being created for. 
""" variable_key = _var_key(variable) slot_dict = self._slots.get(variable_key, {}) slot_variable = slot_dict.get(slot_name, None) if (slot_variable is None and context.executing_eagerly() and slot_variable_position.is_simple_variable() # Defer slot variable creation if there is an active variable creator # scope. Generally we'd like to eagerly create/restore slot variables # when possible, but this may mean that scopes intended to catch # `variable` also catch its eagerly created slot variable # unintentionally (specifically make_template would add a dependency on # a slot variable if not for this case). Deferring is mostly harmless # (aside from double initialization), and makes variable creator scopes # behave the same way they do when graph building. and not ops.get_default_graph()._variable_creator_stack): # pylint: disable=protected-access initializer = trackable.CheckpointInitialValue( checkpoint_position=slot_variable_position) slot_variable = self.add_slot( var=variable, initializer=initializer, slot_name=slot_name) # Slot variables are not owned by any one object (because we don't want to # save the slot variable if the optimizer is saved without the non-slot # variable, or if the non-slot variable is saved without the optimizer; # it's a dependency hypergraph with edges of the form (optimizer, non-slot # variable, variable)). So we don't _track_ slot variables anywhere, and # instead special-case this dependency and otherwise pretend it's a normal # graph. if slot_variable is not None: # If we've either made this slot variable, or if we've pulled out an # existing slot variable, we should restore it. slot_variable_position.restore(slot_variable) else: # We didn't make the slot variable. Defer restoring until it gets created # normally. We keep a list rather than the one with the highest restore # UID in case slot variables have their own dependencies, in which case # those could differ between restores. self._deferred_slot_restorations.setdefault( slot_name, {}).setdefault(variable_key, []).append( slot_variable_position) def _filter_grads(grads_and_vars): """Filter out iterable with grad equal to None.""" grads_and_vars = tuple(grads_and_vars) if not grads_and_vars: return grads_and_vars filtered = [] vars_with_empty_grads = [] for grad, var in grads_and_vars: if grad is None: vars_with_empty_grads.append(var) else: filtered.append((grad, var)) filtered = tuple(filtered) if not filtered: raise ValueError("No gradients provided for any variable: %s." % ([v.name for _, v in grads_and_vars],)) if vars_with_empty_grads: logging.warning( ("Gradients does not exist for variables %s when minimizing the loss."), ([v.name for v in vars_with_empty_grads])) return filtered def _var_key(var): """Key for representing a primary variable, for looking up slots. In graph mode the name is derived from the var shared name. In eager mode the name is derived from the var unique id. If distribution strategy exists, get the primary variable first. Args: var: the variable. Returns: the unique name of the variable. """ # pylint: disable=protected-access # Get the distributed variable if it exists. 
if getattr(var, "_distributed_container", None) is not None: var = var._distributed_container() if var._in_graph_mode: return var._shared_name return var._unique_id def _get_slot_key_from_var(var, slot_name): """Get the slot key for the variable: var_name/slot_name.""" name = _var_key(var) return name + "/" + slot_name class _RestoredOptimizer(OptimizerV2): """A non-functional Optimizer implementation for checkpoint compatibility. Holds slot variables and hyperparameters when an optimizer is restored from a SavedModel. These variables may be referenced in functions along with ops created by the original optimizer, but currently we do not support using the optimizer object iself (e.g. through `apply_gradients`). """ # TODO(allenl): Make the restored optimizer functional by tracing its apply # methods. def __init__(self): super(_RestoredOptimizer, self).__init__("_RestoredOptimizer") self._hypers_created = True def get_config(self): # TODO(allenl): Save and restore the Optimizer's config raise NotImplementedError( "Restoring functional Optimzers from SavedModels is not currently " "supported. Please file a feature request if this limitation bothers " "you.") revived_types.register_revived_type( "optimizer", lambda obj: isinstance(obj, OptimizerV2), versions=[revived_types.VersionedTypeRegistration( object_factory=lambda proto: _RestoredOptimizer(), version=1, min_producer_version=1, min_consumer_version=1, setter=_RestoredOptimizer._set_hyper # pylint: disable=protected-access )])
[((2908, 2938), 'six.add_metaclass', 'six.add_metaclass', (['abc.ABCMeta'], {}), '(abc.ABCMeta)\n', (2925, 2938), False, 'import six\n'), ((2940, 2982), 'tensorflow.python.util.tf_export.keras_export', 'keras_export', (['"""keras.optimizers.Optimizer"""'], {}), "('keras.optimizers.Optimizer')\n", (2952, 2982), False, 'from tensorflow.python.util.tf_export import keras_export\n'), ((2712, 2737), 'tensorflow.python.ops.array_ops.unique', 'array_ops.unique', (['indices'], {}), '(indices)\n', (2728, 2737), False, 'from tensorflow.python.ops import array_ops\n'), ((14228, 14250), 'tensorflow.python.util.nest.flatten', 'nest.flatten', (['var_list'], {}), '(var_list)\n', (14240, 14250), False, 'from tensorflow.python.util import nest\n'), ((15145, 15165), 'tensorflow.python.util.nest.flatten', 'nest.flatten', (['params'], {}), '(params)\n', (15157, 15165), False, 'from tensorflow.python.util import nest\n'), ((26486, 26514), 'tensorflow.python.framework.tensor_util.is_tensor', 'tensor_util.is_tensor', (['value'], {}), '(value)\n', (26507, 26514), False, 'from tensorflow.python.framework import tensor_util\n'), ((26890, 26921), 'tensorflow.python.keras.backend.batch_get_value', 'backend.batch_get_value', (['params'], {}), '(params)\n', (26913, 26921), False, 'from tensorflow.python.keras import backend\n'), ((27462, 27493), 'tensorflow.python.keras.backend.batch_get_value', 'backend.batch_get_value', (['params'], {}), '(params)\n', (27485, 27493), False, 'from tensorflow.python.keras import backend\n'), ((27809, 27853), 'tensorflow.python.keras.backend.batch_set_value', 'backend.batch_set_value', (['weight_value_tuples'], {}), '(weight_value_tuples)\n', (27832, 27853), False, 'from tensorflow.python.keras import backend\n'), ((29233, 29265), 'tensorflow.python.keras.backend.track_variable', 'backend.track_variable', (['variable'], {}), '(variable)\n', (29255, 29265), False, 'from tensorflow.python.keras import backend\n'), ((37664, 37800), 'tensorflow.python.platform.tf_logging.warning', 'logging.warning', (['"""Gradients does not exist for variables %s when minimizing the loss."""', '[v.name for v in vars_with_empty_grads]'], {}), "(\n 'Gradients does not exist for variables %s when minimizing the loss.',\n [v.name for v in vars_with_empty_grads])\n", (37679, 37800), True, 'from tensorflow.python.platform import tf_logging as logging\n'), ((2828, 2859), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['unique_indices'], {}), '(unique_indices)\n', (2843, 2859), False, 'from tensorflow.python.ops import array_ops\n'), ((14037, 14060), 'tensorflow.python.eager.backprop.GradientTape', 'backprop.GradientTape', ([], {}), '()\n', (14058, 14060), False, 'from tensorflow.python.eager import backprop\n'), ((15223, 15256), 'tensorflow.python.ops.gradients.gradients', 'gradients.gradients', (['loss', 'params'], {}), '(loss, params)\n', (15242, 15256), False, 'from tensorflow.python.ops import gradients\n'), ((16752, 16768), 'tensorflow.python.framework.ops.init_scope', 'ops.init_scope', ([], {}), '()\n', (16766, 16768), False, 'from tensorflow.python.framework import ops\n'), ((18113, 18151), 'tensorflow.python.keras.backend.name_scope', 'backend.name_scope', (['(name or self._name)'], {}), '(name or self._name)\n', (18131, 18151), False, 'from tensorflow.python.keras import backend\n'), ((20470, 20497), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['value', 'dtype'], {}), '(value, dtype)\n', (20483, 20497), False, 'from tensorflow.python.ops import math_ops\n'), ((22126, 22155), 
'tensorflow.python.distribute.distribution_strategy_context.get_strategy', 'distribute_ctx.get_strategy', ([], {}), '()\n', (22153, 22155), True, 'from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx\n'), ((22449, 22479), 'tensorflow.python.keras.backend.track_variable', 'backend.track_variable', (['weight'], {}), '(weight)\n', (22471, 22479), False, 'from tensorflow.python.keras import backend\n'), ((24361, 24402), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['self.iterations', 'var_dtype'], {}), '(self.iterations, var_dtype)\n', (24374, 24402), False, 'from tensorflow.python.ops import math_ops\n'), ((24511, 24552), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['self.iterations', 'var_dtype'], {}), '(self.iterations, var_dtype)\n', (24524, 24552), False, 'from tensorflow.python.ops import math_ops\n'), ((26394, 26433), 'tensorflow.python.keras.optimizer_v2.learning_rate_schedule.serialize', 'learning_rate_schedule.serialize', (['value'], {}), '(value)\n', (26426, 26433), False, 'from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule\n'), ((26529, 26553), 'tensorflow.python.keras.backend.get_value', 'backend.get_value', (['value'], {}), '(value)\n', (26546, 26553), False, 'from tensorflow.python.keras import backend\n'), ((28317, 28346), 'tensorflow.python.keras.initializers.get', 'initializers.get', (['initializer'], {}), '(initializer)\n', (28333, 28346), False, 'from tensorflow.python.keras import initializers\n'), ((34923, 34950), 'tensorflow.python.eager.context.executing_eagerly', 'context.executing_eagerly', ([], {}), '()\n', (34948, 34950), False, 'from tensorflow.python.eager import context\n'), ((35721, 35797), 'tensorflow.python.training.tracking.base.CheckpointInitialValue', 'trackable.CheckpointInitialValue', ([], {'checkpoint_position': 'slot_variable_position'}), '(checkpoint_position=slot_variable_position)\n', (35753, 35797), True, 'from tensorflow.python.training.tracking import base as trackable\n'), ((14360, 14399), 'tensorflow.python.ops.clip_ops.clip_by_norm', 'clip_ops.clip_by_norm', (['g', 'self.clipnorm'], {}), '(g, self.clipnorm)\n', (14381, 14399), False, 'from tensorflow.python.ops import clip_ops\n'), ((14477, 14535), 'tensorflow.python.ops.clip_ops.clip_by_value', 'clip_ops.clip_by_value', (['g', '(-self.clipvalue)', 'self.clipvalue'], {}), '(g, -self.clipvalue, self.clipvalue)\n', (14499, 14535), False, 'from tensorflow.python.ops import clip_ops\n'), ((15708, 15747), 'tensorflow.python.ops.clip_ops.clip_by_norm', 'clip_ops.clip_by_norm', (['g', 'self.clipnorm'], {}), '(g, self.clipnorm)\n', (15729, 15747), False, 'from tensorflow.python.ops import clip_ops\n'), ((15825, 15883), 'tensorflow.python.ops.clip_ops.clip_by_value', 'clip_ops.clip_by_value', (['g', '(-self.clipvalue)', 'self.clipvalue'], {}), '(g, -self.clipvalue, self.clipvalue)\n', (15847, 15883), False, 'from tensorflow.python.ops import clip_ops\n'), ((16900, 16936), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'distribute_ctx.get_replica_context', ([], {}), '()\n', (16934, 16936), True, 'from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx\n'), ((20129, 20172), 'tensorflow.python.keras.backend.set_value', 'backend.set_value', (['self._hyper[name]', 'value'], {}), '(self._hyper[name], value)\n', (20146, 20172), False, 'from tensorflow.python.keras import backend\n'), ((21929, 21958), 'tensorflow.python.keras.initializers.get', 'initializers.get', 
(['initializer'], {}), '(initializer)\n', (21945, 21958), False, 'from tensorflow.python.keras import initializers\n'), ((21983, 22047), 'functools.partial', 'functools.partial', (['initializer'], {'shape': 'var.shape', 'dtype': 'var.dtype'}), '(initializer, shape=var.shape, dtype=var.dtype)\n', (22000, 22047), False, 'import functools\n'), ((22227, 22362), 'tensorflow.python.ops.variables.Variable', 'tf_variables.Variable', ([], {'name': "('%s/%s' % (var._shared_name, slot_name))", 'dtype': 'var.dtype', 'trainable': '(False)', 'initial_value': 'initial_value'}), "(name='%s/%s' % (var._shared_name, slot_name), dtype=\n var.dtype, trainable=False, initial_value=initial_value)\n", (22248, 22362), True, 'from tensorflow.python.ops import variables as tf_variables\n'), ((25996, 26091), 'tensorflow.python.keras.optimizer_v2.learning_rate_schedule.deserialize', 'learning_rate_schedule.deserialize', (["config['learning_rate']"], {'custom_objects': 'custom_objects'}), "(config['learning_rate'], custom_objects=\n custom_objects)\n", (26030, 26091), False, 'from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule\n'), ((15175, 15194), 'tensorflow.python.keras.backend.get_graph', 'backend.get_graph', ([], {}), '()\n', (15192, 15194), False, 'from tensorflow.python.keras import backend\n'), ((17958, 17995), 'tensorflow.python.framework.ops.control_dependencies', 'ops.control_dependencies', (['[update_op]'], {}), '([update_op])\n', (17982, 17995), False, 'from tensorflow.python.framework import ops\n'), ((18220, 18261), 'tensorflow.python.framework.ops.executing_eagerly_outside_functions', 'ops.executing_eagerly_outside_functions', ([], {}), '()\n', (18259, 18261), False, 'from tensorflow.python.framework import ops\n'), ((18321, 18362), 'tensorflow.python.keras.backend.name_scope', 'backend.name_scope', (["('update' + scope_name)"], {}), "('update' + scope_name)\n", (18339, 18362), False, 'from tensorflow.python.keras import backend\n'), ((18662, 18689), 'tensorflow.python.eager.context.executing_eagerly', 'context.executing_eagerly', ([], {}), '()\n', (18687, 18689), False, 'from tensorflow.python.eager import context\n'), ((32737, 32795), 'tensorflow.python.ops.resource_variable_ops.resource_scatter_add', 'resource_variable_ops.resource_scatter_add', (['x.handle', 'i', 'v'], {}), '(x.handle, i, v)\n', (32779, 32795), False, 'from tensorflow.python.ops import resource_variable_ops\n'), ((32914, 32975), 'tensorflow.python.ops.resource_variable_ops.resource_scatter_update', 'resource_variable_ops.resource_scatter_update', (['x.handle', 'i', 'v'], {}), '(x.handle, i, v)\n', (32959, 32975), False, 'from tensorflow.python.ops import resource_variable_ops\n'), ((35615, 35638), 'tensorflow.python.framework.ops.get_default_graph', 'ops.get_default_graph', ([], {}), '()\n', (35636, 35638), False, 'from tensorflow.python.framework import ops\n'), ((18597, 18627), 'tensorflow.python.keras.utils.tf_utils.is_symbolic_tensor', 'tf_utils.is_symbolic_tensor', (['i'], {}), '(i)\n', (18624, 18627), False, 'from tensorflow.python.keras.utils import tf_utils\n'), ((19031, 19067), 'tensorflow.python.framework.ops.control_dependencies', 'ops.control_dependencies', (['update_ops'], {}), '(update_ops)\n', (19055, 19067), False, 'from tensorflow.python.framework import ops\n'), ((18927, 18965), 'tensorflow.python.framework.ops._get_graph_from_inputs', 'ops._get_graph_from_inputs', (['update_ops'], {}), '(update_ops)\n', (18953, 18965), False, 'from tensorflow.python.framework import ops\n')]
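A small NumPy illustration of the duplicate-index handling performed by _deduplicate_indexed_slices() in the record above (rows sharing an index are summed); the array values are made up for the example and this is not TensorFlow's own code path.

import numpy as np

indices = np.array([0, 2, 0])
values = np.array([[1., 1.], [2., 2.], [3., 3.]])
unique_indices, positions = np.unique(indices, return_inverse=True)
summed = np.zeros((unique_indices.size, values.shape[1]))
np.add.at(summed, positions, values)  # summed[k] += values[i] for every i with positions[i] == k
# unique_indices -> [0 2], summed -> [[4. 4.] [2. 2.]]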
danielrosendos/djangoRestFramework
escola/teste_get.py
946bb95b8dd9976d1920302ce724572ffd9f98cf
import requests

headers = {
    'content-type': 'application/json',
    'Authorization': 'Token 80ca9f249b80e7226cdc7fcaada8d7297352f0f9'
}

url_base_cursos = 'http://127.0.0.1:8000/api/v2/cursos'
url_base_avaliacoes = 'http://127.0.0.1:8000/api/v2/avaliacoes'

resultado = requests.get(url=url_base_cursos, headers=headers)

assert resultado.status_code == 200
[((275, 325), 'requests.get', 'requests.get', ([], {'url': 'url_base_cursos', 'headers': 'headers'}), '(url=url_base_cursos, headers=headers)\n', (287, 325), False, 'import requests\n')]
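The script above defines url_base_avaliacoes but never exercises it; a parallel check (hypothetical, reusing the same token header) would be:

resultado_avaliacoes = requests.get(url=url_base_avaliacoes, headers=headers)
assert resultado_avaliacoes.status_code == 200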
aaronjwood/alb-sdk
python/avi/sdk/utils/waf_policy/vdi_waf_policy.py
ae4c47b2228651d3f5095e7c14f081aa4adbb732
# Copyright 2021 VMware, Inc. import argparse import json import re import logging import os import sys from avi.sdk.avi_api import ApiSession API_VERSION = "18.2.13" SYSTEM_WAF_POLICY_VDI='System-WAF-Policy-VDI' logger = logging.getLogger(__name__) def add_allowlist_rule(waf_policy_obj): #add a allowlist rule to allow request with uri beginning with /ice/ allowlist_rule={ "index": 0, "name": "allowlist-start-with-ice", "description": "WAF will buffer the whole request body first and then release to backend. With VDI, client wants to stream data between client and server for some URLs like /ice/..., we should allow these URLs", "actions": [ "WAF_POLICY_WHITELIST_ACTION_ALLOW" ], "match": { "path": { "match_case": "SENSITIVE", "match_str": [ "/ice/" ], "match_criteria": "BEGINS_WITH" } } } index = 0 waf_policy_obj.setdefault("whitelist", {}).setdefault("rules", []) for rule in waf_policy_obj["whitelist"]["rules"][:]: if rule["name"] == "allowlist-start-with-ice": waf_policy_obj["whitelist"]["rules"].remove(rule) if rule["index"]>index: index = rule["index"] allowlist_rule["index"] = index+1 waf_policy_obj["whitelist"]["rules"].append(allowlist_rule) def get_id_from_group(group): pattern = re.compile("[^\d]*(?P<group_id>\d\d\d)") match = pattern.match(group["name"]) assert match, "can not extract group id from group '{}'".format(group["name"]) groupid = int(match.group("group_id")) assert groupid == 0 or 100 <= groupid <= 999, "group id for group '{}' not in expected range".format(group["name"]) return groupid def disable_crs_response_rules(waf_policy_obj): #disable response side rules and some specific rules for crs_group in waf_policy_obj.get("crs_groups", []): group_id = get_id_from_group(crs_group) if group_id >= 950: crs_group["enable"] = False for rule in crs_group.get("rules", []): if rule["rule_id"] == "920330" or rule["rule_id"] == "932105": rule["enable"] = False def add_pre_crs_group(waf_policy_obj): #add a rule to parse body as xml for requests with /broker/xml uri xml_rule = [ { "index": 0, "name": "enforce XML parsing for /broker/xml", "description": "Clients often send the wrong Content-Type header. We ignore the header and enforce the request body to be parsed as XML in WAF", "rule": "SecRule REQUEST_METHOD \"@streq POST\" \"phase:1,id:4099822,t:none,nolog,pass,chain\" \n SecRule REQUEST_URI \"@streq /broker/xml\" \"t:none,ctl:requestBodyProcessor=XML\"" } ] pre_crs_group = { "index": 0, "name": "VDI_409_ENFORCE_XML", "rules": xml_rule } index = 0 if "pre_crs_groups" not in waf_policy_obj: waf_policy_obj["pre_crs_groups"] = list() for pre_crs in waf_policy_obj["pre_crs_groups"]: if pre_crs["name"] == "VDI_409_ENFORCE_XML": pre_crs["rules"] = xml_rule pre_crs["enable"] = True return if pre_crs["index"] > index: index = pre_crs["index"] pre_crs_group["index"] = index + 1 waf_policy_obj["pre_crs_groups"].append(pre_crs_group) def get_crs(api): tested_crs = "CRS-2021-1" resp = api.get("wafcrs?name=" + tested_crs) if resp.status_code not in range(200, 300): if resp.status_code == 404: logger.error("Controller does not have CRS %s, please install first." 
% tested_crs) return None logger.error('Error : %s', resp.text) exit(0) waf_crs = json.loads(resp.text)["results"] return waf_crs[0] def create_vdi_waf_policy(api, args): waf_policy_obj = { "name": SYSTEM_WAF_POLICY_VDI, "mode": "WAF_MODE_DETECTION_ONLY", "waf_profile_ref": "/api/wafprofile?name=System-WAF-Profile" } waf_crs = get_crs(api) if waf_crs is None: return waf_policy_obj["waf_crs_ref"]="/api/wafcrs?name="+waf_crs["name"] waf_policy_obj["crs_groups"] = list() for group in waf_crs["groups"]: waf_policy_obj["crs_groups"].append(group) add_allowlist_rule(waf_policy_obj) disable_crs_response_rules(waf_policy_obj) add_pre_crs_group(waf_policy_obj) resp = api.post('wafpolicy', data=json.dumps(waf_policy_obj)) if resp.status_code in range(200, 300): logger.debug('Create WAF policy successfully') else: logger.error('Error : %s' % resp.text) def update_waf_policy(api, args, waf_policy_obj): add_allowlist_rule(waf_policy_obj) disable_crs_response_rules(waf_policy_obj) add_pre_crs_group(waf_policy_obj) resp = api.put('wafpolicy/%s' %waf_policy_obj['uuid'], data=waf_policy_obj) if resp.status_code in range(200, 300): logger.debug('Create WAF policy successfully') else: logger.error('Error : %s' % resp.text) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('-u', '--user', action="store", help='controller user', default='admin') parser.add_argument('-p', '--password', action="store", help='controller user password', default='admin') parser.add_argument('-t', '--tenant', action="store", help='tenant name', default='admin') parser.add_argument('-a', '--authtoken', help='Authentication token') parser.add_argument('-c', '--controller_ip', action="store", help='controller ip') args = parser.parse_args() if args.password: api = ApiSession.get_session(args.controller_ip, args.user, args.password, tenant=args.tenant, api_version=API_VERSION) elif args.authtoken: api = ApiSession.get_session(args.controller_ip, args.user, tenant=args.tenant, token=args.authtoken, api_version=API_VERSION) else: logging.error("Either password or authtokentoken must be provided.") sys.exit(1) waf_policy_obj = api.get_object_by_name('wafpolicy', SYSTEM_WAF_POLICY_VDI) if not waf_policy_obj: create_vdi_waf_policy(api, args) else: update_waf_policy(api, args, waf_policy_obj)
[((226, 253), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (243, 253), False, 'import logging\n'), ((1465, 1509), 're.compile', 're.compile', (['"""[^\\\\d]*(?P<group_id>\\\\d\\\\d\\\\d)"""'], {}), "('[^\\\\d]*(?P<group_id>\\\\d\\\\d\\\\d)')\n", (1475, 1509), False, 'import re\n'), ((5158, 5183), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5181, 5183), False, 'import argparse\n'), ((3819, 3840), 'json.loads', 'json.loads', (['resp.text'], {}), '(resp.text)\n', (3829, 3840), False, 'import json\n'), ((5787, 5905), 'avi.sdk.avi_api.ApiSession.get_session', 'ApiSession.get_session', (['args.controller_ip', 'args.user', 'args.password'], {'tenant': 'args.tenant', 'api_version': 'API_VERSION'}), '(args.controller_ip, args.user, args.password, tenant\n =args.tenant, api_version=API_VERSION)\n', (5809, 5905), False, 'from avi.sdk.avi_api import ApiSession\n'), ((4521, 4547), 'json.dumps', 'json.dumps', (['waf_policy_obj'], {}), '(waf_policy_obj)\n', (4531, 4547), False, 'import json\n'), ((5977, 6101), 'avi.sdk.avi_api.ApiSession.get_session', 'ApiSession.get_session', (['args.controller_ip', 'args.user'], {'tenant': 'args.tenant', 'token': 'args.authtoken', 'api_version': 'API_VERSION'}), '(args.controller_ip, args.user, tenant=args.tenant,\n token=args.authtoken, api_version=API_VERSION)\n', (5999, 6101), False, 'from avi.sdk.avi_api import ApiSession\n'), ((6153, 6221), 'logging.error', 'logging.error', (['"""Either password or authtokentoken must be provided."""'], {}), "('Either password or authtokentoken must be provided.')\n", (6166, 6221), False, 'import logging\n'), ((6230, 6241), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6238, 6241), False, 'import sys\n')]
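A minimal sketch (not part of the original repository) showing how the helpers above could be driven from another Python module instead of the CLI, assuming the script above is importable and an Avi controller is reachable; the controller address, credentials and tenant are placeholders:

# Hypothetical driver built only on names defined in the script above
# (API_VERSION, SYSTEM_WAF_POLICY_VDI, create_vdi_waf_policy, update_waf_policy).
from avi.sdk.avi_api import ApiSession

def ensure_vdi_waf_policy(controller_ip, user, password, tenant='admin'):
    api = ApiSession.get_session(controller_ip, user, password,
                                 tenant=tenant, api_version=API_VERSION)
    existing = api.get_object_by_name('wafpolicy', SYSTEM_WAF_POLICY_VDI)
    if not existing:
        create_vdi_waf_policy(api, args=None)   # args is not used by the function
    else:
        update_waf_policy(api, args=None, waf_policy_obj=existing)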
Chace-wang/bk-user
src/api/bkuser_core/tests/bkiam/test_constants.py
057f270d66a1834312306c9fba1f4e95521f10b1
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import pytest
from bkuser_core.bkiam.constants import ResourceType
from bkuser_core.categories.models import Department, ProfileCategory
from bkuser_core.tests.utils import make_simple_department

pytestmark = pytest.mark.django_db


class TestResourceTypeEnum:
    @pytest.mark.parametrize(
        "is_leaf, path, f, v",
        [
            (True, "/category,5/department,3440/department,3443/", "parent_id", 3443),
            (False, "/category,5/department,3440/department,3443/", "id", 3443),
            (True, "/category,5/", "category_id", 5),
            (False, "/category,5/", "category_id", 5),
            (True, "/department,3440/department,3443/", "parent_id", 3443),
            (False, "/department,3440/department,3443/", "id", 3443),
        ],
    )
    def test_get_key_mapping(self, is_leaf, path, f, v):
        key_mapping = ResourceType.get_key_mapping(ResourceType.DEPARTMENT)
        path_method = key_mapping["department._bk_iam_path_"]

        data = {"value": path}
        if not is_leaf:
            data["node_type"] = "non-leaf"

        # compare the mapped field/value against the expected parametrized values
        # (the original shadowed f and v, so its asserts were always true)
        mapped_f, mapped_v = path_method(data)
        assert mapped_f == f
        assert mapped_v == v

    @pytest.mark.parametrize(
        "dep_chain, expected",
        [
            (
                [1000, 1001, 1002],
                {"_bk_iam_path_": "/category,1/department,1000/department,1001/department,1002/"},
            ),
            (
                [1000],
                {"_bk_iam_path_": "/category,1/department,1000/"},
            ),
        ],
    )
    def test_get_attributes_mapping(self, dep_chain, expected):
        target_parent = None
        for d in dep_chain:
            parent_id = target_parent if not target_parent else target_parent.pk
            target_parent = make_simple_department(str(d), force_create_params={"id": d}, parent_id=parent_id)

        attributes_mapping = ResourceType.get_attributes_mapping(target_parent)
        assert attributes_mapping == expected

    def test_get_attributes_mapping_other(self):
        pc = ProfileCategory.objects.get_default()
        attributes_mapping = ResourceType.get_attributes_mapping(pc)
        assert attributes_mapping == {}

    @pytest.mark.parametrize(
        "dep_chain,expected",
        [
            (
                ["a", "b", "c"],
                [
                    ("category", "默认目录"),
                    ("department", "a"),
                    ("department", "b"),
                    ("department", "c"),
                ],
            ),
            (
                ["a", "b"],
                [("category", "默认目录"), ("department", "a"), ("department", "b")],
            ),
        ],
    )
    def test_get_resource_nodes_dep(self, dep_chain, expected):
        target_parent = None
        for d in dep_chain:
            parent_id = target_parent if not target_parent else target_parent.pk
            target_parent = make_simple_department(d, parent_id=parent_id)

        # only parents were added, so the mptt tree needs to be rebuilt
        Department.tree_objects.rebuild()

        nodes = ResourceType.get_instance_resource_nodes(target_parent)
        assert [(x["type"], x["name"]) for x in nodes] == expected

    def test_get_resource_nodes_other(self):
        pc = ProfileCategory.objects.get_default()
        nodes = ResourceType.get_instance_resource_nodes(pc)
        assert [(x["type"], x["name"]) for x in nodes] == [("category", "默认目录")]
[((978, 1404), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_leaf, path, f, v"""', "[(True, '/category,5/department,3440/department,3443/', 'parent_id', 3443),\n (False, '/category,5/department,3440/department,3443/', 'id', 3443), (\n True, '/category,5/', 'category_id', 5), (False, '/category,5/',\n 'category_id', 5), (True, '/department,3440/department,3443/',\n 'parent_id', 3443), (False, '/department,3440/department,3443/', 'id', \n 3443)]"], {}), "('is_leaf, path, f, v', [(True,\n '/category,5/department,3440/department,3443/', 'parent_id', 3443), (\n False, '/category,5/department,3440/department,3443/', 'id', 3443), (\n True, '/category,5/', 'category_id', 5), (False, '/category,5/',\n 'category_id', 5), (True, '/department,3440/department,3443/',\n 'parent_id', 3443), (False, '/department,3440/department,3443/', 'id', \n 3443)])\n", (1001, 1404), False, 'import pytest\n'), ((1862, 2090), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dep_chain, expected"""', "[([1000, 1001, 1002], {'_bk_iam_path_':\n '/category,1/department,1000/department,1001/department,1002/'}), ([\n 1000], {'_bk_iam_path_': '/category,1/department,1000/'})]"], {}), "('dep_chain, expected', [([1000, 1001, 1002], {\n '_bk_iam_path_':\n '/category,1/department,1000/department,1001/department,1002/'}), ([\n 1000], {'_bk_iam_path_': '/category,1/department,1000/'})])\n", (1885, 2090), False, 'import pytest\n'), ((2885, 3132), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dep_chain,expected"""', "[(['a', 'b', 'c'], [('category', '默认目录'), ('department', 'a'), (\n 'department', 'b'), ('department', 'c')]), (['a', 'b'], [('category',\n '默认目录'), ('department', 'a'), ('department', 'b')])]"], {}), "('dep_chain,expected', [(['a', 'b', 'c'], [(\n 'category', '默认目录'), ('department', 'a'), ('department', 'b'), (\n 'department', 'c')]), (['a', 'b'], [('category', '默认目录'), ('department',\n 'a'), ('department', 'b')])])\n", (2908, 3132), False, 'import pytest\n'), ((1563, 1616), 'bkuser_core.bkiam.constants.ResourceType.get_key_mapping', 'ResourceType.get_key_mapping', (['ResourceType.DEPARTMENT'], {}), '(ResourceType.DEPARTMENT)\n', (1591, 1616), False, 'from bkuser_core.bkiam.constants import ResourceType\n'), ((2572, 2622), 'bkuser_core.bkiam.constants.ResourceType.get_attributes_mapping', 'ResourceType.get_attributes_mapping', (['target_parent'], {}), '(target_parent)\n', (2607, 2622), False, 'from bkuser_core.bkiam.constants import ResourceType\n'), ((2732, 2769), 'bkuser_core.categories.models.ProfileCategory.objects.get_default', 'ProfileCategory.objects.get_default', ([], {}), '()\n', (2767, 2769), False, 'from bkuser_core.categories.models import Department, ProfileCategory\n'), ((2799, 2838), 'bkuser_core.bkiam.constants.ResourceType.get_attributes_mapping', 'ResourceType.get_attributes_mapping', (['pc'], {}), '(pc)\n', (2834, 2838), False, 'from bkuser_core.bkiam.constants import ResourceType\n'), ((3688, 3721), 'bkuser_core.categories.models.Department.tree_objects.rebuild', 'Department.tree_objects.rebuild', ([], {}), '()\n', (3719, 3721), False, 'from bkuser_core.categories.models import Department, ProfileCategory\n'), ((3739, 3794), 'bkuser_core.bkiam.constants.ResourceType.get_instance_resource_nodes', 'ResourceType.get_instance_resource_nodes', (['target_parent'], {}), '(target_parent)\n', (3779, 3794), False, 'from bkuser_core.bkiam.constants import ResourceType\n'), ((3921, 3958), 'bkuser_core.categories.models.ProfileCategory.objects.get_default', 
'ProfileCategory.objects.get_default', ([], {}), '()\n', (3956, 3958), False, 'from bkuser_core.categories.models import Department, ProfileCategory\n'), ((3975, 4019), 'bkuser_core.bkiam.constants.ResourceType.get_instance_resource_nodes', 'ResourceType.get_instance_resource_nodes', (['pc'], {}), '(pc)\n', (4015, 4019), False, 'from bkuser_core.bkiam.constants import ResourceType\n'), ((3600, 3646), 'bkuser_core.tests.utils.make_simple_department', 'make_simple_department', (['d'], {'parent_id': 'parent_id'}), '(d, parent_id=parent_id)\n', (3622, 3646), False, 'from bkuser_core.tests.utils import make_simple_department\n')]
johnh865/election_sim
votesim/benchmarks/__init__.py
b73b7e65f1bb22abb82cbe8442fcf02b0c20894e
# from votesim.benchmarks.benchrunner import (
#     run_benchmark,
#     get_benchmarks,
#     post_benchmark,
#     plot_benchmark,
# )
from votesim.benchmarks import runtools, simple
[]
MrIgumnov96/ETL-CloudDeployment
src/handler.py
666b85a9350460fba49f82ec90f5cddc0bdd0235
import boto3
import src.app as app
import csv
import psycopg2 as ps
import os
from dotenv import load_dotenv

load_dotenv()

dbname = os.environ["db"]
host = os.environ["host"]
port = os.environ["port"]
user = os.environ["user"]
password = os.environ["pass"]

connection = ps.connect(dbname=dbname, host=host, port=port, user=user, password=password)


def handle(event, context):
    cursor = connection.cursor()
    cursor.execute("SELECT 1", ())
    print(cursor.fetchall())

    # Get key and bucket information
    key = event['Records'][0]['s3']['object']['key']
    bucket = event['Records'][0]['s3']['bucket']['name']

    # use the boto3 library to get the object from S3
    s3 = boto3.client('s3')
    s3_object = s3.get_object(Bucket=bucket, Key=key)
    data = s3_object['Body'].read().decode('utf-8')
    all_lines = []

    # read CSV
    # csv_data = csv.reader(data.splitlines())
    # for row in csv_data:
    #     datestr = row[0]  # .replace('/', '-')
    #     # print(datestr)
    #     date_obj = datetime.strptime(datestr, '%d/%m/%Y %H:%M')
    #     # print(date_obj)
    #     # time = str(row[0][-5:])
    #     location = str(row[1])
    #     order = str(row[3])
    #     total = str(row[4])
    #     all_lines.append({'date': date_obj, 'location': location, 'order': order, 'total': total})
    # return cached_list
    # print(all_lines)

    app.start_app(all_lines, data)
    for line in all_lines:
        print(line)

    return {"message": "success!!! Check the cloud watch logs for this lambda in cloudwatch https://eu-west-1.console.aws.amazon.com/cloudwatch/home?region=eu-west-1#logsV2:log-groups"}

# Form all the lines of data into a list of lists
# all_lines = [line for line in csv_data]
# print(data)
# print(all_lines)
[((111, 124), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (122, 124), False, 'from dotenv import load_dotenv\n'), ((273, 350), 'psycopg2.connect', 'ps.connect', ([], {'dbname': 'dbname', 'host': 'host', 'port': 'port', 'user': 'user', 'password': 'password'}), '(dbname=dbname, host=host, port=port, user=user, password=password)\n', (283, 350), True, 'import psycopg2 as ps\n'), ((805, 823), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (817, 823), False, 'import boto3\n'), ((1498, 1528), 'src.app.start_app', 'app.start_app', (['all_lines', 'data'], {}), '(all_lines, data)\n', (1511, 1528), True, 'import src.app as app\n')]
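A small sketch (not from the repository) of the S3 put-event shape that handle() above reads, which can be used to invoke the handler locally; the bucket and object key are placeholders, and a reachable Postgres plus valid AWS credentials are assumed because the module connects at import time:

# Hypothetical local invocation of handle(); the values below are illustrative only.
sample_event = {
    'Records': [
        {
            's3': {
                'bucket': {'name': 'example-etl-bucket'},        # placeholder bucket
                'object': {'key': 'raw/2021-01-01-sales.csv'},   # placeholder key
            }
        }
    ]
}

print(handle(sample_event, context=None))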
cpelley/improver
improver_tests/precipitation_type/test_utilities.py
ebf77fe2adc85ed7aec74c26671872a2e4388ded
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
#   list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
#   contributors may be used to endorse or promote products derived from
#   this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests of precipitation_type utilities"""

import numpy as np
import pytest
from iris.exceptions import CoordinateNotFoundError

from improver.metadata.constants import FLOAT_DTYPE
from improver.precipitation_type.utilities import make_shower_condition_cube
from improver.synthetic_data.set_up_test_cubes import set_up_probability_cube


def set_up_test_cube(n_thresholds=1):
    """Set up a cube testing shower condition conversion"""
    thresholds = np.arange(n_thresholds)
    shape = [2, 2]
    shape = [n_thresholds, *shape] if n_thresholds > 0 else shape
    data = np.ones(shape, dtype=FLOAT_DTYPE)
    cube = set_up_probability_cube(
        data,
        thresholds,
        variable_name="texture_of_cloud_area_fraction",
        threshold_units=1,
        spatial_grid="equalarea",
    )
    return cube


def test_basic():
    """Test that with a valid input the cube is transformed into a shower
    condition cube."""
    cube = set_up_test_cube()
    result = make_shower_condition_cube(cube)
    threshold_coord = result.coord(var_name="threshold")

    assert result.name() == "probability_of_shower_condition_above_threshold"
    assert result.dtype == FLOAT_DTYPE
    assert (result.data == cube.data).all()
    assert threshold_coord.name() == "shower_condition"
    assert threshold_coord.units == 1


def test_no_threshold_coord():
    """Test an exception is raised if the proxy diagnostic cube does not
    have a threshold coordinate."""
    cube = set_up_test_cube()
    cube.remove_coord("texture_of_cloud_area_fraction")
    expected = "Input has no threshold coordinate and cannot be used"
    with pytest.raises(CoordinateNotFoundError, match=expected):
        make_shower_condition_cube(cube)


def test_multi_valued_threshold_coord():
    """Test an exception is raised if the proxy diagnostic cube has a multi
    valued threshold coordinate."""
    cube = set_up_test_cube(n_thresholds=2)
    expected = "Expected a single valued threshold coordinate.*"
    with pytest.raises(ValueError, match=expected):
        make_shower_condition_cube(cube)
[((2118, 2141), 'numpy.arange', 'np.arange', (['n_thresholds'], {}), '(n_thresholds)\n', (2127, 2141), True, 'import numpy as np\n'), ((2238, 2271), 'numpy.ones', 'np.ones', (['shape'], {'dtype': 'FLOAT_DTYPE'}), '(shape, dtype=FLOAT_DTYPE)\n', (2245, 2271), True, 'import numpy as np\n'), ((2283, 2427), 'improver.synthetic_data.set_up_test_cubes.set_up_probability_cube', 'set_up_probability_cube', (['data', 'thresholds'], {'variable_name': '"""texture_of_cloud_area_fraction"""', 'threshold_units': '(1)', 'spatial_grid': '"""equalarea"""'}), "(data, thresholds, variable_name=\n 'texture_of_cloud_area_fraction', threshold_units=1, spatial_grid=\n 'equalarea')\n", (2306, 2427), False, 'from improver.synthetic_data.set_up_test_cubes import set_up_probability_cube\n'), ((2642, 2674), 'improver.precipitation_type.utilities.make_shower_condition_cube', 'make_shower_condition_cube', (['cube'], {}), '(cube)\n', (2668, 2674), False, 'from improver.precipitation_type.utilities import make_shower_condition_cube\n'), ((3297, 3351), 'pytest.raises', 'pytest.raises', (['CoordinateNotFoundError'], {'match': 'expected'}), '(CoordinateNotFoundError, match=expected)\n', (3310, 3351), False, 'import pytest\n'), ((3361, 3393), 'improver.precipitation_type.utilities.make_shower_condition_cube', 'make_shower_condition_cube', (['cube'], {}), '(cube)\n', (3387, 3393), False, 'from improver.precipitation_type.utilities import make_shower_condition_cube\n'), ((3669, 3710), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'expected'}), '(ValueError, match=expected)\n', (3682, 3710), False, 'import pytest\n'), ((3720, 3752), 'improver.precipitation_type.utilities.make_shower_condition_cube', 'make_shower_condition_cube', (['cube'], {}), '(cube)\n', (3746, 3752), False, 'from improver.precipitation_type.utilities import make_shower_condition_cube\n')]
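A short usage sketch (not part of the test module) of the helper and the function under test, assuming improver and its iris/numpy dependencies are installed; the expected outputs are taken from the assertions above:

# Build a single-threshold probability cube and convert it to a shower-condition cube.
cube = set_up_test_cube(n_thresholds=1)
shower_cube = make_shower_condition_cube(cube)
print(shower_cube.name())                               # probability_of_shower_condition_above_threshold
print(shower_cube.coord(var_name="threshold").name())   # shower_condition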
bryan-munene/Store-Manager-DB
app/api/v1/models/items.py
40b24039189aea6854d7fcf33ccb648bb6642231
from .db_conn import ModelSetup


class ItemsModel(ModelSetup):
    '''Handles the data logic of the items section'''

    def __init__(
            self,
            name=None,
            price=None,
            quantity=None,
            category_id=None,
            reorder_point=None,
            auth=None):
        '''Initializes the variables for the items class'''
        self.name = name
        self.price = price
        self.quantity = quantity
        self.category_id = category_id
        self.reorder_point = reorder_point
        self.auth = auth

    def add_item(
            self,
            name,
            price,
            quantity,
            image,
            category_id,
            reorder_point,
            auth):
        '''Adds an item given the above arguments, then returns the created item'''
        model = ModelSetup()
        self.conn = model.conn
        self.cur = model.cur
        query = """INSERT INTO items(name, price, quantity, image, category, reorder_point, created_by)\
                VALUES(%s,%s,%s,%s,%s,%s,%s);"""
        self.cur.execute(
            query, (name, price, quantity, image, category_id, reorder_point, auth))
        self.conn.commit()

        query_confirm = """SELECT * FROM items WHERE name = %s AND price = %s;"""
        self.cur.execute(query_confirm, (name, price))
        self.item = self.cur.fetchone()
        return self.item

    def get_all(self):
        '''Gets all records of items in the database and returns them'''
        model = ModelSetup()
        self.conn = model.conn
        self.cur = model.cur
        query = """SELECT * FROM items;"""
        self.cur.execute(query)
        self.items = self.cur.fetchall()
        return self.items

    def get_by_id(self, item_id):
        '''Retrieves one item by finding it using its unique item_id'''
        model = ModelSetup()
        self.conn = model.conn
        self.cur = model.cur
        query = """SELECT * FROM items WHERE item_id = %s;"""
        self.cur.execute(query, (item_id, ))
        self.item = self.cur.fetchone()
        return self.item

    def get_by_category(self, category):
        '''Retrieves items by finding them using their category.
        All items in the same category are retrieved'''
        model = ModelSetup()
        self.conn = model.conn
        self.cur = model.cur
        query = """SELECT * FROM items WHERE category LIKE %s;"""
        # query parameters must be passed as a tuple
        self.cur.execute(query, (category, ))
        self.item = self.cur.fetchall()
        return self.item

    def get_by_name_and_price(self, name, price):
        '''Retrieves one item by finding it using its unique name and price combination'''
        model = ModelSetup()
        self.conn = model.conn
        self.cur = model.cur
        query = """SELECT * FROM items WHERE name LIKE %s AND price = %s;"""
        self.cur.execute(query, (name, price))
        self.item = self.cur.fetchone()
        return self.item

    def update_item(
            self,
            item_id,
            price,
            quantity,
            image,
            category_id,
            reorder_point,
            auth):
        '''Updates an item's details.
        The values in the db are changed to what is provided'''
        model = ModelSetup()
        self.conn = model.conn
        self.cur = model.cur
        query = """UPDATE items
                SET price = %s, quantity = %s, image = %s, category = %s, reorder_point = %s, created_by = %s
                WHERE item_id = %s
                """
        self.cur.execute(
            query, (price, quantity, image, category_id, reorder_point, auth, item_id))
        self.conn.commit()

        query_confirm = """SELECT * FROM items WHERE item_id = %s;"""
        self.cur.execute(query_confirm, (item_id, ))
        self.item = self.cur.fetchone()
        return self.item

    def update_item_quantity(self, item_id, quantity):
        '''Updates an item's quantity to the value provided'''
        model = ModelSetup()
        self.conn = model.conn
        self.cur = model.cur
        query = """UPDATE items SET quantity = %s WHERE item_id = %s """
        self.cur.execute(query, (quantity, item_id))
        self.conn.commit()

        query_confirm = """SELECT * FROM items WHERE item_id = %s;"""
        self.cur.execute(query_confirm, (item_id, ))
        self.item = self.cur.fetchone()
        return self.item

    def delete_item(self, item_id):
        '''Deletes an item by finding it using the item_id'''
        model = ModelSetup()
        self.conn = model.conn
        self.cur = model.cur
        query = """DELETE FROM items WHERE item_id = %s"""
        self.cur.execute(query, (item_id, ))
        self.conn.commit()

        query_confirm = """SELECT * FROM items;"""
        self.cur.execute(query_confirm)
        self.items = self.cur.fetchall()
        return self.items
[]
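A hedged usage sketch (not in the repository) of ItemsModel above, assuming ModelSetup supplies a working psycopg2 connection and cursor and that the items table already exists; the item values are placeholders and the returned column order is an assumption:

# Hypothetical example; assumes the first column of a returned row is item_id.
items = ItemsModel()
created = items.add_item(
    name='sugar', price=50, quantity=100,
    image='sugar.png', category_id=1, reorder_point=10, auth=1)
print(items.get_by_id(created[0]))           # fetch the new row back by id
print(items.get_by_category('%grocery%'))    # LIKE pattern, so SQL wildcards are allowed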
linxGnu/armeria
site/src/sphinx/_extensions/api.py
7f4b10e66acc377dd16929157aeb417b729ce55a
from docutils.parsers.rst.roles import register_canonical_role, set_classes
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.writers.html import HTMLTranslator
from sphinx.errors import ExtensionError

import os
import re


def api_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
    return api_role_internal(False, role, rawtext, text, lineno, inliner, options, content)


def apiplural_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
    return api_role_internal(True, role, rawtext, text, lineno, inliner, options, content)


def api_role_internal(plural, role, rawtext, text, lineno, inliner, options, content):
    set_classes(options)
    classes = ['code', 'api-reference']
    if 'classes' in options:
        classes.extend(options['classes'])

    node = nodes.literal(rawtext, text, classes=classes, api_reference=True, is_plural=plural)
    return [node], []


def api_visit_literal(self, node, next_visitor):
    if 'api_reference' not in node.attributes:
        return next_visitor(self, node)

    env = self.builder.env
    javadoc_dir = os.path.abspath(env.config['javadoc_dir'])

    # Build the mappings from a simple class name to its Javadoc file.
    # (The original checked '__javadoc_cache__', an attribute that is never set,
    # so the cached mappings were rebuilt on every visit.)
    if not hasattr(env, '__javadoc_mappings__'):
        env.__javadoc_mappings__ = javadoc_mappings = {}
        for dirname, subdirs, files in os.walk(javadoc_dir):
            for basename in files:
                if re.match(r'^[^A-Z]', basename) or not basename.endswith('.html'):
                    # Ignore the non-class files. We rely on the simple assumption that
                    # a class name always starts with an upper-case English alphabet.
                    continue
                simple_class_name = basename[:-5].replace('.', '$')
                javadoc_mappings[simple_class_name] = \
                    os.path.relpath(dirname, javadoc_dir).replace(os.sep, '/') + '/' + basename
    else:
        javadoc_mappings = env.__javadoc_mappings__

    text = node.astext()
    if text.startswith('@'):
        text = text[1:]
        is_annotation = True
    else:
        is_annotation = False

    if text.find('.') != -1:
        # FQCN or package name.
        if re.fullmatch(r'^[^A-Z$]+$', text):
            # Package
            uri = text.replace('.', '/') + '/package-summary.html'
        else:
            # Class
            uri = text.replace('.', '/').replace('$', '.') + '.html'
            text = re.sub(r'^.*\.', '', text).replace('$', '.')
    else:
        # Simple class name; find from the pre-calculated mappings.
        if text not in javadoc_mappings:
            raise ExtensionError('Cannot find a class from Javadoc: ' + text)
        uri = javadoc_mappings[text]
        text = text.replace('$', '.')

    # Prepend the frame index.html path.
    uri = os.path.relpath(javadoc_dir, env.app.outdir).replace(os.sep, '/') + '/index.html?' + uri

    # Prepend the '@' back again if necessary.
    if is_annotation:
        text = '@' + text

    # Emit the tags.
    self.body.append(self.starttag(node, 'code', suffix='', CLASS='docutils literal javadoc'))
    self.body.append(self.starttag(node, 'a', suffix='', CLASS='reference external javadoc', HREF=uri))
    self.body.append(text + '</a>')

    # Append a plural suffix.
    if node.attributes['is_plural']:
        self.body.append(self.starttag(node, 'span', suffix='', CLASS='plural-suffix'))
        if re.fullmatch(r'^.*(ch|s|sh|x|z)$', text):
            self.body.append('es')
        else:
            self.body.append('s')
        self.body.append('</span>')

    self.body.append('</code>')

    raise nodes.SkipNode


def setup(app):
    app.add_config_value('javadoc_dir', os.path.join(app.outdir, 'apidocs'), 'html')

    # Register the 'api' and 'apiplural' roles.
    api_role.options = {'class': directives.class_option}
    register_canonical_role('api', api_role)
    register_canonical_role('apiplural', apiplural_role)

    # Intercept the rendering of HTML literals.
    old_visitor = HTMLTranslator.visit_literal
    HTMLTranslator.visit_literal = lambda self, node: api_visit_literal(self, node, old_visitor)
[((694, 714), 'docutils.parsers.rst.roles.set_classes', 'set_classes', (['options'], {}), '(options)\n', (705, 714), False, 'from docutils.parsers.rst.roles import register_canonical_role, set_classes\n'), ((841, 929), 'docutils.nodes.literal', 'nodes.literal', (['rawtext', 'text'], {'classes': 'classes', 'api_reference': '(True)', 'is_plural': 'plural'}), '(rawtext, text, classes=classes, api_reference=True, is_plural\n =plural)\n', (854, 929), False, 'from docutils import nodes\n'), ((1131, 1173), 'os.path.abspath', 'os.path.abspath', (["env.config['javadoc_dir']"], {}), "(env.config['javadoc_dir'])\n", (1146, 1173), False, 'import os\n'), ((3914, 3954), 'docutils.parsers.rst.roles.register_canonical_role', 'register_canonical_role', (['"""api"""', 'api_role'], {}), "('api', api_role)\n", (3937, 3954), False, 'from docutils.parsers.rst.roles import register_canonical_role, set_classes\n'), ((3959, 4011), 'docutils.parsers.rst.roles.register_canonical_role', 'register_canonical_role', (['"""apiplural"""', 'apiplural_role'], {}), "('apiplural', apiplural_role)\n", (3982, 4011), False, 'from docutils.parsers.rst.roles import register_canonical_role, set_classes\n'), ((1388, 1408), 'os.walk', 'os.walk', (['javadoc_dir'], {}), '(javadoc_dir)\n', (1395, 1408), False, 'import os\n'), ((2273, 2305), 're.fullmatch', 're.fullmatch', (['"""^[^A-Z$]+$"""', 'text'], {}), "('^[^A-Z$]+$', text)\n", (2285, 2305), False, 'import re\n'), ((3494, 3533), 're.fullmatch', 're.fullmatch', (['"""^.*(ch|s|sh|x|z)$"""', 'text'], {}), "('^.*(ch|s|sh|x|z)$', text)\n", (3506, 3533), False, 'import re\n'), ((3771, 3806), 'os.path.join', 'os.path.join', (['app.outdir', '"""apidocs"""'], {}), "(app.outdir, 'apidocs')\n", (3783, 3806), False, 'import os\n'), ((2701, 2760), 'sphinx.errors.ExtensionError', 'ExtensionError', (["('Cannot find a class from Javadoc: ' + text)"], {}), "('Cannot find a class from Javadoc: ' + text)\n", (2715, 2760), False, 'from sphinx.errors import ExtensionError\n'), ((1464, 1493), 're.match', 're.match', (['"""^[^A-Z]"""', 'basename'], {}), "('^[^A-Z]', basename)\n", (1472, 1493), False, 'import re\n'), ((2519, 2545), 're.sub', 're.sub', (['"""^.*\\\\."""', '""""""', 'text'], {}), "('^.*\\\\.', '', text)\n", (2525, 2545), False, 'import re\n'), ((2888, 2932), 'os.path.relpath', 'os.path.relpath', (['javadoc_dir', 'env.app.outdir'], {}), '(javadoc_dir, env.app.outdir)\n', (2903, 2932), False, 'import os\n'), ((1855, 1892), 'os.path.relpath', 'os.path.relpath', (['dirname', 'javadoc_dir'], {}), '(dirname, javadoc_dir)\n', (1870, 1892), False, 'import os\n')]
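A brief sketch (not from the repository) of wiring the extension above into a Sphinx build, assuming the file is importable as api from an _extensions directory added to sys.path; the paths are placeholders:

# conf.py (hypothetical excerpt)
import os
import sys

sys.path.insert(0, os.path.abspath('_extensions'))   # directory containing api.py

extensions = ['api']
javadoc_dir = 'build/apidocs'   # placeholder: where the generated Javadoc lives

# In .rst sources the roles can then be used as, for example:
#   :api:`SomeClass`        -> link to the Javadoc page for SomeClass
#   :apiplural:`SomeClass`  -> the same link rendered with a plural suffix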
tsuru/varnishapi
feaas/runners/__init__.py
d63a8c8c5f9c837855509fc5af59d8213c1c91d6
# Copyright 2014 varnishapi authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.

import time

from feaas import storage


class Base(object):

    def __init__(self, manager, interval, *locks):
        self.manager = manager
        self.storage = manager.storage
        self.interval = interval

    def init_locker(self, *lock_names):
        self.locker = storage.MultiLocker(self.storage)
        for lock_name in lock_names:
            self.locker.init(lock_name)

    def loop(self):
        self.running = True
        while self.running:
            self.run()
            time.sleep(self.interval)

    def stop(self):
        self.running = False
[((440, 473), 'feaas.storage.MultiLocker', 'storage.MultiLocker', (['self.storage'], {}), '(self.storage)\n', (459, 473), False, 'from feaas import storage\n'), ((663, 688), 'time.sleep', 'time.sleep', (['self.interval'], {}), '(self.interval)\n', (673, 688), False, 'import time\n')]
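A small sketch (not part of varnishapi) of how a concrete runner might subclass Base above; the run() body is illustrative only and relies solely on behaviour visible in this file:

# Hypothetical runner: Base.loop() calls run() every `interval` seconds until stop() is called.
class DummyRunner(Base):
    def __init__(self, manager, interval=10):
        super(DummyRunner, self).__init__(manager, interval)

    def run(self):
        # one unit of work per iteration, e.g. inspect self.storage
        print('tick')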