Dataset columns (name, type, and value or length range):

    Unnamed: 0                 int64          0 to 10k
    repository_name            stringlengths  7 to 54
    func_path_in_repository    stringlengths  5 to 223
    func_name                  stringlengths  1 to 134
    whole_func_string          stringlengths  100 to 30.3k
    language                   stringclasses  1 value
    func_code_string           stringlengths  100 to 30.3k
    func_code_tokens           stringlengths  138 to 33.2k
    func_documentation_string  stringlengths  1 to 15k
    func_documentation_tokens  stringlengths  5 to 5.14k
    split_name                 stringclasses  1 value
    func_code_url              stringlengths  91 to 315
Row 9,200
repository_name: edx/pa11ycrawler
func_path_in_repository: pa11ycrawler/pipelines/pa11y.py
func_name: write_pa11y_results
whole_func_string:

    def write_pa11y_results(item, pa11y_results, data_dir):
        """
        Write the output from pa11y into a data file.
        """
        data = dict(item)
        data['pa11y'] = pa11y_results

        # it would be nice to use the URL as the filename,
        # but that gets complicated (long URLs, special characters, etc)
        # so we'll make the filename a hash of the URL instead,
        # and throw in the access time so that we can store the same URL
        # multiple times in this data directory
        hasher = hashlib.md5()
        hasher.update(item["url"].encode('utf8'))
        hasher.update(item["accessed_at"].isoformat().encode('utf8'))
        basename = hasher.hexdigest()
        filename = basename + ".json"
        filepath = data_dir / filename
        data_dir.makedirs_p()
        text = json.dumps(data, cls=DateTimeEncoder)
        filepath.write_text(text)

language: python
func_documentation_string: Write the output from pa11y into a data file.
split_name: train
func_code_url: https://github.com/edx/pa11ycrawler/blob/fc672d4524463bc050ade4c7c97801c0d5bf8c9e/pa11ycrawler/pipelines/pa11y.py#L141-L161
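A note on the pattern above: hashing the URL together with the access time yields a short, filesystem-safe filename while still allowing the same URL to be stored more than once. A minimal standalone sketch of the same scheme using only the standard library (the original appears to rely on a path-object API with makedirs_p; pathlib and the names below are illustrative):

    import hashlib
    import json
    from datetime import datetime
    from pathlib import Path

    def results_path(url, accessed_at, data_dir):
        # Hash URL + access time so the same URL can be stored multiple times.
        hasher = hashlib.md5()
        hasher.update(url.encode('utf8'))
        hasher.update(accessed_at.isoformat().encode('utf8'))
        return Path(data_dir) / (hasher.hexdigest() + '.json')

    path = results_path('https://example.com/a', datetime(2024, 1, 1), '/tmp/data')
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text(json.dumps({'url': 'https://example.com/a'}))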
Row 9,201
repository_name: fermiPy/fermipy
func_path_in_repository: fermipy/diffuse/name_policy.py
func_name: NameFactory._replace_none
whole_func_string:

    def _replace_none(self, aDict):
        """ Replace all None values in a dict with 'none' """
        for k, v in aDict.items():
            if v is None:
                aDict[k] = 'none'

language: python
func_documentation_string: Replace all None values in a dict with 'none'
split_name: train
func_code_url: https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/name_policy.py#L136-L140
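The helper mutates the dict in place (reassigning existing keys during items() iteration is safe, since the key set does not change). A quick standalone illustration of the intended effect, with made-up keys:

    def replace_none(a_dict):
        # Substitute the string 'none' for every None value, in place.
        for k, v in a_dict.items():
            if v is None:
                a_dict[k] = 'none'

    d = {'name': 'ccube', 'component': None}
    replace_none(d)
    assert d == {'name': 'ccube', 'component': 'none'}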
Row 9,202
repository_name: reingart/pyafipws
func_path_in_repository: wslpg.py
func_name: WSLPG.CrearAjusteBase
whole_func_string:

    def CrearAjusteBase(self, pto_emision=1, nro_orden=None,  # unificado, contrato, papel
                        coe_ajustado=None,             # unificado
                        nro_contrato=None,             # contrato
                        tipo_formulario=None,          # papel
                        nro_formulario=None,           # papel
                        actividad=None,                # contrato / papel
                        cod_grano=None,                # contrato / papel
                        cuit_vendedor=None,            # contrato / papel
                        cuit_comprador=None,           # contrato / papel
                        cuit_corredor=None,            # contrato / papel
                        nro_ing_bruto_vendedor=None,   # papel
                        nro_ing_bruto_comprador=None,  # papel
                        nro_ing_bruto_corredor=None,   # papel
                        tipo_operacion=None,           # papel
                        precio_ref_tn=None,            # contrato
                        cod_grado_ent=None,            # contrato
                        val_grado_ent=None,            # contrato
                        precio_flete_tn=None,          # contrato
                        cod_puerto=None,               # contrato
                        des_puerto_localidad=None,     # contrato
                        cod_provincia=None,            # unificado, contrato, papel
                        cod_localidad=None,            # unificado, contrato, papel
                        comision_corredor=None,        # papel
                        **kwargs):
        "Inicializa internamente los datos de una liquidación para ajustar"
        # ajusto nombre de campos para compatibilidad hacia atrás (encabezado):
        if 'cod_localidad_procedencia' in kwargs:
            cod_localidad = kwargs['cod_localidad_procedencia']
        if 'cod_provincia_procedencia' in kwargs:
            cod_provincia = kwargs['cod_provincia_procedencia']
        if 'nro_act_comprador' in kwargs:
            actividad = kwargs['nro_act_comprador']
        if 'cod_tipo_operacion' in kwargs:
            tipo_operacion = kwargs['cod_tipo_operacion']
        # limpio los campos especiales (segun validaciones de AFIP)
        if val_grado_ent == 0:
            val_grado_ent = None
        # borrando datos si no corresponden
        if cuit_corredor and int(cuit_corredor) == 0:
            cuit_corredor = None
            comision_corredor = None
            nro_ing_bruto_corredor = None
        if cod_puerto and int(cod_puerto) != 14:
            des_puerto_localidad = None  # validacion 1630
        # limpio los campos opcionales para no enviarlos si no corresponde:
        if cod_grado_ent == "":
            cod_grado_ent = None
        if val_grado_ent == 0:
            val_grado_ent = None
        # creo el diccionario con los campos generales del ajuste base:
        self.ajuste = {'ajusteBase': {
            'ptoEmision': pto_emision,
            'nroOrden': nro_orden,
            'coeAjustado': coe_ajustado,
            'nroContrato': nro_contrato,
            'tipoFormulario': tipo_formulario,
            'nroFormulario': nro_formulario,
            'actividad': actividad,
            'codGrano': cod_grano,
            'cuitVendedor': cuit_vendedor,
            'cuitComprador': cuit_comprador,
            'cuitCorredor': cuit_corredor,
            'nroIngBrutoVendedor': nro_ing_bruto_vendedor,
            'nroIngBrutoComprador': nro_ing_bruto_comprador,
            'nroIngBrutoCorredor': nro_ing_bruto_corredor,
            'tipoOperacion': tipo_operacion,
            'codPuerto': cod_puerto,
            'desPuertoLocalidad': des_puerto_localidad,
            'comisionCorredor': comision_corredor,
            'precioRefTn': precio_ref_tn,
            'codGradoEnt': cod_grado_ent,
            'valGradoEnt': val_grado_ent,
            'precioFleteTn': precio_flete_tn,
            'codLocalidad': cod_localidad,
            'codProv': cod_provincia,
            'certificados': [],
        }}
        # para compatibilidad con AgregarCertificado
        self.liquidacion = self.ajuste['ajusteBase']
        # inicializar temporales
        self.__ajuste_base = None
        self.__ajuste_debito = None
        self.__ajuste_credito = None
        return True

language: python
func_documentation_string: Inicializa internamente los datos de una liquidación para ajustar (English: internally initializes the data of a settlement to be adjusted)
split_name: train
func_code_url: https://github.com/reingart/pyafipws/blob/ee87cfe4ac12285ab431df5fec257f103042d1ab/wslpg.py#L1188-L1279
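CrearAjusteBase accepts legacy keyword names (e.g. cod_localidad_procedencia) through **kwargs and copies them onto the current parameters. A compact, hypothetical way to express the same backward-compatibility mapping with a lookup table (not pyafipws API, just a sketch of the pattern):

    # Hypothetical refactor of the alias handling shown above.
    LEGACY_ALIASES = {
        'cod_localidad_procedencia': 'cod_localidad',
        'cod_provincia_procedencia': 'cod_provincia',
        'nro_act_comprador': 'actividad',
        'cod_tipo_operacion': 'tipo_operacion',
    }

    def apply_legacy_aliases(params, kwargs):
        # Copy any legacy keyword onto its current name.
        for old, new in LEGACY_ALIASES.items():
            if old in kwargs:
                params[new] = kwargs[old]
        return params

    params = apply_legacy_aliases({'cod_localidad': None},
                                  {'cod_localidad_procedencia': 5544})
    assert params['cod_localidad'] == 5544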
Row 9,203
repository_name: ArchiveTeam/wpull
func_path_in_repository: wpull/writer.py
func_name: BaseFileWriterSession.set_timestamp
whole_func_string:

    def set_timestamp(cls, filename: str, response: HTTPResponse):
        '''Set the Last-Modified timestamp onto the given file.

        Args:
            filename: The path of the file
            response: Response
        '''
        last_modified = response.fields.get('Last-Modified')

        if not last_modified:
            return

        try:
            last_modified = email.utils.parsedate(last_modified)
        except ValueError:
            _logger.exception('Failed to parse date.')
            return

        last_modified = time.mktime(last_modified)

        os.utime(filename, (time.time(), last_modified))

language: python
func_documentation_string:

    Set the Last-Modified timestamp onto the given file.

    Args:
        filename: The path of the file
        response: Response
split_name: train
func_code_url: https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/writer.py#L126-L146
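Worth noting: email.utils.parsedate returns None for most unparseable inputs rather than raising ValueError, so the try/except above may not catch a bad header, and time.mktime(None) would then raise TypeError. A defensive standalone sketch of the same idea, using only the standard library (illustrative names, not wpull API):

    import email.utils
    import os
    import time

    def set_mtime_from_header(filename, last_modified_header):
        # parsedate returns None on failure instead of raising.
        parsed = email.utils.parsedate(last_modified_header)
        if parsed is None:
            return
        mtime = time.mktime(parsed)
        # Keep the current access time, set the parsed modification time.
        os.utime(filename, (time.time(), mtime))

    with open('example.txt', 'w'):
        pass
    set_mtime_from_header('example.txt', 'Wed, 21 Oct 2015 07:28:00 GMT')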
Row 9,204
repository_name: log2timeline/dfvfs
func_path_in_repository: dfvfs/file_io/lvm_file_io.py
func_name: LVMFile._Open
whole_func_string:

    def _Open(self, path_spec=None, mode='rb'):
        """Opens the file-like object defined by path specification.

        Args:
            path_spec (PathSpec): path specification.
            mode (Optional[str]): file access mode.

        Raises:
            AccessError: if the access to open the file was denied.
            IOError: if the file-like object could not be opened.
            OSError: if the file-like object could not be opened.
            PathSpecError: if the path specification is incorrect.
            ValueError: if the path specification is invalid.
        """
        if not path_spec:
            raise ValueError('Missing path specfication.')

        volume_index = lvm.LVMPathSpecGetVolumeIndex(path_spec)
        if volume_index is None:
            raise errors.PathSpecError(
                'Unable to retrieve volume index from path specification.')

        self._file_system = resolver.Resolver.OpenFileSystem(
            path_spec, resolver_context=self._resolver_context)

        vslvm_volume_group = self._file_system.GetLVMVolumeGroup()

        if (volume_index < 0 or
                volume_index >= vslvm_volume_group.number_of_logical_volumes):
            raise errors.PathSpecError((
                'Unable to retrieve LVM logical volume index: {0:d} from path '
                'specification.').format(volume_index))

        self._vslvm_logical_volume = vslvm_volume_group.get_logical_volume(
            volume_index)

language: python
func_documentation_string:

    Opens the file-like object defined by path specification.

    Args:
        path_spec (PathSpec): path specification.
        mode (Optional[str]): file access mode.

    Raises:
        AccessError: if the access to open the file was denied.
        IOError: if the file-like object could not be opened.
        OSError: if the file-like object could not be opened.
        PathSpecError: if the path specification is incorrect.
        ValueError: if the path specification is invalid.
split_name: train
func_code_url: https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/lvm_file_io.py#L34-L67
Row 9,205
repository_name: hozn/stravalib
func_path_in_repository: stravalib/attributes.py
func_name: EntityAttribute.unmarshal
whole_func_string:

    def unmarshal(self, value, bind_client=None):
        """
        Cast the specified value to the entity type.
        """
        #self.log.debug("Unmarshall {0!r}: {1!r}".format(self, value))
        if not isinstance(value, self.type):
            o = self.type()
            if bind_client is not None and hasattr(o.__class__, 'bind_client'):
                o.bind_client = bind_client
            if isinstance(value, dict):
                for (k, v) in value.items():
                    if not hasattr(o.__class__, k):
                        self.log.warning("Unable to set attribute {0} on entity {1!r}".format(k, o))
                    else:
                        #self.log.debug("Setting attribute {0} on entity {1!r}".format(k, o))
                        setattr(o, k, v)
                value = o
            else:
                raise Exception("Unable to unmarshall object {0!r}".format(value))
        return value

language: python
func_documentation_string: Cast the specified value to the entity type.
split_name: train
func_code_url: https://github.com/hozn/stravalib/blob/5500ebc39e0bf4706bb1ca4c27b25e56becaaa5f/stravalib/attributes.py#L337-L357
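The method above hydrates an entity from a dict, copying only keys the target class declares and warning about the rest. A stripped-down standalone version of that hydration pattern (illustrative class and names, not the stravalib API):

    import logging

    log = logging.getLogger(__name__)

    class Athlete:
        # Declared attributes act as the whitelist for hydration.
        firstname = None
        lastname = None

    def hydrate(cls, data):
        # Build an instance and copy over only declared attributes.
        obj = cls()
        for k, v in data.items():
            if hasattr(cls, k):
                setattr(obj, k, v)
            else:
                log.warning("Unable to set attribute %s on entity %r", k, obj)
        return obj

    a = hydrate(Athlete, {'firstname': 'Jo', 'unknown_field': 1})
    assert a.firstname == 'Jo'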
Row 9,206
repository_name: mlperf/training
func_path_in_repository: image_classification/tensorflow/official/resnet/imagenet_main.py
func_name: parse_record
whole_func_string:

    def parse_record(raw_record, is_training, dtype):
        """Parses a record containing a training example of an image.

        The input record is parsed into a label and image, and the image is passed
        through preprocessing steps (cropping, flipping, and so on).

        Args:
            raw_record: scalar Tensor tf.string containing a serialized
                Example protocol buffer.
            is_training: A boolean denoting whether the input is for training.
            dtype: data type to use for images/features.

        Returns:
            Tuple with processed image tensor and one-hot-encoded label tensor.
        """
        image_buffer, label = _parse_example_proto(raw_record)

        image = imagenet_preprocessing.preprocess_image(
            image_buffer=image_buffer,
            output_height=_DEFAULT_IMAGE_SIZE,
            output_width=_DEFAULT_IMAGE_SIZE,
            num_channels=_NUM_CHANNELS,
            is_training=is_training)
        image = tf.cast(image, dtype)

        return image, label

language: python
func_documentation_string:

    Parses a record containing a training example of an image.

    The input record is parsed into a label and image, and the image is passed
    through preprocessing steps (cropping, flipping, and so on).

    Args:
        raw_record: scalar Tensor tf.string containing a serialized Example protocol buffer.
        is_training: A boolean denoting whether the input is for training.
        dtype: data type to use for images/features.

    Returns:
        Tuple with processed image tensor and one-hot-encoded label tensor.
split_name: train
func_code_url: https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/image_classification/tensorflow/official/resnet/imagenet_main.py#L120-L145
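In the surrounding pipeline a parser like this is typically mapped over a TFRecord dataset. A hedged sketch of how parse_record would be wired up (the real input_fn in imagenet_main.py also shuffles and prefetches; the filename is a placeholder):

    import functools
    import tensorflow as tf

    # Hypothetical wiring, not the repo's exact input_fn.
    dataset = tf.data.TFRecordDataset(['train-00000-of-01024'])
    dataset = dataset.map(
        functools.partial(parse_record, is_training=True, dtype=tf.float32))
    dataset = dataset.batch(32)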
Row 9,207
repository_name: flatangle/flatlib
func_path_in_repository: flatlib/angle.py
func_name: slistStr
whole_func_string:

    def slistStr(slist):
        """ Converts signed list to angle string. """
        slist = _fixSlist(slist)
        string = ':'.join(['%02d' % x for x in slist[1:]])
        return slist[0] + string

language: python
func_documentation_string: Converts signed list to angle string.
split_name: train
func_code_url: https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/angle.py#L76-L80
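Assuming _fixSlist normalizes the input into a sign character followed by integer degree/minute/second values, the rendering step produces zero-padded, colon-separated angles. A sketch of just that rendering, with hypothetical values:

    def slist_str(slist):
        # Same rendering as slistStr, minus the _fixSlist normalization step.
        return slist[0] + ':'.join(['%02d' % x for x in slist[1:]])

    assert slist_str(['+', 12, 30, 45]) == '+12:30:45'
    assert slist_str(['-', 5, 0, 0]) == '-05:00:00'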
Row 9,208
repository_name: horazont/aioxmpp
func_path_in_repository: aioxmpp/im/service.py
func_name: ConversationService._add_conversation
whole_func_string:

    def _add_conversation(self, conversation):
        """
        Add the conversation and fire the :meth:`on_conversation_added` event.

        :param conversation: The conversation object to add.
        :type conversation: :class:`~.AbstractConversation`

        The conversation is added to the internal list of conversations which
        can be queried at :attr:`conversations`. The
        :meth:`on_conversation_added` event is fired.

        In addition, the :class:`ConversationService` subscribes to the
        :meth:`~.AbstractConversation.on_exit` event to remove the conversation
        from the list automatically. There is no need to remove a conversation
        from the list explicitly.
        """
        handler = functools.partial(
            self._handle_conversation_exit,
            conversation
        )
        tokens = []

        def linked_token(signal, handler):
            return signal, signal.connect(handler)

        tokens.append(linked_token(conversation.on_exit, handler))
        tokens.append(linked_token(conversation.on_failure, handler))
        tokens.append(linked_token(conversation.on_message, functools.partial(
            self.on_message,
            conversation,
        )))

        self._conversation_meta[conversation] = (
            tokens,
        )
        self._conversation_map[conversation.jid] = conversation
        self.on_conversation_added(conversation)

language: python
func_documentation_string:

    Add the conversation and fire the :meth:`on_conversation_added` event.

    :param conversation: The conversation object to add.
    :type conversation: :class:`~.AbstractConversation`

    The conversation is added to the internal list of conversations which can be
    queried at :attr:`conversations`. The :meth:`on_conversation_added` event is
    fired.

    In addition, the :class:`ConversationService` subscribes to the
    :meth:`~.AbstractConversation.on_exit` event to remove the conversation from
    the list automatically. There is no need to remove a conversation from the
    list explicitly.
split_name: train
func_code_url: https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/im/service.py#L99-L135
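The (signal, token) pairs collected above make later cleanup possible: disconnecting requires both the signal and the token its connect() returned. A schematic sketch of that bookkeeping pattern (a minimal stand-in class, not the aioxmpp API):

    class Signal:
        # Minimal stand-in for an aioxmpp-style signal.
        def __init__(self):
            self._handlers = {}
            self._next = 0

        def connect(self, handler):
            token = self._next
            self._next += 1
            self._handlers[token] = handler
            return token

        def disconnect(self, token):
            self._handlers.pop(token, None)

    on_exit = Signal()
    tokens = [(on_exit, on_exit.connect(lambda: None))]

    # Later, tear everything down using the stored pairs.
    for signal, token in tokens:
        signal.disconnect(token)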
Row 9,209
repository_name: apple/turicreate
func_path_in_repository: src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py
func_name: StringDecoder
whole_func_string:

    def StringDecoder(field_number, is_repeated, is_packed, key, new_default):
        """Returns a decoder for a string field."""

        local_DecodeVarint = _DecodeVarint
        local_unicode = six.text_type

        def _ConvertToUnicode(byte_str):
            try:
                return local_unicode(byte_str, 'utf-8')
            except UnicodeDecodeError as e:
                # add more information to the error message and re-raise it.
                e.reason = '%s in field: %s' % (e, key.full_name)
                raise

        assert not is_packed
        if is_repeated:
            tag_bytes = encoder.TagBytes(field_number,
                                         wire_format.WIRETYPE_LENGTH_DELIMITED)
            tag_len = len(tag_bytes)

            def DecodeRepeatedField(buffer, pos, end, message, field_dict):
                value = field_dict.get(key)
                if value is None:
                    value = field_dict.setdefault(key, new_default(message))
                while 1:
                    (size, pos) = local_DecodeVarint(buffer, pos)
                    new_pos = pos + size
                    if new_pos > end:
                        raise _DecodeError('Truncated string.')
                    value.append(_ConvertToUnicode(buffer[pos:new_pos]))
                    # Predict that the next tag is another copy of the same repeated field.
                    pos = new_pos + tag_len
                    if buffer[new_pos:pos] != tag_bytes or new_pos == end:
                        # Prediction failed.  Return.
                        return new_pos
            return DecodeRepeatedField
        else:
            def DecodeField(buffer, pos, end, message, field_dict):
                (size, pos) = local_DecodeVarint(buffer, pos)
                new_pos = pos + size
                if new_pos > end:
                    raise _DecodeError('Truncated string.')
                field_dict[key] = _ConvertToUnicode(buffer[pos:new_pos])
                return new_pos
            return DecodeField

language: python
func_documentation_string: Returns a decoder for a string field.
split_name: train
func_code_url: https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py#L461-L504
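The decoder relies on protobuf's length-delimited wire format: a varint length prefix followed by that many UTF-8 bytes. A self-contained sketch of that format (simplified; the real _DecodeVarint handles more edge cases):

    def decode_varint(buf, pos):
        # Little-endian base-128 varint, as used by protobuf.
        result = shift = 0
        while True:
            b = buf[pos]
            pos += 1
            result |= (b & 0x7F) << shift
            if not b & 0x80:
                return result, pos
            shift += 7

    def decode_string(buf, pos, end):
        # Length prefix, then that many UTF-8 bytes.
        size, pos = decode_varint(buf, pos)
        new_pos = pos + size
        if new_pos > end:
            raise ValueError('Truncated string.')
        return buf[pos:new_pos].decode('utf-8'), new_pos

    buf = b'\x05hello'
    value, _ = decode_string(buf, 0, len(buf))
    assert value == 'hello'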
Row 9,210
repository_name: crs4/hl7apy
func_path_in_repository: hl7apy/validation.py
func_name: Validator.validate
whole_func_string:

    def validate(element, reference=None, report_file=None):
        """
        Checks if the :class:`Element <hl7apy.core.Element>` is a valid HL7 message according to the reference
        specified. If the reference is not specified, it will be used the official HL7 structures for the elements.
        In particular it checks:

        * the maximum and minimum number of occurrences for every child
        * that children are all allowed
        * the datatype of fields, components and subcomponents
        * the values, in particular the length and the adherence with the HL7 table, if one is specified

        It raises the first exception that it finds.

        If :attr:`report_file` is specified, it will create a file with all the errors that occur.

        :param element: :class:`Element <hl7apy.core.Element>`: The element to validate
        :param reference: the reference to use. Usually is None or a message profile object
        :param report_file: the name of the report file to create

        :return: The True if everything is ok

        :raises: :exc:`ValidationError <hl7apy.exceptions.ValidationError>`: when errors occur
        :raises: :exc:`ValidationWarning <hl7apy.exceptions.ValidationWarning>`: errors concerning the values
        """
        from hl7apy.core import is_base_datatype

        def _check_z_element(el, errs, warns):
            if el.classname == 'Field':
                if is_base_datatype(el.datatype, el.version) or \
                        el.datatype == 'varies':
                    return True
                elif el.datatype is not None:
                    # if the datatype the is a complex datatype, the z element must follow the correct
                    # structure of that datatype
                    # Component just to search in the datatypes....
                    dt_struct = load_reference(el.datatype, 'Datatypes_Structs', el.version)
                    ref = ('sequence', dt_struct, el.datatype, None, None, -1)
                    _check_known_element(el, ref, errs, warns)
            for c in el.children:
                _is_valid(c, None, errs, warns)
            return True

        def _check_repetitions(el, children, cardinality, child_name, errs):
            children_num = len(children)
            min_repetitions, max_repetitions = cardinality
            if max_repetitions != -1:
                if children_num < min_repetitions:
                    errs.append(ValidationError("Missing required child {}.{}".format(el.name, child_name)))
                elif children_num > max_repetitions:
                    errs.append(ValidationError("Child limit exceeded {}.{}".format(child_name, el.name)))
            else:
                if children_num < min_repetitions:
                    errs.append(ValidationError("Missing required child {}.{}".format(el.name, child_name)))

        def _check_table_compliance(el, ref, warns):
            table = ref[4]
            if table is not None:
                try:
                    table_ref = load_reference(table, 'Table', el.version)
                except ChildNotFound:
                    pass
                else:
                    table_children = table_ref[1]
                    if el.to_er7() not in table_children:
                        warns.append(ValidationWarning("Value {} not in table {} in element {}.{}".
                                                       format(el.to_er7(), table, el.parent.name, el.name)))

        def _check_length(el, ref, warns):
            max_length = ref[5]
            if -1 < max_length < len(el.to_er7()):
                warns.append(ValidationWarning("Exceeded max length ({}) of {}.{}".
                                               format(max_length, el.parent.name, el.name)))

        def _check_datatype(el, ref, errs):
            ref_datatype = ref[2]
            if el.datatype != ref_datatype:
                errs.append(ValidationError("Datatype {} is not correct for {}.{} (it must be {})".
                                            format(el.datatype, el.parent.name, el.name, ref[1])))

        def _get_valid_children_info(ref):
            valid_children = {c[0] for c in ref[1]}
            children_refs = ref[1]
            return valid_children, children_refs

        def _get_child_reference_info(ref):
            child_name, cardinality = ref[0], ref[2]
            return child_name, cardinality

        def _check_known_element(el, ref, errs, warns):
            if ref is None:
                try:
                    ref = load_reference(el.name, el.classname, el.version)
                except ChildNotFound:
                    errs.append(ValidationError("Invalid element found: {}".format(el)))
            if ref[0] in ('sequence', 'choice'):
                element_children = {c.name for c in el.children if not c.is_z_element()}
                valid_children, valid_children_refs = _get_valid_children_info(ref)
                # check that the children are all allowed children
                if not element_children <= valid_children:
                    errs.append(ValidationError("Invalid children detected for {}: {}".
                                                format(el, list(element_children - valid_children))))
                # iterates the valid children
                for child_ref in valid_children_refs:
                    # it gets the structure of the children to check
                    child_name, cardinality = _get_child_reference_info(child_ref)
                    try:
                        # it gets all the occurrences of the children of a type
                        children = el.children.get(child_name)
                    except Exception:
                        # TODO: it is due to the lack of element in the official reference files... should
                        # we raise an exception here?
                        pass
                    else:
                        _check_repetitions(el, children, cardinality, child_name, errs)
                        # calls validation for every children
                        for c in children:
                            _is_valid(c, child_ref[1], errs, warns)
                # finally calls validation for z_elements
                z_children = [c for c in el.children if c.is_z_element()]
                for c in z_children:
                    _is_valid(c, None, errs, warns)
            else:
                _check_table_compliance(el, ref, warns)
                _check_length(el, ref, warns)
                if el.datatype == 'varies':  # TODO: it should check the real rule
                    return True
                _check_datatype(el, ref, errs)
                # For complex datatypes element, the reference is the one of the datatype
                if not is_base_datatype(el.datatype, el.version):
                    # Component just to search in the datatypes....
                    ref = load_reference(el.datatype, 'Datatypes_Structs', el.version)
                    _is_valid(el, ref, errs, warns)

        def _is_valid(el, ref, errs, warns):
            if el.is_unknown():
                errs.append(ValidationError("Unknown element found: {}.{}".format(el.parent, el)))
                return
            if el.is_z_element():
                return _check_z_element(el, errs, warns)
            return _check_known_element(el, ref, errs, warns)

        errors = []
        warnings = []
        _is_valid(element, reference, errors, warnings)

        if report_file is not None:
            with open(report_file, "w") as f:
                for e in errors:
                    f.write("Error: {}\n".format(e))
                for w in warnings:
                    f.write("Warning: {}\n".format(w))

        if errors:
            raise errors[0]

        return True

language: python
func_documentation_string:

    Checks if the :class:`Element <hl7apy.core.Element>` is a valid HL7 message according to the reference
    specified. If the reference is not specified, it will be used the official HL7 structures for the elements.
    In particular it checks:

    * the maximum and minimum number of occurrences for every child
    * that children are all allowed
    * the datatype of fields, components and subcomponents
    * the values, in particular the length and the adherence with the HL7 table, if one is specified

    It raises the first exception that it finds.

    If :attr:`report_file` is specified, it will create a file with all the errors that occur.

    :param element: :class:`Element <hl7apy.core.Element>`: The element to validate
    :param reference: the reference to use. Usually is None or a message profile object
    :param report_file: the name of the report file to create

    :return: The True if everything is ok

    :raises: :exc:`ValidationError <hl7apy.exceptions.ValidationError>`: when errors occur
    :raises: :exc:`ValidationWarning <hl7apy.exceptions.ValidationWarning>`: errors concerning the values
['Checks', 'if', 'the', ':', 'class', ':', 'Element', '<hl7apy', '.', 'core', '.', 'Element', '>', 'is', 'a', 'valid', 'HL7', 'message', 'according', 'to', 'the', 'reference', 'specified', '.', 'If', 'the', 'reference', 'is', 'not', 'specified', 'it', 'will', 'be', 'used', 'the', 'official', 'HL7', 'structures', 'for', 'the', 'elements', '.', 'In', 'particular', 'it', 'checks', ':']
train
https://github.com/crs4/hl7apy/blob/91be488e9274f6ec975519a1d9c17045bc91bf74/hl7apy/validation.py#L40-L210
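A minimal usage sketch for the validation routine above, assuming it is exposed as Validator.validate in hl7apy.validation (the ER7 message string and report file name are illustrative):

from hl7apy.parser import parse_message
from hl7apy.validation import Validator

# ER7 segments are separated by carriage returns; this two-segment ADT message
# is illustrative only.
er7 = ("MSH|^~\\&|SENDER|CLINIC|RECEIVER|LAB|20240101||ADT^A01|1|P|2.5\r"
       "PID|1||12345^^^HOSP||DOE^JOHN")
msg = parse_message(er7)
try:
    Validator.validate(msg, report_file="validation_report.txt")
    print("message is valid")
except Exception as exc:  # a ValidationError is raised on the first failure
    print("validation failed:", exc)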
9,211
burnash/gspread
gspread/models.py
Spreadsheet.worksheets
def worksheets(self): """Returns a list of all :class:`worksheets <gspread.models.Worksheet>` in a spreadsheet. """ sheet_data = self.fetch_sheet_metadata() return [Worksheet(self, x['properties']) for x in sheet_data['sheets']]
python
def worksheets(self): """Returns a list of all :class:`worksheets <gspread.models.Worksheet>` in a spreadsheet. """ sheet_data = self.fetch_sheet_metadata() return [Worksheet(self, x['properties']) for x in sheet_data['sheets']]
['def', 'worksheets', '(', 'self', ')', ':', 'sheet_data', '=', 'self', '.', 'fetch_sheet_metadata', '(', ')', 'return', '[', 'Worksheet', '(', 'self', ',', 'x', '[', "'properties'", ']', ')', 'for', 'x', 'in', 'sheet_data', '[', "'sheets'", ']', ']']
Returns a list of all :class:`worksheets <gspread.models.Worksheet>` in a spreadsheet.
['Returns', 'a', 'list', 'of', 'all', ':', 'class', ':', 'worksheets', '<gspread', '.', 'models', '.', 'Worksheet', '>', 'in', 'a', 'spreadsheet', '.']
train
https://github.com/burnash/gspread/blob/0e8debe208095aeed3e3e7136c2fa5cd74090946/gspread/models.py#L211-L217
9,212
esterhui/pypu
pypu/pusher.py
status._computeStatus
def _computeStatus(self, dfile, service): """Computes status for a file; basically, this means that if more than one service handles the file, it will place a 'C' (for complicated); otherwise, if the status matches between all services, it will place that status""" # If only one service requested if service: if not dfile['services'].has_key(service): return self.ST_UNTRACKED else: return dfile['services'][service]['status'] # Otherwise go through all services and compute # a sensible status first_service_key=dfile['services'].keys()[0] # Save off one of the statuses so we can compute # if they are all the same between services. first_status=dfile['services'][first_service_key]['status'] all_status_match=True # Return ST_COMPLICATED "C" if status # differs for service in dfile['services']: if dfile['services'][service]['status']!=first_status: return self.ST_COMPLICATED return first_status
python
def _computeStatus(self, dfile, service): """Computes status for a file; basically, this means that if more than one service handles the file, it will place a 'C' (for complicated); otherwise, if the status matches between all services, it will place that status""" # If only one service requested if service: if not dfile['services'].has_key(service): return self.ST_UNTRACKED else: return dfile['services'][service]['status'] # Otherwise go through all services and compute # a sensible status first_service_key=dfile['services'].keys()[0] # Save off one of the statuses so we can compute # if they are all the same between services. first_status=dfile['services'][first_service_key]['status'] all_status_match=True # Return ST_COMPLICATED "C" if status # differs for service in dfile['services']: if dfile['services'][service]['status']!=first_status: return self.ST_COMPLICATED return first_status
['def', '_computeStatus', '(', 'self', ',', 'dfile', ',', 'service', ')', ':', '# If only one service requested', 'if', 'service', ':', 'if', 'not', 'dfile', '[', "'services'", ']', '.', 'has_key', '(', 'service', ')', ':', 'return', 'self', '.', 'ST_UNTRACKED', 'else', ':', 'return', 'dfile', '[', "'services'", ']', '[', 'service', ']', '[', "'status'", ']', '# Otherwise go through all services and compute', '# a sensible status', 'first_service_key', '=', 'dfile', '[', "'services'", ']', '.', 'keys', '(', ')', '[', '0', ']', '# Save off one of the statuses so we can compute', '# if they are all the same between services.', 'first_status', '=', 'dfile', '[', "'services'", ']', '[', 'first_service_key', ']', '[', "'status'", ']', 'all_status_match', '=', 'True', '# Return ST_COMPLICATED "C" if status', '# differs', 'for', 'service', 'in', 'dfile', '[', "'services'", ']', ':', 'if', 'dfile', '[', "'services'", ']', '[', 'service', ']', '[', "'status'", ']', '!=', 'first_status', ':', 'return', 'self', '.', 'ST_COMPLICATED', 'return', 'first_status']
Computes status for a file; basically, this means that if more than one service handles the file, it will place a 'C' (for complicated); otherwise, if the status matches between all services, it will place that status
['Computes', 'status', 'for', 'a', 'file', 'basically', 'this', 'means', 'that', 'if', 'more', 'than', 'one', 'service', 'handles', 'the', 'file', 'it', 'will', 'place', 'a', 'C', '(', 'for', 'complicated', ')', 'otherwise', 'if', 'the', 'status', 'matches', 'between', 'all', 'services', 'it', 'will', 'place', 'that', 'status']
train
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/pusher.py#L89-L118
9,213
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/configuration.py
Config.validate
def validate(self): """Validate / fix up the current config""" if not self.get('api_key'): raise ValueError("api_key not found in config. Please see documentation.") host = self.get('host') or DEFAULT_CLOUD_HOST if host: # remove extraneous slashes and force to byte string # otherwise msg += message_body in httplib will fail in python2 # when message_body contains binary data, and url is unicode # remaining failure modes include at least: # passing bytes in python3 will fail as we try to strip unicode '/' characters # passing unicode code points in python2 will fail due to httplib host.encode('ascii') host = host.strip('/') if not isinstance(host, str): host = host.encode('utf-8') self['host'] = host self.setdefault('autostart_notification_thread', True)
python
def validate(self): """Validate / fix up the current config""" if not self.get('api_key'): raise ValueError("api_key not found in config. Please see documentation.") host = self.get('host') or DEFAULT_CLOUD_HOST if host: # remove extraneous slashes and force to byte string # otherwise msg += message_body in httplib will fail in python2 # when message_body contains binary data, and url is unicode # remaining failure modes include at least: # passing bytes in python3 will fail as we try to strip unicode '/' characters # passing unicode code points in python2 will fail due to httplib host.encode('ascii') host = host.strip('/') if not isinstance(host, str): host = host.encode('utf-8') self['host'] = host self.setdefault('autostart_notification_thread', True)
['def', 'validate', '(', 'self', ')', ':', 'if', 'not', 'self', '.', 'get', '(', "'api_key'", ')', ':', 'raise', 'ValueError', '(', '"api_key not found in config. Please see documentation."', ')', 'host', '=', 'self', '.', 'get', '(', "'host'", ')', 'or', 'DEFAULT_CLOUD_HOST', 'if', 'host', ':', '# remove extraneous slashes and force to byte string', '# otherwise msg += message_body in httplib will fail in python2', '# when message_body contains binary data, and url is unicode', '# remaining failure modes include at least:', "# passing bytes in python3 will fail as we try to strip unicode '/' characters", "# passing unicode code points in python2 will fail due to httplib host.encode('ascii')", 'host', '=', 'host', '.', 'strip', '(', "'/'", ')', 'if', 'not', 'isinstance', '(', 'host', ',', 'str', ')', ':', 'host', '=', 'host', '.', 'encode', '(', "'utf-8'", ')', 'self', '[', "'host'", ']', '=', 'host', 'self', '.', 'setdefault', '(', "'autostart_notification_thread'", ',', 'True', ')']
Validate / fix up the current config
['Validate', '/', 'fix', 'up', 'the', 'current', 'config']
train
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/configuration.py#L108-L126
9,214
Stewori/pytypes
pytypes/type_util.py
is_Union
def is_Union(tp): """Python version independent check if a type is typing.Union. Tested with CPython 2.7, 3.5, 3.6 and Jython 2.7.1. """ if tp is Union: return True try: # Python 3.6 return tp.__origin__ is Union except AttributeError: try: return isinstance(tp, typing.UnionMeta) except AttributeError: return False
python
def is_Union(tp): """Python version independent check if a type is typing.Union. Tested with CPython 2.7, 3.5, 3.6 and Jython 2.7.1. """ if tp is Union: return True try: # Python 3.6 return tp.__origin__ is Union except AttributeError: try: return isinstance(tp, typing.UnionMeta) except AttributeError: return False
['def', 'is_Union', '(', 'tp', ')', ':', 'if', 'tp', 'is', 'Union', ':', 'return', 'True', 'try', ':', '# Python 3.6', 'return', 'tp', '.', '__origin__', 'is', 'Union', 'except', 'AttributeError', ':', 'try', ':', 'return', 'isinstance', '(', 'tp', ',', 'typing', '.', 'UnionMeta', ')', 'except', 'AttributeError', ':', 'return', 'False']
Python version independent check if a type is typing.Union. Tested with CPython 2.7, 3.5, 3.6 and Jython 2.7.1.
['Python', 'version', 'independent', 'check', 'if', 'a', 'type', 'is', 'typing', '.', 'Union', '.', 'Tested', 'with', 'CPython', '2', '.', '7', '3', '.', '5', '3', '.', '6', 'and', 'Jython', '2', '.', '7', '.', '1', '.']
train
https://github.com/Stewori/pytypes/blob/b814d38709e84c0e0825caf8b721c20eb5a8ab3b/pytypes/type_util.py#L336-L349
9,215
awslabs/aws-shell
awsshell/config.py
Config.load
def load(self, config_template, config_file=None): """Read the config file if it exists, else read the default config. Creates the user config file if it doesn't exist using the template. :type config_template: str :param config_template: The config template file name. :type config_file: str :param config_file: (Optional) The config file name. If None, the config_file name will be set to the config_template. :rtype: :class:`configobj.ConfigObj` :return: The config information for reading and writing. """ if config_file is None: config_file = config_template config_path = build_config_file_path(config_file) template_path = os.path.join(os.path.dirname(__file__), config_template) self._copy_template_to_config(template_path, config_path) return self._load_template_or_config(template_path, config_path)
python
def load(self, config_template, config_file=None): """Read the config file if it exists, else read the default config. Creates the user config file if it doesn't exist using the template. :type config_template: str :param config_template: The config template file name. :type config_file: str :param config_file: (Optional) The config file name. If None, the config_file name will be set to the config_template. :rtype: :class:`configobj.ConfigObj` :return: The config information for reading and writing. """ if config_file is None: config_file = config_template config_path = build_config_file_path(config_file) template_path = os.path.join(os.path.dirname(__file__), config_template) self._copy_template_to_config(template_path, config_path) return self._load_template_or_config(template_path, config_path)
['def', 'load', '(', 'self', ',', 'config_template', ',', 'config_file', '=', 'None', ')', ':', 'if', 'config_file', 'is', 'None', ':', 'config_file', '=', 'config_template', 'config_path', '=', 'build_config_file_path', '(', 'config_file', ')', 'template_path', '=', 'os', '.', 'path', '.', 'join', '(', 'os', '.', 'path', '.', 'dirname', '(', '__file__', ')', ',', 'config_template', ')', 'self', '.', '_copy_template_to_config', '(', 'template_path', ',', 'config_path', ')', 'return', 'self', '.', '_load_template_or_config', '(', 'template_path', ',', 'config_path', ')']
Read the config file if it exists, else read the default config. Creates the user config file if it doesn't exist using the template. :type config_template: str :param config_template: The config template file name. :type config_file: str :param config_file: (Optional) The config file name. If None, the config_file name will be set to the config_template. :rtype: :class:`configobj.ConfigObj` :return: The config information for reading and writing.
['Read', 'the', 'config', 'file', 'if', 'it', 'exists', 'else', 'read', 'the', 'default', 'config', '.']
train
https://github.com/awslabs/aws-shell/blob/8950f03d9d720879890af6c11537b8f9789ce5a9/awsshell/config.py#L24-L45
9,216
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavextra.py
gps_velocity_old
def gps_velocity_old(GPS_RAW_INT): '''return GPS velocity vector''' return Vector3(GPS_RAW_INT.vel*0.01*cos(radians(GPS_RAW_INT.cog*0.01)), GPS_RAW_INT.vel*0.01*sin(radians(GPS_RAW_INT.cog*0.01)), 0)
python
def gps_velocity_old(GPS_RAW_INT): '''return GPS velocity vector''' return Vector3(GPS_RAW_INT.vel*0.01*cos(radians(GPS_RAW_INT.cog*0.01)), GPS_RAW_INT.vel*0.01*sin(radians(GPS_RAW_INT.cog*0.01)), 0)
['def', 'gps_velocity_old', '(', 'GPS_RAW_INT', ')', ':', 'return', 'Vector3', '(', 'GPS_RAW_INT', '.', 'vel', '*', '0.01', '*', 'cos', '(', 'radians', '(', 'GPS_RAW_INT', '.', 'cog', '*', '0.01', ')', ')', ',', 'GPS_RAW_INT', '.', 'vel', '*', '0.01', '*', 'sin', '(', 'radians', '(', 'GPS_RAW_INT', '.', 'cog', '*', '0.01', ')', ')', ',', '0', ')']
return GPS velocity vector
['return', 'GPS', 'velocity', 'vector']
train
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavextra.py#L630-L633
9,217
hovren/crisp
crisp/imu.py
IMU.from_mat_file
def from_mat_file(cls, matfilename): """Load gyro data from .mat file The MAT file should contain the following two arrays gyro : (3, N) float ndarray The angular velocity measurements. timestamps : (N, ) float ndarray Timestamps of the measurements. Parameters --------------- matfilename : string Name of the .mat file Returns ---------------- A new IMU class instance """ M = scipy.io.loadmat(matfilename) instance = cls() instance.gyro_data = M['gyro'] instance.timestamps = M['timestamps'] return instance
python
def from_mat_file(cls, matfilename): """Load gyro data from .mat file The MAT file should contain the following two arrays gyro : (3, N) float ndarray The angular velocity measurements. timestamps : (N, ) float ndarray Timestamps of the measurements. Parameters --------------- matfilename : string Name of the .mat file Returns ---------------- A new IMU class instance """ M = scipy.io.loadmat(matfilename) instance = cls() instance.gyro_data = M['gyro'] instance.timestamps = M['timestamps'] return instance
['def', 'from_mat_file', '(', 'cls', ',', 'matfilename', ')', ':', 'M', '=', 'scipy', '.', 'io', '.', 'loadmat', '(', 'matfilename', ')', 'instance', '=', 'cls', '(', ')', 'instance', '.', 'gyro_data', '=', 'M', '[', "'gyro'", ']', 'instance', '.', 'timestamps', '=', 'M', '[', "'timestamps'", ']', 'return', 'instance']
Load gyro data from .mat file The MAT file should contain the following two arrays gyro : (3, N) float ndarray The angular velocity measurements. timestamps : (N, ) float ndarray Timestamps of the measurements. Parameters --------------- matfilename : string Name of the .mat file Returns ---------------- A new IMU class instance
['Load', 'gyro', 'data', 'from', '.', 'mat', 'file', 'The', 'MAT', 'file', 'should', 'contain', 'the', 'following', 'two', 'arrays', 'gyro', ':', '(', '3', 'N', ')', 'float', 'ndarray', 'The', 'angular', 'velocity', 'measurements', '.', 'timestamps', ':', '(', 'N', ')', 'float', 'ndarray', 'Timestamps', 'of', 'the', 'measurements', '.', 'Parameters', '---------------', 'matfilename', ':', 'string', 'Name', 'of', 'the', '.', 'mat', 'file', 'Returns', '----------------', 'A', 'new', 'IMU', 'class', 'instance']
train
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/imu.py#L39-L62
9,218
mbj4668/pyang
pyang/translators/schemanode.py
SchemaNode.subnode
def subnode(self, node): """Make `node` receiver's child.""" self.children.append(node) node.parent = self node.adjust_interleave(node.interleave)
python
def subnode(self, node): """Make `node` receiver's child.""" self.children.append(node) node.parent = self node.adjust_interleave(node.interleave)
['def', 'subnode', '(', 'self', ',', 'node', ')', ':', 'self', '.', 'children', '.', 'append', '(', 'node', ')', 'node', '.', 'parent', '=', 'self', 'node', '.', 'adjust_interleave', '(', 'node', '.', 'interleave', ')']
Make `node` receiver's child.
['Make', 'node', 'receiver', 's', 'child', '.']
train
https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/schemanode.py#L146-L150
9,219
Jaymon/prom
prom/model.py
Orm.insert
def insert(self): """persist the field values of this orm""" ret = True schema = self.schema fields = self.depopulate(False) q = self.query q.set_fields(fields) pk = q.insert() if pk: fields = q.fields fields[schema.pk.name] = pk self._populate(fields) else: ret = False return ret
python
def insert(self): """persist the field values of this orm""" ret = True schema = self.schema fields = self.depopulate(False) q = self.query q.set_fields(fields) pk = q.insert() if pk: fields = q.fields fields[schema.pk.name] = pk self._populate(fields) else: ret = False return ret
['def', 'insert', '(', 'self', ')', ':', 'ret', '=', 'True', 'schema', '=', 'self', '.', 'schema', 'fields', '=', 'self', '.', 'depopulate', '(', 'False', ')', 'q', '=', 'self', '.', 'query', 'q', '.', 'set_fields', '(', 'fields', ')', 'pk', '=', 'q', '.', 'insert', '(', ')', 'if', 'pk', ':', 'fields', '=', 'q', '.', 'fields', 'fields', '[', 'schema', '.', 'pk', '.', 'name', ']', '=', 'pk', 'self', '.', '_populate', '(', 'fields', ')', 'else', ':', 'ret', '=', 'False', 'return', 'ret']
persist the field values of this orm
['persist', 'the', 'field', 'values', 'of', 'this', 'orm']
train
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/model.py#L296-L314
9,220
bitesofcode/projexui
projexui/widgets/xganttwidget/xganttwidgetitem.py
XGanttWidgetItem.takeChild
def takeChild(self, index): """ Removes the child at the given index from this item. :param index | <int> """ item = super(XGanttWidgetItem, self).takeChild(index) if item: item.removeFromScene() return item
python
def takeChild(self, index): """ Removes the child at the given index from this item. :param index | <int> """ item = super(XGanttWidgetItem, self).takeChild(index) if item: item.removeFromScene() return item
['def', 'takeChild', '(', 'self', ',', 'index', ')', ':', 'item', '=', 'super', '(', 'XGanttWidgetItem', ',', 'self', ')', '.', 'takeChild', '(', 'index', ')', 'if', 'item', ':', 'item', '.', 'removeFromScene', '(', ')', 'return', 'item']
Removes the child at the given index from this item. :param index | <int>
['Removes', 'the', 'child', 'at', 'the', 'given', 'index', 'from', 'this', 'item', '.', ':', 'param', 'index', '|', '<int', '>']
train
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xganttwidget/xganttwidgetitem.py#L664-L675
9,221
bcbio/bcbio-nextgen
bcbio/pipeline/shared.py
subset_bed_by_chrom
def subset_bed_by_chrom(in_file, chrom, data, out_dir=None): """Subset a BED file to only have items from the specified chromosome. """ if out_dir is None: out_dir = os.path.dirname(in_file) base, ext = os.path.splitext(os.path.basename(in_file)) out_file = os.path.join(out_dir, "%s-%s%s" % (base, chrom, ext)) if not utils.file_uptodate(out_file, in_file): with file_transaction(data, out_file) as tx_out_file: _rewrite_bed_with_chrom(in_file, tx_out_file, chrom) return out_file
python
def subset_bed_by_chrom(in_file, chrom, data, out_dir=None): """Subset a BED file to only have items from the specified chromosome. """ if out_dir is None: out_dir = os.path.dirname(in_file) base, ext = os.path.splitext(os.path.basename(in_file)) out_file = os.path.join(out_dir, "%s-%s%s" % (base, chrom, ext)) if not utils.file_uptodate(out_file, in_file): with file_transaction(data, out_file) as tx_out_file: _rewrite_bed_with_chrom(in_file, tx_out_file, chrom) return out_file
['def', 'subset_bed_by_chrom', '(', 'in_file', ',', 'chrom', ',', 'data', ',', 'out_dir', '=', 'None', ')', ':', 'if', 'out_dir', 'is', 'None', ':', 'out_dir', '=', 'os', '.', 'path', '.', 'dirname', '(', 'in_file', ')', 'base', ',', 'ext', '=', 'os', '.', 'path', '.', 'splitext', '(', 'os', '.', 'path', '.', 'basename', '(', 'in_file', ')', ')', 'out_file', '=', 'os', '.', 'path', '.', 'join', '(', 'out_dir', ',', '"%s-%s%s"', '%', '(', 'base', ',', 'chrom', ',', 'ext', ')', ')', 'if', 'not', 'utils', '.', 'file_uptodate', '(', 'out_file', ',', 'in_file', ')', ':', 'with', 'file_transaction', '(', 'data', ',', 'out_file', ')', 'as', 'tx_out_file', ':', '_rewrite_bed_with_chrom', '(', 'in_file', ',', 'tx_out_file', ',', 'chrom', ')', 'return', 'out_file']
Subset a BED file to only have items from the specified chromosome.
['Subset', 'a', 'BED', 'file', 'to', 'only', 'have', 'items', 'from', 'the', 'specified', 'chromosome', '.']
train
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/shared.py#L111-L121
9,222
alvarogzp/telegram-bot-framework
bot/action/standard/info/formatter/user.py
UserInfoFormatter.format
def format(self, member_info: bool = False): """ :param member_info: If True, also adds chat member info. Please note that this additional info requires making ONE api call. """ user = self.api_object self.__format_user(user) if member_info and self.chat.type != CHAT_TYPE_PRIVATE: self._add_empty() self.__format_member(user)
python
def format(self, member_info: bool = False): """ :param member_info: If True, also adds chat member info. Please note that this additional info requires making ONE api call. """ user = self.api_object self.__format_user(user) if member_info and self.chat.type != CHAT_TYPE_PRIVATE: self._add_empty() self.__format_member(user)
['def', 'format', '(', 'self', ',', 'member_info', ':', 'bool', '=', 'False', ')', ':', 'user', '=', 'self', '.', 'api_object', 'self', '.', '__format_user', '(', 'user', ')', 'if', 'member_info', 'and', 'self', '.', 'chat', '.', 'type', '!=', 'CHAT_TYPE_PRIVATE', ':', 'self', '.', '_add_empty', '(', ')', 'self', '.', '__format_member', '(', 'user', ')']
:param member_info: If True, also adds chat member info. Please note that this additional info requires making ONE api call.
[':', 'param', 'member_info', ':', 'If', 'True', 'also', 'adds', 'chat', 'member', 'info', '.', 'Please', 'note', 'that', 'this', 'additional', 'info', 'requires', 'making', 'ONE', 'api', 'call', '.']
train
https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/action/standard/info/formatter/user.py#L19-L28
9,223
5monkeys/content-io
cio/node.py
Node.namespace_uri
def namespace_uri(self): """ Finds and returns first applied URI of this node that has a namespace. :return str: uri """ try: return next( iter(filter(lambda uri: URI(uri).namespace, self._uri)) ) except StopIteration: return None
python
def namespace_uri(self): """ Finds and returns first applied URI of this node that has a namespace. :return str: uri """ try: return next( iter(filter(lambda uri: URI(uri).namespace, self._uri)) ) except StopIteration: return None
['def', 'namespace_uri', '(', 'self', ')', ':', 'try', ':', 'return', 'next', '(', 'iter', '(', 'filter', '(', 'lambda', 'uri', ':', 'URI', '(', 'uri', ')', '.', 'namespace', ',', 'self', '.', '_uri', ')', ')', ')', 'except', 'StopIteration', ':', 'return', 'None']
Finds and returns first applied URI of this node that has a namespace. :return str: uri
['Finds', 'and', 'returns', 'first', 'applied', 'URI', 'of', 'this', 'node', 'that', 'has', 'a', 'namespace', '.']
train
https://github.com/5monkeys/content-io/blob/8c8519c74cbadab871f7151c0e02252cb5753759/cio/node.py#L70-L81
9,224
pallets/werkzeug
src/werkzeug/formparser.py
exhaust_stream
def exhaust_stream(f): """Helper decorator for methods that exhaust the stream on return.""" def wrapper(self, stream, *args, **kwargs): try: return f(self, stream, *args, **kwargs) finally: exhaust = getattr(stream, "exhaust", None) if exhaust is not None: exhaust() else: while 1: chunk = stream.read(1024 * 64) if not chunk: break return update_wrapper(wrapper, f)
python
def exhaust_stream(f): """Helper decorator for methods that exhaust the stream on return.""" def wrapper(self, stream, *args, **kwargs): try: return f(self, stream, *args, **kwargs) finally: exhaust = getattr(stream, "exhaust", None) if exhaust is not None: exhaust() else: while 1: chunk = stream.read(1024 * 64) if not chunk: break return update_wrapper(wrapper, f)
['def', 'exhaust_stream', '(', 'f', ')', ':', 'def', 'wrapper', '(', 'self', ',', 'stream', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'try', ':', 'return', 'f', '(', 'self', ',', 'stream', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'finally', ':', 'exhaust', '=', 'getattr', '(', 'stream', ',', '"exhaust"', ',', 'None', ')', 'if', 'exhaust', 'is', 'not', 'None', ':', 'exhaust', '(', ')', 'else', ':', 'while', '1', ':', 'chunk', '=', 'stream', '.', 'read', '(', '1024', '*', '64', ')', 'if', 'not', 'chunk', ':', 'break', 'return', 'update_wrapper', '(', 'wrapper', ',', 'f', ')']
Helper decorator for methods that exhaust the stream on return.
['Helper', 'decorator', 'for', 'methods', 'that', 'exhaust', 'the', 'stream', 'on', 'return', '.']
train
https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/src/werkzeug/formparser.py#L125-L141
9,225
apache/spark
python/pyspark/rdd.py
RDD.lookup
def lookup(self, key): """ Return the list of values in the RDD for key `key`. This operation is done efficiently if the RDD has a known partitioner by only searching the partition that the key maps to. >>> l = range(1000) >>> rdd = sc.parallelize(zip(l, l), 10) >>> rdd.lookup(42) # slow [42] >>> sorted = rdd.sortByKey() >>> sorted.lookup(42) # fast [42] >>> sorted.lookup(1024) [] >>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey() >>> list(rdd2.lookup(('a', 'b'))[0]) ['c'] """ values = self.filter(lambda kv: kv[0] == key).values() if self.partitioner is not None: return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)]) return values.collect()
python
def lookup(self, key): """ Return the list of values in the RDD for key `key`. This operation is done efficiently if the RDD has a known partitioner by only searching the partition that the key maps to. >>> l = range(1000) >>> rdd = sc.parallelize(zip(l, l), 10) >>> rdd.lookup(42) # slow [42] >>> sorted = rdd.sortByKey() >>> sorted.lookup(42) # fast [42] >>> sorted.lookup(1024) [] >>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey() >>> list(rdd2.lookup(('a', 'b'))[0]) ['c'] """ values = self.filter(lambda kv: kv[0] == key).values() if self.partitioner is not None: return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)]) return values.collect()
['def', 'lookup', '(', 'self', ',', 'key', ')', ':', 'values', '=', 'self', '.', 'filter', '(', 'lambda', 'kv', ':', 'kv', '[', '0', ']', '==', 'key', ')', '.', 'values', '(', ')', 'if', 'self', '.', 'partitioner', 'is', 'not', 'None', ':', 'return', 'self', '.', 'ctx', '.', 'runJob', '(', 'values', ',', 'lambda', 'x', ':', 'x', ',', '[', 'self', '.', 'partitioner', '(', 'key', ')', ']', ')', 'return', 'values', '.', 'collect', '(', ')']
Return the list of values in the RDD for key `key`. This operation is done efficiently if the RDD has a known partitioner by only searching the partition that the key maps to. >>> l = range(1000) >>> rdd = sc.parallelize(zip(l, l), 10) >>> rdd.lookup(42) # slow [42] >>> sorted = rdd.sortByKey() >>> sorted.lookup(42) # fast [42] >>> sorted.lookup(1024) [] >>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey() >>> list(rdd2.lookup(('a', 'b'))[0]) ['c']
['Return', 'the', 'list', 'of', 'values', 'in', 'the', 'RDD', 'for', 'key', 'key', '.', 'This', 'operation', 'is', 'done', 'efficiently', 'if', 'the', 'RDD', 'has', 'a', 'known', 'partitioner', 'by', 'only', 'searching', 'the', 'partition', 'that', 'the', 'key', 'maps', 'to', '.']
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L2267-L2291
9,226
python-rope/rope
rope/base/oi/type_hinting/providers/pep0484_type_comments.py
AssignmentProvider._search_type_in_type_comment
def _search_type_in_type_comment(self, code): """ For more info see: https://www.python.org/dev/peps/pep-0484/#type-comments >>> AssignmentProvider()._search_type_in_type_comment('type: int') ['int'] """ for p in self.PEP0484_TYPE_COMMENT_PATTERNS: match = p.search(code) if match: return [match.group(1)]
python
def _search_type_in_type_comment(self, code): """ For more info see: https://www.python.org/dev/peps/pep-0484/#type-comments >>> AssignmentProvider()._search_type_in_type_comment('type: int') ['int'] """ for p in self.PEP0484_TYPE_COMMENT_PATTERNS: match = p.search(code) if match: return [match.group(1)]
['def', '_search_type_in_type_comment', '(', 'self', ',', 'code', ')', ':', 'for', 'p', 'in', 'self', '.', 'PEP0484_TYPE_COMMENT_PATTERNS', ':', 'match', '=', 'p', '.', 'search', '(', 'code', ')', 'if', 'match', ':', 'return', '[', 'match', '.', 'group', '(', '1', ')', ']']
For more info see: https://www.python.org/dev/peps/pep-0484/#type-comments >>> AssignmentProvider()._search_type_in_type_comment('type: int') ['int']
['For', 'more', 'info', 'see', ':', 'https', ':', '//', 'www', '.', 'python', '.', 'org', '/', 'dev', '/', 'peps', '/', 'pep', '-', '0484', '/', '#type', '-', 'comments']
train
https://github.com/python-rope/rope/blob/1c9f9cd5964b099a99a9111e998f0dc728860688/rope/base/oi/type_hinting/providers/pep0484_type_comments.py#L32-L42
9,227
rckclmbr/pyportify
pyportify/pkcs1/primes.py
jacobi
def jacobi(a, b): '''Calculates the value of the Jacobi symbol (a/b) where both a and b are positive integers, and b is odd :returns: -1, 0 or 1 ''' assert a > 0 assert b > 0 if a == 0: return 0 result = 1 while a > 1: if a & 1: if ((a-1)*(b-1) >> 2) & 1: result = -result a, b = b % a, a else: if (((b * b) - 1) >> 3) & 1: result = -result a >>= 1 if a == 0: return 0 return result
python
def jacobi(a, b): '''Calculates the value of the Jacobi symbol (a/b) where both a and b are positive integers, and b is odd :returns: -1, 0 or 1 ''' assert a > 0 assert b > 0 if a == 0: return 0 result = 1 while a > 1: if a & 1: if ((a-1)*(b-1) >> 2) & 1: result = -result a, b = b % a, a else: if (((b * b) - 1) >> 3) & 1: result = -result a >>= 1 if a == 0: return 0 return result
['def', 'jacobi', '(', 'a', ',', 'b', ')', ':', 'assert', 'a', '>', '0', 'assert', 'b', '>', '0', 'if', 'a', '==', '0', ':', 'return', '0', 'result', '=', '1', 'while', 'a', '>', '1', ':', 'if', 'a', '&', '1', ':', 'if', '(', '(', 'a', '-', '1', ')', '*', '(', 'b', '-', '1', ')', '>>', '2', ')', '&', '1', ':', 'result', '=', '-', 'result', 'a', ',', 'b', '=', 'b', '%', 'a', ',', 'a', 'else', ':', 'if', '(', '(', '(', 'b', '*', 'b', ')', '-', '1', ')', '>>', '3', ')', '&', '1', ':', 'result', '=', '-', 'result', 'a', '>>=', '1', 'if', 'a', '==', '0', ':', 'return', '0', 'return', 'result']
Calculates the value of the Jacobi symbol (a/b) where both a and b are positive integers, and b is odd :returns: -1, 0 or 1
['Calculates', 'the', 'value', 'of', 'the', 'Jacobi', 'symbol', '(', 'a', '/', 'b', ')', 'where', 'both', 'a', 'and', 'b', 'are', 'positive', 'integers', 'and', 'b', 'is', 'odd']
train
https://github.com/rckclmbr/pyportify/blob/696a1caad8a47b191f3bec44cc8fc3c437779512/pyportify/pkcs1/primes.py#L73-L97
9,228
talkincode/toughlib
toughlib/btforms/net.py
validipaddr
def validipaddr(address): """ Returns True if `address` is a valid IPv4 address. >>> validipaddr('192.168.1.1') True >>> validipaddr('192.168.1.800') False >>> validipaddr('192.168.1') False """ try: octets = address.split('.') if len(octets) != 4: return False for x in octets: if not (0 <= int(x) <= 255): return False except ValueError: return False return True
python
def validipaddr(address): """ Returns True if `address` is a valid IPv4 address. >>> validipaddr('192.168.1.1') True >>> validipaddr('192.168.1.800') False >>> validipaddr('192.168.1') False """ try: octets = address.split('.') if len(octets) != 4: return False for x in octets: if not (0 <= int(x) <= 255): return False except ValueError: return False return True
['def', 'validipaddr', '(', 'address', ')', ':', 'try', ':', 'octets', '=', 'address', '.', 'split', '(', "'.'", ')', 'if', 'len', '(', 'octets', ')', '!=', '4', ':', 'return', 'False', 'for', 'x', 'in', 'octets', ':', 'if', 'not', '(', '0', '<=', 'int', '(', 'x', ')', '<=', '255', ')', ':', 'return', 'False', 'except', 'ValueError', ':', 'return', 'False', 'return', 'True']
Returns True if `address` is a valid IPv4 address. >>> validipaddr('192.168.1.1') True >>> validipaddr('192.168.1.800') False >>> validipaddr('192.168.1') False
['Returns', 'True', 'if', 'address', 'is', 'a', 'valid', 'IPv4', 'address', '.', '>>>', 'validipaddr', '(', '192', '.', '168', '.', '1', '.', '1', ')', 'True', '>>>', 'validipaddr', '(', '192', '.', '168', '.', '1', '.', '800', ')', 'False', '>>>', 'validipaddr', '(', '192', '.', '168', '.', '1', ')', 'False']
train
https://github.com/talkincode/toughlib/blob/1c2f7dde3a7f101248f1b5f5d428cc85466995cf/toughlib/btforms/net.py#L14-L34
9,229
tgbugs/pyontutils
ilxutils/ilxutils/simple_rdflib.py
SimpleGraph.remove_triple
def remove_triple( self, subj: URIRef, pred: URIRef, obj: Union[URIRef, Literal] ) -> None: """ Removes triple from rdflib Graph You must input the triple in its URIRef or Literal form for each node exactly the way it was input or it will not delete the triple. Args: subj: Entity subject to be removed if it's the only node with this subject; else this is just going to delete a description, i.e. a predicate_object of this entity. pred: Entity predicate to be removed obj: Entity object to be removed """ self.g.remove( (subj, pred, obj) )
python
def remove_triple( self, subj: URIRef, pred: URIRef, obj: Union[URIRef, Literal] ) -> None: """ Removes triple from rdflib Graph You must input the triple in its URIRef or Literal form for each node exactly the way it was input or it will not delete the triple. Args: subj: Entity subject to be removed if it's the only node with this subject; else this is just going to delete a description, i.e. a predicate_object of this entity. pred: Entity predicate to be removed obj: Entity object to be removed """ self.g.remove( (subj, pred, obj) )
['def', 'remove_triple', '(', 'self', ',', 'subj', ':', 'URIRef', ',', 'pred', ':', 'URIRef', ',', 'obj', ':', 'Union', '[', 'URIRef', ',', 'Literal', ']', ')', '->', 'None', ':', 'self', '.', 'g', '.', 'remove', '(', '(', 'subj', ',', 'pred', ',', 'obj', ')', ')']
Removes triple from rdflib Graph You must input the triple in its URIRef or Literal form for each node exactly the way it was input or it will not delete the triple. Args: subj: Entity subject to be removed if it's the only node with this subject; else this is just going to delete a description, i.e. a predicate_object of this entity. pred: Entity predicate to be removed obj: Entity object to be removed
['Removes', 'triple', 'from', 'rdflib', 'Graph']
train
https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/simple_rdflib.py#L291-L308
9,230
nok/sklearn-porter
sklearn_porter/Template.py
Template.indent
def indent(self, text, n_indents=1, skipping=False): """ Indent text with single spaces. Parameters ---------- :param text : string The text which gets a specific indentation. :param n_indents : int, default: 1 The number of indentations. :param skipping : boolean, default: False Whether to skip the initial indentation. Returns ------- return : string The indented text. """ lines = text.splitlines() space = self.TEMPLATES.get(self.target_language).get('indent', ' ') # Single line: if len(lines) == 1: if skipping: return text.strip() return n_indents * space + text.strip() # Multiple lines: indented_lines = [] for idx, line in enumerate(lines): if skipping and idx == 0: indented_lines.append(line) else: line = n_indents * space + line indented_lines.append(line) indented_text = '\n'.join(indented_lines) return indented_text
python
def indent(self, text, n_indents=1, skipping=False): """ Indent text with single spaces. Parameters ---------- :param text : string The text which gets a specific indentation. :param n_indents : int, default: 1 The number of indentations. :param skipping : boolean, default: False Whether to skip the initial indentation. Returns ------- return : string The indented text. """ lines = text.splitlines() space = self.TEMPLATES.get(self.target_language).get('indent', ' ') # Single line: if len(lines) == 1: if skipping: return text.strip() return n_indents * space + text.strip() # Multiple lines: indented_lines = [] for idx, line in enumerate(lines): if skipping and idx == 0: indented_lines.append(line) else: line = n_indents * space + line indented_lines.append(line) indented_text = '\n'.join(indented_lines) return indented_text
['def', 'indent', '(', 'self', ',', 'text', ',', 'n_indents', '=', '1', ',', 'skipping', '=', 'False', ')', ':', 'lines', '=', 'text', '.', 'splitlines', '(', ')', 'space', '=', 'self', '.', 'TEMPLATES', '.', 'get', '(', 'self', '.', 'target_language', ')', '.', 'get', '(', "'indent'", ',', "' '", ')', '# Single line:', 'if', 'len', '(', 'lines', ')', '==', '1', ':', 'if', 'skipping', ':', 'return', 'text', '.', 'strip', '(', ')', 'return', 'n_indents', '*', 'space', '+', 'text', '.', 'strip', '(', ')', '# Multiple lines:', 'indented_lines', '=', '[', ']', 'for', 'idx', ',', 'line', 'in', 'enumerate', '(', 'lines', ')', ':', 'if', 'skipping', 'and', 'idx', '==', '0', ':', 'indented_lines', '.', 'append', '(', 'line', ')', 'else', ':', 'line', '=', 'n_indents', '*', 'space', '+', 'line', 'indented_lines', '.', 'append', '(', 'line', ')', 'indented_text', '=', "'\\n'", '.', 'join', '(', 'indented_lines', ')', 'return', 'indented_text']
Indent text with single spaces. Parameters ---------- :param text : string The text which gets a specific indentation. :param n_indents : int, default: 1 The number of indentations. :param skipping : boolean, default: False Whether to skip the initial indentation. Returns ------- return : string The indented text.
['Indent', 'text', 'with', 'single', 'spaces', '.']
train
https://github.com/nok/sklearn-porter/blob/04673f768310bde31f9747a68a5e070592441ef2/sklearn_porter/Template.py#L25-L61
9,231
rm-hull/luma.oled
luma/oled/device/__init__.py
ssd1306.display
def display(self, image): """ Takes a 1-bit :py:mod:`PIL.Image` and dumps it to the OLED display. :param image: Image to display. :type image: :py:mod:`PIL.Image` """ assert(image.mode == self.mode) assert(image.size == self.size) image = self.preprocess(image) self.command( # Column start/end address self._const.COLUMNADDR, self._colstart, self._colend - 1, # Page start/end address self._const.PAGEADDR, 0x00, self._pages - 1) buf = bytearray(self._w * self._pages) off = self._offsets mask = self._mask idx = 0 for pix in image.getdata(): if pix > 0: buf[off[idx]] |= mask[idx] idx += 1 self.data(list(buf))
python
def display(self, image): """ Takes a 1-bit :py:mod:`PIL.Image` and dumps it to the OLED display. :param image: Image to display. :type image: :py:mod:`PIL.Image` """ assert(image.mode == self.mode) assert(image.size == self.size) image = self.preprocess(image) self.command( # Column start/end address self._const.COLUMNADDR, self._colstart, self._colend - 1, # Page start/end address self._const.PAGEADDR, 0x00, self._pages - 1) buf = bytearray(self._w * self._pages) off = self._offsets mask = self._mask idx = 0 for pix in image.getdata(): if pix > 0: buf[off[idx]] |= mask[idx] idx += 1 self.data(list(buf))
['def', 'display', '(', 'self', ',', 'image', ')', ':', 'assert', '(', 'image', '.', 'mode', '==', 'self', '.', 'mode', ')', 'assert', '(', 'image', '.', 'size', '==', 'self', '.', 'size', ')', 'image', '=', 'self', '.', 'preprocess', '(', 'image', ')', 'self', '.', 'command', '(', '# Column start/end address', 'self', '.', '_const', '.', 'COLUMNADDR', ',', 'self', '.', '_colstart', ',', 'self', '.', '_colend', '-', '1', ',', '# Page start/end address', 'self', '.', '_const', '.', 'PAGEADDR', ',', '0x00', ',', 'self', '.', '_pages', '-', '1', ')', 'buf', '=', 'bytearray', '(', 'self', '.', '_w', '*', 'self', '.', '_pages', ')', 'off', '=', 'self', '.', '_offsets', 'mask', '=', 'self', '.', '_mask', 'idx', '=', '0', 'for', 'pix', 'in', 'image', '.', 'getdata', '(', ')', ':', 'if', 'pix', '>', '0', ':', 'buf', '[', 'off', '[', 'idx', ']', ']', '|=', 'mask', '[', 'idx', ']', 'idx', '+=', '1', 'self', '.', 'data', '(', 'list', '(', 'buf', ')', ')']
Takes a 1-bit :py:mod:`PIL.Image` and dumps it to the OLED display. :param image: Image to display. :type image: :py:mod:`PIL.Image`
['Takes', 'a', '1', '-', 'bit', ':', 'py', ':', 'mod', ':', 'PIL', '.', 'Image', 'and', 'dumps', 'it', 'to', 'the', 'OLED', 'display', '.']
train
https://github.com/rm-hull/luma.oled/blob/76055aa2ca486dc2f9def49754b74ffbccdc5491/luma/oled/device/__init__.py#L191-L220
9,232
wakatime/wakatime
wakatime/packages/pygments/lexers/__init__.py
load_lexer_from_file
def load_lexer_from_file(filename, lexername="CustomLexer", **options): """Load a lexer from a file. This method expects a file located relative to the current working directory, which contains a Lexer class. By default, it expects the Lexer to be named CustomLexer; you can specify your own class name as the second argument to this function. Users should be very careful with the input, because this method is equivalent to running eval on the input file. Raises ClassNotFound if there are any problems importing the Lexer. .. versionadded:: 2.2 """ try: # This empty dict will contain the namespace for the exec'd file custom_namespace = {} exec(open(filename, 'rb').read(), custom_namespace) # Retrieve the class `lexername` from that namespace if lexername not in custom_namespace: raise ClassNotFound('no valid %s class found in %s' % (lexername, filename)) lexer_class = custom_namespace[lexername] # And finally instantiate it with the options return lexer_class(**options) except IOError as err: raise ClassNotFound('cannot read %s' % filename) except ClassNotFound as err: raise except Exception as err: raise ClassNotFound('error when loading custom lexer: %s' % err)
python
def load_lexer_from_file(filename, lexername="CustomLexer", **options): """Load a lexer from a file. This method expects a file located relative to the current working directory, which contains a Lexer class. By default, it expects the Lexer to be named CustomLexer; you can specify your own class name as the second argument to this function. Users should be very careful with the input, because this method is equivalent to running eval on the input file. Raises ClassNotFound if there are any problems importing the Lexer. .. versionadded:: 2.2 """ try: # This empty dict will contain the namespace for the exec'd file custom_namespace = {} exec(open(filename, 'rb').read(), custom_namespace) # Retrieve the class `lexername` from that namespace if lexername not in custom_namespace: raise ClassNotFound('no valid %s class found in %s' % (lexername, filename)) lexer_class = custom_namespace[lexername] # And finally instantiate it with the options return lexer_class(**options) except IOError as err: raise ClassNotFound('cannot read %s' % filename) except ClassNotFound as err: raise except Exception as err: raise ClassNotFound('error when loading custom lexer: %s' % err)
['def', 'load_lexer_from_file', '(', 'filename', ',', 'lexername', '=', '"CustomLexer"', ',', '*', '*', 'options', ')', ':', 'try', ':', "# This empty dict will contain the namespace for the exec'd file", 'custom_namespace', '=', '{', '}', 'exec', '(', 'open', '(', 'filename', ',', "'rb'", ')', '.', 'read', '(', ')', ',', 'custom_namespace', ')', '# Retrieve the class `lexername` from that namespace', 'if', 'lexername', 'not', 'in', 'custom_namespace', ':', 'raise', 'ClassNotFound', '(', "'no valid %s class found in %s'", '%', '(', 'lexername', ',', 'filename', ')', ')', 'lexer_class', '=', 'custom_namespace', '[', 'lexername', ']', '# And finally instantiate it with the options', 'return', 'lexer_class', '(', '*', '*', 'options', ')', 'except', 'IOError', 'as', 'err', ':', 'raise', 'ClassNotFound', '(', "'cannot read %s'", '%', 'filename', ')', 'except', 'ClassNotFound', 'as', 'err', ':', 'raise', 'except', 'Exception', 'as', 'err', ':', 'raise', 'ClassNotFound', '(', "'error when loading custom lexer: %s'", '%', 'err', ')']
Load a lexer from a file. This method expects a file located relative to the current working directory, which contains a Lexer class. By default, it expects the Lexer to be named CustomLexer; you can specify your own class name as the second argument to this function. Users should be very careful with the input, because this method is equivalent to running eval on the input file. Raises ClassNotFound if there are any problems importing the Lexer. .. versionadded:: 2.2
['Load', 'a', 'lexer', 'from', 'a', 'file', '.']
train
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/__init__.py#L118-L149
9,233
mwouts/jupytext
jupytext/formats.py
short_form_one_format
def short_form_one_format(jupytext_format): """Represent one jupytext format as a string""" if not isinstance(jupytext_format, dict): return jupytext_format fmt = jupytext_format['extension'] if 'suffix' in jupytext_format: fmt = jupytext_format['suffix'] + fmt elif fmt.startswith('.'): fmt = fmt[1:] if 'prefix' in jupytext_format: fmt = jupytext_format['prefix'] + '/' + fmt if jupytext_format.get('format_name'): if jupytext_format['extension'] not in ['.md', '.Rmd'] or jupytext_format['format_name'] == 'pandoc': fmt = fmt + ':' + jupytext_format['format_name'] return fmt
python
def short_form_one_format(jupytext_format): """Represent one jupytext format as a string""" if not isinstance(jupytext_format, dict): return jupytext_format fmt = jupytext_format['extension'] if 'suffix' in jupytext_format: fmt = jupytext_format['suffix'] + fmt elif fmt.startswith('.'): fmt = fmt[1:] if 'prefix' in jupytext_format: fmt = jupytext_format['prefix'] + '/' + fmt if jupytext_format.get('format_name'): if jupytext_format['extension'] not in ['.md', '.Rmd'] or jupytext_format['format_name'] == 'pandoc': fmt = fmt + ':' + jupytext_format['format_name'] return fmt
['def', 'short_form_one_format', '(', 'jupytext_format', ')', ':', 'if', 'not', 'isinstance', '(', 'jupytext_format', ',', 'dict', ')', ':', 'return', 'jupytext_format', 'fmt', '=', 'jupytext_format', '[', "'extension'", ']', 'if', "'suffix'", 'in', 'jupytext_format', ':', 'fmt', '=', 'jupytext_format', '[', "'suffix'", ']', '+', 'fmt', 'elif', 'fmt', '.', 'startswith', '(', "'.'", ')', ':', 'fmt', '=', 'fmt', '[', '1', ':', ']', 'if', "'prefix'", 'in', 'jupytext_format', ':', 'fmt', '=', 'jupytext_format', '[', "'prefix'", ']', '+', "'/'", '+', 'fmt', 'if', 'jupytext_format', '.', 'get', '(', "'format_name'", ')', ':', 'if', 'jupytext_format', '[', "'extension'", ']', 'not', 'in', '[', "'.md'", ',', "'.Rmd'", ']', 'or', 'jupytext_format', '[', "'format_name'", ']', '==', "'pandoc'", ':', 'fmt', '=', 'fmt', '+', "':'", '+', 'jupytext_format', '[', "'format_name'", ']', 'return', 'fmt']
Represent one jupytext format as a string
['Represent', 'one', 'jupytext', 'format', 'as', 'a', 'string']
train
https://github.com/mwouts/jupytext/blob/eb7d6aee889f80ad779cfc53441c648f0db9246d/jupytext/formats.py#L483-L500
9,234
wdbm/megaparsex
megaparsex.py
trigger_keyphrases
def trigger_keyphrases( text = None, # input text to parse keyphrases = None, # keyphrases for parsing input text response = None, # optional text response on trigger function = None, # optional function on trigger kwargs = None, # optional function keyword arguments confirm = False, # optional return of confirmation confirmation_prompt = "Do you want to continue? (y/n)", confirmation_feedback_confirm = "confirm", confirmation_feedback_deny = "deny" ): """ Parse input text for keyphrases. If any keyphrases are found, respond with text or by seeking confirmation or by engaging a function with optional keyword arguments. Return text or True if triggered and return False if not triggered. If confirmation is required, a confirmation object is returned, encapsulating a function and its optional arguments. """ if any(pattern in text for pattern in keyphrases): if confirm: return confirmation( prompt = confirmation_prompt, feedback_confirm = confirmation_feedback_confirm, feedback_deny = confirmation_feedback_deny, function = function, kwargs = kwargs ) if function and not kwargs: result = function() elif function and kwargs: result = function(**kwargs) else: result = None if response: return response elif not response and result: return str(result) else: return True else: return False
python
def trigger_keyphrases( text = None, # input text to parse keyphrases = None, # keyphrases for parsing input text response = None, # optional text response on trigger function = None, # optional function on trigger kwargs = None, # optional function keyword arguments confirm = False, # optional return of confirmation confirmation_prompt = "Do you want to continue? (y/n)", confirmation_feedback_confirm = "confirm", confirmation_feedback_deny = "deny" ): """ Parse input text for keyphrases. If any keyphrases are found, respond with text or by seeking confirmation or by engaging a function with optional keyword arguments. Return text or True if triggered and return False if not triggered. If confirmation is required, a confirmation object is returned, encapsulating a function and its optional arguments. """ if any(pattern in text for pattern in keyphrases): if confirm: return confirmation( prompt = confirmation_prompt, feedback_confirm = confirmation_feedback_confirm, feedback_deny = confirmation_feedback_deny, function = function, kwargs = kwargs ) if function and not kwargs: result = function() elif function and kwargs: result = function(**kwargs) else: result = None if response: return response elif not response and result: return str(result) else: return True else: return False
['def', 'trigger_keyphrases', '(', 'text', '=', 'None', ',', '# input text to parse', 'keyphrases', '=', 'None', ',', '# keyphrases for parsing input text', 'response', '=', 'None', ',', '# optional text response on trigger', 'function', '=', 'None', ',', '# optional function on trigger', 'kwargs', '=', 'None', ',', '# optional function keyword arguments', 'confirm', '=', 'False', ',', '# optional return of confirmation', 'confirmation_prompt', '=', '"Do you want to continue? (y/n)"', ',', 'confirmation_feedback_confirm', '=', '"confirm"', ',', 'confirmation_feedback_deny', '=', '"deny"', ')', ':', 'if', 'any', '(', 'pattern', 'in', 'text', 'for', 'pattern', 'in', 'keyphrases', ')', ':', 'if', 'confirm', ':', 'return', 'confirmation', '(', 'prompt', '=', 'confirmation_prompt', ',', 'feedback_confirm', '=', 'confirmation_feedback_confirm', ',', 'feedback_deny', '=', 'confirmation_feedback_deny', ',', 'function', '=', 'function', ',', 'kwargs', '=', 'kwargs', ')', 'if', 'function', 'and', 'not', 'kwargs', ':', 'result', '=', 'function', '(', ')', 'elif', 'function', 'and', 'kwargs', ':', 'result', '=', 'function', '(', '*', '*', 'kwargs', ')', 'else', ':', 'result', '=', 'None', 'if', 'response', ':', 'return', 'response', 'elif', 'not', 'response', 'and', 'result', ':', 'return', 'str', '(', 'result', ')', 'else', ':', 'return', 'True', 'else', ':', 'return', 'False']
Parse input text for keyphrases. If any keyphrases are found, respond with text or by seeking confirmation or by engaging a function with optional keyword arguments. Return text or True if triggered and return False if not triggered. If confirmation is required, a confirmation object is returned, encapsulating a function and its optional arguments.
['Parse', 'input', 'text', 'for', 'keyphrases', '.', 'If', 'any', 'keyphrases', 'are', 'found', 'respond', 'with', 'text', 'or', 'by', 'seeking', 'confirmation', 'or', 'by', 'engaging', 'a', 'function', 'with', 'optional', 'keyword', 'arguments', '.', 'Return', 'text', 'or', 'True', 'if', 'triggered', 'and', 'return', 'False', 'if', 'not', 'triggered', '.', 'If', 'confirmation', 'is', 'required', 'a', 'confirmation', 'object', 'is', 'returned', 'encapsulating', 'a', 'function', 'and', 'its', 'optional', 'arguments', '.']
train
https://github.com/wdbm/megaparsex/blob/59da05410aa1cf8682dcee2bf0bd0572fa42bd29/megaparsex.py#L51-L91
9,235
slundberg/shap
shap/benchmark/metrics.py
batch_remove_absolute_retrain__r2
def batch_remove_absolute_retrain__r2(X, y, model_generator, method_name, num_fcounts=11): """ Batch Remove Absolute (retrain) xlabel = "Fraction of features removed" ylabel = "1 - R^2" transform = "one_minus" sort_order = 13 """ return __run_batch_abs_metric(measures.batch_remove_retrain, X, y, model_generator, method_name, sklearn.metrics.r2_score, num_fcounts)
python
def batch_remove_absolute_retrain__r2(X, y, model_generator, method_name, num_fcounts=11): """ Batch Remove Absolute (retrain) xlabel = "Fraction of features removed" ylabel = "1 - R^2" transform = "one_minus" sort_order = 13 """ return __run_batch_abs_metric(measures.batch_remove_retrain, X, y, model_generator, method_name, sklearn.metrics.r2_score, num_fcounts)
['def', 'batch_remove_absolute_retrain__r2', '(', 'X', ',', 'y', ',', 'model_generator', ',', 'method_name', ',', 'num_fcounts', '=', '11', ')', ':', 'return', '__run_batch_abs_metric', '(', 'measures', '.', 'batch_remove_retrain', ',', 'X', ',', 'y', ',', 'model_generator', ',', 'method_name', ',', 'sklearn', '.', 'metrics', '.', 'r2_score', ',', 'num_fcounts', ')']
Batch Remove Absolute (retrain) xlabel = "Fraction of features removed" ylabel = "1 - R^2" transform = "one_minus" sort_order = 13
['Batch', 'Remove', 'Absolute', '(', 'retrain', ')', 'xlabel', '=', 'Fraction', 'of', 'features', 'removed', 'ylabel', '=', '1', '-', 'R^2', 'transform', '=', 'one_minus', 'sort_order', '=', '13']
train
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/benchmark/metrics.py#L394-L401
9,236
praekeltfoundation/molo.polls
molo/polls/admin.py
ParentListFilter.lookups
def lookups(self, request, model_admin): """ Returns a list of tuples. The first element in each tuple is the coded value for the option that will appear in the URL query. The second element is the human-readable name for the option that will appear in the right sidebar. """ return [(q.slug, q.title) for q in Question.objects.all()]
python
def lookups(self, request, model_admin): """ Returns a list of tuples. The first element in each tuple is the coded value for the option that will appear in the URL query. The second element is the human-readable name for the option that will appear in the right sidebar. """ return [(q.slug, q.title) for q in Question.objects.all()]
['def', 'lookups', '(', 'self', ',', 'request', ',', 'model_admin', ')', ':', 'return', '[', '(', 'q', '.', 'slug', ',', 'q', '.', 'title', ')', 'for', 'q', 'in', 'Question', '.', 'objects', '.', 'all', '(', ')', ']']
Returns a list of tuples. The first element in each tuple is the coded value for the option that will appear in the URL query. The second element is the human-readable name for the option that will appear in the right sidebar.
['Returns', 'a', 'list', 'of', 'tuples', '.', 'The', 'first', 'element', 'in', 'each', 'tuple', 'is', 'the', 'coded', 'value', 'for', 'the', 'option', 'that', 'will', 'appear', 'in', 'the', 'URL', 'query', '.', 'The', 'second', 'element', 'is', 'the', 'human', '-', 'readable', 'name', 'for', 'the', 'option', 'that', 'will', 'appear', 'in', 'the', 'right', 'sidebar', '.']
train
https://github.com/praekeltfoundation/molo.polls/blob/7b3e0e8908b2674ea297d2118c89c49333c84703/molo/polls/admin.py#L15-L23
9,237
Vagrants/blackbird
blackbird/utils/logger.py
get_handler_fp
def get_handler_fp(logger): """ Get handler_fp. This method will be integrated into the LoggerFactory object in the future. :param logging.Logger logger: Python logging.Logger. logger instance. :rtype: logging.Logger.handlers.BaseRotatingHandler :return: Handler or Handler's stream. We call it `handler_fp`. """ if not hasattr(logger, 'handlers'): raise blackbird.utils.error.BlackbirdError( 'Given logger is not logging.Logger instance!' ) if len(logger.handlers) != 1: raise blackbird.utils.error.BlackbirdError( 'Given logger has invalid handlers.' ) if hasattr(logger.handlers[0], 'stream'): return logger.handlers[0].stream # case of setting SysLogHandler to logger.handlers[0] return logger.handlers[0]
python
def get_handler_fp(logger): """ Get handler_fp. This method is integrated to LoggerFactory Object in the future. :param logging.Logger logger: Python logging.Logger. logger instance. :rtype: logging.Logger.handlers.BaseRotatingHandler :return: Handler or Handler's stream. We call it `handler_fp`. """ if not hasattr(logger, 'handlers'): raise blackbird.utils.error.BlackbirdError( 'Given logger is not logging.Logger instance!' ) if len(logger.handlers) != 1: raise blackbird.utils.error.BlackbirdError( 'Given logger has invalid handlers.' ) if hasattr(logger.handlers[0], 'stream'): return logger.handlers[0].stream # case of setting SysLogHandler to logger.handlers[0] return logger.handlers[0]
['def', 'get_handler_fp', '(', 'logger', ')', ':', 'if', 'not', 'hasattr', '(', 'logger', ',', "'handlers'", ')', ':', 'raise', 'blackbird', '.', 'utils', '.', 'error', '.', 'BlackbirdError', '(', "'Given logger is not logging.Logger instance!'", ')', 'if', 'len', '(', 'logger', '.', 'handlers', ')', '!=', '1', ':', 'raise', 'blackbird', '.', 'utils', '.', 'error', '.', 'BlackbirdError', '(', "'Given logger has invalid handlers.'", ')', 'if', 'hasattr', '(', 'logger', '.', 'handlers', '[', '0', ']', ',', "'stream'", ')', ':', 'return', 'logger', '.', 'handlers', '[', '0', ']', '.', 'stream', '# case of setting SysLogHandler to logger.handlers[0]', 'return', 'logger', '.', 'handlers', '[', '0', ']']
Get handler_fp. This method is integrated to LoggerFactory Object in the future. :param logging.Logger logger: Python logging.Logger. logger instance. :rtype: logging.Logger.handlers.BaseRotatingHandler :return: Handler or Handler's stream. We call it `handler_fp`.
['Get', 'handler_fp', '.', 'This', 'method', 'is', 'integrated', 'to', 'LoggerFactory', 'Object', 'in', 'the', 'future', '.', ':', 'param', 'logging', '.', 'Logger', 'logger', ':', 'Python', 'logging', '.', 'Logger', '.', 'logger', 'instance', '.', ':', 'rtype', ':', 'logging', '.', 'Logger', '.', 'handlers', '.', 'BaseRotatingHandler', ':', 'return', ':', 'Handler', 'or', 'Handler', 's', 'stream', '.', 'We', 'call', 'it', 'handler_fp', '.']
train
https://github.com/Vagrants/blackbird/blob/3b38cd5650caae362e0668dbd38bf8f88233e079/blackbird/utils/logger.py#L69-L91
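To see what `get_handler_fp` would hand back, here is a self-contained sketch using only the standard library: a logger with a single `FileHandler` exposes a `.stream` attribute, while a `SysLogHandler` does not.

import logging

logger = logging.getLogger("demo")
logger.addHandler(logging.FileHandler("/tmp/demo.log"))

handler = logger.handlers[0]
# FileHandler has a .stream attribute, so get_handler_fp(logger) would
# return handler.stream; a SysLogHandler has no .stream, so the handler
# object itself would be returned instead.
fp = handler.stream if hasattr(handler, "stream") else handler
print(fp)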
9,238
markuskiller/textblob-de
textblob_de/ext/_pattern/text/search.py
Pattern.fromstring
def fromstring(cls, s, *args, **kwargs):
    """ Returns a new Pattern from the given string.
    Constraints are separated by a space.
    If a constraint contains a space, it must be wrapped in [].
    """
    s = s.replace("\(", "&lparen;")
    s = s.replace("\)", "&rparen;")
    s = s.replace("\[", "&lbrack;")
    s = s.replace("\]", "&rbrack;")
    s = s.replace("\{", "&lcurly;")
    s = s.replace("\}", "&rcurly;")
    p = []
    i = 0
    for m in re.finditer(r"\[.*?\]|\(.*?\)", s):
        # Spaces in a range encapsulated in square brackets are encoded.
        # "[Windows Vista]" is one range, don't split on space.
        p.append(s[i:m.start()])
        p.append(s[m.start():m.end()].replace(" ", "&space;")); i=m.end()
    p.append(s[i:])
    s = "".join(p)
    s = s.replace("][", "] [")
    s = s.replace(")(", ") (")
    s = s.replace("\|", "&vdash;")
    s = re.sub(r"\s+\|\s+", "|", s)
    s = re.sub(r"\s+", " ", s)
    s = re.sub(r"\{\s+", "{", s)
    s = re.sub(r"\s+\}", "}", s)
    s = s.split(" ")
    s = [v.replace("&space;", " ") for v in s]
    P = cls([], *args, **kwargs)
    G, O, i = [], [], 0
    for s in s:
        constraint = Constraint.fromstring(s.strip("{}"), taxonomy=kwargs.get("taxonomy", TAXONOMY))
        constraint.index = len(P.sequence)
        P.sequence.append(constraint)
        # Push a new group on the stack if string starts with "{".
        # Parse constraint from string, add it to all open groups.
        # Pop latest group from stack if string ends with "}".
        # Insert groups in opened-first order (i).
        while s.startswith("{"):
            s = s[1:]
            G.append((i, [])); i+=1
            O.append([])
        for g in G:
            g[1].append(constraint)
        while s.endswith("}"):
            s = s[:-1]
            if G:
                O[G[-1][0]] = G[-1][1]; G.pop()
    P.groups = [g for g in O if g]
    return P
python
def fromstring(cls, s, *args, **kwargs):
    """ Returns a new Pattern from the given string.
    Constraints are separated by a space.
    If a constraint contains a space, it must be wrapped in [].
    """
    s = s.replace("\(", "&lparen;")
    s = s.replace("\)", "&rparen;")
    s = s.replace("\[", "&lbrack;")
    s = s.replace("\]", "&rbrack;")
    s = s.replace("\{", "&lcurly;")
    s = s.replace("\}", "&rcurly;")
    p = []
    i = 0
    for m in re.finditer(r"\[.*?\]|\(.*?\)", s):
        # Spaces in a range encapsulated in square brackets are encoded.
        # "[Windows Vista]" is one range, don't split on space.
        p.append(s[i:m.start()])
        p.append(s[m.start():m.end()].replace(" ", "&space;")); i=m.end()
    p.append(s[i:])
    s = "".join(p)
    s = s.replace("][", "] [")
    s = s.replace(")(", ") (")
    s = s.replace("\|", "&vdash;")
    s = re.sub(r"\s+\|\s+", "|", s)
    s = re.sub(r"\s+", " ", s)
    s = re.sub(r"\{\s+", "{", s)
    s = re.sub(r"\s+\}", "}", s)
    s = s.split(" ")
    s = [v.replace("&space;", " ") for v in s]
    P = cls([], *args, **kwargs)
    G, O, i = [], [], 0
    for s in s:
        constraint = Constraint.fromstring(s.strip("{}"), taxonomy=kwargs.get("taxonomy", TAXONOMY))
        constraint.index = len(P.sequence)
        P.sequence.append(constraint)
        # Push a new group on the stack if string starts with "{".
        # Parse constraint from string, add it to all open groups.
        # Pop latest group from stack if string ends with "}".
        # Insert groups in opened-first order (i).
        while s.startswith("{"):
            s = s[1:]
            G.append((i, [])); i+=1
            O.append([])
        for g in G:
            g[1].append(constraint)
        while s.endswith("}"):
            s = s[:-1]
            if G:
                O[G[-1][0]] = G[-1][1]; G.pop()
    P.groups = [g for g in O if g]
    return P
['def', 'fromstring', '(', 'cls', ',', 's', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 's', '=', 's', '.', 'replace', '(', '"\\("', ',', '"&lparen;"', ')', 's', '=', 's', '.', 'replace', '(', '"\\)"', ',', '"&rparen;"', ')', 's', '=', 's', '.', 'replace', '(', '"\\["', ',', '"&lbrack;"', ')', 's', '=', 's', '.', 'replace', '(', '"\\]"', ',', '"&rbrack;"', ')', 's', '=', 's', '.', 'replace', '(', '"\\{"', ',', '"&lcurly;"', ')', 's', '=', 's', '.', 'replace', '(', '"\\}"', ',', '"&rcurly;"', ')', 'p', '=', '[', ']', 'i', '=', '0', 'for', 'm', 'in', 're', '.', 'finditer', '(', 'r"\\[.*?\\]|\\(.*?\\)"', ',', 's', ')', ':', '# Spaces in a range encapsulated in square brackets are encoded.', '# "[Windows Vista]" is one range, don\'t split on space.', 'p', '.', 'append', '(', 's', '[', 'i', ':', 'm', '.', 'start', '(', ')', ']', ')', 'p', '.', 'append', '(', 's', '[', 'm', '.', 'start', '(', ')', ':', 'm', '.', 'end', '(', ')', ']', '.', 'replace', '(', '" "', ',', '"&space;"', ')', ')', 'i', '=', 'm', '.', 'end', '(', ')', 'p', '.', 'append', '(', 's', '[', 'i', ':', ']', ')', 's', '=', '""', '.', 'join', '(', 'p', ')', 's', '=', 's', '.', 'replace', '(', '"]["', ',', '"] ["', ')', 's', '=', 's', '.', 'replace', '(', '")("', ',', '") ("', ')', 's', '=', 's', '.', 'replace', '(', '"\\|"', ',', '"&vdash;"', ')', 's', '=', 're', '.', 'sub', '(', 'r"\\s+\\|\\s+"', ',', '"|"', ',', 's', ')', 's', '=', 're', '.', 'sub', '(', 'r"\\s+"', ',', '" "', ',', 's', ')', 's', '=', 're', '.', 'sub', '(', 'r"\\{\\s+"', ',', '"{"', ',', 's', ')', 's', '=', 're', '.', 'sub', '(', 'r"\\s+\\}"', ',', '"}"', ',', 's', ')', 's', '=', 's', '.', 'split', '(', '" "', ')', 's', '=', '[', 'v', '.', 'replace', '(', '"&space;"', ',', '" "', ')', 'for', 'v', 'in', 's', ']', 'P', '=', 'cls', '(', '[', ']', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'G', ',', 'O', ',', 'i', '=', '[', ']', ',', '[', ']', ',', '0', 'for', 's', 'in', 's', ':', 'constraint', '=', 'Constraint', '.', 'fromstring', '(', 's', '.', 'strip', '(', '"{}"', ')', ',', 'taxonomy', '=', 'kwargs', '.', 'get', '(', '"taxonomy"', ',', 'TAXONOMY', ')', ')', 'constraint', '.', 'index', '=', 'len', '(', 'P', '.', 'sequence', ')', 'P', '.', 'sequence', '.', 'append', '(', 'constraint', ')', '# Push a new group on the stack if string starts with "{".', '# Parse constraint from string, add it to all open groups.', '# Pop latest group from stack if string ends with "}".', '# Insert groups in opened-first order (i).', 'while', 's', '.', 'startswith', '(', '"{"', ')', ':', 's', '=', 's', '[', '1', ':', ']', 'G', '.', 'append', '(', '(', 'i', ',', '[', ']', ')', ')', 'i', '+=', '1', 'O', '.', 'append', '(', '[', ']', ')', 'for', 'g', 'in', 'G', ':', 'g', '[', '1', ']', '.', 'append', '(', 'constraint', ')', 'while', 's', '.', 'endswith', '(', '"}"', ')', ':', 's', '=', 's', '[', ':', '-', '1', ']', 'if', 'G', ':', 'O', '[', 'G', '[', '-', '1', ']', '[', '0', ']', ']', '=', 'G', '[', '-', '1', ']', '[', '1', ']', 'G', '.', 'pop', '(', ')', 'P', '.', 'groups', '=', '[', 'g', 'for', 'g', 'in', 'O', 'if', 'g', ']', 'return', 'P']
Returns a new Pattern from the given string. Constraints are separated by a space. If a constraint contains a space, it must be wrapped in [].
['Returns', 'a', 'new', 'Pattern', 'from', 'the', 'given', 'string', '.', 'Constraints', 'are', 'separated', 'by', 'a', 'space', '.', 'If', 'a', 'constraint', 'contains', 'a', 'space', 'it', 'must', 'be', 'wrapped', 'in', '[]', '.']
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/search.py#L718-L767
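A small, hedged usage sketch of the pattern-string syntax `fromstring` parses (assuming the upstream `pattern` package that this module vendors): braces mark a match group, and the counts below follow directly from the group-stack logic shown in the record.

from pattern.search import Pattern  # assumed upstream package; this repo vendors a copy

# "{JJ} NN" means: one adjective constraint wrapped in a group, then a noun.
p = Pattern.fromstring("{JJ} NN")
print(len(p.sequence))  # 2 constraints: the JJ and the NN
print(len(p.groups))    # 1 group, holding only the JJ constraint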
9,239
wummel/linkchecker
third_party/dnspython/dns/update.py
Update.present
def present(self, name, *args):
    """Require that an owner name (and optionally an rdata type,
    or specific rdataset) exists as a prerequisite to the
    execution of the update.

    The first argument is always a name.  The other arguments can be:

    - rdataset...

    - rdata...

    - rdtype, string..."""

    if isinstance(name, (str, unicode)):
        name = dns.name.from_text(name, None)
    if len(args) == 0:
        rrset = self.find_rrset(self.answer, name,
                                dns.rdataclass.ANY, dns.rdatatype.ANY,
                                dns.rdatatype.NONE, None,
                                True, True)
    elif isinstance(args[0], dns.rdataset.Rdataset) or \
         isinstance(args[0], dns.rdata.Rdata) or \
         len(args) > 1:
        if not isinstance(args[0], dns.rdataset.Rdataset):
            # Add a 0 TTL
            args = list(args)
            args.insert(0, 0)
        self._add(False, self.answer, name, *args)
    else:
        rdtype = args[0]
        if isinstance(rdtype, (str, unicode)):
            rdtype = dns.rdatatype.from_text(rdtype)
        rrset = self.find_rrset(self.answer, name,
                                dns.rdataclass.ANY, rdtype,
                                dns.rdatatype.NONE, None,
                                True, True)
python
def present(self, name, *args):
    """Require that an owner name (and optionally an rdata type,
    or specific rdataset) exists as a prerequisite to the
    execution of the update.

    The first argument is always a name.  The other arguments can be:

    - rdataset...

    - rdata...

    - rdtype, string..."""

    if isinstance(name, (str, unicode)):
        name = dns.name.from_text(name, None)
    if len(args) == 0:
        rrset = self.find_rrset(self.answer, name,
                                dns.rdataclass.ANY, dns.rdatatype.ANY,
                                dns.rdatatype.NONE, None,
                                True, True)
    elif isinstance(args[0], dns.rdataset.Rdataset) or \
         isinstance(args[0], dns.rdata.Rdata) or \
         len(args) > 1:
        if not isinstance(args[0], dns.rdataset.Rdataset):
            # Add a 0 TTL
            args = list(args)
            args.insert(0, 0)
        self._add(False, self.answer, name, *args)
    else:
        rdtype = args[0]
        if isinstance(rdtype, (str, unicode)):
            rdtype = dns.rdatatype.from_text(rdtype)
        rrset = self.find_rrset(self.answer, name,
                                dns.rdataclass.ANY, rdtype,
                                dns.rdatatype.NONE, None,
                                True, True)
['def', 'present', '(', 'self', ',', 'name', ',', '*', 'args', ')', ':', 'if', 'isinstance', '(', 'name', ',', '(', 'str', ',', 'unicode', ')', ')', ':', 'name', '=', 'dns', '.', 'name', '.', 'from_text', '(', 'name', ',', 'None', ')', 'if', 'len', '(', 'args', ')', '==', '0', ':', 'rrset', '=', 'self', '.', 'find_rrset', '(', 'self', '.', 'answer', ',', 'name', ',', 'dns', '.', 'rdataclass', '.', 'ANY', ',', 'dns', '.', 'rdatatype', '.', 'ANY', ',', 'dns', '.', 'rdatatype', '.', 'NONE', ',', 'None', ',', 'True', ',', 'True', ')', 'elif', 'isinstance', '(', 'args', '[', '0', ']', ',', 'dns', '.', 'rdataset', '.', 'Rdataset', ')', 'or', 'isinstance', '(', 'args', '[', '0', ']', ',', 'dns', '.', 'rdata', '.', 'Rdata', ')', 'or', 'len', '(', 'args', ')', '>', '1', ':', 'if', 'not', 'isinstance', '(', 'args', '[', '0', ']', ',', 'dns', '.', 'rdataset', '.', 'Rdataset', ')', ':', '# Add a 0 TTL', 'args', '=', 'list', '(', 'args', ')', 'args', '.', 'insert', '(', '0', ',', '0', ')', 'self', '.', '_add', '(', 'False', ',', 'self', '.', 'answer', ',', 'name', ',', '*', 'args', ')', 'else', ':', 'rdtype', '=', 'args', '[', '0', ']', 'if', 'isinstance', '(', 'rdtype', ',', '(', 'str', ',', 'unicode', ')', ')', ':', 'rdtype', '=', 'dns', '.', 'rdatatype', '.', 'from_text', '(', 'rdtype', ')', 'rrset', '=', 'self', '.', 'find_rrset', '(', 'self', '.', 'answer', ',', 'name', ',', 'dns', '.', 'rdataclass', '.', 'ANY', ',', 'rdtype', ',', 'dns', '.', 'rdatatype', '.', 'NONE', ',', 'None', ',', 'True', ',', 'True', ')']
Require that an owner name (and optionally an rdata type, or specific rdataset) exists as a prerequisite to the execution of the update. The first argument is always a name. The other arguments can be: - rdataset... - rdata... - rdtype, string...
['Require', 'that', 'an', 'owner', 'name', '(', 'and', 'optionally', 'an', 'rdata', 'type', 'or', 'specific', 'rdataset', ')', 'exists', 'as', 'a', 'prerequisite', 'to', 'the', 'execution', 'of', 'the', 'update', '.', 'The', 'first', 'argument', 'is', 'always', 'a', 'name', '.', 'The', 'other', 'arguments', 'can', 'be', ':']
train
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/dnspython/dns/update.py#L184-L218
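A hedged sketch of `present` acting as a prerequisite in a DNS UPDATE transaction, using dnspython's documented public API; the zone, record name, and server address are made up for illustration.

import dns.query
import dns.update

update = dns.update.Update('example.com.')        # illustrative zone
update.present('www', 'A')                        # prerequisite: www has at least one A record
update.replace('www', 300, 'A', '10.0.0.2')       # then rewrite it
response = dns.query.tcp(update, '10.0.0.1')      # illustrative server address
print(response.rcode())                           # 0 (NOERROR) if the prerequisite held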
9,240
molmod/molmod
molmod/molecular_graphs.py
NRingPattern.complete
def complete(self, match, subject_graph):
    """Check the completeness of the ring match"""
    if not CustomPattern.complete(self, match, subject_graph):
        return False
    if self.strong:
        # If the ring is not strong, return False
        if self.size % 2 == 0:
            # even ring
            for i in range(self.size//2):
                vertex1_start = match.forward[i]
                vertex1_stop = match.forward[(i+self.size//2)%self.size]
                paths = list(subject_graph.iter_shortest_paths(vertex1_start, vertex1_stop))
                if len(paths) != 2:
                    #print "Even ring must have two paths between opposite vertices"
                    return False
                for path in paths:
                    if len(path) != self.size//2+1:
                        #print "Paths between opposite vertices must half the size of the ring+1"
                        return False
        else:
            # odd ring
            for i in range(self.size//2+1):
                vertex1_start = match.forward[i]
                vertex1_stop = match.forward[(i+self.size//2)%self.size]
                paths = list(subject_graph.iter_shortest_paths(vertex1_start, vertex1_stop))
                if len(paths) > 1:
                    return False
                if len(paths[0]) != self.size//2+1:
                    return False
                vertex1_stop = match.forward[(i+self.size//2+1)%self.size]
                paths = list(subject_graph.iter_shortest_paths(vertex1_start, vertex1_stop))
                if len(paths) > 1:
                    return False
                if len(paths[0]) != self.size//2+1:
                    return False
    return True
python
def complete(self, match, subject_graph):
    """Check the completeness of the ring match"""
    if not CustomPattern.complete(self, match, subject_graph):
        return False
    if self.strong:
        # If the ring is not strong, return False
        if self.size % 2 == 0:
            # even ring
            for i in range(self.size//2):
                vertex1_start = match.forward[i]
                vertex1_stop = match.forward[(i+self.size//2)%self.size]
                paths = list(subject_graph.iter_shortest_paths(vertex1_start, vertex1_stop))
                if len(paths) != 2:
                    #print "Even ring must have two paths between opposite vertices"
                    return False
                for path in paths:
                    if len(path) != self.size//2+1:
                        #print "Paths between opposite vertices must half the size of the ring+1"
                        return False
        else:
            # odd ring
            for i in range(self.size//2+1):
                vertex1_start = match.forward[i]
                vertex1_stop = match.forward[(i+self.size//2)%self.size]
                paths = list(subject_graph.iter_shortest_paths(vertex1_start, vertex1_stop))
                if len(paths) > 1:
                    return False
                if len(paths[0]) != self.size//2+1:
                    return False
                vertex1_stop = match.forward[(i+self.size//2+1)%self.size]
                paths = list(subject_graph.iter_shortest_paths(vertex1_start, vertex1_stop))
                if len(paths) > 1:
                    return False
                if len(paths[0]) != self.size//2+1:
                    return False
    return True
['def', 'complete', '(', 'self', ',', 'match', ',', 'subject_graph', ')', ':', 'if', 'not', 'CustomPattern', '.', 'complete', '(', 'self', ',', 'match', ',', 'subject_graph', ')', ':', 'return', 'False', 'if', 'self', '.', 'strong', ':', '# If the ring is not strong, return False', 'if', 'self', '.', 'size', '%', '2', '==', '0', ':', '# even ring', 'for', 'i', 'in', 'range', '(', 'self', '.', 'size', '//', '2', ')', ':', 'vertex1_start', '=', 'match', '.', 'forward', '[', 'i', ']', 'vertex1_stop', '=', 'match', '.', 'forward', '[', '(', 'i', '+', 'self', '.', 'size', '//', '2', ')', '%', 'self', '.', 'size', ']', 'paths', '=', 'list', '(', 'subject_graph', '.', 'iter_shortest_paths', '(', 'vertex1_start', ',', 'vertex1_stop', ')', ')', 'if', 'len', '(', 'paths', ')', '!=', '2', ':', '#print "Even ring must have two paths between opposite vertices"', 'return', 'False', 'for', 'path', 'in', 'paths', ':', 'if', 'len', '(', 'path', ')', '!=', 'self', '.', 'size', '//', '2', '+', '1', ':', '#print "Paths between opposite vertices must half the size of the ring+1"', 'return', 'False', 'else', ':', '# odd ring', 'for', 'i', 'in', 'range', '(', 'self', '.', 'size', '//', '2', '+', '1', ')', ':', 'vertex1_start', '=', 'match', '.', 'forward', '[', 'i', ']', 'vertex1_stop', '=', 'match', '.', 'forward', '[', '(', 'i', '+', 'self', '.', 'size', '//', '2', ')', '%', 'self', '.', 'size', ']', 'paths', '=', 'list', '(', 'subject_graph', '.', 'iter_shortest_paths', '(', 'vertex1_start', ',', 'vertex1_stop', ')', ')', 'if', 'len', '(', 'paths', ')', '>', '1', ':', 'return', 'False', 'if', 'len', '(', 'paths', '[', '0', ']', ')', '!=', 'self', '.', 'size', '//', '2', '+', '1', ':', 'return', 'False', 'vertex1_stop', '=', 'match', '.', 'forward', '[', '(', 'i', '+', 'self', '.', 'size', '//', '2', '+', '1', ')', '%', 'self', '.', 'size', ']', 'paths', '=', 'list', '(', 'subject_graph', '.', 'iter_shortest_paths', '(', 'vertex1_start', ',', 'vertex1_stop', ')', ')', 'if', 'len', '(', 'paths', ')', '>', '1', ':', 'return', 'False', 'if', 'len', '(', 'paths', '[', '0', ']', ')', '!=', 'self', '.', 'size', '//', '2', '+', '1', ':', 'return', 'False', 'return', 'True']
Check the completeness of the ring match
['Check', 'the', 'completeness', 'of', 'the', 'ring', 'match']
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/molecular_graphs.py#L605-L640
9,241
Ffisegydd/whatis
whatis/_util.py
get_types
def get_types(obj, **kwargs):
    """Get the types of an iterable."""
    max_iterable_length = kwargs.get('max_iterable_length', 100000)

    it, = itertools.tee(obj, 1)

    s = set()
    too_big = False
    for i, v in enumerate(it):
        if i <= max_iterable_length:
            s.add(type(v))
        else:
            too_big = True
            break

    return {"types": s, "too_big": too_big}
python
def get_types(obj, **kwargs):
    """Get the types of an iterable."""
    max_iterable_length = kwargs.get('max_iterable_length', 100000)

    it, = itertools.tee(obj, 1)

    s = set()
    too_big = False
    for i, v in enumerate(it):
        if i <= max_iterable_length:
            s.add(type(v))
        else:
            too_big = True
            break

    return {"types": s, "too_big": too_big}
['def', 'get_types', '(', 'obj', ',', '*', '*', 'kwargs', ')', ':', 'max_iterable_length', '=', 'kwargs', '.', 'get', '(', "'max_iterable_length'", ',', '100000', ')', 'it', ',', '=', 'itertools', '.', 'tee', '(', 'obj', ',', '1', ')', 's', '=', 'set', '(', ')', 'too_big', '=', 'False', 'for', 'i', ',', 'v', 'in', 'enumerate', '(', 'it', ')', ':', 'if', 'i', '<=', 'max_iterable_length', ':', 's', '.', 'add', '(', 'type', '(', 'v', ')', ')', 'else', ':', 'too_big', '=', 'True', 'break', 'return', '{', '"types"', ':', 's', ',', '"too_big"', ':', 'too_big', '}']
Get the types of an iterable.
['Get', 'the', 'types', 'of', 'an', 'iterable', '.']
train
https://github.com/Ffisegydd/whatis/blob/eef780ced61aae6d001aeeef7574e5e27e613583/whatis/_util.py#L28-L44
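Usage is a one-liner; this sketch assumes the import path given in the record's `func_path_in_repository`.

from whatis._util import get_types  # path from the record

result = get_types(iter([1, "a", 2.0, 3]))
print(result["types"])    # {<class 'int'>, <class 'str'>, <class 'float'>}
print(result["too_big"])  # False: well under the 100000-item default cap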
9,242
offu/WeRoBot
werobot/session/mongodbstorage.py
MongoDBStorage.get
def get(self, id):
    """
    Get data by id.

    :param id: the id of the data to fetch
    :return: the data found, or an empty ``dict`` if nothing is stored
    """
    document = self._get_document(id)
    if document:
        session_json = document["session"]
        return json_loads(session_json)
    return {}
python
def get(self, id):
    """
    Get data by id.

    :param id: the id of the data to fetch
    :return: the data found, or an empty ``dict`` if nothing is stored
    """
    document = self._get_document(id)
    if document:
        session_json = document["session"]
        return json_loads(session_json)
    return {}
['def', 'get', '(', 'self', ',', 'id', ')', ':', 'document', '=', 'self', '.', '_get_document', '(', 'id', ')', 'if', 'document', ':', 'session_json', '=', 'document', '[', '"session"', ']', 'return', 'json_loads', '(', 'session_json', ')', 'return', '{', '}']
Get data by id. :param id: the id of the data to fetch :return: the data found, or an empty ``dict`` if nothing is stored
['Get', 'data', 'by', 'id', '.']
train
https://github.com/offu/WeRoBot/blob/fd42109105b03f9acf45ebd9dcabb9d5cff98f3c/werobot/session/mongodbstorage.py#L33-L44
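A hedged round-trip sketch for this getter; the `MongoDBStorage` constructor argument and the companion `set` method are assumptions about the class, and the collection name is illustrative.

import pymongo
from werobot.session.mongodbstorage import MongoDBStorage  # path from the record

collection = pymongo.MongoClient()["werobot"]["sessions"]  # illustrative collection
storage = MongoDBStorage(collection)                       # constructor argument assumed

storage.set("wechat_open_id", {"step": 1})  # assumed companion setter
print(storage.get("wechat_open_id"))        # {'step': 1}
print(storage.get("missing_id"))            # {} when nothing is stored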
9,243
manns/pyspread
pyspread/src/gui/_grid.py
GridEventHandlers.OnFind
def OnFind(self, event):
    """Find functionality, called from toolbar, returns find position"""

    # Search starts in next cell after the current one
    gridpos = list(self.grid.actions.cursor)
    text, flags = event.text, event.flags
    findpos = self.grid.actions.find(gridpos, text, flags)

    if findpos is None:
        # If nothing is found mention it in the statusbar and return
        statustext = _("'{text}' not found.").format(text=text)
    else:
        # Otherwise select cell with next occurrence if successful
        self.grid.actions.cursor = findpos

        # Update statusbar
        statustext = _(u"Found '{text}' in cell {key}.")
        statustext = statustext.format(text=text, key=findpos)

    post_command_event(self.grid.main_window, self.grid.StatusBarMsg,
                       text=statustext)

    event.Skip()
python
def OnFind(self, event):
    """Find functionality, called from toolbar, returns find position"""

    # Search starts in next cell after the current one
    gridpos = list(self.grid.actions.cursor)
    text, flags = event.text, event.flags
    findpos = self.grid.actions.find(gridpos, text, flags)

    if findpos is None:
        # If nothing is found mention it in the statusbar and return
        statustext = _("'{text}' not found.").format(text=text)
    else:
        # Otherwise select cell with next occurrence if successful
        self.grid.actions.cursor = findpos

        # Update statusbar
        statustext = _(u"Found '{text}' in cell {key}.")
        statustext = statustext.format(text=text, key=findpos)

    post_command_event(self.grid.main_window, self.grid.StatusBarMsg,
                       text=statustext)

    event.Skip()
['def', 'OnFind', '(', 'self', ',', 'event', ')', ':', '# Search starts in next cell after the current one', 'gridpos', '=', 'list', '(', 'self', '.', 'grid', '.', 'actions', '.', 'cursor', ')', 'text', ',', 'flags', '=', 'event', '.', 'text', ',', 'event', '.', 'flags', 'findpos', '=', 'self', '.', 'grid', '.', 'actions', '.', 'find', '(', 'gridpos', ',', 'text', ',', 'flags', ')', 'if', 'findpos', 'is', 'None', ':', '# If nothing is found mention it in the statusbar and return', 'statustext', '=', '_', '(', '"\'{text}\' not found."', ')', '.', 'format', '(', 'text', '=', 'text', ')', 'else', ':', '# Otherwise select cell with next occurrence if successful', 'self', '.', 'grid', '.', 'actions', '.', 'cursor', '=', 'findpos', '# Update statusbar', 'statustext', '=', '_', '(', 'u"Found \'{text}\' in cell {key}."', ')', 'statustext', '=', 'statustext', '.', 'format', '(', 'text', '=', 'text', ',', 'key', '=', 'findpos', ')', 'post_command_event', '(', 'self', '.', 'grid', '.', 'main_window', ',', 'self', '.', 'grid', '.', 'StatusBarMsg', ',', 'text', '=', 'statustext', ')', 'event', '.', 'Skip', '(', ')']
Find functionality, called from toolbar, returns find position
['Find', 'functionality', 'called', 'from', 'toolbar', 'returns', 'find', 'position']
train
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_grid.py#L1188-L1212
9,244
pyQode/pyqode.core
pyqode/core/widgets/filesystem_treeview.py
FileSystemHelper.paste_from_clipboard
def paste_from_clipboard(self):
    """
    Pastes files from clipboard.
    """
    to = self.get_current_path()
    if os.path.isfile(to):
        to = os.path.abspath(os.path.join(to, os.pardir))
    mime = QtWidgets.QApplication.clipboard().mimeData()

    paste_operation = None
    if mime.hasFormat(self._UrlListMimeData.format(copy=True)):
        paste_operation = True
    elif mime.hasFormat(self._UrlListMimeData.format(copy=False)):
        paste_operation = False
    if paste_operation is not None:
        self._paste(
            self._UrlListMimeData.list_from(mime, copy=paste_operation),
            to, copy=paste_operation)
python
def paste_from_clipboard(self):
    """
    Pastes files from clipboard.
    """
    to = self.get_current_path()
    if os.path.isfile(to):
        to = os.path.abspath(os.path.join(to, os.pardir))
    mime = QtWidgets.QApplication.clipboard().mimeData()

    paste_operation = None
    if mime.hasFormat(self._UrlListMimeData.format(copy=True)):
        paste_operation = True
    elif mime.hasFormat(self._UrlListMimeData.format(copy=False)):
        paste_operation = False
    if paste_operation is not None:
        self._paste(
            self._UrlListMimeData.list_from(mime, copy=paste_operation),
            to, copy=paste_operation)
['def', 'paste_from_clipboard', '(', 'self', ')', ':', 'to', '=', 'self', '.', 'get_current_path', '(', ')', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'to', ')', ':', 'to', '=', 'os', '.', 'path', '.', 'abspath', '(', 'os', '.', 'path', '.', 'join', '(', 'to', ',', 'os', '.', 'pardir', ')', ')', 'mime', '=', 'QtWidgets', '.', 'QApplication', '.', 'clipboard', '(', ')', '.', 'mimeData', '(', ')', 'paste_operation', '=', 'None', 'if', 'mime', '.', 'hasFormat', '(', 'self', '.', '_UrlListMimeData', '.', 'format', '(', 'copy', '=', 'True', ')', ')', ':', 'paste_operation', '=', 'True', 'elif', 'mime', '.', 'hasFormat', '(', 'self', '.', '_UrlListMimeData', '.', 'format', '(', 'copy', '=', 'False', ')', ')', ':', 'paste_operation', '=', 'False', 'if', 'paste_operation', 'is', 'not', 'None', ':', 'self', '.', '_paste', '(', 'self', '.', '_UrlListMimeData', '.', 'list_from', '(', 'mime', ',', 'copy', '=', 'paste_operation', ')', ',', 'to', ',', 'copy', '=', 'paste_operation', ')']
Pastes files from clipboard.
['Pastes', 'files', 'from', 'clipboard', '.']
train
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/widgets/filesystem_treeview.py#L372-L389
9,245
fake-name/ChromeController
ChromeController/Generator/Generated.py
ChromeRemoteDebugInterface.DOM_setOuterHTML
def DOM_setOuterHTML(self, nodeId, outerHTML):
    """
    Function path: DOM.setOuterHTML
        Domain: DOM
        Method name: setOuterHTML

        Parameters:
            Required arguments:
                'nodeId' (type: NodeId) -> Id of the node to set markup for.
                'outerHTML' (type: string) -> Outer HTML markup to set.
        No return value.

    Description: Sets node HTML markup, returns new node id.
    """
    assert isinstance(outerHTML, (str,)), \
        "Argument 'outerHTML' must be of type '['str']'. Received type: '%s'" % type(outerHTML)
    subdom_funcs = self.synchronous_command('DOM.setOuterHTML', nodeId=nodeId,
        outerHTML=outerHTML)
    return subdom_funcs
python
def DOM_setOuterHTML(self, nodeId, outerHTML):
    """
    Function path: DOM.setOuterHTML
        Domain: DOM
        Method name: setOuterHTML

        Parameters:
            Required arguments:
                'nodeId' (type: NodeId) -> Id of the node to set markup for.
                'outerHTML' (type: string) -> Outer HTML markup to set.
        No return value.

    Description: Sets node HTML markup, returns new node id.
    """
    assert isinstance(outerHTML, (str,)), \
        "Argument 'outerHTML' must be of type '['str']'. Received type: '%s'" % type(outerHTML)
    subdom_funcs = self.synchronous_command('DOM.setOuterHTML', nodeId=nodeId,
        outerHTML=outerHTML)
    return subdom_funcs
['def', 'DOM_setOuterHTML', '(', 'self', ',', 'nodeId', ',', 'outerHTML', ')', ':', 'assert', 'isinstance', '(', 'outerHTML', ',', '(', 'str', ',', ')', ')', ',', '"Argument \'outerHTML\' must be of type \'[\'str\']\'. Received type: \'%s\'"', '%', 'type', '(', 'outerHTML', ')', 'subdom_funcs', '=', 'self', '.', 'synchronous_command', '(', "'DOM.setOuterHTML'", ',', 'nodeId', '=', 'nodeId', ',', 'outerHTML', '=', 'outerHTML', ')', 'return', 'subdom_funcs']
Function path: DOM.setOuterHTML Domain: DOM Method name: setOuterHTML Parameters: Required arguments: 'nodeId' (type: NodeId) -> Id of the node to set markup for. 'outerHTML' (type: string) -> Outer HTML markup to set. No return value. Description: Sets node HTML markup, returns new node id.
['Function', 'path', ':', 'DOM', '.', 'setOuterHTML', 'Domain', ':', 'DOM', 'Method', 'name', ':', 'setOuterHTML', 'Parameters', ':', 'Required', 'arguments', ':', 'nodeId', '(', 'type', ':', 'NodeId', ')', '-', '>', 'Id', 'of', 'the', 'node', 'to', 'set', 'markup', 'for', '.', 'outerHTML', '(', 'type', ':', 'string', ')', '-', '>', 'Outer', 'HTML', 'markup', 'to', 'set', '.', 'No', 'return', 'value', '.', 'Description', ':', 'Sets', 'node', 'HTML', 'markup', 'returns', 'new', 'node', 'id', '.']
train
https://github.com/fake-name/ChromeController/blob/914dd136184e8f1165c7aa6ef30418aaf10c61f0/ChromeController/Generator/Generated.py#L3440-L3459
9,246
thiagopbueno/tf-rddlsim
tfrddlsim/simulation/policy_simulator.py
PolicySimulationCell.initial_state
def initial_state(self) -> StateTensor:
    '''Returns the initial state tensor.'''
    s0 = []
    for fluent in self._compiler.compile_initial_state(self._batch_size):
        s0.append(self._output_size(fluent))
    s0 = tuple(s0)
    return s0
python
def initial_state(self) -> StateTensor:
    '''Returns the initial state tensor.'''
    s0 = []
    for fluent in self._compiler.compile_initial_state(self._batch_size):
        s0.append(self._output_size(fluent))
    s0 = tuple(s0)
    return s0
['def', 'initial_state', '(', 'self', ')', '->', 'StateTensor', ':', 's0', '=', '[', ']', 'for', 'fluent', 'in', 'self', '.', '_compiler', '.', 'compile_initial_state', '(', 'self', '.', '_batch_size', ')', ':', 's0', '.', 'append', '(', 'self', '.', '_output_size', '(', 'fluent', ')', ')', 's0', '=', 'tuple', '(', 's0', ')', 'return', 's0']
Returns the initial state tensor.
['Returns', 'the', 'initial', 'state', 'tensor', '.']
train
https://github.com/thiagopbueno/tf-rddlsim/blob/d7102a0ad37d179dbb23141640254ea383d3b43f/tfrddlsim/simulation/policy_simulator.py#L96-L102
9,247
danielperna84/pyhomematic
pyhomematic/devicetypes/helper.py
HelperActorLevel.set_level
def set_level(self, position, channel=None):
    """Seek a specific value by specifying a float() from 0.0 to 1.0."""
    try:
        position = float(position)
    except Exception as err:
        LOG.debug("HelperLevel.set_level: Exception %s" % (err,))
        return False
    self.writeNodeData("LEVEL", position, channel)
python
def set_level(self, position, channel=None):
    """Seek a specific value by specifying a float() from 0.0 to 1.0."""
    try:
        position = float(position)
    except Exception as err:
        LOG.debug("HelperLevel.set_level: Exception %s" % (err,))
        return False
    self.writeNodeData("LEVEL", position, channel)
['def', 'set_level', '(', 'self', ',', 'position', ',', 'channel', '=', 'None', ')', ':', 'try', ':', 'position', '=', 'float', '(', 'position', ')', 'except', 'Exception', 'as', 'err', ':', 'LOG', '.', 'debug', '(', '"HelperLevel.set_level: Exception %s"', '%', '(', 'err', ',', ')', ')', 'return', 'False', 'self', '.', 'writeNodeData', '(', '"LEVEL"', ',', 'position', ',', 'channel', ')']
Seek a specific value by specifying a float() from 0.0 to 1.0.
['Seek', 'a', 'specific', 'value', 'by', 'specifying', 'a', 'float', '()', 'from', '0', '.', '0', 'to', '1', '.', '0', '.']
train
https://github.com/danielperna84/pyhomematic/blob/8b91f3e84c83f05d289c740d507293a0d6759d8e/pyhomematic/devicetypes/helper.py#L158-L166
9,248
SeattleTestbed/seash
modules/uploaddir/__init__.py
upload_directory_contents
def upload_directory_contents(input_dict, environment_dict):
    """This function serves to upload every file in a user-supplied
    source directory to all of the vessels in the current target group.
    It essentially calls seash's `upload` function repeatedly, each time
    with a file name taken from the source directory.

    A note on the input_dict argument:
    `input_dict` contains our own `command_dict` (see below), with the
    `"[ARGUMENT]"` sub-key of `children` renamed to what argument the
    user provided. In our case, this will be the source dir to read from.
    (If not, this is an error!)
    """
    # Check user input and seash state:
    # 1, Make sure there is an active user key.
    if environment_dict["currentkeyname"] is None:
        raise seash_exceptions.UserError("""Error: Please set an identity before using 'uploaddir'!
Example:

 !> loadkeys your_user_name
 !> as your_user_name
your_user_name@ !>
""")

    # 2, Make sure there is a target to work on.
    if environment_dict["currenttarget"] is None:
        raise seash_exceptions.UserError("""Error: Please set a target to work on before using 'uploaddir'!
Example
your_user_name@ !> on browsegood
your_user_name@browsegood !>
""")

    # 3, Complain if we don't have a source dir argument
    try:
        source_directory = input_dict["uploaddir"]["children"].keys()[0]
    except IndexError:
        raise seash_exceptions.UserError("""Error: Missing operand to 'uploaddir'

Please specify which source directory's contents you want uploaded, e.g.
your_user_name@browsegood !> uploaddir a_local_directory

""")

    # Sanity check: Does the source dir exist?
    if not os.path.exists(source_directory):
        raise seash_exceptions.UserError("Error: Source directory '" + source_directory + "' does not exist.")

    # Sanity check: Is the source dir a directory?
    if not os.path.isdir(source_directory):
        raise seash_exceptions.UserError("Error: Source directory '" + source_directory + "' is not a directory.\nDid you mean to use the 'upload' command instead?")

    # Alright --- user input and seash state seem sane, let's do the work!
    # These are the files we will need to upload:
    file_list = os.listdir(source_directory)

    for filename in file_list:
        # We construct the filename-to-be uploaded from the source dir,
        # the OS-specific path separator, and the actual file name.
        # This is enough for `upload_target` to find the file.
        path_and_filename = source_directory + os.sep + filename
        if not os.path.isdir(path_and_filename):
            print "Uploading '" + path_and_filename + "'..."
            # Construct an input_dict containing command args for seash's
            # `upload FILENAME` function.
            # XXX There might be a cleaner way to do this.
            faked_input_dict = {"upload": {"name": "upload",
                "children": {path_and_filename: {"name": "filename"}}}}
            command_callbacks.upload_filename(faked_input_dict, environment_dict)
        else:
            print "Skipping sub-directory '" + filename + "'. You may upload it separately."
python
def upload_directory_contents(input_dict, environment_dict):
    """This function serves to upload every file in a user-supplied
    source directory to all of the vessels in the current target group.
    It essentially calls seash's `upload` function repeatedly, each time
    with a file name taken from the source directory.

    A note on the input_dict argument:
    `input_dict` contains our own `command_dict` (see below), with the
    `"[ARGUMENT]"` sub-key of `children` renamed to what argument the
    user provided. In our case, this will be the source dir to read from.
    (If not, this is an error!)
    """
    # Check user input and seash state:
    # 1, Make sure there is an active user key.
    if environment_dict["currentkeyname"] is None:
        raise seash_exceptions.UserError("""Error: Please set an identity before using 'uploaddir'!
Example:

 !> loadkeys your_user_name
 !> as your_user_name
your_user_name@ !>
""")

    # 2, Make sure there is a target to work on.
    if environment_dict["currenttarget"] is None:
        raise seash_exceptions.UserError("""Error: Please set a target to work on before using 'uploaddir'!
Example
your_user_name@ !> on browsegood
your_user_name@browsegood !>
""")

    # 3, Complain if we don't have a source dir argument
    try:
        source_directory = input_dict["uploaddir"]["children"].keys()[0]
    except IndexError:
        raise seash_exceptions.UserError("""Error: Missing operand to 'uploaddir'

Please specify which source directory's contents you want uploaded, e.g.
your_user_name@browsegood !> uploaddir a_local_directory

""")

    # Sanity check: Does the source dir exist?
    if not os.path.exists(source_directory):
        raise seash_exceptions.UserError("Error: Source directory '" + source_directory + "' does not exist.")

    # Sanity check: Is the source dir a directory?
    if not os.path.isdir(source_directory):
        raise seash_exceptions.UserError("Error: Source directory '" + source_directory + "' is not a directory.\nDid you mean to use the 'upload' command instead?")

    # Alright --- user input and seash state seem sane, let's do the work!
    # These are the files we will need to upload:
    file_list = os.listdir(source_directory)

    for filename in file_list:
        # We construct the filename-to-be uploaded from the source dir,
        # the OS-specific path separator, and the actual file name.
        # This is enough for `upload_target` to find the file.
        path_and_filename = source_directory + os.sep + filename
        if not os.path.isdir(path_and_filename):
            print "Uploading '" + path_and_filename + "'..."
            # Construct an input_dict containing command args for seash's
            # `upload FILENAME` function.
            # XXX There might be a cleaner way to do this.
            faked_input_dict = {"upload": {"name": "upload",
                "children": {path_and_filename: {"name": "filename"}}}}
            command_callbacks.upload_filename(faked_input_dict, environment_dict)
        else:
            print "Skipping sub-directory '" + filename + "'. You may upload it separately."
['def', 'upload_directory_contents', '(', 'input_dict', ',', 'environment_dict', ')', ':', '# Check user input and seash state:', '# 1, Make sure there is an active user key.', 'if', 'environment_dict', '[', '"currentkeyname"', ']', 'is', 'None', ':', 'raise', 'seash_exceptions', '.', 'UserError', '(', '"""Error: Please set an identity before using \'uploaddir\'!\nExample:\n\n !> loadkeys your_user_name\n !> as your_user_name\nyour_user_name@ !>\n"""', ')', '# 2, Make sure there is a target to work on.', 'if', 'environment_dict', '[', '"currenttarget"', ']', 'is', 'None', ':', 'raise', 'seash_exceptions', '.', 'UserError', '(', '"""Error: Please set a target to work on before using \'uploaddir\'!\nExample\nyour_user_name@ !> on browsegood\nyour_user_name@browsegood !> \n"""', ')', "# 3, Complain if we don't have a source dir argument", 'try', ':', 'source_directory', '=', 'input_dict', '[', '"uploaddir"', ']', '[', '"children"', ']', '.', 'keys', '(', ')', '[', '0', ']', 'except', 'IndexError', ':', 'raise', 'seash_exceptions', '.', 'UserError', '(', '"""Error: Missing operand to \'uploaddir\'\n\nPlease specify which source directory\'s contents you want uploaded, e.g.\nyour_user_name@browsegood !> uploaddir a_local_directory\n\n"""', ')', '# Sanity check: Does the source dir exist?', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'source_directory', ')', ':', 'raise', 'seash_exceptions', '.', 'UserError', '(', '"Error: Source directory \'"', '+', 'source_directory', '+', '"\' does not exist."', ')', '# Sanity check: Is the source dir a directory?', 'if', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'source_directory', ')', ':', 'raise', 'seash_exceptions', '.', 'UserError', '(', '"Error: Source directory \'"', '+', 'source_directory', '+', '"\' is not a directory.\\nDid you mean to use the \'upload\' command instead?"', ')', "# Alright --- user input and seash state seem sane, let's do the work!", '# These are the files we will need to upload:', 'file_list', '=', 'os', '.', 'listdir', '(', 'source_directory', ')', 'for', 'filename', 'in', 'file_list', ':', '# We construct the filename-to-be uploaded from the source dir, ', '# the OS-specific path separator, and the actual file name. ', '# This is enough for `upload_target` to find the file.', 'path_and_filename', '=', 'source_directory', '+', 'os', '.', 'sep', '+', 'filename', 'if', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'path_and_filename', ')', ':', 'print', '"Uploading \'"', '+', 'path_and_filename', '+', '"\'..."', "# Construct an input_dict containing command args for seash's ", '# `upload FILENAME` function.', '# XXX There might be a cleaner way to do this.', 'faked_input_dict', '=', '{', '"upload"', ':', '{', '"name"', ':', '"upload"', ',', '"children"', ':', '{', 'path_and_filename', ':', '{', '"name"', ':', '"filename"', '}', '}', '}', '}', 'command_callbacks', '.', 'upload_filename', '(', 'faked_input_dict', ',', 'environment_dict', ')', 'else', ':', 'print', '"Skipping sub-directory \'"', '+', 'filename', '+', '"\'. You may upload it separately."']
This function serves to upload every file in a user-supplied source directory to all of the vessels in the current target group. It essentially calls seash's `upload` function repeatedly, each time with a file name taken from the source directory. A note on the input_dict argument: `input_dict` contains our own `command_dict` (see below), with the `"[ARGUMENT]"` sub-key of `children` renamed to what argument the user provided. In our case, this will be the source dir to read from. (If not, this is an error!)
['This', 'function', 'serves', 'to', 'upload', 'every', 'file', 'in', 'a', 'user', '-', 'supplied', 'source', 'directory', 'to', 'all', 'of', 'the', 'vessels', 'in', 'the', 'current', 'target', 'group', '.', 'It', 'essentially', 'calls', 'seash', 's', 'upload', 'function', 'repeatedly', 'each', 'time', 'with', 'a', 'file', 'name', 'taken', 'from', 'the', 'source', 'directory', '.']
train
https://github.com/SeattleTestbed/seash/blob/40f9d2285662ff8b61e0468b4196acee089b273b/modules/uploaddir/__init__.py#L26-L95
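The only non-obvious step above is the parse-tree-shaped `faked_input_dict`. This self-contained snippet builds the exact dict handed to `upload_filename` for one file; the directory and file names are hypothetical.

import os

source_directory = "a_local_directory"   # hypothetical user argument
filename = "experiment.r2py"             # hypothetical file in that directory

path_and_filename = source_directory + os.sep + filename
faked_input_dict = {
    "upload": {
        "name": "upload",
        # seash normally builds this tree from the command line; here the
        # "[ARGUMENT]" child is keyed by the actual file name.
        "children": {path_and_filename: {"name": "filename"}},
    }
}
print(faked_input_dict)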
9,249
cslarsen/crianza
crianza/genetic.py
randomize
def randomize(vm,
        length=(10,10),
        ints=(0,999),
        strs=(1,10),
        chars=(32,126),
        instruction_ratio=0.5,
        number_string_ratio=0.8,
        exclude=map(crianza.instructions.lookup, [".", "exit", "read", "write", "str"]),
        restrict_to=None):
    """Replaces existing code with completely random instructions.

    Does not optimize code after generating it.

    Args:
        length: Tuple of minimum and maximum code lengths. Code length will
            be a random number between these two, inclusive values.

        ints: Integers in the code will be selected at random from this
            inclusive range.

        strs: Inclusive range of the length of strings in the code.

        chars: Inclusive range of characters in random strings.

        instruction_ratio: Ratio of instructions to numbers/strings,
            meaning that if this value is 0.5 then there will be just as
            many instructions in the code as there are numbers and strings.

        number_string_ratio: Ratio of numbers to strings.

        exclude: Excluded instructions. For genetic programming, one wants
            to avoid the program hanging waiting for user input. The default
            value is to exclude console i/o and debug instructions.

        restrict_to: Limit instructions to the given list.

    Returns:
        The VM.
    """
    vm.code = []

    instructions = set(vm.instructions.values()) - set(exclude)
    if restrict_to is not None:
        instructions = instructions.intersection(set(restrict_to))
    instructions = list(instructions)

    for _ in xrange(random.randint(*length)):
        r = random.random()
        if r <= instruction_ratio:
            # Generate a random instruction
            vm.code.append(random.choice(instructions))
        elif r <= number_string_ratio:
            # Generate a random number
            vm.code.append(crianza.compiler.make_embedded_push(random.randint(*ints)))
        else:
            # Generate a random string
            vm.code.append(crianza.compiler.make_embedded_push('%s' % "".join(chr(random.randint(*chars))
                for n in xrange(0, random.randint(*strs)))))

    return vm
python
def randomize(vm,
        length=(10,10),
        ints=(0,999),
        strs=(1,10),
        chars=(32,126),
        instruction_ratio=0.5,
        number_string_ratio=0.8,
        exclude=map(crianza.instructions.lookup, [".", "exit", "read", "write", "str"]),
        restrict_to=None):
    """Replaces existing code with completely random instructions.

    Does not optimize code after generating it.

    Args:
        length: Tuple of minimum and maximum code lengths. Code length will
            be a random number between these two, inclusive values.

        ints: Integers in the code will be selected at random from this
            inclusive range.

        strs: Inclusive range of the length of strings in the code.

        chars: Inclusive range of characters in random strings.

        instruction_ratio: Ratio of instructions to numbers/strings,
            meaning that if this value is 0.5 then there will be just as
            many instructions in the code as there are numbers and strings.

        number_string_ratio: Ratio of numbers to strings.

        exclude: Excluded instructions. For genetic programming, one wants
            to avoid the program hanging waiting for user input. The default
            value is to exclude console i/o and debug instructions.

        restrict_to: Limit instructions to the given list.

    Returns:
        The VM.
    """
    vm.code = []

    instructions = set(vm.instructions.values()) - set(exclude)
    if restrict_to is not None:
        instructions = instructions.intersection(set(restrict_to))
    instructions = list(instructions)

    for _ in xrange(random.randint(*length)):
        r = random.random()
        if r <= instruction_ratio:
            # Generate a random instruction
            vm.code.append(random.choice(instructions))
        elif r <= number_string_ratio:
            # Generate a random number
            vm.code.append(crianza.compiler.make_embedded_push(random.randint(*ints)))
        else:
            # Generate a random string
            vm.code.append(crianza.compiler.make_embedded_push('%s' % "".join(chr(random.randint(*chars))
                for n in xrange(0, random.randint(*strs)))))

    return vm
['def', 'randomize', '(', 'vm', ',', 'length', '=', '(', '10', ',', '10', ')', ',', 'ints', '=', '(', '0', ',', '999', ')', ',', 'strs', '=', '(', '1', ',', '10', ')', ',', 'chars', '=', '(', '32', ',', '126', ')', ',', 'instruction_ratio', '=', '0.5', ',', 'number_string_ratio', '=', '0.8', ',', 'exclude', '=', 'map', '(', 'crianza', '.', 'instructions', '.', 'lookup', ',', '[', '"."', ',', '"exit"', ',', '"read"', ',', '"write"', ',', '"str"', ']', ')', ',', 'restrict_to', '=', 'None', ')', ':', 'vm', '.', 'code', '=', '[', ']', 'instructions', '=', 'set', '(', 'vm', '.', 'instructions', '.', 'values', '(', ')', ')', '-', 'set', '(', 'exclude', ')', 'if', 'restrict_to', 'is', 'not', 'None', ':', 'instructions', '=', 'instructions', '.', 'intersection', '(', 'set', '(', 'restrict_to', ')', ')', 'instructions', '=', 'list', '(', 'instructions', ')', 'for', '_', 'in', 'xrange', '(', 'random', '.', 'randint', '(', '*', 'length', ')', ')', ':', 'r', '=', 'random', '.', 'random', '(', ')', 'if', 'r', '<=', 'instruction_ratio', ':', '# Generate a random instruction', 'vm', '.', 'code', '.', 'append', '(', 'random', '.', 'choice', '(', 'instructions', ')', ')', 'elif', 'r', '<=', 'number_string_ratio', ':', '# Generate a random number', 'vm', '.', 'code', '.', 'append', '(', 'crianza', '.', 'compiler', '.', 'make_embedded_push', '(', 'random', '.', 'randint', '(', '*', 'ints', ')', ')', ')', 'else', ':', '# Generate a random string', 'vm', '.', 'code', '.', 'append', '(', 'crianza', '.', 'compiler', '.', 'make_embedded_push', '(', "'%s'", '%', '""', '.', 'join', '(', 'chr', '(', 'random', '.', 'randint', '(', '*', 'chars', ')', ')', 'for', 'n', 'in', 'xrange', '(', '0', ',', 'random', '.', 'randint', '(', '*', 'strs', ')', ')', ')', ')', ')', 'return', 'vm']
Replaces existing code with completely random instructions. Does not optimize code after generating it. Args: length: Tuple of minimum and maximum code lengths. Code length will be a random number between these two, inclusive values. ints: Integers in the code will be selected at random from this inclusive range. strs: Inclusive range of the length of strings in the code. chars: Inclusive range of characters in random strings. instruction_ratio: Ratio of instructions to numbers/strings, meaning that if this value is 0.5 then there will be just as many instructions in the code as there are numbers and strings. number_string_ratio: Ratio of numbers to strings. exclude: Excluded instructions. For genetic programming, one wants to avoid the program hanging waiting for user input. The default value is to exclude console i/o and debug instructions. restrict_to: Limit instructions to the given list. Returns: The VM.
['Replaces', 'existing', 'code', 'with', 'completely', 'random', 'instructions', '.', 'Does', 'not', 'optimize', 'code', 'after', 'generating', 'it', '.']
train
https://github.com/cslarsen/crianza/blob/fa044f9d491f37cc06892bad14b2c80b8ac5a7cd/crianza/genetic.py#L67-L127
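A minimal, self-contained sketch of the two-threshold draw that decides what each generated element becomes; it isolates the ratio logic only and does not touch the crianza VM API.

import random

instruction_ratio = 0.5
number_string_ratio = 0.8

counts = {"instruction": 0, "number": 0, "string": 0}
for _ in range(10000):
    r = random.random()
    if r <= instruction_ratio:
        counts["instruction"] += 1   # roughly 50% of elements
    elif r <= number_string_ratio:
        counts["number"] += 1        # the next roughly 30%
    else:
        counts["string"] += 1        # the remaining roughly 20%
print(counts)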
9,250
mdiener/grace
grace/py27/cssmin.py
remove_unnecessary_whitespace
def remove_unnecessary_whitespace(css):
    """Remove unnecessary whitespace characters."""

    def pseudoclasscolon(css):
        """
        Prevents 'p :link' from becoming 'p:link'.

        Translates 'p :link' into 'p ___PSEUDOCLASSCOLON___link'; this is
        translated back again later.
        """
        regex = re.compile(r"(^|\})(([^\{\:])+\:)+([^\{]*\{)")
        match = regex.search(css)
        while match:
            css = ''.join([
                css[:match.start()],
                match.group().replace(":", "___PSEUDOCLASSCOLON___"),
                css[match.end():]])
            match = regex.search(css)
        return css

    css = pseudoclasscolon(css)

    # Remove spaces from before things.
    css = re.sub(r"\s+([!{};:>+\(\)\],])", r"\1", css)

    # If there is a `@charset`, then only allow one, and move to the beginning.
    css = re.sub(r"^(.*)(@charset \"[^\"]*\";)", r"\2\1", css)
    css = re.sub(r"^(\s*@charset [^;]+;\s*)+", r"\1", css)

    # Put the space back in for a few cases, such as `@media screen` and
    # `(-webkit-min-device-pixel-ratio:0)`.
    css = re.sub(r"\band\(", "and (", css)

    # Put the colons back.
    css = css.replace('___PSEUDOCLASSCOLON___', ':')

    # Remove spaces from after things.
    css = re.sub(r"([!{}:;>+\(\[,])\s+", r"\1", css)

    return css
python
def remove_unnecessary_whitespace(css):
    """Remove unnecessary whitespace characters."""

    def pseudoclasscolon(css):
        """
        Prevents 'p :link' from becoming 'p:link'.

        Translates 'p :link' into 'p ___PSEUDOCLASSCOLON___link'; this is
        translated back again later.
        """
        regex = re.compile(r"(^|\})(([^\{\:])+\:)+([^\{]*\{)")
        match = regex.search(css)
        while match:
            css = ''.join([
                css[:match.start()],
                match.group().replace(":", "___PSEUDOCLASSCOLON___"),
                css[match.end():]])
            match = regex.search(css)
        return css

    css = pseudoclasscolon(css)

    # Remove spaces from before things.
    css = re.sub(r"\s+([!{};:>+\(\)\],])", r"\1", css)

    # If there is a `@charset`, then only allow one, and move to the beginning.
    css = re.sub(r"^(.*)(@charset \"[^\"]*\";)", r"\2\1", css)
    css = re.sub(r"^(\s*@charset [^;]+;\s*)+", r"\1", css)

    # Put the space back in for a few cases, such as `@media screen` and
    # `(-webkit-min-device-pixel-ratio:0)`.
    css = re.sub(r"\band\(", "and (", css)

    # Put the colons back.
    css = css.replace('___PSEUDOCLASSCOLON___', ':')

    # Remove spaces from after things.
    css = re.sub(r"([!{}:;>+\(\[,])\s+", r"\1", css)

    return css
['def', 'remove_unnecessary_whitespace', '(', 'css', ')', ':', 'def', 'pseudoclasscolon', '(', 'css', ')', ':', '"""\n Prevents \'p :link\' from becoming \'p:link\'.\n\n Translates \'p :link\' into \'p ___PSEUDOCLASSCOLON___link\'; this is\n translated back again later.\n """', 'regex', '=', 're', '.', 'compile', '(', 'r"(^|\\})(([^\\{\\:])+\\:)+([^\\{]*\\{)"', ')', 'match', '=', 'regex', '.', 'search', '(', 'css', ')', 'while', 'match', ':', 'css', '=', "''", '.', 'join', '(', '[', 'css', '[', ':', 'match', '.', 'start', '(', ')', ']', ',', 'match', '.', 'group', '(', ')', '.', 'replace', '(', '":"', ',', '"___PSEUDOCLASSCOLON___"', ')', ',', 'css', '[', 'match', '.', 'end', '(', ')', ':', ']', ']', ')', 'match', '=', 'regex', '.', 'search', '(', 'css', ')', 'return', 'css', 'css', '=', 'pseudoclasscolon', '(', 'css', ')', '# Remove spaces from before things.', 'css', '=', 're', '.', 'sub', '(', 'r"\\s+([!{};:>+\\(\\)\\],])"', ',', 'r"\\1"', ',', 'css', ')', '# If there is a `@charset`, then only allow one, and move to the beginning.', 'css', '=', 're', '.', 'sub', '(', 'r"^(.*)(@charset \\"[^\\"]*\\";)"', ',', 'r"\\2\\1"', ',', 'css', ')', 'css', '=', 're', '.', 'sub', '(', 'r"^(\\s*@charset [^;]+;\\s*)+"', ',', 'r"\\1"', ',', 'css', ')', '# Put the space back in for a few cases, such as `@media screen` and', '# `(-webkit-min-device-pixel-ratio:0)`.', 'css', '=', 're', '.', 'sub', '(', 'r"\\band\\("', ',', '"and ("', ',', 'css', ')', '# Put the colons back.', 'css', '=', 'css', '.', 'replace', '(', "'___PSEUDOCLASSCOLON___'", ',', "':'", ')', '# Remove spaces from after things.', 'css', '=', 're', '.', 'sub', '(', 'r"([!{}:;>+\\(\\[,])\\s+"', ',', 'r"\\1"', ',', 'css', ')', 'return', 'css']
Remove unnecessary whitespace characters.
['Remove', 'unnecessary', 'whitespace', 'characters', '.']
train
https://github.com/mdiener/grace/blob/2dab13a2cf636da5da989904c5885166fc94d36d/grace/py27/cssmin.py#L53-L93
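A quick sketch of the minifier on a tiny stylesheet, using the import path from the record; the expected output in the comment is derived from the regexes above and should be treated as illustrative.

from grace.py27.cssmin import remove_unnecessary_whitespace  # path from the record

css = "p :link { color : red ; }"
print(remove_unnecessary_whitespace(css))
# The 'p :link' selector keeps its space thanks to pseudoclasscolon();
# expect something like: p :link{color:red;}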
9,251
aws/sagemaker-containers
src/sagemaker_containers/_modules.py
prepare
def prepare(path, name):  # type: (str, str) -> None
    """Prepare a Python script (or module) to be imported as a module.
    If the script does not contain a setup.py file, it creates a minimal setup.

    Args:
        path (str): path to directory with the script or module.
        name (str): name of the script or module.
    """
    setup_path = os.path.join(path, 'setup.py')
    if not os.path.exists(setup_path):
        data = textwrap.dedent("""
        from setuptools import setup
        setup(packages=[''],
              name="%s",
              version='1.0.0',
              include_package_data=True)
        """ % name)

        logger.info('Module %s does not provide a setup.py. \nGenerating setup.py' % name)
        _files.write_file(setup_path, data)

        data = textwrap.dedent("""
        [wheel]
        universal = 1
        """)

        logger.info('Generating setup.cfg')
        _files.write_file(os.path.join(path, 'setup.cfg'), data)

        data = textwrap.dedent("""
        recursive-include . *
        recursive-exclude . __pycache__*
        recursive-exclude . *.pyc
        recursive-exclude . *.pyo
        """)

        logger.info('Generating MANIFEST.in')
        _files.write_file(os.path.join(path, 'MANIFEST.in'), data)
python
def prepare(path, name):  # type: (str, str) -> None
    """Prepare a Python script (or module) to be imported as a module.
    If the script does not contain a setup.py file, it creates a minimal setup.

    Args:
        path (str): path to directory with the script or module.
        name (str): name of the script or module.
    """
    setup_path = os.path.join(path, 'setup.py')
    if not os.path.exists(setup_path):
        data = textwrap.dedent("""
        from setuptools import setup
        setup(packages=[''],
              name="%s",
              version='1.0.0',
              include_package_data=True)
        """ % name)

        logger.info('Module %s does not provide a setup.py. \nGenerating setup.py' % name)
        _files.write_file(setup_path, data)

        data = textwrap.dedent("""
        [wheel]
        universal = 1
        """)

        logger.info('Generating setup.cfg')
        _files.write_file(os.path.join(path, 'setup.cfg'), data)

        data = textwrap.dedent("""
        recursive-include . *
        recursive-exclude . __pycache__*
        recursive-exclude . *.pyc
        recursive-exclude . *.pyo
        """)

        logger.info('Generating MANIFEST.in')
        _files.write_file(os.path.join(path, 'MANIFEST.in'), data)
['def', 'prepare', '(', 'path', ',', 'name', ')', ':', '# type: (str, str) -> None', 'setup_path', '=', 'os', '.', 'path', '.', 'join', '(', 'path', ',', "'setup.py'", ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'setup_path', ')', ':', 'data', '=', 'textwrap', '.', 'dedent', '(', '"""\n from setuptools import setup\n setup(packages=[\'\'],\n name="%s",\n version=\'1.0.0\',\n include_package_data=True)\n """', '%', 'name', ')', 'logger', '.', 'info', '(', "'Module %s does not provide a setup.py. \\nGenerating setup.py'", '%', 'name', ')', '_files', '.', 'write_file', '(', 'setup_path', ',', 'data', ')', 'data', '=', 'textwrap', '.', 'dedent', '(', '"""\n [wheel]\n universal = 1\n """', ')', 'logger', '.', 'info', '(', "'Generating setup.cfg'", ')', '_files', '.', 'write_file', '(', 'os', '.', 'path', '.', 'join', '(', 'path', ',', "'setup.cfg'", ')', ',', 'data', ')', 'data', '=', 'textwrap', '.', 'dedent', '(', '"""\n recursive-include . *\n recursive-exclude . __pycache__*\n recursive-exclude . *.pyc\n recursive-exclude . *.pyo\n """', ')', 'logger', '.', 'info', '(', "'Generating MANIFEST.in'", ')', '_files', '.', 'write_file', '(', 'os', '.', 'path', '.', 'join', '(', 'path', ',', "'MANIFEST.in'", ')', ',', 'data', ')']
Prepare a Python script (or module) to be imported as a module. If the script does not contain a setup.py file, it creates a minimal setup. Args: path (str): path to directory with the script or module. name (str): name of the script or module.
['Prepare', 'a', 'Python', 'script', '(', 'or', 'module', ')', 'to', 'be', 'imported', 'as', 'a', 'module', '.', 'If', 'the', 'script', 'does', 'not', 'contain', 'a', 'setup', '.', 'py', 'file', 'it', 'creates', 'a', 'minimal', 'setup', '.', 'Args', ':', 'path', '(', 'str', ')', ':', 'path', 'to', 'directory', 'with', 'the', 'script', 'or', 'module', '.', 'name', '(', 'str', ')', ':', 'name', 'of', 'the', 'script', 'or', 'module', '.']
train
https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_modules.py#L54-L93
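A hedged usage sketch: pointing `prepare` at a directory without a setup.py should leave behind the three generated files (import path from the record; the module name is illustrative).

import os
import tempfile

from sagemaker_containers import _modules  # path from the record

path = tempfile.mkdtemp()
_modules.prepare(path, "my_training_module")  # module name is illustrative

print(sorted(os.listdir(path)))
# expected: ['MANIFEST.in', 'setup.cfg', 'setup.py']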
9,252
grycap/RADL
radl/radl.py
Features.hasFeature
def hasFeature(self, prop, check_softs=False):
    """Return if there is a property with that name."""

    return prop in self.props or (check_softs and
            any([fs.hasFeature(prop) for fs in self.props.get(SoftFeatures.SOFT, [])]))
python
def hasFeature(self, prop, check_softs=False):
    """Return if there is a property with that name."""

    return prop in self.props or (check_softs and
            any([fs.hasFeature(prop) for fs in self.props.get(SoftFeatures.SOFT, [])]))
['def', 'hasFeature', '(', 'self', ',', 'prop', ',', 'check_softs', '=', 'False', ')', ':', 'return', 'prop', 'in', 'self', '.', 'props', 'or', '(', 'check_softs', 'and', 'any', '(', '[', 'fs', '.', 'hasFeature', '(', 'prop', ')', 'for', 'fs', 'in', 'self', '.', 'props', '.', 'get', '(', 'SoftFeatures', '.', 'SOFT', ',', '[', ']', ')', ']', ')', ')']
Return if there is a property with that name.
['Return', 'if', 'there', 'is', 'a', 'property', 'with', 'that', 'name', '.']
train
https://github.com/grycap/RADL/blob/03ccabb0313a48a5aa0e20c1f7983fddcb95e9cb/radl/radl.py#L312-L316
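A stand-in showing the two lookup paths in the record above, a direct hit in props or a scan of nested soft-feature bundles; the RADL classes are faked here with plain dicts.

SOFT = 'soft'                                             # stand-in for SoftFeatures.SOFT
props = {'cpu.count': 2, SOFT: [{'memory.size': 1024}]}   # faked feature store

def has_feature(prop, check_softs=False):
    return prop in props or (check_softs and
                             any(prop in fs for fs in props.get(SOFT, [])))

assert has_feature('cpu.count')
assert has_feature('memory.size', check_softs=True)
assert not has_feature('memory.size')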
9,253
dhylands/rshell
rshell/main.py
rm
def rm(filename, recursive=False, force=False): """Removes a file or directory tree.""" return auto(remove_file, filename, recursive, force)
python
def rm(filename, recursive=False, force=False): """Removes a file or directory tree.""" return auto(remove_file, filename, recursive, force)
['def', 'rm', '(', 'filename', ',', 'recursive', '=', 'False', ',', 'force', '=', 'False', ')', ':', 'return', 'auto', '(', 'remove_file', ',', 'filename', ',', 'recursive', ',', 'force', ')']
Removes a file or directory tree.
['Removes', 'a', 'file', 'or', 'directory', 'tree', '.']
train
https://github.com/dhylands/rshell/blob/a92a8fa8074ac792241c83c640a51b394667c324/rshell/main.py#L834-L836
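The record above dispatches through rshell's auto() helper to act on a connected board; as a purely local analogue (assumption: a plain filesystem, no MicroPython board), the recursive/force semantics look like this.

import os
import shutil

def rm_local(filename, recursive=False, force=False):  # local stand-in, not rshell's rm
    try:
        if recursive and os.path.isdir(filename):
            shutil.rmtree(filename)       # remove a whole directory tree
        else:
            os.remove(filename)           # remove a single file
    except FileNotFoundError:
        if not force:                     # force swallows "already gone" errors
            raise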
9,254
camptocamp/Studio
studio/controllers/mapfiles.py
MapfilesController._get_map_from_user_by_id
def _get_map_from_user_by_id(self, user, map_id): """ Get a mapfile owned by a user from the database by map_id. """ req = Session.query(Map).select_from(join(Map, User)) try: return req.filter(and_(User.login==user, Map.id==map_id)).one() except Exception, e: return None
python
def _get_map_from_user_by_id(self, user, map_id): """ Get a mapfile owned by a user from the database by map_id. """ req = Session.query(Map).select_from(join(Map, User)) try: return req.filter(and_(User.login==user, Map.id==map_id)).one() except Exception, e: return None
['def', '_get_map_from_user_by_id', '(', 'self', ',', 'user', ',', 'map_id', ')', ':', 'req', '=', 'Session', '.', 'query', '(', 'Map', ')', '.', 'select_from', '(', 'join', '(', 'Map', ',', 'User', ')', ')', 'try', ':', 'return', 'req', '.', 'filter', '(', 'and_', '(', 'User', '.', 'login', '==', 'user', ',', 'Map', '.', 'id', '==', 'map_id', ')', ')', '.', 'one', '(', ')', 'except', 'Exception', ',', 'e', ':', 'return', 'None']
Get a mapfile owned by a user from the database by map_id.
['Get', 'a', 'mapfile', 'owned', 'by', 'a', 'user', 'from', 'the', 'database', 'by', 'map_id', '.']
train
https://github.com/camptocamp/Studio/blob/43cb7298434fb606b15136801b79b03571a2f27e/studio/controllers/mapfiles.py#L197-L204
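The record above uses Python 2 except syntax; this is a runnable Python 3 sketch of the same join-and-filter lookup against an in-memory SQLite database. The User/Map models are stand-ins, not Studio's real schema.

from sqlalchemy import create_engine, Column, Integer, String, ForeignKey, and_
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class User(Base):                                   # stand-in model
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    login = Column(String)

class Map(Base):                                    # stand-in model
    __tablename__ = 'maps'
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('users.id'))

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

def get_map_from_user_by_id(user, map_id):
    try:
        return (session.query(Map).join(User)
                .filter(and_(User.login == user, Map.id == map_id)).one())
    except Exception:
        return None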
9,255
f3at/feat
src/feat/common/reflect.py
named_module
def named_module(name): """Returns a module given its name.""" module = __import__(name) packages = name.split(".")[1:] m = module for p in packages: m = getattr(m, p) return m
python
def named_module(name): """Returns a module given its name.""" module = __import__(name) packages = name.split(".")[1:] m = module for p in packages: m = getattr(m, p) return m
['def', 'named_module', '(', 'name', ')', ':', 'module', '=', '__import__', '(', 'name', ')', 'packages', '=', 'name', '.', 'split', '(', '"."', ')', '[', '1', ':', ']', 'm', '=', 'module', 'for', 'p', 'in', 'packages', ':', 'm', '=', 'getattr', '(', 'm', ',', 'p', ')', 'return', 'm']
Returns a module given its name.
['Returns', 'a', 'module', 'given', 'its', 'name', '.']
train
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/common/reflect.py#L55-L62
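Self-contained illustration of why the attribute walk in the record is needed: __import__('a.b') returns the top-level package a, so the helper must descend to the leaf module.

def named_module(name):
    module = __import__(name)               # returns the top-level package
    for part in name.split(".")[1:]:
        module = getattr(module, part)      # walk down to the leaf module
    return module

import os.path
assert named_module("os.path") is os.path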
9,256
Shizmob/pydle
pydle/features/isupport.py
ISUPPORTSupport.on_isupport_maxbans
async def on_isupport_maxbans(self, value): """ Maximum entries in ban list. Replaced by MAXLIST. """ if 'MAXLIST' not in self._isupport: if not self._list_limits: self._list_limits = {} self._list_limits['b'] = int(value)
python
async def on_isupport_maxbans(self, value): """ Maximum entries in ban list. Replaced by MAXLIST. """ if 'MAXLIST' not in self._isupport: if not self._list_limits: self._list_limits = {} self._list_limits['b'] = int(value)
['async', 'def', 'on_isupport_maxbans', '(', 'self', ',', 'value', ')', ':', 'if', "'MAXLIST'", 'not', 'in', 'self', '.', '_isupport', ':', 'if', 'not', 'self', '.', '_list_limits', ':', 'self', '.', '_list_limits', '=', '{', '}', 'self', '.', '_list_limits', '[', "'b'", ']', '=', 'int', '(', 'value', ')']
Maximum entries in ban list. Replaced by MAXLIST.
['Maximum', 'entries', 'in', 'ban', 'list', '.', 'Replaced', 'by', 'MAXLIST', '.']
train
https://github.com/Shizmob/pydle/blob/7ec7d65d097318ed0bcdc5d8401470287d8c7cf7/pydle/features/isupport.py#L144-L149
9,257
ArtoLabs/SimpleSteem
simplesteem/simplesteem.py
SimpleSteem.rshares_to_steem
def rshares_to_steem (self, rshares): ''' Gets the reward pool balances then calculates rshares to steem ''' self.reward_pool_balances() return round( rshares * self.reward_balance / self.recent_claims * self.base, 4)
python
def rshares_to_steem (self, rshares): ''' Gets the reward pool balances then calculates rshares to steem ''' self.reward_pool_balances() return round( rshares * self.reward_balance / self.recent_claims * self.base, 4)
['def', 'rshares_to_steem', '(', 'self', ',', 'rshares', ')', ':', 'self', '.', 'reward_pool_balances', '(', ')', 'return', 'round', '(', 'rshares', '*', 'self', '.', 'reward_balance', '/', 'self', '.', 'recent_claims', '*', 'self', '.', 'base', ',', '4', ')']
Gets the reward pool balances then calculates rshares to steem
['Gets', 'the', 'reward', 'pool', 'balances', 'then', 'calculates', 'rshares', 'to', 'steem']
train
https://github.com/ArtoLabs/SimpleSteem/blob/ce8be0ae81f8878b460bc156693f1957f7dd34a3/simplesteem/simplesteem.py#L202-L211
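A worked instance of the conversion formula in the record, with entirely made-up pool numbers; the real values come from the chain's reward-pool state.

reward_balance = 840000.0   # STEEM in the reward pool (illustrative)
recent_claims = 4.0e17      # recent rshares claims (illustrative)
base = 0.25                 # price feed base (illustrative)
rshares = 6.0e10
payout = round(rshares * reward_balance / recent_claims * base, 4)
# -> 0.0315 with these made-up numbers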
9,258
peterbrittain/asciimatics
asciimatics/particles.py
ParticleEmitter.update
def update(self): """ The function to draw a new frame for the particle system. """ # Spawn new particles if required if self.time_left > 0: self.time_left -= 1 for _ in range(self._count): new_particle = self._new_particle() if new_particle is not None: self.particles.append(new_particle) # Now draw them all for particle in self.particles: # Clear our the old particle last = particle.last() if last is not None: char, x, y, fg, attr, bg = last screen_data = self._screen.get_from(x, y) if self._blend and screen_data: index = self._find_colour(particle, 0, screen_data) - 1 fg, attr, bg = particle.colours[max(index, 0)] self._screen.print_at(" ", x, y, fg, attr, bg) if particle.time < particle.life_time: # Draw the new one char, x, y, fg, attr, bg = particle.next() screen_data = self._screen.get_from(x, y) if self._blend and screen_data: index = self._find_colour(particle, -1, screen_data) + 1 fg, attr, bg = \ particle.colours[min(index, len(particle.colours) - 1)] self._screen.print_at(char, x, y, fg, attr, bg) else: self.particles.remove(particle)
python
def update(self): """ The function to draw a new frame for the particle system. """ # Spawn new particles if required if self.time_left > 0: self.time_left -= 1 for _ in range(self._count): new_particle = self._new_particle() if new_particle is not None: self.particles.append(new_particle) # Now draw them all for particle in self.particles: # Clear our the old particle last = particle.last() if last is not None: char, x, y, fg, attr, bg = last screen_data = self._screen.get_from(x, y) if self._blend and screen_data: index = self._find_colour(particle, 0, screen_data) - 1 fg, attr, bg = particle.colours[max(index, 0)] self._screen.print_at(" ", x, y, fg, attr, bg) if particle.time < particle.life_time: # Draw the new one char, x, y, fg, attr, bg = particle.next() screen_data = self._screen.get_from(x, y) if self._blend and screen_data: index = self._find_colour(particle, -1, screen_data) + 1 fg, attr, bg = \ particle.colours[min(index, len(particle.colours) - 1)] self._screen.print_at(char, x, y, fg, attr, bg) else: self.particles.remove(particle)
['def', 'update', '(', 'self', ')', ':', '# Spawn new particles if required', 'if', 'self', '.', 'time_left', '>', '0', ':', 'self', '.', 'time_left', '-=', '1', 'for', '_', 'in', 'range', '(', 'self', '.', '_count', ')', ':', 'new_particle', '=', 'self', '.', '_new_particle', '(', ')', 'if', 'new_particle', 'is', 'not', 'None', ':', 'self', '.', 'particles', '.', 'append', '(', 'new_particle', ')', '# Now draw them all', 'for', 'particle', 'in', 'self', '.', 'particles', ':', '# Clear our the old particle', 'last', '=', 'particle', '.', 'last', '(', ')', 'if', 'last', 'is', 'not', 'None', ':', 'char', ',', 'x', ',', 'y', ',', 'fg', ',', 'attr', ',', 'bg', '=', 'last', 'screen_data', '=', 'self', '.', '_screen', '.', 'get_from', '(', 'x', ',', 'y', ')', 'if', 'self', '.', '_blend', 'and', 'screen_data', ':', 'index', '=', 'self', '.', '_find_colour', '(', 'particle', ',', '0', ',', 'screen_data', ')', '-', '1', 'fg', ',', 'attr', ',', 'bg', '=', 'particle', '.', 'colours', '[', 'max', '(', 'index', ',', '0', ')', ']', 'self', '.', '_screen', '.', 'print_at', '(', '" "', ',', 'x', ',', 'y', ',', 'fg', ',', 'attr', ',', 'bg', ')', 'if', 'particle', '.', 'time', '<', 'particle', '.', 'life_time', ':', '# Draw the new one', 'char', ',', 'x', ',', 'y', ',', 'fg', ',', 'attr', ',', 'bg', '=', 'particle', '.', 'next', '(', ')', 'screen_data', '=', 'self', '.', '_screen', '.', 'get_from', '(', 'x', ',', 'y', ')', 'if', 'self', '.', '_blend', 'and', 'screen_data', ':', 'index', '=', 'self', '.', '_find_colour', '(', 'particle', ',', '-', '1', ',', 'screen_data', ')', '+', '1', 'fg', ',', 'attr', ',', 'bg', '=', 'particle', '.', 'colours', '[', 'min', '(', 'index', ',', 'len', '(', 'particle', '.', 'colours', ')', '-', '1', ')', ']', 'self', '.', '_screen', '.', 'print_at', '(', 'char', ',', 'x', ',', 'y', ',', 'fg', ',', 'attr', ',', 'bg', ')', 'else', ':', 'self', '.', 'particles', '.', 'remove', '(', 'particle', ')']
The function to draw a new frame for the particle system.
['The', 'function', 'to', 'draw', 'a', 'new', 'frame', 'for', 'the', 'particle', 'system', '.']
train
https://github.com/peterbrittain/asciimatics/blob/f471427d7786ce2d5f1eeb2dae0e67d19e46e085/asciimatics/particles.py#L167-L201
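The clamping trick used twice in the record, in isolation: indices into a particle's colour ramp are pinned to the valid range with max/min so blending never walks off either end.

colours = [(0, 0, 0), (8, 0, 0), (15, 1, 0)]   # illustrative (fg, attr, bg) ramp
index = -1
fg, attr, bg = colours[max(index, 0)]                    # never below 0
index = 5
fg, attr, bg = colours[min(index, len(colours) - 1)]     # never past the end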
9,259
dreipol/django-collectfaster
collectfaster/management/commands/collectstatic.py
Command.gevent_worker
def gevent_worker(self): """ Process one task after another by calling the handler (`copy_file` or `copy_link`) method of the super class. """ while not self.task_queue.empty(): task_kwargs = self.task_queue.get() handler_type = task_kwargs.pop('handler_type') if handler_type == 'link': super(Command, self).link_file(**task_kwargs) else: super(Command, self).copy_file(**task_kwargs)
python
def gevent_worker(self): """ Process one task after another by calling the handler (`copy_file` or `copy_link`) method of the super class. """ while not self.task_queue.empty(): task_kwargs = self.task_queue.get() handler_type = task_kwargs.pop('handler_type') if handler_type == 'link': super(Command, self).link_file(**task_kwargs) else: super(Command, self).copy_file(**task_kwargs)
['def', 'gevent_worker', '(', 'self', ')', ':', 'while', 'not', 'self', '.', 'task_queue', '.', 'empty', '(', ')', ':', 'task_kwargs', '=', 'self', '.', 'task_queue', '.', 'get', '(', ')', 'handler_type', '=', 'task_kwargs', '.', 'pop', '(', "'handler_type'", ')', 'if', 'handler_type', '==', "'link'", ':', 'super', '(', 'Command', ',', 'self', ')', '.', 'link_file', '(', '*', '*', 'task_kwargs', ')', 'else', ':', 'super', '(', 'Command', ',', 'self', ')', '.', 'copy_file', '(', '*', '*', 'task_kwargs', ')']
Process one task after another by calling the handler (`copy_file` or `copy_link`) method of the super class.
['Process', 'one', 'task', 'after', 'another', 'by', 'calling', 'the', 'handler', '(', 'copy_file', 'or', 'copy_link', ')', 'method', 'of', 'the', 'super', 'class', '.']
train
https://github.com/dreipol/django-collectfaster/blob/13ac0df7d153a49b8c3596692741dcab441d57ce/collectfaster/management/commands/collectstatic.py#L128-L139
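A minimal runnable sketch of the same drain-the-queue worker pattern with gevent; the payload keys and the print stand in for collectfaster's real copy/link handlers.

import gevent
from gevent.queue import Queue

task_queue = Queue()
for i in range(4):
    task_queue.put({'handler_type': 'copy', 'path': 'file%d' % i})

def worker():
    while not task_queue.empty():
        task_kwargs = task_queue.get()
        handler_type = task_kwargs.pop('handler_type')
        print(handler_type, task_kwargs)    # dispatch to the real handler here

gevent.joinall([gevent.spawn(worker) for _ in range(2)])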
9,260
b3j0f/schema
b3j0f/schema/lang/factory.py
getresource
def getresource(self, schemacls, name): """Get a resource from a builder name. :param type schemacls: waited schema class. :param str name: builder name to use. :return: resource returned by the right builder.getresource(schema). """ return _SCHEMAFACTORY.getresource(schemacls=schemacls, name=name)
python
def getresource(self, schemacls, name): """Get a resource from a builder name. :param type schemacls: waited schema class. :param str name: builder name to use. :return: resource returned by the right builder.getresource(schema). """ return _SCHEMAFACTORY.getresource(schemacls=schemacls, name=name)
['def', 'getresource', '(', 'self', ',', 'schemacls', ',', 'name', ')', ':', 'return', '_SCHEMAFACTORY', '.', 'getresource', '(', 'schemacls', '=', 'schemacls', ',', 'name', '=', 'name', ')']
Get a resource from a builder name. :param type schemacls: waited schema class. :param str name: builder name to use. :return: resource returned by the right builder.getresource(schema).
['Get', 'a', 'resource', 'from', 'a', 'builder', 'name', '.']
train
https://github.com/b3j0f/schema/blob/1c88c23337f5fef50254e65bd407112c43396dd9/b3j0f/schema/lang/factory.py#L216-L223
9,261
gwastro/pycbc
pycbc/inference/option_utils.py
data_from_cli
def data_from_cli(opts): """Loads the data needed for a model from the given command-line options. Gates specifed on the command line are also applied. Parameters ---------- opts : ArgumentParser parsed args Argument options parsed from a command line string (the sort of thing returned by `parser.parse_args`). Returns ------- strain_dict : dict Dictionary of instruments -> `TimeSeries` strain. stilde_dict : dict Dictionary of instruments -> `FrequencySeries` strain. psd_dict : dict Dictionary of instruments -> `FrequencySeries` psds. """ # get gates to apply gates = gates_from_cli(opts) psd_gates = psd_gates_from_cli(opts) # get strain time series instruments = opts.instruments if opts.instruments is not None else [] strain_dict = strain_from_cli_multi_ifos(opts, instruments, precision="double") # apply gates if not waiting to overwhiten if not opts.gate_overwhitened: strain_dict = apply_gates_to_td(strain_dict, gates) # get strain time series to use for PSD estimation # if user has not given the PSD time options then use same data as analysis if opts.psd_start_time and opts.psd_end_time: logging.info("Will generate a different time series for PSD " "estimation") psd_opts = opts psd_opts.gps_start_time = psd_opts.psd_start_time psd_opts.gps_end_time = psd_opts.psd_end_time psd_strain_dict = strain_from_cli_multi_ifos(psd_opts, instruments, precision="double") # apply any gates logging.info("Applying gates to PSD data") psd_strain_dict = apply_gates_to_td(psd_strain_dict, psd_gates) elif opts.psd_start_time or opts.psd_end_time: raise ValueError("Must give --psd-start-time and --psd-end-time") else: psd_strain_dict = strain_dict # FFT strain and save each of the length of the FFT, delta_f, and # low frequency cutoff to a dict stilde_dict = {} length_dict = {} delta_f_dict = {} low_frequency_cutoff_dict = low_frequency_cutoff_from_cli(opts) for ifo in instruments: stilde_dict[ifo] = strain_dict[ifo].to_frequencyseries() length_dict[ifo] = len(stilde_dict[ifo]) delta_f_dict[ifo] = stilde_dict[ifo].delta_f # get PSD as frequency series psd_dict = psd_from_cli_multi_ifos( opts, length_dict, delta_f_dict, low_frequency_cutoff_dict, instruments, strain_dict=psd_strain_dict, precision="double") # apply any gates to overwhitened data, if desired if opts.gate_overwhitened and opts.gate is not None: logging.info("Applying gates to overwhitened data") # overwhiten the data for ifo in gates: stilde_dict[ifo] /= psd_dict[ifo] stilde_dict = apply_gates_to_fd(stilde_dict, gates) # unwhiten the data for the model for ifo in gates: stilde_dict[ifo] *= psd_dict[ifo] return strain_dict, stilde_dict, psd_dict
python
def data_from_cli(opts): """Loads the data needed for a model from the given command-line options. Gates specifed on the command line are also applied. Parameters ---------- opts : ArgumentParser parsed args Argument options parsed from a command line string (the sort of thing returned by `parser.parse_args`). Returns ------- strain_dict : dict Dictionary of instruments -> `TimeSeries` strain. stilde_dict : dict Dictionary of instruments -> `FrequencySeries` strain. psd_dict : dict Dictionary of instruments -> `FrequencySeries` psds. """ # get gates to apply gates = gates_from_cli(opts) psd_gates = psd_gates_from_cli(opts) # get strain time series instruments = opts.instruments if opts.instruments is not None else [] strain_dict = strain_from_cli_multi_ifos(opts, instruments, precision="double") # apply gates if not waiting to overwhiten if not opts.gate_overwhitened: strain_dict = apply_gates_to_td(strain_dict, gates) # get strain time series to use for PSD estimation # if user has not given the PSD time options then use same data as analysis if opts.psd_start_time and opts.psd_end_time: logging.info("Will generate a different time series for PSD " "estimation") psd_opts = opts psd_opts.gps_start_time = psd_opts.psd_start_time psd_opts.gps_end_time = psd_opts.psd_end_time psd_strain_dict = strain_from_cli_multi_ifos(psd_opts, instruments, precision="double") # apply any gates logging.info("Applying gates to PSD data") psd_strain_dict = apply_gates_to_td(psd_strain_dict, psd_gates) elif opts.psd_start_time or opts.psd_end_time: raise ValueError("Must give --psd-start-time and --psd-end-time") else: psd_strain_dict = strain_dict # FFT strain and save each of the length of the FFT, delta_f, and # low frequency cutoff to a dict stilde_dict = {} length_dict = {} delta_f_dict = {} low_frequency_cutoff_dict = low_frequency_cutoff_from_cli(opts) for ifo in instruments: stilde_dict[ifo] = strain_dict[ifo].to_frequencyseries() length_dict[ifo] = len(stilde_dict[ifo]) delta_f_dict[ifo] = stilde_dict[ifo].delta_f # get PSD as frequency series psd_dict = psd_from_cli_multi_ifos( opts, length_dict, delta_f_dict, low_frequency_cutoff_dict, instruments, strain_dict=psd_strain_dict, precision="double") # apply any gates to overwhitened data, if desired if opts.gate_overwhitened and opts.gate is not None: logging.info("Applying gates to overwhitened data") # overwhiten the data for ifo in gates: stilde_dict[ifo] /= psd_dict[ifo] stilde_dict = apply_gates_to_fd(stilde_dict, gates) # unwhiten the data for the model for ifo in gates: stilde_dict[ifo] *= psd_dict[ifo] return strain_dict, stilde_dict, psd_dict
['def', 'data_from_cli', '(', 'opts', ')', ':', '# get gates to apply', 'gates', '=', 'gates_from_cli', '(', 'opts', ')', 'psd_gates', '=', 'psd_gates_from_cli', '(', 'opts', ')', '# get strain time series', 'instruments', '=', 'opts', '.', 'instruments', 'if', 'opts', '.', 'instruments', 'is', 'not', 'None', 'else', '[', ']', 'strain_dict', '=', 'strain_from_cli_multi_ifos', '(', 'opts', ',', 'instruments', ',', 'precision', '=', '"double"', ')', '# apply gates if not waiting to overwhiten', 'if', 'not', 'opts', '.', 'gate_overwhitened', ':', 'strain_dict', '=', 'apply_gates_to_td', '(', 'strain_dict', ',', 'gates', ')', '# get strain time series to use for PSD estimation', '# if user has not given the PSD time options then use same data as analysis', 'if', 'opts', '.', 'psd_start_time', 'and', 'opts', '.', 'psd_end_time', ':', 'logging', '.', 'info', '(', '"Will generate a different time series for PSD "', '"estimation"', ')', 'psd_opts', '=', 'opts', 'psd_opts', '.', 'gps_start_time', '=', 'psd_opts', '.', 'psd_start_time', 'psd_opts', '.', 'gps_end_time', '=', 'psd_opts', '.', 'psd_end_time', 'psd_strain_dict', '=', 'strain_from_cli_multi_ifos', '(', 'psd_opts', ',', 'instruments', ',', 'precision', '=', '"double"', ')', '# apply any gates', 'logging', '.', 'info', '(', '"Applying gates to PSD data"', ')', 'psd_strain_dict', '=', 'apply_gates_to_td', '(', 'psd_strain_dict', ',', 'psd_gates', ')', 'elif', 'opts', '.', 'psd_start_time', 'or', 'opts', '.', 'psd_end_time', ':', 'raise', 'ValueError', '(', '"Must give --psd-start-time and --psd-end-time"', ')', 'else', ':', 'psd_strain_dict', '=', 'strain_dict', '# FFT strain and save each of the length of the FFT, delta_f, and', '# low frequency cutoff to a dict', 'stilde_dict', '=', '{', '}', 'length_dict', '=', '{', '}', 'delta_f_dict', '=', '{', '}', 'low_frequency_cutoff_dict', '=', 'low_frequency_cutoff_from_cli', '(', 'opts', ')', 'for', 'ifo', 'in', 'instruments', ':', 'stilde_dict', '[', 'ifo', ']', '=', 'strain_dict', '[', 'ifo', ']', '.', 'to_frequencyseries', '(', ')', 'length_dict', '[', 'ifo', ']', '=', 'len', '(', 'stilde_dict', '[', 'ifo', ']', ')', 'delta_f_dict', '[', 'ifo', ']', '=', 'stilde_dict', '[', 'ifo', ']', '.', 'delta_f', '# get PSD as frequency series', 'psd_dict', '=', 'psd_from_cli_multi_ifos', '(', 'opts', ',', 'length_dict', ',', 'delta_f_dict', ',', 'low_frequency_cutoff_dict', ',', 'instruments', ',', 'strain_dict', '=', 'psd_strain_dict', ',', 'precision', '=', '"double"', ')', '# apply any gates to overwhitened data, if desired', 'if', 'opts', '.', 'gate_overwhitened', 'and', 'opts', '.', 'gate', 'is', 'not', 'None', ':', 'logging', '.', 'info', '(', '"Applying gates to overwhitened data"', ')', '# overwhiten the data', 'for', 'ifo', 'in', 'gates', ':', 'stilde_dict', '[', 'ifo', ']', '/=', 'psd_dict', '[', 'ifo', ']', 'stilde_dict', '=', 'apply_gates_to_fd', '(', 'stilde_dict', ',', 'gates', ')', '# unwhiten the data for the model', 'for', 'ifo', 'in', 'gates', ':', 'stilde_dict', '[', 'ifo', ']', '*=', 'psd_dict', '[', 'ifo', ']', 'return', 'strain_dict', ',', 'stilde_dict', ',', 'psd_dict']
Loads the data needed for a model from the given command-line options. Gates specifed on the command line are also applied. Parameters ---------- opts : ArgumentParser parsed args Argument options parsed from a command line string (the sort of thing returned by `parser.parse_args`). Returns ------- strain_dict : dict Dictionary of instruments -> `TimeSeries` strain. stilde_dict : dict Dictionary of instruments -> `FrequencySeries` strain. psd_dict : dict Dictionary of instruments -> `FrequencySeries` psds.
['Loads', 'the', 'data', 'needed', 'for', 'a', 'model', 'from', 'the', 'given', 'command', '-', 'line', 'options', '.', 'Gates', 'specifed', 'on', 'the', 'command', 'line', 'are', 'also', 'applied', '.']
train
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/option_utils.py#L63-L141
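The overwhiten-gate-unwhiten ordering at the end of the record, in miniature; plain numpy arrays stand in for pycbc FrequencySeries, and zeroing one bin is only a toy stand-in for the real gating.

import numpy as np

stilde = np.array([4.0, 9.0, 16.0])   # stand-in frequency-domain strain
psd = np.array([2.0, 3.0, 4.0])       # stand-in PSD
stilde /= psd                          # overwhiten
stilde[1] = 0.0                        # "gate" (toy stand-in for apply_gates_to_fd)
stilde *= psd                          # undo the whitening for the model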
9,262
espenak/djangosenchatools
djangosenchatools/management/commands/senchatoolsbuild.py
get_installed_extjs_apps
def get_installed_extjs_apps(): """ Get all installed extjs apps. :return: List of ``(appdir, module, appname)``. """ installed_apps = [] checked = set() for app in settings.INSTALLED_APPS: if not app.startswith('django.') and not app in checked: checked.add(app) try: installed_apps.append(get_appinfo(app)) except LookupError, e: pass return installed_apps
python
def get_installed_extjs_apps(): """ Get all installed extjs apps. :return: List of ``(appdir, module, appname)``. """ installed_apps = [] checked = set() for app in settings.INSTALLED_APPS: if not app.startswith('django.') and not app in checked: checked.add(app) try: installed_apps.append(get_appinfo(app)) except LookupError, e: pass return installed_apps
['def', 'get_installed_extjs_apps', '(', ')', ':', 'installed_apps', '=', '[', ']', 'checked', '=', 'set', '(', ')', 'for', 'app', 'in', 'settings', '.', 'INSTALLED_APPS', ':', 'if', 'not', 'app', '.', 'startswith', '(', "'django.'", ')', 'and', 'not', 'app', 'in', 'checked', ':', 'checked', '.', 'add', '(', 'app', ')', 'try', ':', 'installed_apps', '.', 'append', '(', 'get_appinfo', '(', 'app', ')', ')', 'except', 'LookupError', ',', 'e', ':', 'pass', 'return', 'installed_apps']
Get all installed extjs apps. :return: List of ``(appdir, module, appname)``.
['Get', 'all', 'installed', 'extjs', 'apps', '.']
train
https://github.com/espenak/djangosenchatools/blob/da1bca9365300de303e833de4b4bd57671c1d11a/djangosenchatools/management/commands/senchatoolsbuild.py#L34-L49
9,263
UCL-INGI/INGInious
inginious/frontend/accessible_time.py
parse_date
def parse_date(date, default=None): """ Parse a valid date """ if date == "": if default is not None: return default else: raise Exception("Unknown format for " + date) for format_type in ["%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H:%M", "%Y-%m-%d %H", "%Y-%m-%d", "%d/%m/%Y %H:%M:%S", "%d/%m/%Y %H:%M", "%d/%m/%Y %H", "%d/%m/%Y"]: try: return datetime.strptime(date, format_type) except ValueError: pass raise Exception("Unknown format for " + date)
python
def parse_date(date, default=None): """ Parse a valid date """ if date == "": if default is not None: return default else: raise Exception("Unknown format for " + date) for format_type in ["%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H:%M", "%Y-%m-%d %H", "%Y-%m-%d", "%d/%m/%Y %H:%M:%S", "%d/%m/%Y %H:%M", "%d/%m/%Y %H", "%d/%m/%Y"]: try: return datetime.strptime(date, format_type) except ValueError: pass raise Exception("Unknown format for " + date)
['def', 'parse_date', '(', 'date', ',', 'default', '=', 'None', ')', ':', 'if', 'date', '==', '""', ':', 'if', 'default', 'is', 'not', 'None', ':', 'return', 'default', 'else', ':', 'raise', 'Exception', '(', '"Unknown format for "', '+', 'date', ')', 'for', 'format_type', 'in', '[', '"%Y-%m-%d %H:%M:%S"', ',', '"%Y-%m-%d %H:%M"', ',', '"%Y-%m-%d %H"', ',', '"%Y-%m-%d"', ',', '"%d/%m/%Y %H:%M:%S"', ',', '"%d/%m/%Y %H:%M"', ',', '"%d/%m/%Y %H"', ',', '"%d/%m/%Y"', ']', ':', 'try', ':', 'return', 'datetime', '.', 'strptime', '(', 'date', ',', 'format_type', ')', 'except', 'ValueError', ':', 'pass', 'raise', 'Exception', '(', '"Unknown format for "', '+', 'date', ')']
Parse a valid date
['Parse', 'a', 'valid', 'date']
train
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/accessible_time.py#L11-L25
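The record's parser is self-contained apart from the datetime import, so its behavior can be exercised directly; note the format list tries ISO-style dates before day/month/year.

from datetime import datetime

formats = ["%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H:%M", "%Y-%m-%d %H", "%Y-%m-%d",
           "%d/%m/%Y %H:%M:%S", "%d/%m/%Y %H:%M", "%d/%m/%Y %H", "%d/%m/%Y"]

def parse_date(date, default=None):               # same logic as the record above
    if date == "":
        if default is not None:
            return default
        raise Exception("Unknown format for " + date)
    for fmt in formats:
        try:
            return datetime.strptime(date, fmt)
        except ValueError:
            pass
    raise Exception("Unknown format for " + date)

assert parse_date("2024-03-01 12:30") == datetime(2024, 3, 1, 12, 30)
assert parse_date("01/03/2024") == datetime(2024, 3, 1)   # day/month/year order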
9,264
CamDavidsonPilon/lifelines
lifelines/fitters/log_logistic_aft_fitter.py
LogLogisticAFTFitter.predict_cumulative_hazard
def predict_cumulative_hazard(self, X, times=None, ancillary_X=None): """ Return the cumulative hazard rate of subjects in X at time points. Parameters ---------- X: numpy array or DataFrame a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns can be in any order. If a numpy array, columns must be in the same order as the training data. times: iterable, optional an iterable of increasing times to predict the cumulative hazard at. Default is the set of all durations (observed and unobserved). Uses a linear interpolation if points in time are not in the index. ancillary_X: numpy array or DataFrame, optional a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns can be in any order. If a numpy array, columns must be in the same order as the training data. Returns ------- cumulative_hazard_ : DataFrame the cumulative hazard of individuals over the timeline """ times = coalesce(times, self.timeline, np.unique(self.durations)) alpha_, beta_ = self._prep_inputs_for_prediction_and_return_scores(X, ancillary_X) return pd.DataFrame(np.log1p(np.outer(times, 1 / alpha_) ** beta_), columns=_get_index(X), index=times)
python
def predict_cumulative_hazard(self, X, times=None, ancillary_X=None): """ Return the cumulative hazard rate of subjects in X at time points. Parameters ---------- X: numpy array or DataFrame a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns can be in any order. If a numpy array, columns must be in the same order as the training data. times: iterable, optional an iterable of increasing times to predict the cumulative hazard at. Default is the set of all durations (observed and unobserved). Uses a linear interpolation if points in time are not in the index. ancillary_X: numpy array or DataFrame, optional a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns can be in any order. If a numpy array, columns must be in the same order as the training data. Returns ------- cumulative_hazard_ : DataFrame the cumulative hazard of individuals over the timeline """ times = coalesce(times, self.timeline, np.unique(self.durations)) alpha_, beta_ = self._prep_inputs_for_prediction_and_return_scores(X, ancillary_X) return pd.DataFrame(np.log1p(np.outer(times, 1 / alpha_) ** beta_), columns=_get_index(X), index=times)
['def', 'predict_cumulative_hazard', '(', 'self', ',', 'X', ',', 'times', '=', 'None', ',', 'ancillary_X', '=', 'None', ')', ':', 'times', '=', 'coalesce', '(', 'times', ',', 'self', '.', 'timeline', ',', 'np', '.', 'unique', '(', 'self', '.', 'durations', ')', ')', 'alpha_', ',', 'beta_', '=', 'self', '.', '_prep_inputs_for_prediction_and_return_scores', '(', 'X', ',', 'ancillary_X', ')', 'return', 'pd', '.', 'DataFrame', '(', 'np', '.', 'log1p', '(', 'np', '.', 'outer', '(', 'times', ',', '1', '/', 'alpha_', ')', '**', 'beta_', ')', ',', 'columns', '=', '_get_index', '(', 'X', ')', ',', 'index', '=', 'times', ')']
Return the cumulative hazard rate of subjects in X at time points. Parameters ---------- X: numpy array or DataFrame a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns can be in any order. If a numpy array, columns must be in the same order as the training data. times: iterable, optional an iterable of increasing times to predict the cumulative hazard at. Default is the set of all durations (observed and unobserved). Uses a linear interpolation if points in time are not in the index. ancillary_X: numpy array or DataFrame, optional a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns can be in any order. If a numpy array, columns must be in the same order as the training data. Returns ------- cumulative_hazard_ : DataFrame the cumulative hazard of individuals over the timeline
['Return', 'the', 'cumulative', 'hazard', 'rate', 'of', 'subjects', 'in', 'X', 'at', 'time', 'points', '.']
train
https://github.com/CamDavidsonPilon/lifelines/blob/bdf6be6f1d10eea4c46365ee0ee6a47d8c30edf8/lifelines/fitters/log_logistic_aft_fitter.py#L157-L183
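The closed form vectorised by np.outer in the record is the log-logistic cumulative hazard H(t) = log(1 + (t/alpha)^beta); a tiny numeric check with illustrative scale/shape values for a single subject.

import numpy as np

times = np.array([1.0, 5.0, 10.0])
alpha_, beta_ = 8.0, 2.0                         # illustrative parameters
H = np.log1p((times / alpha_) ** beta_)
# equivalently: np.log1p(np.outer(times, 1 / alpha_) ** beta_).ravel()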
9,265
dylanaraps/bum
bum/song.py
get_art
def get_art(cache_dir, size, client): """Get the album art.""" song = client.currentsong() if len(song) < 2: print("album: Nothing currently playing.") return file_name = f"{song['artist']}_{song['album']}_{size}.jpg".replace("/", "") file_name = cache_dir / file_name if file_name.is_file(): shutil.copy(file_name, cache_dir / "current.jpg") print("album: Found cached art.") else: print("album: Downloading album art...") brainz.init() album_art = brainz.get_cover(song, size) if album_art: util.bytes_to_file(album_art, cache_dir / file_name) util.bytes_to_file(album_art, cache_dir / "current.jpg") print(f"album: Swapped art to {song['artist']}, {song['album']}.")
python
def get_art(cache_dir, size, client): """Get the album art.""" song = client.currentsong() if len(song) < 2: print("album: Nothing currently playing.") return file_name = f"{song['artist']}_{song['album']}_{size}.jpg".replace("/", "") file_name = cache_dir / file_name if file_name.is_file(): shutil.copy(file_name, cache_dir / "current.jpg") print("album: Found cached art.") else: print("album: Downloading album art...") brainz.init() album_art = brainz.get_cover(song, size) if album_art: util.bytes_to_file(album_art, cache_dir / file_name) util.bytes_to_file(album_art, cache_dir / "current.jpg") print(f"album: Swapped art to {song['artist']}, {song['album']}.")
['def', 'get_art', '(', 'cache_dir', ',', 'size', ',', 'client', ')', ':', 'song', '=', 'client', '.', 'currentsong', '(', ')', 'if', 'len', '(', 'song', ')', '<', '2', ':', 'print', '(', '"album: Nothing currently playing."', ')', 'return', 'file_name', '=', 'f"{song[\'artist\']}_{song[\'album\']}_{size}.jpg"', '.', 'replace', '(', '"/"', ',', '""', ')', 'file_name', '=', 'cache_dir', '/', 'file_name', 'if', 'file_name', '.', 'is_file', '(', ')', ':', 'shutil', '.', 'copy', '(', 'file_name', ',', 'cache_dir', '/', '"current.jpg"', ')', 'print', '(', '"album: Found cached art."', ')', 'else', ':', 'print', '(', '"album: Downloading album art..."', ')', 'brainz', '.', 'init', '(', ')', 'album_art', '=', 'brainz', '.', 'get_cover', '(', 'song', ',', 'size', ')', 'if', 'album_art', ':', 'util', '.', 'bytes_to_file', '(', 'album_art', ',', 'cache_dir', '/', 'file_name', ')', 'util', '.', 'bytes_to_file', '(', 'album_art', ',', 'cache_dir', '/', '"current.jpg"', ')', 'print', '(', 'f"album: Swapped art to {song[\'artist\']}, {song[\'album\']}."', ')']
Get the album art.
['Get', 'the', 'album', 'art', '.']
train
https://github.com/dylanaraps/bum/blob/004d795a67398e79f2c098d7775e9cd97231646b/bum/song.py#L25-L50
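The cache key built in the record, in isolation: artist, album, and size are joined and slashes stripped so the name is filesystem-safe. The cache directory here is an assumption for illustration.

from pathlib import Path

song = {'artist': 'AC/DC', 'album': 'Back in Black'}        # sample tags
size = 250
file_name = f"{song['artist']}_{song['album']}_{size}.jpg".replace("/", "")
cache_dir = Path.home() / ".cache" / "bum"                  # assumed location
print(cache_dir / file_name)   # .../ACDC_Back in Black_250.jpg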
9,266
unitedstack/steth
stetho/agent/common/utils.py
get_interface
def get_interface(interface): """Support Centos standard physical interface, such as eth0. """ # Supported CentOS Version supported_dists = ['7.0', '6.5'] def format_centos_7_0(inf): pattern = r'<([A-Z]+)' state = re.search(pattern, stdout[0]).groups()[0] state = 'UP' if not cmp(state, 'UP') else 'DOWN' inf.state = state stdout.pop(0) pattern = r'inet\s(.*)\s\snetmask\s(.*)\s\sbroadcast\s(.*)' for line in stdout: if line.startswith('inet '): tmp = re.search(pattern, line).groups() (inf.inet, inf.netmask, inf.broadcast) = tmp stdout.remove(line) break for line in stdout: if line.startswith('ether'): inf.ether = line[6:23] break return stdcode, '', inf.make_dict() def format_centos_6_5(inf): pattern = r'HWaddr\s(.*)' inf.ether = re.search(pattern, stdout[0]).groups()[0] stdout.pop(0) pattern = r'addr:(.*)\s\sBcast:(.*)\s\sMask:(.*)' for line in stdout: if line.startswith('inet '): tmp = re.search(pattern, line).groups() (inf.inet, inf.broadcast, inf.netmask) = tmp stdout.remove(line) break inf.state = 'DOWN' for line in stdout: if 'RUNNING' in line: state = line[:2] state = 'UP' if not cmp(state, 'UP') else 'DOWN' inf.state = state break return stdcode, '', inf.make_dict() linux_dist = platform.linux_distribution()[1][:3] if linux_dist in supported_dists: try: cmd = ['ifconfig', interface] stdcode, stdout = execute(cmd) inf = resource.Interface(interface) if not cmp(linux_dist, '6.5'): return format_centos_6_5(inf) elif not cmp(linux_dist, '7.0'): return format_centos_7_0(inf) except Exception as e: message = stdout.pop(0) return stdcode, message, None # Unsupported OS distribute message = 'Unsupported OS distribute %s, only support for CentOS %s.' message = message % (linux_dist, str(supported_dists)) return 1, message, None
python
def get_interface(interface): """Support Centos standard physical interface, such as eth0. """ # Supported CentOS Version supported_dists = ['7.0', '6.5'] def format_centos_7_0(inf): pattern = r'<([A-Z]+)' state = re.search(pattern, stdout[0]).groups()[0] state = 'UP' if not cmp(state, 'UP') else 'DOWN' inf.state = state stdout.pop(0) pattern = r'inet\s(.*)\s\snetmask\s(.*)\s\sbroadcast\s(.*)' for line in stdout: if line.startswith('inet '): tmp = re.search(pattern, line).groups() (inf.inet, inf.netmask, inf.broadcast) = tmp stdout.remove(line) break for line in stdout: if line.startswith('ether'): inf.ether = line[6:23] break return stdcode, '', inf.make_dict() def format_centos_6_5(inf): pattern = r'HWaddr\s(.*)' inf.ether = re.search(pattern, stdout[0]).groups()[0] stdout.pop(0) pattern = r'addr:(.*)\s\sBcast:(.*)\s\sMask:(.*)' for line in stdout: if line.startswith('inet '): tmp = re.search(pattern, line).groups() (inf.inet, inf.broadcast, inf.netmask) = tmp stdout.remove(line) break inf.state = 'DOWN' for line in stdout: if 'RUNNING' in line: state = line[:2] state = 'UP' if not cmp(state, 'UP') else 'DOWN' inf.state = state break return stdcode, '', inf.make_dict() linux_dist = platform.linux_distribution()[1][:3] if linux_dist in supported_dists: try: cmd = ['ifconfig', interface] stdcode, stdout = execute(cmd) inf = resource.Interface(interface) if not cmp(linux_dist, '6.5'): return format_centos_6_5(inf) elif not cmp(linux_dist, '7.0'): return format_centos_7_0(inf) except Exception as e: message = stdout.pop(0) return stdcode, message, None # Unsupported OS distribute message = 'Unsupported OS distribute %s, only support for CentOS %s.' message = message % (linux_dist, str(supported_dists)) return 1, message, None
['def', 'get_interface', '(', 'interface', ')', ':', '# Supported CentOS Version', 'supported_dists', '=', '[', "'7.0'", ',', "'6.5'", ']', 'def', 'format_centos_7_0', '(', 'inf', ')', ':', 'pattern', '=', "r'<([A-Z]+)'", 'state', '=', 're', '.', 'search', '(', 'pattern', ',', 'stdout', '[', '0', ']', ')', '.', 'groups', '(', ')', '[', '0', ']', 'state', '=', "'UP'", 'if', 'not', 'cmp', '(', 'state', ',', "'UP'", ')', 'else', "'DOWN'", 'inf', '.', 'state', '=', 'state', 'stdout', '.', 'pop', '(', '0', ')', 'pattern', '=', "r'inet\\s(.*)\\s\\snetmask\\s(.*)\\s\\sbroadcast\\s(.*)'", 'for', 'line', 'in', 'stdout', ':', 'if', 'line', '.', 'startswith', '(', "'inet '", ')', ':', 'tmp', '=', 're', '.', 'search', '(', 'pattern', ',', 'line', ')', '.', 'groups', '(', ')', '(', 'inf', '.', 'inet', ',', 'inf', '.', 'netmask', ',', 'inf', '.', 'broadcast', ')', '=', 'tmp', 'stdout', '.', 'remove', '(', 'line', ')', 'break', 'for', 'line', 'in', 'stdout', ':', 'if', 'line', '.', 'startswith', '(', "'ether'", ')', ':', 'inf', '.', 'ether', '=', 'line', '[', '6', ':', '23', ']', 'break', 'return', 'stdcode', ',', "''", ',', 'inf', '.', 'make_dict', '(', ')', 'def', 'format_centos_6_5', '(', 'inf', ')', ':', 'pattern', '=', "r'HWaddr\\s(.*)'", 'inf', '.', 'ether', '=', 're', '.', 'search', '(', 'pattern', ',', 'stdout', '[', '0', ']', ')', '.', 'groups', '(', ')', '[', '0', ']', 'stdout', '.', 'pop', '(', '0', ')', 'pattern', '=', "r'addr:(.*)\\s\\sBcast:(.*)\\s\\sMask:(.*)'", 'for', 'line', 'in', 'stdout', ':', 'if', 'line', '.', 'startswith', '(', "'inet '", ')', ':', 'tmp', '=', 're', '.', 'search', '(', 'pattern', ',', 'line', ')', '.', 'groups', '(', ')', '(', 'inf', '.', 'inet', ',', 'inf', '.', 'broadcast', ',', 'inf', '.', 'netmask', ')', '=', 'tmp', 'stdout', '.', 'remove', '(', 'line', ')', 'break', 'inf', '.', 'state', '=', "'DOWN'", 'for', 'line', 'in', 'stdout', ':', 'if', "'RUNNING'", 'in', 'line', ':', 'state', '=', 'line', '[', ':', '2', ']', 'state', '=', "'UP'", 'if', 'not', 'cmp', '(', 'state', ',', "'UP'", ')', 'else', "'DOWN'", 'inf', '.', 'state', '=', 'state', 'break', 'return', 'stdcode', ',', "''", ',', 'inf', '.', 'make_dict', '(', ')', 'linux_dist', '=', 'platform', '.', 'linux_distribution', '(', ')', '[', '1', ']', '[', ':', '3', ']', 'if', 'linux_dist', 'in', 'supported_dists', ':', 'try', ':', 'cmd', '=', '[', "'ifconfig'", ',', 'interface', ']', 'stdcode', ',', 'stdout', '=', 'execute', '(', 'cmd', ')', 'inf', '=', 'resource', '.', 'Interface', '(', 'interface', ')', 'if', 'not', 'cmp', '(', 'linux_dist', ',', "'6.5'", ')', ':', 'return', 'format_centos_6_5', '(', 'inf', ')', 'elif', 'not', 'cmp', '(', 'linux_dist', ',', "'7.0'", ')', ':', 'return', 'format_centos_7_0', '(', 'inf', ')', 'except', 'Exception', 'as', 'e', ':', 'message', '=', 'stdout', '.', 'pop', '(', '0', ')', 'return', 'stdcode', ',', 'message', ',', 'None', '# Unsupported OS distribute', 'message', '=', "'Unsupported OS distribute %s, only support for CentOS %s.'", 'message', '=', 'message', '%', '(', 'linux_dist', ',', 'str', '(', 'supported_dists', ')', ')', 'return', '1', ',', 'message', ',', 'None']
Support Centos standard physical interface, such as eth0.
['Support', 'Centos', 'standard', 'physical', 'interface', 'such', 'as', 'eth0', '.']
train
https://github.com/unitedstack/steth/blob/955884ceebf3bdc474c93cc5cf555e67d16458f1/stetho/agent/common/utils.py#L84-L147
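The CentOS 7 inet regex from the record, applied to a sample ifconfig line; the pattern relies on the double spaces ifconfig emits between fields.

import re

line = "inet 192.168.1.10  netmask 255.255.255.0  broadcast 192.168.1.255"
pattern = r'inet\s(.*)\s\snetmask\s(.*)\s\sbroadcast\s(.*)'
inet, netmask, broadcast = re.search(pattern, line).groups()
# ('192.168.1.10', '255.255.255.0', '192.168.1.255')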
9,267
trevisanj/a99
a99/fileio.py
crunch_dir
def crunch_dir(name, n=50): """Puts "..." in the middle of a directory name if lengh > n.""" if len(name) > n + 3: name = "..." + name[-n:] return name
python
def crunch_dir(name, n=50): """Puts "..." in the middle of a directory name if lengh > n.""" if len(name) > n + 3: name = "..." + name[-n:] return name
['def', 'crunch_dir', '(', 'name', ',', 'n', '=', '50', ')', ':', 'if', 'len', '(', 'name', ')', '>', 'n', '+', '3', ':', 'name', '=', '"..."', '+', 'name', '[', '-', 'n', ':', ']', 'return', 'name']
Puts "..." in the middle of a directory name if lengh > n.
['Puts', '...', 'in', 'the', 'middle', 'of', 'a', 'directory', 'name', 'if', 'lengh', '>', 'n', '.']
train
https://github.com/trevisanj/a99/blob/193e6e3c9b3e4f4a0ba7eb3eece846fe7045c539/a99/fileio.py#L32-L36
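The record's truncation logic, exercised directly on a made-up path.

def crunch_dir(name, n=50):                      # same logic as the record above
    if len(name) > n + 3:
        name = "..." + name[-n:]
    return name

crunch_dir("/very/long/path/to/some/deeply/nested/project/directory", n=20)
# -> '...ed/project/directory'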
9,268
michael-lazar/rtv
rtv/packages/praw/decorators.py
alias_function
def alias_function(function, class_name): """Create a RedditContentObject function mapped to a BaseReddit function. The BaseReddit classes define the majority of the API's functions. The first argument for many of these functions is the RedditContentObject that they operate on. This factory returns functions appropriate to be called on a RedditContent object that maps to the corresponding BaseReddit function. """ @wraps(function) def wrapped(self, *args, **kwargs): func_args = _make_func_args(function) if 'subreddit' in func_args and func_args.index('subreddit') != 1: # Only happens for search kwargs['subreddit'] = self return function(self.reddit_session, *args, **kwargs) else: return function(self.reddit_session, self, *args, **kwargs) # Only grab the short-line doc and add a link to the complete doc if wrapped.__doc__ is not None: wrapped.__doc__ = wrapped.__doc__.split('\n', 1)[0] wrapped.__doc__ += ('\n\nSee :meth:`.{0}.{1}` for complete usage. ' 'Note that you should exclude the subreddit ' 'parameter when calling this convenience method.' .format(class_name, function.__name__)) # Don't hide from sphinx as this is a parameter modifying decorator return wrapped
python
def alias_function(function, class_name): """Create a RedditContentObject function mapped to a BaseReddit function. The BaseReddit classes define the majority of the API's functions. The first argument for many of these functions is the RedditContentObject that they operate on. This factory returns functions appropriate to be called on a RedditContent object that maps to the corresponding BaseReddit function. """ @wraps(function) def wrapped(self, *args, **kwargs): func_args = _make_func_args(function) if 'subreddit' in func_args and func_args.index('subreddit') != 1: # Only happens for search kwargs['subreddit'] = self return function(self.reddit_session, *args, **kwargs) else: return function(self.reddit_session, self, *args, **kwargs) # Only grab the short-line doc and add a link to the complete doc if wrapped.__doc__ is not None: wrapped.__doc__ = wrapped.__doc__.split('\n', 1)[0] wrapped.__doc__ += ('\n\nSee :meth:`.{0}.{1}` for complete usage. ' 'Note that you should exclude the subreddit ' 'parameter when calling this convenience method.' .format(class_name, function.__name__)) # Don't hide from sphinx as this is a parameter modifying decorator return wrapped
['def', 'alias_function', '(', 'function', ',', 'class_name', ')', ':', '@', 'wraps', '(', 'function', ')', 'def', 'wrapped', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'func_args', '=', '_make_func_args', '(', 'function', ')', 'if', "'subreddit'", 'in', 'func_args', 'and', 'func_args', '.', 'index', '(', "'subreddit'", ')', '!=', '1', ':', '# Only happens for search', 'kwargs', '[', "'subreddit'", ']', '=', 'self', 'return', 'function', '(', 'self', '.', 'reddit_session', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'else', ':', 'return', 'function', '(', 'self', '.', 'reddit_session', ',', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', '# Only grab the short-line doc and add a link to the complete doc', 'if', 'wrapped', '.', '__doc__', 'is', 'not', 'None', ':', 'wrapped', '.', '__doc__', '=', 'wrapped', '.', '__doc__', '.', 'split', '(', "'\\n'", ',', '1', ')', '[', '0', ']', 'wrapped', '.', '__doc__', '+=', '(', "'\\n\\nSee :meth:`.{0}.{1}` for complete usage. '", "'Note that you should exclude the subreddit '", "'parameter when calling this convenience method.'", '.', 'format', '(', 'class_name', ',', 'function', '.', '__name__', ')', ')', "# Don't hide from sphinx as this is a parameter modifying decorator", 'return', 'wrapped']
Create a RedditContentObject function mapped to a BaseReddit function. The BaseReddit classes define the majority of the API's functions. The first argument for many of these functions is the RedditContentObject that they operate on. This factory returns functions appropriate to be called on a RedditContent object that maps to the corresponding BaseReddit function.
['Create', 'a', 'RedditContentObject', 'function', 'mapped', 'to', 'a', 'BaseReddit', 'function', '.']
train
https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/packages/praw/decorators.py#L44-L70
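The docstring surgery at the end of the record, isolated: functools.wraps copies the original docstring, which is then cut to its summary line and given a pointer to the full docs. Klass.f below is a made-up target.

from functools import wraps

def f():
    """Summary line.

    Long body that gets dropped.
    """

@wraps(f)
def wrapped():
    return f()

wrapped.__doc__ = wrapped.__doc__.split('\n', 1)[0]
wrapped.__doc__ += '\n\nSee :meth:`.Klass.f` for complete usage.'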
9,269
robhowley/nhlscrapi
nhlscrapi/games/faceoffcomp.py
FaceOffComparison.fo_pct_by_zone
def fo_pct_by_zone(self): """ Get the by team face-off win % by zone. Format is :returns: dict ``{ 'home/away': { 'off/def/neut': % } }`` """ bz = self.by_zone return { t: { z: bz[t][z]['won']/(1.0*bz[t][z]['total']) if bz[t][z]['total'] else 0.0 for z in self.__zones if z != 'all' } for t in [ 'home', 'away' ] }
python
def fo_pct_by_zone(self): """ Get the by team face-off win % by zone. Format is :returns: dict ``{ 'home/away': { 'off/def/neut': % } }`` """ bz = self.by_zone return { t: { z: bz[t][z]['won']/(1.0*bz[t][z]['total']) if bz[t][z]['total'] else 0.0 for z in self.__zones if z != 'all' } for t in [ 'home', 'away' ] }
['def', 'fo_pct_by_zone', '(', 'self', ')', ':', 'bz', '=', 'self', '.', 'by_zone', 'return', '{', 't', ':', '{', 'z', ':', 'bz', '[', 't', ']', '[', 'z', ']', '[', "'won'", ']', '/', '(', '1.0', '*', 'bz', '[', 't', ']', '[', 'z', ']', '[', "'total'", ']', ')', 'if', 'bz', '[', 't', ']', '[', 'z', ']', '[', "'total'", ']', 'else', '0.0', 'for', 'z', 'in', 'self', '.', '__zones', 'if', 'z', '!=', "'all'", '}', 'for', 't', 'in', '[', "'home'", ',', "'away'", ']', '}']
Get the by team face-off win % by zone. Format is :returns: dict ``{ 'home/away': { 'off/def/neut': % } }``
['Get', 'the', 'by', 'team', 'face', '-', 'off', 'win', '%', 'by', 'zone', '.', 'Format', 'is', ':', 'returns', ':', 'dict', '{', 'home', '/', 'away', ':', '{', 'off', '/', 'def', '/', 'neut', ':', '%', '}', '}']
train
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/games/faceoffcomp.py#L129-L144
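The nested comprehension in the record reduces a won/total tally per team and zone to a win percentage; the same shape with a tiny made-up tally.

by_zone = {'home': {'off': {'won': 6, 'total': 10}},
           'away': {'off': {'won': 4, 'total': 10}}}
fo_pct = {t: {z: (c['won'] / c['total'] if c['total'] else 0.0)
              for z, c in zones.items()}
          for t, zones in by_zone.items()}
# {'home': {'off': 0.6}, 'away': {'off': 0.4}}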
9,270
collectiveacuity/labPack
labpack/events/meetup.py
meetupClient.update_member_profile
def update_member_profile(self, brief_details, profile_details): ''' a method to update user profile details on meetup :param brief_details: dictionary with member brief details with updated values :param profile_details: dictionary with member profile details with updated values :return: dictionary with partial profile details inside [json] key ''' # https://www.meetup.com/meetup_api/docs/members/:member_id/#edit title = '%s.update_member_profile' % self.__class__.__name__ # validate permissions if not 'profile_edit' in self.service_scope: raise ValueError('%s requires group_join as part of oauth2 service_scope permissions.' % title) # validate inputs brief_details = self.objects.profile_brief.validate(brief_details) profile_details = self.objects.profile.validate(profile_details) # construct request fields url = '%s/members/%s' % (self.endpoint, str(profile_details['id'])) params = { 'bio': profile_details['bio'], 'bio_privacy': profile_details['privacy']['bio'], 'fields': 'gender,birthday,last_event,messaging_pref,next_event,other_services,privacy,self,stats', 'gender': profile_details['gender'], 'groups_privacy': profile_details['privacy']['groups'], 'lang': brief_details['lang'].replace('_', '-'), 'lat': str(profile_details['lat']), 'lon': str(profile_details['lon']), 'messaging_pref': profile_details['messaging_pref'], 'name': profile_details['name'], 'photo_id': profile_details['photo']['id'], 'sync_photo': True, 'topics_privacy': profile_details['privacy']['topics'], 'zip': brief_details['zip'] } if profile_details['privacy']['facebook']: params['facebook_privacy'] = profile_details['privacy']['facebook'] birthday_value = False for key, value in profile_details['birthday'].items(): if value: birthday_value = True break if not birthday_value: params['birthday'] = '-1' else: birthday_string = '' b_day = profile_details['birthday'] if b_day['day'] and b_day['month']: if b_day['month'] < 10: birthday_string += '0' birthday_string += str(b_day['month']) if b_day['day'] < 10: birthday_string += '0' birthday_string += str(b_day['day']) birthday_string += str(b_day['year']) params['birthday'] = birthday_string # send requests profile_details = self._patch_request(url, params=params) return profile_details
python
def update_member_profile(self, brief_details, profile_details): ''' a method to update user profile details on meetup :param brief_details: dictionary with member brief details with updated values :param profile_details: dictionary with member profile details with updated values :return: dictionary with partial profile details inside [json] key ''' # https://www.meetup.com/meetup_api/docs/members/:member_id/#edit title = '%s.update_member_profile' % self.__class__.__name__ # validate permissions if not 'profile_edit' in self.service_scope: raise ValueError('%s requires group_join as part of oauth2 service_scope permissions.' % title) # validate inputs brief_details = self.objects.profile_brief.validate(brief_details) profile_details = self.objects.profile.validate(profile_details) # construct request fields url = '%s/members/%s' % (self.endpoint, str(profile_details['id'])) params = { 'bio': profile_details['bio'], 'bio_privacy': profile_details['privacy']['bio'], 'fields': 'gender,birthday,last_event,messaging_pref,next_event,other_services,privacy,self,stats', 'gender': profile_details['gender'], 'groups_privacy': profile_details['privacy']['groups'], 'lang': brief_details['lang'].replace('_', '-'), 'lat': str(profile_details['lat']), 'lon': str(profile_details['lon']), 'messaging_pref': profile_details['messaging_pref'], 'name': profile_details['name'], 'photo_id': profile_details['photo']['id'], 'sync_photo': True, 'topics_privacy': profile_details['privacy']['topics'], 'zip': brief_details['zip'] } if profile_details['privacy']['facebook']: params['facebook_privacy'] = profile_details['privacy']['facebook'] birthday_value = False for key, value in profile_details['birthday'].items(): if value: birthday_value = True break if not birthday_value: params['birthday'] = '-1' else: birthday_string = '' b_day = profile_details['birthday'] if b_day['day'] and b_day['month']: if b_day['month'] < 10: birthday_string += '0' birthday_string += str(b_day['month']) if b_day['day'] < 10: birthday_string += '0' birthday_string += str(b_day['day']) birthday_string += str(b_day['year']) params['birthday'] = birthday_string # send requests profile_details = self._patch_request(url, params=params) return profile_details
['def', 'update_member_profile', '(', 'self', ',', 'brief_details', ',', 'profile_details', ')', ':', '# https://www.meetup.com/meetup_api/docs/members/:member_id/#edit\r', 'title', '=', "'%s.update_member_profile'", '%', 'self', '.', '__class__', '.', '__name__', '# validate permissions\r', 'if', 'not', "'profile_edit'", 'in', 'self', '.', 'service_scope', ':', 'raise', 'ValueError', '(', "'%s requires group_join as part of oauth2 service_scope permissions.'", '%', 'title', ')', '# validate inputs\r', 'brief_details', '=', 'self', '.', 'objects', '.', 'profile_brief', '.', 'validate', '(', 'brief_details', ')', 'profile_details', '=', 'self', '.', 'objects', '.', 'profile', '.', 'validate', '(', 'profile_details', ')', '# construct request fields\r', 'url', '=', "'%s/members/%s'", '%', '(', 'self', '.', 'endpoint', ',', 'str', '(', 'profile_details', '[', "'id'", ']', ')', ')', 'params', '=', '{', "'bio'", ':', 'profile_details', '[', "'bio'", ']', ',', "'bio_privacy'", ':', 'profile_details', '[', "'privacy'", ']', '[', "'bio'", ']', ',', "'fields'", ':', "'gender,birthday,last_event,messaging_pref,next_event,other_services,privacy,self,stats'", ',', "'gender'", ':', 'profile_details', '[', "'gender'", ']', ',', "'groups_privacy'", ':', 'profile_details', '[', "'privacy'", ']', '[', "'groups'", ']', ',', "'lang'", ':', 'brief_details', '[', "'lang'", ']', '.', 'replace', '(', "'_'", ',', "'-'", ')', ',', "'lat'", ':', 'str', '(', 'profile_details', '[', "'lat'", ']', ')', ',', "'lon'", ':', 'str', '(', 'profile_details', '[', "'lon'", ']', ')', ',', "'messaging_pref'", ':', 'profile_details', '[', "'messaging_pref'", ']', ',', "'name'", ':', 'profile_details', '[', "'name'", ']', ',', "'photo_id'", ':', 'profile_details', '[', "'photo'", ']', '[', "'id'", ']', ',', "'sync_photo'", ':', 'True', ',', "'topics_privacy'", ':', 'profile_details', '[', "'privacy'", ']', '[', "'topics'", ']', ',', "'zip'", ':', 'brief_details', '[', "'zip'", ']', '}', 'if', 'profile_details', '[', "'privacy'", ']', '[', "'facebook'", ']', ':', 'params', '[', "'facebook_privacy'", ']', '=', 'profile_details', '[', "'privacy'", ']', '[', "'facebook'", ']', 'birthday_value', '=', 'False', 'for', 'key', ',', 'value', 'in', 'profile_details', '[', "'birthday'", ']', '.', 'items', '(', ')', ':', 'if', 'value', ':', 'birthday_value', '=', 'True', 'break', 'if', 'not', 'birthday_value', ':', 'params', '[', "'birthday'", ']', '=', "'-1'", 'else', ':', 'birthday_string', '=', "''", 'b_day', '=', 'profile_details', '[', "'birthday'", ']', 'if', 'b_day', '[', "'day'", ']', 'and', 'b_day', '[', "'month'", ']', ':', 'if', 'b_day', '[', "'month'", ']', '<', '10', ':', 'birthday_string', '+=', "'0'", 'birthday_string', '+=', 'str', '(', 'b_day', '[', "'month'", ']', ')', 'if', 'b_day', '[', "'day'", ']', '<', '10', ':', 'birthday_string', '+=', "'0'", 'birthday_string', '+=', 'str', '(', 'b_day', '[', "'day'", ']', ')', 'birthday_string', '+=', 'str', '(', 'b_day', '[', "'year'", ']', ')', 'params', '[', "'birthday'", ']', '=', 'birthday_string', '# send requests\r', 'profile_details', '=', 'self', '.', '_patch_request', '(', 'url', ',', 'params', '=', 'params', ')', 'return', 'profile_details']
a method to update user profile details on meetup :param brief_details: dictionary with member brief details with updated values :param profile_details: dictionary with member profile details with updated values :return: dictionary with partial profile details inside [json] key
['a', 'method', 'to', 'update', 'user', 'profile', 'details', 'on', 'meetup', ':', 'param', 'brief_details', ':', 'dictionary', 'with', 'member', 'brief', 'details', 'with', 'updated', 'values', ':', 'param', 'profile_details', ':', 'dictionary', 'with', 'member', 'profile', 'details', 'with', 'updated', 'values', ':', 'return', ':', 'dictionary', 'with', 'partial', 'profile', 'details', 'inside', '[', 'json', ']', 'key']
train
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/events/meetup.py#L1178-L1242
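The zero-padded MMDDYYYY birthday encoding assembled near the end of the record, in isolation with sample values.

b_day = {'day': 5, 'month': 9, 'year': 1990}     # sample profile birthday
birthday_string = ''
if b_day['day'] and b_day['month']:
    if b_day['month'] < 10:
        birthday_string += '0'
    birthday_string += str(b_day['month'])
    if b_day['day'] < 10:
        birthday_string += '0'
    birthday_string += str(b_day['day'])
birthday_string += str(b_day['year'])
# birthday_string == '09051990'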
9,271
saltstack/salt
salt/states/zpool.py
present
def present(name, properties=None, filesystem_properties=None, layout=None, config=None): ''' ensure storage pool is present on the system name : string name of storage pool properties : dict optional set of properties to set for the storage pool filesystem_properties : dict optional set of filesystem properties to set for the storage pool (creation only) layout: dict disk layout to use if the pool does not exist (creation only) config : dict fine grain control over this state .. note:: The following configuration properties can be toggled in the config parameter. - import (true) - try to import the pool before creating it if absent - import_dirs (None) - specify additional locations to scan for devices on import (comma-seperated) - device_dir (None, SunOS=/dev/dsk, Linux=/dev) - specify device directory to prepend for none absolute device paths - force (false) - try to force the import or creation .. note:: It is no longer needed to give a unique name to each top-level vdev, the old layout format is still supported but no longer recommended. .. code-block:: yaml - mirror: - /tmp/vdisk3 - /tmp/vdisk2 - mirror: - /tmp/vdisk0 - /tmp/vdisk1 The above yaml will always result in the following zpool create: .. code-block:: bash zpool create mypool mirror /tmp/vdisk3 /tmp/vdisk2 mirror /tmp/vdisk0 /tmp/vdisk1 .. warning:: The legacy format is also still supported but not recommended, because ID's inside the layout dict must be unique they need to have a suffix. .. code-block:: yaml mirror-0: /tmp/vdisk3 /tmp/vdisk2 mirror-1: /tmp/vdisk0 /tmp/vdisk1 .. warning:: Pay attention to the order of your dict! .. code-block:: yaml - mirror: - /tmp/vdisk0 - /tmp/vdisk1 - /tmp/vdisk2 The above will result in the following zpool create: .. code-block:: bash zpool create mypool mirror /tmp/vdisk0 /tmp/vdisk1 /tmp/vdisk2 Creating a 3-way mirror! While you probably expect it to be mirror root vdev with 2 devices + a root vdev of 1 device! ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} # config defaults default_config = { 'import': True, 'import_dirs': None, 'device_dir': None, 'force': False } if __grains__['kernel'] == 'SunOS': default_config['device_dir'] = '/dev/dsk' elif __grains__['kernel'] == 'Linux': default_config['device_dir'] = '/dev' # merge state config if config: default_config.update(config) config = default_config # ensure properties are zfs values if properties: properties = __utils__['zfs.from_auto_dict'](properties) elif properties is None: properties = {} if filesystem_properties: filesystem_properties = __utils__['zfs.from_auto_dict'](filesystem_properties) elif filesystem_properties is None: filesystem_properties = {} # parse layout vdevs = _layout_to_vdev(layout, config['device_dir']) if vdevs: vdevs.insert(0, name) # log configuration log.debug('zpool.present::%s::config - %s', name, config) log.debug('zpool.present::%s::vdevs - %s', name, vdevs) log.debug('zpool.present::%s::properties - %s', name, properties) log.debug('zpool.present::%s::filesystem_properties - %s', name, filesystem_properties) # ensure the pool is present ret['result'] = False # don't do anything because this is a test if __opts__['test']: ret['result'] = True if __salt__['zpool.exists'](name): ret['changes'][name] = 'uptodate' else: ret['changes'][name] = 'imported' if config['import'] else 'created' ret['comment'] = 'storage pool {0} was {1}'.format(name, ret['changes'][name]) # update pool elif __salt__['zpool.exists'](name): ret['result'] = True # fetch current pool properties properties_current = __salt__['zpool.get'](name, parsable=True) # build list of properties to update properties_update = [] if properties: for prop in properties: # skip unexisting properties if prop not in properties_current: log.warning('zpool.present::%s::update - unknown property: %s', name, prop) continue # compare current and wanted value if properties_current[prop] != properties[prop]: properties_update.append(prop) # update pool properties for prop in properties_update: res = __salt__['zpool.set'](name, prop, properties[prop]) if res['set']: if name not in ret['changes']: ret['changes'][name] = {} ret['changes'][name][prop] = properties[prop] else: ret['result'] = False if ret['comment'] == '': ret['comment'] = 'The following properties were not updated:' ret['comment'] = '{0} {1}'.format(ret['comment'], prop) if ret['result']: ret['comment'] = 'properties updated' if ret['changes'] else 'no update needed' # import or create the pool (at least try to anyway) else: # import pool if config['import']: mod_res = __salt__['zpool.import']( name, force=config['force'], dir=config['import_dirs'], ) ret['result'] = mod_res['imported'] if ret['result']: ret['changes'][name] = 'imported' ret['comment'] = 'storage pool {0} was imported'.format(name) # create pool if not ret['result'] and vdevs: log.debug('zpool.present::%s::creating', name) # execute zpool.create mod_res = __salt__['zpool.create']( *vdevs, force=config['force'], properties=properties, filesystem_properties=filesystem_properties ) ret['result'] = mod_res['created'] if ret['result']: ret['changes'][name] = 'created' ret['comment'] = 'storage pool {0} was created'.format(name) elif 'error' in mod_res: ret['comment'] = mod_res['error'] else: ret['comment'] = 'could not create storage pool {0}'.format(name) # give up, we cannot import the pool and we do not have a layout to create it if not ret['result'] and not vdevs: ret['comment'] = 'storage pool {0} was not imported, no (valid) layout specified for creation'.format(name) return ret
python
def present(name, properties=None, filesystem_properties=None, layout=None, config=None): ''' ensure storage pool is present on the system name : string name of storage pool properties : dict optional set of properties to set for the storage pool filesystem_properties : dict optional set of filesystem properties to set for the storage pool (creation only) layout: dict disk layout to use if the pool does not exist (creation only) config : dict fine grain control over this state .. note:: The following configuration properties can be toggled in the config parameter. - import (true) - try to import the pool before creating it if absent - import_dirs (None) - specify additional locations to scan for devices on import (comma-seperated) - device_dir (None, SunOS=/dev/dsk, Linux=/dev) - specify device directory to prepend for none absolute device paths - force (false) - try to force the import or creation .. note:: It is no longer needed to give a unique name to each top-level vdev, the old layout format is still supported but no longer recommended. .. code-block:: yaml - mirror: - /tmp/vdisk3 - /tmp/vdisk2 - mirror: - /tmp/vdisk0 - /tmp/vdisk1 The above yaml will always result in the following zpool create: .. code-block:: bash zpool create mypool mirror /tmp/vdisk3 /tmp/vdisk2 mirror /tmp/vdisk0 /tmp/vdisk1 .. warning:: The legacy format is also still supported but not recommended, because ID's inside the layout dict must be unique they need to have a suffix. .. code-block:: yaml mirror-0: /tmp/vdisk3 /tmp/vdisk2 mirror-1: /tmp/vdisk0 /tmp/vdisk1 .. warning:: Pay attention to the order of your dict! .. code-block:: yaml - mirror: - /tmp/vdisk0 - /tmp/vdisk1 - /tmp/vdisk2 The above will result in the following zpool create: .. code-block:: bash zpool create mypool mirror /tmp/vdisk0 /tmp/vdisk1 /tmp/vdisk2 Creating a 3-way mirror! While you probably expect it to be mirror root vdev with 2 devices + a root vdev of 1 device! ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} # config defaults default_config = { 'import': True, 'import_dirs': None, 'device_dir': None, 'force': False } if __grains__['kernel'] == 'SunOS': default_config['device_dir'] = '/dev/dsk' elif __grains__['kernel'] == 'Linux': default_config['device_dir'] = '/dev' # merge state config if config: default_config.update(config) config = default_config # ensure properties are zfs values if properties: properties = __utils__['zfs.from_auto_dict'](properties) elif properties is None: properties = {} if filesystem_properties: filesystem_properties = __utils__['zfs.from_auto_dict'](filesystem_properties) elif filesystem_properties is None: filesystem_properties = {} # parse layout vdevs = _layout_to_vdev(layout, config['device_dir']) if vdevs: vdevs.insert(0, name) # log configuration log.debug('zpool.present::%s::config - %s', name, config) log.debug('zpool.present::%s::vdevs - %s', name, vdevs) log.debug('zpool.present::%s::properties - %s', name, properties) log.debug('zpool.present::%s::filesystem_properties - %s', name, filesystem_properties) # ensure the pool is present ret['result'] = False # don't do anything because this is a test if __opts__['test']: ret['result'] = True if __salt__['zpool.exists'](name): ret['changes'][name] = 'uptodate' else: ret['changes'][name] = 'imported' if config['import'] else 'created' ret['comment'] = 'storage pool {0} was {1}'.format(name, ret['changes'][name]) # update pool elif __salt__['zpool.exists'](name): ret['result'] = True # fetch current pool properties properties_current = __salt__['zpool.get'](name, parsable=True) # build list of properties to update properties_update = [] if properties: for prop in properties: # skip unexisting properties if prop not in properties_current: log.warning('zpool.present::%s::update - unknown property: %s', name, prop) continue # compare current and wanted value if properties_current[prop] != properties[prop]: properties_update.append(prop) # update pool properties for prop in properties_update: res = __salt__['zpool.set'](name, prop, properties[prop]) if res['set']: if name not in ret['changes']: ret['changes'][name] = {} ret['changes'][name][prop] = properties[prop] else: ret['result'] = False if ret['comment'] == '': ret['comment'] = 'The following properties were not updated:' ret['comment'] = '{0} {1}'.format(ret['comment'], prop) if ret['result']: ret['comment'] = 'properties updated' if ret['changes'] else 'no update needed' # import or create the pool (at least try to anyway) else: # import pool if config['import']: mod_res = __salt__['zpool.import']( name, force=config['force'], dir=config['import_dirs'], ) ret['result'] = mod_res['imported'] if ret['result']: ret['changes'][name] = 'imported' ret['comment'] = 'storage pool {0} was imported'.format(name) # create pool if not ret['result'] and vdevs: log.debug('zpool.present::%s::creating', name) # execute zpool.create mod_res = __salt__['zpool.create']( *vdevs, force=config['force'], properties=properties, filesystem_properties=filesystem_properties ) ret['result'] = mod_res['created'] if ret['result']: ret['changes'][name] = 'created' ret['comment'] = 'storage pool {0} was created'.format(name) elif 'error' in mod_res: ret['comment'] = mod_res['error'] else: ret['comment'] = 'could not create storage pool {0}'.format(name) # give up, we cannot import the pool and we do not have a layout to create it if not ret['result'] and not vdevs: ret['comment'] = 'storage pool {0} was not imported, no (valid) layout specified for creation'.format(name) return ret
['def', 'present', '(', 'name', ',', 'properties', '=', 'None', ',', 'filesystem_properties', '=', 'None', ',', 'layout', '=', 'None', ',', 'config', '=', 'None', ')', ':', 'ret', '=', '{', "'name'", ':', 'name', ',', "'changes'", ':', '{', '}', ',', "'result'", ':', 'None', ',', "'comment'", ':', "''", '}', '# config defaults', 'default_config', '=', '{', "'import'", ':', 'True', ',', "'import_dirs'", ':', 'None', ',', "'device_dir'", ':', 'None', ',', "'force'", ':', 'False', '}', 'if', '__grains__', '[', "'kernel'", ']', '==', "'SunOS'", ':', 'default_config', '[', "'device_dir'", ']', '=', "'/dev/dsk'", 'elif', '__grains__', '[', "'kernel'", ']', '==', "'Linux'", ':', 'default_config', '[', "'device_dir'", ']', '=', "'/dev'", '# merge state config', 'if', 'config', ':', 'default_config', '.', 'update', '(', 'config', ')', 'config', '=', 'default_config', '# ensure properties are zfs values', 'if', 'properties', ':', 'properties', '=', '__utils__', '[', "'zfs.from_auto_dict'", ']', '(', 'properties', ')', 'elif', 'properties', 'is', 'None', ':', 'properties', '=', '{', '}', 'if', 'filesystem_properties', ':', 'filesystem_properties', '=', '__utils__', '[', "'zfs.from_auto_dict'", ']', '(', 'filesystem_properties', ')', 'elif', 'filesystem_properties', 'is', 'None', ':', 'filesystem_properties', '=', '{', '}', '# parse layout', 'vdevs', '=', '_layout_to_vdev', '(', 'layout', ',', 'config', '[', "'device_dir'", ']', ')', 'if', 'vdevs', ':', 'vdevs', '.', 'insert', '(', '0', ',', 'name', ')', '# log configuration', 'log', '.', 'debug', '(', "'zpool.present::%s::config - %s'", ',', 'name', ',', 'config', ')', 'log', '.', 'debug', '(', "'zpool.present::%s::vdevs - %s'", ',', 'name', ',', 'vdevs', ')', 'log', '.', 'debug', '(', "'zpool.present::%s::properties - %s'", ',', 'name', ',', 'properties', ')', 'log', '.', 'debug', '(', "'zpool.present::%s::filesystem_properties - %s'", ',', 'name', ',', 'filesystem_properties', ')', '# ensure the pool is present', 'ret', '[', "'result'", ']', '=', 'False', "# don't do anything because this is a test", 'if', '__opts__', '[', "'test'", ']', ':', 'ret', '[', "'result'", ']', '=', 'True', 'if', '__salt__', '[', "'zpool.exists'", ']', '(', 'name', ')', ':', 'ret', '[', "'changes'", ']', '[', 'name', ']', '=', "'uptodate'", 'else', ':', 'ret', '[', "'changes'", ']', '[', 'name', ']', '=', "'imported'", 'if', 'config', '[', "'import'", ']', 'else', "'created'", 'ret', '[', "'comment'", ']', '=', "'storage pool {0} was {1}'", '.', 'format', '(', 'name', ',', 'ret', '[', "'changes'", ']', '[', 'name', ']', ')', '# update pool', 'elif', '__salt__', '[', "'zpool.exists'", ']', '(', 'name', ')', ':', 'ret', '[', "'result'", ']', '=', 'True', '# fetch current pool properties', 'properties_current', '=', '__salt__', '[', "'zpool.get'", ']', '(', 'name', ',', 'parsable', '=', 'True', ')', '# build list of properties to update', 'properties_update', '=', '[', ']', 'if', 'properties', ':', 'for', 'prop', 'in', 'properties', ':', '# skip unexisting properties', 'if', 'prop', 'not', 'in', 'properties_current', ':', 'log', '.', 'warning', '(', "'zpool.present::%s::update - unknown property: %s'", ',', 'name', ',', 'prop', ')', 'continue', '# compare current and wanted value', 'if', 'properties_current', '[', 'prop', ']', '!=', 'properties', '[', 'prop', ']', ':', 'properties_update', '.', 'append', '(', 'prop', ')', '# update pool properties', 'for', 'prop', 'in', 'properties_update', ':', 'res', '=', '__salt__', '[', "'zpool.set'", ']', '(', 'name', ',', 'prop', ',', 'properties', '[', 'prop', ']', ')', 'if', 'res', '[', "'set'", ']', ':', 'if', 'name', 'not', 'in', 'ret', '[', "'changes'", ']', ':', 'ret', '[', "'changes'", ']', '[', 'name', ']', '=', '{', '}', 'ret', '[', "'changes'", ']', '[', 'name', ']', '[', 'prop', ']', '=', 'properties', '[', 'prop', ']', 'else', ':', 'ret', '[', "'result'", ']', '=', 'False', 'if', 'ret', '[', "'comment'", ']', '==', "''", ':', 'ret', '[', "'comment'", ']', '=', "'The following properties were not updated:'", 'ret', '[', "'comment'", ']', '=', "'{0} {1}'", '.', 'format', '(', 'ret', '[', "'comment'", ']', ',', 'prop', ')', 'if', 'ret', '[', "'result'", ']', ':', 'ret', '[', "'comment'", ']', '=', "'properties updated'", 'if', 'ret', '[', "'changes'", ']', 'else', "'no update needed'", '# import or create the pool (at least try to anyway)', 'else', ':', '# import pool', 'if', 'config', '[', "'import'", ']', ':', 'mod_res', '=', '__salt__', '[', "'zpool.import'", ']', '(', 'name', ',', 'force', '=', 'config', '[', "'force'", ']', ',', 'dir', '=', 'config', '[', "'import_dirs'", ']', ',', ')', 'ret', '[', "'result'", ']', '=', 'mod_res', '[', "'imported'", ']', 'if', 'ret', '[', "'result'", ']', ':', 'ret', '[', "'changes'", ']', '[', 'name', ']', '=', "'imported'", 'ret', '[', "'comment'", ']', '=', "'storage pool {0} was imported'", '.', 'format', '(', 'name', ')', '# create pool', 'if', 'not', 'ret', '[', "'result'", ']', 'and', 'vdevs', ':', 'log', '.', 'debug', '(', "'zpool.present::%s::creating'", ',', 'name', ')', '# execute zpool.create', 'mod_res', '=', '__salt__', '[', "'zpool.create'", ']', '(', '*', 'vdevs', ',', 'force', '=', 'config', '[', "'force'", ']', ',', 'properties', '=', 'properties', ',', 'filesystem_properties', '=', 'filesystem_properties', ')', 'ret', '[', "'result'", ']', '=', 'mod_res', '[', "'created'", ']', 'if', 'ret', '[', "'result'", ']', ':', 'ret', '[', "'changes'", ']', '[', 'name', ']', '=', "'created'", 'ret', '[', "'comment'", ']', '=', "'storage pool {0} was created'", '.', 'format', '(', 'name', ')', 'elif', "'error'", 'in', 'mod_res', ':', 'ret', '[', "'comment'", ']', '=', 'mod_res', '[', "'error'", ']', 'else', ':', 'ret', '[', "'comment'", ']', '=', "'could not create storage pool {0}'", '.', 'format', '(', 'name', ')', '# give up, we cannot import the pool and we do not have a layout to create it', 'if', 'not', 'ret', '[', "'result'", ']', 'and', 'not', 'vdevs', ':', 'ret', '[', "'comment'", ']', '=', "'storage pool {0} was not imported, no (valid) layout specified for creation'", '.', 'format', '(', 'name', ')', 'return', 'ret']
ensure storage pool is present on the system name : string name of storage pool properties : dict optional set of properties to set for the storage pool filesystem_properties : dict optional set of filesystem properties to set for the storage pool (creation only) layout: dict disk layout to use if the pool does not exist (creation only) config : dict fine grain control over this state .. note:: The following configuration properties can be toggled in the config parameter. - import (true) - try to import the pool before creating it if absent - import_dirs (None) - specify additional locations to scan for devices on import (comma-seperated) - device_dir (None, SunOS=/dev/dsk, Linux=/dev) - specify device directory to prepend for none absolute device paths - force (false) - try to force the import or creation .. note:: It is no longer needed to give a unique name to each top-level vdev, the old layout format is still supported but no longer recommended. .. code-block:: yaml - mirror: - /tmp/vdisk3 - /tmp/vdisk2 - mirror: - /tmp/vdisk0 - /tmp/vdisk1 The above yaml will always result in the following zpool create: .. code-block:: bash zpool create mypool mirror /tmp/vdisk3 /tmp/vdisk2 mirror /tmp/vdisk0 /tmp/vdisk1 .. warning:: The legacy format is also still supported but not recommended, because ID's inside the layout dict must be unique they need to have a suffix. .. code-block:: yaml mirror-0: /tmp/vdisk3 /tmp/vdisk2 mirror-1: /tmp/vdisk0 /tmp/vdisk1 .. warning:: Pay attention to the order of your dict! .. code-block:: yaml - mirror: - /tmp/vdisk0 - /tmp/vdisk1 - /tmp/vdisk2 The above will result in the following zpool create: .. code-block:: bash zpool create mypool mirror /tmp/vdisk0 /tmp/vdisk1 /tmp/vdisk2 Creating a 3-way mirror! While you probably expect it to be mirror root vdev with 2 devices + a root vdev of 1 device!
['ensure', 'storage', 'pool', 'is', 'present', 'on', 'the', 'system']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/zpool.py#L170-L381
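The function above follows the standard Salt state contract: build a ret dict with name/changes/result/comment, short-circuit under __opts__['test'], then mutate and report. A stripped-down, self-contained sketch of that contract; the check/apply helpers and the stubbed __opts__ are hypothetical stand-ins for what Salt injects at runtime.

__opts__ = {'test': False}  # stand-in: Salt injects this at runtime

def needs_change(name):
    return True   # pretend the resource is absent

def apply_change(name):
    pass          # pretend we created it

def present(name):
    # Every Salt state returns this four-key dict; under test=True the
    # state only reports what it *would* do and never mutates anything.
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
    if __opts__['test']:
        ret['result'] = None if needs_change(name) else True
        ret['comment'] = '{0} would be changed'.format(name)
        return ret
    if needs_change(name):
        apply_change(name)
        ret['changes'][name] = 'created'
    ret['result'] = True
    ret['comment'] = '{0} is in the desired state'.format(name)
    return ret

print(present('mypool'))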
9,272
gwastro/pycbc
pycbc/waveform/bank.py
sigma_cached
def sigma_cached(self, psd): """ Cache sigma calculate for use in tandem with the FilterBank class """ if not hasattr(self, '_sigmasq'): from pycbc.opt import LimitedSizeDict self._sigmasq = LimitedSizeDict(size_limit=2**5) key = id(psd) if not hasattr(psd, '_sigma_cached_key'): psd._sigma_cached_key = {} if key not in self._sigmasq or id(self) not in psd._sigma_cached_key: psd._sigma_cached_key[id(self)] = True # If possible, we precalculate the sigmasq vector for all possible waveforms if pycbc.waveform.waveform_norm_exists(self.approximant): if not hasattr(psd, 'sigmasq_vec'): psd.sigmasq_vec = {} if self.approximant not in psd.sigmasq_vec: psd.sigmasq_vec[self.approximant] = pycbc.waveform.get_waveform_filter_norm( self.approximant, psd, len(psd), psd.delta_f, self.f_lower) if not hasattr(self, 'sigma_scale'): # Get an amplitude normalization (mass dependant constant norm) amp_norm = pycbc.waveform.get_template_amplitude_norm( self.params, approximant=self.approximant) amp_norm = 1 if amp_norm is None else amp_norm self.sigma_scale = (DYN_RANGE_FAC * amp_norm) ** 2.0 self._sigmasq[key] = self.sigma_scale * \ psd.sigmasq_vec[self.approximant][self.end_idx-1] else: if not hasattr(self, 'sigma_view'): from pycbc.filter.matchedfilter import get_cutoff_indices N = (len(self) -1) * 2 kmin, kmax = get_cutoff_indices( self.min_f_lower or self.f_lower, self.end_frequency, self.delta_f, N) self.sslice = slice(kmin, kmax) self.sigma_view = self[self.sslice].squared_norm() * 4.0 * self.delta_f if not hasattr(psd, 'invsqrt'): psd.invsqrt = 1.0 / psd[self.sslice] self._sigmasq[key] = self.sigma_view.inner(psd.invsqrt) return self._sigmasq[key]
python
def sigma_cached(self, psd): """ Cache sigma calculate for use in tandem with the FilterBank class """ if not hasattr(self, '_sigmasq'): from pycbc.opt import LimitedSizeDict self._sigmasq = LimitedSizeDict(size_limit=2**5) key = id(psd) if not hasattr(psd, '_sigma_cached_key'): psd._sigma_cached_key = {} if key not in self._sigmasq or id(self) not in psd._sigma_cached_key: psd._sigma_cached_key[id(self)] = True # If possible, we precalculate the sigmasq vector for all possible waveforms if pycbc.waveform.waveform_norm_exists(self.approximant): if not hasattr(psd, 'sigmasq_vec'): psd.sigmasq_vec = {} if self.approximant not in psd.sigmasq_vec: psd.sigmasq_vec[self.approximant] = pycbc.waveform.get_waveform_filter_norm( self.approximant, psd, len(psd), psd.delta_f, self.f_lower) if not hasattr(self, 'sigma_scale'): # Get an amplitude normalization (mass dependant constant norm) amp_norm = pycbc.waveform.get_template_amplitude_norm( self.params, approximant=self.approximant) amp_norm = 1 if amp_norm is None else amp_norm self.sigma_scale = (DYN_RANGE_FAC * amp_norm) ** 2.0 self._sigmasq[key] = self.sigma_scale * \ psd.sigmasq_vec[self.approximant][self.end_idx-1] else: if not hasattr(self, 'sigma_view'): from pycbc.filter.matchedfilter import get_cutoff_indices N = (len(self) -1) * 2 kmin, kmax = get_cutoff_indices( self.min_f_lower or self.f_lower, self.end_frequency, self.delta_f, N) self.sslice = slice(kmin, kmax) self.sigma_view = self[self.sslice].squared_norm() * 4.0 * self.delta_f if not hasattr(psd, 'invsqrt'): psd.invsqrt = 1.0 / psd[self.sslice] self._sigmasq[key] = self.sigma_view.inner(psd.invsqrt) return self._sigmasq[key]
['def', 'sigma_cached', '(', 'self', ',', 'psd', ')', ':', 'if', 'not', 'hasattr', '(', 'self', ',', "'_sigmasq'", ')', ':', 'from', 'pycbc', '.', 'opt', 'import', 'LimitedSizeDict', 'self', '.', '_sigmasq', '=', 'LimitedSizeDict', '(', 'size_limit', '=', '2', '**', '5', ')', 'key', '=', 'id', '(', 'psd', ')', 'if', 'not', 'hasattr', '(', 'psd', ',', "'_sigma_cached_key'", ')', ':', 'psd', '.', '_sigma_cached_key', '=', '{', '}', 'if', 'key', 'not', 'in', 'self', '.', '_sigmasq', 'or', 'id', '(', 'self', ')', 'not', 'in', 'psd', '.', '_sigma_cached_key', ':', 'psd', '.', '_sigma_cached_key', '[', 'id', '(', 'self', ')', ']', '=', 'True', '# If possible, we precalculate the sigmasq vector for all possible waveforms', 'if', 'pycbc', '.', 'waveform', '.', 'waveform_norm_exists', '(', 'self', '.', 'approximant', ')', ':', 'if', 'not', 'hasattr', '(', 'psd', ',', "'sigmasq_vec'", ')', ':', 'psd', '.', 'sigmasq_vec', '=', '{', '}', 'if', 'self', '.', 'approximant', 'not', 'in', 'psd', '.', 'sigmasq_vec', ':', 'psd', '.', 'sigmasq_vec', '[', 'self', '.', 'approximant', ']', '=', 'pycbc', '.', 'waveform', '.', 'get_waveform_filter_norm', '(', 'self', '.', 'approximant', ',', 'psd', ',', 'len', '(', 'psd', ')', ',', 'psd', '.', 'delta_f', ',', 'self', '.', 'f_lower', ')', 'if', 'not', 'hasattr', '(', 'self', ',', "'sigma_scale'", ')', ':', '# Get an amplitude normalization (mass dependant constant norm)', 'amp_norm', '=', 'pycbc', '.', 'waveform', '.', 'get_template_amplitude_norm', '(', 'self', '.', 'params', ',', 'approximant', '=', 'self', '.', 'approximant', ')', 'amp_norm', '=', '1', 'if', 'amp_norm', 'is', 'None', 'else', 'amp_norm', 'self', '.', 'sigma_scale', '=', '(', 'DYN_RANGE_FAC', '*', 'amp_norm', ')', '**', '2.0', 'self', '.', '_sigmasq', '[', 'key', ']', '=', 'self', '.', 'sigma_scale', '*', 'psd', '.', 'sigmasq_vec', '[', 'self', '.', 'approximant', ']', '[', 'self', '.', 'end_idx', '-', '1', ']', 'else', ':', 'if', 'not', 'hasattr', '(', 'self', ',', "'sigma_view'", ')', ':', 'from', 'pycbc', '.', 'filter', '.', 'matchedfilter', 'import', 'get_cutoff_indices', 'N', '=', '(', 'len', '(', 'self', ')', '-', '1', ')', '*', '2', 'kmin', ',', 'kmax', '=', 'get_cutoff_indices', '(', 'self', '.', 'min_f_lower', 'or', 'self', '.', 'f_lower', ',', 'self', '.', 'end_frequency', ',', 'self', '.', 'delta_f', ',', 'N', ')', 'self', '.', 'sslice', '=', 'slice', '(', 'kmin', ',', 'kmax', ')', 'self', '.', 'sigma_view', '=', 'self', '[', 'self', '.', 'sslice', ']', '.', 'squared_norm', '(', ')', '*', '4.0', '*', 'self', '.', 'delta_f', 'if', 'not', 'hasattr', '(', 'psd', ',', "'invsqrt'", ')', ':', 'psd', '.', 'invsqrt', '=', '1.0', '/', 'psd', '[', 'self', '.', 'sslice', ']', 'self', '.', '_sigmasq', '[', 'key', ']', '=', 'self', '.', 'sigma_view', '.', 'inner', '(', 'psd', '.', 'invsqrt', ')', 'return', 'self', '.', '_sigmasq', '[', 'key', ']']
Cache sigma calculate for use in tandem with the FilterBank class
['Cache', 'sigma', 'calculate', 'for', 'use', 'in', 'tandem', 'with', 'the', 'FilterBank', 'class']
train
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/waveform/bank.py#L42-L89
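The method above memoizes an expensive per-(template, psd) product in a size-limited dict keyed by id(psd), and additionally stamps the psd object itself because id() values can be recycled once an object is garbage-collected. A stripped-down sketch of that caching pattern with hypothetical class and field names; a plain dict stands in for pycbc's LimitedSizeDict.

class Template(object):
    def __init__(self, norm):
        self.norm = norm

    def sigma_cached(self, psd):
        # Lazily create the per-template cache on first use.
        if not hasattr(self, '_sigmasq'):
            self._sigmasq = {}
        key = id(psd)
        # Stamp the psd too, so a recycled id() after GC can be detected.
        if not hasattr(psd, '_seen_by'):
            psd._seen_by = set()
        if key not in self._sigmasq or id(self) not in psd._seen_by:
            psd._seen_by.add(id(self))
            self._sigmasq[key] = self.norm * sum(psd.values)  # expensive-step stand-in
        return self._sigmasq[key]

class PSD(object):
    def __init__(self, values):
        self.values = values

t, p = Template(2.0), PSD([1.0, 2.0, 3.0])
print(t.sigma_cached(p))  # 12.0, computed
print(t.sigma_cached(p))  # 12.0, served from the cache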
9,273
spyder-ide/spyder
spyder/config/user.py
UserConfig.get
def get(self, section, option, default=NoDefault): """ Get an option section=None: attribute a default section name default: default value (if not specified, an exception will be raised if option doesn't exist) """ section = self._check_section_option(section, option) if not self.has_section(section): if default is NoDefault: raise cp.NoSectionError(section) else: self.add_section(section) if not self.has_option(section, option): if default is NoDefault: raise cp.NoOptionError(option, section) else: self.set(section, option, default) return default value = cp.ConfigParser.get(self, section, option, raw=self.raw) # Use type of default_value to parse value correctly default_value = self.get_default(section, option) if isinstance(default_value, bool): value = ast.literal_eval(value) elif isinstance(default_value, float): value = float(value) elif isinstance(default_value, int): value = int(value) elif is_text_string(default_value): if PY2: try: value = value.decode('utf-8') try: # Some str config values expect to be eval after decoding new_value = ast.literal_eval(value) if is_text_string(new_value): value = new_value except (SyntaxError, ValueError): pass except (UnicodeEncodeError, UnicodeDecodeError): pass else: try: # lists, tuples, ... value = ast.literal_eval(value) except (SyntaxError, ValueError): pass return value
python
def get(self, section, option, default=NoDefault): """ Get an option section=None: attribute a default section name default: default value (if not specified, an exception will be raised if option doesn't exist) """ section = self._check_section_option(section, option) if not self.has_section(section): if default is NoDefault: raise cp.NoSectionError(section) else: self.add_section(section) if not self.has_option(section, option): if default is NoDefault: raise cp.NoOptionError(option, section) else: self.set(section, option, default) return default value = cp.ConfigParser.get(self, section, option, raw=self.raw) # Use type of default_value to parse value correctly default_value = self.get_default(section, option) if isinstance(default_value, bool): value = ast.literal_eval(value) elif isinstance(default_value, float): value = float(value) elif isinstance(default_value, int): value = int(value) elif is_text_string(default_value): if PY2: try: value = value.decode('utf-8') try: # Some str config values expect to be eval after decoding new_value = ast.literal_eval(value) if is_text_string(new_value): value = new_value except (SyntaxError, ValueError): pass except (UnicodeEncodeError, UnicodeDecodeError): pass else: try: # lists, tuples, ... value = ast.literal_eval(value) except (SyntaxError, ValueError): pass return value
['def', 'get', '(', 'self', ',', 'section', ',', 'option', ',', 'default', '=', 'NoDefault', ')', ':', 'section', '=', 'self', '.', '_check_section_option', '(', 'section', ',', 'option', ')', 'if', 'not', 'self', '.', 'has_section', '(', 'section', ')', ':', 'if', 'default', 'is', 'NoDefault', ':', 'raise', 'cp', '.', 'NoSectionError', '(', 'section', ')', 'else', ':', 'self', '.', 'add_section', '(', 'section', ')', 'if', 'not', 'self', '.', 'has_option', '(', 'section', ',', 'option', ')', ':', 'if', 'default', 'is', 'NoDefault', ':', 'raise', 'cp', '.', 'NoOptionError', '(', 'option', ',', 'section', ')', 'else', ':', 'self', '.', 'set', '(', 'section', ',', 'option', ',', 'default', ')', 'return', 'default', 'value', '=', 'cp', '.', 'ConfigParser', '.', 'get', '(', 'self', ',', 'section', ',', 'option', ',', 'raw', '=', 'self', '.', 'raw', ')', '# Use type of default_value to parse value correctly\r', 'default_value', '=', 'self', '.', 'get_default', '(', 'section', ',', 'option', ')', 'if', 'isinstance', '(', 'default_value', ',', 'bool', ')', ':', 'value', '=', 'ast', '.', 'literal_eval', '(', 'value', ')', 'elif', 'isinstance', '(', 'default_value', ',', 'float', ')', ':', 'value', '=', 'float', '(', 'value', ')', 'elif', 'isinstance', '(', 'default_value', ',', 'int', ')', ':', 'value', '=', 'int', '(', 'value', ')', 'elif', 'is_text_string', '(', 'default_value', ')', ':', 'if', 'PY2', ':', 'try', ':', 'value', '=', 'value', '.', 'decode', '(', "'utf-8'", ')', 'try', ':', '# Some str config values expect to be eval after decoding\r', 'new_value', '=', 'ast', '.', 'literal_eval', '(', 'value', ')', 'if', 'is_text_string', '(', 'new_value', ')', ':', 'value', '=', 'new_value', 'except', '(', 'SyntaxError', ',', 'ValueError', ')', ':', 'pass', 'except', '(', 'UnicodeEncodeError', ',', 'UnicodeDecodeError', ')', ':', 'pass', 'else', ':', 'try', ':', '# lists, tuples, ...\r', 'value', '=', 'ast', '.', 'literal_eval', '(', 'value', ')', 'except', '(', 'SyntaxError', ',', 'ValueError', ')', ':', 'pass', 'return', 'value']
Get an option section=None: attribute a default section name default: default value (if not specified, an exception will be raised if option doesn't exist)
['Get', 'an', 'option', 'section', '=', 'None', ':', 'attribute', 'a', 'default', 'section', 'name', 'default', ':', 'default', 'value', '(', 'if', 'not', 'specified', 'an', 'exception', 'will', 'be', 'raised', 'if', 'option', 'doesn', 't', 'exist', ')']
train
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/config/user.py#L365-L415
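The essence of the get() above is that ConfigParser stores every value as text, so the type of the registered default decides how the stored string is parsed back. A small sketch of just that coercion step (the sample values are made up; note bool must be tested before int, since bool subclasses int).

import ast

def coerce(raw, default):
    if isinstance(default, bool):      # must precede int: bool subclasses int
        return ast.literal_eval(raw)   # 'True' -> True
    if isinstance(default, float):
        return float(raw)
    if isinstance(default, int):
        return int(raw)
    try:
        return ast.literal_eval(raw)   # '[1, 2]' -> [1, 2]
    except (SyntaxError, ValueError):
        return raw                     # plain strings pass through unchanged

print(coerce('True', False))   # True
print(coerce('42', 0))         # 42
print(coerce('[1, 2]', []))    # [1, 2]
print(coerce('hello', ''))     # hello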
9,274
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/worker.py
EvaluationWorker.run_attacks
def run_attacks(self): """Method which evaluates all attack work. In a loop this method queries not completed attack work, picks one attack work and runs it. """ logging.info('******** Start evaluation of attacks ********') prev_submission_id = None while True: # wait until work is available self.attack_work.read_all_from_datastore() if not self.attack_work.work: logging.info('Work is not populated, waiting...') time.sleep(SLEEP_TIME) continue if self.attack_work.is_all_work_competed(): logging.info('All attack work completed.') break # download all attacks data and dataset self.fetch_attacks_data() # pick piece of work work_id = self.attack_work.try_pick_piece_of_work( self.worker_id, submission_id=prev_submission_id) if not work_id: logging.info('Failed to pick work, waiting...') time.sleep(SLEEP_TIME_SHORT) continue logging.info('Selected work_id: %s', work_id) # execute work try: elapsed_time_sec, prev_submission_id = self.run_attack_work(work_id) logging.info('Work %s is done', work_id) # indicate that work is completed is_work_update = self.attack_work.update_work_as_completed( self.worker_id, work_id, other_values={'elapsed_time': elapsed_time_sec}) except WorkerError as e: logging.info('Failed to run work:\n%s', str(e)) is_work_update = self.attack_work.update_work_as_completed( self.worker_id, work_id, error=str(e)) if not is_work_update: logging.warning('Can''t update work "%s" as completed by worker %d', work_id, self.worker_id) logging.info('******** Finished evaluation of attacks ********')
python
def run_attacks(self): """Method which evaluates all attack work. In a loop this method queries not completed attack work, picks one attack work and runs it. """ logging.info('******** Start evaluation of attacks ********') prev_submission_id = None while True: # wait until work is available self.attack_work.read_all_from_datastore() if not self.attack_work.work: logging.info('Work is not populated, waiting...') time.sleep(SLEEP_TIME) continue if self.attack_work.is_all_work_competed(): logging.info('All attack work completed.') break # download all attacks data and dataset self.fetch_attacks_data() # pick piece of work work_id = self.attack_work.try_pick_piece_of_work( self.worker_id, submission_id=prev_submission_id) if not work_id: logging.info('Failed to pick work, waiting...') time.sleep(SLEEP_TIME_SHORT) continue logging.info('Selected work_id: %s', work_id) # execute work try: elapsed_time_sec, prev_submission_id = self.run_attack_work(work_id) logging.info('Work %s is done', work_id) # indicate that work is completed is_work_update = self.attack_work.update_work_as_completed( self.worker_id, work_id, other_values={'elapsed_time': elapsed_time_sec}) except WorkerError as e: logging.info('Failed to run work:\n%s', str(e)) is_work_update = self.attack_work.update_work_as_completed( self.worker_id, work_id, error=str(e)) if not is_work_update: logging.warning('Can''t update work "%s" as completed by worker %d', work_id, self.worker_id) logging.info('******** Finished evaluation of attacks ********')
['def', 'run_attacks', '(', 'self', ')', ':', 'logging', '.', 'info', '(', "'******** Start evaluation of attacks ********'", ')', 'prev_submission_id', '=', 'None', 'while', 'True', ':', '# wait until work is available', 'self', '.', 'attack_work', '.', 'read_all_from_datastore', '(', ')', 'if', 'not', 'self', '.', 'attack_work', '.', 'work', ':', 'logging', '.', 'info', '(', "'Work is not populated, waiting...'", ')', 'time', '.', 'sleep', '(', 'SLEEP_TIME', ')', 'continue', 'if', 'self', '.', 'attack_work', '.', 'is_all_work_competed', '(', ')', ':', 'logging', '.', 'info', '(', "'All attack work completed.'", ')', 'break', '# download all attacks data and dataset', 'self', '.', 'fetch_attacks_data', '(', ')', '# pick piece of work', 'work_id', '=', 'self', '.', 'attack_work', '.', 'try_pick_piece_of_work', '(', 'self', '.', 'worker_id', ',', 'submission_id', '=', 'prev_submission_id', ')', 'if', 'not', 'work_id', ':', 'logging', '.', 'info', '(', "'Failed to pick work, waiting...'", ')', 'time', '.', 'sleep', '(', 'SLEEP_TIME_SHORT', ')', 'continue', 'logging', '.', 'info', '(', "'Selected work_id: %s'", ',', 'work_id', ')', '# execute work', 'try', ':', 'elapsed_time_sec', ',', 'prev_submission_id', '=', 'self', '.', 'run_attack_work', '(', 'work_id', ')', 'logging', '.', 'info', '(', "'Work %s is done'", ',', 'work_id', ')', '# indicate that work is completed', 'is_work_update', '=', 'self', '.', 'attack_work', '.', 'update_work_as_completed', '(', 'self', '.', 'worker_id', ',', 'work_id', ',', 'other_values', '=', '{', "'elapsed_time'", ':', 'elapsed_time_sec', '}', ')', 'except', 'WorkerError', 'as', 'e', ':', 'logging', '.', 'info', '(', "'Failed to run work:\\n%s'", ',', 'str', '(', 'e', ')', ')', 'is_work_update', '=', 'self', '.', 'attack_work', '.', 'update_work_as_completed', '(', 'self', '.', 'worker_id', ',', 'work_id', ',', 'error', '=', 'str', '(', 'e', ')', ')', 'if', 'not', 'is_work_update', ':', 'logging', '.', 'warning', '(', "'Can'", '\'t update work "%s" as completed by worker %d\'', ',', 'work_id', ',', 'self', '.', 'worker_id', ')', 'logging', '.', 'info', '(', "'******** Finished evaluation of attacks ********'", ')']
Method which evaluates all attack work. In a loop this method queries not completed attack work, picks one attack work and runs it.
['Method', 'which', 'evaluates', 'all', 'attack', 'work', '.']
train
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/worker.py#L689-L732
9,275
saltstack/salt
salt/modules/napalm_network.py
_filter_dict
def _filter_dict(input_dict, search_key, search_value): ''' Filters a dictionary of dictionaries by a key-value pair. :param input_dict: is a dictionary whose values are lists of dictionaries :param search_key: is the key in the leaf dictionaries :param search_values: is the value in the leaf dictionaries :return: filtered dictionary ''' output_dict = dict() for key, key_list in six.iteritems(input_dict): key_list_filtered = _filter_list(key_list, search_key, search_value) if key_list_filtered: output_dict[key] = key_list_filtered return output_dict
python
def _filter_dict(input_dict, search_key, search_value): ''' Filters a dictionary of dictionaries by a key-value pair. :param input_dict: is a dictionary whose values are lists of dictionaries :param search_key: is the key in the leaf dictionaries :param search_values: is the value in the leaf dictionaries :return: filtered dictionary ''' output_dict = dict() for key, key_list in six.iteritems(input_dict): key_list_filtered = _filter_list(key_list, search_key, search_value) if key_list_filtered: output_dict[key] = key_list_filtered return output_dict
['def', '_filter_dict', '(', 'input_dict', ',', 'search_key', ',', 'search_value', ')', ':', 'output_dict', '=', 'dict', '(', ')', 'for', 'key', ',', 'key_list', 'in', 'six', '.', 'iteritems', '(', 'input_dict', ')', ':', 'key_list_filtered', '=', '_filter_list', '(', 'key_list', ',', 'search_key', ',', 'search_value', ')', 'if', 'key_list_filtered', ':', 'output_dict', '[', 'key', ']', '=', 'key_list_filtered', 'return', 'output_dict']
Filters a dictionary of dictionaries by a key-value pair. :param input_dict: is a dictionary whose values are lists of dictionaries :param search_key: is the key in the leaf dictionaries :param search_values: is the value in the leaf dictionaries :return: filtered dictionary
['Filters', 'a', 'dictionary', 'of', 'dictionaries', 'by', 'a', 'key', '-', 'value', 'pair', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/napalm_network.py#L90-L108
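A tiny worked example of the dictionary filter above, with made-up neighbor data; _filter_list is reimplemented here as a stand-in for the module's own helper, assumed to keep the leaf dicts whose search_key equals search_value.

def _filter_list(lst, key, value):
    # Stand-in for the module's own helper: keep matching leaf dicts.
    return [d for d in lst if d.get(key) == value]

def _filter_dict(input_dict, search_key, search_value):
    output_dict = {}
    for k, lst in input_dict.items():
        kept = _filter_list(lst, search_key, search_value)
        if kept:                       # outer keys with no matches are dropped
            output_dict[k] = kept
    return output_dict

neighbors = {
    'global': [{'as': 65001, 'ip': '10.0.0.1'}, {'as': 65002, 'ip': '10.0.0.2'}],
    'vrf-a':  [{'as': 65002, 'ip': '10.1.0.1'}],
}
print(_filter_dict(neighbors, 'as', 65001))
# {'global': [{'as': 65001, 'ip': '10.0.0.1'}]}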
9,276
kageurufu/AsyncIRC
asyncirc/ircclient.py
IRCClient._async_recv
def _async_recv(self): """No raw bytes should escape from this, all byte encoding and decoding should be handling inside this function""" logging.info("Receive loop started") recbuffer = b"" while not self._stop_event.is_set(): time.sleep(0.01) try: recbuffer = recbuffer + self._socket.recv(1024) data = recbuffer.split(b'\r\n') recbuffer = data.pop() if data: for line in data: self._process_data(line.decode(encoding='UTF-8', errors='ignore')) except BlockingIOError as e: pass logging.info("Receive loop stopped")
python
def _async_recv(self): """No raw bytes should escape from this, all byte encoding and decoding should be handling inside this function""" logging.info("Receive loop started") recbuffer = b"" while not self._stop_event.is_set(): time.sleep(0.01) try: recbuffer = recbuffer + self._socket.recv(1024) data = recbuffer.split(b'\r\n') recbuffer = data.pop() if data: for line in data: self._process_data(line.decode(encoding='UTF-8', errors='ignore')) except BlockingIOError as e: pass logging.info("Receive loop stopped")
['def', '_async_recv', '(', 'self', ')', ':', 'logging', '.', 'info', '(', '"Receive loop started"', ')', 'recbuffer', '=', 'b""', 'while', 'not', 'self', '.', '_stop_event', '.', 'is_set', '(', ')', ':', 'time', '.', 'sleep', '(', '0.01', ')', 'try', ':', 'recbuffer', '=', 'recbuffer', '+', 'self', '.', '_socket', '.', 'recv', '(', '1024', ')', 'data', '=', 'recbuffer', '.', 'split', '(', "b'\\r\\n'", ')', 'recbuffer', '=', 'data', '.', 'pop', '(', ')', 'if', 'data', ':', 'for', 'line', 'in', 'data', ':', 'self', '.', '_process_data', '(', 'line', '.', 'decode', '(', 'encoding', '=', "'UTF-8'", ',', 'errors', '=', "'ignore'", ')', ')', 'except', 'BlockingIOError', 'as', 'e', ':', 'pass', 'logging', '.', 'info', '(', '"Receive loop stopped"', ')']
No raw bytes should escape from this, all byte encoding and decoding should be handling inside this function
['No', 'raw', 'bytes', 'should', 'escape', 'from', 'this', 'all', 'byte', 'encoding', 'and', 'decoding', 'should', 'be', 'handling', 'inside', 'this', 'function']
train
https://github.com/kageurufu/AsyncIRC/blob/73e2c14ad87a2e86a64c7e79fc5289b0333246f9/asyncirc/ircclient.py#L111-L129
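The core of the receive loop above is its buffering idiom: append the new bytes, split on CRLF, and hold the trailing partial line back for the next recv(). A self-contained sketch of just that step, with simulated socket chunks standing in for recv() calls.

recbuffer = b""
for chunk in [b"PING :ser", b"ver\r\nNOTICE x\r\n:tail"]:  # simulated recv() chunks
    recbuffer += chunk
    lines = recbuffer.split(b"\r\n")
    recbuffer = lines.pop()            # the incomplete trailing line waits
    for line in lines:
        print(line.decode("utf-8", errors="ignore"))
print("left over:", recbuffer)          # b':tail' stays buffered for the next recv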
9,277
ramses-tech/ramses
ramses/utils.py
get_static_parent
def get_static_parent(raml_resource, method=None): """ Get static parent resource of :raml_resource: with HTTP method :method:. :param raml_resource:Instance of ramlfications.raml.ResourceNode. :param method: HTTP method name which matching static resource must have. """ parent = raml_resource.parent while is_dynamic_resource(parent): parent = parent.parent if parent is None: return parent match_method = method is not None if match_method: if parent.method.upper() == method.upper(): return parent else: return parent for res in parent.root.resources: if res.path == parent.path: if res.method.upper() == method.upper(): return res
python
def get_static_parent(raml_resource, method=None): """ Get static parent resource of :raml_resource: with HTTP method :method:. :param raml_resource:Instance of ramlfications.raml.ResourceNode. :param method: HTTP method name which matching static resource must have. """ parent = raml_resource.parent while is_dynamic_resource(parent): parent = parent.parent if parent is None: return parent match_method = method is not None if match_method: if parent.method.upper() == method.upper(): return parent else: return parent for res in parent.root.resources: if res.path == parent.path: if res.method.upper() == method.upper(): return res
['def', 'get_static_parent', '(', 'raml_resource', ',', 'method', '=', 'None', ')', ':', 'parent', '=', 'raml_resource', '.', 'parent', 'while', 'is_dynamic_resource', '(', 'parent', ')', ':', 'parent', '=', 'parent', '.', 'parent', 'if', 'parent', 'is', 'None', ':', 'return', 'parent', 'match_method', '=', 'method', 'is', 'not', 'None', 'if', 'match_method', ':', 'if', 'parent', '.', 'method', '.', 'upper', '(', ')', '==', 'method', '.', 'upper', '(', ')', ':', 'return', 'parent', 'else', ':', 'return', 'parent', 'for', 'res', 'in', 'parent', '.', 'root', '.', 'resources', ':', 'if', 'res', '.', 'path', '==', 'parent', '.', 'path', ':', 'if', 'res', '.', 'method', '.', 'upper', '(', ')', '==', 'method', '.', 'upper', '(', ')', ':', 'return', 'res']
Get static parent resource of :raml_resource: with HTTP method :method:. :param raml_resource:Instance of ramlfications.raml.ResourceNode. :param method: HTTP method name which matching static resource must have.
['Get', 'static', 'parent', 'resource', 'of', ':', 'raml_resource', ':', 'with', 'HTTP', 'method', ':', 'method', ':', '.']
train
https://github.com/ramses-tech/ramses/blob/ea2e1e896325b7256cdf5902309e05fd98e0c14c/ramses/utils.py#L189-L214
9,278
pantsbuild/pants
contrib/go/src/python/pants/contrib/go/tasks/go_compile.py
GoCompile._sync_binary_dep_links
def _sync_binary_dep_links(self, target, gopath, lib_binary_map): """Syncs symlinks under gopath to the library binaries of target's transitive dependencies. :param Target target: Target whose transitive dependencies must be linked. :param str gopath: $GOPATH of target whose "pkg/" directory must be populated with links to library binaries. :param dict<Target, str> lib_binary_map: Dictionary mapping a remote/local Go library to the path of the compiled binary (the ".a" file) of the library. Required links to binary dependencies under gopath's "pkg/" dir are either created if non-existent, or refreshed if the link is older than the underlying binary. Any pre-existing links within gopath's "pkg/" dir that do not correspond to a transitive dependency of target are deleted. """ required_links = set() for dep in target.closure(): if dep == target: continue if not isinstance(dep, GoTarget): continue lib_binary = lib_binary_map[dep] lib_binary_link = os.path.join(gopath, os.path.relpath(lib_binary, self.get_gopath(dep))) safe_mkdir(os.path.dirname(lib_binary_link)) if os.path.islink(lib_binary_link): if os.stat(lib_binary).st_mtime > os.lstat(lib_binary_link).st_mtime: # The binary under the link was updated after the link was created. Refresh # the link so the mtime (modification time) of the link is greater than the # mtime of the binary. This stops Go from needlessly re-compiling the library. os.unlink(lib_binary_link) os.symlink(lib_binary, lib_binary_link) else: os.symlink(lib_binary, lib_binary_link) required_links.add(lib_binary_link) self.remove_unused_links(os.path.join(gopath, 'pkg'), required_links)
python
def _sync_binary_dep_links(self, target, gopath, lib_binary_map): """Syncs symlinks under gopath to the library binaries of target's transitive dependencies. :param Target target: Target whose transitive dependencies must be linked. :param str gopath: $GOPATH of target whose "pkg/" directory must be populated with links to library binaries. :param dict<Target, str> lib_binary_map: Dictionary mapping a remote/local Go library to the path of the compiled binary (the ".a" file) of the library. Required links to binary dependencies under gopath's "pkg/" dir are either created if non-existent, or refreshed if the link is older than the underlying binary. Any pre-existing links within gopath's "pkg/" dir that do not correspond to a transitive dependency of target are deleted. """ required_links = set() for dep in target.closure(): if dep == target: continue if not isinstance(dep, GoTarget): continue lib_binary = lib_binary_map[dep] lib_binary_link = os.path.join(gopath, os.path.relpath(lib_binary, self.get_gopath(dep))) safe_mkdir(os.path.dirname(lib_binary_link)) if os.path.islink(lib_binary_link): if os.stat(lib_binary).st_mtime > os.lstat(lib_binary_link).st_mtime: # The binary under the link was updated after the link was created. Refresh # the link so the mtime (modification time) of the link is greater than the # mtime of the binary. This stops Go from needlessly re-compiling the library. os.unlink(lib_binary_link) os.symlink(lib_binary, lib_binary_link) else: os.symlink(lib_binary, lib_binary_link) required_links.add(lib_binary_link) self.remove_unused_links(os.path.join(gopath, 'pkg'), required_links)
['def', '_sync_binary_dep_links', '(', 'self', ',', 'target', ',', 'gopath', ',', 'lib_binary_map', ')', ':', 'required_links', '=', 'set', '(', ')', 'for', 'dep', 'in', 'target', '.', 'closure', '(', ')', ':', 'if', 'dep', '==', 'target', ':', 'continue', 'if', 'not', 'isinstance', '(', 'dep', ',', 'GoTarget', ')', ':', 'continue', 'lib_binary', '=', 'lib_binary_map', '[', 'dep', ']', 'lib_binary_link', '=', 'os', '.', 'path', '.', 'join', '(', 'gopath', ',', 'os', '.', 'path', '.', 'relpath', '(', 'lib_binary', ',', 'self', '.', 'get_gopath', '(', 'dep', ')', ')', ')', 'safe_mkdir', '(', 'os', '.', 'path', '.', 'dirname', '(', 'lib_binary_link', ')', ')', 'if', 'os', '.', 'path', '.', 'islink', '(', 'lib_binary_link', ')', ':', 'if', 'os', '.', 'stat', '(', 'lib_binary', ')', '.', 'st_mtime', '>', 'os', '.', 'lstat', '(', 'lib_binary_link', ')', '.', 'st_mtime', ':', '# The binary under the link was updated after the link was created. Refresh', '# the link so the mtime (modification time) of the link is greater than the', '# mtime of the binary. This stops Go from needlessly re-compiling the library.', 'os', '.', 'unlink', '(', 'lib_binary_link', ')', 'os', '.', 'symlink', '(', 'lib_binary', ',', 'lib_binary_link', ')', 'else', ':', 'os', '.', 'symlink', '(', 'lib_binary', ',', 'lib_binary_link', ')', 'required_links', '.', 'add', '(', 'lib_binary_link', ')', 'self', '.', 'remove_unused_links', '(', 'os', '.', 'path', '.', 'join', '(', 'gopath', ',', "'pkg'", ')', ',', 'required_links', ')']
Syncs symlinks under gopath to the library binaries of target's transitive dependencies. :param Target target: Target whose transitive dependencies must be linked. :param str gopath: $GOPATH of target whose "pkg/" directory must be populated with links to library binaries. :param dict<Target, str> lib_binary_map: Dictionary mapping a remote/local Go library to the path of the compiled binary (the ".a" file) of the library. Required links to binary dependencies under gopath's "pkg/" dir are either created if non-existent, or refreshed if the link is older than the underlying binary. Any pre-existing links within gopath's "pkg/" dir that do not correspond to a transitive dependency of target are deleted.
['Syncs', 'symlinks', 'under', 'gopath', 'to', 'the', 'library', 'binaries', 'of', 'target', 's', 'transitive', 'dependencies', '.']
train
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/contrib/go/src/python/pants/contrib/go/tasks/go_compile.py#L110-L144
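A minimal sketch of the mtime-guarded symlink refresh used above: os.lstat reads the link itself while os.stat follows it to the binary, so the link is recreated only when the binary changed after the link was made. The paths and helper name are hypothetical, and exist_ok requires Python 3.

import os

def refresh_link(binary, link):
    # Ensure the parent directory for the link exists (Python 3 form).
    os.makedirs(os.path.dirname(link), exist_ok=True)
    if os.path.islink(link):
        # lstat() -> mtime of the link; stat() -> mtime of the binary behind it.
        if os.stat(binary).st_mtime > os.lstat(link).st_mtime:
            os.unlink(link)              # stale: binary rebuilt after linking
            os.symlink(binary, link)
    else:
        os.symlink(binary, link)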
9,279
napalm-automation/napalm
napalm/base/helpers.py
canonical_interface_name
def canonical_interface_name(interface, addl_name_map=None): """Function to return an interface's canonical name (fully expanded name). Use of explicit matches used to indicate a clear understanding on any potential match. Regex and other looser matching methods were not implmented to avoid false positive matches. As an example, it would make sense to do "[P|p][O|o]" which would incorrectly match PO = POS and Po = Port-channel, leading to a false positive, not easily troubleshot, found, or known. :param interface: The interface you are attempting to expand. :param addl_name_map (optional): A dict containing key/value pairs that updates the base mapping. Used if an OS has specific differences. e.g. {"Po": "PortChannel"} vs {"Po": "Port-Channel"} """ name_map = {} name_map.update(base_interfaces) interface_type, interface_number = split_interface(interface) if isinstance(addl_name_map, dict): name_map.update(addl_name_map) # check in dict for mapping if name_map.get(interface_type): long_int = name_map.get(interface_type) return long_int + py23_compat.text_type(interface_number) # if nothing matched, return the original name else: return interface
python
def canonical_interface_name(interface, addl_name_map=None): """Function to return an interface's canonical name (fully expanded name). Use of explicit matches used to indicate a clear understanding on any potential match. Regex and other looser matching methods were not implmented to avoid false positive matches. As an example, it would make sense to do "[P|p][O|o]" which would incorrectly match PO = POS and Po = Port-channel, leading to a false positive, not easily troubleshot, found, or known. :param interface: The interface you are attempting to expand. :param addl_name_map (optional): A dict containing key/value pairs that updates the base mapping. Used if an OS has specific differences. e.g. {"Po": "PortChannel"} vs {"Po": "Port-Channel"} """ name_map = {} name_map.update(base_interfaces) interface_type, interface_number = split_interface(interface) if isinstance(addl_name_map, dict): name_map.update(addl_name_map) # check in dict for mapping if name_map.get(interface_type): long_int = name_map.get(interface_type) return long_int + py23_compat.text_type(interface_number) # if nothing matched, return the original name else: return interface
['def', 'canonical_interface_name', '(', 'interface', ',', 'addl_name_map', '=', 'None', ')', ':', 'name_map', '=', '{', '}', 'name_map', '.', 'update', '(', 'base_interfaces', ')', 'interface_type', ',', 'interface_number', '=', 'split_interface', '(', 'interface', ')', 'if', 'isinstance', '(', 'addl_name_map', ',', 'dict', ')', ':', 'name_map', '.', 'update', '(', 'addl_name_map', ')', '# check in dict for mapping', 'if', 'name_map', '.', 'get', '(', 'interface_type', ')', ':', 'long_int', '=', 'name_map', '.', 'get', '(', 'interface_type', ')', 'return', 'long_int', '+', 'py23_compat', '.', 'text_type', '(', 'interface_number', ')', '# if nothing matched, return the original name', 'else', ':', 'return', 'interface']
Function to return an interface's canonical name (fully expanded name). Use of explicit matches used to indicate a clear understanding on any potential match. Regex and other looser matching methods were not implmented to avoid false positive matches. As an example, it would make sense to do "[P|p][O|o]" which would incorrectly match PO = POS and Po = Port-channel, leading to a false positive, not easily troubleshot, found, or known. :param interface: The interface you are attempting to expand. :param addl_name_map (optional): A dict containing key/value pairs that updates the base mapping. Used if an OS has specific differences. e.g. {"Po": "PortChannel"} vs {"Po": "Port-Channel"}
['Function', 'to', 'return', 'an', 'interface', 's', 'canonical', 'name', '(', 'fully', 'expanded', 'name', ')', '.']
train
https://github.com/napalm-automation/napalm/blob/c11ae8bb5ce395698704a0051cdf8d144fbb150d/napalm/base/helpers.py#L353-L380
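A hedged usage sketch of the expansion above. The two mapping entries are an illustrative excerpt, not napalm's full base_interfaces table, and split_interface is assumed to peel the alphabetic prefix off the numeric remainder.

import re

base_interfaces = {'Gi': 'GigabitEthernet', 'Po': 'Port-channel'}  # tiny excerpt

def split_interface(interface):
    # Assumed behavior: 'Gi0/1' -> ('Gi', '0/1')
    match = re.match(r'([A-Za-z-]+)(.*)', interface)
    return match.group(1), match.group(2)

def canonical_interface_name(interface, addl_name_map=None):
    name_map = dict(base_interfaces)
    if isinstance(addl_name_map, dict):
        name_map.update(addl_name_map)   # per-OS overrides win
    itype, inum = split_interface(interface)
    long_name = name_map.get(itype)
    return long_name + inum if long_name else interface

print(canonical_interface_name('Gi0/1'))                       # GigabitEthernet0/1
print(canonical_interface_name('Po1', {'Po': 'PortChannel'}))  # PortChannel1
print(canonical_interface_name('Xx9'))                         # Xx9 (no mapping)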
9,280
apache/incubator-mxnet
python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
convert_clip
def convert_clip(node, **kwargs): """Map MXNet's Clip operator attributes to onnx's Clip operator and return the created node. """ name, input_nodes, attrs = get_inputs(node, kwargs) a_min = np.float(attrs.get('a_min', -np.inf)) a_max = np.float(attrs.get('a_max', np.inf)) clip_node = onnx.helper.make_node( "Clip", input_nodes, [name], name=name, min=a_min, max=a_max ) return [clip_node]
python
def convert_clip(node, **kwargs): """Map MXNet's Clip operator attributes to onnx's Clip operator and return the created node. """ name, input_nodes, attrs = get_inputs(node, kwargs) a_min = np.float(attrs.get('a_min', -np.inf)) a_max = np.float(attrs.get('a_max', np.inf)) clip_node = onnx.helper.make_node( "Clip", input_nodes, [name], name=name, min=a_min, max=a_max ) return [clip_node]
['def', 'convert_clip', '(', 'node', ',', '*', '*', 'kwargs', ')', ':', 'name', ',', 'input_nodes', ',', 'attrs', '=', 'get_inputs', '(', 'node', ',', 'kwargs', ')', 'a_min', '=', 'np', '.', 'float', '(', 'attrs', '.', 'get', '(', "'a_min'", ',', '-', 'np', '.', 'inf', ')', ')', 'a_max', '=', 'np', '.', 'float', '(', 'attrs', '.', 'get', '(', "'a_max'", ',', 'np', '.', 'inf', ')', ')', 'clip_node', '=', 'onnx', '.', 'helper', '.', 'make_node', '(', '"Clip"', ',', 'input_nodes', ',', '[', 'name', ']', ',', 'name', '=', 'name', ',', 'min', '=', 'a_min', ',', 'max', '=', 'a_max', ')', 'return', '[', 'clip_node', ']']
Map MXNet's Clip operator attributes to onnx's Clip operator and return the created node.
['Map', 'MXNet', 's', 'Clip', 'operator', 'attributes', 'to', 'onnx', 's', 'Clip', 'operator', 'and', 'return', 'the', 'created', 'node', '.']
train
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py#L972-L989
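For reference, this is what the mapped Clip operator computes, and why the -inf/+inf defaults for missing a_min/a_max attributes make the bound a no-op; numpy's clip has the same semantics.

import numpy as np

x = np.array([-3.0, 0.5, 7.0])
print(np.clip(x, -np.inf, np.inf))  # [-3.   0.5  7. ]  -> defaults clip nothing
print(np.clip(x, 0.0, 1.0))         # [ 0.   0.5  1. ]  -> a_min=0.0, a_max=1.0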
9,281
datastax/python-driver
cassandra/cluster.py
Session.on_down
def on_down(self, host): """ Called by the parent Cluster instance when a node is marked down. Only intended for internal use. """ future = self.remove_pool(host) if future: future.add_done_callback(lambda f: self.update_created_pools())
python
def on_down(self, host): """ Called by the parent Cluster instance when a node is marked down. Only intended for internal use. """ future = self.remove_pool(host) if future: future.add_done_callback(lambda f: self.update_created_pools())
['def', 'on_down', '(', 'self', ',', 'host', ')', ':', 'future', '=', 'self', '.', 'remove_pool', '(', 'host', ')', 'if', 'future', ':', 'future', '.', 'add_done_callback', '(', 'lambda', 'f', ':', 'self', '.', 'update_created_pools', '(', ')', ')']
Called by the parent Cluster instance when a node is marked down. Only intended for internal use.
['Called', 'by', 'the', 'parent', 'Cluster', 'instance', 'when', 'a', 'node', 'is', 'marked', 'down', '.', 'Only', 'intended', 'for', 'internal', 'use', '.']
train
https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/cluster.py#L2674-L2681
9,282
yyuu/botornado
boto/mturk/connection.py
MTurkConnection._get_pages
def _get_pages(page_size, total_records): """ Given a page size (records per page) and a total number of records, return the page numbers to be retrieved. """ pages = total_records/page_size+bool(total_records%page_size) return range(1, pages+1)
python
def _get_pages(page_size, total_records): """ Given a page size (records per page) and a total number of records, return the page numbers to be retrieved. """ pages = total_records/page_size+bool(total_records%page_size) return range(1, pages+1)
['def', '_get_pages', '(', 'page_size', ',', 'total_records', ')', ':', 'pages', '=', 'total_records', '/', 'page_size', '+', 'bool', '(', 'total_records', '%', 'page_size', ')', 'return', 'range', '(', '1', ',', 'pages', '+', '1', ')']
Given a page size (records per page) and a total number of records, return the page numbers to be retrieved.
['Given', 'a', 'page', 'size', '(', 'records', 'per', 'page', ')', 'and', 'a', 'total', 'number', 'of', 'records', 'return', 'the', 'page', 'numbers', 'to', 'be', 'retrieved', '.']
train
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/mturk/connection.py#L261-L267
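A worked example of the page arithmetic above: one extra page is added exactly when the division leaves a remainder. Note the original relies on Python 2 integer division; the sketch below uses '//' so it behaves the same under Python 3.

def get_pages(page_size, total_records):
    pages = total_records // page_size + bool(total_records % page_size)
    return range(1, pages + 1)

print(list(get_pages(10, 25)))  # [1, 2, 3] -> two full pages plus a partial one
print(list(get_pages(10, 30)))  # [1, 2, 3] -> exact multiple, no extra page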
9,283
mjj4791/python-buienradar
buienradar/buienradar_xml.py
parse_xml_data
def parse_xml_data(content, raincontent, latitude=52.091579, longitude=5.119734, timeframe=60): """Parse the raw data and return as data dictionary.""" result = {SUCCESS: False, MESSAGE: None, DATA: None} if timeframe < 5 or timeframe > 120: raise ValueError("Timeframe must be >=5 and <=120.") if content is not None: result = __parse_ws_data(content, latitude, longitude) if result[SUCCESS] and raincontent is not None: data = __parse_precipfc_data(raincontent, timeframe) result[DATA][PRECIPITATION_FORECAST] = data log.debug("Extracted weather-data: %s", result[DATA]) return result
python
def parse_xml_data(content, raincontent, latitude=52.091579, longitude=5.119734, timeframe=60): """Parse the raw data and return as data dictionary.""" result = {SUCCESS: False, MESSAGE: None, DATA: None} if timeframe < 5 or timeframe > 120: raise ValueError("Timeframe must be >=5 and <=120.") if content is not None: result = __parse_ws_data(content, latitude, longitude) if result[SUCCESS] and raincontent is not None: data = __parse_precipfc_data(raincontent, timeframe) result[DATA][PRECIPITATION_FORECAST] = data log.debug("Extracted weather-data: %s", result[DATA]) return result
['def', 'parse_xml_data', '(', 'content', ',', 'raincontent', ',', 'latitude', '=', '52.091579', ',', 'longitude', '=', '5.119734', ',', 'timeframe', '=', '60', ')', ':', 'result', '=', '{', 'SUCCESS', ':', 'False', ',', 'MESSAGE', ':', 'None', ',', 'DATA', ':', 'None', '}', 'if', 'timeframe', '<', '5', 'or', 'timeframe', '>', '120', ':', 'raise', 'ValueError', '(', '"Timeframe must be >=5 and <=120."', ')', 'if', 'content', 'is', 'not', 'None', ':', 'result', '=', '__parse_ws_data', '(', 'content', ',', 'latitude', ',', 'longitude', ')', 'if', 'result', '[', 'SUCCESS', ']', 'and', 'raincontent', 'is', 'not', 'None', ':', 'data', '=', '__parse_precipfc_data', '(', 'raincontent', ',', 'timeframe', ')', 'result', '[', 'DATA', ']', '[', 'PRECIPITATION_FORECAST', ']', '=', 'data', 'log', '.', 'debug', '(', '"Extracted weather-data: %s"', ',', 'result', '[', 'DATA', ']', ')', 'return', 'result']
Parse the raw data and return as data dictionary.
['Parse', 'the', 'raw', 'data', 'and', 'return', 'as', 'data', 'dictionary', '.']
train
https://github.com/mjj4791/python-buienradar/blob/a70436f54e007ce921d5210cb296cf3e4adf9d09/buienradar/buienradar_xml.py#L186-L202
9,284
iotile/coretools
iotilecore/iotile/core/utilities/intelhex/__init__.py
Record.data
def data(offset, bytes): """Return Data record. This constructs the full record, including the length information, the record type (0x00), the checksum, and the offset. @param offset load offset of first byte. @param bytes list of byte values to pack into record. @return String representation of one HEX record """ assert 0 <= offset < 65536 assert 0 < len(bytes) < 256 b = [len(bytes), (offset>>8)&0x0FF, offset&0x0FF, 0x00] + bytes return Record._from_bytes(b)
python
def data(offset, bytes): """Return Data record. This constructs the full record, including the length information, the record type (0x00), the checksum, and the offset. @param offset load offset of first byte. @param bytes list of byte values to pack into record. @return String representation of one HEX record """ assert 0 <= offset < 65536 assert 0 < len(bytes) < 256 b = [len(bytes), (offset>>8)&0x0FF, offset&0x0FF, 0x00] + bytes return Record._from_bytes(b)
['def', 'data', '(', 'offset', ',', 'bytes', ')', ':', 'assert', '0', '<=', 'offset', '<', '65536', 'assert', '0', '<', 'len', '(', 'bytes', ')', '<', '256', 'b', '=', '[', 'len', '(', 'bytes', ')', ',', '(', 'offset', '>>', '8', ')', '&', '0x0FF', ',', 'offset', '&', '0x0FF', ',', '0x00', ']', '+', 'bytes', 'return', 'Record', '.', '_from_bytes', '(', 'b', ')']
Return Data record. This constructs the full record, including the length information, the record type (0x00), the checksum, and the offset. @param offset load offset of first byte. @param bytes list of byte values to pack into record. @return String representation of one HEX record
['Return', 'Data', 'record', '.', 'This', 'constructs', 'the', 'full', 'record', 'including', 'the', 'length', 'information', 'the', 'record', 'type', '(', '0x00', ')', 'the', 'checksum', 'and', 'the', 'offset', '.']
train
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/utilities/intelhex/__init__.py#L1145-L1158
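An Intel HEX data record packs a length byte, a big-endian 16-bit load offset, the record type (0x00 for data), the payload, and a checksum; Record._from_bytes presumably appends the checksum and renders the ':...' line. A self-contained sketch of the same layout, using the standard Intel HEX checksum (two's complement of the low byte of the byte sum):

def hex_data_record(offset, data):
    assert 0 <= offset < 65536
    assert 0 < len(data) < 256
    body = [len(data), (offset >> 8) & 0xFF, offset & 0xFF, 0x00] + list(data)
    checksum = (-sum(body)) & 0xFF           # two's complement of the byte sum
    return ':' + ''.join('%02X' % b for b in body + [checksum])

print(hex_data_record(0x0100, [0x21, 0x46, 0x01]))   # :0301000021460194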
9,285
alixnovosi/botskeleton
botskeleton/botskeleton.py
BotSkeleton.store_extra_info
def store_extra_info(self, key: str, value: Any) -> None: """ Store some extra value in the messaging storage. :param key: key of dictionary entry to add. :param value: value of dictionary entry to add. :returns: None """ self.extra_keys[key] = value
python
def store_extra_info(self, key: str, value: Any) -> None: """ Store some extra value in the messaging storage. :param key: key of dictionary entry to add. :param value: value of dictionary entry to add. :returns: None """ self.extra_keys[key] = value
['def', 'store_extra_info', '(', 'self', ',', 'key', ':', 'str', ',', 'value', ':', 'Any', ')', '->', 'None', ':', 'self', '.', 'extra_keys', '[', 'key', ']', '=', 'value']
Store some extra value in the messaging storage. :param key: key of dictionary entry to add. :param value: value of dictionary entry to add. :returns: None
['Store', 'some', 'extra', 'value', 'in', 'the', 'messaging', 'storage', '.']
train
https://github.com/alixnovosi/botskeleton/blob/55bfc1b8a3623c10437e4ab2cd0b0ec8d35907a9/botskeleton/botskeleton.py#L353-L361
9,286
hydraplatform/hydra-base
hydra_base/lib/template.py
validate_attrs
def validate_attrs(resource_attr_ids, scenario_id, template_id=None): """ Check that multiple resource attribute satisfy the requirements of the types of resources to which the they are attached. """ multi_rs = db.DBSession.query(ResourceScenario).\ filter(ResourceScenario.resource_attr_id.in_(resource_attr_ids),\ ResourceScenario.scenario_id==scenario_id).\ options(joinedload_all("resourceattr")).\ options(joinedload_all("dataset")).all() errors = [] for rs in multi_rs: try: _do_validate_resourcescenario(rs, template_id) except HydraError as e: error = dict( ref_key = rs.resourceattr.ref_key, ref_id = rs.resourceattr.get_resource_id(), ref_name = rs.resourceattr.get_resource().get_name(), resource_attr_id = rs.resource_attr_id, attr_id = rs.resourceattr.attr.id, attr_name = rs.resourceattr.attr.name, dataset_id = rs.dataset_id, scenario_id = scenario_id, template_id = template_id, error_text = e.args[0]) errors.append(error) return errors
python
def validate_attrs(resource_attr_ids, scenario_id, template_id=None): """ Check that multiple resource attribute satisfy the requirements of the types of resources to which the they are attached. """ multi_rs = db.DBSession.query(ResourceScenario).\ filter(ResourceScenario.resource_attr_id.in_(resource_attr_ids),\ ResourceScenario.scenario_id==scenario_id).\ options(joinedload_all("resourceattr")).\ options(joinedload_all("dataset")).all() errors = [] for rs in multi_rs: try: _do_validate_resourcescenario(rs, template_id) except HydraError as e: error = dict( ref_key = rs.resourceattr.ref_key, ref_id = rs.resourceattr.get_resource_id(), ref_name = rs.resourceattr.get_resource().get_name(), resource_attr_id = rs.resource_attr_id, attr_id = rs.resourceattr.attr.id, attr_name = rs.resourceattr.attr.name, dataset_id = rs.dataset_id, scenario_id = scenario_id, template_id = template_id, error_text = e.args[0]) errors.append(error) return errors
['def', 'validate_attrs', '(', 'resource_attr_ids', ',', 'scenario_id', ',', 'template_id', '=', 'None', ')', ':', 'multi_rs', '=', 'db', '.', 'DBSession', '.', 'query', '(', 'ResourceScenario', ')', '.', 'filter', '(', 'ResourceScenario', '.', 'resource_attr_id', '.', 'in_', '(', 'resource_attr_ids', ')', ',', 'ResourceScenario', '.', 'scenario_id', '==', 'scenario_id', ')', '.', 'options', '(', 'joinedload_all', '(', '"resourceattr"', ')', ')', '.', 'options', '(', 'joinedload_all', '(', '"dataset"', ')', ')', '.', 'all', '(', ')', 'errors', '=', '[', ']', 'for', 'rs', 'in', 'multi_rs', ':', 'try', ':', '_do_validate_resourcescenario', '(', 'rs', ',', 'template_id', ')', 'except', 'HydraError', 'as', 'e', ':', 'error', '=', 'dict', '(', 'ref_key', '=', 'rs', '.', 'resourceattr', '.', 'ref_key', ',', 'ref_id', '=', 'rs', '.', 'resourceattr', '.', 'get_resource_id', '(', ')', ',', 'ref_name', '=', 'rs', '.', 'resourceattr', '.', 'get_resource', '(', ')', '.', 'get_name', '(', ')', ',', 'resource_attr_id', '=', 'rs', '.', 'resource_attr_id', ',', 'attr_id', '=', 'rs', '.', 'resourceattr', '.', 'attr', '.', 'id', ',', 'attr_name', '=', 'rs', '.', 'resourceattr', '.', 'attr', '.', 'name', ',', 'dataset_id', '=', 'rs', '.', 'dataset_id', ',', 'scenario_id', '=', 'scenario_id', ',', 'template_id', '=', 'template_id', ',', 'error_text', '=', 'e', '.', 'args', '[', '0', ']', ')', 'errors', '.', 'append', '(', 'error', ')', 'return', 'errors']
Check that multiple resource attribute satisfy the requirements of the types of resources to which the they are attached.
['Check', 'that', 'multiple', 'resource', 'attribute', 'satisfy', 'the', 'requirements', 'of', 'the', 'types', 'of', 'resources', 'to', 'which', 'the', 'they', 'are', 'attached', '.']
train
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/template.py#L1755-L1786
9,287
qubell/contrib-python-qubell-client
qubell/api/private/organization.py
Organization.instance
def instance(self, id=None, application=None, name=None, revision=None, environment=None, parameters=None, submodules=None, destroyInterval=None): """ Smart method. It does everything, to return Instance with given parameters within the application. If instance found running and given parameters are actual: return it. If instance found, but parameters differs - reconfigure instance with new parameters. If instance not found: launch instance with given parameters. Return: Instance object. """ instance = self.get_or_create_instance(id, application, revision, environment, name, parameters, submodules, destroyInterval) reconfigure = False # if found: # if revision and revision is not found.revision: # reconfigure = True # if parameters and parameters is not found.parameters: # reconfigure = True # We need to reconfigure instance if reconfigure: instance.reconfigure(revision=revision, parameters=parameters) return instance
python
def instance(self, id=None, application=None, name=None, revision=None, environment=None, parameters=None, submodules=None, destroyInterval=None): """ Smart method. It does everything, to return Instance with given parameters within the application. If instance found running and given parameters are actual: return it. If instance found, but parameters differs - reconfigure instance with new parameters. If instance not found: launch instance with given parameters. Return: Instance object. """ instance = self.get_or_create_instance(id, application, revision, environment, name, parameters, submodules, destroyInterval) reconfigure = False # if found: # if revision and revision is not found.revision: # reconfigure = True # if parameters and parameters is not found.parameters: # reconfigure = True # We need to reconfigure instance if reconfigure: instance.reconfigure(revision=revision, parameters=parameters) return instance
['def', 'instance', '(', 'self', ',', 'id', '=', 'None', ',', 'application', '=', 'None', ',', 'name', '=', 'None', ',', 'revision', '=', 'None', ',', 'environment', '=', 'None', ',', 'parameters', '=', 'None', ',', 'submodules', '=', 'None', ',', 'destroyInterval', '=', 'None', ')', ':', 'instance', '=', 'self', '.', 'get_or_create_instance', '(', 'id', ',', 'application', ',', 'revision', ',', 'environment', ',', 'name', ',', 'parameters', ',', 'submodules', ',', 'destroyInterval', ')', 'reconfigure', '=', 'False', '# if found:', '# if revision and revision is not found.revision:', '# reconfigure = True', '# if parameters and parameters is not found.parameters:', '# reconfigure = True', '# We need to reconfigure instance', 'if', 'reconfigure', ':', 'instance', '.', 'reconfigure', '(', 'revision', '=', 'revision', ',', 'parameters', '=', 'parameters', ')', 'return', 'instance']
Smart method. It does everything, to return Instance with given parameters within the application. If instance found running and given parameters are actual: return it. If instance found, but parameters differs - reconfigure instance with new parameters. If instance not found: launch instance with given parameters. Return: Instance object.
['Smart', 'method', '.', 'It', 'does', 'everything', 'to', 'return', 'Instance', 'with', 'given', 'parameters', 'within', 'the', 'application', '.', 'If', 'instance', 'found', 'running', 'and', 'given', 'parameters', 'are', 'actual', ':', 'return', 'it', '.', 'If', 'instance', 'found', 'but', 'parameters', 'differs', '-', 'reconfigure', 'instance', 'with', 'new', 'parameters', '.', 'If', 'instance', 'not', 'found', ':', 'launch', 'instance', 'with', 'given', 'parameters', '.', 'Return', ':', 'Instance', 'object', '.']
train
https://github.com/qubell/contrib-python-qubell-client/blob/4586ea11d5103c2ff9607d3ed922b5a0991b8845/qubell/api/private/organization.py#L309-L329
9,288
PythonCharmers/python-future
src/future/backports/urllib/request.py
URLopener._open_generic_http
def _open_generic_http(self, connection_factory, url, data): """Make an HTTP connection using connection_class. This is an internal method that should be called from open_http() or open_https(). Arguments: - connection_factory should take a host name and return an HTTPConnection instance. - url is the url to retrieval or a host, relative-path pair. - data is payload for a POST request or None. """ user_passwd = None proxy_passwd= None if isinstance(url, str): host, selector = splithost(url) if host: user_passwd, host = splituser(host) host = unquote(host) realhost = host else: host, selector = url # check whether the proxy contains authorization information proxy_passwd, host = splituser(host) # now we proceed with the url we want to obtain urltype, rest = splittype(selector) url = rest user_passwd = None if urltype.lower() != 'http': realhost = None else: realhost, rest = splithost(rest) if realhost: user_passwd, realhost = splituser(realhost) if user_passwd: selector = "%s://%s%s" % (urltype, realhost, rest) if proxy_bypass(realhost): host = realhost if not host: raise IOError('http error', 'no host given') if proxy_passwd: proxy_passwd = unquote(proxy_passwd) proxy_auth = base64.b64encode(proxy_passwd.encode()).decode('ascii') else: proxy_auth = None if user_passwd: user_passwd = unquote(user_passwd) auth = base64.b64encode(user_passwd.encode()).decode('ascii') else: auth = None http_conn = connection_factory(host) headers = {} if proxy_auth: headers["Proxy-Authorization"] = "Basic %s" % proxy_auth if auth: headers["Authorization"] = "Basic %s" % auth if realhost: headers["Host"] = realhost # Add Connection:close as we don't support persistent connections yet. # This helps in closing the socket and avoiding ResourceWarning headers["Connection"] = "close" for header, value in self.addheaders: headers[header] = value if data is not None: headers["Content-Type"] = "application/x-www-form-urlencoded" http_conn.request("POST", selector, data, headers) else: http_conn.request("GET", selector, headers=headers) try: response = http_conn.getresponse() except http_client.BadStatusLine: # something went wrong with the HTTP status line raise URLError("http protocol error: bad status line") # According to RFC 2616, "2xx" code indicates that the client's # request was successfully received, understood, and accepted. if 200 <= response.status < 300: return addinfourl(response, response.msg, "http:" + url, response.status) else: return self.http_error( url, response.fp, response.status, response.reason, response.msg, data)
python
def _open_generic_http(self, connection_factory, url, data): """Make an HTTP connection using connection_class. This is an internal method that should be called from open_http() or open_https(). Arguments: - connection_factory should take a host name and return an HTTPConnection instance. - url is the url to retrieval or a host, relative-path pair. - data is payload for a POST request or None. """ user_passwd = None proxy_passwd= None if isinstance(url, str): host, selector = splithost(url) if host: user_passwd, host = splituser(host) host = unquote(host) realhost = host else: host, selector = url # check whether the proxy contains authorization information proxy_passwd, host = splituser(host) # now we proceed with the url we want to obtain urltype, rest = splittype(selector) url = rest user_passwd = None if urltype.lower() != 'http': realhost = None else: realhost, rest = splithost(rest) if realhost: user_passwd, realhost = splituser(realhost) if user_passwd: selector = "%s://%s%s" % (urltype, realhost, rest) if proxy_bypass(realhost): host = realhost if not host: raise IOError('http error', 'no host given') if proxy_passwd: proxy_passwd = unquote(proxy_passwd) proxy_auth = base64.b64encode(proxy_passwd.encode()).decode('ascii') else: proxy_auth = None if user_passwd: user_passwd = unquote(user_passwd) auth = base64.b64encode(user_passwd.encode()).decode('ascii') else: auth = None http_conn = connection_factory(host) headers = {} if proxy_auth: headers["Proxy-Authorization"] = "Basic %s" % proxy_auth if auth: headers["Authorization"] = "Basic %s" % auth if realhost: headers["Host"] = realhost # Add Connection:close as we don't support persistent connections yet. # This helps in closing the socket and avoiding ResourceWarning headers["Connection"] = "close" for header, value in self.addheaders: headers[header] = value if data is not None: headers["Content-Type"] = "application/x-www-form-urlencoded" http_conn.request("POST", selector, data, headers) else: http_conn.request("GET", selector, headers=headers) try: response = http_conn.getresponse() except http_client.BadStatusLine: # something went wrong with the HTTP status line raise URLError("http protocol error: bad status line") # According to RFC 2616, "2xx" code indicates that the client's # request was successfully received, understood, and accepted. if 200 <= response.status < 300: return addinfourl(response, response.msg, "http:" + url, response.status) else: return self.http_error( url, response.fp, response.status, response.reason, response.msg, data)
['def', '_open_generic_http', '(', 'self', ',', 'connection_factory', ',', 'url', ',', 'data', ')', ':', 'user_passwd', '=', 'None', 'proxy_passwd', '=', 'None', 'if', 'isinstance', '(', 'url', ',', 'str', ')', ':', 'host', ',', 'selector', '=', 'splithost', '(', 'url', ')', 'if', 'host', ':', 'user_passwd', ',', 'host', '=', 'splituser', '(', 'host', ')', 'host', '=', 'unquote', '(', 'host', ')', 'realhost', '=', 'host', 'else', ':', 'host', ',', 'selector', '=', 'url', '# check whether the proxy contains authorization information', 'proxy_passwd', ',', 'host', '=', 'splituser', '(', 'host', ')', '# now we proceed with the url we want to obtain', 'urltype', ',', 'rest', '=', 'splittype', '(', 'selector', ')', 'url', '=', 'rest', 'user_passwd', '=', 'None', 'if', 'urltype', '.', 'lower', '(', ')', '!=', "'http'", ':', 'realhost', '=', 'None', 'else', ':', 'realhost', ',', 'rest', '=', 'splithost', '(', 'rest', ')', 'if', 'realhost', ':', 'user_passwd', ',', 'realhost', '=', 'splituser', '(', 'realhost', ')', 'if', 'user_passwd', ':', 'selector', '=', '"%s://%s%s"', '%', '(', 'urltype', ',', 'realhost', ',', 'rest', ')', 'if', 'proxy_bypass', '(', 'realhost', ')', ':', 'host', '=', 'realhost', 'if', 'not', 'host', ':', 'raise', 'IOError', '(', "'http error'", ',', "'no host given'", ')', 'if', 'proxy_passwd', ':', 'proxy_passwd', '=', 'unquote', '(', 'proxy_passwd', ')', 'proxy_auth', '=', 'base64', '.', 'b64encode', '(', 'proxy_passwd', '.', 'encode', '(', ')', ')', '.', 'decode', '(', "'ascii'", ')', 'else', ':', 'proxy_auth', '=', 'None', 'if', 'user_passwd', ':', 'user_passwd', '=', 'unquote', '(', 'user_passwd', ')', 'auth', '=', 'base64', '.', 'b64encode', '(', 'user_passwd', '.', 'encode', '(', ')', ')', '.', 'decode', '(', "'ascii'", ')', 'else', ':', 'auth', '=', 'None', 'http_conn', '=', 'connection_factory', '(', 'host', ')', 'headers', '=', '{', '}', 'if', 'proxy_auth', ':', 'headers', '[', '"Proxy-Authorization"', ']', '=', '"Basic %s"', '%', 'proxy_auth', 'if', 'auth', ':', 'headers', '[', '"Authorization"', ']', '=', '"Basic %s"', '%', 'auth', 'if', 'realhost', ':', 'headers', '[', '"Host"', ']', '=', 'realhost', "# Add Connection:close as we don't support persistent connections yet.", '# This helps in closing the socket and avoiding ResourceWarning', 'headers', '[', '"Connection"', ']', '=', '"close"', 'for', 'header', ',', 'value', 'in', 'self', '.', 'addheaders', ':', 'headers', '[', 'header', ']', '=', 'value', 'if', 'data', 'is', 'not', 'None', ':', 'headers', '[', '"Content-Type"', ']', '=', '"application/x-www-form-urlencoded"', 'http_conn', '.', 'request', '(', '"POST"', ',', 'selector', ',', 'data', ',', 'headers', ')', 'else', ':', 'http_conn', '.', 'request', '(', '"GET"', ',', 'selector', ',', 'headers', '=', 'headers', ')', 'try', ':', 'response', '=', 'http_conn', '.', 'getresponse', '(', ')', 'except', 'http_client', '.', 'BadStatusLine', ':', '# something went wrong with the HTTP status line', 'raise', 'URLError', '(', '"http protocol error: bad status line"', ')', '# According to RFC 2616, "2xx" code indicates that the client\'s', '# request was successfully received, understood, and accepted.', 'if', '200', '<=', 'response', '.', 'status', '<', '300', ':', 'return', 'addinfourl', '(', 'response', ',', 'response', '.', 'msg', ',', '"http:"', '+', 'url', ',', 'response', '.', 'status', ')', 'else', ':', 'return', 'self', '.', 'http_error', '(', 'url', ',', 'response', '.', 'fp', ',', 'response', '.', 'status', ',', 'response', '.', 'reason', ',', 'response', '.', 'msg', ',', 'data', ')']
Make an HTTP connection using connection_class. This is an internal method that should be called from open_http() or open_https(). Arguments: - connection_factory should take a host name and return an HTTPConnection instance. - url is the url to retrieval or a host, relative-path pair. - data is payload for a POST request or None.
['Make', 'an', 'HTTP', 'connection', 'using', 'connection_class', '.']
train
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/urllib/request.py#L1782-L1872
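Two details in _open_generic_http are worth calling out: credentials embedded in the URL are percent-decoded before being base64-encoded into the Basic Authorization/Proxy-Authorization headers, and "Connection: close" is forced because this legacy opener does not support persistent connections. A minimal sketch of the Basic header construction step:

import base64
from urllib.parse import unquote

def basic_auth_header(user_passwd):
    # user_passwd is the "user:pass" chunk split out of the URL, still %-encoded.
    creds = unquote(user_passwd)
    return "Basic " + base64.b64encode(creds.encode()).decode('ascii')

print(basic_auth_header("alice:s%3Dcret"))   # Basic YWxpY2U6cz1jcmV0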
9,289
ewels/MultiQC
multiqc/modules/hicpro/hicpro.py
MultiqcModule.hicpro_capture_chart
def hicpro_capture_chart (self): """ Generate Capture Hi-C plot""" keys = OrderedDict() keys['valid_pairs_on_target_cap_cap'] = { 'color': '#0039e6', 'name': 'Capture-Capture interactions' } keys['valid_pairs_on_target_cap_rep'] = { 'color': '#809fff', 'name': 'Capture-Reporter interactions' } keys['valid_pairs_off_target'] = { 'color': '#cccccc', 'name': 'Off-target valid pairs' } # Check capture info are available num_samples = 0 for s_name in self.hicpro_data: for k in keys: num_samples += sum([1 if k in self.hicpro_data[s_name] else 0]) if num_samples == 0: return False # Config for the plot config = { 'id': 'hicpro_cap_plot', 'title': 'HiC-Pro: Capture Statistics', 'ylab': '# Pairs', 'cpswitch_counts_label': 'Number of Pairs' } return bargraph.plot(self.hicpro_data, keys, config)
python
def hicpro_capture_chart (self): """ Generate Capture Hi-C plot""" keys = OrderedDict() keys['valid_pairs_on_target_cap_cap'] = { 'color': '#0039e6', 'name': 'Capture-Capture interactions' } keys['valid_pairs_on_target_cap_rep'] = { 'color': '#809fff', 'name': 'Capture-Reporter interactions' } keys['valid_pairs_off_target'] = { 'color': '#cccccc', 'name': 'Off-target valid pairs' } # Check capture info are available num_samples = 0 for s_name in self.hicpro_data: for k in keys: num_samples += sum([1 if k in self.hicpro_data[s_name] else 0]) if num_samples == 0: return False # Config for the plot config = { 'id': 'hicpro_cap_plot', 'title': 'HiC-Pro: Capture Statistics', 'ylab': '# Pairs', 'cpswitch_counts_label': 'Number of Pairs' } return bargraph.plot(self.hicpro_data, keys, config)
['def', 'hicpro_capture_chart', '(', 'self', ')', ':', 'keys', '=', 'OrderedDict', '(', ')', 'keys', '[', "'valid_pairs_on_target_cap_cap'", ']', '=', '{', "'color'", ':', "'#0039e6'", ',', "'name'", ':', "'Capture-Capture interactions'", '}', 'keys', '[', "'valid_pairs_on_target_cap_rep'", ']', '=', '{', "'color'", ':', "'#809fff'", ',', "'name'", ':', "'Capture-Reporter interactions'", '}', 'keys', '[', "'valid_pairs_off_target'", ']', '=', '{', "'color'", ':', "'#cccccc'", ',', "'name'", ':', "'Off-target valid pairs'", '}', '# Check capture info are available', 'num_samples', '=', '0', 'for', 's_name', 'in', 'self', '.', 'hicpro_data', ':', 'for', 'k', 'in', 'keys', ':', 'num_samples', '+=', 'sum', '(', '[', '1', 'if', 'k', 'in', 'self', '.', 'hicpro_data', '[', 's_name', ']', 'else', '0', ']', ')', 'if', 'num_samples', '==', '0', ':', 'return', 'False', '# Config for the plot', 'config', '=', '{', "'id'", ':', "'hicpro_cap_plot'", ',', "'title'", ':', "'HiC-Pro: Capture Statistics'", ',', "'ylab'", ':', "'# Pairs'", ',', "'cpswitch_counts_label'", ':', "'Number of Pairs'", '}', 'return', 'bargraph', '.', 'plot', '(', 'self', '.', 'hicpro_data', ',', 'keys', ',', 'config', ')']
Generate Capture Hi-C plot
['Generate', 'Capture', 'Hi', '-', 'C', 'plot']
train
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/hicpro/hicpro.py#L399-L423
9,290
snare/voltron
voltron/core.py
Server.dispatch_request
def dispatch_request(self, req): """ Dispatch a request object. """ log.debug("Dispatching request: {}".format(str(req))) # make sure it's valid res = None try: req.validate() except MissingFieldError as e: res = APIMissingFieldErrorResponse(str(e)) # dispatch the request if not res: try: res = req.dispatch() except Exception as e: msg = "Exception raised while dispatching request: {}".format(repr(e)) log.exception(msg) res = APIGenericErrorResponse(msg) log.debug("Response: {}".format(str(res))) return res
python
def dispatch_request(self, req): """ Dispatch a request object. """ log.debug("Dispatching request: {}".format(str(req))) # make sure it's valid res = None try: req.validate() except MissingFieldError as e: res = APIMissingFieldErrorResponse(str(e)) # dispatch the request if not res: try: res = req.dispatch() except Exception as e: msg = "Exception raised while dispatching request: {}".format(repr(e)) log.exception(msg) res = APIGenericErrorResponse(msg) log.debug("Response: {}".format(str(res))) return res
['def', 'dispatch_request', '(', 'self', ',', 'req', ')', ':', 'log', '.', 'debug', '(', '"Dispatching request: {}"', '.', 'format', '(', 'str', '(', 'req', ')', ')', ')', "# make sure it's valid", 'res', '=', 'None', 'try', ':', 'req', '.', 'validate', '(', ')', 'except', 'MissingFieldError', 'as', 'e', ':', 'res', '=', 'APIMissingFieldErrorResponse', '(', 'str', '(', 'e', ')', ')', '# dispatch the request', 'if', 'not', 'res', ':', 'try', ':', 'res', '=', 'req', '.', 'dispatch', '(', ')', 'except', 'Exception', 'as', 'e', ':', 'msg', '=', '"Exception raised while dispatching request: {}"', '.', 'format', '(', 'repr', '(', 'e', ')', ')', 'log', '.', 'exception', '(', 'msg', ')', 'res', '=', 'APIGenericErrorResponse', '(', 'msg', ')', 'log', '.', 'debug', '(', '"Response: {}"', '.', 'format', '(', 'str', '(', 'res', ')', ')', ')', 'return', 'res']
Dispatch a request object.
['Dispatch', 'a', 'request', 'object', '.']
train
https://github.com/snare/voltron/blob/4ee3cbe6f7c1e38303f5dc6114c48b60217253c3/voltron/core.py#L279-L303
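The shape of dispatch_request is validate-first, then dispatch, with every failure path converted into an error response object rather than a propagated exception, so one malformed request never kills the server loop. A generic sketch of that pattern (the error_response factory is a placeholder, not voltron's actual response classes):

import logging

log = logging.getLogger(__name__)

def dispatch(req, error_response=dict):
    try:
        req.validate()              # may raise on missing fields
        return req.dispatch()
    except Exception as e:
        log.exception("request failed")
        # Failures become response objects the client can inspect.
        return error_response(error=repr(e))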
9,291
OnroerendErfgoed/crabpy_pyramid
crabpy_pyramid/renderers/crab.py
list_gebouwen_adapter
def list_gebouwen_adapter(obj, request): """ Adapter for rendering a list of :class:`crabpy.gateway.crab.Gebouw` to json. """ return { 'id': obj.id, 'aard': { 'id': obj.aard.id, 'naam': obj.aard.naam, 'definitie': obj.aard.definitie }, 'status': { 'id': obj.status.id, 'naam': obj.status.naam, 'definitie': obj.status.definitie } }
python
def list_gebouwen_adapter(obj, request): """ Adapter for rendering a list of :class:`crabpy.gateway.crab.Gebouw` to json. """ return { 'id': obj.id, 'aard': { 'id': obj.aard.id, 'naam': obj.aard.naam, 'definitie': obj.aard.definitie }, 'status': { 'id': obj.status.id, 'naam': obj.status.naam, 'definitie': obj.status.definitie } }
['def', 'list_gebouwen_adapter', '(', 'obj', ',', 'request', ')', ':', 'return', '{', "'id'", ':', 'obj', '.', 'id', ',', "'aard'", ':', '{', "'id'", ':', 'obj', '.', 'aard', '.', 'id', ',', "'naam'", ':', 'obj', '.', 'aard', '.', 'naam', ',', "'definitie'", ':', 'obj', '.', 'aard', '.', 'definitie', '}', ',', "'status'", ':', '{', "'id'", ':', 'obj', '.', 'status', '.', 'id', ',', "'naam'", ':', 'obj', '.', 'status', '.', 'naam', ',', "'definitie'", ':', 'obj', '.', 'status', '.', 'definitie', '}', '}']
Adapter for rendering a list of :class:`crabpy.gateway.crab.Gebouw` to json.
['Adapter', 'for', 'rendering', 'a', 'list', 'of', ':', 'class', ':', 'crabpy', '.', 'gateway', '.', 'crab', '.', 'Gebouw', 'to', 'json', '.']
train
https://github.com/OnroerendErfgoed/crabpy_pyramid/blob/b727ea55838d71575db96e987b536a0bac9f6a7a/crabpy_pyramid/renderers/crab.py#L111-L128
9,292
python-cmd2/cmd2
cmd2/cmd2.py
Cmd.pfeedback
def pfeedback(self, msg: str) -> None: """For printing nonessential feedback. Can be silenced with `quiet`. Inclusion in redirected output is controlled by `feedback_to_output`.""" if not self.quiet: if self.feedback_to_output: self.poutput(msg) else: self.decolorized_write(sys.stderr, "{}\n".format(msg))
python
def pfeedback(self, msg: str) -> None: """For printing nonessential feedback. Can be silenced with `quiet`. Inclusion in redirected output is controlled by `feedback_to_output`.""" if not self.quiet: if self.feedback_to_output: self.poutput(msg) else: self.decolorized_write(sys.stderr, "{}\n".format(msg))
['def', 'pfeedback', '(', 'self', ',', 'msg', ':', 'str', ')', '->', 'None', ':', 'if', 'not', 'self', '.', 'quiet', ':', 'if', 'self', '.', 'feedback_to_output', ':', 'self', '.', 'poutput', '(', 'msg', ')', 'else', ':', 'self', '.', 'decolorized_write', '(', 'sys', '.', 'stderr', ',', '"{}\\n"', '.', 'format', '(', 'msg', ')', ')']
For printing nonessential feedback. Can be silenced with `quiet`. Inclusion in redirected output is controlled by `feedback_to_output`.
['For', 'printing', 'nonessential', 'feedback', '.', 'Can', 'be', 'silenced', 'with', 'quiet', '.', 'Inclusion', 'in', 'redirected', 'output', 'is', 'controlled', 'by', 'feedback_to_output', '.']
train
https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/cmd2/cmd2.py#L646-L653
9,293
annayqho/TheCannon
code/aaomega/aaomega_munge_data.py
load_data
def load_data(): data_dir = "/Users/annaho/Data/AAOmega" out_dir = "%s/%s" %(data_dir, "Run_13_July") """ Use all the above functions to set data up for The Cannon """ ff, wl, tr_flux, tr_ivar = load_ref_spectra() """ pick one that doesn't have extra dead pixels """ skylines = tr_ivar[4,:] # should be the same across all obj np.savez("%s/skylines.npz" %out_dir, skylines) contmask = np.load("%s/contmask_regions.npz" %data_dir)['arr_0'] scatter = estimate_noise(tr_flux, contmask) ids, labels = load_labels() # Select the objects in the catalog corresponding to the files inds = [] ff_short = [] for fname in ff: val = fname.split("/")[-1] short = (val.split('.')[0] + '.' + val.split('.')[1]) ff_short.append(short) if short in ids: ind = np.where(ids==short)[0][0] inds.append(ind) # choose the labels tr_id = ids[inds] tr_label = labels[inds] # find the corresponding spectra ff_short = np.array(ff_short) inds = np.array([np.where(ff_short==val)[0][0] for val in tr_id]) tr_flux_choose = tr_flux[inds] tr_ivar_choose = tr_ivar[inds] scatter_choose = scatter[inds] np.savez("%s/wl.npz" %out_dir, wl) np.savez("%s/ref_id_all.npz" %out_dir, tr_id) np.savez("%s/ref_flux_all.npz" %out_dir, tr_flux_choose) np.savez("%s/ref_ivar_all.npz" %out_dir, tr_ivar_choose) np.savez("%s/ref_label_all.npz" %out_dir, tr_label) np.savez("%s/ref_spec_scat_all.npz" %out_dir, scatter_choose) # now, the test spectra test_id, test_flux = load_test_spectra() scatter = estimate_noise(test_flux, contmask) np.savez("%s/test_id.npz" %out_dir, test_id) np.savez("%s/test_flux.npz" %out_dir, test_flux) np.savez("%s/test_spec_scat.npz" %out_dir, scatter)
python
def load_data(): data_dir = "/Users/annaho/Data/AAOmega" out_dir = "%s/%s" %(data_dir, "Run_13_July") """ Use all the above functions to set data up for The Cannon """ ff, wl, tr_flux, tr_ivar = load_ref_spectra() """ pick one that doesn't have extra dead pixels """ skylines = tr_ivar[4,:] # should be the same across all obj np.savez("%s/skylines.npz" %out_dir, skylines) contmask = np.load("%s/contmask_regions.npz" %data_dir)['arr_0'] scatter = estimate_noise(tr_flux, contmask) ids, labels = load_labels() # Select the objects in the catalog corresponding to the files inds = [] ff_short = [] for fname in ff: val = fname.split("/")[-1] short = (val.split('.')[0] + '.' + val.split('.')[1]) ff_short.append(short) if short in ids: ind = np.where(ids==short)[0][0] inds.append(ind) # choose the labels tr_id = ids[inds] tr_label = labels[inds] # find the corresponding spectra ff_short = np.array(ff_short) inds = np.array([np.where(ff_short==val)[0][0] for val in tr_id]) tr_flux_choose = tr_flux[inds] tr_ivar_choose = tr_ivar[inds] scatter_choose = scatter[inds] np.savez("%s/wl.npz" %out_dir, wl) np.savez("%s/ref_id_all.npz" %out_dir, tr_id) np.savez("%s/ref_flux_all.npz" %out_dir, tr_flux_choose) np.savez("%s/ref_ivar_all.npz" %out_dir, tr_ivar_choose) np.savez("%s/ref_label_all.npz" %out_dir, tr_label) np.savez("%s/ref_spec_scat_all.npz" %out_dir, scatter_choose) # now, the test spectra test_id, test_flux = load_test_spectra() scatter = estimate_noise(test_flux, contmask) np.savez("%s/test_id.npz" %out_dir, test_id) np.savez("%s/test_flux.npz" %out_dir, test_flux) np.savez("%s/test_spec_scat.npz" %out_dir, scatter)
['def', 'load_data', '(', ')', ':', 'data_dir', '=', '"/Users/annaho/Data/AAOmega"', 'out_dir', '=', '"%s/%s"', '%', '(', 'data_dir', ',', '"Run_13_July"', ')', 'ff', ',', 'wl', ',', 'tr_flux', ',', 'tr_ivar', '=', 'load_ref_spectra', '(', ')', '""" pick one that doesn\'t have extra dead pixels """', 'skylines', '=', 'tr_ivar', '[', '4', ',', ':', ']', '# should be the same across all obj', 'np', '.', 'savez', '(', '"%s/skylines.npz"', '%', 'out_dir', ',', 'skylines', ')', 'contmask', '=', 'np', '.', 'load', '(', '"%s/contmask_regions.npz"', '%', 'data_dir', ')', '[', "'arr_0'", ']', 'scatter', '=', 'estimate_noise', '(', 'tr_flux', ',', 'contmask', ')', 'ids', ',', 'labels', '=', 'load_labels', '(', ')', '# Select the objects in the catalog corresponding to the files', 'inds', '=', '[', ']', 'ff_short', '=', '[', ']', 'for', 'fname', 'in', 'ff', ':', 'val', '=', 'fname', '.', 'split', '(', '"/"', ')', '[', '-', '1', ']', 'short', '=', '(', 'val', '.', 'split', '(', "'.'", ')', '[', '0', ']', '+', "'.'", '+', 'val', '.', 'split', '(', "'.'", ')', '[', '1', ']', ')', 'ff_short', '.', 'append', '(', 'short', ')', 'if', 'short', 'in', 'ids', ':', 'ind', '=', 'np', '.', 'where', '(', 'ids', '==', 'short', ')', '[', '0', ']', '[', '0', ']', 'inds', '.', 'append', '(', 'ind', ')', '# choose the labels', 'tr_id', '=', 'ids', '[', 'inds', ']', 'tr_label', '=', 'labels', '[', 'inds', ']', '# find the corresponding spectra', 'ff_short', '=', 'np', '.', 'array', '(', 'ff_short', ')', 'inds', '=', 'np', '.', 'array', '(', '[', 'np', '.', 'where', '(', 'ff_short', '==', 'val', ')', '[', '0', ']', '[', '0', ']', 'for', 'val', 'in', 'tr_id', ']', ')', 'tr_flux_choose', '=', 'tr_flux', '[', 'inds', ']', 'tr_ivar_choose', '=', 'tr_ivar', '[', 'inds', ']', 'scatter_choose', '=', 'scatter', '[', 'inds', ']', 'np', '.', 'savez', '(', '"%s/wl.npz"', '%', 'out_dir', ',', 'wl', ')', 'np', '.', 'savez', '(', '"%s/ref_id_all.npz"', '%', 'out_dir', ',', 'tr_id', ')', 'np', '.', 'savez', '(', '"%s/ref_flux_all.npz"', '%', 'out_dir', ',', 'tr_flux_choose', ')', 'np', '.', 'savez', '(', '"%s/ref_ivar_all.npz"', '%', 'out_dir', ',', 'tr_ivar_choose', ')', 'np', '.', 'savez', '(', '"%s/ref_label_all.npz"', '%', 'out_dir', ',', 'tr_label', ')', 'np', '.', 'savez', '(', '"%s/ref_spec_scat_all.npz"', '%', 'out_dir', ',', 'scatter_choose', ')', '# now, the test spectra', 'test_id', ',', 'test_flux', '=', 'load_test_spectra', '(', ')', 'scatter', '=', 'estimate_noise', '(', 'test_flux', ',', 'contmask', ')', 'np', '.', 'savez', '(', '"%s/test_id.npz"', '%', 'out_dir', ',', 'test_id', ')', 'np', '.', 'savez', '(', '"%s/test_flux.npz"', '%', 'out_dir', ',', 'test_flux', ')', 'np', '.', 'savez', '(', '"%s/test_spec_scat.npz"', '%', 'out_dir', ',', 'scatter', ')']
Use all the above functions to set data up for The Cannon
['Use', 'all', 'the', 'above', 'functions', 'to', 'set', 'data', 'up', 'for', 'The', 'Cannon']
train
https://github.com/annayqho/TheCannon/blob/8010a0a5dc9a3f9bb91efa79d7756f79b3c7ba9a/code/aaomega/aaomega_munge_data.py#L109-L157
9,294
briney/abutils
abutils/utils/s3.py
configure
def configure(access_key=None, secret_key=None, logger=None): ''' Configures s3cmd prior to first use. If no arguments are provided, you will be prompted to enter the access key and secret key interactively. Args: access_key (str): AWS access key secret_key (str): AWS secret key ''' if not logger: logger = log.get_logger('s3') if not all([access_key, secret_key]): logger.info('') access_key = input('AWS Access Key: ') secret_key = input('AWS Secret Key: ') _write_config(access_key, secret_key) logger.info('') logger.info('Completed writing S3 config file.') logger.info('')
python
def configure(access_key=None, secret_key=None, logger=None): ''' Configures s3cmd prior to first use. If no arguments are provided, you will be prompted to enter the access key and secret key interactively. Args: access_key (str): AWS access key secret_key (str): AWS secret key ''' if not logger: logger = log.get_logger('s3') if not all([access_key, secret_key]): logger.info('') access_key = input('AWS Access Key: ') secret_key = input('AWS Secret Key: ') _write_config(access_key, secret_key) logger.info('') logger.info('Completed writing S3 config file.') logger.info('')
['def', 'configure', '(', 'access_key', '=', 'None', ',', 'secret_key', '=', 'None', ',', 'logger', '=', 'None', ')', ':', 'if', 'not', 'logger', ':', 'logger', '=', 'log', '.', 'get_logger', '(', "'s3'", ')', 'if', 'not', 'all', '(', '[', 'access_key', ',', 'secret_key', ']', ')', ':', 'logger', '.', 'info', '(', "''", ')', 'access_key', '=', 'input', '(', "'AWS Access Key: '", ')', 'secret_key', '=', 'input', '(', "'AWS Secret Key: '", ')', '_write_config', '(', 'access_key', ',', 'secret_key', ')', 'logger', '.', 'info', '(', "''", ')', 'logger', '.', 'info', '(', "'Completed writing S3 config file.'", ')', 'logger', '.', 'info', '(', "''", ')']
Configures s3cmd prior to first use. If no arguments are provided, you will be prompted to enter the access key and secret key interactively. Args: access_key (str): AWS access key secret_key (str): AWS secret key
['Configures', 's3cmd', 'prior', 'to', 'first', 'use', '.']
train
https://github.com/briney/abutils/blob/944755fc7d28bfc7d4f1ffad94ca0bf9d74ec54b/abutils/utils/s3.py#L201-L223
9,295
pantsbuild/pants
src/python/pants/backend/native/tasks/native_compile.py
NativeCompile.get_sources_headers_for_target
def get_sources_headers_for_target(self, target): """Return a list of file arguments to provide to the compiler. NB: result list will contain both header and source files! :raises: :class:`NativeCompile.NativeCompileError` if there is an error processing the sources. """ # Get source paths relative to the target base so the exception message with the target and # paths makes sense. target_relative_sources = target.sources_relative_to_target_base() rel_root = target_relative_sources.rel_root # Unique file names are required because we just dump object files into a single directory, and # the compiler will silently just produce a single object file if provided non-unique filenames. # TODO: add some shading to file names so we can remove this check. # NB: It shouldn't matter if header files have the same name, but this will raise an error in # that case as well. We won't need to do any shading of header file names. seen_filenames = defaultdict(list) for src in target_relative_sources: seen_filenames[os.path.basename(src)].append(src) duplicate_filename_err_msgs = [] for fname, source_paths in seen_filenames.items(): if len(source_paths) > 1: duplicate_filename_err_msgs.append("filename: {}, paths: {}".format(fname, source_paths)) if duplicate_filename_err_msgs: raise self.NativeCompileError( "Error in target '{}': source files must have a unique filename within a '{}' target. " "Conflicting filenames:\n{}" .format(target.address.spec, target.alias(), '\n'.join(duplicate_filename_err_msgs))) return [os.path.join(get_buildroot(), rel_root, src) for src in target_relative_sources]
python
def get_sources_headers_for_target(self, target): """Return a list of file arguments to provide to the compiler. NB: result list will contain both header and source files! :raises: :class:`NativeCompile.NativeCompileError` if there is an error processing the sources. """ # Get source paths relative to the target base so the exception message with the target and # paths makes sense. target_relative_sources = target.sources_relative_to_target_base() rel_root = target_relative_sources.rel_root # Unique file names are required because we just dump object files into a single directory, and # the compiler will silently just produce a single object file if provided non-unique filenames. # TODO: add some shading to file names so we can remove this check. # NB: It shouldn't matter if header files have the same name, but this will raise an error in # that case as well. We won't need to do any shading of header file names. seen_filenames = defaultdict(list) for src in target_relative_sources: seen_filenames[os.path.basename(src)].append(src) duplicate_filename_err_msgs = [] for fname, source_paths in seen_filenames.items(): if len(source_paths) > 1: duplicate_filename_err_msgs.append("filename: {}, paths: {}".format(fname, source_paths)) if duplicate_filename_err_msgs: raise self.NativeCompileError( "Error in target '{}': source files must have a unique filename within a '{}' target. " "Conflicting filenames:\n{}" .format(target.address.spec, target.alias(), '\n'.join(duplicate_filename_err_msgs))) return [os.path.join(get_buildroot(), rel_root, src) for src in target_relative_sources]
['def', 'get_sources_headers_for_target', '(', 'self', ',', 'target', ')', ':', '# Get source paths relative to the target base so the exception message with the target and', '# paths makes sense.', 'target_relative_sources', '=', 'target', '.', 'sources_relative_to_target_base', '(', ')', 'rel_root', '=', 'target_relative_sources', '.', 'rel_root', '# Unique file names are required because we just dump object files into a single directory, and', '# the compiler will silently just produce a single object file if provided non-unique filenames.', '# TODO: add some shading to file names so we can remove this check.', "# NB: It shouldn't matter if header files have the same name, but this will raise an error in", "# that case as well. We won't need to do any shading of header file names.", 'seen_filenames', '=', 'defaultdict', '(', 'list', ')', 'for', 'src', 'in', 'target_relative_sources', ':', 'seen_filenames', '[', 'os', '.', 'path', '.', 'basename', '(', 'src', ')', ']', '.', 'append', '(', 'src', ')', 'duplicate_filename_err_msgs', '=', '[', ']', 'for', 'fname', ',', 'source_paths', 'in', 'seen_filenames', '.', 'items', '(', ')', ':', 'if', 'len', '(', 'source_paths', ')', '>', '1', ':', 'duplicate_filename_err_msgs', '.', 'append', '(', '"filename: {}, paths: {}"', '.', 'format', '(', 'fname', ',', 'source_paths', ')', ')', 'if', 'duplicate_filename_err_msgs', ':', 'raise', 'self', '.', 'NativeCompileError', '(', '"Error in target \'{}\': source files must have a unique filename within a \'{}\' target. "', '"Conflicting filenames:\\n{}"', '.', 'format', '(', 'target', '.', 'address', '.', 'spec', ',', 'target', '.', 'alias', '(', ')', ',', "'\\n'", '.', 'join', '(', 'duplicate_filename_err_msgs', ')', ')', ')', 'return', '[', 'os', '.', 'path', '.', 'join', '(', 'get_buildroot', '(', ')', ',', 'rel_root', ',', 'src', ')', 'for', 'src', 'in', 'target_relative_sources', ']']
Return a list of file arguments to provide to the compiler. NB: result list will contain both header and source files! :raises: :class:`NativeCompile.NativeCompileError` if there is an error processing the sources.
['Return', 'a', 'list', 'of', 'file', 'arguments', 'to', 'provide', 'to', 'the', 'compiler', '.']
train
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/native/tasks/native_compile.py#L96-L126
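The duplicate check in get_sources_headers_for_target works by bucketing every source path under its basename with a defaultdict(list); any bucket longer than one means two files would compile to the same object-file name in the single output directory. A standalone sketch of that detection:

import os
from collections import defaultdict

def duplicate_basenames(paths):
    buckets = defaultdict(list)
    for p in paths:
        buckets[os.path.basename(p)].append(p)
    # Keep only the basenames that appear under more than one path.
    return {name: ps for name, ps in buckets.items() if len(ps) > 1}

print(duplicate_basenames(['a/util.c', 'b/util.c', 'a/main.c']))
# {'util.c': ['a/util.c', 'b/util.c']}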
9,296
phaethon/kamene
kamene/layers/inet.py
defrag
def defrag(plist): """defrag(plist) -> ([not fragmented], [defragmented], [ [bad fragments], [bad fragments], ... ])""" frags = defaultdict(PacketList) nofrag = PacketList() for p in plist: ip = p[IP] if IP not in p: nofrag.append(p) continue if ip.frag == 0 and ip.flags & 1 == 0: nofrag.append(p) continue uniq = (ip.id, ip.src, ip.dst, ip.proto) frags[uniq].append(p) defrag = [] missfrag = [] for lst in frags.values(): lst.sort(key=lambda x: x.frag) p = lst[0] lastp = lst[-1] if p.frag > 0 or lastp.flags & 1 != 0: # first or last fragment missing missfrag.append(lst) continue p = p.copy() if conf.padding_layer in p: del p[conf.padding_layer].underlayer.payload ip = p[IP] if ip.len is None or ip.ihl is None: clen = len(ip.payload) else: clen = ip.len - (ip.ihl << 2) txt = conf.raw_layer() for q in lst[1:]: if clen != q.frag << 3: # Wrong fragmentation offset if clen > q.frag << 3: warning("Fragment overlap (%i > %i) %r || %r || %r" % (clen, q.frag << 3, p, txt, q)) missfrag.append(lst) break if q[IP].len is None or q[IP].ihl is None: clen += len(q[IP].payload) else: clen += q[IP].len - (q[IP].ihl << 2) if conf.padding_layer in q: del q[conf.padding_layer].underlayer.payload txt.add_payload(q[IP].payload.copy()) else: ip.flags &= ~1 # !MF del ip.chksum del ip.len p = p / txt defrag.append(p) defrag2 = PacketList() for p in defrag: defrag2.append(p.__class__(bytes(p))) return nofrag, defrag2, missfrag
python
def defrag(plist): """defrag(plist) -> ([not fragmented], [defragmented], [ [bad fragments], [bad fragments], ... ])""" frags = defaultdict(PacketList) nofrag = PacketList() for p in plist: ip = p[IP] if IP not in p: nofrag.append(p) continue if ip.frag == 0 and ip.flags & 1 == 0: nofrag.append(p) continue uniq = (ip.id, ip.src, ip.dst, ip.proto) frags[uniq].append(p) defrag = [] missfrag = [] for lst in frags.values(): lst.sort(key=lambda x: x.frag) p = lst[0] lastp = lst[-1] if p.frag > 0 or lastp.flags & 1 != 0: # first or last fragment missing missfrag.append(lst) continue p = p.copy() if conf.padding_layer in p: del p[conf.padding_layer].underlayer.payload ip = p[IP] if ip.len is None or ip.ihl is None: clen = len(ip.payload) else: clen = ip.len - (ip.ihl << 2) txt = conf.raw_layer() for q in lst[1:]: if clen != q.frag << 3: # Wrong fragmentation offset if clen > q.frag << 3: warning("Fragment overlap (%i > %i) %r || %r || %r" % (clen, q.frag << 3, p, txt, q)) missfrag.append(lst) break if q[IP].len is None or q[IP].ihl is None: clen += len(q[IP].payload) else: clen += q[IP].len - (q[IP].ihl << 2) if conf.padding_layer in q: del q[conf.padding_layer].underlayer.payload txt.add_payload(q[IP].payload.copy()) else: ip.flags &= ~1 # !MF del ip.chksum del ip.len p = p / txt defrag.append(p) defrag2 = PacketList() for p in defrag: defrag2.append(p.__class__(bytes(p))) return nofrag, defrag2, missfrag
['def', 'defrag', '(', 'plist', ')', ':', 'frags', '=', 'defaultdict', '(', 'PacketList', ')', 'nofrag', '=', 'PacketList', '(', ')', 'for', 'p', 'in', 'plist', ':', 'ip', '=', 'p', '[', 'IP', ']', 'if', 'IP', 'not', 'in', 'p', ':', 'nofrag', '.', 'append', '(', 'p', ')', 'continue', 'if', 'ip', '.', 'frag', '==', '0', 'and', 'ip', '.', 'flags', '&', '1', '==', '0', ':', 'nofrag', '.', 'append', '(', 'p', ')', 'continue', 'uniq', '=', '(', 'ip', '.', 'id', ',', 'ip', '.', 'src', ',', 'ip', '.', 'dst', ',', 'ip', '.', 'proto', ')', 'frags', '[', 'uniq', ']', '.', 'append', '(', 'p', ')', 'defrag', '=', '[', ']', 'missfrag', '=', '[', ']', 'for', 'lst', 'in', 'frags', '.', 'values', '(', ')', ':', 'lst', '.', 'sort', '(', 'key', '=', 'lambda', 'x', ':', 'x', '.', 'frag', ')', 'p', '=', 'lst', '[', '0', ']', 'lastp', '=', 'lst', '[', '-', '1', ']', 'if', 'p', '.', 'frag', '>', '0', 'or', 'lastp', '.', 'flags', '&', '1', '!=', '0', ':', '# first or last fragment missing', 'missfrag', '.', 'append', '(', 'lst', ')', 'continue', 'p', '=', 'p', '.', 'copy', '(', ')', 'if', 'conf', '.', 'padding_layer', 'in', 'p', ':', 'del', 'p', '[', 'conf', '.', 'padding_layer', ']', '.', 'underlayer', '.', 'payload', 'ip', '=', 'p', '[', 'IP', ']', 'if', 'ip', '.', 'len', 'is', 'None', 'or', 'ip', '.', 'ihl', 'is', 'None', ':', 'clen', '=', 'len', '(', 'ip', '.', 'payload', ')', 'else', ':', 'clen', '=', 'ip', '.', 'len', '-', '(', 'ip', '.', 'ihl', '<<', '2', ')', 'txt', '=', 'conf', '.', 'raw_layer', '(', ')', 'for', 'q', 'in', 'lst', '[', '1', ':', ']', ':', 'if', 'clen', '!=', 'q', '.', 'frag', '<<', '3', ':', '# Wrong fragmentation offset', 'if', 'clen', '>', 'q', '.', 'frag', '<<', '3', ':', 'warning', '(', '"Fragment overlap (%i > %i) %r || %r || %r"', '%', '(', 'clen', ',', 'q', '.', 'frag', '<<', '3', ',', 'p', ',', 'txt', ',', 'q', ')', ')', 'missfrag', '.', 'append', '(', 'lst', ')', 'break', 'if', 'q', '[', 'IP', ']', '.', 'len', 'is', 'None', 'or', 'q', '[', 'IP', ']', '.', 'ihl', 'is', 'None', ':', 'clen', '+=', 'len', '(', 'q', '[', 'IP', ']', '.', 'payload', ')', 'else', ':', 'clen', '+=', 'q', '[', 'IP', ']', '.', 'len', '-', '(', 'q', '[', 'IP', ']', '.', 'ihl', '<<', '2', ')', 'if', 'conf', '.', 'padding_layer', 'in', 'q', ':', 'del', 'q', '[', 'conf', '.', 'padding_layer', ']', '.', 'underlayer', '.', 'payload', 'txt', '.', 'add_payload', '(', 'q', '[', 'IP', ']', '.', 'payload', '.', 'copy', '(', ')', ')', 'else', ':', 'ip', '.', 'flags', '&=', '~', '1', '# !MF', 'del', 'ip', '.', 'chksum', 'del', 'ip', '.', 'len', 'p', '=', 'p', '/', 'txt', 'defrag', '.', 'append', '(', 'p', ')', 'defrag2', '=', 'PacketList', '(', ')', 'for', 'p', 'in', 'defrag', ':', 'defrag2', '.', 'append', '(', 'p', '.', '__class__', '(', 'bytes', '(', 'p', ')', ')', ')', 'return', 'nofrag', ',', 'defrag2', ',', 'missfrag']
defrag(plist) -> ([not fragmented], [defragmented], [ [bad fragments], [bad fragments], ... ])
['defrag', '(', 'plist', ')', '-', '>', '(', '[', 'not', 'fragmented', ']', '[', 'defragmented', ']', '[', '[', 'bad', 'fragments', ']', '[', 'bad', 'fragments', ']', '...', ']', ')']
train
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/layers/inet.py#L901-L956
9,297
globality-corp/microcosm
microcosm/hooks.py
_invoke_hook
def _invoke_hook(hook_name, target): """ Generic hook invocation. """ try: for value in getattr(target, hook_name): func, args, kwargs = value func(target, *args, **kwargs) except AttributeError: # no hook defined pass except (TypeError, ValueError): # hook not properly defined (might be a mock) pass
python
def _invoke_hook(hook_name, target): """ Generic hook invocation. """ try: for value in getattr(target, hook_name): func, args, kwargs = value func(target, *args, **kwargs) except AttributeError: # no hook defined pass except (TypeError, ValueError): # hook not properly defined (might be a mock) pass
['def', '_invoke_hook', '(', 'hook_name', ',', 'target', ')', ':', 'try', ':', 'for', 'value', 'in', 'getattr', '(', 'target', ',', 'hook_name', ')', ':', 'func', ',', 'args', ',', 'kwargs', '=', 'value', 'func', '(', 'target', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'except', 'AttributeError', ':', '# no hook defined', 'pass', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', '# hook not properly defined (might be a mock)', 'pass']
Generic hook invocation.
['Generic', 'hook', 'invocation', '.']
train
https://github.com/globality-corp/microcosm/blob/6856200ca295da4269c8c1c9de7db0b97c1f4523/microcosm/hooks.py#L45-L59
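Hooks here are stored on the target object itself as a list of (func, args, kwargs) tuples under an attribute named after the hook; invocation unpacks each tuple and calls the function with the target prepended, silently skipping targets with no hook attribute. A small self-contained sketch of that registration/invocation protocol (add_hook is a hypothetical registrar, not microcosm's API):

def add_hook(cls, hook_name, func, *args, **kwargs):
    hooks = getattr(cls, hook_name, [])
    hooks.append((func, args, kwargs))
    setattr(cls, hook_name, hooks)

def invoke_hooks(hook_name, target):
    # Missing attribute means "no hooks registered" rather than an error.
    for func, args, kwargs in getattr(target, hook_name, []):
        func(target, *args, **kwargs)

class Widget: pass

add_hook(Widget, '_on_init', lambda t, msg: print(msg, t), "hello from")
invoke_hooks('_on_init', Widget)   # hello from <class '...Widget'>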
9,298
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_genobstacles.py
GenobstaclesModule.mavlink_packet
def mavlink_packet(self, m): '''trigger sends from ATTITUDE packets''' if not self.have_home and m.get_type() == 'GPS_RAW_INT' and m.fix_type >= 3: gen_settings.home_lat = m.lat * 1.0e-7 gen_settings.home_lon = m.lon * 1.0e-7 self.have_home = True if self.pending_start: self.start() if m.get_type() != 'ATTITUDE': return t = self.get_time() dt = t - self.last_t if dt < 0 or dt > 10: self.last_t = t return if dt > 10 or dt < 0.9: return self.last_t = t for a in self.aircraft: if not gen_settings.stop: a.update(1.0) self.pkt_queue.append(a.pickled()) while len(self.pkt_queue) > len(self.aircraft)*2: self.pkt_queue.pop(0) if self.module('map') is not None and not self.menu_added_map: self.menu_added_map = True self.module('map').add_menu(self.menu)
python
def mavlink_packet(self, m): '''trigger sends from ATTITUDE packets''' if not self.have_home and m.get_type() == 'GPS_RAW_INT' and m.fix_type >= 3: gen_settings.home_lat = m.lat * 1.0e-7 gen_settings.home_lon = m.lon * 1.0e-7 self.have_home = True if self.pending_start: self.start() if m.get_type() != 'ATTITUDE': return t = self.get_time() dt = t - self.last_t if dt < 0 or dt > 10: self.last_t = t return if dt > 10 or dt < 0.9: return self.last_t = t for a in self.aircraft: if not gen_settings.stop: a.update(1.0) self.pkt_queue.append(a.pickled()) while len(self.pkt_queue) > len(self.aircraft)*2: self.pkt_queue.pop(0) if self.module('map') is not None and not self.menu_added_map: self.menu_added_map = True self.module('map').add_menu(self.menu)
['def', 'mavlink_packet', '(', 'self', ',', 'm', ')', ':', 'if', 'not', 'self', '.', 'have_home', 'and', 'm', '.', 'get_type', '(', ')', '==', "'GPS_RAW_INT'", 'and', 'm', '.', 'fix_type', '>=', '3', ':', 'gen_settings', '.', 'home_lat', '=', 'm', '.', 'lat', '*', '1.0e-7', 'gen_settings', '.', 'home_lon', '=', 'm', '.', 'lon', '*', '1.0e-7', 'self', '.', 'have_home', '=', 'True', 'if', 'self', '.', 'pending_start', ':', 'self', '.', 'start', '(', ')', 'if', 'm', '.', 'get_type', '(', ')', '!=', "'ATTITUDE'", ':', 'return', 't', '=', 'self', '.', 'get_time', '(', ')', 'dt', '=', 't', '-', 'self', '.', 'last_t', 'if', 'dt', '<', '0', 'or', 'dt', '>', '10', ':', 'self', '.', 'last_t', '=', 't', 'return', 'if', 'dt', '>', '10', 'or', 'dt', '<', '0.9', ':', 'return', 'self', '.', 'last_t', '=', 't', 'for', 'a', 'in', 'self', '.', 'aircraft', ':', 'if', 'not', 'gen_settings', '.', 'stop', ':', 'a', '.', 'update', '(', '1.0', ')', 'self', '.', 'pkt_queue', '.', 'append', '(', 'a', '.', 'pickled', '(', ')', ')', 'while', 'len', '(', 'self', '.', 'pkt_queue', ')', '>', 'len', '(', 'self', '.', 'aircraft', ')', '*', '2', ':', 'self', '.', 'pkt_queue', '.', 'pop', '(', '0', ')', 'if', 'self', '.', 'module', '(', "'map'", ')', 'is', 'not', 'None', 'and', 'not', 'self', '.', 'menu_added_map', ':', 'self', '.', 'menu_added_map', '=', 'True', 'self', '.', 'module', '(', "'map'", ')', '.', 'add_menu', '(', 'self', '.', 'menu', ')']
trigger sends from ATTITUDE packets
['trigger', 'sends', 'from', 'ATTITUDE', 'packets']
train
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_genobstacles.py#L409-L436
9,299
twilio/twilio-python
twilio/rest/taskrouter/v1/workspace/task/__init__.py
TaskList.create
def create(self, timeout=values.unset, priority=values.unset, task_channel=values.unset, workflow_sid=values.unset, attributes=values.unset): """ Create a new TaskInstance :param unicode timeout: The amount of time in seconds the task is allowed to live up to a maximum of 2 weeks. :param unicode priority: Override priority for the Task. :param unicode task_channel: When MultiTasking is enabled specify the type of the task by passing either TaskChannel Unique Name or Task Channel Sid. :param unicode workflow_sid: The WorkflowSid for the Workflow that you would like to handle routing for this Task. :param unicode attributes: Url-encoded JSON string describing the attributes of this task. :returns: Newly created TaskInstance :rtype: twilio.rest.taskrouter.v1.workspace.task.TaskInstance """ data = values.of({ 'Timeout': timeout, 'Priority': priority, 'TaskChannel': task_channel, 'WorkflowSid': workflow_sid, 'Attributes': attributes, }) payload = self._version.create( 'POST', self._uri, data=data, ) return TaskInstance(self._version, payload, workspace_sid=self._solution['workspace_sid'], )
python
def create(self, timeout=values.unset, priority=values.unset, task_channel=values.unset, workflow_sid=values.unset, attributes=values.unset): """ Create a new TaskInstance :param unicode timeout: The amount of time in seconds the task is allowed to live up to a maximum of 2 weeks. :param unicode priority: Override priority for the Task. :param unicode task_channel: When MultiTasking is enabled specify the type of the task by passing either TaskChannel Unique Name or Task Channel Sid. :param unicode workflow_sid: The WorkflowSid for the Workflow that you would like to handle routing for this Task. :param unicode attributes: Url-encoded JSON string describing the attributes of this task. :returns: Newly created TaskInstance :rtype: twilio.rest.taskrouter.v1.workspace.task.TaskInstance """ data = values.of({ 'Timeout': timeout, 'Priority': priority, 'TaskChannel': task_channel, 'WorkflowSid': workflow_sid, 'Attributes': attributes, }) payload = self._version.create( 'POST', self._uri, data=data, ) return TaskInstance(self._version, payload, workspace_sid=self._solution['workspace_sid'], )
['def', 'create', '(', 'self', ',', 'timeout', '=', 'values', '.', 'unset', ',', 'priority', '=', 'values', '.', 'unset', ',', 'task_channel', '=', 'values', '.', 'unset', ',', 'workflow_sid', '=', 'values', '.', 'unset', ',', 'attributes', '=', 'values', '.', 'unset', ')', ':', 'data', '=', 'values', '.', 'of', '(', '{', "'Timeout'", ':', 'timeout', ',', "'Priority'", ':', 'priority', ',', "'TaskChannel'", ':', 'task_channel', ',', "'WorkflowSid'", ':', 'workflow_sid', ',', "'Attributes'", ':', 'attributes', ',', '}', ')', 'payload', '=', 'self', '.', '_version', '.', 'create', '(', "'POST'", ',', 'self', '.', '_uri', ',', 'data', '=', 'data', ',', ')', 'return', 'TaskInstance', '(', 'self', '.', '_version', ',', 'payload', ',', 'workspace_sid', '=', 'self', '.', '_solution', '[', "'workspace_sid'", ']', ',', ')']
Create a new TaskInstance :param unicode timeout: The amount of time in seconds the task is allowed to live up to a maximum of 2 weeks. :param unicode priority: Override priority for the Task. :param unicode task_channel: When MultiTasking is enabled specify the type of the task by passing either TaskChannel Unique Name or Task Channel Sid. :param unicode workflow_sid: The WorkflowSid for the Workflow that you would like to handle routing for this Task. :param unicode attributes: Url-encoded JSON string describing the attributes of this task. :returns: Newly created TaskInstance :rtype: twilio.rest.taskrouter.v1.workspace.task.TaskInstance
['Create', 'a', 'new', 'TaskInstance']
train
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/taskrouter/v1/workspace/task/__init__.py#L194-L223
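The values.of(...) call is what makes every parameter of TaskList.create optional: it builds the POST form dict while dropping any entry still set to the values.unset sentinel, so only explicitly supplied fields reach the API. A sketch of that sentinel-filtering idiom (the sentinel object here is a stand-in for the library's own):

UNSET = object()   # sentinel meaning "caller didn't supply this"

def of(params):
    return {k: v for k, v in params.items() if v is not UNSET}

print(of({'Timeout': 3600, 'Priority': UNSET, 'Attributes': '{}'}))
# {'Timeout': 3600, 'Attributes': '{}'}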