Column                      Dtype          Values
Unnamed: 0                  int64          0 to 10k
repository_name             stringlengths  7 to 54
func_path_in_repository     stringlengths  5 to 223
func_name                   stringlengths  1 to 134
whole_func_string           stringlengths  100 to 30.3k
language                    stringclasses  1 value
func_code_string            stringlengths  100 to 30.3k
func_code_tokens            stringlengths  138 to 33.2k
func_documentation_string   stringlengths  1 to 15k
func_documentation_tokens   stringlengths  5 to 5.14k
split_name                  stringclasses  1 value
func_code_url               stringlengths  91 to 315
3,000
saltstack/salt
salt/modules/drbd.py
_analyse_overview_field
def _analyse_overview_field(content):
    '''
    Split the field in drbd-overview
    '''
    if "(" in content:
        # Output like "Connected(2*)" or "UpToDate(2*)"
        return content.split("(")[0], content.split("(")[0]
    elif "/" in content:
        # Output like "Primar/Second" or "UpToDa/UpToDa"
        return content.split("/")[0], content.split("/")[1]

    return content, ""
python
Split the field in drbd-overview
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/drbd.py#L13-L24
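A quick usage sketch for the function above; the field strings are hypothetical drbd-overview values exercising all three branches, with expected results as comments:

print(_analyse_overview_field("Connected(2*)"))  # ('Connected', 'Connected')
print(_analyse_overview_field("Primar/Second"))  # ('Primar', 'Second')
print(_analyse_overview_field("StandAlone"))     # ('StandAlone', '')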
3,001
quora/qcore
qcore/events.py
EventHub.trigger
def trigger(self, event, *args):
    """Triggers the specified event by invoking EventHook.trigger under the hood.

    @param event: event to trigger. Any object can be passed as event,
        but string is preferable. If qcore.EnumBase instance is passed,
        its name is used as event key.
    @param args: event arguments.
    @return: self, so calls like this can be chained together.
    """
    event_hook = self.get_or_create(event)
    event_hook.trigger(*args)
    return self
python
Triggers the specified event by invoking EventHook.trigger under the hood. @param event: event to trigger. Any object can be passed as event, but string is preferable. If qcore.EnumBase instance is passed, its name is used as event key. @param args: event arguments. @return: self, so calls like this can be chained together.
train
https://github.com/quora/qcore/blob/fa5cd438eea554db35fd29cbc8dfbde69f09961c/qcore/events.py#L245-L257
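Because trigger returns self, triggers chain; a minimal usage sketch, assuming EventHook also exposes a subscribe method for registering handlers (the event key 'saved' is hypothetical):

from qcore.events import EventHub

hub = EventHub()
# Register a handler on the hook for the event key (subscribe is an assumed API).
hub.get_or_create('saved').subscribe(lambda doc_id: print('saved', doc_id))
hub.trigger('saved', 42).trigger('saved', 43)  # chained calls, prints twice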
3,002
watson-developer-cloud/python-sdk
ibm_watson/speech_to_text_v1.py
CustomWord._to_dict
def _to_dict(self):
    """Return a json dictionary representing this model."""
    _dict = {}
    if hasattr(self, 'word') and self.word is not None:
        _dict['word'] = self.word
    if hasattr(self, 'sounds_like') and self.sounds_like is not None:
        _dict['sounds_like'] = self.sounds_like
    if hasattr(self, 'display_as') and self.display_as is not None:
        _dict['display_as'] = self.display_as
    return _dict
python
Return a json dictionary representing this model.
train
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/speech_to_text_v1.py#L3691-L3700
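A usage sketch for the serialization above (constructor signature assumed from the model's fields): only attributes that are set and non-None appear in the output dictionary:

from ibm_watson.speech_to_text_v1 import CustomWord

word = CustomWord(word='IEEE', sounds_like=['i triple e'])  # display_as left unset
print(word._to_dict())  # {'word': 'IEEE', 'sounds_like': ['i triple e']}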
3,003
callowayproject/Transmogrify
transmogrify/images2gif.py
GifWriter.get_app_ext
def get_app_ext(self, loops=float('inf')):
    """ get_app_ext(loops=float('inf'))

    Application extension. This part specifies the number of loops.
    If loops is 0 or inf, it goes on infinitely.
    """
    if loops == 0 or loops == float('inf'):
        loops = 2**16 - 1
        # bb = "" # application extension should not be used (the extension interprets zero loops to mean an infinite number of loops) Mmm, does not seem to work
    if True:
        bb = "\x21\xFF\x0B"  # application extension
        bb += "NETSCAPE2.0"
        bb += "\x03\x01"
        bb += int_to_bin(loops)
        bb += '\x00'  # end
    return bb
python
get_app_ext(loops=float('inf')) Application extension. This part specifies the number of loops. If loops is 0 or inf, it goes on infinitely.
train
https://github.com/callowayproject/Transmogrify/blob/f1f891b8b923b3a1ede5eac7f60531c1c472379e/transmogrify/images2gif.py#L199-L216
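The block assembled above is the standard NETSCAPE2.0 looping application extension. A standalone bytes-based sketch using struct in place of the module's int_to_bin helper (assuming that helper packs the loop count as a little-endian unsigned 16-bit integer, which is what the GIF spec calls for):

import struct

def netscape_loop_block(loops):
    block = b"\x21\xFF\x0B"            # extension introducer, app-extension label, block size 11
    block += b"NETSCAPE2.0"            # application identifier + authentication code
    block += b"\x03\x01"               # sub-block size 3, loop sub-block id 1
    block += struct.pack("<H", loops)  # loop count; 0 means loop forever
    block += b"\x00"                   # block terminator
    return block

print(netscape_loop_block(2 ** 16 - 1).hex())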
3,004
automl/HpBandSter
hpbandster/optimizers/kde/kernels.py
WangRyzinOrdinal.sample
def sample(self, sample_indices=None, num_samples=1):
    """ returns samples according to the KDE

    Parameters
    ----------
        sample_indices: list of ints
            Indices into the training data used as centers for the samples

        num_samples: int
            if sample_indices is None, this specifies how many samples
            are drawn.
    """
    if sample_indices is None:
        sample_indices = np.random.choice(self.data.shape[0], size=num_samples)
    samples = self.data[sample_indices]

    possible_steps = np.arange(-self.num_values+1, self.num_values)
    idx = (np.abs(possible_steps) < 1e-2)

    ps = 0.5*(1-self.bw) * np.power(self.bw, np.abs(possible_steps))
    ps[idx] = (1-self.bw)
    ps /= ps.sum()

    delta = np.zeros_like(samples)
    oob_idx = np.arange(samples.shape[0])
    while len(oob_idx) > 0:
        samples[oob_idx] -= delta[oob_idx]  # revert move
        delta[oob_idx] = np.random.choice(possible_steps, size=len(oob_idx), p=ps)
        samples[oob_idx] += delta[oob_idx]
        # import pdb; pdb.set_trace()
        oob_idx = oob_idx[np.argwhere(np.logical_or(
            samples[oob_idx] > self.num_values-0.9,
            samples[oob_idx] < -0.1)).flatten()]
    return np.rint(samples)
python
returns samples according to the KDE Parameters ---------- sample_indices: list of ints Indices into the training data used as centers for the samples num_samples: int if sample_indices is None, this specifies how many samples are drawn.
train
https://github.com/automl/HpBandSter/blob/841db4b827f342e5eb7f725723ea6461ac52d45a/hpbandster/optimizers/kde/kernels.py#L157-L190
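A small numeric illustration of the step-probability kernel built above, with hypothetical num_values=3 and bw=0.5: mass 1-bw stays on a zero step, geometrically decaying mass goes to larger ordinal moves, and the vector is renormalized:

import numpy as np

num_values, bw = 3, 0.5
possible_steps = np.arange(-num_values + 1, num_values)   # [-2 -1  0  1  2]
ps = 0.5 * (1 - bw) * np.power(bw, np.abs(possible_steps))
ps[np.abs(possible_steps) < 1e-2] = 1 - bw                # zero step keeps mass 1-bw
ps /= ps.sum()
print(ps.round(3))   # [0.071 0.143 0.571 0.143 0.071]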
3,005
google/grr
grr/server/grr_response_server/export_utils.py
IterateAllClients.GetInput
def GetInput(self):
    """Yield client urns."""
    client_list = GetAllClients(token=self.token)
    logging.debug("Got %d clients", len(client_list))

    for client_group in collection.Batch(client_list, self.client_chunksize):
        for fd in aff4.FACTORY.MultiOpen(
                client_group,
                mode="r",
                aff4_type=aff4_grr.VFSGRRClient,
                token=self.token):
            if isinstance(fd, aff4_grr.VFSGRRClient):
                # Skip if older than max_age
                oldest_time = (time.time() - self.max_age) * 1e6
                if fd.Get(aff4_grr.VFSGRRClient.SchemaCls.PING) >= oldest_time:
                    yield fd
python
Yield client urns.
train
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/export_utils.py#L118-L132
3,006
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_firmware.py
brocade_firmware.logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_blade_swbd
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_blade_swbd(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
    config = logical_chassis_fwdl_status
    output = ET.SubElement(logical_chassis_fwdl_status, "output")
    cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
    fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries")
    blade_swbd = ET.SubElement(fwdl_entries, "blade-swbd")
    blade_swbd.text = kwargs.pop('blade_swbd')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
python
Auto Generated Code
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_firmware.py#L956-L969
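Because the method hands the finished element tree to a callback, passing xml.etree.ElementTree.tostring as that callback exposes the generated XML. A sketch; the constructor wiring of the callback is an assumption, not shown in the code above:

import xml.etree.ElementTree as ET
from pynos.versions.ver_7.ver_7_1_0.yang.brocade_firmware import brocade_firmware

fw = brocade_firmware(callback=ET.tostring)  # assumed constructor signature
print(fw.logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_blade_swbd(blade_swbd='76'))
# expected: b'<logical_chassis_fwdl_status><output><cluster-fwdl-entries><fwdl-entries>
#            <blade-swbd>76</blade-swbd></fwdl-entries></cluster-fwdl-entries></output>
#            </logical_chassis_fwdl_status>'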
3,007
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/subscribe/subscribe.py
RoutingBase.list_all
def list_all(self):
    """All items"""
    return list(set(
        item for items in self._routes.values() for item in items
    ))
python
All items
train
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/subscribe/subscribe.py#L104-L108
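The flatten-and-dedup idiom above in isolation, with a plain dict standing in for self._routes:

routes = {'topic/a': ['s1', 's2'], 'topic/b': ['s2', 's3']}
print(sorted({item for items in routes.values() for item in items}))
# ['s1', 's2', 's3'] -- each item appears once; set() itself does not preserve order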
3,008
garyp/sifter
sifter/grammar/grammar.py
p_block
def p_block(p):
    """block : '{' commands '}' """
    # section 3.2: REQUIRE command must come before any other commands,
    # which means it can't be in the block of another command
    if any(command.RULE_IDENTIFIER == 'REQUIRE'
           for command in p[2].commands):
        print("REQUIRE command not allowed inside of a block (line %d)"
              % (p.lineno(2)))
        raise SyntaxError
    p[0] = p[2]
python
block : '{' commands '}'
train
https://github.com/garyp/sifter/blob/9c472af76853c1196387141e017114d282637474/sifter/grammar/grammar.py#L63-L72
3,009
sbmlteam/libCombine
examples/python/printExample.py
printMetaDataFor
def printMetaDataFor(archive, location):
    """ Prints metadata for given location.

    :param archive: CombineArchive instance
    :param location:
    :return:
    """
    desc = archive.getMetadataForLocation(location)
    if desc.isEmpty():
        print(" no metadata for '{0}'".format(location))
        return None

    print(" metadata for '{0}':".format(location))
    print(" Created : {0}".format(desc.getCreated().getDateAsString()))
    for i in range(desc.getNumModified()):
        print(" Modified : {0}".format(desc.getModified(i).getDateAsString()))

    print(" # Creators: {0}".format(desc.getNumCreators()))
    for i in range(desc.getNumCreators()):
        creator = desc.getCreator(i)
        print(" {0} {1}".format(creator.getGivenName(), creator.getFamilyName()))
python
Prints metadata for given location. :param archive: CombineArchive instance :param location: :return:
train
https://github.com/sbmlteam/libCombine/blob/d7c11a90129dedbcc8bdba8d204be03f1dd0c3e4/examples/python/printExample.py#L11-L31
3,010
earlye/nephele
nephele/AwsStack.py
AwsStack.do_parameter
def do_parameter(self, args):
    """Print a parameter"""
    parser = CommandArgumentParser("parameter")
    parser.add_argument(dest="id", help="Parameter to print")
    args = vars(parser.parse_args(args))
    print("printing parameter {}".format(args['id']))

    try:
        index = int(args['id'])
        parameter = self.wrappedStack['resourcesByTypeName']['parameters'][index]
    except ValueError:
        parameter = self.wrappedStack['resourcesByTypeName']['parameters'][args['id']]

    print(parameter.resource_status)
python
Print a parameter
train
https://github.com/earlye/nephele/blob/a7dadc68f4124671457f09119419978c4d22013e/nephele/AwsStack.py#L278-L291
3,011
soasme/rio
rio/setup.py
configure_app
def configure_app(app):
    """Configure Flask/Celery application.

    * Rio will find environment variable `RIO_SETTINGS` first::

        $ export RIO_SETTINGS=/path/to/settings.cfg
        $ rio worker

    * If `RIO_SETTINGS` is missing, Rio will try to load configuration
      module in `rio.settings` according to another environment
      variable `RIO_ENV`. Default load `rio.settings.dev`.

        $ export RIO_ENV=prod
        $ rio worker
    """
    app.config_from_object('rio.settings.default')

    if environ.get('RIO_SETTINGS'):
        app.config_from_envvar('RIO_SETTINGS')
        return

    config_map = {
        'dev': 'rio.settings.dev',
        'stag': 'rio.settings.stag',
        'prod': 'rio.settings.prod',
        'test': 'rio.settings.test',
    }

    rio_env = environ.get('RIO_ENV', 'dev')
    config = config_map.get(rio_env, config_map['dev'])

    app.config_from_object(config)
python
Configure Flask/Celery application. * Rio will find environment variable `RIO_SETTINGS` first:: $ export RIO_SETTINGS=/path/to/settings.cfg $ rio worker * If `RIO_SETTINGS` is missing, Rio will try to load configuration module in `rio.settings` according to another environment variable `RIO_ENV`. Default load `rio.settings.dev`. $ export RIO_ENV=prod $ rio worker
train
https://github.com/soasme/rio/blob/f722eb0ff4b0382bceaff77737f0b87cb78429e7/rio/setup.py#L19-L49
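The fallback chain the function implements, sketched in isolation: RIO_SETTINGS wins outright, otherwise RIO_ENV selects a settings module, and unknown values fall back to dev:

import os

config_map = {'dev': 'rio.settings.dev', 'stag': 'rio.settings.stag',
              'prod': 'rio.settings.prod', 'test': 'rio.settings.test'}

os.environ['RIO_ENV'] = 'prod'
print(config_map.get(os.environ.get('RIO_ENV', 'dev'), config_map['dev']))
# rio.settings.prod; setting RIO_ENV to an unknown value would print rio.settings.dev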
3,012
PmagPy/PmagPy
dev_setup.py
windows_install
def windows_install(path_to_python=""): """ Sets the .py extension to be associated with the ftype Python which is then set to the python.exe you provide in the path_to_python variable or after the -p flag if run as a script. Once the python environment is set up the function proceeds to set PATH and PYTHONPATH using setx. Parameters ---------- path_to_python : the path the python.exe you want windows to execute when running .py files """ if not path_to_python: print("Please enter the path to your python.exe you wish Windows to use to run python files. If you do not, this script will not be able to set up a full python environment in Windows. If you already have a python environment set up in Windows such that you can run python scripts from command prompt with just a file name then ignore this message. Otherwise, you will need to run dev_setup.py again with the command line option '-p' followed by the correct full path to python.\nRun dev_setup.py with the -h flag for more details") print("Would you like to continue? [y/N] ") ans = input() if ans == 'y': pass else: return # be sure to add python.exe if the user forgets to include the file name if os.path.isdir(path_to_python): path_to_python = os.path.join(path_to_python, "python.exe") if not os.path.isfile(path_to_python): print("The path to python provided is not a full path to the python.exe file or this path does not exist, was given %s.\nPlease run again with the command line option '-p' followed by the correct full path to python.\nRun dev_setup.py with the -h flag for more details" % path_to_python) return # make windows associate .py with python subprocess.check_call('assoc .py=Python', shell=True) subprocess.check_call('ftype Python=%s ' % path_to_python + '"%1" %*', shell=True) PmagPyDir = os.path.abspath(".") ProgramsDir = os.path.join(PmagPyDir, 'programs') dirs_to_add = [ProgramsDir] for d in next(os.walk(ProgramsDir))[1]: dirs_to_add.append(os.path.join(ProgramsDir, d)) path = str(subprocess.check_output('echo %PATH%', shell=True)).strip('\n') if "PATH" in path: path = '' pypath = str(subprocess.check_output( 'echo %PYTHONPATH%', shell=True)).strip('\n') if "PYTHONPATH" in pypath: pypath = PmagPyDir + ';' + ProgramsDir else: pypath += ';' + PmagPyDir + ';' + ProgramsDir for d_add in dirs_to_add: path += ';' + d_add unique_path_list = [] for p in path.split(';'): p = p.replace('"', '') if p not in unique_path_list: unique_path_list.append(p) unique_pypath_list = [] for p in pypath.split(';'): p = p.replace('"', '') if p not in unique_pypath_list: unique_pypath_list.append(p) path = functools.reduce(lambda x, y: x + ';' + y, unique_path_list) pypath = functools.reduce(lambda x, y: x + ';' + y, unique_pypath_list) print('setx PATH "%s"' % path) subprocess.call('setx PATH "%s"' % path, shell=True) print('setx PYTHONPATH "%s"' % pypath) subprocess.call('setx PYTHONPATH "%s"' % (pypath), shell=True) print("Install complete. Please restart the command prompt to complete install")
python
Sets the .py extension to be associated with the ftype Python which is then set to the python.exe you provide in the path_to_python variable or after the -p flag if run as a script. Once the python environment is set up the function proceeds to set PATH and PYTHONPATH using setx. Parameters ---------- path_to_python : the path to the python.exe you want windows to execute when running .py files
train
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dev_setup.py#L127-L193
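The order-preserving dedupe followed by the functools.reduce rejoin above is equivalent to dict.fromkeys plus ';'.join; a compact sketch with hypothetical PATH entries:

entries = 'C:\\a;C:\\b;"C:\\a";C:\\c'.split(';')
cleaned = [p.replace('"', '') for p in entries]
deduped = list(dict.fromkeys(cleaned))  # keeps first occurrence, preserves order
print(';'.join(deduped))                # C:\a;C:\b;C:\c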
3,013
chaoss/grimoirelab-elk
grimoire_elk/enriched/jira.py
JiraEnrich.get_item_sh
def get_item_sh(self, item, roles=None, date_field=None):
    """Add sorting hat enrichment fields"""

    eitem_sh = {}
    created = str_to_datetime(date_field)

    for rol in roles:
        identity = self.get_sh_identity(item, rol)
        eitem_sh.update(self.get_item_sh_fields(identity, created, rol=rol))

        if not eitem_sh[rol + '_org_name']:
            eitem_sh[rol + '_org_name'] = SH_UNKNOWN_VALUE
        if not eitem_sh[rol + '_name']:
            eitem_sh[rol + '_name'] = SH_UNKNOWN_VALUE
        if not eitem_sh[rol + '_user_name']:
            eitem_sh[rol + '_user_name'] = SH_UNKNOWN_VALUE

        # Add the author field common in all data sources
        if rol == self.get_field_author():
            identity = self.get_sh_identity(item, rol)
            eitem_sh.update(self.get_item_sh_fields(identity, created, rol="author"))

            if not eitem_sh['author_org_name']:
                eitem_sh['author_org_name'] = SH_UNKNOWN_VALUE
            if not eitem_sh['author_name']:
                eitem_sh['author_name'] = SH_UNKNOWN_VALUE
            if not eitem_sh['author_user_name']:
                eitem_sh['author_user_name'] = SH_UNKNOWN_VALUE

    return eitem_sh
python
Add sorting hat enrichment fields
train
https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/jira.py#L109-L142
3,014
mrstephenneal/pdfconduit
pdf/transform/slice.py
slicer
def slicer(document, first_page=None, last_page=None, suffix='sliced', tempdir=None):
    """Slice a PDF document to remove pages."""
    # Set output file name
    if tempdir:
        with NamedTemporaryFile(suffix='.pdf', dir=tempdir, delete=False) as temp:
            output = temp.name
    elif suffix:
        output = os.path.join(os.path.dirname(document), add_suffix(document, suffix))
    else:
        with NamedTemporaryFile(suffix='.pdf') as temp:
            output = temp.name

    # Reindex page selections for simple user input
    first_page = first_page - 1 if first_page is not None else None

    # Validate page range by comparing selection to number of pages in PDF document
    pages = Info(document).pages
    invalid = 'Number of pages: ' + str(pages) + ' ----> Page Range Input: ' + str(first_page) + '-' + str(last_page)
    assert first_page <= last_page <= pages, invalid

    pdf = PdfFileReader(document)
    writer = PdfFileWriter()

    pages = list(range(pdf.getNumPages()))[first_page:last_page]
    for page in pages:
        writer.addPage(pdf.getPage(page))

    with open(output, 'wb') as out:
        writer.write(out)
    return output
python
Slice a PDF document to remove pages.
train
https://github.com/mrstephenneal/pdfconduit/blob/993421cc087eefefe01ff09afabd893bcc2718ec/pdf/transform/slice.py#L11-L40
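A hedged usage sketch (the input path is hypothetical, and the output name assumes add_suffix appends the suffix before the extension); page numbers at the call site are 1-indexed, since the function shifts first_page down by one:

out_path = slicer('/tmp/input.pdf', first_page=2, last_page=5)
print(out_path)  # expected: /tmp/input_sliced.pdf, containing pages 2 through 5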
3,015
tensorpack/tensorpack
examples/FasterRCNN/model_cascade.py
CascadeRCNNHead.decoded_output_boxes
def decoded_output_boxes(self):
    """
    Returns:
        Nx#classx4
    """
    ret = self._cascade_boxes[-1]
    ret = tf.expand_dims(ret, 1)     # class-agnostic
    return tf.tile(ret, [1, self.num_classes, 1])
python
Returns: Nx#classx4
train
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/model_cascade.py#L103-L110
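The expand-then-tile broadcast in isolation, sketched with NumPy so it runs without TensorFlow (shapes mirror the tf calls; values are hypothetical):

import numpy as np

num_classes = 3
boxes = np.zeros((5, 4))                        # N x 4 class-agnostic boxes
tiled = np.tile(boxes[:, None, :], (1, num_classes, 1))
print(tiled.shape)                              # (5, 3, 4): N x #class x 4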
3,016
xolox/python-vcs-repo-mgr
vcs_repo_mgr/__init__.py
Repository.release_to_branch
def release_to_branch(self, release_id):
    """
    Shortcut to translate a release identifier to a branch name.

    :param release_id: A :attr:`Release.identifier` value (a string).
    :returns: A branch name (a string).
    :raises: :exc:`~exceptions.TypeError` when :attr:`release_scheme`
             isn't 'branches'.
    """
    self.ensure_release_scheme('branches')
    return self.releases[release_id].revision.branch
python
Shortcut to translate a release identifier to a branch name. :param release_id: A :attr:`Release.identifier` value (a string). :returns: A branch name (a string). :raises: :exc:`~exceptions.TypeError` when :attr:`release_scheme` isn't 'branches'.
train
https://github.com/xolox/python-vcs-repo-mgr/blob/fdad2441a3e7ba5deeeddfa1c2f5ebc00c393aed/vcs_repo_mgr/__init__.py#L1825-L1835
3,017
gem/oq-engine
openquake/hazardlib/geo/surface/multi.py
MultiSurface._setup_gc2_framework
def _setup_gc2_framework(self):
    """
    This method establishes the GC2 framework for a multi-segment
    (and indeed multi-typology) case based on the description in
    Spudich & Chiou (2015) - see section on Generalized Coordinate
    System for Multiple Rupture Traces
    """
    # Generate cartesian edge set
    edge_sets = self._get_cartesian_edge_set()
    self.gc2_config = {}
    # Determine furthest two points apart
    endpoint_set = numpy.vstack([cep for cep in self.cartesian_endpoints])
    dmat = squareform(pdist(endpoint_set))
    irow, icol = numpy.unravel_index(numpy.argmax(dmat), dmat.shape)
    # Join the furthest points to form a vector (a_hat in Spudich & Chiou)
    # According to Spudich & Chiou, a_vec should be eastward trending
    if endpoint_set[irow, 0] > endpoint_set[icol, 0]:
        # Row point is to the east of column point
        beginning = endpoint_set[icol, :2]
        ending = endpoint_set[irow, :2]
    else:
        # Column point is to the east of row point
        beginning = endpoint_set[irow, :2]
        ending = endpoint_set[icol, :2]
    # Convert to unit vector
    a_vec = ending - beginning
    self.gc2_config["a_hat"] = a_vec / numpy.linalg.norm(a_vec)
    # Get e_j set
    self.gc2_config["ejs"] = []
    for c_edges in self.cartesian_edges:
        self.gc2_config["ejs"].append(
            numpy.dot(c_edges[-1, :2] - c_edges[0, :2],
                      self.gc2_config["a_hat"]))
    # A "total E" is defined as the sum of the e_j values
    self.gc2_config["e_tot"] = sum(self.gc2_config["ejs"])
    sign_etot = numpy.sign(self.gc2_config["e_tot"])
    b_vec = numpy.zeros(2)
    self.gc2_config["sign"] = []
    for i, c_edges in enumerate(self.cartesian_edges):
        segment_sign = numpy.sign(self.gc2_config["ejs"][i]) * sign_etot
        self.gc2_config["sign"].append(segment_sign)
        if segment_sign < 0:
            # Segment is discordant - reverse the points
            c_edges = numpy.flipud(c_edges)
            self.cartesian_edges[i] = c_edges
            self.cartesian_endpoints[i] = numpy.flipud(
                self.cartesian_endpoints[i])
        b_vec += (c_edges[-1, :2] - c_edges[0, :2])
    # Get unit vector
    self.gc2_config["b_hat"] = b_vec / numpy.linalg.norm(b_vec)
    if numpy.dot(a_vec, self.gc2_config["b_hat"]) >= 0.0:
        self.p0 = beginning
    else:
        self.p0 = ending
    # To later calculate Ry0 it is necessary to determine the maximum
    # GC2-U coordinate for the fault
    self._get_gc2_coordinates_for_rupture(edge_sets)
python
This method establishes the GC2 framework for a multi-segment (and indeed multi-typology) case based on the description in Spudich & Chiou (2015) - see section on Generalized Coordinate System for Multiple Rupture Traces
train
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/geo/surface/multi.py#L394-L452
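The discordance bookkeeping in the tokens above is easier to see on toy data. A minimal sketch, assuming plain (n, 2) NumPy arrays stand in for the cartesian edge traces (values are illustrative, not from the library): pick the two endpoints farthest apart, orient the strike vector a_hat eastward, project each trace onto it, and flip any trace whose projection e_j disagrees in sign with the total.

import numpy as np
from scipy.spatial.distance import pdist, squareform

# Hypothetical traces; the second one runs "backwards" on purpose.
edges = [np.array([[0.0, 0.0], [4.0, 1.0]]),
         np.array([[9.0, 2.0], [5.0, 1.5]])]

endpoints = np.vstack([e[[0, -1]] for e in edges])
dmat = squareform(pdist(endpoints))
irow, icol = np.unravel_index(np.argmax(dmat), dmat.shape)

# Orient a_hat west -> east, as Spudich & Chiou require.
p, q = endpoints[icol], endpoints[irow]
beginning, ending = (p, q) if q[0] > p[0] else (q, p)
a_hat = (ending - beginning) / np.linalg.norm(ending - beginning)

ejs = [float(np.dot(e[-1] - e[0], a_hat)) for e in edges]
sign_etot = np.sign(sum(ejs))
for i, e in enumerate(edges):
    if np.sign(ejs[i]) * sign_etot < 0:   # discordant trace
        edges[i] = np.flipud(e)           # reverse its points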
3,018
miccoli/pyownet
src/pyownet/protocol.py
_OwnetConnection._read_msg
def _read_msg(self): """read message from server""" # # NOTE: # '_recv_socket(nbytes)' was implemented as # 'socket.recv(nbytes, socket.MSG_WAITALL)' # but socket.MSG_WAITALL proved not reliable # def _recv_socket(nbytes): """read nbytes bytes from self.socket""" # # code below is written under the assumption that # 'nbytes' is smallish so that the 'while len(buf) < nbytes' loop # is entered rarely # try: buf = self.socket.recv(nbytes) except IOError as err: raise ConnError(*err.args) if not buf: raise ShortRead(0, nbytes) while len(buf) < nbytes: try: tmp = self.socket.recv(nbytes - len(buf)) except IOError as err: raise ConnError(*err.args) if not tmp: if self.verbose: print('ee', repr(buf)) raise ShortRead(len(buf), nbytes) buf += tmp assert len(buf) == nbytes, (buf, len(buf), nbytes) return buf data = _recv_socket(_FromServerHeader.header_size) header = _FromServerHeader(data) if self.verbose: print('<-', repr(header)) # error conditions if header.version != 0: raise MalformedHeader('bad version', header) if header.payload > MAX_PAYLOAD: raise MalformedHeader('huge payload, unwilling to read', header) if header.payload > 0: payload = _recv_socket(header.payload) if self.verbose: print('..', repr(payload)) assert header.size <= header.payload payload = payload[:header.size] else: payload = bytes() return header, payload
python
def _read_msg(self): """read message from server""" # # NOTE: # '_recv_socket(nbytes)' was implemented as # 'socket.recv(nbytes, socket.MSG_WAITALL)' # but socket.MSG_WAITALL proved not reliable # def _recv_socket(nbytes): """read nbytes bytes from self.socket""" # # code below is written under the assumption that # 'nbytes' is smallish so that the 'while len(buf) < nbytes' loop # is entered rarely # try: buf = self.socket.recv(nbytes) except IOError as err: raise ConnError(*err.args) if not buf: raise ShortRead(0, nbytes) while len(buf) < nbytes: try: tmp = self.socket.recv(nbytes - len(buf)) except IOError as err: raise ConnError(*err.args) if not tmp: if self.verbose: print('ee', repr(buf)) raise ShortRead(len(buf), nbytes) buf += tmp assert len(buf) == nbytes, (buf, len(buf), nbytes) return buf data = _recv_socket(_FromServerHeader.header_size) header = _FromServerHeader(data) if self.verbose: print('<-', repr(header)) # error conditions if header.version != 0: raise MalformedHeader('bad version', header) if header.payload > MAX_PAYLOAD: raise MalformedHeader('huge payload, unwilling to read', header) if header.payload > 0: payload = _recv_socket(header.payload) if self.verbose: print('..', repr(payload)) assert header.size <= header.payload payload = payload[:header.size] else: payload = bytes() return header, payload
['def', '_read_msg', '(', 'self', ')', ':', '#', '# NOTE:', "# '_recv_socket(nbytes)' was implemented as", "# 'socket.recv(nbytes, socket.MSG_WAITALL)'", '# but socket.MSG_WAITALL proved not reliable', '#', 'def', '_recv_socket', '(', 'nbytes', ')', ':', '"""read nbytes bytes from self.socket"""', '#', '# code below is written under the assumption that', "# 'nbytes' is smallish so that the 'while len(buf) < nbytes' loop", '# is entered rarely', '#', 'try', ':', 'buf', '=', 'self', '.', 'socket', '.', 'recv', '(', 'nbytes', ')', 'except', 'IOError', 'as', 'err', ':', 'raise', 'ConnError', '(', '*', 'err', '.', 'args', ')', 'if', 'not', 'buf', ':', 'raise', 'ShortRead', '(', '0', ',', 'nbytes', ')', 'while', 'len', '(', 'buf', ')', '<', 'nbytes', ':', 'try', ':', 'tmp', '=', 'self', '.', 'socket', '.', 'recv', '(', 'nbytes', '-', 'len', '(', 'buf', ')', ')', 'except', 'IOError', 'as', 'err', ':', 'raise', 'ConnError', '(', '*', 'err', '.', 'args', ')', 'if', 'not', 'tmp', ':', 'if', 'self', '.', 'verbose', ':', 'print', '(', "'ee'", ',', 'repr', '(', 'buf', ')', ')', 'raise', 'ShortRead', '(', 'len', '(', 'buf', ')', ',', 'nbytes', ')', 'buf', '+=', 'tmp', 'assert', 'len', '(', 'buf', ')', '==', 'nbytes', ',', '(', 'buf', ',', 'len', '(', 'buf', ')', ',', 'nbytes', ')', 'return', 'buf', 'data', '=', '_recv_socket', '(', '_FromServerHeader', '.', 'header_size', ')', 'header', '=', '_FromServerHeader', '(', 'data', ')', 'if', 'self', '.', 'verbose', ':', 'print', '(', "'<-'", ',', 'repr', '(', 'header', ')', ')', '# error conditions', 'if', 'header', '.', 'version', '!=', '0', ':', 'raise', 'MalformedHeader', '(', "'bad version'", ',', 'header', ')', 'if', 'header', '.', 'payload', '>', 'MAX_PAYLOAD', ':', 'raise', 'MalformedHeader', '(', "'huge payload, unwilling to read'", ',', 'header', ')', 'if', 'header', '.', 'payload', '>', '0', ':', 'payload', '=', '_recv_socket', '(', 'header', '.', 'payload', ')', 'if', 'self', '.', 'verbose', ':', 'print', '(', "'..'", ',', 'repr', '(', 'payload', ')', ')', 'assert', 'header', '.', 'size', '<=', 'header', '.', 'payload', 'payload', '=', 'payload', '[', ':', 'header', '.', 'size', ']', 'else', ':', 'payload', '=', 'bytes', '(', ')', 'return', 'header', ',', 'payload']
read message from server
['read', 'message', 'from', 'server']
train
https://github.com/miccoli/pyownet/blob/190afea6a72705772b942d7929bc0aa6561043e0/src/pyownet/protocol.py#L458-L519
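The NOTE embedded above is the heart of this method: socket.recv(n, MSG_WAITALL) is not guaranteed to deliver n bytes, so the code loops until the buffer is full. A self-contained sketch of that receive-exactly-n pattern; pyownet's own ShortRead/ConnError wrappers are replaced with stock exceptions here.

import socket

def recv_exactly(sock: socket.socket, nbytes: int) -> bytes:
    """Read exactly nbytes from sock, raising if the peer closes early."""
    buf = b''
    while len(buf) < nbytes:
        chunk = sock.recv(nbytes - len(buf))  # may legally return fewer bytes
        if not chunk:                         # orderly shutdown by the peer
            raise EOFError('short read: got %d of %d bytes' % (len(buf), nbytes))
        buf += chunk
    return buf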
3,019
sods/ods
pods/datasets.py
airline_delay
def airline_delay(data_set='airline_delay', num_train=700000, num_test=100000, seed=default_seed): """Airline delay data used in Gaussian Processes for Big Data by Hensman, Fusi and Lawrence""" if not data_available(data_set): download_data(data_set) dir_path = os.path.join(data_path, data_set) filename = os.path.join(dir_path, 'filtered_data.pickle') # 1. Load the dataset import pandas as pd data = pd.read_pickle(filename) # WARNING: removing year data.pop('Year') # Get data matrices Yall = data.pop('ArrDelay').values[:,None] Xall = data.values # Subset the data (memory!!) all_data = num_train+num_test Xall = Xall[:all_data] Yall = Yall[:all_data] # Get testing points np.random.seed(seed=seed) N_shuffled = permute(Yall.shape[0]) train, test = N_shuffled[num_test:], N_shuffled[:num_test] X, Y = Xall[train], Yall[train] Xtest, Ytest = Xall[test], Yall[test] covariates = ['month', 'day of month', 'day of week', 'departure time', 'arrival time', 'air time', 'distance to travel', 'age of aircraft / years'] response = ['delay'] return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'seed' : seed, 'info': "Airline delay data used for demonstrating Gaussian processes for big data.", 'covariates': covariates, 'response': response}, data_set)
python
def airline_delay(data_set='airline_delay', num_train=700000, num_test=100000, seed=default_seed): """Airline delay data used in Gaussian Processes for Big Data by Hensman, Fusi and Lawrence""" if not data_available(data_set): download_data(data_set) dir_path = os.path.join(data_path, data_set) filename = os.path.join(dir_path, 'filtered_data.pickle') # 1. Load the dataset import pandas as pd data = pd.read_pickle(filename) # WARNING: removing year data.pop('Year') # Get data matrices Yall = data.pop('ArrDelay').values[:,None] Xall = data.values # Subset the data (memory!!) all_data = num_train+num_test Xall = Xall[:all_data] Yall = Yall[:all_data] # Get testing points np.random.seed(seed=seed) N_shuffled = permute(Yall.shape[0]) train, test = N_shuffled[num_test:], N_shuffled[:num_test] X, Y = Xall[train], Yall[train] Xtest, Ytest = Xall[test], Yall[test] covariates = ['month', 'day of month', 'day of week', 'departure time', 'arrival time', 'air time', 'distance to travel', 'age of aircraft / years'] response = ['delay'] return data_details_return({'X': X, 'Y': Y, 'Xtest': Xtest, 'Ytest': Ytest, 'seed' : seed, 'info': "Airline delay data used for demonstrating Gaussian processes for big data.", 'covariates': covariates, 'response': response}, data_set)
['def', 'airline_delay', '(', 'data_set', '=', "'airline_delay'", ',', 'num_train', '=', '700000', ',', 'num_test', '=', '100000', ',', 'seed', '=', 'default_seed', ')', ':', 'if', 'not', 'data_available', '(', 'data_set', ')', ':', 'download_data', '(', 'data_set', ')', 'dir_path', '=', 'os', '.', 'path', '.', 'join', '(', 'data_path', ',', 'data_set', ')', 'filename', '=', 'os', '.', 'path', '.', 'join', '(', 'dir_path', ',', "'filtered_data.pickle'", ')', '# 1. Load the dataset', 'import', 'pandas', 'as', 'pd', 'data', '=', 'pd', '.', 'read_pickle', '(', 'filename', ')', '# WARNING: removing year', 'data', '.', 'pop', '(', "'Year'", ')', '# Get data matrices', 'Yall', '=', 'data', '.', 'pop', '(', "'ArrDelay'", ')', '.', 'values', '[', ':', ',', 'None', ']', 'Xall', '=', 'data', '.', 'values', '# Subset the data (memory!!)', 'all_data', '=', 'num_train', '+', 'num_test', 'Xall', '=', 'Xall', '[', ':', 'all_data', ']', 'Yall', '=', 'Yall', '[', ':', 'all_data', ']', '# Get testing points', 'np', '.', 'random', '.', 'seed', '(', 'seed', '=', 'seed', ')', 'N_shuffled', '=', 'permute', '(', 'Yall', '.', 'shape', '[', '0', ']', ')', 'train', ',', 'test', '=', 'N_shuffled', '[', 'num_test', ':', ']', ',', 'N_shuffled', '[', ':', 'num_test', ']', 'X', ',', 'Y', '=', 'Xall', '[', 'train', ']', ',', 'Yall', '[', 'train', ']', 'Xtest', ',', 'Ytest', '=', 'Xall', '[', 'test', ']', ',', 'Yall', '[', 'test', ']', 'covariates', '=', '[', "'month'", ',', "'day of month'", ',', "'day of week'", ',', "'departure time'", ',', "'arrival time'", ',', "'air time'", ',', "'distance to travel'", ',', "'age of aircraft / years'", ']', 'response', '=', '[', "'delay'", ']', 'return', 'data_details_return', '(', '{', "'X'", ':', 'X', ',', "'Y'", ':', 'Y', ',', "'Xtest'", ':', 'Xtest', ',', "'Ytest'", ':', 'Ytest', ',', "'seed'", ':', 'seed', ',', "'info'", ':', '"Airline delay data used for demonstrating Gaussian processes for big data."', ',', "'covariates'", ':', 'covariates', ',', "'response'", ':', 'response', '}', ',', 'data_set', ')']
Airline delay data used in Gaussian Processes for Big Data by Hensman, Fusi and Lawrence
['Airline', 'delay', 'data', 'used', 'in', 'Gaussian', 'Processes', 'for', 'Big', 'Data', 'by', 'Hensman', 'Fusi', 'and', 'Lawrence']
train
https://github.com/sods/ods/blob/3995c659f25a0a640f6009ed7fcc2559ce659b1d/pods/datasets.py#L1122-L1155
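The split logic is a shuffle followed by two slices. A small sketch of the same idea on dummy arrays, with NumPy's permutation standing in for the pods-internal permute helper (an assumption on my part):

import numpy as np

def shuffled_split(X, Y, num_test, seed=0):
    """Hold out num_test rows at random; the rest become training data."""
    order = np.random.default_rng(seed).permutation(Y.shape[0])
    test, train = order[:num_test], order[num_test:]
    return X[train], Y[train], X[test], Y[test]

X = np.arange(20, dtype=float).reshape(10, 2)
Y = np.arange(10, dtype=float)[:, None]
Xtr, Ytr, Xte, Yte = shuffled_split(X, Y, num_test=3)
assert Xtr.shape == (7, 2) and Xte.shape == (3, 2)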
3,020
SylvanasSun/FishFishJump
fish_dashboard/scrapyd/scrapyd_agent.py
ScrapydAgent.get_job_list
def get_job_list(self, project_name): """ Get the list of pending, running and finished jobs of some project. :param project_name: the project name :return: a dictionary of lists that include job name and status example: {"status": "ok", "pending": [{"id": "78391cc0fcaf11e1b0090800272a6d06", "spider": "spider1"}], "running": [{"id": "422e608f9f28cef127b3d5ef93fe9399", "spider": "spider2", "start_time": "2012-09-12 10:14:03.594664"}], "finished": [{"id": "2f16646cfcaf11e1b0090800272a6d06", "spider": "spider3", "start_time": "2012-09-12 10:14:03.594664", "end_time": "2012-09-12 10:24:03.594664"}]} """ url, method = self.command_set['listjobs'][0], self.command_set['listjobs'][1] data = {'project': project_name} response = http_utils.request(url, method_type=method, data=data, return_type=http_utils.RETURN_JSON) if response is None: logging.warning('%s failure: not found or connection fail' % sys._getframe().f_code.co_name) response = JobList().__dict__ return response
python
def get_job_list(self, project_name): """ Get the list of pending, running and finished jobs of some project. :param project_name: the project name :return: a dictionary of lists that include job name and status example: {"status": "ok", "pending": [{"id": "78391cc0fcaf11e1b0090800272a6d06", "spider": "spider1"}], "running": [{"id": "422e608f9f28cef127b3d5ef93fe9399", "spider": "spider2", "start_time": "2012-09-12 10:14:03.594664"}], "finished": [{"id": "2f16646cfcaf11e1b0090800272a6d06", "spider": "spider3", "start_time": "2012-09-12 10:14:03.594664", "end_time": "2012-09-12 10:24:03.594664"}]} """ url, method = self.command_set['listjobs'][0], self.command_set['listjobs'][1] data = {'project': project_name} response = http_utils.request(url, method_type=method, data=data, return_type=http_utils.RETURN_JSON) if response is None: logging.warning('%s failure: not found or connection fail' % sys._getframe().f_code.co_name) response = JobList().__dict__ return response
['def', 'get_job_list', '(', 'self', ',', 'project_name', ')', ':', 'url', ',', 'method', '=', 'self', '.', 'command_set', '[', "'listjobs'", ']', '[', '0', ']', ',', 'self', '.', 'command_set', '[', "'listjobs'", ']', '[', '1', ']', 'data', '=', '{', "'project'", ':', 'project_name', '}', 'response', '=', 'http_utils', '.', 'request', '(', 'url', ',', 'method_type', '=', 'method', ',', 'data', '=', 'data', ',', 'return_type', '=', 'http_utils', '.', 'RETURN_JSON', ')', 'if', 'response', 'is', 'None', ':', 'logging', '.', 'warning', '(', "'%s failure: not found or connection fail'", '%', 'sys', '.', '_getframe', '(', ')', '.', 'f_code', '.', 'co_name', ')', 'response', '=', 'JobList', '(', ')', '.', '__dict__', 'return', 'response']
Get the list of pending, running and finished jobs of some project. :param project_name: the project name :return: a dictionary of lists that include job name and status example: {"status": "ok", "pending": [{"id": "78391cc0fcaf11e1b0090800272a6d06", "spider": "spider1"}], "running": [{"id": "422e608f9f28cef127b3d5ef93fe9399", "spider": "spider2", "start_time": "2012-09-12 10:14:03.594664"}], "finished": [{"id": "2f16646cfcaf11e1b0090800272a6d06", "spider": "spider3", "start_time": "2012-09-12 10:14:03.594664", "end_time": "2012-09-12 10:24:03.594664"}]}
['Get', 'the', 'list', 'of', 'pending', 'running', 'and', 'finished', 'jobs', 'of', 'some', 'project', '.', ':', 'param', 'project_name', ':', 'the', 'project', 'name', ':', 'return', ':', 'a', 'dictionary', 'of', 'lists', 'that', 'include', 'job', 'name', 'and', 'status', 'example', ':', '{', 'status', ':', 'ok', 'pending', ':', '[', '{', 'id', ':', '78391cc0fcaf11e1b0090800272a6d06', 'spider', ':', 'spider1', '}', ']', 'running', ':', '[', '{', 'id', ':', '422e608f9f28cef127b3d5ef93fe9399', 'spider', ':', 'spider2', 'start_time', ':', '2012', '-', '09', '-', '12', '10', ':', '14', ':', '03', '.', '594664', '}', ']', 'finished', ':', '[', '{', 'id', ':', '2f16646cfcaf11e1b0090800272a6d06', 'spider', ':', 'spider3', 'start_time', ':', '2012', '-', '09', '-', '12', '10', ':', '14', ':', '03', '.', '594664', 'end_time', ':', '2012', '-', '09', '-', '12', '10', ':', '24', ':', '03', '.', '594664', '}', ']', '}']
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_dashboard/scrapyd/scrapyd_agent.py#L214-L233
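The documented response shape is enough to sketch how a caller might walk the result; the dictionary below is the docstring's own example, so nothing here touches the network:

jobs = {"status": "ok",
        "pending": [{"id": "78391cc0fcaf11e1b0090800272a6d06", "spider": "spider1"}],
        "running": [{"id": "422e608f9f28cef127b3d5ef93fe9399", "spider": "spider2",
                     "start_time": "2012-09-12 10:14:03.594664"}],
        "finished": []}

for state in ("pending", "running", "finished"):
    for job in jobs.get(state, []):
        print(state, job["spider"], job.get("start_time", "-"))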
3,021
PyCQA/astroid
astroid/as_string.py
AsStringVisitor.visit_keyword
def visit_keyword(self, node): """return an astroid.Keyword node as string""" if node.arg is None: return "**%s" % node.value.accept(self) return "%s=%s" % (node.arg, node.value.accept(self))
python
def visit_keyword(self, node): """return an astroid.Keyword node as string""" if node.arg is None: return "**%s" % node.value.accept(self) return "%s=%s" % (node.arg, node.value.accept(self))
['def', 'visit_keyword', '(', 'self', ',', 'node', ')', ':', 'if', 'node', '.', 'arg', 'is', 'None', ':', 'return', '"**%s"', '%', 'node', '.', 'value', '.', 'accept', '(', 'self', ')', 'return', '"%s=%s"', '%', '(', 'node', '.', 'arg', ',', 'node', '.', 'value', '.', 'accept', '(', 'self', ')', ')']
return an astroid.Keyword node as string
['return', 'an', 'astroid', '.', 'Keyword', 'node', 'as', 'string']
train
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/as_string.py#L363-L367
3,022
Kunstmord/datalib
src/misc.py
cutoff_filename
def cutoff_filename(prefix, suffix, input_str): """ Cuts off the start and end of a string, as specified by 2 parameters Parameters ---------- prefix : string, if input_str starts with prefix, will cut off prefix suffix : string, if input_str ends with suffix, will cut off suffix input_str : the string to be processed Returns ------- A string, from which the start and end have been cut """ if prefix is not '': if input_str.startswith(prefix): input_str = input_str[len(prefix):] if suffix is not '': if input_str.endswith(suffix): input_str = input_str[:-len(suffix)] return input_str
python
def cutoff_filename(prefix, suffix, input_str): """ Cuts off the start and end of a string, as specified by 2 parameters Parameters ---------- prefix : string, if input_str starts with prefix, will cut off prefix suffix : string, if input_str ends with suffix, will cut off suffix input_str : the string to be processed Returns ------- A string, from which the start and end have been cut """ if prefix is not '': if input_str.startswith(prefix): input_str = input_str[len(prefix):] if suffix is not '': if input_str.endswith(suffix): input_str = input_str[:-len(suffix)] return input_str
['def', 'cutoff_filename', '(', 'prefix', ',', 'suffix', ',', 'input_str', ')', ':', 'if', 'prefix', 'is', 'not', "''", ':', 'if', 'input_str', '.', 'startswith', '(', 'prefix', ')', ':', 'input_str', '=', 'input_str', '[', 'len', '(', 'prefix', ')', ':', ']', 'if', 'suffix', 'is', 'not', "''", ':', 'if', 'input_str', '.', 'endswith', '(', 'suffix', ')', ':', 'input_str', '=', 'input_str', '[', ':', '-', 'len', '(', 'suffix', ')', ']', 'return', 'input_str']
Cuts off the start and end of a string, as specified by 2 parameters Parameters ---------- prefix : string, if input_str starts with prefix, will cut off prefix suffix : string, if input_str ends with suffix, will cut off suffix input_str : the string to be processed Returns ------- A string, from which the start and end have been cut
['Cuts', 'off', 'the', 'start', 'and', 'end', 'of', 'a', 'string', 'as', 'specified', 'by', '2', 'parameters']
train
https://github.com/Kunstmord/datalib/blob/9d7db3e7c3a5feeeb5d19eb0dbee858bd2b50886/src/misc.py#L34-L54
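Usage is straightforward; one caveat worth noting is that the `prefix is not ''` identity tests in the original trigger a SyntaxWarning on Python 3.8+, so a sketch with plain truthiness (behaviourally equivalent here) looks like this:

def cutoff_filename(prefix, suffix, input_str):
    """Same contract as above, using truthiness instead of 'is not' checks."""
    if prefix and input_str.startswith(prefix):
        input_str = input_str[len(prefix):]
    if suffix and input_str.endswith(suffix):
        input_str = input_str[:-len(suffix)]
    return input_str

assert cutoff_filename('img_', '.png', 'img_cat.png') == 'cat'
assert cutoff_filename('', '.png', 'dog.png') == 'dog'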
3,023
mandiant/ioc_writer
ioc_writer/utils/xmlutils.py
delete_namespace
def delete_namespace(parsed_xml): """ Identifies the namespace associated with the root node of an XML document and removes that namespace from the document. :param parsed_xml: lxml.Etree object. :return: Returns the source document with the namespace removed. """ if parsed_xml.getroot().tag.startswith('{'): root = parsed_xml.getroot().tag end_ns = root.find('}') remove_namespace(parsed_xml, root[1:end_ns]) return parsed_xml
python
def delete_namespace(parsed_xml): """ Identifies the namespace associated with the root node of an XML document and removes that namespace from the document. :param parsed_xml: lxml.Etree object. :return: Returns the source document with the namespace removed. """ if parsed_xml.getroot().tag.startswith('{'): root = parsed_xml.getroot().tag end_ns = root.find('}') remove_namespace(parsed_xml, root[1:end_ns]) return parsed_xml
['def', 'delete_namespace', '(', 'parsed_xml', ')', ':', 'if', 'parsed_xml', '.', 'getroot', '(', ')', '.', 'tag', '.', 'startswith', '(', "'{'", ')', ':', 'root', '=', 'parsed_xml', '.', 'getroot', '(', ')', '.', 'tag', 'end_ns', '=', 'root', '.', 'find', '(', "'}'", ')', 'remove_namespace', '(', 'parsed_xml', ',', 'root', '[', '1', ':', 'end_ns', ']', ')', 'return', 'parsed_xml']
Identifies the namespace associated with the root node of an XML document and removes that namespace from the document. :param parsed_xml: lxml.Etree object. :return: Returns the source document with the namespace removed.
['Identifies', 'the', 'namespace', 'associated', 'with', 'the', 'root', 'node', 'of', 'an', 'XML', 'document', 'and', 'removes', 'that', 'namespace', 'from', 'the', 'document', '.']
train
https://github.com/mandiant/ioc_writer/blob/712247f3a10bdc2584fa18ac909fc763f71df21a/ioc_writer/utils/xmlutils.py#L99-L111
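`remove_namespace` is the module's own helper and is not shown in this row; a self-contained approximation of the whole round trip with lxml might look like the following, where the tag-rewriting loop is my guess at what that helper does:

from io import BytesIO
from lxml import etree

xml = b'<root xmlns="http://example.com/ns"><child/></root>'
doc = etree.parse(BytesIO(xml))

root_tag = doc.getroot().tag          # '{http://example.com/ns}root'
if root_tag.startswith('{'):
    ns = root_tag[1:root_tag.find('}')]
    for el in doc.iter():
        if isinstance(el.tag, str) and el.tag.startswith('{%s}' % ns):
            el.tag = el.tag.split('}', 1)[1]   # strip the namespace part
    etree.cleanup_namespaces(doc.getroot())    # drop now-unused declarations

print(etree.tostring(doc.getroot()))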
3,024
sixty-north/asq
asq/queryables.py
Queryable.select_many_with_index
def select_many_with_index( self, collection_selector=IndexedElement, result_selector=lambda source_element, collection_element: collection_element): '''Projects each element of a sequence to an intermediate new sequence, incorporating the index of the element, flattens the resulting sequence into one sequence and optionally transforms the flattened sequence using a selector function. Note: This method uses deferred execution. Args: collection_selector: A binary function mapping each element of the source sequence into an intermediate sequence, by incorporating its index in the source sequence. The two positional arguments to the function are the zero-based index of the source element and the value of the element. The result of the function should be an iterable derived from the index and element value. If no collection_selector is provided, the elements of the intermediate sequence will consist of tuples of (index, element) from the source sequence. result_selector: An optional binary function mapping the elements in the flattened intermediate sequence together with their corresponding source elements to elements of the result sequence. The two positional arguments of the result_selector are, first the source element corresponding to an element from the intermediate sequence, and second the actual element from the intermediate sequence. The return value should be the corresponding value in the result sequence. If no result_selector function is provided, the elements of the flattened intermediate sequence are returned untransformed. Returns: A Queryable over a generated sequence whose elements are the result of applying the one-to-many collection_selector to each element of the source sequence which incorporates both the index and value of the source element, concatenating the results into an intermediate sequence, and then mapping each of those elements through the result_selector into the result sequence. Raises: ValueError: If this Queryable has been closed. TypeError: If projector [and selector] are not callable. ''' if self.closed(): raise ValueError("Attempt to call select_many_with_index() on a " "closed Queryable.") if not is_callable(collection_selector): raise TypeError("select_many_with_index() parameter " "projector={0} is not callable".format(repr(collection_selector))) if not is_callable(result_selector): raise TypeError("select_many_with_index() parameter " "selector={0} is not callable".format(repr(result_selector))) return self._create( self._generate_select_many_with_index(collection_selector, result_selector))
python
def select_many_with_index( self, collection_selector=IndexedElement, result_selector=lambda source_element, collection_element: collection_element): '''Projects each element of a sequence to an intermediate new sequence, incorporating the index of the element, flattens the resulting sequence into one sequence and optionally transforms the flattened sequence using a selector function. Note: This method uses deferred execution. Args: collection_selector: A binary function mapping each element of the source sequence into an intermediate sequence, by incorporating its index in the source sequence. The two positional arguments to the function are the zero-based index of the source element and the value of the element. The result of the function should be an iterable derived from the index and element value. If no collection_selector is provided, the elements of the intermediate sequence will consist of tuples of (index, element) from the source sequence. result_selector: An optional binary function mapping the elements in the flattened intermediate sequence together with their corresponding source elements to elements of the result sequence. The two positional arguments of the result_selector are, first the source element corresponding to an element from the intermediate sequence, and second the actual element from the intermediate sequence. The return value should be the corresponding value in the result sequence. If no result_selector function is provided, the elements of the flattened intermediate sequence are returned untransformed. Returns: A Queryable over a generated sequence whose elements are the result of applying the one-to-many collection_selector to each element of the source sequence which incorporates both the index and value of the source element, concatenating the results into an intermediate sequence, and then mapping each of those elements through the result_selector into the result sequence. Raises: ValueError: If this Queryable has been closed. TypeError: If projector [and selector] are not callable. ''' if self.closed(): raise ValueError("Attempt to call select_many_with_index() on a " "closed Queryable.") if not is_callable(collection_selector): raise TypeError("select_many_with_index() parameter " "projector={0} is not callable".format(repr(collection_selector))) if not is_callable(result_selector): raise TypeError("select_many_with_index() parameter " "selector={0} is not callable".format(repr(result_selector))) return self._create( self._generate_select_many_with_index(collection_selector, result_selector))
['def', 'select_many_with_index', '(', 'self', ',', 'collection_selector', '=', 'IndexedElement', ',', 'result_selector', '=', 'lambda', 'source_element', ',', 'collection_element', ':', 'collection_element', ')', ':', 'if', 'self', '.', 'closed', '(', ')', ':', 'raise', 'ValueError', '(', '"Attempt to call select_many_with_index() on a "', '"closed Queryable."', ')', 'if', 'not', 'is_callable', '(', 'collection_selector', ')', ':', 'raise', 'TypeError', '(', '"select_many_with_index() parameter "', '"projector={0} is not callable"', '.', 'format', '(', 'repr', '(', 'collection_selector', ')', ')', ')', 'if', 'not', 'is_callable', '(', 'result_selector', ')', ':', 'raise', 'TypeError', '(', '"select_many_with_index() parameter "', '"selector={0} is not callable"', '.', 'format', '(', 'repr', '(', 'result_selector', ')', ')', ')', 'return', 'self', '.', '_create', '(', 'self', '.', '_generate_select_many_with_index', '(', 'collection_selector', ',', 'result_selector', ')', ')']
Projects each element of a sequence to an intermediate new sequence, incorporating the index of the element, flattens the resulting sequence into one sequence and optionally transforms the flattened sequence using a selector function. Note: This method uses deferred execution. Args: collection_selector: A binary function mapping each element of the source sequence into an intermediate sequence, by incorporating its index in the source sequence. The two positional arguments to the function are the zero-based index of the source element and the value of the element. The result of the function should be an iterable derived from the index and element value. If no collection_selector is provided, the elements of the intermediate sequence will consist of tuples of (index, element) from the source sequence. result_selector: An optional binary function mapping the elements in the flattened intermediate sequence together with their corresponding source elements to elements of the result sequence. The two positional arguments of the result_selector are, first the source element corresponding to an element from the intermediate sequence, and second the actual element from the intermediate sequence. The return value should be the corresponding value in the result sequence. If no result_selector function is provided, the elements of the flattened intermediate sequence are returned untransformed. Returns: A Queryable over a generated sequence whose elements are the result of applying the one-to-many collection_selector to each element of the source sequence which incorporates both the index and value of the source element, concatenating the results into an intermediate sequence, and then mapping each of those elements through the result_selector into the result sequence. Raises: ValueError: If this Queryable has been closed. TypeError: If projector [and selector] are not callable.
['Projects', 'each', 'element', 'of', 'a', 'sequence', 'to', 'an', 'intermediate', 'new', 'sequence', 'incorporating', 'the', 'index', 'of', 'the', 'element', 'flattens', 'the', 'resulting', 'sequence', 'into', 'one', 'sequence', 'and', 'optionally', 'transforms', 'the', 'flattened', 'sequence', 'using', 'a', 'selector', 'function', '.']
train
https://github.com/sixty-north/asq/blob/db0c4cbcf2118435136d4b63c62a12711441088e/asq/queryables.py#L346-L407
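A hedged usage sketch: `query` below is asq's documented initiator, and the lambda receives (index, element) as the docstring describes, expanding each word into per-character pairs:

from asq.initiators import query

words = ['one', 'two']
pairs = query(words).select_many_with_index(
    lambda i, word: [(i, ch) for ch in word]).to_list()
print(pairs)  # [(0, 'o'), (0, 'n'), (0, 'e'), (1, 't'), (1, 'w'), (1, 'o')]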
3,025
chimera0/accel-brain-code
Generative-Adversarial-Networks/pygan/truesampler/conditionaltruesampler/conditional_image_true_sampler.py
ConditionalImageTrueSampler.draw
def draw(self): ''' Draws samples from the `true` distribution. Returns: `np.ndarray` of samples. ''' observed_arr = self.__image_true_sampler.draw() observed_arr = self.add_condition(observed_arr) return observed_arr
python
def draw(self): ''' Draws samples from the `true` distribution. Returns: `np.ndarray` of samples. ''' observed_arr = self.__image_true_sampler.draw() observed_arr = self.add_condition(observed_arr) return observed_arr
['def', 'draw', '(', 'self', ')', ':', 'observed_arr', '=', 'self', '.', '__image_true_sampler', '.', 'draw', '(', ')', 'observed_arr', '=', 'self', '.', 'add_condition', '(', 'observed_arr', ')', 'return', 'observed_arr']
Draws samples from the `true` distribution. Returns: `np.ndarray` of samples.
['Draws', 'samples', 'from', 'the', 'true', 'distribution', '.', 'Returns', ':', 'np', '.', 'ndarray', 'of', 'samples', '.']
train
https://github.com/chimera0/accel-brain-code/blob/03661f6f544bed656269fcd4b3c23c9061629daa/Generative-Adversarial-Networks/pygan/truesampler/conditionaltruesampler/conditional_image_true_sampler.py#L23-L32
3,026
dwavesystems/dimod
dimod/reference/composites/scalecomposite.py
_scale_back_response
def _scale_back_response(bqm, response, scalar, ignored_interactions, ignored_variables, ignore_offset): """Helper function to scale back the response of sample method""" if len(ignored_interactions) + len( ignored_variables) + ignore_offset == 0: response.record.energy = np.divide(response.record.energy, scalar) else: response.record.energy = bqm.energies((response.record.sample, response.variables)) return response
python
def _scale_back_response(bqm, response, scalar, ignored_interactions, ignored_variables, ignore_offset): """Helper function to scale back the response of sample method""" if len(ignored_interactions) + len( ignored_variables) + ignore_offset == 0: response.record.energy = np.divide(response.record.energy, scalar) else: response.record.energy = bqm.energies((response.record.sample, response.variables)) return response
['def', '_scale_back_response', '(', 'bqm', ',', 'response', ',', 'scalar', ',', 'ignored_interactions', ',', 'ignored_variables', ',', 'ignore_offset', ')', ':', 'if', 'len', '(', 'ignored_interactions', ')', '+', 'len', '(', 'ignored_variables', ')', '+', 'ignore_offset', '==', '0', ':', 'response', '.', 'record', '.', 'energy', '=', 'np', '.', 'divide', '(', 'response', '.', 'record', '.', 'energy', ',', 'scalar', ')', 'else', ':', 'response', '.', 'record', '.', 'energy', '=', 'bqm', '.', 'energies', '(', '(', 'response', '.', 'record', '.', 'sample', ',', 'response', '.', 'variables', ')', ')', 'return', 'response']
Helper function to scale back the response of sample method
['Helper', 'function', 'to', 'scale', 'back', 'the', 'response', 'of', 'sample', 'method']
train
https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/reference/composites/scalecomposite.py#L213-L222
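The fast path works because scaling every bias by `scalar` scales every sample's energy by the same factor, so dividing recovers the original energies. A toy check of that identity with plain dicts, deliberately avoiding the dimod API:

def ising_energy(sample, h, J):
    energy = sum(h[v] * sample[v] for v in h)
    energy += sum(J[u, v] * sample[u] * sample[v] for u, v in J)
    return energy

h, J, scalar = {0: 1.0, 1: -2.0}, {(0, 1): 0.5}, 0.25
h_s = {v: b * scalar for v, b in h.items()}
J_s = {e: b * scalar for e, b in J.items()}

s = {0: 1, 1: -1}
assert abs(ising_energy(s, h_s, J_s) / scalar - ising_energy(s, h, J)) < 1e-12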
3,027
ianmiell/shutit
shutit_class.py
ShutIt.file_exists
def file_exists(self, filename, shutit_pexpect_child=None, directory=False, note=None, loglevel=logging.DEBUG): """Return True if file exists on the target host, else False @param filename: Filename to determine the existence of. @param shutit_pexpect_child: See send() @param directory: Indicate that the file is a directory. @param note: See send() @type filename: string @type directory: boolean @rtype: boolean """ shutit_global.shutit_global_object.yield_to_draw() shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child) return shutit_pexpect_session.file_exists(filename=filename,directory=directory,note=note,loglevel=loglevel)
python
def file_exists(self, filename, shutit_pexpect_child=None, directory=False, note=None, loglevel=logging.DEBUG): """Return True if file exists on the target host, else False @param filename: Filename to determine the existence of. @param shutit_pexpect_child: See send() @param directory: Indicate that the file is a directory. @param note: See send() @type filename: string @type directory: boolean @rtype: boolean """ shutit_global.shutit_global_object.yield_to_draw() shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child) return shutit_pexpect_session.file_exists(filename=filename,directory=directory,note=note,loglevel=loglevel)
['def', 'file_exists', '(', 'self', ',', 'filename', ',', 'shutit_pexpect_child', '=', 'None', ',', 'directory', '=', 'False', ',', 'note', '=', 'None', ',', 'loglevel', '=', 'logging', '.', 'DEBUG', ')', ':', 'shutit_global', '.', 'shutit_global_object', '.', 'yield_to_draw', '(', ')', 'shutit_pexpect_child', '=', 'shutit_pexpect_child', 'or', 'self', '.', 'get_current_shutit_pexpect_session', '(', ')', '.', 'pexpect_child', 'shutit_pexpect_session', '=', 'self', '.', 'get_shutit_pexpect_session_from_child', '(', 'shutit_pexpect_child', ')', 'return', 'shutit_pexpect_session', '.', 'file_exists', '(', 'filename', '=', 'filename', ',', 'directory', '=', 'directory', ',', 'note', '=', 'note', ',', 'loglevel', '=', 'loglevel', ')']
Return True if file exists on the target host, else False @param filename: Filename to determine the existence of. @param shutit_pexpect_child: See send() @param directory: Indicate that the file is a directory. @param note: See send() @type filename: string @type directory: boolean @rtype: boolean
['Return', 'True', 'if', 'file', 'exists', 'on', 'the', 'target', 'host', 'else', 'False']
train
https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_class.py#L1253-L1274
3,028
twilio/twilio-python
twilio/rest/preview/hosted_numbers/authorization_document/dependent_hosted_number_order.py
DependentHostedNumberOrderList.stream
def stream(self, status=values.unset, phone_number=values.unset, incoming_phone_number_sid=values.unset, friendly_name=values.unset, unique_name=values.unset, limit=None, page_size=None): """ Streams DependentHostedNumberOrderInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. The results are returned as a generator, so this operation is memory efficient. :param DependentHostedNumberOrderInstance.Status status: The Status of this HostedNumberOrder. :param unicode phone_number: An E164 formatted phone number. :param unicode incoming_phone_number_sid: IncomingPhoneNumber sid. :param unicode friendly_name: A human readable description of this resource. :param unicode unique_name: A unique, developer assigned name of this HostedNumberOrder. :param int limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderInstance] """ limits = self._version.read_limits(limit, page_size) page = self.page( status=status, phone_number=phone_number, incoming_phone_number_sid=incoming_phone_number_sid, friendly_name=friendly_name, unique_name=unique_name, page_size=limits['page_size'], ) return self._version.stream(page, limits['limit'], limits['page_limit'])
python
def stream(self, status=values.unset, phone_number=values.unset, incoming_phone_number_sid=values.unset, friendly_name=values.unset, unique_name=values.unset, limit=None, page_size=None): """ Streams DependentHostedNumberOrderInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. The results are returned as a generator, so this operation is memory efficient. :param DependentHostedNumberOrderInstance.Status status: The Status of this HostedNumberOrder. :param unicode phone_number: An E164 formatted phone number. :param unicode incoming_phone_number_sid: IncomingPhoneNumber sid. :param unicode friendly_name: A human readable description of this resource. :param unicode unique_name: A unique, developer assigned name of this HostedNumberOrder. :param int limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderInstance] """ limits = self._version.read_limits(limit, page_size) page = self.page( status=status, phone_number=phone_number, incoming_phone_number_sid=incoming_phone_number_sid, friendly_name=friendly_name, unique_name=unique_name, page_size=limits['page_size'], ) return self._version.stream(page, limits['limit'], limits['page_limit'])
['def', 'stream', '(', 'self', ',', 'status', '=', 'values', '.', 'unset', ',', 'phone_number', '=', 'values', '.', 'unset', ',', 'incoming_phone_number_sid', '=', 'values', '.', 'unset', ',', 'friendly_name', '=', 'values', '.', 'unset', ',', 'unique_name', '=', 'values', '.', 'unset', ',', 'limit', '=', 'None', ',', 'page_size', '=', 'None', ')', ':', 'limits', '=', 'self', '.', '_version', '.', 'read_limits', '(', 'limit', ',', 'page_size', ')', 'page', '=', 'self', '.', 'page', '(', 'status', '=', 'status', ',', 'phone_number', '=', 'phone_number', ',', 'incoming_phone_number_sid', '=', 'incoming_phone_number_sid', ',', 'friendly_name', '=', 'friendly_name', ',', 'unique_name', '=', 'unique_name', ',', 'page_size', '=', 'limits', '[', "'page_size'", ']', ',', ')', 'return', 'self', '.', '_version', '.', 'stream', '(', 'page', ',', 'limits', '[', "'limit'", ']', ',', 'limits', '[', "'page_limit'", ']', ')']
Streams DependentHostedNumberOrderInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. The results are returned as a generator, so this operation is memory efficient. :param DependentHostedNumberOrderInstance.Status status: The Status of this HostedNumberOrder. :param unicode phone_number: An E164 formatted phone number. :param unicode incoming_phone_number_sid: IncomingPhoneNumber sid. :param unicode friendly_name: A human readable description of this resource. :param unicode unique_name: A unique, developer assigned name of this HostedNumberOrder. :param int limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderInstance]
['Streams', 'DependentHostedNumberOrderInstance', 'records', 'from', 'the', 'API', 'as', 'a', 'generator', 'stream', '.', 'This', 'operation', 'lazily', 'loads', 'records', 'as', 'efficiently', 'as', 'possible', 'until', 'the', 'limit', 'is', 'reached', '.', 'The', 'results', 'are', 'returned', 'as', 'a', 'generator', 'so', 'this', 'operation', 'is', 'memory', 'efficient', '.']
train
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/preview/hosted_numbers/authorization_document/dependent_hosted_number_order.py#L37-L72
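A hypothetical call site; the credentials and document SID are placeholders, and the accessor chain is inferred from the module path above rather than confirmed against the Twilio docs:

from twilio.rest import Client

client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')  # placeholders
orders = (client.preview.hosted_numbers
          .authorization_documents('PXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
          .dependent_hosted_number_orders
          .stream(status='pending', limit=20))   # lazy generator

for order in orders:
    print(order.phone_number)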
3,029
globocom/GloboNetworkAPI-client-python
networkapiclient/Equipamento.py
Equipamento.remover_grupo
def remover_grupo(self, id_equipamento, id_grupo): """Remove the association between an equipment and an equipment group. :param id_equipamento: Equipment identifier. :param id_grupo: Equipment group identifier. :return: None :raise EquipamentoGrupoNaoExisteError: Association between group and equipment not registered. :raise EquipamentoNaoExisteError: Equipment not registered. :raise EquipmentDontRemoveError: Failure to remove an association between an equipment and a group because the group is related only to a group. :raise InvalidParameterError: The equipment and/or group identifier is null or invalid. :raise DataBaseError: Failure in networkapi while accessing the database. :raise XMLError: Failure in networkapi while generating the response XML. """ if not is_valid_int_param(id_equipamento): raise InvalidParameterError( u'O identificador do equipamento é inválido ou não foi informado.') if not is_valid_int_param(id_grupo): raise InvalidParameterError( u'O identificador do grupo é inválido ou não foi informado.') url = 'equipamentogrupo/equipamento/' + \ str(id_equipamento) + '/egrupo/' + str(id_grupo) + '/' code, xml = self.submit(None, 'DELETE', url) return self.response(code, xml)
python
def remover_grupo(self, id_equipamento, id_grupo): """Remove the association between an equipment and an equipment group. :param id_equipamento: Equipment identifier. :param id_grupo: Equipment group identifier. :return: None :raise EquipamentoGrupoNaoExisteError: Association between group and equipment not registered. :raise EquipamentoNaoExisteError: Equipment not registered. :raise EquipmentDontRemoveError: Failure to remove an association between an equipment and a group because the group is related only to a group. :raise InvalidParameterError: The equipment and/or group identifier is null or invalid. :raise DataBaseError: Failure in networkapi while accessing the database. :raise XMLError: Failure in networkapi while generating the response XML. """ if not is_valid_int_param(id_equipamento): raise InvalidParameterError( u'O identificador do equipamento é inválido ou não foi informado.') if not is_valid_int_param(id_grupo): raise InvalidParameterError( u'O identificador do grupo é inválido ou não foi informado.') url = 'equipamentogrupo/equipamento/' + \ str(id_equipamento) + '/egrupo/' + str(id_grupo) + '/' code, xml = self.submit(None, 'DELETE', url) return self.response(code, xml)
['def', 'remover_grupo', '(', 'self', ',', 'id_equipamento', ',', 'id_grupo', ')', ':', 'if', 'not', 'is_valid_int_param', '(', 'id_equipamento', ')', ':', 'raise', 'InvalidParameterError', '(', "u'O identificador do equipamento é inválido ou não foi informado.')", '', 'if', 'not', 'is_valid_int_param', '(', 'id_grupo', ')', ':', 'raise', 'InvalidParameterError', '(', "u'O identificador do grupo é inválido ou não foi informado.')", '', 'url', '=', "'equipamentogrupo/equipamento/'", '+', 'str', '(', 'id_equipamento', ')', '+', "'/egrupo/'", '+', 'str', '(', 'id_grupo', ')', '+', "'/'", 'code', ',', 'xml', '=', 'self', '.', 'submit', '(', 'None', ',', "'DELETE'", ',', 'url', ')', 'return', 'self', '.', 'response', '(', 'code', ',', 'xml', ')']
Remove the association between an equipment and an equipment group. :param id_equipamento: Equipment identifier. :param id_grupo: Equipment group identifier. :return: None :raise EquipamentoGrupoNaoExisteError: Association between group and equipment not registered. :raise EquipamentoNaoExisteError: Equipment not registered. :raise EquipmentDontRemoveError: Failure to remove an association between an equipment and a group because the group is related only to a group. :raise InvalidParameterError: The equipment and/or group identifier is null or invalid. :raise DataBaseError: Failure in networkapi while accessing the database. :raise XMLError: Failure in networkapi while generating the response XML.
['Remove', 'the', 'association', 'between', 'an', 'equipment', 'and', 'an', 'equipment', 'group', '.']
train
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/Equipamento.py#L671-L700
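The method is a thin wrapper: validate both ids, build the resource path, issue a DELETE. A sketch of just the pieces it composes; `is_valid_int_param` here is my assumed reimplementation of the library helper:

def is_valid_int_param(param):
    """Assumed behaviour of the library helper: a positive-integer check."""
    try:
        return int(param) > 0
    except (TypeError, ValueError):
        return False

def build_remove_url(id_equipamento, id_grupo):
    """The endpoint the method DELETEs against."""
    return 'equipamentogrupo/equipamento/%s/egrupo/%s/' % (id_equipamento, id_grupo)

assert is_valid_int_param(42) and not is_valid_int_param('x')
assert build_remove_url(42, 7) == 'equipamentogrupo/equipamento/42/egrupo/7/'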
3,030
JarryShaw/PyPCAPKit
src/utilities/validations.py
list_check
def list_check(*args, func=None): """Check if arguments are list type.""" func = func or inspect.stack()[2][3] for var in args: if not isinstance(var, (list, collections.UserList, collections.abc.MutableSequence)): name = type(var).__name__ raise ListError( f'Function {func} expected list, {name} got instead.')
python
def list_check(*args, func=None): """Check if arguments are list type.""" func = func or inspect.stack()[2][3] for var in args: if not isinstance(var, (list, collections.UserList, collections.abc.MutableSequence)): name = type(var).__name__ raise ListError( f'Function {func} expected list, {name} got instead.')
['def', 'list_check', '(', '*', 'args', ',', 'func', '=', 'None', ')', ':', 'func', '=', 'func', 'or', 'inspect', '.', 'stack', '(', ')', '[', '2', ']', '[', '3', ']', 'for', 'var', 'in', 'args', ':', 'if', 'not', 'isinstance', '(', 'var', ',', '(', 'list', ',', 'collections', '.', 'UserList', ',', 'collections', '.', 'abc', '.', 'MutableSequence', ')', ')', ':', 'name', '=', 'type', '(', 'var', ')', '.', '__name__', 'raise', 'ListError', '(', "f'Function {func} expected list, {name} got instead.'", ')']
Check if arguments are list type.
['Check', 'if', 'arguments', 'are', 'list', 'type', '.']
train
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/utilities/validations.py#L116-L123
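A trimmed, self-contained copy of the validator makes its accept/reject behaviour easy to demo; the real ListError and the inspect.stack() caller lookup belong to pcapkit and are simplified here:

import collections
import collections.abc

class ListError(TypeError):
    """Stand-in for pcapkit's own ListError."""

def list_check(*args, func='caller'):
    for var in args:
        if not isinstance(var, (list, collections.UserList,
                                collections.abc.MutableSequence)):
            raise ListError(f'Function {func} expected list, '
                            f'{type(var).__name__} got instead.')

list_check([1], collections.UserList([2]))   # both accepted silently
try:
    list_check((1, 2))                       # tuples are rejected
except ListError as err:
    print(err)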
3,031
vinci1it2000/schedula
schedula/utils/sol.py
Solution._check_wait_input_flag
def _check_wait_input_flag(self): """ Returns a function to stop the search of the investigated node of the ArciDispatch algorithm. :return: A function to stop the search. :rtype: (bool, str) -> bool """ wf_pred = self._wf_pred # Namespace shortcuts. pred = {k: set(v).issubset for k, v in self._pred.items()} if self._wait_in: we = self._wait_in.get # Namespace shortcut. def check_wait_input_flag(wait_in, n_id): """ Stops the search of the investigated node of the ArciDispatch algorithm, until all inputs are satisfied. :param wait_in: If True the node is waiting input estimations. :type wait_in: bool :param n_id: Data or function node id. :type n_id: str :return: True if all node inputs are satisfied, otherwise False. :rtype: bool """ # Return true if the node inputs are satisfied. if we(n_id, wait_in): return not pred[n_id](wf_pred[n_id]) return False else: def check_wait_input_flag(wait_in, n_id): # Return true if the node inputs are satisfied. return wait_in and not pred[n_id](wf_pred[n_id]) return check_wait_input_flag
python
def _check_wait_input_flag(self): """ Returns a function to stop the search of the investigated node of the ArciDispatch algorithm. :return: A function to stop the search. :rtype: (bool, str) -> bool """ wf_pred = self._wf_pred # Namespace shortcuts. pred = {k: set(v).issubset for k, v in self._pred.items()} if self._wait_in: we = self._wait_in.get # Namespace shortcut. def check_wait_input_flag(wait_in, n_id): """ Stops the search of the investigated node of the ArciDispatch algorithm, until all inputs are satisfied. :param wait_in: If True the node is waiting input estimations. :type wait_in: bool :param n_id: Data or function node id. :type n_id: str :return: True if all node inputs are satisfied, otherwise False. :rtype: bool """ # Return true if the node inputs are satisfied. if we(n_id, wait_in): return not pred[n_id](wf_pred[n_id]) return False else: def check_wait_input_flag(wait_in, n_id): # Return true if the node inputs are satisfied. return wait_in and not pred[n_id](wf_pred[n_id]) return check_wait_input_flag
['def', '_check_wait_input_flag', '(', 'self', ')', ':', 'wf_pred', '=', 'self', '.', '_wf_pred', '# Namespace shortcuts.', 'pred', '=', '{', 'k', ':', 'set', '(', 'v', ')', '.', 'issubset', 'for', 'k', ',', 'v', 'in', 'self', '.', '_pred', '.', 'items', '(', ')', '}', 'if', 'self', '.', '_wait_in', ':', 'we', '=', 'self', '.', '_wait_in', '.', 'get', '# Namespace shortcut.', 'def', 'check_wait_input_flag', '(', 'wait_in', ',', 'n_id', ')', ':', '"""\n Stops the search of the investigated node of the ArciDispatch\n algorithm, until all inputs are satisfied.\n\n :param wait_in:\n If True the node is waiting input estimations.\n :type wait_in: bool\n\n :param n_id:\n Data or function node id.\n :type n_id: str\n\n :return:\n True if all node inputs are satisfied, otherwise False.\n :rtype: bool\n """', '# Return true if the node inputs are satisfied.', 'if', 'we', '(', 'n_id', ',', 'wait_in', ')', ':', 'return', 'not', 'pred', '[', 'n_id', ']', '(', 'wf_pred', '[', 'n_id', ']', ')', 'return', 'False', 'else', ':', 'def', 'check_wait_input_flag', '(', 'wait_in', ',', 'n_id', ')', ':', '# Return true if the node inputs are satisfied.', 'return', 'wait_in', 'and', 'not', 'pred', '[', 'n_id', ']', '(', 'wf_pred', '[', 'n_id', ']', ')', 'return', 'check_wait_input_flag']
Returns a function to stop the search of the investigated node of the ArciDispatch algorithm. :return: A function to stop the search. :rtype: (bool, str) -> bool
['Returns', 'a', 'function', 'to', 'stop', 'the', 'search', 'of', 'the', 'investigated', 'node', 'of', 'the', 'ArciDispatch', 'algorithm', '.']
train
https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/utils/sol.py#L452-L496
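What the factory returns is just a closure over two lookup tables, so a toy reconstruction with plain dicts shows both behaviours: the wait_in-guarded check, and the per-node override path. This is a paraphrase for illustration, not schedula's actual code:

# Toy stand-ins: the inputs each node needs, and those already received.
pred = {'n1': {'a', 'b'}}
wf_pred = {'n1': {'a'}}

def make_check(wait_in_overrides=None):
    has_all = lambda n: pred[n].issubset(wf_pred[n])
    if wait_in_overrides:
        we = wait_in_overrides.get
        return lambda wait_in, n: not has_all(n) if we(n, wait_in) else False
    return lambda wait_in, n: wait_in and not has_all(n)

check = make_check()
print(check(True, 'n1'))   # True: still waiting on input 'b'
wf_pred['n1'].add('b')
print(check(True, 'n1'))   # False: all inputs satisfied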
3,032
mbj4668/pyang
pyang/plugins/jsonxsl.py
JsonXslPlugin.process_module
def process_module(self, yam): """Process data nodes, RPCs and notifications in a single module.""" for ann in yam.search(("ietf-yang-metadata", "annotation")): self.process_annotation(ann) for ch in yam.i_children[:]: if ch.keyword == "rpc": self.process_rpc(ch) elif ch.keyword == "notification": self.process_notification(ch) else: continue yam.i_children.remove(ch) self.process_children(yam, "//nc:*", 1)
python
def process_module(self, yam): """Process data nodes, RPCs and notifications in a single module.""" for ann in yam.search(("ietf-yang-metadata", "annotation")): self.process_annotation(ann) for ch in yam.i_children[:]: if ch.keyword == "rpc": self.process_rpc(ch) elif ch.keyword == "notification": self.process_notification(ch) else: continue yam.i_children.remove(ch) self.process_children(yam, "//nc:*", 1)
['def', 'process_module', '(', 'self', ',', 'yam', ')', ':', 'for', 'ann', 'in', 'yam', '.', 'search', '(', '(', '"ietf-yang-metadata"', ',', '"annotation"', ')', ')', ':', 'self', '.', 'process_annotation', '(', 'ann', ')', 'for', 'ch', 'in', 'yam', '.', 'i_children', '[', ':', ']', ':', 'if', 'ch', '.', 'keyword', '==', '"rpc"', ':', 'self', '.', 'process_rpc', '(', 'ch', ')', 'elif', 'ch', '.', 'keyword', '==', '"notification"', ':', 'self', '.', 'process_notification', '(', 'ch', ')', 'else', ':', 'continue', 'yam', '.', 'i_children', '.', 'remove', '(', 'ch', ')', 'self', '.', 'process_children', '(', 'yam', ',', '"//nc:*"', ',', '1', ')']
Process data nodes, RPCs and notifications in a single module.
['Process', 'data', 'nodes', 'RPCs', 'and', 'notifications', 'in', 'a', 'single', 'module', '.']
train
https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/plugins/jsonxsl.py#L101-L113
3,033
sosy-lab/benchexec
benchexec/container.py
forward_all_signals_async
def forward_all_signals_async(target_pid, process_name): """Install signal handlers that forward all signals to the given process.""" def forwarding_signal_handler(signum): _forward_signal(signum, process_name, forwarding_signal_handler.target_pid) # Somehow we get a Python SystemError sometimes if we access target_pid directly from inside function. forwarding_signal_handler.target_pid = target_pid for signum in _FORWARDABLE_SIGNALS: # Need to directly access libc function, # the state of the signal module is incorrect due to the clone() # (it may think we are in a different thread than the main thread). libc.signal(signum, forwarding_signal_handler) # Reactivate delivery of signals such that our handler gets called. reset_signal_handling()
python
def forward_all_signals_async(target_pid, process_name): """Install signal handlers that forward all signals to the given process.""" def forwarding_signal_handler(signum): _forward_signal(signum, process_name, forwarding_signal_handler.target_pid) # Somehow we get a Python SystemError sometimes if we access target_pid directly from inside function. forwarding_signal_handler.target_pid = target_pid for signum in _FORWARDABLE_SIGNALS: # Need to directly access libc function, # the state of the signal module is incorrect due to the clone() # (it may think we are in a different thread than the main thread). libc.signal(signum, forwarding_signal_handler) # Reactivate delivery of signals such that our handler gets called. reset_signal_handling()
['def', 'forward_all_signals_async', '(', 'target_pid', ',', 'process_name', ')', ':', 'def', 'forwarding_signal_handler', '(', 'signum', ')', ':', '_forward_signal', '(', 'signum', ',', 'process_name', ',', 'forwarding_signal_handler', '.', 'target_pid', ')', '# Somehow we get a Python SystemError sometimes if we access target_pid directly from inside function.', 'forwarding_signal_handler', '.', 'target_pid', '=', 'target_pid', 'for', 'signum', 'in', '_FORWARDABLE_SIGNALS', ':', '# Need to directly access libc function,', '# the state of the signal module is incorrect due to the clone()', '# (it may think we are in a different thread than the main thread).', 'libc', '.', 'signal', '(', 'signum', ',', 'forwarding_signal_handler', ')', '# Reactivate delivery of signals such that our handler gets called.', 'reset_signal_handling', '(', ')']
Install signal handlers that forward all signals to the given process.
['Install', 'signal', 'handlers', 'that', 'forward', 'all', 'signals', 'to', 'the', 'given', 'process', '.']
train
https://github.com/sosy-lab/benchexec/blob/44428f67f41384c03aea13e7e25f884764653617/benchexec/container.py#L324-L339
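The stdlib signal module would be the normal way to express this; the function above goes through libc precisely because, after clone(), the interpreter's signal machinery misjudges which thread it is in. Outside that corner case, the same forwarding idea in plain Python is:

import os
import signal

def forward_signals(target_pid, signums=(signal.SIGTERM, signal.SIGINT)):
    """Re-deliver the selected signals to target_pid instead of handling them."""
    def handler(signum, frame):
        os.kill(target_pid, signum)
    for signum in signums:
        signal.signal(signum, handler)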
3,034
mattrobenolt/django-sudo
sudo/decorators.py
sudo_required
def sudo_required(func): """ Enforces a view to have elevated privileges. Should likely be paired with ``@login_required``. >>> @sudo_required >>> def secure_page(request): >>> ... """ @wraps(func) def inner(request, *args, **kwargs): if not request.is_sudo(): return redirect_to_sudo(request.get_full_path()) return func(request, *args, **kwargs) return inner
python
def sudo_required(func): """ Enforces a view to have elevated privileges. Should likely be paired with ``@login_required``. >>> @sudo_required >>> def secure_page(request): >>> ... """ @wraps(func) def inner(request, *args, **kwargs): if not request.is_sudo(): return redirect_to_sudo(request.get_full_path()) return func(request, *args, **kwargs) return inner
['def', 'sudo_required', '(', 'func', ')', ':', '@', 'wraps', '(', 'func', ')', 'def', 'inner', '(', 'request', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'if', 'not', 'request', '.', 'is_sudo', '(', ')', ':', 'return', 'redirect_to_sudo', '(', 'request', '.', 'get_full_path', '(', ')', ')', 'return', 'func', '(', 'request', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'inner']
Enforces a view to have elevated privileges. Should likely be paired with ``@login_required``. >>> @sudo_required >>> def secure_page(request): >>> ...
['Enforces', 'a', 'view', 'to', 'have', 'elevated', 'privileges', '.', 'Should', 'likely', 'be', 'paired', 'with', '@login_required', '.']
train
https://github.com/mattrobenolt/django-sudo/blob/089e21a88bc3ebf9d76ea706f26707d2e4f3f729/sudo/decorators.py#L13-L27
3,035
SecurityInnovation/PGPy
pgpy/pgp.py
PGPSignature.is_expired
def is_expired(self): """ ``True`` if the signature has an expiration date, and is expired. Otherwise, ``False`` """ expires_at = self.expires_at if expires_at is not None and expires_at != self.created: return expires_at < datetime.utcnow() return False
python
def is_expired(self):
    """
    ``True`` if the signature has an expiration date, and is expired.
    Otherwise, ``False``
    """
    expires_at = self.expires_at
    if expires_at is not None and expires_at != self.created:
        return expires_at < datetime.utcnow()

    return False
['def', 'is_expired', '(', 'self', ')', ':', 'expires_at', '=', 'self', '.', 'expires_at', 'if', 'expires_at', 'is', 'not', 'None', 'and', 'expires_at', '!=', 'self', '.', 'created', ':', 'return', 'expires_at', '<', 'datetime', '.', 'utcnow', '(', ')', 'return', 'False']
``True`` if the signature has an expiration date, and is expired. Otherwise, ``False``
['True', 'if', 'the', 'signature', 'has', 'an', 'expiration', 'date', 'and', 'is', 'expired', '.', 'Otherwise', 'False']
train
https://github.com/SecurityInnovation/PGPy/blob/f1c3d68e32c334f5aa14c34580925e97f17f4fde/pgpy/pgp.py#L172-L180
3,036
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_tunnels.py
brocade_tunnels.overlay_gateway_access_lists_ipv6_in_cg_ipv6_acl_in_name
def overlay_gateway_access_lists_ipv6_in_cg_ipv6_acl_in_name(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    overlay_gateway = ET.SubElement(config, "overlay-gateway",
                                    xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    name_key = ET.SubElement(overlay_gateway, "name")
    name_key.text = kwargs.pop('name')
    access_lists = ET.SubElement(overlay_gateway, "access-lists")
    ipv6 = ET.SubElement(access_lists, "ipv6")
    in_cg = ET.SubElement(ipv6, "in")
    ipv6_acl_in_name = ET.SubElement(in_cg, "ipv6-acl-in-name")
    ipv6_acl_in_name.text = kwargs.pop('ipv6_acl_in_name')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
python
def overlay_gateway_access_lists_ipv6_in_cg_ipv6_acl_in_name(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    overlay_gateway = ET.SubElement(config, "overlay-gateway",
                                    xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    name_key = ET.SubElement(overlay_gateway, "name")
    name_key.text = kwargs.pop('name')
    access_lists = ET.SubElement(overlay_gateway, "access-lists")
    ipv6 = ET.SubElement(access_lists, "ipv6")
    in_cg = ET.SubElement(ipv6, "in")
    ipv6_acl_in_name = ET.SubElement(in_cg, "ipv6-acl-in-name")
    ipv6_acl_in_name.text = kwargs.pop('ipv6_acl_in_name')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
['def', 'overlay_gateway_access_lists_ipv6_in_cg_ipv6_acl_in_name', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'overlay_gateway', '=', 'ET', '.', 'SubElement', '(', 'config', ',', '"overlay-gateway"', ',', 'xmlns', '=', '"urn:brocade.com:mgmt:brocade-tunnels"', ')', 'name_key', '=', 'ET', '.', 'SubElement', '(', 'overlay_gateway', ',', '"name"', ')', 'name_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'name'", ')', 'access_lists', '=', 'ET', '.', 'SubElement', '(', 'overlay_gateway', ',', '"access-lists"', ')', 'ipv6', '=', 'ET', '.', 'SubElement', '(', 'access_lists', ',', '"ipv6"', ')', 'in_cg', '=', 'ET', '.', 'SubElement', '(', 'ipv6', ',', '"in"', ')', 'ipv6_acl_in_name', '=', 'ET', '.', 'SubElement', '(', 'in_cg', ',', '"ipv6-acl-in-name"', ')', 'ipv6_acl_in_name', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'ipv6_acl_in_name'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']
Auto Generated Code
['Auto', 'Generated', 'Code']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_tunnels.py#L786-L800
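The method above is generated code, but the underlying technique is plain xml.etree.ElementTree: nested SubElement calls build the request document. A runnable, standard-library-only sketch with illustrative values:

import xml.etree.ElementTree as ET

config = ET.Element("config")
gw = ET.SubElement(config, "overlay-gateway",
                   xmlns="urn:brocade.com:mgmt:brocade-tunnels")
ET.SubElement(gw, "name").text = "gw1"
ipv6 = ET.SubElement(ET.SubElement(gw, "access-lists"), "ipv6")
in_cg = ET.SubElement(ipv6, "in")
ET.SubElement(in_cg, "ipv6-acl-in-name").text = "my-v6-acl"

print(ET.tostring(config).decode())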
3,037
juju/python-libjuju
juju/client/_client2.py
BackupsFacade.Create
async def Create(self, notes):
    '''
    notes : str
    Returns -> typing.Union[str, int, _ForwardRef('Number')]
    '''
    # map input types to rpc msg
    _params = dict()
    msg = dict(type='Backups',
               request='Create',
               version=2,
               params=_params)
    _params['notes'] = notes
    reply = await self.rpc(msg)
    return reply
python
async def Create(self, notes):
    '''
    notes : str
    Returns -> typing.Union[str, int, _ForwardRef('Number')]
    '''
    # map input types to rpc msg
    _params = dict()
    msg = dict(type='Backups',
               request='Create',
               version=2,
               params=_params)
    _params['notes'] = notes
    reply = await self.rpc(msg)
    return reply
['async', 'def', 'Create', '(', 'self', ',', 'notes', ')', ':', '# map input types to rpc msg', '_params', '=', 'dict', '(', ')', 'msg', '=', 'dict', '(', 'type', '=', "'Backups'", ',', 'request', '=', "'Create'", ',', 'version', '=', '2', ',', 'params', '=', '_params', ')', '_params', '[', "'notes'", ']', '=', 'notes', 'reply', '=', 'await', 'self', '.', 'rpc', '(', 'msg', ')', 'return', 'reply']
notes : str
Returns -> typing.Union[str, int, _ForwardRef('Number')]
['notes', ':', 'str', 'Returns', '-', '>', 'typing', '.', 'Union', '[', 'str', 'int', '_ForwardRef', '(', 'Number', ')', ']']
train
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/client/_client2.py#L2162-L2175
3,038
saltstack/salt
salt/cloud/__init__.py
CloudClient.destroy
def destroy(self, names):
    '''
    Destroy the named VMs
    '''
    mapper = salt.cloud.Map(self._opts_defaults(destroy=True))
    if isinstance(names, six.string_types):
        names = names.split(',')
    return salt.utils.data.simple_types_filter(
        mapper.destroy(names)
    )
python
def destroy(self, names):
    '''
    Destroy the named VMs
    '''
    mapper = salt.cloud.Map(self._opts_defaults(destroy=True))
    if isinstance(names, six.string_types):
        names = names.split(',')
    return salt.utils.data.simple_types_filter(
        mapper.destroy(names)
    )
['def', 'destroy', '(', 'self', ',', 'names', ')', ':', 'mapper', '=', 'salt', '.', 'cloud', '.', 'Map', '(', 'self', '.', '_opts_defaults', '(', 'destroy', '=', 'True', ')', ')', 'if', 'isinstance', '(', 'names', ',', 'six', '.', 'string_types', ')', ':', 'names', '=', 'names', '.', 'split', '(', "','", ')', 'return', 'salt', '.', 'utils', '.', 'data', '.', 'simple_types_filter', '(', 'mapper', '.', 'destroy', '(', 'names', ')', ')']
Destroy the named VMs
['Destroy', 'the', 'named', 'VMs']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/__init__.py#L382-L391
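The only pure-Python logic in the record above is the name normalisation: a comma-separated string becomes a list before mapper.destroy() runs. A standalone illustration (six.string_types is just str on Python 3):

def normalise_names(names):
    if isinstance(names, str):
        names = names.split(',')
    return names

print(normalise_names('web1,web2'))  # ['web1', 'web2']
print(normalise_names(['db1']))      # ['db1'] (already a list)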
3,039
openstack/proliantutils
proliantutils/ilo/ribcl.py
RIBCLOperations._parse_storage_embedded_health
def _parse_storage_embedded_health(self, data):
    """Gets the storage data from get_embedded_health

    Parse the get_host_health_data() for essential properties

    :param data: the output returned by get_host_health_data()
    :returns: disk size in GB.
    """
    local_gb = 0
    storage = self.get_value_as_list(data['GET_EMBEDDED_HEALTH_DATA'],
                                     'STORAGE')
    if storage is None:
        # We don't raise an exception because this dictionary
        # is available only when RAID is configured.
        # If we raise an error here then we will always fail
        # inspection where this module is consumed. Hence
        # as a workaround just return 0.
        return local_gb

    minimum = local_gb
    for item in storage:
        cntlr = self.get_value_as_list(item, 'CONTROLLER')
        if cntlr is None:
            continue
        for s in cntlr:
            drive = self.get_value_as_list(s, 'LOGICAL_DRIVE')
            if drive is None:
                continue
            for item in drive:
                for key, val in item.items():
                    if key == 'CAPACITY':
                        capacity = val['VALUE']
                        local_bytes = (strutils.string_to_bytes(
                            capacity.replace(' ', ''), return_int=True))
                        local_gb = int(local_bytes / (1024 * 1024 * 1024))
                        if minimum >= local_gb or minimum == 0:
                            minimum = local_gb

    # Return a disk size 1 GB less than the actual disk size. This prevents
    # the deploy from failing in Nova when root_gb is the same as local_gb
    # in Ironic. When the disk size is used as a root_device hint, it
    # should be given as the actual size, i.e.
    # ironic (node.properties['local_gb'] + 1), else the root device
    # hint will fail.
    if minimum:
        minimum = minimum - 1
    return minimum
python
def _parse_storage_embedded_health(self, data):
    """Gets the storage data from get_embedded_health

    Parse the get_host_health_data() for essential properties

    :param data: the output returned by get_host_health_data()
    :returns: disk size in GB.
    """
    local_gb = 0
    storage = self.get_value_as_list(data['GET_EMBEDDED_HEALTH_DATA'],
                                     'STORAGE')
    if storage is None:
        # We don't raise an exception because this dictionary
        # is available only when RAID is configured.
        # If we raise an error here then we will always fail
        # inspection where this module is consumed. Hence
        # as a workaround just return 0.
        return local_gb

    minimum = local_gb
    for item in storage:
        cntlr = self.get_value_as_list(item, 'CONTROLLER')
        if cntlr is None:
            continue
        for s in cntlr:
            drive = self.get_value_as_list(s, 'LOGICAL_DRIVE')
            if drive is None:
                continue
            for item in drive:
                for key, val in item.items():
                    if key == 'CAPACITY':
                        capacity = val['VALUE']
                        local_bytes = (strutils.string_to_bytes(
                            capacity.replace(' ', ''), return_int=True))
                        local_gb = int(local_bytes / (1024 * 1024 * 1024))
                        if minimum >= local_gb or minimum == 0:
                            minimum = local_gb

    # Return a disk size 1 GB less than the actual disk size. This prevents
    # the deploy from failing in Nova when root_gb is the same as local_gb
    # in Ironic. When the disk size is used as a root_device hint, it
    # should be given as the actual size, i.e.
    # ironic (node.properties['local_gb'] + 1), else the root device
    # hint will fail.
    if minimum:
        minimum = minimum - 1
    return minimum
['def', '_parse_storage_embedded_health', '(', 'self', ',', 'data', ')', ':', 'local_gb', '=', '0', 'storage', '=', 'self', '.', 'get_value_as_list', '(', 'data', '[', "'GET_EMBEDDED_HEALTH_DATA'", ']', ',', "'STORAGE'", ')', 'if', 'storage', 'is', 'None', ':', '# We dont raise exception because this dictionary', '# is available only when RAID is configured.', '# If we raise error here then we will always fail', '# inspection where this module is consumed. Hence', '# as a workaround just return 0.', 'return', 'local_gb', 'minimum', '=', 'local_gb', 'for', 'item', 'in', 'storage', ':', 'cntlr', '=', 'self', '.', 'get_value_as_list', '(', 'item', ',', "'CONTROLLER'", ')', 'if', 'cntlr', 'is', 'None', ':', 'continue', 'for', 's', 'in', 'cntlr', ':', 'drive', '=', 'self', '.', 'get_value_as_list', '(', 's', ',', "'LOGICAL_DRIVE'", ')', 'if', 'drive', 'is', 'None', ':', 'continue', 'for', 'item', 'in', 'drive', ':', 'for', 'key', ',', 'val', 'in', 'item', '.', 'items', '(', ')', ':', 'if', 'key', '==', "'CAPACITY'", ':', 'capacity', '=', 'val', '[', "'VALUE'", ']', 'local_bytes', '=', '(', 'strutils', '.', 'string_to_bytes', '(', 'capacity', '.', 'replace', '(', "' '", ',', "''", ')', ',', 'return_int', '=', 'True', ')', ')', 'local_gb', '=', 'int', '(', 'local_bytes', '/', '(', '1024', '*', '1024', '*', '1024', ')', ')', 'if', 'minimum', '>=', 'local_gb', 'or', 'minimum', '==', '0', ':', 'minimum', '=', 'local_gb', '# Return disk size 1 less than the actual disk size. This prevents', '# the deploy to fail from Nova when root_gb is same as local_gb', '# in Ironic. When the disk size is used as root_device hints,', '# then it should be given as the actual size i.e.', "# ironic (node.properties['local_gb'] + 1) else root device", '# hint will fail.', 'if', 'minimum', ':', 'minimum', '=', 'minimum', '-', '1', 'return', 'minimum']
Gets the storage data from get_embedded_health

Parse the get_host_health_data() for essential properties

:param data: the output returned by get_host_health_data()
:returns: disk size in GB.
['Gets', 'the', 'storage', 'data', 'from', 'get_embedded_health']
train
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L889-L938
3,040
zarr-developers/zarr
zarr/hierarchy.py
Group.full
def full(self, name, fill_value, **kwargs):
    """Create an array. Keyword arguments as per :func:`zarr.creation.full`."""
    return self._write_op(self._full_nosync, name, fill_value, **kwargs)
python
def full(self, name, fill_value, **kwargs):
    """Create an array. Keyword arguments as per :func:`zarr.creation.full`."""
    return self._write_op(self._full_nosync, name, fill_value, **kwargs)
['def', 'full', '(', 'self', ',', 'name', ',', 'fill_value', ',', '*', '*', 'kwargs', ')', ':', 'return', 'self', '.', '_write_op', '(', 'self', '.', '_full_nosync', ',', 'name', ',', 'fill_value', ',', '*', '*', 'kwargs', ')']
Create an array. Keyword arguments as per :func:`zarr.creation.full`.
['Create', 'an', 'array', '.', 'Keyword', 'arguments', 'as', 'per', ':', 'func', ':', 'zarr', '.', 'creation', '.', 'full', '.']
train
https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/hierarchy.py#L893-L896
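A usage sketch for the record above, assuming the zarr package is installed; Group.full() forwards keyword arguments such as shape, chunks and dtype on to zarr.creation.full():

import zarr

root = zarr.group()
z = root.full('twos', fill_value=2, shape=(100, 100), chunks=(10, 10), dtype='i4')
print(z[0, 0])  # 2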
3,041
mongolab/dex
dex/dex.py
Dex.watch_logfile
def watch_logfile(self, logfile_path):
    """Analyzes queries from the tail of a given log file"""
    self._run_stats['logSource'] = logfile_path
    log_parser = LogParser()

    # For each new line in the logfile ...
    output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
    try:
        firstLine = True
        for line in self._tail_file(open(logfile_path),
                                    WATCH_INTERVAL_SECONDS):
            if firstLine:
                self._run_stats['timeRange']['start'] = get_line_time(line)
                firstLine = False  # record the start time only once
            self._process_query(line, log_parser)
            self._run_stats['timeRange']['end'] = get_line_time(line)
            if time.time() >= output_time:
                self._output_aggregated_report(sys.stderr)
                output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
    except KeyboardInterrupt:
        sys.stderr.write("Interrupt received\n")
    finally:
        self._output_aggregated_report(sys.stdout)

    return 0
python
def watch_logfile(self, logfile_path):
    """Analyzes queries from the tail of a given log file"""
    self._run_stats['logSource'] = logfile_path
    log_parser = LogParser()

    # For each new line in the logfile ...
    output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
    try:
        firstLine = True
        for line in self._tail_file(open(logfile_path),
                                    WATCH_INTERVAL_SECONDS):
            if firstLine:
                self._run_stats['timeRange']['start'] = get_line_time(line)
                firstLine = False  # record the start time only once
            self._process_query(line, log_parser)
            self._run_stats['timeRange']['end'] = get_line_time(line)
            if time.time() >= output_time:
                self._output_aggregated_report(sys.stderr)
                output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
    except KeyboardInterrupt:
        sys.stderr.write("Interrupt received\n")
    finally:
        self._output_aggregated_report(sys.stdout)

    return 0
['def', 'watch_logfile', '(', 'self', ',', 'logfile_path', ')', ':', 'self', '.', '_run_stats', '[', "'logSource'", ']', '=', 'logfile_path', 'log_parser', '=', 'LogParser', '(', ')', '# For each new line in the logfile ...', 'output_time', '=', 'time', '.', 'time', '(', ')', '+', 'WATCH_DISPLAY_REFRESH_SECONDS', 'try', ':', 'firstLine', '=', 'True', 'for', 'line', 'in', 'self', '.', '_tail_file', '(', 'open', '(', 'logfile_path', ')', ',', 'WATCH_INTERVAL_SECONDS', ')', ':', 'if', 'firstLine', ':', 'self', '.', '_run_stats', '[', "'timeRange'", ']', '[', "'start'", ']', '=', 'get_line_time', '(', 'line', ')', 'self', '.', '_process_query', '(', 'line', ',', 'log_parser', ')', 'self', '.', '_run_stats', '[', "'timeRange'", ']', '[', "'end'", ']', '=', 'get_line_time', '(', 'line', ')', 'if', 'time', '.', 'time', '(', ')', '>=', 'output_time', ':', 'self', '.', '_output_aggregated_report', '(', 'sys', '.', 'stderr', ')', 'output_time', '=', 'time', '.', 'time', '(', ')', '+', 'WATCH_DISPLAY_REFRESH_SECONDS', 'except', 'KeyboardInterrupt', ':', 'sys', '.', 'stderr', '.', 'write', '(', '"Interrupt received\\n"', ')', 'finally', ':', 'self', '.', '_output_aggregated_report', '(', 'sys', '.', 'stdout', ')', 'return', '0']
Analyzes queries from the tail of a given log file
['Analyzes', 'queries', 'from', 'the', 'tail', 'of', 'a', 'given', 'log', 'file']
train
https://github.com/mongolab/dex/blob/f6dc27321028ef1ffdb3d4b1165fdcce7c8f20aa/dex/dex.py#L264-L287
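The record above relies on a _tail_file() helper that is not shown. A minimal follow-style tailer with the same shape might look like this; it is an assumption, not dex's actual implementation:

import time

def _tail_file(fileobj, interval_seconds):
    # Yield new lines appended to fileobj, polling every interval.
    fileobj.seek(0, 2)  # start at the end of the file
    while True:
        line = fileobj.readline()
        if not line:
            time.sleep(interval_seconds)
            continue
        yield line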
3,042
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/pymongo/cursor.py
Cursor.min
def min(self, spec):
    """Adds `min` operator that specifies lower bound for specific index.

    :Parameters:
      - `spec`: a list of field, limit pairs specifying the inclusive
        lower bound for all keys of a specific index in order.

    .. versionadded:: 2.7
    """
    if not isinstance(spec, (list, tuple)):
        raise TypeError("spec must be an instance of list or tuple")

    self.__check_okay_to_chain()
    self.__min = SON(spec)
    return self
python
def min(self, spec):
    """Adds `min` operator that specifies lower bound for specific index.

    :Parameters:
      - `spec`: a list of field, limit pairs specifying the inclusive
        lower bound for all keys of a specific index in order.

    .. versionadded:: 2.7
    """
    if not isinstance(spec, (list, tuple)):
        raise TypeError("spec must be an instance of list or tuple")

    self.__check_okay_to_chain()
    self.__min = SON(spec)
    return self
['def', 'min', '(', 'self', ',', 'spec', ')', ':', 'if', 'not', 'isinstance', '(', 'spec', ',', '(', 'list', ',', 'tuple', ')', ')', ':', 'raise', 'TypeError', '(', '"spec must be an instance of list or tuple"', ')', 'self', '.', '__check_okay_to_chain', '(', ')', 'self', '.', '__min', '=', 'SON', '(', 'spec', ')', 'return', 'self']
Adds `min` operator that specifies lower bound for specific index.

:Parameters:
  - `spec`: a list of field, limit pairs specifying the inclusive
    lower bound for all keys of a specific index in order.

.. versionadded:: 2.7
['Adds', 'min', 'operator', 'that', 'specifies', 'lower', 'bound', 'for', 'specific', 'index', '.']
train
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/cursor.py#L620-L634
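A usage sketch for the record above, assuming a reachable MongoDB and pymongo; min() bounds the scan of one specific index, so it is normally paired with hint() (recent server versions require the pairing):

from pymongo import MongoClient

coll = MongoClient().test.scores
coll.create_index('grade')
for doc in coll.find().min([('grade', 10)]).hint([('grade', 1)]):
    print(doc)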
3,043
saltstack/salt
salt/modules/cassandra_cql.py
list_permissions
def list_permissions(username=None, resource=None, resource_type='keyspace',
                     permission=None, contact_points=None, port=None,
                     cql_user=None, cql_pass=None):
    '''
    List permissions.

    :param username: The name of the user to list permissions for.
    :type username: str
    :param resource: The resource (keyspace or table), if None, permissions
        for all resources are listed.
    :type resource: str
    :param resource_type: The resource_type (keyspace or table), defaults to
        'keyspace'.
    :type resource_type: str
    :param permission: A permission name (e.g. select), if None, all
        permissions are listed.
    :type permission: str
    :param contact_points: The Cassandra cluster addresses, can either be a
        string or a list of IPs.
    :type contact_points: str | list[str]
    :param cql_user: The Cassandra user if authentication is turned on.
    :type cql_user: str
    :param cql_pass: The Cassandra user password if authentication is
        turned on.
    :type cql_pass: str
    :param port: The Cassandra cluster port, defaults to None.
    :type port: int
    :return: Dictionary of permissions.
    :rtype: dict

    CLI Example:

    .. code-block:: bash

        salt 'minion1' cassandra_cql.list_permissions

        salt 'minion1' cassandra_cql.list_permissions username=joe resource=test_keyspace permission=select

        salt 'minion1' cassandra_cql.list_permissions username=joe resource=test_table resource_type=table \
            permission=select contact_points=minion1
    '''
    keyspace_cql = "{0} {1}".format(resource_type, resource) if resource else "all keyspaces"
    permission_cql = "{0} permission".format(permission) if permission else "all permissions"
    query = "list {0} on {1}".format(permission_cql, keyspace_cql)

    if username:
        query = "{0} of {1}".format(query, username)

    log.debug("Attempting to list permissions with query '%s'", query)

    ret = {}

    try:
        ret = cql_query(query, contact_points, port, cql_user, cql_pass)
    except CommandExecutionError:
        log.critical('Could not list permissions.')
        raise
    except BaseException as e:
        log.critical('Unexpected error while listing permissions: %s', e)
        raise

    return ret
python
def list_permissions(username=None, resource=None, resource_type='keyspace',
                     permission=None, contact_points=None, port=None,
                     cql_user=None, cql_pass=None):
    '''
    List permissions.

    :param username: The name of the user to list permissions for.
    :type username: str
    :param resource: The resource (keyspace or table), if None, permissions
        for all resources are listed.
    :type resource: str
    :param resource_type: The resource_type (keyspace or table), defaults to
        'keyspace'.
    :type resource_type: str
    :param permission: A permission name (e.g. select), if None, all
        permissions are listed.
    :type permission: str
    :param contact_points: The Cassandra cluster addresses, can either be a
        string or a list of IPs.
    :type contact_points: str | list[str]
    :param cql_user: The Cassandra user if authentication is turned on.
    :type cql_user: str
    :param cql_pass: The Cassandra user password if authentication is
        turned on.
    :type cql_pass: str
    :param port: The Cassandra cluster port, defaults to None.
    :type port: int
    :return: Dictionary of permissions.
    :rtype: dict

    CLI Example:

    .. code-block:: bash

        salt 'minion1' cassandra_cql.list_permissions

        salt 'minion1' cassandra_cql.list_permissions username=joe resource=test_keyspace permission=select

        salt 'minion1' cassandra_cql.list_permissions username=joe resource=test_table resource_type=table \
            permission=select contact_points=minion1
    '''
    keyspace_cql = "{0} {1}".format(resource_type, resource) if resource else "all keyspaces"
    permission_cql = "{0} permission".format(permission) if permission else "all permissions"
    query = "list {0} on {1}".format(permission_cql, keyspace_cql)

    if username:
        query = "{0} of {1}".format(query, username)

    log.debug("Attempting to list permissions with query '%s'", query)

    ret = {}

    try:
        ret = cql_query(query, contact_points, port, cql_user, cql_pass)
    except CommandExecutionError:
        log.critical('Could not list permissions.')
        raise
    except BaseException as e:
        log.critical('Unexpected error while listing permissions: %s', e)
        raise

    return ret
['def', 'list_permissions', '(', 'username', '=', 'None', ',', 'resource', '=', 'None', ',', 'resource_type', '=', "'keyspace'", ',', 'permission', '=', 'None', ',', 'contact_points', '=', 'None', ',', 'port', '=', 'None', ',', 'cql_user', '=', 'None', ',', 'cql_pass', '=', 'None', ')', ':', 'keyspace_cql', '=', '"{0} {1}"', '.', 'format', '(', 'resource_type', ',', 'resource', ')', 'if', 'resource', 'else', '"all keyspaces"', 'permission_cql', '=', '"{0} permission"', '.', 'format', '(', 'permission', ')', 'if', 'permission', 'else', '"all permissions"', 'query', '=', '"list {0} on {1}"', '.', 'format', '(', 'permission_cql', ',', 'keyspace_cql', ')', 'if', 'username', ':', 'query', '=', '"{0} of {1}"', '.', 'format', '(', 'query', ',', 'username', ')', 'log', '.', 'debug', '(', '"Attempting to list permissions with query \'%s\'"', ',', 'query', ')', 'ret', '=', '{', '}', 'try', ':', 'ret', '=', 'cql_query', '(', 'query', ',', 'contact_points', ',', 'port', ',', 'cql_user', ',', 'cql_pass', ')', 'except', 'CommandExecutionError', ':', 'log', '.', 'critical', '(', "'Could not list permissions.'", ')', 'raise', 'except', 'BaseException', 'as', 'e', ':', 'log', '.', 'critical', '(', "'Unexpected error while listing permissions: %s'", ',', 'e', ')', 'raise', 'return', 'ret']
List permissions.

:param username: The name of the user to list permissions for.
:type username: str
:param resource: The resource (keyspace or table), if None, permissions
    for all resources are listed.
:type resource: str
:param resource_type: The resource_type (keyspace or table), defaults to
    'keyspace'.
:type resource_type: str
:param permission: A permission name (e.g. select), if None, all
    permissions are listed.
:type permission: str
:param contact_points: The Cassandra cluster addresses, can either be a
    string or a list of IPs.
:type contact_points: str | list[str]
:param cql_user: The Cassandra user if authentication is turned on.
:type cql_user: str
:param cql_pass: The Cassandra user password if authentication is
    turned on.
:type cql_pass: str
:param port: The Cassandra cluster port, defaults to None.
:type port: int
:return: Dictionary of permissions.
:rtype: dict

CLI Example:

.. code-block:: bash

    salt 'minion1' cassandra_cql.list_permissions

    salt 'minion1' cassandra_cql.list_permissions username=joe resource=test_keyspace permission=select

    salt 'minion1' cassandra_cql.list_permissions username=joe resource=test_table resource_type=table \
        permission=select contact_points=minion1
['List', 'permissions', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cassandra_cql.py#L939-L994
3,044
tadashi-aikawa/owlmixin
owlmixin/owlcollections.py
TList.key_by
def key_by(self, to_key: Callable[[T], str]) -> 'TDict[T]':
    """
    :param to_key: value -> key
    Usage:

        >>> TList(['a1', 'b2', 'c3']).key_by(lambda x: x[0]).to_json()
        '{"a": "a1","b": "b2","c": "c3"}'
        >>> TList([1, 2, 3, 4, 5]).key_by(lambda x: x % 2).to_json()
        '{"0": 4,"1": 5}'
    """
    return TDict({to_key(x): x for x in self})
python
def key_by(self, to_key: Callable[[T], str]) -> 'TDict[T]':
    """
    :param to_key: value -> key
    Usage:

        >>> TList(['a1', 'b2', 'c3']).key_by(lambda x: x[0]).to_json()
        '{"a": "a1","b": "b2","c": "c3"}'
        >>> TList([1, 2, 3, 4, 5]).key_by(lambda x: x % 2).to_json()
        '{"0": 4,"1": 5}'
    """
    return TDict({to_key(x): x for x in self})
['def', 'key_by', '(', 'self', ',', 'to_key', ':', 'Callable', '[', '[', 'T', ']', ',', 'str', ']', ')', '->', "'TDict[T]'", ':', 'return', 'TDict', '(', '{', 'to_key', '(', 'x', ')', ':', 'x', 'for', 'x', 'in', 'self', '}', ')']
:param to_key: value -> key
Usage:

    >>> TList(['a1', 'b2', 'c3']).key_by(lambda x: x[0]).to_json()
    '{"a": "a1","b": "b2","c": "c3"}'
    >>> TList([1, 2, 3, 4, 5]).key_by(lambda x: x % 2).to_json()
    '{"0": 4,"1": 5}'
[':', 'param', 'to_key', ':', 'value', '-', '>', 'key', 'Usage', ':']
train
https://github.com/tadashi-aikawa/owlmixin/blob/7c4a042c3008abddc56a8e8e55ae930d276071f5/owlmixin/owlcollections.py#L208-L218
3,045
HumanCellAtlas/cloud-blobstore
cloud_blobstore/__init__.py
BlobStore.get_copy_token
def get_copy_token(
        self,
        bucket: str,
        key: str,
        cloud_checksum: str,
) -> typing.Any:
    """
    Given a bucket, key, and the expected cloud-provided checksum, retrieve a
    token that can be passed into :func:`~cloud_blobstore.BlobStore.copy`
    that guarantees the copy refers to the same version of the blob
    identified by the checksum.

    :param bucket: the bucket the object resides in.
    :param key: the key of the object for which checksum is being retrieved.
    :param cloud_checksum: the expected cloud-provided checksum.
    :return: an opaque copy token
    """
    raise NotImplementedError()
python
def get_copy_token(
        self,
        bucket: str,
        key: str,
        cloud_checksum: str,
) -> typing.Any:
    """
    Given a bucket, key, and the expected cloud-provided checksum, retrieve a
    token that can be passed into :func:`~cloud_blobstore.BlobStore.copy`
    that guarantees the copy refers to the same version of the blob
    identified by the checksum.

    :param bucket: the bucket the object resides in.
    :param key: the key of the object for which checksum is being retrieved.
    :param cloud_checksum: the expected cloud-provided checksum.
    :return: an opaque copy token
    """
    raise NotImplementedError()
['def', 'get_copy_token', '(', 'self', ',', 'bucket', ':', 'str', ',', 'key', ':', 'str', ',', 'cloud_checksum', ':', 'str', ',', ')', '->', 'typing', '.', 'Any', ':', 'raise', 'NotImplementedError', '(', ')']
Given a bucket, key, and the expected cloud-provided checksum, retrieve a
token that can be passed into :func:`~cloud_blobstore.BlobStore.copy` that
guarantees the copy refers to the same version of the blob identified by
the checksum.

:param bucket: the bucket the object resides in.
:param key: the key of the object for which checksum is being retrieved.
:param cloud_checksum: the expected cloud-provided checksum.
:return: an opaque copy token
['Given', 'a', 'bucket', 'key', 'and', 'the', 'expected', 'cloud', '-', 'provided', 'checksum', 'retrieve', 'a', 'token', 'that', 'can', 'be', 'passed', 'into', ':', 'func', ':', '~cloud_blobstore', '.', 'BlobStore', '.', 'copy', 'that', 'guarantees', 'the', 'copy', 'refers', 'to', 'the', 'same', 'version', 'of', 'the', 'blob', 'identified', 'by', 'the', 'checksum', '.', ':', 'param', 'bucket', ':', 'the', 'bucket', 'the', 'object', 'resides', 'in', '.', ':', 'param', 'key', ':', 'the', 'key', 'of', 'the', 'object', 'for', 'which', 'checksum', 'is', 'being', 'retrieved', '.', ':', 'param', 'cloud_checksum', ':', 'the', 'expected', 'cloud', '-', 'provided', 'checksum', '.', ':', 'return', ':', 'an', 'opaque', 'copy', 'token']
train
https://github.com/HumanCellAtlas/cloud-blobstore/blob/b8a60e8e8c0da0e39dda084cb467a34cd2d1ef0a/cloud_blobstore/__init__.py#L189-L204
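Since the record above is an abstract method, a toy backend shows what a concrete implementation must guarantee. This sketch uses the stored checksum itself as the opaque token; it is illustrative only, not the repo's real S3/GS code:

import typing

class InMemoryBlobStore:
    def __init__(self):
        self.checksums = {}  # (bucket, key) -> current checksum

    def get_copy_token(self, bucket: str, key: str,
                       cloud_checksum: str) -> typing.Any:
        current = self.checksums[(bucket, key)]
        if current != cloud_checksum:
            raise ValueError("blob changed since the checksum was taken")
        return current  # token == checksum in this toy backend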
3,046
anchor/elasticsearchadmin
esadmin/elasticsearchadmin.py
Connection.get_index_translog_disable_flush
def get_index_translog_disable_flush(self):
    """Return a dictionary showing the position of the
    'translog.disable_flush' knob for each index in the cluster. The
    dictionary will look like this:

        {
          "index1": True,       # Autoflushing DISABLED
          "index2": False,      # Autoflushing ENABLED
          "index3": "unknown",  # Using default setting (probably enabled)
          ...
        }
    """
    disabled = {}
    settings = self.get('/_settings')
    setting_getters = [
        lambda s: s['index.translog.disable_flush'],
        lambda s: s['index']['translog']['disable_flush']]

    for idx in settings:
        idx_settings = settings[idx]['settings']
        for getter in setting_getters:
            try:
                disabled[idx] = booleanise(getter(idx_settings))
            except KeyError:
                pass
        if idx not in disabled:
            disabled[idx] = 'unknown'

    return disabled
python
def get_index_translog_disable_flush(self):
    """Return a dictionary showing the position of the
    'translog.disable_flush' knob for each index in the cluster. The
    dictionary will look like this:

        {
          "index1": True,       # Autoflushing DISABLED
          "index2": False,      # Autoflushing ENABLED
          "index3": "unknown",  # Using default setting (probably enabled)
          ...
        }
    """
    disabled = {}
    settings = self.get('/_settings')
    setting_getters = [
        lambda s: s['index.translog.disable_flush'],
        lambda s: s['index']['translog']['disable_flush']]

    for idx in settings:
        idx_settings = settings[idx]['settings']
        for getter in setting_getters:
            try:
                disabled[idx] = booleanise(getter(idx_settings))
            except KeyError:
                pass
        if idx not in disabled:
            disabled[idx] = 'unknown'

    return disabled
['def', 'get_index_translog_disable_flush', '(', 'self', ')', ':', 'disabled', '=', '{', '}', 'settings', '=', 'self', '.', 'get', '(', "'/_settings'", ')', 'setting_getters', '=', '[', 'lambda', 's', ':', 's', '[', "'index.translog.disable_flush'", ']', ',', 'lambda', 's', ':', 's', '[', "'index'", ']', '[', "'translog'", ']', '[', "'disable_flush'", ']', ']', 'for', 'idx', 'in', 'settings', ':', 'idx_settings', '=', 'settings', '[', 'idx', ']', '[', "'settings'", ']', 'for', 'getter', 'in', 'setting_getters', ':', 'try', ':', 'disabled', '[', 'idx', ']', '=', 'booleanise', '(', 'getter', '(', 'idx_settings', ')', ')', 'except', 'KeyError', 'as', 'e', ':', 'pass', 'if', 'not', 'idx', 'in', 'disabled', ':', 'disabled', '[', 'idx', ']', '=', "'unknown'", 'return', 'disabled']
Return a dictionary showing the position of the
'translog.disable_flush' knob for each index in the cluster. The
dictionary will look like this:

    {
      "index1": True,       # Autoflushing DISABLED
      "index2": False,      # Autoflushing ENABLED
      "index3": "unknown",  # Using default setting (probably enabled)
      ...
    }
['Return', 'a', 'dictionary', 'showing', 'the', 'position', 'of', 'the', 'translog', '.', 'disable_flush', 'knob', 'for', 'each', 'index', 'in', 'the', 'cluster', '.']
train
https://github.com/anchor/elasticsearchadmin/blob/80b5adf79ead341ce0ded34119b087a343425983/esadmin/elasticsearchadmin.py#L103-L131
3,047
knipknap/exscript
Exscript/util/mail.py
Mail.get_smtp_mail
def get_smtp_mail(self):
    """
    Returns the SMTP formatted email, as it may be passed to sendmail.

    :rtype: string
    :return: The SMTP formatted mail.
    """
    header = self.get_smtp_header()
    body = self.get_body().replace('\n', '\r\n')
    return header + '\r\n' + body + '\r\n'
python
def get_smtp_mail(self):
    """
    Returns the SMTP formatted email, as it may be passed to sendmail.

    :rtype: string
    :return: The SMTP formatted mail.
    """
    header = self.get_smtp_header()
    body = self.get_body().replace('\n', '\r\n')
    return header + '\r\n' + body + '\r\n'
['def', 'get_smtp_mail', '(', 'self', ')', ':', 'header', '=', 'self', '.', 'get_smtp_header', '(', ')', 'body', '=', 'self', '.', 'get_body', '(', ')', '.', 'replace', '(', "'\\n'", ',', "'\\r\\n'", ')', 'return', 'header', '+', "'\\r\\n'", '+', 'body', '+', "'\\r\\n'"]
Returns the SMTP formatted email, as it may be passed to sendmail.

:rtype: string
:return: The SMTP formatted mail.
['Returns', 'the', 'SMTP', 'formatted', 'email', 'as', 'it', 'may', 'be', 'passed', 'to', 'sendmail', '.']
train
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/util/mail.py#L369-L378
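A standalone illustration of the newline handling in the record above: SMTP wants CRLF line endings, so the body's bare "\n" characters are rewritten before the mail is assembled:

header = "Subject: hello"
body = "line one\nline two"
smtp_mail = header + '\r\n' + body.replace('\n', '\r\n') + '\r\n'
print(repr(smtp_mail))  # 'Subject: hello\r\nline one\r\nline two\r\n'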
3,048
quantmind/dynts
dynts/lib/fallback/operators.py
roll_mean
def roll_mean(input, window):
    '''Apply a rolling mean function to an array.
    This is a simple rolling aggregation.'''
    nobs, i, j, sum_x = 0, 0, 0, 0.
    N = len(input)
    if window > N:
        raise ValueError('Out of bound')
    output = np.ndarray(N - window + 1, dtype=input.dtype)

    for val in input[:window]:
        if val == val:
            nobs += 1
            sum_x += val

    output[j] = NaN if not nobs else sum_x / nobs

    for val in input[window:]:
        prev = input[j]
        if prev == prev:
            sum_x -= prev
            nobs -= 1
        if val == val:
            nobs += 1
            sum_x += val
        j += 1
        output[j] = NaN if not nobs else sum_x / nobs

    return output
python
def roll_mean(input, window):
    '''Apply a rolling mean function to an array.
    This is a simple rolling aggregation.'''
    nobs, i, j, sum_x = 0, 0, 0, 0.
    N = len(input)
    if window > N:
        raise ValueError('Out of bound')
    output = np.ndarray(N - window + 1, dtype=input.dtype)

    for val in input[:window]:
        if val == val:
            nobs += 1
            sum_x += val

    output[j] = NaN if not nobs else sum_x / nobs

    for val in input[window:]:
        prev = input[j]
        if prev == prev:
            sum_x -= prev
            nobs -= 1
        if val == val:
            nobs += 1
            sum_x += val
        j += 1
        output[j] = NaN if not nobs else sum_x / nobs

    return output
['def', 'roll_mean', '(', 'input', ',', 'window', ')', ':', 'nobs', ',', 'i', ',', 'j', ',', 'sum_x', '=', '0', ',', '0', ',', '0', ',', '0.', 'N', '=', 'len', '(', 'input', ')', 'if', 'window', '>', 'N', ':', 'raise', 'ValueError', '(', "'Out of bound'", ')', 'output', '=', 'np', '.', 'ndarray', '(', 'N', '-', 'window', '+', '1', ',', 'dtype', '=', 'input', '.', 'dtype', ')', 'for', 'val', 'in', 'input', '[', ':', 'window', ']', ':', 'if', 'val', '==', 'val', ':', 'nobs', '+=', '1', 'sum_x', '+=', 'val', 'output', '[', 'j', ']', '=', 'NaN', 'if', 'not', 'nobs', 'else', 'sum_x', '/', 'nobs', 'for', 'val', 'in', 'input', '[', 'window', ':', ']', ':', 'prev', '=', 'input', '[', 'j', ']', 'if', 'prev', '==', 'prev', ':', 'sum_x', '-=', 'prev', 'nobs', '-=', '1', 'if', 'val', '==', 'val', ':', 'nobs', '+=', '1', 'sum_x', '+=', 'val', 'j', '+=', '1', 'output', '[', 'j', ']', '=', 'NaN', 'if', 'not', 'nobs', 'else', 'sum_x', '/', 'nobs', 'return', 'output']
Apply a rolling mean function to an array. This is a simple rolling aggregation.
['Apply', 'a', 'rolling', 'mean', 'function', 'to', 'an', 'array', '.', 'This', 'is', 'a', 'simple', 'rolling', 'aggregation', '.']
train
https://github.com/quantmind/dynts/blob/21ac57c648bfec402fa6b1fe569496cf098fb5e8/dynts/lib/fallback/operators.py#L76-L107
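A runnable check of the fallback above, assuming roll_mean and its module globals are in scope (in dynts, np and NaN come from numpy); NaNs drop out of each window's mean via the val == val trick, since NaN != NaN:

import numpy as np

data = np.array([1.0, 2.0, np.nan, 4.0])
print(roll_mean(data, 2))
# window [1, 2]   -> 1.5
# window [2, NaN] -> 2.0 (one observation)
# window [NaN, 4] -> 4.0 (one observation)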
3,049
HiPERCAM/hcam_widgets
hcam_widgets/widgets.py
FloatEntry.set_unbind
def set_unbind(self):
    """
    Unsets key bindings.
    """
    self.unbind('<Button-1>')
    self.unbind('<Button-3>')
    self.unbind('<Up>')
    self.unbind('<Down>')
    self.unbind('<Shift-Up>')
    self.unbind('<Shift-Down>')
    self.unbind('<Control-Up>')
    self.unbind('<Control-Down>')
    self.unbind('<Double-Button-1>')
    self.unbind('<Double-Button-3>')
    self.unbind('<Shift-Button-1>')
    self.unbind('<Shift-Button-3>')
    self.unbind('<Control-Button-1>')
    self.unbind('<Control-Button-3>')
    self.unbind('<Enter>')
python
def set_unbind(self):
    """
    Unsets key bindings.
    """
    self.unbind('<Button-1>')
    self.unbind('<Button-3>')
    self.unbind('<Up>')
    self.unbind('<Down>')
    self.unbind('<Shift-Up>')
    self.unbind('<Shift-Down>')
    self.unbind('<Control-Up>')
    self.unbind('<Control-Down>')
    self.unbind('<Double-Button-1>')
    self.unbind('<Double-Button-3>')
    self.unbind('<Shift-Button-1>')
    self.unbind('<Shift-Button-3>')
    self.unbind('<Control-Button-1>')
    self.unbind('<Control-Button-3>')
    self.unbind('<Enter>')
['def', 'set_unbind', '(', 'self', ')', ':', 'self', '.', 'unbind', '(', "'<Button-1>'", ')', 'self', '.', 'unbind', '(', "'<Button-3>'", ')', 'self', '.', 'unbind', '(', "'<Up>'", ')', 'self', '.', 'unbind', '(', "'<Down>'", ')', 'self', '.', 'unbind', '(', "'<Shift-Up>'", ')', 'self', '.', 'unbind', '(', "'<Shift-Down>'", ')', 'self', '.', 'unbind', '(', "'<Control-Up>'", ')', 'self', '.', 'unbind', '(', "'<Control-Down>'", ')', 'self', '.', 'unbind', '(', "'<Double-Button-1>'", ')', 'self', '.', 'unbind', '(', "'<Double-Button-3>'", ')', 'self', '.', 'unbind', '(', "'<Shift-Button-1>'", ')', 'self', '.', 'unbind', '(', "'<Shift-Button-3>'", ')', 'self', '.', 'unbind', '(', "'<Control-Button-1>'", ')', 'self', '.', 'unbind', '(', "'<Control-Button-3>'", ')', 'self', '.', 'unbind', '(', "'<Enter>'", ')']
Unsets key bindings.
['Unsets', 'key', 'bindings', '.']
train
https://github.com/HiPERCAM/hcam_widgets/blob/7219f0d96dd3a8ebe3139c7f542a72c02d02fce8/hcam_widgets/widgets.py#L899-L917
3,050
juju/charm-helpers
charmhelpers/contrib/openstack/utils.py
clean_storage
def clean_storage(block_device):
    '''
    Ensures a block device is clean.  That is:
        - unmounted
        - any lvm volume groups are deactivated
        - any lvm physical device signatures removed
        - partition table wiped

    :param block_device: str: Full path to block device to clean.
    '''
    for mp, d in mounts():
        if d == block_device:
            juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
                     (d, mp), level=INFO)
            umount(mp, persist=True)

    if is_lvm_physical_volume(block_device):
        deactivate_lvm_volume_group(block_device)
        remove_lvm_physical_volume(block_device)
    else:
        zap_disk(block_device)
python
def clean_storage(block_device):
    '''
    Ensures a block device is clean.  That is:
        - unmounted
        - any lvm volume groups are deactivated
        - any lvm physical device signatures removed
        - partition table wiped

    :param block_device: str: Full path to block device to clean.
    '''
    for mp, d in mounts():
        if d == block_device:
            juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
                     (d, mp), level=INFO)
            umount(mp, persist=True)

    if is_lvm_physical_volume(block_device):
        deactivate_lvm_volume_group(block_device)
        remove_lvm_physical_volume(block_device)
    else:
        zap_disk(block_device)
['def', 'clean_storage', '(', 'block_device', ')', ':', 'for', 'mp', ',', 'd', 'in', 'mounts', '(', ')', ':', 'if', 'd', '==', 'block_device', ':', 'juju_log', '(', "'clean_storage(): %s is mounted @ %s, unmounting.'", '%', '(', 'd', ',', 'mp', ')', ',', 'level', '=', 'INFO', ')', 'umount', '(', 'mp', ',', 'persist', '=', 'True', ')', 'if', 'is_lvm_physical_volume', '(', 'block_device', ')', ':', 'deactivate_lvm_volume_group', '(', 'block_device', ')', 'remove_lvm_physical_volume', '(', 'block_device', ')', 'else', ':', 'zap_disk', '(', 'block_device', ')']
Ensures a block device is clean.  That is:
    - unmounted
    - any lvm volume groups are deactivated
    - any lvm physical device signatures removed
    - partition table wiped

:param block_device: str: Full path to block device to clean.
['Ensures', 'a', 'block', 'device', 'is', 'clean', '.', 'That', 'is', ':', '-', 'unmounted', '-', 'any', 'lvm', 'volume', 'groups', 'are', 'deactivated', '-', 'any', 'lvm', 'physical', 'device', 'signatures', 'removed', '-', 'partition', 'table', 'wiped']
train
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/utils.py#L694-L714
3,051
gem/oq-engine
openquake/hmtk/seismicity/catalogue.py
Catalogue._get_row_str
def _get_row_str(self, i):
    """
    Returns a string representation of the key information in a row
    """
    row_data = ["{:s}".format(self.data['eventID'][i]),
                "{:g}".format(self.data['year'][i]),
                "{:g}".format(self.data['month'][i]),
                "{:g}".format(self.data['day'][i]),
                "{:g}".format(self.data['hour'][i]),
                "{:g}".format(self.data['minute'][i]),
                "{:.1f}".format(self.data['second'][i]),
                "{:.3f}".format(self.data['longitude'][i]),
                "{:.3f}".format(self.data['latitude'][i]),
                "{:.1f}".format(self.data['depth'][i]),
                "{:.1f}".format(self.data['magnitude'][i])]
    return " ".join(row_data)
python
def _get_row_str(self, i):
    """
    Returns a string representation of the key information in a row
    """
    row_data = ["{:s}".format(self.data['eventID'][i]),
                "{:g}".format(self.data['year'][i]),
                "{:g}".format(self.data['month'][i]),
                "{:g}".format(self.data['day'][i]),
                "{:g}".format(self.data['hour'][i]),
                "{:g}".format(self.data['minute'][i]),
                "{:.1f}".format(self.data['second'][i]),
                "{:.3f}".format(self.data['longitude'][i]),
                "{:.3f}".format(self.data['latitude'][i]),
                "{:.1f}".format(self.data['depth'][i]),
                "{:.1f}".format(self.data['magnitude'][i])]
    return " ".join(row_data)
['def', '_get_row_str', '(', 'self', ',', 'i', ')', ':', 'row_data', '=', '[', '"{:s}"', '.', 'format', '(', 'self', '.', 'data', '[', "'eventID'", ']', '[', 'i', ']', ')', ',', '"{:g}"', '.', 'format', '(', 'self', '.', 'data', '[', "'year'", ']', '[', 'i', ']', ')', ',', '"{:g}"', '.', 'format', '(', 'self', '.', 'data', '[', "'month'", ']', '[', 'i', ']', ')', ',', '"{:g}"', '.', 'format', '(', 'self', '.', 'data', '[', "'day'", ']', '[', 'i', ']', ')', ',', '"{:g}"', '.', 'format', '(', 'self', '.', 'data', '[', "'hour'", ']', '[', 'i', ']', ')', ',', '"{:g}"', '.', 'format', '(', 'self', '.', 'data', '[', "'minute'", ']', '[', 'i', ']', ')', ',', '"{:.1f}"', '.', 'format', '(', 'self', '.', 'data', '[', "'second'", ']', '[', 'i', ']', ')', ',', '"{:.3f}"', '.', 'format', '(', 'self', '.', 'data', '[', "'longitude'", ']', '[', 'i', ']', ')', ',', '"{:.3f}"', '.', 'format', '(', 'self', '.', 'data', '[', "'latitude'", ']', '[', 'i', ']', ')', ',', '"{:.1f}"', '.', 'format', '(', 'self', '.', 'data', '[', "'depth'", ']', '[', 'i', ']', ')', ',', '"{:.1f}"', '.', 'format', '(', 'self', '.', 'data', '[', "'magnitude'", ']', '[', 'i', ']', ')', ']', 'return', '" "', '.', 'join', '(', 'row_data', ')']
Returns a string representation of the key information in a row
['Returns', 'a', 'string', 'representation', 'of', 'the', 'key', 'information', 'in', 'a', 'row']
train
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/seismicity/catalogue.py#L138-L153
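A standalone illustration of the format specs used in the record above: ":g" trims trailing zeros from floats, while ":.1f"/":.3f" fix the number of decimals:

print("{:g}".format(2004.0))   # 2004
print("{:.1f}".format(10.26))  # 10.3
print("{:.3f}".format(42.1))   # 42.100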
3,052
michaeljoseph/changes
changes/flow.py
publish
def publish(context):
    """Publishes the project"""
    commit_version_change(context)

    if context.github:
        # github token
        project_settings = project_config(context.module_name)
        if not project_settings['gh_token']:
            click.echo('You need a GitHub token for changes to create a release.')
            click.pause(
                'Press [enter] to launch the GitHub "New personal access '
                'token" page, to create a token for changes.'
            )
            click.launch('https://github.com/settings/tokens/new')
            project_settings['gh_token'] = click.prompt('Enter your changes token')

            store_settings(context.module_name, project_settings)
        description = click.prompt('Describe this release')

        upload_url = create_github_release(
            context, project_settings['gh_token'], description
        )

        upload_release_distributions(
            context,
            project_settings['gh_token'],
            build_distributions(context),
            upload_url,
        )

        click.pause('Press [enter] to review and update your new release')
        click.launch(
            '{0}/releases/tag/{1}'.format(context.repo_url, context.new_version)
        )
    else:
        tag_and_push(context)
python
def publish(context):
    """Publishes the project"""
    commit_version_change(context)

    if context.github:
        # github token
        project_settings = project_config(context.module_name)
        if not project_settings['gh_token']:
            click.echo('You need a GitHub token for changes to create a release.')
            click.pause(
                'Press [enter] to launch the GitHub "New personal access '
                'token" page, to create a token for changes.'
            )
            click.launch('https://github.com/settings/tokens/new')
            project_settings['gh_token'] = click.prompt('Enter your changes token')

            store_settings(context.module_name, project_settings)
        description = click.prompt('Describe this release')

        upload_url = create_github_release(
            context, project_settings['gh_token'], description
        )

        upload_release_distributions(
            context,
            project_settings['gh_token'],
            build_distributions(context),
            upload_url,
        )

        click.pause('Press [enter] to review and update your new release')
        click.launch(
            '{0}/releases/tag/{1}'.format(context.repo_url, context.new_version)
        )
    else:
        tag_and_push(context)
['def', 'publish', '(', 'context', ')', ':', 'commit_version_change', '(', 'context', ')', 'if', 'context', '.', 'github', ':', '# github token', 'project_settings', '=', 'project_config', '(', 'context', '.', 'module_name', ')', 'if', 'not', 'project_settings', '[', "'gh_token'", ']', ':', 'click', '.', 'echo', '(', "'You need a GitHub token for changes to create a release.'", ')', 'click', '.', 'pause', '(', '\'Press [enter] to launch the GitHub "New personal access \'', '\'token" page, to create a token for changes.\'', ')', 'click', '.', 'launch', '(', "'https://github.com/settings/tokens/new'", ')', 'project_settings', '[', "'gh_token'", ']', '=', 'click', '.', 'prompt', '(', "'Enter your changes token'", ')', 'store_settings', '(', 'context', '.', 'module_name', ',', 'project_settings', ')', 'description', '=', 'click', '.', 'prompt', '(', "'Describe this release'", ')', 'upload_url', '=', 'create_github_release', '(', 'context', ',', 'project_settings', '[', "'gh_token'", ']', ',', 'description', ')', 'upload_release_distributions', '(', 'context', ',', 'project_settings', '[', "'gh_token'", ']', ',', 'build_distributions', '(', 'context', ')', ',', 'upload_url', ',', ')', 'click', '.', 'pause', '(', "'Press [enter] to review and update your new release'", ')', 'click', '.', 'launch', '(', "'{0}/releases/tag/{1}'", '.', 'format', '(', 'context', '.', 'repo_url', ',', 'context', '.', 'new_version', ')', ')', 'else', ':', 'tag_and_push', '(', 'context', ')']
Publishes the project
['Publishes', 'the', 'project']
train
https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/flow.py#L25-L60
3,053
mitsei/dlkit
dlkit/handcar/learning/sessions.py
ActivityLookupSession.get_activities_for_objectives
def get_activities_for_objectives(self, objective_ids=None):
    """Gets the activities for the given objectives.

    In plenary mode, the returned list contains all of the activities
    specified in the objective Id list, in the order of the list,
    including duplicates, or an error results if a course offering Id
    in the supplied list is not found or inaccessible. Otherwise,
    inaccessible Activities may be omitted from the list and may
    present the elements in any order including returning a unique set.

    arg:    objectiveIds (osid.id.IdList): list of objective Ids
    return: (osid.learning.ActivityList) - list of activities
    raise:  NotFound - an objectiveId not found
    raise:  NullArgument - objectiveIdList is null
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    compliance: mandatory - This method must be implemented.
    """
    if objective_ids is None:
        raise NullArgument()
    # Should also check if objective_id exists?
    activities = []
    for i in objective_ids:
        acts = None
        url_path = construct_url('activities',
                                 bank_id=self._catalog_idstr,
                                 obj_id=i)
        try:
            acts = json.loads(self._get_request(url_path))
        except (NotFound, OperationFailed):
            if self._activity_view == PLENARY:
                raise
            else:
                pass
        if acts:
            activities += acts
    return objects.ActivityList(activities)
python
def get_activities_for_objectives(self, objective_ids=None):
    """Gets the activities for the given objectives.

    In plenary mode, the returned list contains all of the activities
    specified in the objective Id list, in the order of the list,
    including duplicates, or an error results if a course offering Id
    in the supplied list is not found or inaccessible. Otherwise,
    inaccessible Activities may be omitted from the list and may
    present the elements in any order including returning a unique set.

    arg:    objectiveIds (osid.id.IdList): list of objective Ids
    return: (osid.learning.ActivityList) - list of activities
    raise:  NotFound - an objectiveId not found
    raise:  NullArgument - objectiveIdList is null
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    compliance: mandatory - This method must be implemented.
    """
    if objective_ids is None:
        raise NullArgument()
    # Should also check if objective_id exists?
    activities = []
    for i in objective_ids:
        acts = None
        url_path = construct_url('activities',
                                 bank_id=self._catalog_idstr,
                                 obj_id=i)
        try:
            acts = json.loads(self._get_request(url_path))
        except (NotFound, OperationFailed):
            if self._activity_view == PLENARY:
                raise
            else:
                pass
        if acts:
            activities += acts
    return objects.ActivityList(activities)
['def', 'get_activities_for_objectives', '(', 'self', ',', 'objective_ids', '=', 'None', ')', ':', 'if', 'objective_ids', 'is', 'None', ':', 'raise', 'NullArgument', '(', ')', '# Should also check if objective_id exists?', 'activities', '=', '[', ']', 'for', 'i', 'in', 'objective_ids', ':', 'acts', '=', 'None', 'url_path', '=', 'construct_url', '(', "'activities'", ',', 'bank_id', '=', 'self', '.', '_catalog_idstr', ',', 'obj_id', '=', 'i', ')', 'try', ':', 'acts', '=', 'json', '.', 'loads', '(', 'self', '.', '_get_request', '(', 'url_path', ')', ')', 'except', '(', 'NotFound', ',', 'OperationFailed', ')', ':', 'if', 'self', '.', '_activity_view', '==', 'PLENARY', ':', 'raise', 'else', ':', 'pass', 'if', 'acts', ':', 'activities', '+=', 'acts', 'return', 'objects', '.', 'ActivityList', '(', 'activities', ')']
Gets the activities for the given objectives.

In plenary mode, the returned list contains all of the activities
specified in the objective Id list, in the order of the list,
including duplicates, or an error results if a course offering Id in
the supplied list is not found or inaccessible. Otherwise,
inaccessible Activities may be omitted from the list and may present
the elements in any order including returning a unique set.

arg:    objectiveIds (osid.id.IdList): list of objective Ids
return: (osid.learning.ActivityList) - list of activities
raise:  NotFound - an objectiveId not found
raise:  NullArgument - objectiveIdList is null
raise:  OperationFailed - unable to complete request
raise:  PermissionDenied - authorization failure
compliance: mandatory - This method must be implemented.
['Gets', 'the', 'activities', 'for', 'the', 'given', 'objectives', '.', 'In', 'plenary', 'mode', 'the', 'returned', 'list', 'contains', 'all', 'of', 'the', 'activities', 'specified', 'in', 'the', 'objective', 'Id', 'list', 'in', 'the', 'order', 'of', 'the', 'list', 'including', 'duplicates', 'or', 'an', 'error', 'results', 'if', 'a', 'course', 'offering', 'Id', 'in', 'the', 'supplied', 'list', 'is', 'not', 'found', 'or', 'inaccessible', '.', 'Otherwise', 'inaccessible', 'Activities', 'may', 'be', 'omitted', 'from', 'the', 'list', 'and', 'may', 'present', 'the', 'elements', 'in', 'any', 'order', 'including', 'returning', 'a', 'unique', 'set', '.', 'arg', ':', 'objectiveIds', '(', 'osid', '.', 'id', '.', 'IdList', ')', ':', 'list', 'of', 'objective', 'Ids', 'return', ':', '(', 'osid', '.', 'learning', '.', 'ActivityList', ')', '-', 'list', 'of', 'activities', 'raise', ':', 'NotFound', '-', 'an', 'objectiveId', 'not', 'found', 'raise', ':', 'NullArgument', '-', 'objectiveIdList', 'is', 'null', 'raise', ':', 'OperationFailed', '-', 'unable', 'to', 'complete', 'request', 'raise', ':', 'PermissionDenied', '-', 'authorization', 'failure', 'compliance', ':', 'mandatory', '-', 'This', 'method', 'is', 'must', 'be', 'implemented', '.']
train
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/learning/sessions.py#L2601-L2637
3,054
globality-corp/microcosm-flask
microcosm_flask/cloning.py
DAGCloningController.clone
def clone(self, substitutions, commit=True, **kwargs):
    """
    Clone a DAG, optionally skipping the commit.
    """
    return self.store.clone(substitutions, **kwargs)
python
def clone(self, substitutions, commit=True, **kwargs):
    """
    Clone a DAG, optionally skipping the commit.
    """
    return self.store.clone(substitutions, **kwargs)
['def', 'clone', '(', 'self', ',', 'substitutions', ',', 'commit', '=', 'True', ',', '*', '*', 'kwargs', ')', ':', 'return', 'self', '.', 'store', '.', 'clone', '(', 'substitutions', ',', '*', '*', 'kwargs', ')']
Clone a DAG, optionally skipping the commit.
['Clone', 'a', 'DAG', 'optionally', 'skipping', 'the', 'commit', '.']
train
https://github.com/globality-corp/microcosm-flask/blob/c2eaf57f03e7d041eea343751a4a90fcc80df418/microcosm_flask/cloning.py#L115-L120
3,055
Accelize/pycosio
pycosio/storage/s3.py
_handle_client_error
def _handle_client_error():
    """
    Handle boto exception and convert to class IO exceptions

    Raises:
        OSError subclasses: IO error.
    """
    try:
        yield
    except _ClientError as exception:
        error = exception.response['Error']
        if error['Code'] in _ERROR_CODES:
            raise _ERROR_CODES[error['Code']](error['Message'])
        raise
python
def _handle_client_error():
    """
    Handle boto exception and convert to class IO exceptions

    Raises:
        OSError subclasses: IO error.
    """
    try:
        yield
    except _ClientError as exception:
        error = exception.response['Error']
        if error['Code'] in _ERROR_CODES:
            raise _ERROR_CODES[error['Code']](error['Message'])
        raise
['def', '_handle_client_error', '(', ')', ':', 'try', ':', 'yield', 'except', '_ClientError', 'as', 'exception', ':', 'error', '=', 'exception', '.', 'response', '[', "'Error'", ']', 'if', 'error', '[', "'Code'", ']', 'in', '_ERROR_CODES', ':', 'raise', '_ERROR_CODES', '[', 'error', '[', "'Code'", ']', ']', '(', 'error', '[', "'Message'", ']', ')', 'raise']
Handle boto exception and convert to class IO exceptions

Raises:
    OSError subclasses: IO error.
['Handle', 'boto', 'exception', 'and', 'convert', 'to', 'class', 'IO', 'exceptions']
train
https://github.com/Accelize/pycosio/blob/1cc1f8fdf5394d92918b7bae2bfa682169ccc48c/pycosio/storage/s3.py#L29-L44
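In the pycosio source this generator is presumably wrapped with contextlib.contextmanager (the bare yield gives it away). A self-contained sketch of the same error-translation pattern, with a stand-in ClientError instead of botocore's:

from contextlib import contextmanager

_ERROR_CODES = {'404': FileNotFoundError, '403': PermissionError}

class ClientError(Exception):
    def __init__(self, code, message):
        super().__init__(message)
        self.response = {'Error': {'Code': code, 'Message': message}}

@contextmanager
def handle_client_error():
    try:
        yield
    except ClientError as exception:
        error = exception.response['Error']
        if error['Code'] in _ERROR_CODES:
            raise _ERROR_CODES[error['Code']](error['Message'])
        raise

try:
    with handle_client_error():
        raise ClientError('404', 'no such key')
except FileNotFoundError as exc:
    print("translated to:", type(exc).__name__, "-", exc)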
3,056
ldo/dbussy
ravel.py
system_bus_async
async def system_bus_async(loop = None, **kwargs) :
    "returns a Connection object for the D-Bus system bus."
    return \
        Connection \
          (
            await dbus.Connection.bus_get_async(DBUS.BUS_SYSTEM, private = False, loop = loop)
          ) \
        .register_additional_standard(**kwargs)
python
async def system_bus_async(loop = None, **kwargs) :
    "returns a Connection object for the D-Bus system bus."
    return \
        Connection \
          (
            await dbus.Connection.bus_get_async(DBUS.BUS_SYSTEM, private = False, loop = loop)
          ) \
        .register_additional_standard(**kwargs)
['async', 'def', 'system_bus_async', '(', 'loop', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'return', 'Connection', '(', 'await', 'dbus', '.', 'Connection', '.', 'bus_get_async', '(', 'DBUS', '.', 'BUS_SYSTEM', ',', 'private', '=', 'False', ',', 'loop', '=', 'loop', ')', ')', '.', 'register_additional_standard', '(', '*', '*', 'kwargs', ')']
returns a Connection object for the D-Bus system bus.
['returns', 'a', 'Connection', 'object', 'for', 'the', 'D', '-', 'Bus', 'system', 'bus', '.']
train
https://github.com/ldo/dbussy/blob/59e4fbe8b8111ceead884e50d1973901a0a2d240/ravel.py#L1614-L1621
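A usage sketch for the record above, assuming the dbussy/ravel packages and a running D-Bus system daemon; the coroutine must be driven by an asyncio event loop:

import asyncio
import ravel

async def main():
    bus = await ravel.system_bus_async()
    print("connected:", bus is not None)

asyncio.run(main())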
3,057
thombashi/pytablewriter
pytablewriter/writer/_table_writer.py
AbstractTableWriter.from_series
def from_series(self, series, add_index_column=True):
    """
    Set tabular attributes to the writer from :py:class:`pandas.Series`.
    Following attributes are set by the method:

        - :py:attr:`~.headers`
        - :py:attr:`~.value_matrix`
        - :py:attr:`~.type_hints`

    Args:
        series(pandas.Series):
            Input pandas.Series object.
        add_index_column(bool, optional):
            If |True|, add a column of ``index`` of the ``series``.
            Defaults to |True|.
    """
    if series.name:
        self.headers = [series.name]
    else:
        self.headers = ["value"]

    self.type_hints = [self.__get_typehint_from_dtype(series.dtype)]

    if add_index_column:
        self.headers = [""] + self.headers
        if self.type_hints:
            self.type_hints = [None] + self.type_hints

        self.value_matrix = [
            [index] + [value]
            for index, value in zip(series.index.tolist(), series.tolist())
        ]
    else:
        self.value_matrix = [[value] for value in series.tolist()]
python
def from_series(self, series, add_index_column=True):
    """
    Set tabular attributes to the writer from :py:class:`pandas.Series`.
    Following attributes are set by the method:

        - :py:attr:`~.headers`
        - :py:attr:`~.value_matrix`
        - :py:attr:`~.type_hints`

    Args:
        series(pandas.Series):
            Input pandas.Series object.
        add_index_column(bool, optional):
            If |True|, add a column of ``index`` of the ``series``.
            Defaults to |True|.
    """
    if series.name:
        self.headers = [series.name]
    else:
        self.headers = ["value"]

    self.type_hints = [self.__get_typehint_from_dtype(series.dtype)]

    if add_index_column:
        self.headers = [""] + self.headers
        if self.type_hints:
            self.type_hints = [None] + self.type_hints

        self.value_matrix = [
            [index] + [value]
            for index, value in zip(series.index.tolist(), series.tolist())
        ]
    else:
        self.value_matrix = [[value] for value in series.tolist()]
['def', 'from_series', '(', 'self', ',', 'series', ',', 'add_index_column', '=', 'True', ')', ':', 'if', 'series', '.', 'name', ':', 'self', '.', 'headers', '=', '[', 'series', '.', 'name', ']', 'else', ':', 'self', '.', 'headers', '=', '[', '"value"', ']', 'self', '.', 'type_hints', '=', '[', 'self', '.', '__get_typehint_from_dtype', '(', 'series', '.', 'dtype', ')', ']', 'if', 'add_index_column', ':', 'self', '.', 'headers', '=', '[', '""', ']', '+', 'self', '.', 'headers', 'if', 'self', '.', 'type_hints', ':', 'self', '.', 'type_hints', '=', '[', 'None', ']', '+', 'self', '.', 'type_hints', 'self', '.', 'value_matrix', '=', '[', '[', 'index', ']', '+', '[', 'value', ']', 'for', 'index', ',', 'value', 'in', 'zip', '(', 'series', '.', 'index', '.', 'tolist', '(', ')', ',', 'series', '.', 'tolist', '(', ')', ')', ']', 'else', ':', 'self', '.', 'value_matrix', '=', '[', '[', 'value', ']', 'for', 'value', 'in', 'series', '.', 'tolist', '(', ')', ']']
Set tabular attributes to the writer from :py:class:`pandas.Series`. Following attributes are set by the method: - :py:attr:`~.headers` - :py:attr:`~.value_matrix` - :py:attr:`~.type_hints` Args: series(pandas.Series): Input pandas.Series object. add_index_column(bool, optional): If |True|, add a column of ``index`` of the ``series``. Defaults to |True|.
['Set', 'tabular', 'attributes', 'to', 'the', 'writer', 'from', ':', 'py', ':', 'class', ':', 'pandas', '.', 'Series', '.', 'Following', 'attributes', 'are', 'set', 'by', 'the', 'method', ':']
train
https://github.com/thombashi/pytablewriter/blob/52ea85ed8e89097afa64f137c6a1b3acdfefdbda/pytablewriter/writer/_table_writer.py#L654-L686
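A hedged usage sketch for from_series, assuming pandas and pytablewriter are installed; MarkdownTableWriter is an arbitrary concrete writer choice:

import pandas as pd
from pytablewriter import MarkdownTableWriter

series = pd.Series([1.1, 2.2, 3.3], name="value_a")

writer = MarkdownTableWriter()
writer.from_series(series)   # headers become ["", "value_a"]; index column prepended
writer.write_table()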
3,058
eng-tools/sfsimodels
sfsimodels/models/soils.py
SoilProfile.get_v_total_stress_at_depth
def get_v_total_stress_at_depth(self, z): """ Determine the vertical total stress at depth z, where z can be a number or an array of numbers. """ if not hasattr(z, "__len__"): return self.one_vertical_total_stress(z) else: sigma_v_effs = [] for value in z: sigma_v_effs.append(self.one_vertical_total_stress(value)) return np.array(sigma_v_effs)
python
def get_v_total_stress_at_depth(self, z): """ Determine the vertical total stress at depth z, where z can be a number or an array of numbers. """ if not hasattr(z, "__len__"): return self.one_vertical_total_stress(z) else: sigma_v_effs = [] for value in z: sigma_v_effs.append(self.one_vertical_total_stress(value)) return np.array(sigma_v_effs)
['def', 'get_v_total_stress_at_depth', '(', 'self', ',', 'z', ')', ':', 'if', 'not', 'hasattr', '(', 'z', ',', '"__len__"', ')', ':', 'return', 'self', '.', 'one_vertical_total_stress', '(', 'z', ')', 'else', ':', 'sigma_v_effs', '=', '[', ']', 'for', 'value', 'in', 'z', ':', 'sigma_v_effs', '.', 'append', '(', 'self', '.', 'one_vertical_total_stress', '(', 'value', ')', ')', 'return', 'np', '.', 'array', '(', 'sigma_v_effs', ')']
Determine the vertical total stress at depth z, where z can be a number or an array of numbers.
['Determine', 'the', 'vertical', 'total', 'stress', 'at', 'depth', 'z', 'where', 'z', 'can', 'be', 'a', 'number', 'or', 'an', 'array', 'of', 'numbers', '.']
train
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/soils.py#L1112-L1123
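The method depends on a fully built SoilProfile, but the scalar-versus-array dispatch it uses stands alone; a sketch with a hypothetical stress function (the 18 kN/m^3 unit weight is illustrative only):

import numpy as np

def eval_scalar_or_array(func, z):
    # Scalars pass straight through; any sequence is evaluated
    # element-wise and returned as an ndarray, mirroring the code above.
    if not hasattr(z, "__len__"):
        return func(z)
    return np.array([func(value) for value in z])

total_stress = lambda depth: 18.0 * depth  # kPa, hypothetical uniform soil
print(eval_scalar_or_array(total_stress, 2.0))         # 36.0
print(eval_scalar_or_array(total_stress, [1.0, 2.0]))  # [18. 36.]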
3,059
dw/mitogen
mitogen/core.py
Receiver.get
def get(self, timeout=None, block=True, throw_dead=True): """ Sleep waiting for a message to arrive on this receiver. :param float timeout: If not :data:`None`, specifies a timeout in seconds. :raises mitogen.core.ChannelError: The remote end indicated the channel should be closed, communication with it was lost, or :meth:`close` was called in the local process. :raises mitogen.core.TimeoutError: Timeout was reached. :returns: :class:`Message` that was received. """ _vv and IOLOG.debug('%r.get(timeout=%r, block=%r)', self, timeout, block) try: msg = self._latch.get(timeout=timeout, block=block) except LatchError: raise ChannelError(self.closed_msg) if msg.is_dead and throw_dead: msg._throw_dead() return msg
python
def get(self, timeout=None, block=True, throw_dead=True): """ Sleep waiting for a message to arrive on this receiver. :param float timeout: If not :data:`None`, specifies a timeout in seconds. :raises mitogen.core.ChannelError: The remote end indicated the channel should be closed, communication with it was lost, or :meth:`close` was called in the local process. :raises mitogen.core.TimeoutError: Timeout was reached. :returns: :class:`Message` that was received. """ _vv and IOLOG.debug('%r.get(timeout=%r, block=%r)', self, timeout, block) try: msg = self._latch.get(timeout=timeout, block=block) except LatchError: raise ChannelError(self.closed_msg) if msg.is_dead and throw_dead: msg._throw_dead() return msg
['def', 'get', '(', 'self', ',', 'timeout', '=', 'None', ',', 'block', '=', 'True', ',', 'throw_dead', '=', 'True', ')', ':', '_vv', 'and', 'IOLOG', '.', 'debug', '(', "'%r.get(timeout=%r, block=%r)'", ',', 'self', ',', 'timeout', ',', 'block', ')', 'try', ':', 'msg', '=', 'self', '.', '_latch', '.', 'get', '(', 'timeout', '=', 'timeout', ',', 'block', '=', 'block', ')', 'except', 'LatchError', ':', 'raise', 'ChannelError', '(', 'self', '.', 'closed_msg', ')', 'if', 'msg', '.', 'is_dead', 'and', 'throw_dead', ':', 'msg', '.', '_throw_dead', '(', ')', 'return', 'msg']
Sleep waiting for a message to arrive on this receiver. :param float timeout: If not :data:`None`, specifies a timeout in seconds. :raises mitogen.core.ChannelError: The remote end indicated the channel should be closed, communication with it was lost, or :meth:`close` was called in the local process. :raises mitogen.core.TimeoutError: Timeout was reached. :returns: :class:`Message` that was received.
['Sleep', 'waiting', 'for', 'a', 'message', 'to', 'arrive', 'on', 'this', 'receiver', '.']
train
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/mitogen/core.py#L1010-L1035
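A consumption-loop sketch around Receiver.get, assuming recv is a mitogen.core.Receiver already attached to a live router (setting one up is out of scope here):

import mitogen.core

def drain(recv):
    # Keep pulling until the channel dies; a missed deadline and a dead
    # channel are distinct outcomes, as the docstring above spells out.
    while True:
        try:
            msg = recv.get(timeout=5.0)
        except mitogen.core.TimeoutError:
            continue                      # nothing arrived within 5s; retry
        except mitogen.core.ChannelError:
            return                        # remote closed or connection lost
        yield msg.unpickle()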
3,060
pjuren/pyokit
src/pyokit/io/bedIterators.py
pairedBEDIterator
def pairedBEDIterator(inputStreams, mirror=False, mirrorScore=None, ignoreStrand=False, ignoreScore=True, ignoreName=True, sortedby=ITERATOR_SORTED_END, scoreType=float, verbose=False): """ Iterate over multiple BED format files simultaneously and yield lists of genomic intervals for each matching set of intervals found. By default, regions which are not found in all files will be skipped (mirror = false). Optionally (by setting mirror to true) if a file is missing an interval, it can be added on-the-fly, and will have the same chrom, start and end and name as in other files. The score will be taken from the first file in inputStreams if mirrorScore is not set, otherwise that value will be used. :param inputStreams: a list of input streams in BED format :param mirror: if true, add missing elements so all streams contain the same elements. Inserted elements will have the same :param ignoreStrand: ignore strand when comparing elements for equality? :param ignoreScore: ignore score when comparing elements for equality? :param ignoreScore: ignore name when comparing elements for equality? :param sortedby: must be set to one of the sorting orders for BED streams; we require the streams to be sorted in some fashion. :param scoreType: interpret scores as what type? Defaults to float, which is generally the most flexible. """ # let's build our sorting order... sortOrder = ["chrom"] if sortedby == ITERATOR_SORTED_START: sortOrder.append("start") sortOrder.append("end") elif sortedby == ITERATOR_SORTED_END: sortOrder.append("end") sortOrder.append("start") if not ignoreStrand: sortOrder.append("strand") if not ignoreName: sortOrder.append("name") if not ignoreScore: sortOrder.append("score") keyFunc = attrgetter(*sortOrder) def next_item(iterator): """ little internal function to return the next item, or None """ try: return iterator.next() except StopIteration: return None bIterators = [BEDIterator(bfh, verbose=verbose, sortedby=sortedby, scoreType=scoreType) for bfh in inputStreams] elements = [next_item(it) for it in bIterators] while True: assert(len(elements) >= 2) if None not in elements and len(set([keyFunc(x) for x in elements])) == 1: # All equal -- yield and move on for all streams yield [e for e in elements] elements = [next_item(it) for it in bIterators] else: # something wasn't equal.. find the smallest thing, it's about to drop # out of range and will never have the chance to match anything again minElement = min([x for x in elements if x is not None], key=keyFunc) minIndices = [i for i in range(0, len(elements)) if elements[i] is not None and keyFunc(elements[i]) == keyFunc(minElement)] if mirror: # mirror the min item for any streams in which it doesn't match score = minElement.score if mirrorScore is None else mirrorScore yield [elements[i] if i in minIndices else GenomicInterval(minElement.chrom, minElement.start, minElement.end, minElement.name, score, minElement.strand, scoreType=scoreType) for i in range(0, len(elements))] # move the smallest element onwards now, we're done with it for index in minIndices: elements[index] = next_item(bIterators[index]) # stop once all streams are exhausted if reduce(lambda x, y: x and y, [e is None for e in elements]): break
python
def pairedBEDIterator(inputStreams, mirror=False, mirrorScore=None, ignoreStrand=False, ignoreScore=True, ignoreName=True, sortedby=ITERATOR_SORTED_END, scoreType=float, verbose=False): """ Iterate over multiple BED format files simultaneously and yield lists of genomic intervals for each matching set of intervals found. By default, regions which are not found in all files will be skipped (mirror = false). Optionally (by setting mirror to true) if a file is missing an interval, it can be added on-the-fly, and will have the same chrom, start and end and name as in other files. The score will be taken from the first file in inputStreams if mirrorScore is not set, otherwise that value will be used. :param inputStreams: a list of input streams in BED format :param mirror: if true, add missing elements so all streams contain the same elements. Inserted elements will have the same :param ignoreStrand: ignore strand when comparing elements for equality? :param ignoreScore: ignore score when comparing elements for equality? :param ignoreScore: ignore name when comparing elements for equality? :param sortedby: must be set to one of the sorting orders for BED streams; we require the streams to be sorted in some fashion. :param scoreType: interpret scores as what type? Defaults to float, which is generally the most flexible. """ # let's build our sorting order... sortOrder = ["chrom"] if sortedby == ITERATOR_SORTED_START: sortOrder.append("start") sortOrder.append("end") elif sortedby == ITERATOR_SORTED_END: sortOrder.append("end") sortOrder.append("start") if not ignoreStrand: sortOrder.append("strand") if not ignoreName: sortOrder.append("name") if not ignoreScore: sortOrder.append("score") keyFunc = attrgetter(*sortOrder) def next_item(iterator): """ little internal function to return the next item, or None """ try: return iterator.next() except StopIteration: return None bIterators = [BEDIterator(bfh, verbose=verbose, sortedby=sortedby, scoreType=scoreType) for bfh in inputStreams] elements = [next_item(it) for it in bIterators] while True: assert(len(elements) >= 2) if None not in elements and len(set([keyFunc(x) for x in elements])) == 1: # All equal -- yield and move on for all streams yield [e for e in elements] elements = [next_item(it) for it in bIterators] else: # something wasn't equal.. find the smallest thing, it's about to drop # out of range and will never have the chance to match anything again minElement = min([x for x in elements if x is not None], key=keyFunc) minIndices = [i for i in range(0, len(elements)) if elements[i] is not None and keyFunc(elements[i]) == keyFunc(minElement)] if mirror: # mirror the min item for any streams in which it doesn't match score = minElement.score if mirrorScore is None else mirrorScore yield [elements[i] if i in minIndices else GenomicInterval(minElement.chrom, minElement.start, minElement.end, minElement.name, score, minElement.strand, scoreType=scoreType) for i in range(0, len(elements))] # move the smallest element onwards now, we're done with it for index in minIndices: elements[index] = next_item(bIterators[index]) # stop once all streams are exhausted if reduce(lambda x, y: x and y, [e is None for e in elements]): break
['def', 'pairedBEDIterator', '(', 'inputStreams', ',', 'mirror', '=', 'False', ',', 'mirrorScore', '=', 'None', ',', 'ignoreStrand', '=', 'False', ',', 'ignoreScore', '=', 'True', ',', 'ignoreName', '=', 'True', ',', 'sortedby', '=', 'ITERATOR_SORTED_END', ',', 'scoreType', '=', 'float', ',', 'verbose', '=', 'False', ')', ':', "# let's build our sorting order...", 'sortOrder', '=', '[', '"chrom"', ']', 'if', 'sortedby', '==', 'ITERATOR_SORTED_START', ':', 'sortOrder', '.', 'append', '(', '"start"', ')', 'sortOrder', '.', 'append', '(', '"end"', ')', 'elif', 'sortedby', '==', 'ITERATOR_SORTED_END', ':', 'sortOrder', '.', 'append', '(', '"end"', ')', 'sortOrder', '.', 'append', '(', '"start"', ')', 'if', 'not', 'ignoreStrand', ':', 'sortOrder', '.', 'append', '(', '"strand"', ')', 'if', 'not', 'ignoreName', ':', 'sortOrder', '.', 'append', '(', '"name"', ')', 'if', 'not', 'ignoreScore', ':', 'sortOrder', '.', 'append', '(', '"score"', ')', 'keyFunc', '=', 'attrgetter', '(', '*', 'sortOrder', ')', 'def', 'next_item', '(', 'iterator', ')', ':', '""" little internal function to return the next item, or None """', 'try', ':', 'return', 'iterator', '.', 'next', '(', ')', 'except', 'StopIteration', ':', 'return', 'None', 'bIterators', '=', '[', 'BEDIterator', '(', 'bfh', ',', 'verbose', '=', 'verbose', ',', 'sortedby', '=', 'sortedby', ',', 'scoreType', '=', 'scoreType', ')', 'for', 'bfh', 'in', 'inputStreams', ']', 'elements', '=', '[', 'next_item', '(', 'it', ')', 'for', 'it', 'in', 'bIterators', ']', 'while', 'True', ':', 'assert', '(', 'len', '(', 'elements', ')', '>=', '2', ')', 'if', 'None', 'not', 'in', 'elements', 'and', 'len', '(', 'set', '(', '[', 'keyFunc', '(', 'x', ')', 'for', 'x', 'in', 'elements', ']', ')', ')', '==', '1', ':', '# All equal -- yield and move on for all streams', 'yield', '[', 'e', 'for', 'e', 'in', 'elements', ']', 'elements', '=', '[', 'next_item', '(', 'it', ')', 'for', 'it', 'in', 'bIterators', ']', 'else', ':', "# something wasn't equal.. find the smallest thing, it's about to drop", '# out of range and will never have the chance to match anything again', 'minElement', '=', 'min', '(', '[', 'x', 'for', 'x', 'in', 'elements', 'if', 'x', 'is', 'not', 'None', ']', ',', 'key', '=', 'keyFunc', ')', 'minIndices', '=', '[', 'i', 'for', 'i', 'in', 'range', '(', '0', ',', 'len', '(', 'elements', ')', ')', 'if', 'elements', '[', 'i', ']', 'is', 'not', 'None', 'and', 'keyFunc', '(', 'elements', '[', 'i', ']', ')', '==', 'keyFunc', '(', 'minElement', ')', ']', 'if', 'mirror', ':', "# mirror the min item for any streams in which it doesn't match", 'score', '=', 'minElement', '.', 'score', 'if', 'mirrorScore', 'is', 'None', 'else', 'mirrorScore', 'yield', '[', 'elements', '[', 'i', ']', 'if', 'i', 'in', 'minIndices', 'else', 'GenomicInterval', '(', 'minElement', '.', 'chrom', ',', 'minElement', '.', 'start', ',', 'minElement', '.', 'end', ',', 'minElement', '.', 'name', ',', 'score', ',', 'minElement', '.', 'strand', ',', 'scoreType', '=', 'scoreType', ')', 'for', 'i', 'in', 'range', '(', '0', ',', 'len', '(', 'elements', ')', ')', ']', "# move the smallest element onwards now, we're done with it", 'for', 'index', 'in', 'minIndices', ':', 'elements', '[', 'index', ']', '=', 'next_item', '(', 'bIterators', '[', 'index', ']', ')', '# stop once all streams are exhausted', 'if', 'reduce', '(', 'lambda', 'x', ',', 'y', ':', 'x', 'and', 'y', ',', '[', 'e', 'is', 'None', 'for', 'e', 'in', 'elements', ']', ')', ':', 'break']
Iterate over multiple BED format files simultaneously and yield lists of genomic intervals for each matching set of intervals found. By default, regions which are not found in all files will be skipped (mirror = false). Optionally (by setting mirror to true) if a file is missing an interval, it can be added on-the-fly, and will have the same chrom, start and end and name as in other files. The score will be taken from the first file in inputStreams if mirrorScore is not set, otherwise that value will be used. :param inputStreams: a list of input streams in BED format :param mirror: if true, add missing elements so all streams contain the same elements. Inserted elements will have the same chrom, start, end and name as the matching elements in the other streams. :param ignoreStrand: ignore strand when comparing elements for equality? :param ignoreScore: ignore score when comparing elements for equality? :param ignoreName: ignore name when comparing elements for equality? :param sortedby: must be set to one of the sorting orders for BED streams; we require the streams to be sorted in some fashion. :param scoreType: interpret scores as what type? Defaults to float, which is generally the most flexible.
['Iterate', 'over', 'multiple', 'BED', 'format', 'files', 'simultaneously', 'and', 'yield', 'lists', 'of', 'genomic', 'intervals', 'for', 'each', 'matching', 'set', 'of', 'intervals', 'found', '.', 'By', 'default', 'regions', 'which', 'are', 'not', 'found', 'in', 'all', 'files', 'will', 'be', 'skipped', '(', 'mirror', '=', 'false', ')', '.', 'Optionally', '(', 'by', 'setting', 'mirror', 'to', 'true', ')', 'if', 'a', 'file', 'is', 'missing', 'an', 'interval', 'it', 'can', 'be', 'added', 'on', '-', 'the', '-', 'fly', 'and', 'will', 'have', 'the', 'same', 'chrom', 'start', 'and', 'end', 'and', 'name', 'as', 'in', 'other', 'files', '.', 'The', 'score', 'will', 'be', 'taken', 'from', 'the', 'first', 'file', 'in', 'inputStreams', 'if', 'mirrorScore', 'is', 'not', 'set', 'otherwise', 'that', 'value', 'will', 'be', 'used', '.']
train
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/io/bedIterators.py#L207-L289
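A usage sketch (Python 2, since the code relies on iterator.next() and the builtin reduce); the file names are placeholders and ITERATOR_SORTED_START is assumed importable from the same module:

from pyokit.io.bedIterators import pairedBEDIterator, ITERATOR_SORTED_START

streams = [open("sample1.bed"), open("sample2.bed")]
for intervals in pairedBEDIterator(streams, mirror=True, mirrorScore=0,
                                   sortedby=ITERATOR_SORTED_START):
    # One GenomicInterval per input stream, aligned on the same locus;
    # mirrored (filled-in) entries carry score 0.
    assert len(set(iv.chrom for iv in intervals)) == 1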
3,061
Microsoft/nni
tools/nni_annotation/code_generator.py
parse_annotation
def parse_annotation(code): """Parse an annotation string. Return an AST Expr node. code: annotation string (excluding '@') """ module = ast.parse(code) assert type(module) is ast.Module, 'internal error #1' assert len(module.body) == 1, 'Annotation contains more than one expression' assert type(module.body[0]) is ast.Expr, 'Annotation is not expression' return module.body[0]
python
def parse_annotation(code): """Parse an annotation string. Return an AST Expr node. code: annotation string (excluding '@') """ module = ast.parse(code) assert type(module) is ast.Module, 'internal error #1' assert len(module.body) == 1, 'Annotation contains more than one expression' assert type(module.body[0]) is ast.Expr, 'Annotation is not expression' return module.body[0]
['def', 'parse_annotation', '(', 'code', ')', ':', 'module', '=', 'ast', '.', 'parse', '(', 'code', ')', 'assert', 'type', '(', 'module', ')', 'is', 'ast', '.', 'Module', ',', "'internal error #1'", 'assert', 'len', '(', 'module', '.', 'body', ')', '==', '1', ',', "'Annotation contains more than one expression'", 'assert', 'type', '(', 'module', '.', 'body', '[', '0', ']', ')', 'is', 'ast', '.', 'Expr', ',', "'Annotation is not expression'", 'return', 'module', '.', 'body', '[', '0', ']']
Parse an annotation string. Return an AST Expr node. code: annotation string (excluding '@')
['Parse', 'an', 'annotation', 'string', '.', 'Return', 'an', 'AST', 'Expr', 'node', '.', 'code', ':', 'annotation', 'string', '(', 'excluding']
train
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/tools/nni_annotation/code_generator.py#L29-L38
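A self-contained check of the same assertions using only the standard library; the annotation string is a hypothetical example:

import ast

code = "nni.variable(nni.choice(2, 4, 8), name=batch_size)"
module = ast.parse(code)
assert isinstance(module, ast.Module)
assert len(module.body) == 1          # exactly one expression
expr = module.body[0]
assert isinstance(expr, ast.Expr)     # and it really is an expression
print(expr.value.func.attr)           # -> variable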
3,062
tanghaibao/goatools
goatools/cli/gosubdag_plot.py
PlotCli.get_outfile
def get_outfile(self, outfile, goids=None): """Return output file for GO Term plot.""" # 1. Use the user-specified output filename for the GO Term plot if outfile != self.dflt_outfile: return outfile # 2. If only plotting 1 GO term, use the GO id in the plot name if goids is not None and len(goids) == 1: goid = next(iter(goids)) goobj = self.gosubdag.go2obj[goid] fout = "GO_{NN}_{NM}".format(NN=goid.replace("GO:", ""), NM=goobj.name) return ".".join([re.sub(r"[\s#'()+,-./:<=>\[\]_}]", '_', fout), 'png']) # 3. Return default name return self.dflt_outfile
python
def get_outfile(self, outfile, goids=None): """Return output file for GO Term plot.""" # 1. Use the user-specified output filename for the GO Term plot if outfile != self.dflt_outfile: return outfile # 2. If only plotting 1 GO term, use the GO id in the plot name if goids is not None and len(goids) == 1: goid = next(iter(goids)) goobj = self.gosubdag.go2obj[goid] fout = "GO_{NN}_{NM}".format(NN=goid.replace("GO:", ""), NM=goobj.name) return ".".join([re.sub(r"[\s#'()+,-./:<=>\[\]_}]", '_', fout), 'png']) # 3. Return default name return self.dflt_outfile
['def', 'get_outfile', '(', 'self', ',', 'outfile', ',', 'goids', '=', 'None', ')', ':', '# 1. Use the user-specified output filename for the GO Term plot', 'if', 'outfile', '!=', 'self', '.', 'dflt_outfile', ':', 'return', 'outfile', '# 2. If only plotting 1 GO term, use the GO id in the plot name', 'if', 'goids', 'is', 'not', 'None', 'and', 'len', '(', 'goids', ')', '==', '1', ':', 'goid', '=', 'next', '(', 'iter', '(', 'goids', ')', ')', 'goobj', '=', 'self', '.', 'gosubdag', '.', 'go2obj', '[', 'goid', ']', 'fout', '=', '"GO_{NN}_{NM}"', '.', 'format', '(', 'NN', '=', 'goid', '.', 'replace', '(', '"GO:"', ',', '""', ')', ',', 'NM', '=', 'goobj', '.', 'name', ')', 'return', '"."', '.', 'join', '(', '[', 're', '.', 'sub', '(', 'r"[\\s#\'()+,-./:<=>\\[\\]_}]"', ',', "'_'", ',', 'fout', ')', ',', "'png'", ']', ')', '# 3. Return default name', 'return', 'self', '.', 'dflt_outfile']
Return output file for GO Term plot.
['Return', 'output', 'file', 'for', 'GO', 'Term', 'plot', '.']
train
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/cli/gosubdag_plot.py#L304-L316
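Branch 2 above (the single-GO-term case) is pure string handling and can be exercised standalone; the GO id below is arbitrary:

import re

def go_plot_filename(goid, name):
    # Build "GO_<number>_<name>.png" with unsafe characters collapsed
    # to underscores, exactly as in the method body.
    fout = "GO_{NN}_{NM}".format(NN=goid.replace("GO:", ""), NM=name)
    return ".".join([re.sub(r"[\s#'()+,-./:<=>\[\]_}]", '_', fout), 'png'])

print(go_plot_filename("GO:0008150", "biological_process"))
# -> GO_0008150_biological_process.png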
3,063
DarkEnergySurvey/ugali
ugali/observation/mask.py
Mask.completeness
def completeness(self, delta, method='step'): """ Return the completeness as a function of magnitude. ADW: Eventually want a completeness mask to set overall efficiency. """ delta = np.asarray(delta) if method == 'step': func = lambda delta: (delta > 0).astype(float) elif method == 'erf': # Trust the SDSS EDR??? # 95% completeness: def func(delta): # Efficiency at bright end (assumed to be 100%) e = 1.0 # EDR says full width is ~0.5 mag width = 0.2 # This should be the halfway point in the curve return (e/2.0)*(1/np.sqrt(2*width))*(np.sqrt(2*width)-scipy.special.erf(-delta)) elif method == 'flemming': # Functional form taken from Fleming et al. AJ 109, 1044 (1995) # http://adsabs.harvard.edu/abs/1995AJ....109.1044F # f = 1/2 [1 - alpha(V - Vlim)/sqrt(1 + alpha^2 (V - Vlim)^2)] # CAREFUL: This definition is for Vlim = 50% completeness def func(delta): alpha = 2.0 return 0.5 * (1 - (alpha * delta)/np.sqrt(1+alpha**2 * delta**2)) else: raise Exception('...') return func(delta)
python
def completeness(self, delta, method='step'): """ Return the completeness as a function of magnitude. ADW: Eventually want a completeness mask to set overall efficiency. """ delta = np.asarray(delta) if method == 'step': func = lambda delta: (delta > 0).astype(float) elif method == 'erf': # Trust the SDSS EDR??? # 95% completeness: def func(delta): # Efficiency at bright end (assumed to be 100%) e = 1.0 # EDR says full width is ~0.5 mag width = 0.2 # This should be the halfway point in the curve return (e/2.0)*(1/np.sqrt(2*width))*(np.sqrt(2*width)-scipy.special.erf(-delta)) elif method == 'flemming': # Functional form taken from Fleming et al. AJ 109, 1044 (1995) # http://adsabs.harvard.edu/abs/1995AJ....109.1044F # f = 1/2 [1 - alpha(V - Vlim)/sqrt(1 + alpha^2 (V - Vlim)^2)] # CAREFUL: This definition is for Vlim = 50% completeness def func(delta): alpha = 2.0 return 0.5 * (1 - (alpha * delta)/np.sqrt(1+alpha**2 * delta**2)) else: raise Exception('...') return func(delta)
['def', 'completeness', '(', 'self', ',', 'delta', ',', 'method', '=', "'step'", ')', ':', 'delta', '=', 'np', '.', 'asarray', '(', 'delta', ')', 'if', 'method', '==', "'step'", ':', 'func', '=', 'lambda', 'delta', ':', '(', 'delta', '>', '0', ')', '.', 'astype', '(', 'float', ')', 'elif', 'method', '==', "'erf'", ':', '# Trust the SDSS EDR???', '# 95% completeness: ', 'def', 'func', '(', 'delta', ')', ':', '# Efficiency at bright end (assumed to be 100%)', 'e', '=', '1.0', '# EDR says full width is ~0.5 mag', 'width', '=', '0.2', '# This should be the halfway point in the curve', 'return', '(', 'e', '/', '2.0', ')', '*', '(', '1', '/', 'np', '.', 'sqrt', '(', '2', '*', 'width', ')', ')', '*', '(', 'np', '.', 'sqrt', '(', '2', '*', 'width', ')', '-', 'scipy', '.', 'special', '.', 'erf', '(', '-', 'delta', ')', ')', 'elif', 'method', '==', "'flemming'", ':', '# Functional form taken from Fleming et al. AJ 109, 1044 (1995)', '# http://adsabs.harvard.edu/abs/1995AJ....109.1044F', '# f = 1/2 [1 - alpha(V - Vlim)/sqrt(1 + alpha^2 (V - Vlim)^2)]', '# CAREFUL: This definition is for Vlim = 50% completeness', 'def', 'func', '(', 'delta', ')', ':', 'alpha', '=', '2.0', 'return', '0.5', '*', '(', '1', '-', '(', 'alpha', '*', 'delta', ')', '/', 'np', '.', 'sqrt', '(', '1', '+', 'alpha', '**', '2', '*', 'delta', '**', '2', ')', ')', 'else', ':', 'raise', 'Exception', '(', "'...'", ')', 'return', 'func', '(', 'delta', ')']
Return the completeness as a function of magnitude. ADW: Eventually want a completeness mask to set overall efficiency.
['Return', 'the', 'completeness', 'as', 'a', 'function', 'of', 'magnitude', '.']
train
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/observation/mask.py#L297-L326
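A standalone transcription of the 'flemming' branch, runnable with numpy alone; delta is magnitude relative to the 50%-completeness limit:

import numpy as np

def fleming_completeness(delta, alpha=2.0):
    # Fleming et al. AJ 109, 1044 (1995):
    # f = 1/2 [1 - alpha*d / sqrt(1 + alpha^2 d^2)]
    delta = np.asarray(delta)
    return 0.5 * (1 - (alpha * delta) / np.sqrt(1 + alpha**2 * delta**2))

print(fleming_completeness(0.0))               # 0.5 at the limit, by construction
print(fleming_completeness([-2.0, 0.0, 2.0]))  # bright end -> ~1, faint end -> ~0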
3,064
9wfox/tornadoweb
tornadoweb/web.py
BaseHandler.signout
def signout(self, redirect_url = "/"): """ Sign out and clear the login state. Parameters: redirect_url URL to redirect to after signing out; if None, no redirect is performed (useful for Ajax requests). """ self.clear_cookie(self._USER_NAME) if redirect_url: self.redirect(redirect_url)
python
def signout(self, redirect_url = "/"): """ Sign out and clear the login state. Parameters: redirect_url URL to redirect to after signing out; if None, no redirect is performed (useful for Ajax requests). """ self.clear_cookie(self._USER_NAME) if redirect_url: self.redirect(redirect_url)
['def', 'signout', '(', 'self', ',', 'redirect_url', '=', '"/"', ')', ':', 'self', '.', 'clear_cookie', '(', 'self', '.', '_USER_NAME', ')', 'if', 'redirect_url', ':', 'self', '.', 'redirect', '(', 'redirect_url', ')']
Sign out and clear the login state. Parameters: redirect_url URL to redirect to after signing out; if None, no redirect is performed (useful for Ajax requests).
['Sign', 'out', 'and', 'clear', 'the', 'login', 'state', '.', 'Parameters', ':', 'redirect_url', 'URL', 'to', 'redirect', 'to', 'after', 'signing', 'out', ';', 'if', 'None', ',', 'no', 'redirect', 'is', 'performed', '(', 'useful', 'for', 'Ajax', 'requests', ')', '.']
train
https://github.com/9wfox/tornadoweb/blob/2286b66fbe10e4d9f212b979664c15fa17adf378/tornadoweb/web.py#L122-L130
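Two hypothetical handlers built on the class above, sketching both the redirecting and the Ajax-style call; the handler names and routes are illustrative only:

class LogoutHandler(BaseHandler):
    def get(self):
        # Clear the login cookie, then bounce to the home page.
        self.signout(redirect_url="/")

class LogoutAjaxHandler(BaseHandler):
    def post(self):
        # No redirect: the client decides what to do after logout.
        self.signout(redirect_url=None)
        self.write({"ok": True})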
3,065
CI-WATER/gsshapy
gsshapy/orm/wms_dataset.py
WMSDatasetFile.getAsKmlPngAnimation
def getAsKmlPngAnimation(self, session, projectFile=None, path=None, documentName=None, colorRamp=None, alpha=1.0, noDataValue=0, drawOrder=0, cellSize=None, resampleMethod='NearestNeighbour'): """ Retrieve the WMS dataset as a PNG time stamped KMZ Args: session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database. projectFile(:class:`gsshapy.orm.ProjectFile`): Project file object for the GSSHA project to which the WMS dataset belongs. path (str, optional): Path to file where KML file will be written. Defaults to None. documentName (str, optional): Name of the KML document. This will be the name that appears in the legend. Defaults to 'Stream Network'. colorRamp (:mod:`mapkit.ColorRampGenerator.ColorRampEnum` or dict, optional): Use ColorRampEnum to select a default color ramp or a dictionary with keys 'colors' and 'interpolatedPoints' to specify a custom color ramp. The 'colors' key must be a list of RGB integer tuples (e.g.: (255, 0, 0)) and the 'interpolatedPoints' must be an integer representing the number of points to interpolate between each color given in the colors list. alpha (float, optional): Set transparency of visualization. Value between 0.0 and 1.0 where 1.0 is 100% opaque and 0.0 is 100% transparent. Defaults to 1.0. noDataValue (float, optional): The value to treat as no data when generating visualizations of rasters. Defaults to 0.0. drawOrder (int, optional): Set the draw order of the images. Defaults to 0. cellSize (float, optional): Define the cell size in the units of the project projection at which to resample the raster to generate the PNG. Defaults to None which will cause the PNG to be generated with the original raster cell size. It is generally better to set this to a size smaller than the original cell size to obtain a higher resolution image. However, computation time increases exponentially as the cell size is decreased. resampleMethod (str, optional): If cellSize is set, this method will be used to resample the raster. Valid values include: NearestNeighbour, Bilinear, Cubic, CubicSpline, and Lanczos. Defaults to NearestNeighbour. Returns: (str, list): Returns a KML string and a list of binary strings that are the PNG images. """ # Prepare rasters timeStampedRasters = self._assembleRasterParams(projectFile, self.rasters) # Make sure the raster field is valid converter = RasterConverter(sqlAlchemyEngineOrSession=session) # Configure color ramp if isinstance(colorRamp, dict): converter.setCustomColorRamp(colorRamp['colors'], colorRamp['interpolatedPoints']) else: converter.setDefaultColorRamp(colorRamp) if documentName is None: documentName = self.fileExtension kmlString, binaryPngStrings = converter.getAsKmlPngAnimation(tableName=WMSDatasetRaster.tableName, timeStampedRasters=timeStampedRasters, rasterIdFieldName='id', rasterFieldName='raster', documentName=documentName, alpha=alpha, drawOrder=drawOrder, cellSize=cellSize, noDataValue=noDataValue, resampleMethod=resampleMethod) if path: directory = os.path.dirname(path) archiveName = (os.path.split(path)[1]).split('.')[0] kmzPath = os.path.join(directory, (archiveName + '.kmz')) with ZipFile(kmzPath, 'w') as kmz: kmz.writestr(archiveName + '.kml', kmlString) for index, binaryPngString in enumerate(binaryPngStrings): kmz.writestr('raster{0}.png'.format(index), binaryPngString) return kmlString, binaryPngStrings
python
def getAsKmlPngAnimation(self, session, projectFile=None, path=None, documentName=None, colorRamp=None, alpha=1.0, noDataValue=0, drawOrder=0, cellSize=None, resampleMethod='NearestNeighbour'): """ Retrieve the WMS dataset as a PNG time stamped KMZ Args: session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database. projectFile(:class:`gsshapy.orm.ProjectFile`): Project file object for the GSSHA project to which the WMS dataset belongs. path (str, optional): Path to file where KML file will be written. Defaults to None. documentName (str, optional): Name of the KML document. This will be the name that appears in the legend. Defaults to 'Stream Network'. colorRamp (:mod:`mapkit.ColorRampGenerator.ColorRampEnum` or dict, optional): Use ColorRampEnum to select a default color ramp or a dictionary with keys 'colors' and 'interpolatedPoints' to specify a custom color ramp. The 'colors' key must be a list of RGB integer tuples (e.g.: (255, 0, 0)) and the 'interpolatedPoints' must be an integer representing the number of points to interpolate between each color given in the colors list. alpha (float, optional): Set transparency of visualization. Value between 0.0 and 1.0 where 1.0 is 100% opaque and 0.0 is 100% transparent. Defaults to 1.0. noDataValue (float, optional): The value to treat as no data when generating visualizations of rasters. Defaults to 0.0. drawOrder (int, optional): Set the draw order of the images. Defaults to 0. cellSize (float, optional): Define the cell size in the units of the project projection at which to resample the raster to generate the PNG. Defaults to None which will cause the PNG to be generated with the original raster cell size. It is generally better to set this to a size smaller than the original cell size to obtain a higher resolution image. However, computation time increases exponentially as the cell size is decreased. resampleMethod (str, optional): If cellSize is set, this method will be used to resample the raster. Valid values include: NearestNeighbour, Bilinear, Cubic, CubicSpline, and Lanczos. Defaults to NearestNeighbour. Returns: (str, list): Returns a KML string and a list of binary strings that are the PNG images. """ # Prepare rasters timeStampedRasters = self._assembleRasterParams(projectFile, self.rasters) # Make sure the raster field is valid converter = RasterConverter(sqlAlchemyEngineOrSession=session) # Configure color ramp if isinstance(colorRamp, dict): converter.setCustomColorRamp(colorRamp['colors'], colorRamp['interpolatedPoints']) else: converter.setDefaultColorRamp(colorRamp) if documentName is None: documentName = self.fileExtension kmlString, binaryPngStrings = converter.getAsKmlPngAnimation(tableName=WMSDatasetRaster.tableName, timeStampedRasters=timeStampedRasters, rasterIdFieldName='id', rasterFieldName='raster', documentName=documentName, alpha=alpha, drawOrder=drawOrder, cellSize=cellSize, noDataValue=noDataValue, resampleMethod=resampleMethod) if path: directory = os.path.dirname(path) archiveName = (os.path.split(path)[1]).split('.')[0] kmzPath = os.path.join(directory, (archiveName + '.kmz')) with ZipFile(kmzPath, 'w') as kmz: kmz.writestr(archiveName + '.kml', kmlString) for index, binaryPngString in enumerate(binaryPngStrings): kmz.writestr('raster{0}.png'.format(index), binaryPngString) return kmlString, binaryPngStrings
['def', 'getAsKmlPngAnimation', '(', 'self', ',', 'session', ',', 'projectFile', '=', 'None', ',', 'path', '=', 'None', ',', 'documentName', '=', 'None', ',', 'colorRamp', '=', 'None', ',', 'alpha', '=', '1.0', ',', 'noDataValue', '=', '0', ',', 'drawOrder', '=', '0', ',', 'cellSize', '=', 'None', ',', 'resampleMethod', '=', "'NearestNeighbour'", ')', ':', '# Prepare rasters', 'timeStampedRasters', '=', 'self', '.', '_assembleRasterParams', '(', 'projectFile', ',', 'self', '.', 'rasters', ')', '# Make sure the raster field is valid', 'converter', '=', 'RasterConverter', '(', 'sqlAlchemyEngineOrSession', '=', 'session', ')', '# Configure color ramp', 'if', 'isinstance', '(', 'colorRamp', ',', 'dict', ')', ':', 'converter', '.', 'setCustomColorRamp', '(', 'colorRamp', '[', "'colors'", ']', ',', 'colorRamp', '[', "'interpolatedPoints'", ']', ')', 'else', ':', 'converter', '.', 'setDefaultColorRamp', '(', 'colorRamp', ')', 'if', 'documentName', 'is', 'None', ':', 'documentName', '=', 'self', '.', 'fileExtension', 'kmlString', ',', 'binaryPngStrings', '=', 'converter', '.', 'getAsKmlPngAnimation', '(', 'tableName', '=', 'WMSDatasetRaster', '.', 'tableName', ',', 'timeStampedRasters', '=', 'timeStampedRasters', ',', 'rasterIdFieldName', '=', "'id'", ',', 'rasterFieldName', '=', "'raster'", ',', 'documentName', '=', 'documentName', ',', 'alpha', '=', 'alpha', ',', 'drawOrder', '=', 'drawOrder', ',', 'cellSize', '=', 'cellSize', ',', 'noDataValue', '=', 'noDataValue', ',', 'resampleMethod', '=', 'resampleMethod', ')', 'if', 'path', ':', 'directory', '=', 'os', '.', 'path', '.', 'dirname', '(', 'path', ')', 'archiveName', '=', '(', 'os', '.', 'path', '.', 'split', '(', 'path', ')', '[', '1', ']', ')', '.', 'split', '(', "'.'", ')', '[', '0', ']', 'kmzPath', '=', 'os', '.', 'path', '.', 'join', '(', 'directory', ',', '(', 'archiveName', '+', "'.kmz'", ')', ')', 'with', 'ZipFile', '(', 'kmzPath', ',', "'w'", ')', 'as', 'kmz', ':', 'kmz', '.', 'writestr', '(', 'archiveName', '+', "'.kml'", ',', 'kmlString', ')', 'for', 'index', ',', 'binaryPngString', 'in', 'enumerate', '(', 'binaryPngStrings', ')', ':', 'kmz', '.', 'writestr', '(', "'raster{0}.png'", '.', 'format', '(', 'index', ')', ',', 'binaryPngString', ')', 'return', 'kmlString', ',', 'binaryPngStrings']
Retrieve the WMS dataset as a PNG time stamped KMZ Args: session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database. projectFile(:class:`gsshapy.orm.ProjectFile`): Project file object for the GSSHA project to which the WMS dataset belongs. path (str, optional): Path to file where KML file will be written. Defaults to None. documentName (str, optional): Name of the KML document. This will be the name that appears in the legend. Defaults to 'Stream Network'. colorRamp (:mod:`mapkit.ColorRampGenerator.ColorRampEnum` or dict, optional): Use ColorRampEnum to select a default color ramp or a dictionary with keys 'colors' and 'interpolatedPoints' to specify a custom color ramp. The 'colors' key must be a list of RGB integer tuples (e.g.: (255, 0, 0)) and the 'interpolatedPoints' must be an integer representing the number of points to interpolate between each color given in the colors list. alpha (float, optional): Set transparency of visualization. Value between 0.0 and 1.0 where 1.0 is 100% opaque and 0.0 is 100% transparent. Defaults to 1.0. noDataValue (float, optional): The value to treat as no data when generating visualizations of rasters. Defaults to 0.0. drawOrder (int, optional): Set the draw order of the images. Defaults to 0. cellSize (float, optional): Define the cell size in the units of the project projection at which to resample the raster to generate the PNG. Defaults to None which will cause the PNG to be generated with the original raster cell size. It is generally better to set this to a size smaller than the original cell size to obtain a higher resolution image. However, computation time increases exponentially as the cell size is decreased. resampleMethod (str, optional): If cellSize is set, this method will be used to resample the raster. Valid values include: NearestNeighbour, Bilinear, Cubic, CubicSpline, and Lanczos. Defaults to NearestNeighbour. Returns: (str, list): Returns a KML string and a list of binary strings that are the PNG images.
['Retrieve', 'the', 'WMS', 'dataset', 'as', 'a', 'PNG', 'time', 'stamped', 'KMZ']
train
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/wms_dataset.py#L236-L306
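The raster conversion needs a live PostGIS session, but the KMZ packaging at the tail of the method is self-contained; a standalone rendition:

import os
from zipfile import ZipFile

def write_kmz(path, kml_string, png_blobs):
    # One .kml document plus raster<N>.png entries, zipped into a .kmz
    # next to the requested path -- the same layout the method produces.
    directory = os.path.dirname(path)
    archive_name = os.path.basename(path).split('.')[0]
    kmz_path = os.path.join(directory, archive_name + '.kmz')
    with ZipFile(kmz_path, 'w') as kmz:
        kmz.writestr(archive_name + '.kml', kml_string)
        for index, blob in enumerate(png_blobs):
            kmz.writestr('raster{0}.png'.format(index), blob)
    return kmz_path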
3,066
vtkiorg/vtki
vtki/filters.py
DataSetFilters.select_enclosed_points
def select_enclosed_points(dataset, surface, tolerance=0.001, inside_out=False, check_surface=True): """Mark points as to whether they are inside a closed surface. This evaluates all the input points to determine whether they are in an enclosed surface. The filter produces a (0,1) mask (in the form of a vtkDataArray) that indicates whether points are outside (mask value=0) or inside (mask value=1) a provided surface. (The name of the output vtkDataArray is "SelectedPointsArray".) The filter assumes that the surface is closed and manifold. A boolean flag can be set to force the filter to first check whether this is true. If false, all points will be marked outside. Note that if this check is not performed and the surface is not closed, the results are undefined. This filter produces an output data array, but does not modify the input dataset. If you wish to extract cells or points, various threshold filters are available (i.e., threshold the output array). Parameters ---------- surface : vtki.PolyData Set the surface to be used to test for containment. This must be a :class:`vtki.PolyData` object. tolerance : float The tolerance on the intersection. The tolerance is expressed as a fraction of the bounding box of the enclosing surface. inside_out : bool By default, points inside the surface are marked inside or sent to the output. If ``inside_out`` is ``True``, then the points outside the surface are marked inside. check_surface : bool Specify whether to check the surface for closure. If on, then the algorithm first checks to see if the surface is closed and manifold. """ alg = vtk.vtkSelectEnclosedPoints() alg.SetInputData(dataset) alg.SetSurfaceData(surface) alg.SetTolerance(tolerance) alg.SetCheckSurface(check_surface) alg.Update() return _get_output(alg)
python
def select_enclosed_points(dataset, surface, tolerance=0.001, inside_out=False, check_surface=True): """Mark points as to whether they are inside a closed surface. This evaluates all the input points to determine whether they are in an enclosed surface. The filter produces a (0,1) mask (in the form of a vtkDataArray) that indicates whether points are outside (mask value=0) or inside (mask value=1) a provided surface. (The name of the output vtkDataArray is "SelectedPointsArray".) The filter assumes that the surface is closed and manifold. A boolean flag can be set to force the filter to first check whether this is true. If false, all points will be marked outside. Note that if this check is not performed and the surface is not closed, the results are undefined. This filter produces an output data array, but does not modify the input dataset. If you wish to extract cells or points, various threshold filters are available (i.e., threshold the output array). Parameters ---------- surface : vtki.PolyData Set the surface to be used to test for containment. This must be a :class:`vtki.PolyData` object. tolerance : float The tolerance on the intersection. The tolerance is expressed as a fraction of the bounding box of the enclosing surface. inside_out : bool By default, points inside the surface are marked inside or sent to the output. If ``inside_out`` is ``True``, then the points outside the surface are marked inside. check_surface : bool Specify whether to check the surface for closure. If on, then the algorithm first checks to see if the surface is closed and manifold. """ alg = vtk.vtkSelectEnclosedPoints() alg.SetInputData(dataset) alg.SetSurfaceData(surface) alg.SetTolerance(tolerance) alg.SetCheckSurface(check_surface) alg.Update() return _get_output(alg)
['def', 'select_enclosed_points', '(', 'dataset', ',', 'surface', ',', 'tolerance', '=', '0.001', ',', 'inside_out', '=', 'False', ',', 'check_surface', '=', 'True', ')', ':', 'alg', '=', 'vtk', '.', 'vtkSelectEnclosedPoints', '(', ')', 'alg', '.', 'SetInputData', '(', 'dataset', ')', 'alg', '.', 'SetSurfaceData', '(', 'surface', ')', 'alg', '.', 'SetTolerance', '(', 'tolerance', ')', 'alg', '.', 'SetCheckSurface', '(', 'check_surface', ')', 'alg', '.', 'Update', '(', ')', 'return', '_get_output', '(', 'alg', ')']
Mark points as to whether they are inside a closed surface. This evaluates all the input points to determine whether they are in an enclosed surface. The filter produces a (0,1) mask (in the form of a vtkDataArray) that indicates whether points are outside (mask value=0) or inside (mask value=1) a provided surface. (The name of the output vtkDataArray is "SelectedPointsArray".) The filter assumes that the surface is closed and manifold. A boolean flag can be set to force the filter to first check whether this is true. If false, all points will be marked outside. Note that if this check is not performed and the surface is not closed, the results are undefined. This filter produces an output data array, but does not modify the input dataset. If you wish to extract cells or points, various threshold filters are available (i.e., threshold the output array). Parameters ---------- surface : vtki.PolyData Set the surface to be used to test for containment. This must be a :class:`vtki.PolyData` object. tolerance : float The tolerance on the intersection. The tolerance is expressed as a fraction of the bounding box of the enclosing surface. inside_out : bool By default, points inside the surface are marked inside or sent to the output. If ``inside_out`` is ``True``, then the points outside the surface are marked inside. check_surface : bool Specify whether to check the surface for closure. If on, then the algorithm first checks to see if the surface is closed and manifold.
['Mark', 'points', 'as', 'to', 'whether', 'they', 'are', 'inside', 'a', 'closed', 'surface', '.', 'This', 'evaluates', 'all', 'the', 'input', 'points', 'to', 'determine', 'whether', 'they', 'are', 'in', 'an', 'enclosed', 'surface', '.', 'The', 'filter', 'produces', 'a', '(', '0', '1', ')', 'mask', '(', 'in', 'the', 'form', 'of', 'a', 'vtkDataArray', ')', 'that', 'indicates', 'whether', 'points', 'are', 'outside', '(', 'mask', 'value', '=', '0', ')', 'or', 'inside', '(', 'mask', 'value', '=', '1', ')', 'a', 'provided', 'surface', '.', '(', 'The', 'name', 'of', 'the', 'output', 'vtkDataArray', 'is', 'SelectedPointsArray', '.', ')']
train
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/filters.py#L943-L988
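The wrapper delegates to VTK; the same call sequence in plain vtk, assuming the vtk package is installed (the point count and radii below are arbitrary):

import vtk

surface = vtk.vtkSphereSource()      # closed unit sphere as the test surface
surface.SetRadius(1.0)
surface.Update()

cloud = vtk.vtkPointSource()         # 50 random points within radius 2
cloud.SetNumberOfPoints(50)
cloud.SetRadius(2.0)
cloud.Update()

select = vtk.vtkSelectEnclosedPoints()
select.SetInputData(cloud.GetOutput())
select.SetSurfaceData(surface.GetOutput())
select.SetTolerance(0.001)
select.Update()

inside = [i for i in range(cloud.GetOutput().GetNumberOfPoints())
          if select.IsInside(i)]
print(len(inside), "of 50 points fall inside the sphere")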
3,067
offu/WeRoBot
werobot/pay.py
WeixinPayClient.pay_deliver_notify
def pay_deliver_notify(self, **deliver_info): """ Notify Tencent of delivery. Typical usage :: wxclient.pay_delivernotify( openid=openid, transid=transaction_id, out_trade_no=local_order_no, deliver_timestamp=int(time.time()), deliver_status="1", deliver_msg="ok" ) :param deliver_info: the parameters that need to be signed :return: the object required for the payment call """ params, sign, _ = self._pay_sign_dict( add_noncestr=False, add_timestamp=False, **deliver_info ) params['app_signature'] = sign params['sign_method'] = 'sha1' return self.post( url="https://api.weixin.qq.com/pay/delivernotify", data=params )
python
def pay_deliver_notify(self, **deliver_info): """ Notify Tencent of delivery. Typical usage :: wxclient.pay_delivernotify( openid=openid, transid=transaction_id, out_trade_no=local_order_no, deliver_timestamp=int(time.time()), deliver_status="1", deliver_msg="ok" ) :param deliver_info: the parameters that need to be signed :return: the object required for the payment call """ params, sign, _ = self._pay_sign_dict( add_noncestr=False, add_timestamp=False, **deliver_info ) params['app_signature'] = sign params['sign_method'] = 'sha1' return self.post( url="https://api.weixin.qq.com/pay/delivernotify", data=params )
['def', 'pay_deliver_notify', '(', 'self', ',', '*', '*', 'deliver_info', ')', ':', 'params', ',', 'sign', ',', '_', '=', 'self', '.', '_pay_sign_dict', '(', 'add_noncestr', '=', 'False', ',', 'add_timestamp', '=', 'False', ',', '*', '*', 'deliver_info', ')', 'params', '[', "'app_signature'", ']', '=', 'sign', 'params', '[', "'sign_method'", ']', '=', "'sha1'", 'return', 'self', '.', 'post', '(', 'url', '=', '"https://api.weixin.qq.com/pay/delivernotify"', ',', 'data', '=', 'params', ')']
Notify Tencent of delivery. Typical usage :: wxclient.pay_delivernotify( openid=openid, transid=transaction_id, out_trade_no=local_order_no, deliver_timestamp=int(time.time()), deliver_status="1", deliver_msg="ok" ) :param deliver_info: the parameters that need to be signed :return: the object required for the payment call
['Notify', 'Tencent', 'of', 'delivery', '.']
train
https://github.com/offu/WeRoBot/blob/fd42109105b03f9acf45ebd9dcabb9d5cff98f3c/werobot/pay.py#L140-L166
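A usage sketch adapted from the docstring; every value is a placeholder, and wxclient stands for an already-constructed WeixinPayClient:

import time

result = wxclient.pay_deliver_notify(
    openid="OPENID_FROM_CALLBACK",
    transid="WEIXIN_TRANSACTION_ID",
    out_trade_no="LOCAL_ORDER_NO",
    deliver_timestamp=int(time.time()),
    deliver_status="1",     # "1" = delivered successfully
    deliver_msg="ok",
)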
3,068
quantumlib/Cirq
cirq/circuits/_block_diagram_drawer.py
Block.draw_curve
def draw_curve(self, grid_characters: BoxDrawCharacterSet, *, top: bool = False, left: bool = False, right: bool = False, bottom: bool = False, crossing_char: Optional[str] = None): """Draws lines in the box using the given character set. Supports merging the new lines with the lines from a previous call to draw_curve, including when they have different character sets (assuming there exist characters merging the two). Args: grid_characters: The character set to draw the curve with. top: Draw topward leg? left: Draw leftward leg? right: Draw rightward leg? bottom: Draw downward leg? crossing_char: Overrides the all-legs-present character. Useful for ascii diagrams, where the + doesn't always look the clearest. """ if not any([top, left, right, bottom]): return # Remember which legs are new, old, or missing. sign_top = +1 if top else -1 if self.top else 0 sign_bottom = +1 if bottom else -1 if self.bottom else 0 sign_left = +1 if left else -1 if self.left else 0 sign_right = +1 if right else -1 if self.right else 0 # Add new segments. if top: self.top = grid_characters.top_bottom if bottom: self.bottom = grid_characters.top_bottom if left: self.left = grid_characters.left_right if right: self.right = grid_characters.left_right # Fill center. if not all([crossing_char, self.top, self.bottom, self.left, self.right]): crossing_char = box_draw_character( self._prev_curve_grid_chars, grid_characters, top=sign_top, bottom=sign_bottom, left=sign_left, right=sign_right) self.center = crossing_char or '' self._prev_curve_grid_chars = grid_characters
python
def draw_curve(self, grid_characters: BoxDrawCharacterSet, *, top: bool = False, left: bool = False, right: bool = False, bottom: bool = False, crossing_char: Optional[str] = None): """Draws lines in the box using the given character set. Supports merging the new lines with the lines from a previous call to draw_curve, including when they have different character sets (assuming there exist characters merging the two). Args: grid_characters: The character set to draw the curve with. top: Draw topward leg? left: Draw leftward leg? right: Draw rightward leg? bottom: Draw downward leg? crossing_char: Overrides the all-legs-present character. Useful for ascii diagrams, where the + doesn't always look the clearest. """ if not any([top, left, right, bottom]): return # Remember which legs are new, old, or missing. sign_top = +1 if top else -1 if self.top else 0 sign_bottom = +1 if bottom else -1 if self.bottom else 0 sign_left = +1 if left else -1 if self.left else 0 sign_right = +1 if right else -1 if self.right else 0 # Add new segments. if top: self.top = grid_characters.top_bottom if bottom: self.bottom = grid_characters.top_bottom if left: self.left = grid_characters.left_right if right: self.right = grid_characters.left_right # Fill center. if not all([crossing_char, self.top, self.bottom, self.left, self.right]): crossing_char = box_draw_character( self._prev_curve_grid_chars, grid_characters, top=sign_top, bottom=sign_bottom, left=sign_left, right=sign_right) self.center = crossing_char or '' self._prev_curve_grid_chars = grid_characters
['def', 'draw_curve', '(', 'self', ',', 'grid_characters', ':', 'BoxDrawCharacterSet', ',', '*', ',', 'top', ':', 'bool', '=', 'False', ',', 'left', ':', 'bool', '=', 'False', ',', 'right', ':', 'bool', '=', 'False', ',', 'bottom', ':', 'bool', '=', 'False', ',', 'crossing_char', ':', 'Optional', '[', 'str', ']', '=', 'None', ')', ':', 'if', 'not', 'any', '(', '[', 'top', ',', 'left', ',', 'right', ',', 'bottom', ']', ')', ':', 'return', '# Remember which legs are new, old, or missing.', 'sign_top', '=', '+', '1', 'if', 'top', 'else', '-', '1', 'if', 'self', '.', 'top', 'else', '0', 'sign_bottom', '=', '+', '1', 'if', 'bottom', 'else', '-', '1', 'if', 'self', '.', 'bottom', 'else', '0', 'sign_left', '=', '+', '1', 'if', 'left', 'else', '-', '1', 'if', 'self', '.', 'left', 'else', '0', 'sign_right', '=', '+', '1', 'if', 'right', 'else', '-', '1', 'if', 'self', '.', 'right', 'else', '0', '# Add new segments.', 'if', 'top', ':', 'self', '.', 'top', '=', 'grid_characters', '.', 'top_bottom', 'if', 'bottom', ':', 'self', '.', 'bottom', '=', 'grid_characters', '.', 'top_bottom', 'if', 'left', ':', 'self', '.', 'left', '=', 'grid_characters', '.', 'left_right', 'if', 'right', ':', 'self', '.', 'right', '=', 'grid_characters', '.', 'left_right', '# Fill center.', 'if', 'not', 'all', '(', '[', 'crossing_char', ',', 'self', '.', 'top', ',', 'self', '.', 'bottom', ',', 'self', '.', 'left', ',', 'self', '.', 'right', ']', ')', ':', 'crossing_char', '=', 'box_draw_character', '(', 'self', '.', '_prev_curve_grid_chars', ',', 'grid_characters', ',', 'top', '=', 'sign_top', ',', 'bottom', '=', 'sign_bottom', ',', 'left', '=', 'sign_left', ',', 'right', '=', 'sign_right', ')', 'self', '.', 'center', '=', 'crossing_char', 'or', "''", 'self', '.', '_prev_curve_grid_chars', '=', 'grid_characters']
Draws lines in the box using the given character set. Supports merging the new lines with the lines from a previous call to draw_curve, including when they have different character sets (assuming there exist characters merging the two). Args: grid_characters: The character set to draw the curve with. top: Draw topward leg? left: Draw leftward leg? right: Draw rightward leg? bottom: Draw downward leg? crossing_char: Overrides the all-legs-present character. Useful for ascii diagrams, where the + doesn't always look the clearest.
['Draws', 'lines', 'in', 'the', 'box', 'using', 'the', 'given', 'character', 'set', '.']
train
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/circuits/_block_diagram_drawer.py#L53-L107
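The merging logic hinges on a three-way sign per leg; that encoding in isolation, with a box-drawing character standing in for a previously drawn segment:

def leg_sign(requested, existing):
    # +1: leg drawn by this call; -1: leg only from an earlier call;
    #  0: leg absent in both. box_draw_character consumes these signs.
    return +1 if requested else -1 if existing else 0

assert leg_sign(True, '') == +1    # newly requested
assert leg_sign(False, '─') == -1  # carried over from a previous draw_curve
assert leg_sign(False, '') == 0    # never drawn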
3,069
ClimateImpactLab/DataFS
datafs/managers/manager.py
BaseDataManager.add_tags
def add_tags(self, archive_name, tags): ''' Add tags to an archive Parameters ---------- archive_name: str Name of archive tags: list or tuple of strings tags to add to the archive ''' updated_tag_list = list(self._get_tags(archive_name)) for tag in tags: if tag not in updated_tag_list: updated_tag_list.append(tag) self._set_tags(archive_name, updated_tag_list)
python
def add_tags(self, archive_name, tags): ''' Add tags to an archive Parameters ---------- archive_name: str Name of archive tags: list or tuple of strings tags to add to the archive ''' updated_tag_list = list(self._get_tags(archive_name)) for tag in tags: if tag not in updated_tag_list: updated_tag_list.append(tag) self._set_tags(archive_name, updated_tag_list)
['def', 'add_tags', '(', 'self', ',', 'archive_name', ',', 'tags', ')', ':', 'updated_tag_list', '=', 'list', '(', 'self', '.', '_get_tags', '(', 'archive_name', ')', ')', 'for', 'tag', 'in', 'tags', ':', 'if', 'tag', 'not', 'in', 'updated_tag_list', ':', 'updated_tag_list', '.', 'append', '(', 'tag', ')', 'self', '.', '_set_tags', '(', 'archive_name', ',', 'updated_tag_list', ')']
Add tags to an archive Parameters ---------- archive_name: str Name of archive tags: list or tuple of strings tags to add to the archive
['Add', 'tags', 'to', 'an', 'archive']
train
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/managers/manager.py#L451-L469
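The update itself is an order-preserving union; standalone, with hypothetical tag values:

def merged_tags(existing, new):
    # Existing tags keep their order; genuinely new ones are appended
    # in the order given, duplicates dropped -- as in add_tags above.
    updated = list(existing)
    for tag in new:
        if tag not in updated:
            updated.append(tag)
    return updated

print(merged_tags(('climate', 'raw'), ['raw', 'v2']))   # ['climate', 'raw', 'v2']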
3,070
inveniosoftware/invenio-accounts
invenio_accounts/sessions.py
add_session
def add_session(session=None): r"""Add a session to the SessionActivity table. :param session: Flask Session object to add. If None, ``session`` is used. The object is expected to have a dictionary entry named ``"user_id"`` and a field ``sid_s`` """ user_id, sid_s = session['user_id'], session.sid_s with db.session.begin_nested(): session_activity = SessionActivity( user_id=user_id, sid_s=sid_s, ip=request.remote_addr, country=_ip2country(request.remote_addr), **_extract_info_from_useragent( request.headers.get('User-Agent', '') ) ) db.session.merge(session_activity)
python
def add_session(session=None): r"""Add a session to the SessionActivity table. :param session: Flask Session object to add. If None, ``session`` is used. The object is expected to have a dictionary entry named ``"user_id"`` and a field ``sid_s`` """ user_id, sid_s = session['user_id'], session.sid_s with db.session.begin_nested(): session_activity = SessionActivity( user_id=user_id, sid_s=sid_s, ip=request.remote_addr, country=_ip2country(request.remote_addr), **_extract_info_from_useragent( request.headers.get('User-Agent', '') ) ) db.session.merge(session_activity)
['def', 'add_session', '(', 'session', '=', 'None', ')', ':', 'user_id', ',', 'sid_s', '=', 'session', '[', "'user_id'", ']', ',', 'session', '.', 'sid_s', 'with', 'db', '.', 'session', '.', 'begin_nested', '(', ')', ':', 'session_activity', '=', 'SessionActivity', '(', 'user_id', '=', 'user_id', ',', 'sid_s', '=', 'sid_s', ',', 'ip', '=', 'request', '.', 'remote_addr', ',', 'country', '=', '_ip2country', '(', 'request', '.', 'remote_addr', ')', ',', '*', '*', '_extract_info_from_useragent', '(', 'request', '.', 'headers', '.', 'get', '(', "'User-Agent'", ',', "''", ')', ')', ')', 'db', '.', 'session', '.', 'merge', '(', 'session_activity', ')']
r"""Add a session to the SessionActivity table. :param session: Flask Session object to add. If None, ``session`` is used. The object is expected to have a dictionary entry named ``"user_id"`` and a field ``sid_s``
['r', 'Add', 'a', 'session', 'to', 'the', 'SessionActivity', 'table', '.']
train
https://github.com/inveniosoftware/invenio-accounts/blob/b0d2f0739b00dbefea22ca15d7d374a1b4a63aec/invenio_accounts/sessions.py#L49-L67
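The Flask plumbing aside, the storage idiom is begin_nested plus merge, which upserts on the primary key; a minimal standalone demonstration assuming SQLAlchemy 1.4+:

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Activity(Base):
    __tablename__ = 'activity'
    sid_s = Column(String, primary_key=True)   # session id, as above
    user_id = Column(Integer)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    with session.begin_nested():
        # merge() updates the existing row when the key repeats,
        # so re-recording the same session id never raises.
        session.merge(Activity(sid_s='abc-1', user_id=7))
        session.merge(Activity(sid_s='abc-1', user_id=8))
    session.commit()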
3,071
JasonKessler/scattertext
scattertext/TermDocMatrix.py
TermDocMatrix.get_term_freq_df
def get_term_freq_df(self, label_append=' freq'): ''' Parameters ------- label_append : str Returns ------- pd.DataFrame indexed on terms, with columns giving frequencies for each category ''' ''' row = self._row_category_ids() newX = csr_matrix((self._X.data, (row, self._X.indices))) return self._term_freq_df_from_matrix(newX) ''' mat = self.get_term_freq_mat() return pd.DataFrame(mat, index=pd.Series(self.get_terms(), name='term'), columns=[c + label_append for c in self.get_categories()])
python
def get_term_freq_df(self, label_append=' freq'): ''' Parameters ------- label_append : str Returns ------- pd.DataFrame indexed on terms, with columns giving frequencies for each category ''' ''' row = self._row_category_ids() newX = csr_matrix((self._X.data, (row, self._X.indices))) return self._term_freq_df_from_matrix(newX) ''' mat = self.get_term_freq_mat() return pd.DataFrame(mat, index=pd.Series(self.get_terms(), name='term'), columns=[c + label_append for c in self.get_categories()])
['def', 'get_term_freq_df', '(', 'self', ',', 'label_append', '=', "' freq'", ')', ':', "'''\n row = self._row_category_ids()\n newX = csr_matrix((self._X.data, (row, self._X.indices)))\n return self._term_freq_df_from_matrix(newX)\n '''", 'mat', '=', 'self', '.', 'get_term_freq_mat', '(', ')', 'return', 'pd', '.', 'DataFrame', '(', 'mat', ',', 'index', '=', 'pd', '.', 'Series', '(', 'self', '.', 'get_terms', '(', ')', ',', 'name', '=', "'term'", ')', ',', 'columns', '=', '[', 'c', '+', 'label_append', 'for', 'c', 'in', 'self', '.', 'get_categories', '(', ')', ']', ')']
Parameters ------- label_append : str Returns ------- pd.DataFrame indexed on terms, with columns giving frequencies for each category
['Parameters', '-------', 'label_append', ':', 'str']
train
https://github.com/JasonKessler/scattertext/blob/cacf1f687d218ee8cae3fc05cc901db824bb1b81/scattertext/TermDocMatrix.py#L87-L106
3,072
HiPERCAM/hcam_widgets
hcam_widgets/hcam.py
RunPars.loadJSON
def loadJSON(self, json_string):
    """
    Sets the values of the run parameters given a JSON string
    """
    g = get_root(self).globals
    user = json.loads(json_string)['user']

    def setField(widget, field):
        val = user.get(field)
        if val is not None:
            widget.set(val)

    setField(self.prog_ob.obid, 'OB')
    setField(self.target, 'target')
    setField(self.prog_ob.progid, 'ID')
    setField(self.pi, 'PI')
    setField(self.observers, 'Observers')
    setField(self.comment, 'comment')
    setField(self.filter, 'filters')
    setField(g.observe.rtype, 'flags')
python
['def', 'loadJSON', '(', 'self', ',', 'json_string', ')', ':', 'g', '=', 'get_root', '(', 'self', ')', '.', 'globals', 'user', '=', 'json', '.', 'loads', '(', 'json_string', ')', '[', "'user'", ']', 'def', 'setField', '(', 'widget', ',', 'field', ')', ':', 'val', '=', 'user', '.', 'get', '(', 'field', ')', 'if', 'val', 'is', 'not', 'None', ':', 'widget', '.', 'set', '(', 'val', ')', 'setField', '(', 'self', '.', 'prog_ob', '.', 'obid', ',', "'OB'", ')', 'setField', '(', 'self', '.', 'target', ',', "'target'", ')', 'setField', '(', 'self', '.', 'prog_ob', '.', 'progid', ',', "'ID'", ')', 'setField', '(', 'self', '.', 'pi', ',', "'PI'", ')', 'setField', '(', 'self', '.', 'observers', ',', "'Observers'", ')', 'setField', '(', 'self', '.', 'comment', ',', "'comment'", ')', 'setField', '(', 'self', '.', 'filter', ',', "'filters'", ')', 'setField', '(', 'g', '.', 'observe', '.', 'rtype', ',', "'flags'", ')']
Sets the values of the run parameters given a JSON string
['Sets', 'the', 'values', 'of', 'the', 'run', 'parameters', 'given', 'an', 'JSON', 'string']
train
https://github.com/HiPERCAM/hcam_widgets/blob/7219f0d96dd3a8ebe3139c7f542a72c02d02fce8/hcam_widgets/hcam.py#L1075-L1094
3,073
scanny/python-pptx
pptx/text/fonts.py
_NameTable._read_name_text
def _read_name_text(
        self, bufr, platform_id, encoding_id, strings_offset,
        name_str_offset, length):
    """
    Return the unicode name string at *name_str_offset* or |None| if
    decoding its format is not supported.
    """
    raw_name = self._raw_name_string(
        bufr, strings_offset, name_str_offset, length
    )
    return self._decode_name(raw_name, platform_id, encoding_id)
python
['def', '_read_name_text', '(', 'self', ',', 'bufr', ',', 'platform_id', ',', 'encoding_id', ',', 'strings_offset', ',', 'name_str_offset', ',', 'length', ')', ':', 'raw_name', '=', 'self', '.', '_raw_name_string', '(', 'bufr', ',', 'strings_offset', ',', 'name_str_offset', ',', 'length', ')', 'return', 'self', '.', '_decode_name', '(', 'raw_name', ',', 'platform_id', ',', 'encoding_id', ')']
Return the unicode name string at *name_str_offset* or |None| if decoding its format is not supported.
['Return', 'the', 'unicode', 'name', 'string', 'at', '*', 'name_str_offset', '*', 'or', '|None|', 'if', 'decoding', 'its', 'format', 'is', 'not', 'supported', '.']
train
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/text/fonts.py#L376-L386
3,074
ThreatConnect-Inc/tcex
tcex/tcex_bin_run.py
TcExRun.data_not_in
def data_not_in(db_data, user_data):
    """Validate data not in user data.

    Args:
        db_data (str): The data store in Redis.
        user_data (list): The user provided data.

    Returns:
        bool: True if the data passed validation.
    """
    if isinstance(user_data, list):
        if db_data not in user_data:
            return True
    return False
python
['def', 'data_not_in', '(', 'db_data', ',', 'user_data', ')', ':', 'if', 'isinstance', '(', 'user_data', ',', 'list', ')', ':', 'if', 'db_data', 'not', 'in', 'user_data', ':', 'return', 'True', 'return', 'False']
Validate data not in user data.

Args:
    db_data (str): The data store in Redis.
    user_data (list): The user provided data.

Returns:
    bool: True if the data passed validation.
['Validate', 'data', 'not', 'in', 'user', 'data', '.']
train
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_bin_run.py#L445-L458
3,075
Microsoft/nni
examples/trials/weight_sharing/ga_squad/data.py
WhitespaceTokenizer.tokenize
def tokenize(self, text):
    '''
    tokenize function in Tokenizer.
    '''
    start = -1
    tokens = []
    for i, character in enumerate(text):
        if character == ' ' or character == '\t':
            if start >= 0:
                word = text[start:i]
                tokens.append({
                    'word': word,
                    'original_text': word,
                    'char_begin': start,
                    'char_end': i})
                start = -1
        else:
            if start < 0:
                start = i
    if start >= 0:
        tokens.append({
            'word': text[start:len(text)],
            'original_text': text[start:len(text)],
            'char_begin': start,
            'char_end': len(text)
        })
    return tokens
python
['def', 'tokenize', '(', 'self', ',', 'text', ')', ':', 'start', '=', '-', '1', 'tokens', '=', '[', ']', 'for', 'i', ',', 'character', 'in', 'enumerate', '(', 'text', ')', ':', 'if', 'character', '==', "' '", 'or', 'character', '==', "'\\t'", ':', 'if', 'start', '>=', '0', ':', 'word', '=', 'text', '[', 'start', ':', 'i', ']', 'tokens', '.', 'append', '(', '{', "'word'", ':', 'word', ',', "'original_text'", ':', 'word', ',', "'char_begin'", ':', 'start', ',', "'char_end'", ':', 'i', '}', ')', 'start', '=', '-', '1', 'else', ':', 'if', 'start', '<', '0', ':', 'start', '=', 'i', 'if', 'start', '>=', '0', ':', 'tokens', '.', 'append', '(', '{', "'word'", ':', 'text', '[', 'start', ':', 'len', '(', 'text', ')', ']', ',', "'original_text'", ':', 'text', '[', 'start', ':', 'len', '(', 'text', ')', ']', ',', "'char_begin'", ':', 'start', ',', "'char_end'", ':', 'len', '(', 'text', ')', '}', ')', 'return', 'tokens']
tokenize function in Tokenizer.
['tokenize', 'function', 'in', 'Tokenizer', '.']
train
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/examples/trials/weight_sharing/ga_squad/data.py#L38-L64
3,076
scott-griffiths/bitstring
bitstring.py
Bits._readbytes
def _readbytes(self, length, start):
    """Read bytes and return them. Note that length is in bits."""
    assert length % 8 == 0
    assert start + length <= self.len
    if not (start + self._offset) % 8:
        return bytes(self._datastore.getbyteslice((start + self._offset) // 8,
                                                  (start + self._offset + length) // 8))
    return self._slice(start, start + length).tobytes()
python
['def', '_readbytes', '(', 'self', ',', 'length', ',', 'start', ')', ':', 'assert', 'length', '%', '8', '==', '0', 'assert', 'start', '+', 'length', '<=', 'self', '.', 'len', 'if', 'not', '(', 'start', '+', 'self', '.', '_offset', ')', '%', '8', ':', 'return', 'bytes', '(', 'self', '.', '_datastore', '.', 'getbyteslice', '(', '(', 'start', '+', 'self', '.', '_offset', ')', '//', '8', ',', '(', 'start', '+', 'self', '.', '_offset', '+', 'length', ')', '//', '8', ')', ')', 'return', 'self', '.', '_slice', '(', 'start', ',', 'start', '+', 'length', ')', '.', 'tobytes', '(', ')']
Read bytes and return them. Note that length is in bits.
['Read', 'bytes', 'and', 'return', 'them', '.', 'Note', 'that', 'length', 'is', 'in', 'bits', '.']
train
https://github.com/scott-griffiths/bitstring/blob/ab40ae7f0b43fe223a39b63cbc0529b09f3ef653/bitstring.py#L1335-L1342
3,077
shexSpec/grammar
parsers/python/pyshexc/parser_impl/shex_oneofshape_parser.py
ShexOneOfShapeParser.visitSenseFlags
def visitSenseFlags(self, ctx: ShExDocParser.SenseFlagsContext):
    """ '!' '^'? | '^' '!'? """
    if '!' in ctx.getText():
        self.expression.negated = True
    if '^' in ctx.getText():
        self.expression.inverse = True
python
['def', 'visitSenseFlags', '(', 'self', ',', 'ctx', ':', 'ShExDocParser', '.', 'SenseFlagsContext', ')', ':', 'if', "'!'", 'in', 'ctx', '.', 'getText', '(', ')', ':', 'self', '.', 'expression', '.', 'negated', '=', 'True', 'if', "'^'", 'in', 'ctx', '.', 'getText', '(', ')', ':', 'self', '.', 'expression', '.', 'inverse', '=', 'True']
'!' '^'? | '^' '!'?
['!', '^', '?', '|', '^', '!', '?']
train
https://github.com/shexSpec/grammar/blob/4497cd1f73fa6703bca6e2cb53ba9c120f22e48c/parsers/python/pyshexc/parser_impl/shex_oneofshape_parser.py#L114-L119
3,078
HiPERCAM/hcam_widgets
hcam_widgets/hcam.py
Observe.setExpertLevel
def setExpertLevel(self):
    """
    Set expert level
    """
    g = get_root(self).globals
    level = g.cpars['expert_level']

    # now set whether buttons are permanently enabled or not
    if level == 0 or level == 1:
        self.load.setNonExpert()
        self.save.setNonExpert()
        self.unfreeze.setNonExpert()
        self.start.setNonExpert()
        self.stop.setNonExpert()

    elif level == 2:
        self.load.setExpert()
        self.save.setExpert()
        self.unfreeze.setExpert()
        self.start.setExpert()
        self.stop.setExpert()
python
['def', 'setExpertLevel', '(', 'self', ')', ':', 'g', '=', 'get_root', '(', 'self', ')', '.', 'globals', 'level', '=', 'g', '.', 'cpars', '[', "'expert_level'", ']', '# now set whether buttons are permanently enabled or not', 'if', 'level', '==', '0', 'or', 'level', '==', '1', ':', 'self', '.', 'load', '.', 'setNonExpert', '(', ')', 'self', '.', 'save', '.', 'setNonExpert', '(', ')', 'self', '.', 'unfreeze', '.', 'setNonExpert', '(', ')', 'self', '.', 'start', '.', 'setNonExpert', '(', ')', 'self', '.', 'stop', '.', 'setNonExpert', '(', ')', 'elif', 'level', '==', '2', ':', 'self', '.', 'load', '.', 'setExpert', '(', ')', 'self', '.', 'save', '.', 'setExpert', '(', ')', 'self', '.', 'unfreeze', '.', 'setExpert', '(', ')', 'self', '.', 'start', '.', 'setExpert', '(', ')', 'self', '.', 'stop', '.', 'setExpert', '(', ')']
Set expert level
['Set', 'expert', 'level']
train
https://github.com/HiPERCAM/hcam_widgets/blob/7219f0d96dd3a8ebe3139c7f542a72c02d02fce8/hcam_widgets/hcam.py#L1846-L1866
3,079
quantum5/2048
_2048/game.py
AnimatedTile.get_position
def get_position(self, dt):
    """Given dt in [0, 1], return the current position of the tile."""
    return self.sx + self.dx * dt, self.sy + self.dy * dt
python
['def', 'get_position', '(', 'self', ',', 'dt', ')', ':', 'return', 'self', '.', 'sx', '+', 'self', '.', 'dx', '*', 'dt', ',', 'self', '.', 'sy', '+', 'self', '.', 'dy', '*', 'dt']
Given dt in [0, 1], return the current position of the tile.
['Given', 'dt', 'in', '[', '0', '1', ']', 'return', 'the', 'current', 'position', 'of', 'the', 'tile', '.']
train
https://github.com/quantum5/2048/blob/93ada2e3026eaf154e1bbee943d0500c9253e66f/_2048/game.py#L28-L30
3,080
secdev/scapy
scapy/arch/unix.py
_guess_iface_name
def _guess_iface_name(netif):
    """
    We attempt to guess the name of interfaces that are truncated from the
    output of ifconfig -l. If there is only one possible candidate matching
    the interface name then we return it. If there are none or more, then we
    return None.
    """
    with os.popen('%s -l' % conf.prog.ifconfig) as fdesc:
        ifaces = fdesc.readline().strip().split(' ')
    matches = [iface for iface in ifaces if iface.startswith(netif)]
    if len(matches) == 1:
        return matches[0]
    return None
python
['def', '_guess_iface_name', '(', 'netif', ')', ':', 'with', 'os', '.', 'popen', '(', "'%s -l'", '%', 'conf', '.', 'prog', '.', 'ifconfig', ')', 'as', 'fdesc', ':', 'ifaces', '=', 'fdesc', '.', 'readline', '(', ')', '.', 'strip', '(', ')', '.', 'split', '(', "' '", ')', 'matches', '=', '[', 'iface', 'for', 'iface', 'in', 'ifaces', 'if', 'iface', '.', 'startswith', '(', 'netif', ')', ']', 'if', 'len', '(', 'matches', ')', '==', '1', ':', 'return', 'matches', '[', '0', ']', 'return', 'None']
We attempt to guess the name of interfaces that are truncated from the output of ifconfig -l. If there is only one possible candidate matching the interface name then we return it. If there are none or more, then we return None.
['We', 'attempt', 'to', 'guess', 'the', 'name', 'of', 'interfaces', 'that', 'are', 'truncated', 'from', 'the', 'output', 'of', 'ifconfig', '-', 'l', '.', 'If', 'there', 'is', 'only', 'one', 'possible', 'candidate', 'matching', 'the', 'interface', 'name', 'then', 'we', 'return', 'it', '.', 'If', 'there', 'are', 'none', 'or', 'more', 'then', 'we', 'return', 'None', '.']
train
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/arch/unix.py#L28-L41
3,081
sassoftware/saspy
saspy/sasbase.py
SASsession.sasdata2dataframe
def sasdata2dataframe(self, table: str, libref: str = '', dsopts: dict = None,
                      method: str = 'MEMORY', **kwargs) -> 'pd.DataFrame':
    """
    This method exports the SAS Data Set to a Pandas Data Frame, returning the Data Frame object.
    SASdata object that refers to the Sas Data Set you want to export to a Pandas Data Frame

    :param table: the name of the SAS Data Set you want to export to a Pandas Data Frame
    :param libref: the libref for the SAS Data Set.
    :param dsopts: a dictionary containing any of the following SAS data set options(where, drop, keep, obs, firstobs):

        - where is a string
        - keep are strings or list of strings.
        - drop are strings or list of strings.
        - obs is a number - either string or int
        - firstobs is a number - either string or int
        - format is a string or dictionary { var: format }

        .. code-block:: python

            {'where'    : 'msrp < 20000 and make = "Ford"'
             'keep'     : 'msrp enginesize Cylinders Horsepower Weight'
             'drop'     : ['msrp', 'enginesize', 'Cylinders', 'Horsepower', 'Weight']
             'obs'      : 10
             'firstobs' : '12'
             'format'   : {'money': 'dollar10', 'time': 'tod5.'}
            }

    :param method: defaults to MEMORY; the original method. CSV is the other choice which uses an intermediary csv file; faster for large data
    :param kwargs: dictionary
    :return: Pandas data frame
    """
    dsopts = dsopts if dsopts is not None else {}
    if self.exist(table, libref) == 0:
        print('The SAS Data Set ' + libref + '.' + table + ' does not exist')
        return None

    if self.nosub:
        print("too complicated to show the code, read the source :), sorry.")
        return None
    else:
        return self._io.sasdata2dataframe(table, libref, dsopts, method=method, **kwargs)
python
['def', 'sasdata2dataframe', '(', 'self', ',', 'table', ':', 'str', ',', 'libref', ':', 'str', '=', "''", ',', 'dsopts', ':', 'dict', '=', 'None', ',', 'method', ':', 'str', '=', "'MEMORY'", ',', '*', '*', 'kwargs', ')', '->', "'pd.DataFrame'", ':', 'dsopts', '=', 'dsopts', 'if', 'dsopts', 'is', 'not', 'None', 'else', '{', '}', 'if', 'self', '.', 'exist', '(', 'table', ',', 'libref', ')', '==', '0', ':', 'print', '(', "'The SAS Data Set '", '+', 'libref', '+', "'.'", '+', 'table', '+', "' does not exist'", ')', 'return', 'None', 'if', 'self', '.', 'nosub', ':', 'print', '(', '"too complicated to show the code, read the source :), sorry."', ')', 'return', 'None', 'else', ':', 'return', 'self', '.', '_io', '.', 'sasdata2dataframe', '(', 'table', ',', 'libref', ',', 'dsopts', ',', 'method', '=', 'method', ',', '*', '*', 'kwargs', ')']
This method exports the SAS Data Set to a Pandas Data Frame, returning the Data Frame object.
SASdata object that refers to the Sas Data Set you want to export to a Pandas Data Frame

:param table: the name of the SAS Data Set you want to export to a Pandas Data Frame
:param libref: the libref for the SAS Data Set.
:param dsopts: a dictionary containing any of the following SAS data set options(where, drop, keep, obs, firstobs):

    - where is a string
    - keep are strings or list of strings.
    - drop are strings or list of strings.
    - obs is a number - either string or int
    - firstobs is a number - either string or int
    - format is a string or dictionary { var: format }

    .. code-block:: python

        {'where'    : 'msrp < 20000 and make = "Ford"'
         'keep'     : 'msrp enginesize Cylinders Horsepower Weight'
         'drop'     : ['msrp', 'enginesize', 'Cylinders', 'Horsepower', 'Weight']
         'obs'      : 10
         'firstobs' : '12'
         'format'   : {'money': 'dollar10', 'time': 'tod5.'}
        }

:param method: defaults to MEMORY; the original method. CSV is the other choice which uses an intermediary csv file; faster for large data
:param kwargs: dictionary
:return: Pandas data frame
['This', 'method', 'exports', 'the', 'SAS', 'Data', 'Set', 'to', 'a', 'Pandas', 'Data', 'Frame', 'returning', 'the', 'Data', 'Frame', 'object', '.', 'SASdata', 'object', 'that', 'refers', 'to', 'the', 'Sas', 'Data', 'Set', 'you', 'want', 'to', 'export', 'to', 'a', 'Pandas', 'Data', 'Frame']
train
https://github.com/sassoftware/saspy/blob/e433f71990f249d3a6c3db323ceb11cb2d462cf9/saspy/sasbase.py#L939-L979
3,082
klen/makesite
makesite/modules/flask/base/core/models.py
BaseMixin.history
def history(self):
    """Returns an SQLAlchemy query of the object's history (previous
    versions). If the class does not support history/versioning, returns
    None.
    """
    history = self.history_class
    if history:
        return self.session.query(history).filter(history.id == self.id)
    else:
        return None
python
['def', 'history', '(', 'self', ')', ':', 'history', '=', 'self', '.', 'history_class', 'if', 'history', ':', 'return', 'self', '.', 'session', '.', 'query', '(', 'history', ')', '.', 'filter', '(', 'history', '.', 'id', '==', 'self', '.', 'id', ')', 'else', ':', 'return', 'None']
Returns an SQLAlchemy query of the object's history (previous versions). If the class does not support history/versioning, returns None.
['Returns', 'an', 'SQLAlchemy', 'query', 'of', 'the', 'object', 's', 'history', '(', 'previous', 'versions', ')', '.', 'If', 'the', 'class', 'does', 'not', 'support', 'history', '/', 'versioning', 'returns', 'None', '.']
train
https://github.com/klen/makesite/blob/f6f77a43a04a256189e8fffbeac1ffd63f35a10c/makesite/modules/flask/base/core/models.py#L71-L80
3,083
aparo/pyes
pyes/mappings.py
get_field
def get_field(name, data, default="object", document_object_field=None, is_document=False):
    """
    Return a valid Field by given data
    """
    if isinstance(data, AbstractField):
        return data
    data = keys_to_string(data)
    _type = data.get('type', default)
    if _type == "string":
        return StringField(name=name, **data)
    elif _type == "binary":
        return BinaryField(name=name, **data)
    elif _type == "boolean":
        return BooleanField(name=name, **data)
    elif _type == "byte":
        return ByteField(name=name, **data)
    elif _type == "short":
        return ShortField(name=name, **data)
    elif _type == "integer":
        return IntegerField(name=name, **data)
    elif _type == "long":
        return LongField(name=name, **data)
    elif _type == "float":
        return FloatField(name=name, **data)
    elif _type == "double":
        return DoubleField(name=name, **data)
    elif _type == "ip":
        return IpField(name=name, **data)
    elif _type == "date":
        return DateField(name=name, **data)
    elif _type == "multi_field":
        return MultiField(name=name, **data)
    elif _type == "geo_point":
        return GeoPointField(name=name, **data)
    elif _type == "attachment":
        return AttachmentField(name=name, **data)
    elif is_document or _type == "document":
        if document_object_field:
            return document_object_field(name=name, **data)
        else:
            data.pop("name", None)
            return DocumentObjectField(name=name, **data)
    elif _type == "object":
        if '_timestamp' in data or "_all" in data:
            if document_object_field:
                return document_object_field(name=name, **data)
            else:
                return DocumentObjectField(name=name, **data)
        return ObjectField(name=name, **data)
    elif _type == "nested":
        return NestedObject(name=name, **data)
    raise RuntimeError("Invalid type: %s" % _type)
python
['def', 'get_field', '(', 'name', ',', 'data', ',', 'default', '=', '"object"', ',', 'document_object_field', '=', 'None', ',', 'is_document', '=', 'False', ')', ':', 'if', 'isinstance', '(', 'data', ',', 'AbstractField', ')', ':', 'return', 'data', 'data', '=', 'keys_to_string', '(', 'data', ')', '_type', '=', 'data', '.', 'get', '(', "'type'", ',', 'default', ')', 'if', '_type', '==', '"string"', ':', 'return', 'StringField', '(', 'name', '=', 'name', ',', '*', '*', 'data', ')', 'elif', '_type', '==', '"binary"', ':', 'return', 'BinaryField', '(', 'name', '=', 'name', ',', '*', '*', 'data', ')', 'elif', '_type', '==', '"boolean"', ':', 'return', 'BooleanField', '(', 'name', '=', 'name', ',', '*', '*', 'data', ')', 'elif', '_type', '==', '"byte"', ':', 'return', 'ByteField', '(', 'name', '=', 'name', ',', '*', '*', 'data', ')', 'elif', '_type', '==', '"short"', ':', 'return', 'ShortField', '(', 'name', '=', 'name', ',', '*', '*', 'data', ')', 'elif', '_type', '==', '"integer"', ':', 'return', 'IntegerField', '(', 'name', '=', 'name', ',', '*', '*', 'data', ')', 'elif', '_type', '==', '"long"', ':', 'return', 'LongField', '(', 'name', '=', 'name', ',', '*', '*', 'data', ')', 'elif', '_type', '==', '"float"', ':', 'return', 'FloatField', '(', 'name', '=', 'name', ',', '*', '*', 'data', ')', 'elif', '_type', '==', '"double"', ':', 'return', 'DoubleField', '(', 'name', '=', 'name', ',', '*', '*', 'data', ')', 'elif', '_type', '==', '"ip"', ':', 'return', 'IpField', '(', 'name', '=', 'name', ',', '*', '*', 'data', ')', 'elif', '_type', '==', '"date"', ':', 'return', 'DateField', '(', 'name', '=', 'name', ',', '*', '*', 'data', ')', 'elif', '_type', '==', '"multi_field"', ':', 'return', 'MultiField', '(', 'name', '=', 'name', ',', '*', '*', 'data', ')', 'elif', '_type', '==', '"geo_point"', ':', 'return', 'GeoPointField', '(', 'name', '=', 'name', ',', '*', '*', 'data', ')', 'elif', '_type', '==', '"attachment"', ':', 'return', 'AttachmentField', '(', 'name', '=', 'name', ',', '*', '*', 'data', ')', 'elif', 'is_document', 'or', '_type', '==', '"document"', ':', 'if', 'document_object_field', ':', 'return', 'document_object_field', '(', 'name', '=', 'name', ',', '*', '*', 'data', ')', 'else', ':', 'data', '.', 'pop', '(', '"name"', ',', 'None', ')', 'return', 'DocumentObjectField', '(', 'name', '=', 'name', ',', '*', '*', 'data', ')', 'elif', '_type', '==', '"object"', ':', 'if', "'_timestamp'", 'in', 'data', 'or', '"_all"', 'in', 'data', ':', 'if', 'document_object_field', ':', 'return', 'document_object_field', '(', 'name', '=', 'name', ',', '*', '*', 'data', ')', 'else', ':', 'return', 'DocumentObjectField', '(', 'name', '=', 'name', ',', '*', '*', 'data', ')', 'return', 'ObjectField', '(', 'name', '=', 'name', ',', '*', '*', 'data', ')', 'elif', '_type', '==', '"nested"', ':', 'return', 'NestedObject', '(', 'name', '=', 'name', ',', '*', '*', 'data', ')', 'raise', 'RuntimeError', '(', '"Invalid type: %s"', '%', '_type', ')']
Return a valid Field by given data
['Return', 'a', 'valid', 'Field', 'by', 'given', 'data']
train
https://github.com/aparo/pyes/blob/712eb6095961755067b2b5baa262008ade6584b3/pyes/mappings.py#L754-L807
3,084
minio/minio-py
minio/helpers.py
is_valid_bucket_notification_config
def is_valid_bucket_notification_config(notifications):
    """
    Validate the notifications config structure

    :param notifications: Dictionary with specific structure.
    :return: True if input is a valid bucket notifications structure.
       Raise :exc:`InvalidArgumentError` otherwise.
    """
    # check if notifications is a dict.
    if not isinstance(notifications, dict):
        raise TypeError('notifications configuration must be a dictionary')

    if len(notifications) == 0:
        raise InvalidArgumentError(
            'notifications configuration may not be empty'
        )

    VALID_NOTIFICATION_KEYS = set([
        "TopicConfigurations",
        "QueueConfigurations",
        "CloudFunctionConfigurations",
    ])

    VALID_SERVICE_CONFIG_KEYS = set([
        'Id',
        'Arn',
        'Events',
        'Filter',
    ])

    NOTIFICATION_EVENTS = set([
        's3:ReducedRedundancyLostObject',
        's3:ObjectCreated:*',
        's3:ObjectCreated:Put',
        's3:ObjectCreated:Post',
        's3:ObjectCreated:Copy',
        's3:ObjectCreated:CompleteMultipartUpload',
        's3:ObjectRemoved:*',
        's3:ObjectRemoved:Delete',
        's3:ObjectRemoved:DeleteMarkerCreated',
    ])

    for key, value in notifications.items():
        # check if key names are valid
        if key not in VALID_NOTIFICATION_KEYS:
            raise InvalidArgumentError((
                '{} is an invalid key '
                'for notifications configuration').format(key))

        # check if config values conform
        # first check if value is a list
        if not isinstance(value, list):
            raise InvalidArgumentError((
                'The value for key "{}" in the notifications '
                'configuration must be a list.').format(key))

        for service_config in value:
            # check type matches
            if not isinstance(service_config, dict):
                raise InvalidArgumentError((
                    'Each service configuration item for "{}" must be a '
                    'dictionary').format(key))

            # check keys are valid
            for skey in service_config.keys():
                if skey not in VALID_SERVICE_CONFIG_KEYS:
                    raise InvalidArgumentError((
                        '{} is an invalid key for a service '
                        'configuration item').format(skey))

            # check for required keys
            arn = service_config.get('Arn', '')
            if arn == '':
                raise InvalidArgumentError(
                    'Arn key in service config must be present and has to be '
                    'non-empty string'
                )
            events = service_config.get('Events', [])
            if len(events) < 1:
                raise InvalidArgumentError(
                    'At least one event must be specified in a service config'
                )
            if not isinstance(events, list):
                raise InvalidArgumentError('"Events" must be a list of strings '
                                           'in a service configuration')

            # check if 'Id' key is present, it should be string or bytes.
            if not isinstance(service_config.get('Id', ''), basestring):
                raise InvalidArgumentError('"Id" key must be a string')

            for event in events:
                if event not in NOTIFICATION_EVENTS:
                    raise InvalidArgumentError(
                        '{} is not a valid event. Valid '
                        'events are: {}'.format(event, NOTIFICATION_EVENTS))

            if 'Filter' in service_config:
                exception_msg = (
                    '{} - If a Filter key is given, it must be a '
                    'dictionary, the dictionary must have the '
                    'key "Key", and its value must be an object, with '
                    'a key named "FilterRules" which must be a non-empty list.'
                ).format(
                    service_config['Filter']
                )
                try:
                    filter_rules = service_config.get('Filter', {}).get(
                        'Key', {}).get('FilterRules', [])
                    if not isinstance(filter_rules, list):
                        raise InvalidArgumentError(exception_msg)
                    if len(filter_rules) < 1:
                        raise InvalidArgumentError(exception_msg)
                except AttributeError:
                    raise InvalidArgumentError(exception_msg)

                for filter_rule in filter_rules:
                    try:
                        name = filter_rule['Name']
                        value = filter_rule['Value']
                    except KeyError:
                        raise InvalidArgumentError((
                            '{} - a FilterRule dictionary must have "Name" '
                            'and "Value" keys').format(filter_rule))

                    if name not in ['prefix', 'suffix']:
                        raise InvalidArgumentError((
                            '{} - The "Name" key in a filter rule must be '
                            'either "prefix" or "suffix"').format(name))

    return True
python
['def', 'is_valid_bucket_notification_config', '(', 'notifications', ')', ':', '# check if notifications is a dict.', 'if', 'not', 'isinstance', '(', 'notifications', ',', 'dict', ')', ':', 'raise', 'TypeError', '(', "'notifications configuration must be a dictionary'", ')', 'if', 'len', '(', 'notifications', ')', '==', '0', ':', 'raise', 'InvalidArgumentError', '(', "'notifications configuration may not be empty'", ')', 'VALID_NOTIFICATION_KEYS', '=', 'set', '(', '[', '"TopicConfigurations"', ',', '"QueueConfigurations"', ',', '"CloudFunctionConfigurations"', ',', ']', ')', 'VALID_SERVICE_CONFIG_KEYS', '=', 'set', '(', '[', "'Id'", ',', "'Arn'", ',', "'Events'", ',', "'Filter'", ',', ']', ')', 'NOTIFICATION_EVENTS', '=', 'set', '(', '[', "'s3:ReducedRedundancyLostObject'", ',', "'s3:ObjectCreated:*'", ',', "'s3:ObjectCreated:Put'", ',', "'s3:ObjectCreated:Post'", ',', "'s3:ObjectCreated:Copy'", ',', "'s3:ObjectCreated:CompleteMultipartUpload'", ',', "'s3:ObjectRemoved:*'", ',', "'s3:ObjectRemoved:Delete'", ',', "'s3:ObjectRemoved:DeleteMarkerCreated'", ',', ']', ')', 'for', 'key', ',', 'value', 'in', 'notifications', '.', 'items', '(', ')', ':', '# check if key names are valid', 'if', 'key', 'not', 'in', 'VALID_NOTIFICATION_KEYS', ':', 'raise', 'InvalidArgumentError', '(', '(', "'{} is an invalid key '", "'for notifications configuration'", ')', '.', 'format', '(', 'key', ')', ')', '# check if config values conform', '# first check if value is a list', 'if', 'not', 'isinstance', '(', 'value', ',', 'list', ')', ':', 'raise', 'InvalidArgumentError', '(', '(', '\'The value for key "{}" in the notifications \'', "'configuration must be a list.'", ')', '.', 'format', '(', 'key', ')', ')', 'for', 'service_config', 'in', 'value', ':', '# check type matches', 'if', 'not', 'isinstance', '(', 'service_config', ',', 'dict', ')', ':', 'raise', 'InvalidArgumentError', '(', '(', '\'Each service configuration item for "{}" must be a \'', "'dictionary'", ')', '.', 'format', '(', 'key', ')', ')', '# check keys are valid', 'for', 'skey', 'in', 'service_config', '.', 'keys', '(', ')', ':', 'if', 'skey', 'not', 'in', 'VALID_SERVICE_CONFIG_KEYS', ':', 'raise', 'InvalidArgumentError', '(', '(', "'{} is an invalid key for a service '", "'configuration item'", ')', '.', 'format', '(', 'skey', ')', ')', '# check for required keys', 'arn', '=', 'service_config', '.', 'get', '(', "'Arn'", ',', "''", ')', 'if', 'arn', '==', "''", ':', 'raise', 'InvalidArgumentError', '(', "'Arn key in service config must be present and has to be '", "'non-empty string'", ')', 'events', '=', 'service_config', '.', 'get', '(', "'Events'", ',', '[', ']', ')', 'if', 'len', '(', 'events', ')', '<', '1', ':', 'raise', 'InvalidArgumentError', '(', "'At least one event must be specified in a service config'", ')', 'if', 'not', 'isinstance', '(', 'events', ',', 'list', ')', ':', 'raise', 'InvalidArgumentError', '(', '\'"Events" must be a list of strings \'', "'in a service configuration'", ')', "# check if 'Id' key is present, it should be string or bytes.", 'if', 'not', 'isinstance', '(', 'service_config', '.', 'get', '(', "'Id'", ',', "''", ')', ',', 'basestring', ')', ':', 'raise', 'InvalidArgumentError', '(', '\'"Id" key must be a string\'', ')', 'for', 'event', 'in', 'events', ':', 'if', 'event', 'not', 'in', 'NOTIFICATION_EVENTS', ':', 'raise', 'InvalidArgumentError', '(', "'{} is not a valid event. Valid '", "'events are: {}'", '.', 'format', '(', 'event', ',', 'NOTIFICATION_EVENTS', ')', ')', 'if', "'Filter'", 'in', 'service_config', ':', 'exception_msg', '=', '(', "'{} - If a Filter key is given, it must be a '", "'dictionary, the dictionary must have the '", '\'key "Key", and its value must be an object, with \'', '\'a key named "FilterRules" which must be a non-empty list.\'', ')', '.', 'format', '(', 'service_config', '[', "'Filter'", ']', ')', 'try', ':', 'filter_rules', '=', 'service_config', '.', 'get', '(', "'Filter'", ',', '{', '}', ')', '.', 'get', '(', "'Key'", ',', '{', '}', ')', '.', 'get', '(', "'FilterRules'", ',', '[', ']', ')', 'if', 'not', 'isinstance', '(', 'filter_rules', ',', 'list', ')', ':', 'raise', 'InvalidArgumentError', '(', 'exception_msg', ')', 'if', 'len', '(', 'filter_rules', ')', '<', '1', ':', 'raise', 'InvalidArgumentError', '(', 'exception_msg', ')', 'except', 'AttributeError', ':', 'raise', 'InvalidArgumentError', '(', 'exception_msg', ')', 'for', 'filter_rule', 'in', 'filter_rules', ':', 'try', ':', 'name', '=', 'filter_rule', '[', "'Name'", ']', 'value', '=', 'filter_rule', '[', "'Value'", ']', 'except', 'KeyError', ':', 'raise', 'InvalidArgumentError', '(', '(', '\'{} - a FilterRule dictionary must have "Name" \'', '\'and "Value" keys\'', ')', '.', 'format', '(', 'filter_rule', ')', ')', 'if', 'name', 'not', 'in', '[', "'prefix'", ',', "'suffix'", ']', ':', 'raise', 'InvalidArgumentError', '(', '(', '\'{} - The "Name" key in a filter rule must be \'', '\'either "prefix" or "suffix"\'', ')', '.', 'format', '(', 'name', ')', ')', 'return', 'True']
Validate the notifications config structure

:param notifications: Dictionary with specific structure.
:return: True if input is a valid bucket notifications structure.
   Raise :exc:`InvalidArgumentError` otherwise.
['Validate', 'the', 'notifications', 'config', 'structure']
train
https://github.com/minio/minio-py/blob/7107c84183cf5fb4deff68c0a16ab9f1c0b4c37e/minio/helpers.py#L394-L522
3,085
mypebble/django-feature-flipper
feature_flipper/templatetags/feature_flipper.py
FlipperNode.render
def render(self, context):
    """Handle the actual rendering.
    """
    user = self._get_value(self.user_key, context)
    feature = self._get_value(self.feature, context)

    if feature is None:
        return ''

    allowed = show_feature(user, feature)
    return self.nodelist.render(context) if allowed else ''
python
['def', 'render', '(', 'self', ',', 'context', ')', ':', 'user', '=', 'self', '.', '_get_value', '(', 'self', '.', 'user_key', ',', 'context', ')', 'feature', '=', 'self', '.', '_get_value', '(', 'self', '.', 'feature', ',', 'context', ')', 'if', 'feature', 'is', 'None', ':', 'return', "''", 'allowed', '=', 'show_feature', '(', 'user', ',', 'feature', ')', 'return', 'self', '.', 'nodelist', '.', 'render', '(', 'context', ')', 'if', 'allowed', 'else', "''"]
Handle the actual rendering.
['Handle', 'the', 'actual', 'rendering', '.']
train
https://github.com/mypebble/django-feature-flipper/blob/53ff52296955f2ff8b5b6ae4ea426b3f0665960e/feature_flipper/templatetags/feature_flipper.py#L41-L51
3,086
awslabs/aws-sam-cli
samcli/local/docker/utils.py
to_posix_path
def to_posix_path(code_path):
    """
    Change the code_path to be of unix-style if running on windows when supplied with an absolute windows path.

    Parameters
    ----------
    code_path : str
        Directory in the host operating system that should be mounted within the container.

    Returns
    -------
    str
        Posix equivalent of absolute windows style path.

    Examples
    --------
    >>> to_posix_path('/Users/UserName/sam-app')
    /Users/UserName/sam-app

    >>> to_posix_path('C:\\\\Users\\\\UserName\\\\AppData\\\\Local\\\\Temp\\\\mydir')
    /c/Users/UserName/AppData/Local/Temp/mydir
    """
    return re.sub("^([A-Za-z])+:",
                  lambda match: posixpath.sep + match.group().replace(":", "").lower(),
                  pathlib.PureWindowsPath(code_path).as_posix()) if os.name == "nt" else code_path
python
['def', 'to_posix_path', '(', 'code_path', ')', ':', 'return', 're', '.', 'sub', '(', '"^([A-Za-z])+:"', ',', 'lambda', 'match', ':', 'posixpath', '.', 'sep', '+', 'match', '.', 'group', '(', ')', '.', 'replace', '(', '":"', ',', '""', ')', '.', 'lower', '(', ')', ',', 'pathlib', '.', 'PureWindowsPath', '(', 'code_path', ')', '.', 'as_posix', '(', ')', ')', 'if', 'os', '.', 'name', '==', '"nt"', 'else', 'code_path']
Change the code_path to be of unix-style if running on windows when supplied with an absolute windows path.

Parameters
----------
code_path : str
    Directory in the host operating system that should be mounted within the container.

Returns
-------
str
    Posix equivalent of absolute windows style path.

Examples
--------
>>> to_posix_path('/Users/UserName/sam-app')
/Users/UserName/sam-app

>>> to_posix_path('C:\\\\Users\\\\UserName\\\\AppData\\\\Local\\\\Temp\\\\mydir')
/c/Users/UserName/AppData/Local/Temp/mydir
['Change', 'the', 'code_path', 'to', 'be', 'of', 'unix', '-', 'style', 'if', 'running', 'on', 'windows', 'when', 'supplied', 'with', 'an', 'absolute', 'windows', 'path', '.']
train
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/local/docker/utils.py#L14-L36
3,087
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_firmware.py
brocade_firmware.fwdl_status_output_fwdl_entries_blade_slot
def fwdl_status_output_fwdl_entries_blade_slot(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    fwdl_status = ET.Element("fwdl_status")
    config = fwdl_status
    output = ET.SubElement(fwdl_status, "output")
    fwdl_entries = ET.SubElement(output, "fwdl-entries")
    blade_slot = ET.SubElement(fwdl_entries, "blade-slot")
    blade_slot.text = kwargs.pop('blade_slot')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
python
['def', 'fwdl_status_output_fwdl_entries_blade_slot', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'fwdl_status', '=', 'ET', '.', 'Element', '(', '"fwdl_status"', ')', 'config', '=', 'fwdl_status', 'output', '=', 'ET', '.', 'SubElement', '(', 'fwdl_status', ',', '"output"', ')', 'fwdl_entries', '=', 'ET', '.', 'SubElement', '(', 'output', ',', '"fwdl-entries"', ')', 'blade_slot', '=', 'ET', '.', 'SubElement', '(', 'fwdl_entries', ',', '"blade-slot"', ')', 'blade_slot', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'blade_slot'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']
Auto Generated Code
['Auto', 'Generated', 'Code']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_firmware.py#L107-L119
3,088
mlperf/training
object_detection/pytorch/maskrcnn_benchmark/modeling/rpn/inference.py
RPNPostProcessor.forward_for_single_feature_map
def forward_for_single_feature_map(self, anchors, objectness, box_regression):
    """
    Arguments:
        anchors: list[BoxList]
        objectness: tensor of size N, A, H, W
        box_regression: tensor of size N, A * 4, H, W
    """
    device = objectness.device
    N, A, H, W = objectness.shape

    # put in the same format as anchors
    objectness = permute_and_flatten(objectness, N, A, 1, H, W).view(N, -1)
    objectness = objectness.sigmoid()

    box_regression = permute_and_flatten(box_regression, N, A, 4, H, W)

    num_anchors = A * H * W

    pre_nms_top_n = min(self.pre_nms_top_n, num_anchors)
    objectness, topk_idx = objectness.topk(pre_nms_top_n, dim=1, sorted=True)

    batch_idx = torch.arange(N, device=device)[:, None]
    box_regression = box_regression[batch_idx, topk_idx]

    image_shapes = [box.size for box in anchors]
    concat_anchors = torch.cat([a.bbox for a in anchors], dim=0)
    concat_anchors = concat_anchors.reshape(N, -1, 4)[batch_idx, topk_idx]

    proposals = self.box_coder.decode(
        box_regression.view(-1, 4), concat_anchors.view(-1, 4)
    )

    proposals = proposals.view(N, -1, 4)

    result = []
    for proposal, score, im_shape in zip(proposals, objectness, image_shapes):
        boxlist = BoxList(proposal, im_shape, mode="xyxy")
        boxlist.add_field("objectness", score)
        boxlist = boxlist.clip_to_image(remove_empty=False)
        boxlist = remove_small_boxes(boxlist, self.min_size)
        boxlist = boxlist_nms(
            boxlist,
            self.nms_thresh,
            max_proposals=self.post_nms_top_n,
            score_field="objectness",
        )
        result.append(boxlist)
    return result
python
['def', 'forward_for_single_feature_map', '(', 'self', ',', 'anchors', ',', 'objectness', ',', 'box_regression', ')', ':', 'device', '=', 'objectness', '.', 'device', 'N', ',', 'A', ',', 'H', ',', 'W', '=', 'objectness', '.', 'shape', '# put in the same format as anchors', 'objectness', '=', 'permute_and_flatten', '(', 'objectness', ',', 'N', ',', 'A', ',', '1', ',', 'H', ',', 'W', ')', '.', 'view', '(', 'N', ',', '-', '1', ')', 'objectness', '=', 'objectness', '.', 'sigmoid', '(', ')', 'box_regression', '=', 'permute_and_flatten', '(', 'box_regression', ',', 'N', ',', 'A', ',', '4', ',', 'H', ',', 'W', ')', 'num_anchors', '=', 'A', '*', 'H', '*', 'W', 'pre_nms_top_n', '=', 'min', '(', 'self', '.', 'pre_nms_top_n', ',', 'num_anchors', ')', 'objectness', ',', 'topk_idx', '=', 'objectness', '.', 'topk', '(', 'pre_nms_top_n', ',', 'dim', '=', '1', ',', 'sorted', '=', 'True', ')', 'batch_idx', '=', 'torch', '.', 'arange', '(', 'N', ',', 'device', '=', 'device', ')', '[', ':', ',', 'None', ']', 'box_regression', '=', 'box_regression', '[', 'batch_idx', ',', 'topk_idx', ']', 'image_shapes', '=', '[', 'box', '.', 'size', 'for', 'box', 'in', 'anchors', ']', 'concat_anchors', '=', 'torch', '.', 'cat', '(', '[', 'a', '.', 'bbox', 'for', 'a', 'in', 'anchors', ']', ',', 'dim', '=', '0', ')', 'concat_anchors', '=', 'concat_anchors', '.', 'reshape', '(', 'N', ',', '-', '1', ',', '4', ')', '[', 'batch_idx', ',', 'topk_idx', ']', 'proposals', '=', 'self', '.', 'box_coder', '.', 'decode', '(', 'box_regression', '.', 'view', '(', '-', '1', ',', '4', ')', ',', 'concat_anchors', '.', 'view', '(', '-', '1', ',', '4', ')', ')', 'proposals', '=', 'proposals', '.', 'view', '(', 'N', ',', '-', '1', ',', '4', ')', 'result', '=', '[', ']', 'for', 'proposal', ',', 'score', ',', 'im_shape', 'in', 'zip', '(', 'proposals', ',', 'objectness', ',', 'image_shapes', ')', ':', 'boxlist', '=', 'BoxList', '(', 'proposal', ',', 'im_shape', ',', 'mode', '=', '"xyxy"', ')', 'boxlist', '.', 'add_field', '(', '"objectness"', ',', 'score', ')', 'boxlist', '=', 'boxlist', '.', 'clip_to_image', '(', 'remove_empty', '=', 'False', ')', 'boxlist', '=', 'remove_small_boxes', '(', 'boxlist', ',', 'self', '.', 'min_size', ')', 'boxlist', '=', 'boxlist_nms', '(', 'boxlist', ',', 'self', '.', 'nms_thresh', ',', 'max_proposals', '=', 'self', '.', 'post_nms_top_n', ',', 'score_field', '=', '"objectness"', ',', ')', 'result', '.', 'append', '(', 'boxlist', ')', 'return', 'result']
Arguments:
    anchors: list[BoxList]
    objectness: tensor of size N, A, H, W
    box_regression: tensor of size N, A * 4, H, W
['Arguments', ':', 'anchors', ':', 'list', '[', 'BoxList', ']', 'objectness', ':', 'tensor', 'of', 'size', 'N', 'A', 'H', 'W', 'box_regression', ':', 'tensor', 'of', 'size', 'N', 'A', '*', '4', 'H', 'W']
train
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/object_detection/pytorch/maskrcnn_benchmark/modeling/rpn/inference.py#L74-L121
3,089
markovmodel/PyEMMA
pyemma/_base/estimator.py
_estimate_param_scan_worker
def _estimate_param_scan_worker(estimator, params, X, evaluate, evaluate_args,
                                failfast, return_exceptions):
    """ Method that runs estimation for several parameter settings.

    Defined as a worker for parallelization
    """
    # run estimation
    model = None
    try:  # catch any exception
        estimator.estimate(X, **params)
        model = estimator.model
    except KeyboardInterrupt:
        # we want to be able to interactively interrupt the worker, no matter of failfast=False.
        raise
    except:
        e = sys.exc_info()[1]
        if isinstance(estimator, Loggable):
            estimator.logger.warning("Ignored error during estimation: %s" % e)
        if failfast:
            raise  # re-raise
        elif return_exceptions:
            model = e
        else:
            pass  # just return model=None

    # deal with results
    res = []

    # deal with result
    if evaluate is None:  # we want full models
        res.append(model)
    # we want to evaluate function(s) of the model
    elif _types.is_iterable(evaluate):
        values = []  # the function values the model
        for ieval, name in enumerate(evaluate):
            # get method/attribute name and arguments to be evaluated
            #name = evaluate[ieval]
            args = ()
            if evaluate_args is not None:
                args = evaluate_args[ieval]
                # wrap single arguments in an iterable again to pass them.
                if _types.is_string(args):
                    args = (args, )
            # evaluate
            try:
                # try calling method/property/attribute
                value = _call_member(estimator.model, name, failfast, *args)
            # couldn't find method/property/attribute
            except AttributeError as e:
                if failfast:
                    raise e  # raise an AttributeError
                else:
                    value = None  # we just ignore it and return None
            values.append(value)
        # if we only have one value, unpack it
        if len(values) == 1:
            values = values[0]
        res.append(values)
    else:
        raise ValueError('Invalid setting for evaluate: ' + str(evaluate))

    if len(res) == 1:
        res = res[0]
    return res
python
['def', '_estimate_param_scan_worker', '(', 'estimator', ',', 'params', ',', 'X', ',', 'evaluate', ',', 'evaluate_args', ',', 'failfast', ',', 'return_exceptions', ')', ':', '# run estimation', 'model', '=', 'None', 'try', ':', '# catch any exception', 'estimator', '.', 'estimate', '(', 'X', ',', '*', '*', 'params', ')', 'model', '=', 'estimator', '.', 'model', 'except', 'KeyboardInterrupt', ':', '# we want to be able to interactively interrupt the worker, no matter of failfast=False.', 'raise', 'except', ':', 'e', '=', 'sys', '.', 'exc_info', '(', ')', '[', '1', ']', 'if', 'isinstance', '(', 'estimator', ',', 'Loggable', ')', ':', 'estimator', '.', 'logger', '.', 'warning', '(', '"Ignored error during estimation: %s"', '%', 'e', ')', 'if', 'failfast', ':', 'raise', '# re-raise', 'elif', 'return_exceptions', ':', 'model', '=', 'e', 'else', ':', 'pass', '# just return model=None', '# deal with results', 'res', '=', '[', ']', '# deal with result', 'if', 'evaluate', 'is', 'None', ':', '# we want full models', 'res', '.', 'append', '(', 'model', ')', '# we want to evaluate function(s) of the model', 'elif', '_types', '.', 'is_iterable', '(', 'evaluate', ')', ':', 'values', '=', '[', ']', '# the function values the model', 'for', 'ieval', ',', 'name', 'in', 'enumerate', '(', 'evaluate', ')', ':', '# get method/attribute name and arguments to be evaluated', '#name = evaluate[ieval]', 'args', '=', '(', ')', 'if', 'evaluate_args', 'is', 'not', 'None', ':', 'args', '=', 'evaluate_args', '[', 'ieval', ']', '# wrap single arguments in an iterable again to pass them.', 'if', '_types', '.', 'is_string', '(', 'args', ')', ':', 'args', '=', '(', 'args', ',', ')', '# evaluate', 'try', ':', '# try calling method/property/attribute', 'value', '=', '_call_member', '(', 'estimator', '.', 'model', ',', 'name', ',', 'failfast', ',', '*', 'args', ')', "# couldn't find method/property/attribute", 'except', 'AttributeError', 'as', 'e', ':', 'if', 'failfast', ':', 'raise', 'e', '# raise an AttributeError', 'else', ':', 'value', '=', 'None', '# we just ignore it and return None', 'values', '.', 'append', '(', 'value', ')', '# if we only have one value, unpack it', 'if', 'len', '(', 'values', ')', '==', '1', ':', 'values', '=', 'values', '[', '0', ']', 'res', '.', 'append', '(', 'values', ')', 'else', ':', 'raise', 'ValueError', '(', "'Invalid setting for evaluate: '", '+', 'str', '(', 'evaluate', ')', ')', 'if', 'len', '(', 'res', ')', '==', '1', ':', 'res', '=', 'res', '[', '0', ']', 'return', 'res']
Method that runs estimation for several parameter settings. Defined as a worker for parallelization
['Method', 'that', 'runs', 'estimation', 'for', 'several', 'parameter', 'settings', '.']
train
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/_base/estimator.py#L127-L191
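The worker above boils down to a generic pattern: look up each requested name on the fitted model with getattr, call it if it is callable, and swallow AttributeError unless failfast is set. Below is a minimal self-contained sketch of that pattern; TinyModel and evaluate_on_model are made-up names, not PyEMMA API.

# Minimal standalone sketch of the "evaluate names on a model" pattern.
class TinyModel:
    def __init__(self, data):
        self.data = data

    def mean(self):
        return sum(self.data) / len(self.data)

def evaluate_on_model(model, names, failfast=False):
    values = []
    for name in names:
        try:
            attr = getattr(model, name)            # may raise AttributeError
            values.append(attr() if callable(attr) else attr)
        except AttributeError:
            if failfast:
                raise
            values.append(None)                    # mirror the worker: ignore and return None
    return values[0] if len(values) == 1 else values

model = TinyModel([1.0, 2.0, 3.0])
print(evaluate_on_model(model, ['mean', 'data']))       # [2.0, [1.0, 2.0, 3.0]]
print(evaluate_on_model(model, ['missing_attribute']))  # None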
3,090
swistakm/python-gmaps
src/gmaps/client.py
Client._serialize_parameters
def _serialize_parameters(parameters):
    """Serialize some parameters to match python native types with formats
    specified in google api docs like:

    * True/False -> "true"/"false",
    * {"a": 1, "b":2} -> "a:1|b:2"

    :type parameters: dict oif query parameters
    """
    for key, value in parameters.items():
        if isinstance(value, bool):
            parameters[key] = "true" if value else "false"
        elif isinstance(value, dict):
            parameters[key] = "|".join(
                ("%s:%s" % (k, v) for k, v in value.items()))
        elif isinstance(value, (list, tuple)):
            parameters[key] = "|".join(value)

    return parameters
python
def _serialize_parameters(parameters):
    """Serialize some parameters to match python native types with formats
    specified in google api docs like:

    * True/False -> "true"/"false",
    * {"a": 1, "b":2} -> "a:1|b:2"

    :type parameters: dict oif query parameters
    """
    for key, value in parameters.items():
        if isinstance(value, bool):
            parameters[key] = "true" if value else "false"
        elif isinstance(value, dict):
            parameters[key] = "|".join(
                ("%s:%s" % (k, v) for k, v in value.items()))
        elif isinstance(value, (list, tuple)):
            parameters[key] = "|".join(value)

    return parameters
['def', '_serialize_parameters', '(', 'parameters', ')', ':', 'for', 'key', ',', 'value', 'in', 'parameters', '.', 'items', '(', ')', ':', 'if', 'isinstance', '(', 'value', ',', 'bool', ')', ':', 'parameters', '[', 'key', ']', '=', '"true"', 'if', 'value', 'else', '"false"', 'elif', 'isinstance', '(', 'value', ',', 'dict', ')', ':', 'parameters', '[', 'key', ']', '=', '"|"', '.', 'join', '(', '(', '"%s:%s"', '%', '(', 'k', ',', 'v', ')', 'for', 'k', ',', 'v', 'in', 'value', '.', 'items', '(', ')', ')', ')', 'elif', 'isinstance', '(', 'value', ',', '(', 'list', ',', 'tuple', ')', ')', ':', 'parameters', '[', 'key', ']', '=', '"|"', '.', 'join', '(', 'value', ')', 'return', 'parameters']
Serialize some parameters to match python native types with formats specified in google api docs like: * True/False -> "true"/"false", * {"a": 1, "b":2} -> "a:1|b:2" :type parameters: dict oif query parameters
['Serialize', 'some', 'parameters', 'to', 'match', 'python', 'native', 'types', 'with', 'formats', 'specified', 'in', 'google', 'api', 'docs', 'like', ':', '*', 'True', '/', 'False', '-', '>', 'true', '/', 'false', '*', '{', 'a', ':', '1', 'b', ':', '2', '}', '-', '>', 'a', ':', '1|b', ':', '2']
train
https://github.com/swistakm/python-gmaps/blob/ef3bdea6f02277200f21a09f99d4e2aebad762b9/src/gmaps/client.py#L36-L53
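The serialization rules are easy to exercise in isolation. The demo below re-implements them under the made-up name serialize rather than importing the private gmaps helper.

# Self-contained demo of the same serialization rules.
def serialize(parameters):
    for key, value in parameters.items():
        if isinstance(value, bool):
            parameters[key] = "true" if value else "false"
        elif isinstance(value, dict):
            parameters[key] = "|".join("%s:%s" % (k, v) for k, v in value.items())
        elif isinstance(value, (list, tuple)):
            parameters[key] = "|".join(value)
    return parameters

print(serialize({"sensor": True}))                           # {'sensor': 'true'}
print(serialize({"size": {"w": 400, "h": 300}}))             # {'size': 'w:400|h:300'}
print(serialize({"markers": ["40.7,-74.0", "41.0,-73.9"]}))  # pipe-joined list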
3,091
mozilla/python_moztelemetry
moztelemetry/stats.py
ndtr
def ndtr(a):
    """
    Returns the area under the Gaussian probability density function,
    integrated from minus infinity to x.

    See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.ndtr.html#scipy.special.ndtr
    """
    sqrth = math.sqrt(2) / 2
    x = float(a) * sqrth
    z = abs(x)

    if z < sqrth:
        y = 0.5 + 0.5 * math.erf(x)
    else:
        y = 0.5 * math.erfc(z)
        if x > 0:
            y = 1 - y

    return y
python
def ndtr(a):
    """
    Returns the area under the Gaussian probability density function,
    integrated from minus infinity to x.

    See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.ndtr.html#scipy.special.ndtr
    """
    sqrth = math.sqrt(2) / 2
    x = float(a) * sqrth
    z = abs(x)

    if z < sqrth:
        y = 0.5 + 0.5 * math.erf(x)
    else:
        y = 0.5 * math.erfc(z)
        if x > 0:
            y = 1 - y

    return y
['def', 'ndtr', '(', 'a', ')', ':', 'sqrth', '=', 'math', '.', 'sqrt', '(', '2', ')', '/', '2', 'x', '=', 'float', '(', 'a', ')', '*', 'sqrth', 'z', '=', 'abs', '(', 'x', ')', 'if', 'z', '<', 'sqrth', ':', 'y', '=', '0.5', '+', '0.5', '*', 'math', '.', 'erf', '(', 'x', ')', 'else', ':', 'y', '=', '0.5', '*', 'math', '.', 'erfc', '(', 'z', ')', 'if', 'x', '>', '0', ':', 'y', '=', '1', '-', 'y', 'return', 'y']
Returns the area under the Gaussian probability density function, integrated from minus infinity to x. See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.ndtr.html#scipy.special.ndtr
['Returns', 'the', 'area', 'under', 'the', 'Gaussian', 'probability', 'density', 'function', 'integrated', 'from', 'minus', 'infinity', 'to', 'x', '.']
train
https://github.com/mozilla/python_moztelemetry/blob/09ddf1ec7d953a4308dfdcb0ed968f27bd5921bb/moztelemetry/stats.py#L58-L75
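Since the function is pure math on top of math.erf/math.erfc, it can be sanity-checked directly against well-known values of the standard normal CDF. A self-contained copy with assertions:

import math

def ndtr(a):
    # same algorithm as above: erf near zero, erfc in the tails for accuracy
    sqrth = math.sqrt(2) / 2
    x = float(a) * sqrth
    z = abs(x)
    if z < sqrth:
        y = 0.5 + 0.5 * math.erf(x)
    else:
        y = 0.5 * math.erfc(z)
        if x > 0:
            y = 1 - y
    return y

# sanity checks against well-known values of the standard normal CDF
assert abs(ndtr(0.0) - 0.5) < 1e-12
assert abs(ndtr(1.96) - 0.975) < 1e-3
assert abs(ndtr(-1.0) + ndtr(1.0) - 1.0) < 1e-12  # symmetry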
3,092
adamrehn/ue4cli
ue4cli/UnrealManagerBase.py
UnrealManagerBase._runUnrealBuildTool
def _runUnrealBuildTool(self, target, platform, configuration, args, capture=False):
    """
    Invokes UnrealBuildTool with the specified parameters
    """
    platform = self._transformBuildToolPlatform(platform)
    arguments = [self.getBuildScript(), target, platform, configuration] + args
    if capture == True:
        return Utility.capture(arguments, cwd=self.getEngineRoot(), raiseOnError=True)
    else:
        Utility.run(arguments, cwd=self.getEngineRoot(), raiseOnError=True)
python
def _runUnrealBuildTool(self, target, platform, configuration, args, capture=False):
    """
    Invokes UnrealBuildTool with the specified parameters
    """
    platform = self._transformBuildToolPlatform(platform)
    arguments = [self.getBuildScript(), target, platform, configuration] + args
    if capture == True:
        return Utility.capture(arguments, cwd=self.getEngineRoot(), raiseOnError=True)
    else:
        Utility.run(arguments, cwd=self.getEngineRoot(), raiseOnError=True)
['def', '_runUnrealBuildTool', '(', 'self', ',', 'target', ',', 'platform', ',', 'configuration', ',', 'args', ',', 'capture', '=', 'False', ')', ':', 'platform', '=', 'self', '.', '_transformBuildToolPlatform', '(', 'platform', ')', 'arguments', '=', '[', 'self', '.', 'getBuildScript', '(', ')', ',', 'target', ',', 'platform', ',', 'configuration', ']', '+', 'args', 'if', 'capture', '==', 'True', ':', 'return', 'Utility', '.', 'capture', '(', 'arguments', ',', 'cwd', '=', 'self', '.', 'getEngineRoot', '(', ')', ',', 'raiseOnError', '=', 'True', ')', 'else', ':', 'Utility', '.', 'run', '(', 'arguments', ',', 'cwd', '=', 'self', '.', 'getEngineRoot', '(', ')', ',', 'raiseOnError', '=', 'True', ')']
Invokes UnrealBuildTool with the specified parameters
['Invokes', 'UnrealBuildTool', 'with', 'the', 'specified', 'parameters']
train
https://github.com/adamrehn/ue4cli/blob/f1c34502c96059e36757b7433da7e98760a75a6f/ue4cli/UnrealManagerBase.py#L607-L616
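The run-versus-capture split maps naturally onto the standard library. The sketch below is a hypothetical stand-in using subprocess.run, not ue4cli's actual Utility API:

# Hypothetical sketch of the run-vs-capture split using the standard library.
import subprocess

def run_tool(arguments, cwd, capture=False):
    if capture:
        # capture stdout for the caller, raising on a non-zero exit code
        return subprocess.run(arguments, cwd=cwd, check=True,
                              capture_output=True, text=True).stdout
    # otherwise stream output straight through, still raising on failure
    subprocess.run(arguments, cwd=cwd, check=True)

# e.g. run_tool(['git', 'status'], cwd='.', capture=True)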
3,093
Gandi/gandi.cli
gandi/cli/modules/cert.py
Certificate.from_cn
def from_cn(cls, common_name):
    """ Retrieve a certificate by its common name. """
    # search with cn
    result_cn = [(cert['id'], [cert['cn']] + cert['altnames'])
                 for cert in cls.list({'status': ['pending', 'valid'],
                                       'items_per_page': 500,
                                       'cn': common_name})]

    # search with altname
    result_alt = [(cert['id'], [cert['cn']] + cert['altnames'])
                  for cert in cls.list({'status': ['pending', 'valid'],
                                        'items_per_page': 500,
                                        'altname': common_name})]
    result = result_cn + result_alt

    ret = {}
    for id_, fqdns in result:
        for fqdn in fqdns:
            ret.setdefault(fqdn, []).append(id_)

    cert_id = ret.get(common_name)
    if not cert_id:
        return

    return cert_id
python
def from_cn(cls, common_name):
    """ Retrieve a certificate by its common name. """
    # search with cn
    result_cn = [(cert['id'], [cert['cn']] + cert['altnames'])
                 for cert in cls.list({'status': ['pending', 'valid'],
                                       'items_per_page': 500,
                                       'cn': common_name})]

    # search with altname
    result_alt = [(cert['id'], [cert['cn']] + cert['altnames'])
                  for cert in cls.list({'status': ['pending', 'valid'],
                                        'items_per_page': 500,
                                        'altname': common_name})]
    result = result_cn + result_alt

    ret = {}
    for id_, fqdns in result:
        for fqdn in fqdns:
            ret.setdefault(fqdn, []).append(id_)

    cert_id = ret.get(common_name)
    if not cert_id:
        return

    return cert_id
['def', 'from_cn', '(', 'cls', ',', 'common_name', ')', ':', '# search with cn', 'result_cn', '=', '[', '(', 'cert', '[', "'id'", ']', ',', '[', 'cert', '[', "'cn'", ']', ']', '+', 'cert', '[', "'altnames'", ']', ')', 'for', 'cert', 'in', 'cls', '.', 'list', '(', '{', "'status'", ':', '[', "'pending'", ',', "'valid'", ']', ',', "'items_per_page'", ':', '500', ',', "'cn'", ':', 'common_name', '}', ')', ']', '# search with altname', 'result_alt', '=', '[', '(', 'cert', '[', "'id'", ']', ',', '[', 'cert', '[', "'cn'", ']', ']', '+', 'cert', '[', "'altnames'", ']', ')', 'for', 'cert', 'in', 'cls', '.', 'list', '(', '{', "'status'", ':', '[', "'pending'", ',', "'valid'", ']', ',', "'items_per_page'", ':', '500', ',', "'altname'", ':', 'common_name', '}', ')', ']', 'result', '=', 'result_cn', '+', 'result_alt', 'ret', '=', '{', '}', 'for', 'id_', ',', 'fqdns', 'in', 'result', ':', 'for', 'fqdn', 'in', 'fqdns', ':', 'ret', '.', 'setdefault', '(', 'fqdn', ',', '[', ']', ')', '.', 'append', '(', 'id_', ')', 'cert_id', '=', 'ret', '.', 'get', '(', 'common_name', ')', 'if', 'not', 'cert_id', ':', 'return', 'return', 'cert_id']
Retrieve a certificate by its common name.
['Retrieve', 'a', 'certificate', 'by', 'its', 'common', 'name', '.']
train
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/modules/cert.py#L121-L144
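The core of the lookup is the dict.setdefault accumulation that builds an fqdn-to-ids index. A standalone illustration with made-up sample data:

# Standalone illustration of the setdefault() accumulation used above to build
# an fqdn -> [certificate ids] index; the sample data is made up.
certs = [
    (101, ['example.com', 'www.example.com']),
    (102, ['example.com']),
]

index = {}
for cert_id, fqdns in certs:
    for fqdn in fqdns:
        index.setdefault(fqdn, []).append(cert_id)

print(index['example.com'])      # [101, 102] -- both certs cover the name
print(index.get('missing.com'))  # None, mirroring the early return above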
3,094
apple/turicreate
src/unity/python/turicreate/toolkits/object_detector/util/_visualization.py
draw_bounding_boxes
def draw_bounding_boxes(images, annotations, confidence_threshold=0):
    """
    Visualizes bounding boxes (ground truth or predictions) by
    returning annotated copies of the images.

    Parameters
    ----------
    images: SArray or Image
        An `SArray` of type `Image`. A single `Image` instance may also be
        given.

    annotations: SArray or list
        An `SArray` of annotations (either output from the
        `ObjectDetector.predict` function or ground truth). A single list of
        annotations may also be given, provided that it is coupled with a
        single image.

    confidence_threshold: float
        Confidence threshold can limit the number of boxes to draw. By
        default, this is set to 0, since the prediction may have already
        pruned with an appropriate confidence threshold.

    Returns
    -------
    annotated_images: SArray or Image
        Similar to the input `images`, except the images are decorated with
        boxes to visualize the object instances.

    See also
    --------
    unstack_annotations
    """
    _numeric_param_check_range('confidence_threshold', confidence_threshold, 0.0, 1.0)
    from PIL import Image

    def draw_single_image(row):
        image = row['image']
        anns = row['annotations']
        if anns == None:
            anns = []
        elif type(anns) == dict:
            anns = [anns]
        pil_img = Image.fromarray(image.pixel_data)
        _annotate_image(pil_img, anns, confidence_threshold=confidence_threshold)
        image = _np.array(pil_img)
        FORMAT_RAW = 2
        annotated_image = _tc.Image(_image_data=image.tobytes(),
                                    _width=image.shape[1],
                                    _height=image.shape[0],
                                    _channels=image.shape[2],
                                    _format_enum=FORMAT_RAW,
                                    _image_data_size=image.size)
        return annotated_image

    if isinstance(images, _tc.Image) and isinstance(annotations, list):
        return draw_single_image({'image': images, 'annotations': annotations})
    else:
        return (_tc.SFrame({'image': images, 'annotations': annotations})
                .apply(draw_single_image))
python
def draw_bounding_boxes(images, annotations, confidence_threshold=0):
    """
    Visualizes bounding boxes (ground truth or predictions) by
    returning annotated copies of the images.

    Parameters
    ----------
    images: SArray or Image
        An `SArray` of type `Image`. A single `Image` instance may also be
        given.

    annotations: SArray or list
        An `SArray` of annotations (either output from the
        `ObjectDetector.predict` function or ground truth). A single list of
        annotations may also be given, provided that it is coupled with a
        single image.

    confidence_threshold: float
        Confidence threshold can limit the number of boxes to draw. By
        default, this is set to 0, since the prediction may have already
        pruned with an appropriate confidence threshold.

    Returns
    -------
    annotated_images: SArray or Image
        Similar to the input `images`, except the images are decorated with
        boxes to visualize the object instances.

    See also
    --------
    unstack_annotations
    """
    _numeric_param_check_range('confidence_threshold', confidence_threshold, 0.0, 1.0)
    from PIL import Image

    def draw_single_image(row):
        image = row['image']
        anns = row['annotations']
        if anns == None:
            anns = []
        elif type(anns) == dict:
            anns = [anns]
        pil_img = Image.fromarray(image.pixel_data)
        _annotate_image(pil_img, anns, confidence_threshold=confidence_threshold)
        image = _np.array(pil_img)
        FORMAT_RAW = 2
        annotated_image = _tc.Image(_image_data=image.tobytes(),
                                    _width=image.shape[1],
                                    _height=image.shape[0],
                                    _channels=image.shape[2],
                                    _format_enum=FORMAT_RAW,
                                    _image_data_size=image.size)
        return annotated_image

    if isinstance(images, _tc.Image) and isinstance(annotations, list):
        return draw_single_image({'image': images, 'annotations': annotations})
    else:
        return (_tc.SFrame({'image': images, 'annotations': annotations})
                .apply(draw_single_image))
['def', 'draw_bounding_boxes', '(', 'images', ',', 'annotations', ',', 'confidence_threshold', '=', '0', ')', ':', '_numeric_param_check_range', '(', "'confidence_threshold'", ',', 'confidence_threshold', ',', '0.0', ',', '1.0', ')', 'from', 'PIL', 'import', 'Image', 'def', 'draw_single_image', '(', 'row', ')', ':', 'image', '=', 'row', '[', "'image'", ']', 'anns', '=', 'row', '[', "'annotations'", ']', 'if', 'anns', '==', 'None', ':', 'anns', '=', '[', ']', 'elif', 'type', '(', 'anns', ')', '==', 'dict', ':', 'anns', '=', '[', 'anns', ']', 'pil_img', '=', 'Image', '.', 'fromarray', '(', 'image', '.', 'pixel_data', ')', '_annotate_image', '(', 'pil_img', ',', 'anns', ',', 'confidence_threshold', '=', 'confidence_threshold', ')', 'image', '=', '_np', '.', 'array', '(', 'pil_img', ')', 'FORMAT_RAW', '=', '2', 'annotated_image', '=', '_tc', '.', 'Image', '(', '_image_data', '=', 'image', '.', 'tobytes', '(', ')', ',', '_width', '=', 'image', '.', 'shape', '[', '1', ']', ',', '_height', '=', 'image', '.', 'shape', '[', '0', ']', ',', '_channels', '=', 'image', '.', 'shape', '[', '2', ']', ',', '_format_enum', '=', 'FORMAT_RAW', ',', '_image_data_size', '=', 'image', '.', 'size', ')', 'return', 'annotated_image', 'if', 'isinstance', '(', 'images', ',', '_tc', '.', 'Image', ')', 'and', 'isinstance', '(', 'annotations', ',', 'list', ')', ':', 'return', 'draw_single_image', '(', '{', "'image'", ':', 'images', ',', "'annotations'", ':', 'annotations', '}', ')', 'else', ':', 'return', '(', '_tc', '.', 'SFrame', '(', '{', "'image'", ':', 'images', ',', "'annotations'", ':', 'annotations', '}', ')', '.', 'apply', '(', 'draw_single_image', ')', ')']
Visualizes bounding boxes (ground truth or predictions) by returning annotated copies of the images. Parameters ---------- images: SArray or Image An `SArray` of type `Image`. A single `Image` instance may also be given. annotations: SArray or list An `SArray` of annotations (either output from the `ObjectDetector.predict` function or ground truth). A single list of annotations may also be given, provided that it is coupled with a single image. confidence_threshold: float Confidence threshold can limit the number of boxes to draw. By default, this is set to 0, since the prediction may have already pruned with an appropriate confidence threshold. Returns ------- annotated_images: SArray or Image Similar to the input `images`, except the images are decorated with boxes to visualize the object instances. See also -------- unstack_annotations
['Visualizes', 'bounding', 'boxes', '(', 'ground', 'truth', 'or', 'predictions', ')', 'by', 'returning', 'annotated', 'copies', 'of', 'the', 'images', '.']
train
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/object_detector/util/_visualization.py#L94-L151
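The per-image round trip is: NumPy array -> PIL image -> draw -> back to a NumPy array. A minimal Pillow/NumPy sketch of just that round trip, with _annotate_image replaced by a single made-up rectangle:

# Minimal Pillow sketch of the draw-then-convert-back round trip used above
# (requires Pillow and NumPy; the box coordinates are made up).
import numpy as np
from PIL import Image, ImageDraw

pixels = np.zeros((120, 160, 3), dtype=np.uint8)        # blank RGB image
pil_img = Image.fromarray(pixels)

draw = ImageDraw.Draw(pil_img)
draw.rectangle([40, 30, 120, 90], outline=(255, 0, 0))  # one "bounding box"

annotated = np.array(pil_img)                           # back to a raw array
print(annotated.shape, annotated.dtype)                 # (120, 160, 3) uint8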
3,095
useblocks/groundwork
groundwork/patterns/gw_commands_pattern.py
CommandsListApplication.unregister
def unregister(self, command):
    """
    Unregisters an existing command, so that this command is no longer
    available on the command line interface.

    This function is mainly used during plugin deactivation.

    :param command: Name of the command
    """
    if command not in self._commands.keys():
        self.log.warning("Can not unregister command %s" % command)
    else:
        # Click does not have any kind of a function to unregister/remove/deactivate already added commands.
        # So we need to delete the related objects manually from the click internal commands dictionary for
        # our root command.
        del(self._click_root_command.commands[command])
        # Finally lets delete the command from our internal dictionary too.
        del(self._commands[command])
        self.log.debug("Command %s got unregistered" % command)
python
def unregister(self, command):
    """
    Unregisters an existing command, so that this command is no longer
    available on the command line interface.

    This function is mainly used during plugin deactivation.

    :param command: Name of the command
    """
    if command not in self._commands.keys():
        self.log.warning("Can not unregister command %s" % command)
    else:
        # Click does not have any kind of a function to unregister/remove/deactivate already added commands.
        # So we need to delete the related objects manually from the click internal commands dictionary for
        # our root command.
        del(self._click_root_command.commands[command])
        # Finally lets delete the command from our internal dictionary too.
        del(self._commands[command])
        self.log.debug("Command %s got unregistered" % command)
['def', 'unregister', '(', 'self', ',', 'command', ')', ':', 'if', 'command', 'not', 'in', 'self', '.', '_commands', '.', 'keys', '(', ')', ':', 'self', '.', 'log', '.', 'warning', '(', '"Can not unregister command %s"', '%', 'command', ')', 'else', ':', '# Click does not have any kind of a function to unregister/remove/deactivate already added commands.', '# So we need to delete the related objects manually from the click internal commands dictionary for', '# our root command.', 'del', '(', 'self', '.', '_click_root_command', '.', 'commands', '[', 'command', ']', ')', '# Finally lets delete the command from our internal dictionary too.', 'del', '(', 'self', '.', '_commands', '[', 'command', ']', ')', 'self', '.', 'log', '.', 'debug', '(', '"Command %s got unregistered"', '%', 'command', ')']
Unregisters an existing command, so that this command is no longer available on the command line interface. This function is mainly used during plugin deactivation. :param command: Name of the command
['Unregisters', 'an', 'existing', 'command', 'so', 'that', 'this', 'command', 'is', 'no', 'longer', 'available', 'on', 'the', 'command', 'line', 'interface', '.']
train
https://github.com/useblocks/groundwork/blob/d34fce43f54246ca4db0f7b89e450dcdc847c68c/groundwork/patterns/gw_commands_pattern.py#L185-L202
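The trick the method relies on is that a Click group keeps its registered commands in a plain dict attribute. A small runnable demonstration (requires click):

# Runnable demonstration that a Click group's commands live in a plain dict,
# which is what the unregister() above relies on.
import click

@click.group()
def cli():
    pass

@cli.command()
def hello():
    click.echo("hello")

print(sorted(cli.commands))   # ['hello']
del cli.commands['hello']     # same removal technique as above
print(sorted(cli.commands))   # []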
3,096
python-cmd2/cmd2
cmd2/cmd2.py
Cmd.enable_category
def enable_category(self, category: str) -> None:
    """
    Enable an entire category of commands
    :param category: the category to enable
    """
    for cmd_name in list(self.disabled_commands):
        func = self.disabled_commands[cmd_name].command_function
        if hasattr(func, HELP_CATEGORY) and getattr(func, HELP_CATEGORY) == category:
            self.enable_command(cmd_name)
python
def enable_category(self, category: str) -> None:
    """
    Enable an entire category of commands
    :param category: the category to enable
    """
    for cmd_name in list(self.disabled_commands):
        func = self.disabled_commands[cmd_name].command_function
        if hasattr(func, HELP_CATEGORY) and getattr(func, HELP_CATEGORY) == category:
            self.enable_command(cmd_name)
['def', 'enable_category', '(', 'self', ',', 'category', ':', 'str', ')', '->', 'None', ':', 'for', 'cmd_name', 'in', 'list', '(', 'self', '.', 'disabled_commands', ')', ':', 'func', '=', 'self', '.', 'disabled_commands', '[', 'cmd_name', ']', '.', 'command_function', 'if', 'hasattr', '(', 'func', ',', 'HELP_CATEGORY', ')', 'and', 'getattr', '(', 'func', ',', 'HELP_CATEGORY', ')', '==', 'category', ':', 'self', '.', 'enable_command', '(', 'cmd_name', ')']
Enable an entire category of commands :param category: the category to enable
['Enable', 'an', 'entire', 'category', 'of', 'commands', ':', 'param', 'category', ':', 'the', 'category', 'to', 'enable']
train
https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/cmd2/cmd2.py#L3786-L3794
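The enable loop is a generic attribute-tag filter. The sketch below reproduces it outside cmd2; the HELP_CATEGORY value and categorize helper here are made up:

# Standalone sketch of filtering callables by a category attribute, the same
# hasattr/getattr test used above.
HELP_CATEGORY = 'help_category'

def categorize(func, category):
    setattr(func, HELP_CATEGORY, category)
    return func

def do_alpha(): pass
def do_beta(): pass
categorize(do_alpha, 'Networking')
categorize(do_beta, 'Files')

commands = {'alpha': do_alpha, 'beta': do_beta}
networking = [name for name, func in commands.items()
              if getattr(func, HELP_CATEGORY, None) == 'Networking']
print(networking)  # ['alpha']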
3,097
twisted/txacme
src/txacme/client.py
Client.update_registration
def update_registration(self, regr, uri=None):
    """
    Submit a registration to the server to update it.

    :param ~acme.messages.RegistrationResource regr: The registration to
        update.  Can be a :class:`~acme.messages.NewRegistration` instead,
        in order to create a new registration.
    :param str uri: The url to submit to.  Must be specified if a
        :class:`~acme.messages.NewRegistration` is provided.

    :return: The updated registration resource.
    :rtype: Deferred[`~acme.messages.RegistrationResource`]
    """
    if uri is None:
        uri = regr.uri
    if isinstance(regr, messages.RegistrationResource):
        message = messages.UpdateRegistration(**dict(regr.body))
    else:
        message = regr
    action = LOG_ACME_UPDATE_REGISTRATION(uri=uri, registration=message)
    with action.context():
        return (
            DeferredContext(self._client.post(uri, message))
            .addCallback(self._parse_regr_response, uri=uri)
            .addCallback(self._check_regr, regr)
            .addCallback(
                tap(lambda r: action.add_success_fields(registration=r)))
            .addActionFinish())
python
def update_registration(self, regr, uri=None):
    """
    Submit a registration to the server to update it.

    :param ~acme.messages.RegistrationResource regr: The registration to
        update.  Can be a :class:`~acme.messages.NewRegistration` instead,
        in order to create a new registration.
    :param str uri: The url to submit to.  Must be specified if a
        :class:`~acme.messages.NewRegistration` is provided.

    :return: The updated registration resource.
    :rtype: Deferred[`~acme.messages.RegistrationResource`]
    """
    if uri is None:
        uri = regr.uri
    if isinstance(regr, messages.RegistrationResource):
        message = messages.UpdateRegistration(**dict(regr.body))
    else:
        message = regr
    action = LOG_ACME_UPDATE_REGISTRATION(uri=uri, registration=message)
    with action.context():
        return (
            DeferredContext(self._client.post(uri, message))
            .addCallback(self._parse_regr_response, uri=uri)
            .addCallback(self._check_regr, regr)
            .addCallback(
                tap(lambda r: action.add_success_fields(registration=r)))
            .addActionFinish())
['def', 'update_registration', '(', 'self', ',', 'regr', ',', 'uri', '=', 'None', ')', ':', 'if', 'uri', 'is', 'None', ':', 'uri', '=', 'regr', '.', 'uri', 'if', 'isinstance', '(', 'regr', ',', 'messages', '.', 'RegistrationResource', ')', ':', 'message', '=', 'messages', '.', 'UpdateRegistration', '(', '*', '*', 'dict', '(', 'regr', '.', 'body', ')', ')', 'else', ':', 'message', '=', 'regr', 'action', '=', 'LOG_ACME_UPDATE_REGISTRATION', '(', 'uri', '=', 'uri', ',', 'registration', '=', 'message', ')', 'with', 'action', '.', 'context', '(', ')', ':', 'return', '(', 'DeferredContext', '(', 'self', '.', '_client', '.', 'post', '(', 'uri', ',', 'message', ')', ')', '.', 'addCallback', '(', 'self', '.', '_parse_regr_response', ',', 'uri', '=', 'uri', ')', '.', 'addCallback', '(', 'self', '.', '_check_regr', ',', 'regr', ')', '.', 'addCallback', '(', 'tap', '(', 'lambda', 'r', ':', 'action', '.', 'add_success_fields', '(', 'registration', '=', 'r', ')', ')', ')', '.', 'addActionFinish', '(', ')', ')']
Submit a registration to the server to update it. :param ~acme.messages.RegistrationResource regr: The registration to update. Can be a :class:`~acme.messages.NewRegistration` instead, in order to create a new registration. :param str uri: The url to submit to. Must be specified if a :class:`~acme.messages.NewRegistration` is provided. :return: The updated registration resource. :rtype: Deferred[`~acme.messages.RegistrationResource`]
['Submit', 'a', 'registration', 'to', 'the', 'server', 'to', 'update', 'it', '.']
train
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L201-L228
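The method is mostly a Deferred callback chain: post, parse, check, log. A minimal Twisted sketch of that chaining style (requires Twisted; the parse/check steps are made-up stand-ins, not txacme's):

# Minimal Twisted sketch of the callback-chaining style used above.
from twisted.internet import defer

def parse_response(body, uri):
    return {'uri': uri, 'body': body}

def check_resource(resource, expected):
    assert resource['body'] == expected
    return resource

d = defer.succeed('registration-body')          # stands in for the HTTP POST
d.addCallback(parse_response, uri='https://acme.example/reg/1')
d.addCallback(check_resource, 'registration-body')
d.addCallback(print)                            # {'uri': ..., 'body': ...}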
3,098
wakatime/wakatime
wakatime/packages/pygments/lexers/__init__.py
_iter_lexerclasses
def _iter_lexerclasses(plugins=True):
    """Return an iterator over all lexer classes."""
    for key in sorted(LEXERS):
        module_name, name = LEXERS[key][:2]
        if name not in _lexer_cache:
            _load_lexers(module_name)
        yield _lexer_cache[name]
    if plugins:
        for lexer in find_plugin_lexers():
            yield lexer
python
def _iter_lexerclasses(plugins=True):
    """Return an iterator over all lexer classes."""
    for key in sorted(LEXERS):
        module_name, name = LEXERS[key][:2]
        if name not in _lexer_cache:
            _load_lexers(module_name)
        yield _lexer_cache[name]
    if plugins:
        for lexer in find_plugin_lexers():
            yield lexer
['def', '_iter_lexerclasses', '(', 'plugins', '=', 'True', ')', ':', 'for', 'key', 'in', 'sorted', '(', 'LEXERS', ')', ':', 'module_name', ',', 'name', '=', 'LEXERS', '[', 'key', ']', '[', ':', '2', ']', 'if', 'name', 'not', 'in', '_lexer_cache', ':', '_load_lexers', '(', 'module_name', ')', 'yield', '_lexer_cache', '[', 'name', ']', 'if', 'plugins', ':', 'for', 'lexer', 'in', 'find_plugin_lexers', '(', ')', ':', 'yield', 'lexer']
Return an iterator over all lexer classes.
['Return', 'an', 'iterator', 'over', 'all', 'lexer', 'classes', '.']
train
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/__init__.py#L225-L234
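The generator implements lazy load-and-cache iteration over a registry. A standalone sketch of the same shape, with a made-up registry and loader:

# Standalone sketch of the lazy load-and-cache iteration used above.
_CACHE = {}
REGISTRY = {'foo': ('pkg.foo_mod', 'FooLexer'), 'bar': ('pkg.bar_mod', 'BarLexer')}

def _load(module_name, name):
    # stand-in for importing the module and caching its classes
    _CACHE[name] = type(name, (), {'module': module_name})

def iter_classes():
    for key in sorted(REGISTRY):
        module_name, name = REGISTRY[key]
        if name not in _CACHE:       # import lazily, only on first use
            _load(module_name, name)
        yield _CACHE[name]

print([cls.__name__ for cls in iter_classes()])  # ['BarLexer', 'FooLexer']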
3,099
saltstack/salt
salt/modules/win_pkg.py
remove
def remove(name=None, pkgs=None, **kwargs):
    '''
    Remove the passed package(s) from the system using winrepo

    .. versionadded:: 0.16.0

    Args:
        name (str):
            The name(s) of the package(s) to be uninstalled. Can be a
            single package or a comma delimited list of packages, no spaces.

        pkgs (list):
            A list of packages to delete. Must be passed as a python list. The
            ``name`` parameter will be ignored if this option is passed.

    Kwargs:

        version (str):
            The version of the package to be uninstalled. If this option is
            used to to uninstall multiple packages, then this version will be
            applied to all targeted packages. Recommended using only when
            uninstalling a single package. If this parameter is omitted, the
            latest version will be uninstalled.

        saltenv (str): Salt environment. Default ``base``

        refresh (bool): Refresh package metadata. Default ``False``

    Returns:
        dict: Returns a dict containing the changes.

        If the package is removed by ``pkg.remove``:

            {'<package>': {'old': '<old-version>',
                           'new': '<new-version>'}}

        If the package is already uninstalled:

            {'<package>': {'current': 'not installed'}}

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.remove <package name>
        salt '*' pkg.remove <package1>,<package2>,<package3>
        salt '*' pkg.remove pkgs='["foo", "bar"]'
    '''
    saltenv = kwargs.get('saltenv', 'base')
    refresh = salt.utils.data.is_true(kwargs.get('refresh', False))
    # no need to call _refresh_db_conditional as list_pkgs will do it
    ret = {}

    # Make sure name or pkgs is passed
    if not name and not pkgs:
        return 'Must pass a single package or a list of packages'

    # Get package parameters
    pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs, **kwargs)[0]

    # Get a list of currently installed software for comparison at the end
    old = list_pkgs(saltenv=saltenv, refresh=refresh, versions_as_list=True)

    # Loop through each package
    changed = []  # list of changed package names
    for pkgname, version_num in six.iteritems(pkg_params):

        # Load package information for the package
        pkginfo = _get_package_info(pkgname, saltenv=saltenv)

        # Make sure pkginfo was found
        if not pkginfo:
            msg = 'Unable to locate package {0}'.format(pkgname)
            log.error(msg)
            ret[pkgname] = msg
            continue

        # Check to see if package is installed on the system
        if pkgname not in old:
            log.debug('%s %s not installed', pkgname, version_num if version_num else '')
            ret[pkgname] = {'current': 'not installed'}
            continue

        removal_targets = []
        # Only support a single version number
        if version_num is not None:
            # Using the salt cmdline with version=5.3 might be interpreted
            # as a float it must be converted to a string in order for
            # string matching to work.
            version_num = six.text_type(version_num)

        # At least one version of the software is installed.
        if version_num is None:
            for ver_install in old[pkgname]:
                if ver_install not in pkginfo and 'latest' in pkginfo:
                    log.debug('%s %s using package latest entry to to remove', pkgname, version_num)
                    removal_targets.append('latest')
                else:
                    removal_targets.append(ver_install)
        else:
            if version_num in pkginfo:
                # we known how to remove this version
                if version_num in old[pkgname]:
                    removal_targets.append(version_num)
                else:
                    log.debug('%s %s not installed', pkgname, version_num)
                    ret[pkgname] = {'current': '{0} not installed'.format(version_num)}
                    continue
            elif 'latest' in pkginfo:
                # we do not have version entry, assume software can self upgrade and use latest
                log.debug('%s %s using package latest entry to to remove', pkgname, version_num)
                removal_targets.append('latest')

        if not removal_targets:
            log.error('%s %s no definition to remove this version', pkgname, version_num)
            ret[pkgname] = {
                'current': '{0} no definition, cannot removed'.format(version_num)
            }
            continue

        for target in removal_targets:
            # Get the uninstaller
            uninstaller = pkginfo[target].get('uninstaller', '')
            cache_dir = pkginfo[target].get('cache_dir', False)
            uninstall_flags = pkginfo[target].get('uninstall_flags', '')

            # If no uninstaller found, use the installer with uninstall flags
            if not uninstaller and uninstall_flags:
                uninstaller = pkginfo[target].get('installer', '')

            # If still no uninstaller found, fail
            if not uninstaller:
                log.error(
                    'No installer or uninstaller configured for package %s',
                    pkgname,
                )
                ret[pkgname] = {'no uninstaller defined': target}
                continue

            # Where is the uninstaller
            if uninstaller.startswith(('salt:', 'http:', 'https:', 'ftp:')):

                # Check for the 'cache_dir' parameter in the .sls file
                # If true, the entire directory will be cached instead of the
                # individual file. This is useful for installations that are not
                # single files
                if cache_dir and uninstaller.startswith('salt:'):
                    path, _ = os.path.split(uninstaller)
                    __salt__['cp.cache_dir'](path, saltenv, False, None, '[email protected]$')

                # Check to see if the uninstaller is cached
                cached_pkg = __salt__['cp.is_cached'](uninstaller, saltenv)
                if not cached_pkg:
                    # It's not cached. Cache it, mate.
                    cached_pkg = __salt__['cp.cache_file'](uninstaller, saltenv)

                    # Check if the uninstaller was cached successfully
                    if not cached_pkg:
                        log.error('Unable to cache %s', uninstaller)
                        ret[pkgname] = {'unable to cache': uninstaller}
                        continue

                # Compare the hash of the cached installer to the source only if
                # the file is hosted on salt:
                # TODO cp.cache_file does cache and hash checking? So why do it again?
                if uninstaller.startswith('salt:'):
                    if __salt__['cp.hash_file'](uninstaller, saltenv) != \
                            __salt__['cp.hash_file'](cached_pkg):
                        try:
                            cached_pkg = __salt__['cp.cache_file'](
                                uninstaller, saltenv)
                        except MinionError as exc:
                            return '{0}: {1}'.format(exc, uninstaller)

                        # Check if the installer was cached successfully
                        if not cached_pkg:
                            log.error('Unable to cache %s', uninstaller)
                            ret[pkgname] = {'unable to cache': uninstaller}
                            continue
            else:
                # Run the uninstaller directly
                # (not hosted on salt:, https:, etc.)
                cached_pkg = os.path.expandvars(uninstaller)

            # Fix non-windows slashes
            cached_pkg = cached_pkg.replace('/', '\\')
            cache_path, _ = os.path.split(cached_pkg)

            # os.path.expandvars is not required as we run everything through cmd.exe /s /c
            if kwargs.get('extra_uninstall_flags'):
                uninstall_flags = '{0} {1}'.format(
                    uninstall_flags, kwargs.get('extra_uninstall_flags', ''))

            # Compute msiexec string
            use_msiexec, msiexec = _get_msiexec(pkginfo[target].get('msiexec', False))
            cmd_shell = os.getenv('ComSpec', '{0}\\system32\\cmd.exe'.format(os.getenv('WINDIR')))

            # Build cmd and arguments
            # cmd and arguments must be separated for use with the task scheduler
            if use_msiexec:
                # Check if uninstaller is set to {guid}, if not we assume its a remote msi file.
                # which has already been downloaded.
                arguments = '"{0}" /X "{1}"'.format(msiexec, cached_pkg)
            else:
                arguments = '"{0}"'.format(cached_pkg)

            if uninstall_flags:
                arguments = '{0} {1}'.format(arguments, uninstall_flags)

            # Uninstall the software
            changed.append(pkgname)
            # Check Use Scheduler Option
            if pkginfo[target].get('use_scheduler', False):

                # Create Scheduled Task
                __salt__['task.create_task'](name='update-salt-software',
                                             user_name='System',
                                             force=True,
                                             action_type='Execute',
                                             cmd=cmd_shell,
                                             arguments='/s /c "{0}"'.format(arguments),
                                             start_in=cache_path,
                                             trigger_type='Once',
                                             start_date='1975-01-01',
                                             start_time='01:00',
                                             ac_only=False,
                                             stop_if_on_batteries=False)

                # Run Scheduled Task
                if not __salt__['task.run_wait'](name='update-salt-software'):
                    log.error('Failed to remove %s', pkgname)
                    log.error('Scheduled Task failed to run')
                    ret[pkgname] = {'uninstall status': 'failed'}
            else:
                # Launch the command
                result = __salt__['cmd.run_all'](
                    '"{0}" /s /c "{1}"'.format(cmd_shell, arguments),
                    output_loglevel='trace',
                    python_shell=False,
                    redirect_stderr=True)
                if not result['retcode']:
                    ret[pkgname] = {'uninstall status': 'success'}
                    changed.append(pkgname)
                elif result['retcode'] == 3010:
                    # 3010 is ERROR_SUCCESS_REBOOT_REQUIRED
                    report_reboot_exit_codes = kwargs.pop(
                        'report_reboot_exit_codes', True)
                    if report_reboot_exit_codes:
                        __salt__['system.set_reboot_required_witnessed']()
                    ret[pkgname] = {'uninstall status': 'success, reboot required'}
                    changed.append(pkgname)
                elif result['retcode'] == 1641:
                    # 1641 is ERROR_SUCCESS_REBOOT_INITIATED
                    ret[pkgname] = {'uninstall status': 'success, reboot initiated'}
                    changed.append(pkgname)
                else:
                    log.error('Failed to remove %s', pkgname)
                    log.error('retcode %s', result['retcode'])
                    log.error('uninstaller output: %s', result['stdout'])
                    ret[pkgname] = {'uninstall status': 'failed'}

    # Get a new list of installed software
    new = list_pkgs(saltenv=saltenv, refresh=False)

    # Take the "old" package list and convert the values to strings in
    # preparation for the comparison below.
    __salt__['pkg_resource.stringify'](old)

    # Check for changes in the registry
    difference = salt.utils.data.compare_dicts(old, new)
    found_chgs = all(name in difference for name in changed)
    end_t = time.time() + 3  # give it 3 seconds to catch up.
    while not found_chgs and time.time() < end_t:
        time.sleep(0.5)
        new = list_pkgs(saltenv=saltenv, refresh=False)
        difference = salt.utils.data.compare_dicts(old, new)
        found_chgs = all(name in difference for name in changed)

    if not found_chgs:
        log.warning('Expected changes for package removal may not have occured')

    # Compare the software list before and after
    # Add the difference to ret
    ret.update(difference)

    return ret
python
def remove(name=None, pkgs=None, **kwargs):
    '''
    Remove the passed package(s) from the system using winrepo

    .. versionadded:: 0.16.0

    Args:
        name (str):
            The name(s) of the package(s) to be uninstalled. Can be a
            single package or a comma delimited list of packages, no spaces.

        pkgs (list):
            A list of packages to delete. Must be passed as a python list. The
            ``name`` parameter will be ignored if this option is passed.

    Kwargs:

        version (str):
            The version of the package to be uninstalled. If this option is
            used to to uninstall multiple packages, then this version will be
            applied to all targeted packages. Recommended using only when
            uninstalling a single package. If this parameter is omitted, the
            latest version will be uninstalled.

        saltenv (str): Salt environment. Default ``base``

        refresh (bool): Refresh package metadata. Default ``False``

    Returns:
        dict: Returns a dict containing the changes.

        If the package is removed by ``pkg.remove``:

            {'<package>': {'old': '<old-version>',
                           'new': '<new-version>'}}

        If the package is already uninstalled:

            {'<package>': {'current': 'not installed'}}

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.remove <package name>
        salt '*' pkg.remove <package1>,<package2>,<package3>
        salt '*' pkg.remove pkgs='["foo", "bar"]'
    '''
    saltenv = kwargs.get('saltenv', 'base')
    refresh = salt.utils.data.is_true(kwargs.get('refresh', False))
    # no need to call _refresh_db_conditional as list_pkgs will do it
    ret = {}

    # Make sure name or pkgs is passed
    if not name and not pkgs:
        return 'Must pass a single package or a list of packages'

    # Get package parameters
    pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs, **kwargs)[0]

    # Get a list of currently installed software for comparison at the end
    old = list_pkgs(saltenv=saltenv, refresh=refresh, versions_as_list=True)

    # Loop through each package
    changed = []  # list of changed package names
    for pkgname, version_num in six.iteritems(pkg_params):

        # Load package information for the package
        pkginfo = _get_package_info(pkgname, saltenv=saltenv)

        # Make sure pkginfo was found
        if not pkginfo:
            msg = 'Unable to locate package {0}'.format(pkgname)
            log.error(msg)
            ret[pkgname] = msg
            continue

        # Check to see if package is installed on the system
        if pkgname not in old:
            log.debug('%s %s not installed', pkgname, version_num if version_num else '')
            ret[pkgname] = {'current': 'not installed'}
            continue

        removal_targets = []
        # Only support a single version number
        if version_num is not None:
            # Using the salt cmdline with version=5.3 might be interpreted
            # as a float it must be converted to a string in order for
            # string matching to work.
            version_num = six.text_type(version_num)

        # At least one version of the software is installed.
        if version_num is None:
            for ver_install in old[pkgname]:
                if ver_install not in pkginfo and 'latest' in pkginfo:
                    log.debug('%s %s using package latest entry to to remove', pkgname, version_num)
                    removal_targets.append('latest')
                else:
                    removal_targets.append(ver_install)
        else:
            if version_num in pkginfo:
                # we known how to remove this version
                if version_num in old[pkgname]:
                    removal_targets.append(version_num)
                else:
                    log.debug('%s %s not installed', pkgname, version_num)
                    ret[pkgname] = {'current': '{0} not installed'.format(version_num)}
                    continue
            elif 'latest' in pkginfo:
                # we do not have version entry, assume software can self upgrade and use latest
                log.debug('%s %s using package latest entry to to remove', pkgname, version_num)
                removal_targets.append('latest')

        if not removal_targets:
            log.error('%s %s no definition to remove this version', pkgname, version_num)
            ret[pkgname] = {
                'current': '{0} no definition, cannot removed'.format(version_num)
            }
            continue

        for target in removal_targets:
            # Get the uninstaller
            uninstaller = pkginfo[target].get('uninstaller', '')
            cache_dir = pkginfo[target].get('cache_dir', False)
            uninstall_flags = pkginfo[target].get('uninstall_flags', '')

            # If no uninstaller found, use the installer with uninstall flags
            if not uninstaller and uninstall_flags:
                uninstaller = pkginfo[target].get('installer', '')

            # If still no uninstaller found, fail
            if not uninstaller:
                log.error(
                    'No installer or uninstaller configured for package %s',
                    pkgname,
                )
                ret[pkgname] = {'no uninstaller defined': target}
                continue

            # Where is the uninstaller
            if uninstaller.startswith(('salt:', 'http:', 'https:', 'ftp:')):

                # Check for the 'cache_dir' parameter in the .sls file
                # If true, the entire directory will be cached instead of the
                # individual file. This is useful for installations that are not
                # single files
                if cache_dir and uninstaller.startswith('salt:'):
                    path, _ = os.path.split(uninstaller)
                    __salt__['cp.cache_dir'](path, saltenv, False, None, '[email protected]$')

                # Check to see if the uninstaller is cached
                cached_pkg = __salt__['cp.is_cached'](uninstaller, saltenv)
                if not cached_pkg:
                    # It's not cached. Cache it, mate.
                    cached_pkg = __salt__['cp.cache_file'](uninstaller, saltenv)

                    # Check if the uninstaller was cached successfully
                    if not cached_pkg:
                        log.error('Unable to cache %s', uninstaller)
                        ret[pkgname] = {'unable to cache': uninstaller}
                        continue

                # Compare the hash of the cached installer to the source only if
                # the file is hosted on salt:
                # TODO cp.cache_file does cache and hash checking? So why do it again?
                if uninstaller.startswith('salt:'):
                    if __salt__['cp.hash_file'](uninstaller, saltenv) != \
                            __salt__['cp.hash_file'](cached_pkg):
                        try:
                            cached_pkg = __salt__['cp.cache_file'](
                                uninstaller, saltenv)
                        except MinionError as exc:
                            return '{0}: {1}'.format(exc, uninstaller)

                        # Check if the installer was cached successfully
                        if not cached_pkg:
                            log.error('Unable to cache %s', uninstaller)
                            ret[pkgname] = {'unable to cache': uninstaller}
                            continue
            else:
                # Run the uninstaller directly
                # (not hosted on salt:, https:, etc.)
                cached_pkg = os.path.expandvars(uninstaller)

            # Fix non-windows slashes
            cached_pkg = cached_pkg.replace('/', '\\')
            cache_path, _ = os.path.split(cached_pkg)

            # os.path.expandvars is not required as we run everything through cmd.exe /s /c
            if kwargs.get('extra_uninstall_flags'):
                uninstall_flags = '{0} {1}'.format(
                    uninstall_flags, kwargs.get('extra_uninstall_flags', ''))

            # Compute msiexec string
            use_msiexec, msiexec = _get_msiexec(pkginfo[target].get('msiexec', False))
            cmd_shell = os.getenv('ComSpec', '{0}\\system32\\cmd.exe'.format(os.getenv('WINDIR')))

            # Build cmd and arguments
            # cmd and arguments must be separated for use with the task scheduler
            if use_msiexec:
                # Check if uninstaller is set to {guid}, if not we assume its a remote msi file.
                # which has already been downloaded.
                arguments = '"{0}" /X "{1}"'.format(msiexec, cached_pkg)
            else:
                arguments = '"{0}"'.format(cached_pkg)

            if uninstall_flags:
                arguments = '{0} {1}'.format(arguments, uninstall_flags)

            # Uninstall the software
            changed.append(pkgname)
            # Check Use Scheduler Option
            if pkginfo[target].get('use_scheduler', False):

                # Create Scheduled Task
                __salt__['task.create_task'](name='update-salt-software',
                                             user_name='System',
                                             force=True,
                                             action_type='Execute',
                                             cmd=cmd_shell,
                                             arguments='/s /c "{0}"'.format(arguments),
                                             start_in=cache_path,
                                             trigger_type='Once',
                                             start_date='1975-01-01',
                                             start_time='01:00',
                                             ac_only=False,
                                             stop_if_on_batteries=False)

                # Run Scheduled Task
                if not __salt__['task.run_wait'](name='update-salt-software'):
                    log.error('Failed to remove %s', pkgname)
                    log.error('Scheduled Task failed to run')
                    ret[pkgname] = {'uninstall status': 'failed'}
            else:
                # Launch the command
                result = __salt__['cmd.run_all'](
                    '"{0}" /s /c "{1}"'.format(cmd_shell, arguments),
                    output_loglevel='trace',
                    python_shell=False,
                    redirect_stderr=True)
                if not result['retcode']:
                    ret[pkgname] = {'uninstall status': 'success'}
                    changed.append(pkgname)
                elif result['retcode'] == 3010:
                    # 3010 is ERROR_SUCCESS_REBOOT_REQUIRED
                    report_reboot_exit_codes = kwargs.pop(
                        'report_reboot_exit_codes', True)
                    if report_reboot_exit_codes:
                        __salt__['system.set_reboot_required_witnessed']()
                    ret[pkgname] = {'uninstall status': 'success, reboot required'}
                    changed.append(pkgname)
                elif result['retcode'] == 1641:
                    # 1641 is ERROR_SUCCESS_REBOOT_INITIATED
                    ret[pkgname] = {'uninstall status': 'success, reboot initiated'}
                    changed.append(pkgname)
                else:
                    log.error('Failed to remove %s', pkgname)
                    log.error('retcode %s', result['retcode'])
                    log.error('uninstaller output: %s', result['stdout'])
                    ret[pkgname] = {'uninstall status': 'failed'}

    # Get a new list of installed software
    new = list_pkgs(saltenv=saltenv, refresh=False)

    # Take the "old" package list and convert the values to strings in
    # preparation for the comparison below.
    __salt__['pkg_resource.stringify'](old)

    # Check for changes in the registry
    difference = salt.utils.data.compare_dicts(old, new)
    found_chgs = all(name in difference for name in changed)
    end_t = time.time() + 3  # give it 3 seconds to catch up.
    while not found_chgs and time.time() < end_t:
        time.sleep(0.5)
        new = list_pkgs(saltenv=saltenv, refresh=False)
        difference = salt.utils.data.compare_dicts(old, new)
        found_chgs = all(name in difference for name in changed)

    if not found_chgs:
        log.warning('Expected changes for package removal may not have occured')

    # Compare the software list before and after
    # Add the difference to ret
    ret.update(difference)

    return ret
['def', 'remove', '(', 'name', '=', 'None', ',', 'pkgs', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'saltenv', '=', 'kwargs', '.', 'get', '(', "'saltenv'", ',', "'base'", ')', 'refresh', '=', 'salt', '.', 'utils', '.', 'data', '.', 'is_true', '(', 'kwargs', '.', 'get', '(', "'refresh'", ',', 'False', ')', ')', '# no need to call _refresh_db_conditional as list_pkgs will do it', 'ret', '=', '{', '}', '# Make sure name or pkgs is passed', 'if', 'not', 'name', 'and', 'not', 'pkgs', ':', 'return', "'Must pass a single package or a list of packages'", '# Get package parameters', 'pkg_params', '=', '__salt__', '[', "'pkg_resource.parse_targets'", ']', '(', 'name', ',', 'pkgs', ',', '*', '*', 'kwargs', ')', '[', '0', ']', '# Get a list of currently installed software for comparison at the end', 'old', '=', 'list_pkgs', '(', 'saltenv', '=', 'saltenv', ',', 'refresh', '=', 'refresh', ',', 'versions_as_list', '=', 'True', ')', '# Loop through each package', 'changed', '=', '[', ']', '# list of changed package names', 'for', 'pkgname', ',', 'version_num', 'in', 'six', '.', 'iteritems', '(', 'pkg_params', ')', ':', '# Load package information for the package', 'pkginfo', '=', '_get_package_info', '(', 'pkgname', ',', 'saltenv', '=', 'saltenv', ')', '# Make sure pkginfo was found', 'if', 'not', 'pkginfo', ':', 'msg', '=', "'Unable to locate package {0}'", '.', 'format', '(', 'pkgname', ')', 'log', '.', 'error', '(', 'msg', ')', 'ret', '[', 'pkgname', ']', '=', 'msg', 'continue', '# Check to see if package is installed on the system', 'if', 'pkgname', 'not', 'in', 'old', ':', 'log', '.', 'debug', '(', "'%s %s not installed'", ',', 'pkgname', ',', 'version_num', 'if', 'version_num', 'else', "''", ')', 'ret', '[', 'pkgname', ']', '=', '{', "'current'", ':', "'not installed'", '}', 'continue', 'removal_targets', '=', '[', ']', '# Only support a single version number', 'if', 'version_num', 'is', 'not', 'None', ':', '# Using the salt cmdline with version=5.3 might be interpreted', '# as a float it must be converted to a string in order for', '# string matching to work.', 'version_num', '=', 'six', '.', 'text_type', '(', 'version_num', ')', '# At least one version of the software is installed.', 'if', 'version_num', 'is', 'None', ':', 'for', 'ver_install', 'in', 'old', '[', 'pkgname', ']', ':', 'if', 'ver_install', 'not', 'in', 'pkginfo', 'and', "'latest'", 'in', 'pkginfo', ':', 'log', '.', 'debug', '(', "'%s %s using package latest entry to to remove'", ',', 'pkgname', ',', 'version_num', ')', 'removal_targets', '.', 'append', '(', "'latest'", ')', 'else', ':', 'removal_targets', '.', 'append', '(', 'ver_install', ')', 'else', ':', 'if', 'version_num', 'in', 'pkginfo', ':', '# we known how to remove this version', 'if', 'version_num', 'in', 'old', '[', 'pkgname', ']', ':', 'removal_targets', '.', 'append', '(', 'version_num', ')', 'else', ':', 'log', '.', 'debug', '(', "'%s %s not installed'", ',', 'pkgname', ',', 'version_num', ')', 'ret', '[', 'pkgname', ']', '=', '{', "'current'", ':', "'{0} not installed'", '.', 'format', '(', 'version_num', ')', '}', 'continue', 'elif', "'latest'", 'in', 'pkginfo', ':', '# we do not have version entry, assume software can self upgrade and use latest', 'log', '.', 'debug', '(', "'%s %s using package latest entry to to remove'", ',', 'pkgname', ',', 'version_num', ')', 'removal_targets', '.', 'append', '(', "'latest'", ')', 'if', 'not', 'removal_targets', ':', 'log', '.', 'error', '(', "'%s %s no definition to remove this version'", ',', 'pkgname', ',', 
'version_num', ')', 'ret', '[', 'pkgname', ']', '=', '{', "'current'", ':', "'{0} no definition, cannot removed'", '.', 'format', '(', 'version_num', ')', '}', 'continue', 'for', 'target', 'in', 'removal_targets', ':', '# Get the uninstaller', 'uninstaller', '=', 'pkginfo', '[', 'target', ']', '.', 'get', '(', "'uninstaller'", ',', "''", ')', 'cache_dir', '=', 'pkginfo', '[', 'target', ']', '.', 'get', '(', "'cache_dir'", ',', 'False', ')', 'uninstall_flags', '=', 'pkginfo', '[', 'target', ']', '.', 'get', '(', "'uninstall_flags'", ',', "''", ')', '# If no uninstaller found, use the installer with uninstall flags', 'if', 'not', 'uninstaller', 'and', 'uninstall_flags', ':', 'uninstaller', '=', 'pkginfo', '[', 'target', ']', '.', 'get', '(', "'installer'", ',', "''", ')', '# If still no uninstaller found, fail', 'if', 'not', 'uninstaller', ':', 'log', '.', 'error', '(', "'No installer or uninstaller configured for package %s'", ',', 'pkgname', ',', ')', 'ret', '[', 'pkgname', ']', '=', '{', "'no uninstaller defined'", ':', 'target', '}', 'continue', '# Where is the uninstaller', 'if', 'uninstaller', '.', 'startswith', '(', '(', "'salt:'", ',', "'http:'", ',', "'https:'", ',', "'ftp:'", ')', ')', ':', "# Check for the 'cache_dir' parameter in the .sls file", '# If true, the entire directory will be cached instead of the', '# individual file. This is useful for installations that are not', '# single files', 'if', 'cache_dir', 'and', 'uninstaller', '.', 'startswith', '(', "'salt:'", ')', ':', 'path', ',', '_', '=', 'os', '.', 'path', '.', 'split', '(', 'uninstaller', ')', '__salt__', '[', "'cp.cache_dir'", ']', '(', 'path', ',', 'saltenv', ',', 'False', ',', 'None', ',', "'[email protected]$'", ')', '# Check to see if the uninstaller is cached', 'cached_pkg', '=', '__salt__', '[', "'cp.is_cached'", ']', '(', 'uninstaller', ',', 'saltenv', ')', 'if', 'not', 'cached_pkg', ':', "# It's not cached. Cache it, mate.", 'cached_pkg', '=', '__salt__', '[', "'cp.cache_file'", ']', '(', 'uninstaller', ',', 'saltenv', ')', '# Check if the uninstaller was cached successfully', 'if', 'not', 'cached_pkg', ':', 'log', '.', 'error', '(', "'Unable to cache %s'", ',', 'uninstaller', ')', 'ret', '[', 'pkgname', ']', '=', '{', "'unable to cache'", ':', 'uninstaller', '}', 'continue', '# Compare the hash of the cached installer to the source only if', '# the file is hosted on salt:', '# TODO cp.cache_file does cache and hash checking? 
So why do it again?', 'if', 'uninstaller', '.', 'startswith', '(', "'salt:'", ')', ':', 'if', '__salt__', '[', "'cp.hash_file'", ']', '(', 'uninstaller', ',', 'saltenv', ')', '!=', '__salt__', '[', "'cp.hash_file'", ']', '(', 'cached_pkg', ')', ':', 'try', ':', 'cached_pkg', '=', '__salt__', '[', "'cp.cache_file'", ']', '(', 'uninstaller', ',', 'saltenv', ')', 'except', 'MinionError', 'as', 'exc', ':', 'return', "'{0}: {1}'", '.', 'format', '(', 'exc', ',', 'uninstaller', ')', '# Check if the installer was cached successfully', 'if', 'not', 'cached_pkg', ':', 'log', '.', 'error', '(', "'Unable to cache %s'", ',', 'uninstaller', ')', 'ret', '[', 'pkgname', ']', '=', '{', "'unable to cache'", ':', 'uninstaller', '}', 'continue', 'else', ':', '# Run the uninstaller directly', '# (not hosted on salt:, https:, etc.)', 'cached_pkg', '=', 'os', '.', 'path', '.', 'expandvars', '(', 'uninstaller', ')', '# Fix non-windows slashes', 'cached_pkg', '=', 'cached_pkg', '.', 'replace', '(', "'/'", ',', "'\\\\'", ')', 'cache_path', ',', '_', '=', 'os', '.', 'path', '.', 'split', '(', 'cached_pkg', ')', '# os.path.expandvars is not required as we run everything through cmd.exe /s /c', 'if', 'kwargs', '.', 'get', '(', "'extra_uninstall_flags'", ')', ':', 'uninstall_flags', '=', "'{0} {1}'", '.', 'format', '(', 'uninstall_flags', ',', 'kwargs', '.', 'get', '(', "'extra_uninstall_flags'", ',', "''", ')', ')', '# Compute msiexec string', 'use_msiexec', ',', 'msiexec', '=', '_get_msiexec', '(', 'pkginfo', '[', 'target', ']', '.', 'get', '(', "'msiexec'", ',', 'False', ')', ')', 'cmd_shell', '=', 'os', '.', 'getenv', '(', "'ComSpec'", ',', "'{0}\\\\system32\\\\cmd.exe'", '.', 'format', '(', 'os', '.', 'getenv', '(', "'WINDIR'", ')', ')', ')', '# Build cmd and arguments', '# cmd and arguments must be separated for use with the task scheduler', 'if', 'use_msiexec', ':', '# Check if uninstaller is set to {guid}, if not we assume its a remote msi file.', '# which has already been downloaded.', 'arguments', '=', '\'"{0}" /X "{1}"\'', '.', 'format', '(', 'msiexec', ',', 'cached_pkg', ')', 'else', ':', 'arguments', '=', '\'"{0}"\'', '.', 'format', '(', 'cached_pkg', ')', 'if', 'uninstall_flags', ':', 'arguments', '=', "'{0} {1}'", '.', 'format', '(', 'arguments', ',', 'uninstall_flags', ')', '# Uninstall the software', 'changed', '.', 'append', '(', 'pkgname', ')', '# Check Use Scheduler Option', 'if', 'pkginfo', '[', 'target', ']', '.', 'get', '(', "'use_scheduler'", ',', 'False', ')', ':', '# Create Scheduled Task', '__salt__', '[', "'task.create_task'", ']', '(', 'name', '=', "'update-salt-software'", ',', 'user_name', '=', "'System'", ',', 'force', '=', 'True', ',', 'action_type', '=', "'Execute'", ',', 'cmd', '=', 'cmd_shell', ',', 'arguments', '=', '\'/s /c "{0}"\'', '.', 'format', '(', 'arguments', ')', ',', 'start_in', '=', 'cache_path', ',', 'trigger_type', '=', "'Once'", ',', 'start_date', '=', "'1975-01-01'", ',', 'start_time', '=', "'01:00'", ',', 'ac_only', '=', 'False', ',', 'stop_if_on_batteries', '=', 'False', ')', '# Run Scheduled Task', 'if', 'not', '__salt__', '[', "'task.run_wait'", ']', '(', 'name', '=', "'update-salt-software'", ')', ':', 'log', '.', 'error', '(', "'Failed to remove %s'", ',', 'pkgname', ')', 'log', '.', 'error', '(', "'Scheduled Task failed to run'", ')', 'ret', '[', 'pkgname', ']', '=', '{', "'uninstall status'", ':', "'failed'", '}', 'else', ':', '# Launch the command', 'result', '=', '__salt__', '[', "'cmd.run_all'", ']', '(', '\'"{0}" /s /c "{1}"\'', '.', 'format', '(', 
'cmd_shell', ',', 'arguments', ')', ',', 'output_loglevel', '=', "'trace'", ',', 'python_shell', '=', 'False', ',', 'redirect_stderr', '=', 'True', ')', 'if', 'not', 'result', '[', "'retcode'", ']', ':', 'ret', '[', 'pkgname', ']', '=', '{', "'uninstall status'", ':', "'success'", '}', 'changed', '.', 'append', '(', 'pkgname', ')', 'elif', 'result', '[', "'retcode'", ']', '==', '3010', ':', '# 3010 is ERROR_SUCCESS_REBOOT_REQUIRED', 'report_reboot_exit_codes', '=', 'kwargs', '.', 'pop', '(', "'report_reboot_exit_codes'", ',', 'True', ')', 'if', 'report_reboot_exit_codes', ':', '__salt__', '[', "'system.set_reboot_required_witnessed'", ']', '(', ')', 'ret', '[', 'pkgname', ']', '=', '{', "'uninstall status'", ':', "'success, reboot required'", '}', 'changed', '.', 'append', '(', 'pkgname', ')', 'elif', 'result', '[', "'retcode'", ']', '==', '1641', ':', '# 1641 is ERROR_SUCCESS_REBOOT_INITIATED', 'ret', '[', 'pkgname', ']', '=', '{', "'uninstall status'", ':', "'success, reboot initiated'", '}', 'changed', '.', 'append', '(', 'pkgname', ')', 'else', ':', 'log', '.', 'error', '(', "'Failed to remove %s'", ',', 'pkgname', ')', 'log', '.', 'error', '(', "'retcode %s'", ',', 'result', '[', "'retcode'", ']', ')', 'log', '.', 'error', '(', "'uninstaller output: %s'", ',', 'result', '[', "'stdout'", ']', ')', 'ret', '[', 'pkgname', ']', '=', '{', "'uninstall status'", ':', "'failed'", '}', '# Get a new list of installed software', 'new', '=', 'list_pkgs', '(', 'saltenv', '=', 'saltenv', ',', 'refresh', '=', 'False', ')', '# Take the "old" package list and convert the values to strings in', '# preparation for the comparison below.', '__salt__', '[', "'pkg_resource.stringify'", ']', '(', 'old', ')', '# Check for changes in the registry', 'difference', '=', 'salt', '.', 'utils', '.', 'data', '.', 'compare_dicts', '(', 'old', ',', 'new', ')', 'found_chgs', '=', 'all', '(', 'name', 'in', 'difference', 'for', 'name', 'in', 'changed', ')', 'end_t', '=', 'time', '.', 'time', '(', ')', '+', '3', '# give it 3 seconds to catch up.', 'while', 'not', 'found_chgs', 'and', 'time', '.', 'time', '(', ')', '<', 'end_t', ':', 'time', '.', 'sleep', '(', '0.5', ')', 'new', '=', 'list_pkgs', '(', 'saltenv', '=', 'saltenv', ',', 'refresh', '=', 'False', ')', 'difference', '=', 'salt', '.', 'utils', '.', 'data', '.', 'compare_dicts', '(', 'old', ',', 'new', ')', 'found_chgs', '=', 'all', '(', 'name', 'in', 'difference', 'for', 'name', 'in', 'changed', ')', 'if', 'not', 'found_chgs', ':', 'log', '.', 'warning', '(', "'Expected changes for package removal may not have occured'", ')', '# Compare the software list before and after', '# Add the difference to ret', 'ret', '.', 'update', '(', 'difference', ')', 'return', 'ret']
Remove the passed package(s) from the system using winrepo .. versionadded:: 0.16.0 Args: name (str): The name(s) of the package(s) to be uninstalled. Can be a single package or a comma delimited list of packages, no spaces. pkgs (list): A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. Kwargs: version (str): The version of the package to be uninstalled. If this option is used to to uninstall multiple packages, then this version will be applied to all targeted packages. Recommended using only when uninstalling a single package. If this parameter is omitted, the latest version will be uninstalled. saltenv (str): Salt environment. Default ``base`` refresh (bool): Refresh package metadata. Default ``False`` Returns: dict: Returns a dict containing the changes. If the package is removed by ``pkg.remove``: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} If the package is already uninstalled: {'<package>': {'current': 'not installed'}} CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]'
['Remove', 'the', 'passed', 'package', '(', 's', ')', 'from', 'the', 'system', 'using', 'winrepo']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_pkg.py#L1757-L2044
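The tail of remove() polls list_pkgs() for up to three seconds until the expected package changes appear. The helper below sketches that poll-until-seen loop in isolation; refresh_state is a made-up stand-in for the list_pkgs()/compare_dicts() pair:

# Standalone sketch of the "poll until the expected changes show up" loop.
import time

def wait_for_changes(refresh_state, expected, timeout=3.0, interval=0.5):
    difference = refresh_state()
    found = all(name in difference for name in expected)
    end_t = time.time() + timeout          # give the system a few seconds to catch up
    while not found and time.time() < end_t:
        time.sleep(interval)
        difference = refresh_state()
        found = all(name in difference for name in expected)
    return found, difference

# e.g. wait_for_changes(lambda: {'7zip': {'old': '19.00', 'new': ''}}, ['7zip'])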