Dataset columns (type and value range per column):

| Column | Dtype | Range |
| --- | --- | --- |
| Unnamed: 0 | int64 | 0 – 10k |
| repository_name | stringlengths | 7 – 54 |
| func_path_in_repository | stringlengths | 5 – 223 |
| func_name | stringlengths | 1 – 134 |
| whole_func_string | stringlengths | 100 – 30.3k |
| language | stringclasses | 1 value |
| func_code_string | stringlengths | 100 – 30.3k |
| func_code_tokens | stringlengths | 138 – 33.2k |
| func_documentation_string | stringlengths | 1 – 15k |
| func_documentation_tokens | stringlengths | 5 – 5.14k |
| split_name | stringclasses | 1 value |
| func_code_url | stringlengths | 91 – 315 |

2,300
repository_name: Pytwitcher/pytwitcherapi
func_path_in_repository: src/pytwitcherapi/session.py
func_name: OAuthSession.request
language: python

```python
def request(self, method, url, **kwargs):
    """Constructs a :class:`requests.Request`, prepares it and sends it.
    Raises HTTPErrors by default.

    :param method: method for the new :class:`Request` object.
    :type method: :class:`str`
    :param url: URL for the new :class:`Request` object.
    :type url: :class:`str`
    :param kwargs: keyword arguments of :meth:`requests.Session.request`
    :returns: a response object
    :rtype: :class:`requests.Response`
    :raises: :class:`requests.HTTPError`
    """
    if oauthlib.oauth2.is_secure_transport(url):
        m = super(OAuthSession, self).request
    else:
        m = super(requests_oauthlib.OAuth2Session, self).request
    log.debug("%s \"%s\" with %s", method, url, kwargs)
    response = m(method, url, **kwargs)
    response.raise_for_status()
    return response
```
split_name: train
func_code_url: https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L89-L109

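The wrapper above routes every call through `raise_for_status()`, so callers handle failures via exceptions rather than status-code checks. A minimal usage sketch of that calling pattern (the URL is a placeholder, and a plain `requests.Session` stands in for the record's `OAuthSession`):

```python
import requests

session = requests.Session()  # stand-in; OAuthSession adds OAuth2 token handling

try:
    response = session.request("GET", "https://api.example.com/streams")
    response.raise_for_status()  # OAuthSession.request performs this step internally
    print(response.json())
except requests.RequestException as err:
    # Both HTTPError (non-2xx) and transport failures surface here
    print("request failed:", err)
```
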
2,301
repository_name: jleclanche/fireplace
func_path_in_repository: fireplace/player.py
func_name: Player.can_pay_cost
language: python

```python
def can_pay_cost(self, card):
    """
    Returns whether the player can pay the resource cost of a card.
    """
    if self.spells_cost_health and card.type == CardType.SPELL:
        return self.hero.health > card.cost
    return self.mana >= card.cost
```
split_name: train
func_code_url: https://github.com/jleclanche/fireplace/blob/d0fc0e97e185c0210de86631be20638659c0609e/fireplace/player.py#L184-L190

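A self-contained sketch of the payment rule above, using stand-in classes rather than fireplace's real ones. Note the asymmetry: health payment uses a strict `>` (the hero must survive the cast), while mana uses `>=`:

```python
from dataclasses import dataclass

@dataclass
class Hero:
    health: int

@dataclass
class Card:
    cost: int
    is_spell: bool

@dataclass
class Player:
    mana: int
    hero: Hero
    spells_cost_health: bool = False

    def can_pay_cost(self, card: Card) -> bool:
        # Mirrors the record: spells may be paid with health,
        # but only if the hero stays strictly above 0 health.
        if self.spells_cost_health and card.is_spell:
            return self.hero.health > card.cost
        return self.mana >= card.cost

p = Player(mana=3, hero=Hero(health=4), spells_cost_health=True)
assert p.can_pay_cost(Card(cost=3, is_spell=True))      # 4 > 3: payable
assert not p.can_pay_cost(Card(cost=4, is_spell=True))  # 4 > 4 fails
assert p.can_pay_cost(Card(cost=3, is_spell=False))     # 3 >= 3: payable
```
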
2,302
repository_name: singularityhub/singularity-python
func_path_in_repository: singularity/analysis/compare.py
func_name: calculate_similarity
language: python

```python
def calculate_similarity(container1=None,
                         container2=None,
                         comparison=None,
                         metric=None):
    '''calculate_similarity will calculate similarity of two containers
    by files content, default will calculate
    2.0*len(intersect) / total package1 + total package2

    Parameters
    ==========
    container1: container 1
    container2: container 2
        must be defined or metric
        a function to take a total1, total2, and intersect count
        (we can make this more general if / when more are added)
        valid are currently files.txt or folders.txt
    comparison: the comparison result object for the tree. If provided,
        will skip over function to obtain it.
    '''
    if metric is None:
        metric = information_coefficient

    if comparison == None:
        comparison = compare_containers(container1=container1,
                                        container2=container2)
    return metric(total1=comparison['total1'],
                  total2=comparison['total2'],
                  intersect=comparison["intersect"])
```
split_name: train
func_code_url: https://github.com/singularityhub/singularity-python/blob/498c3433724b332f7493fec632d8daf479f47b82/singularity/analysis/compare.py#L163-L193

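The default metric named in the record, `information_coefficient`, computes the Dice-style score the docstring describes: `2.0 * len(intersect) / (total1 + total2)`. A standalone sketch of that arithmetic (the signature is assumed from the call site in the record):

```python
def information_coefficient(total1, total2, intersect):
    # 2 * shared / (size1 + size2): 1.0 for identical sets, 0.0 for disjoint ones
    return 2.0 * intersect / (total1 + total2)

files_a = {"/bin/sh", "/etc/hosts", "/usr/lib/libc.so"}
files_b = {"/bin/sh", "/etc/hosts", "/usr/bin/python3"}
score = information_coefficient(len(files_a), len(files_b), len(files_a & files_b))
print(round(score, 3))  # 0.667
```
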
2,303
repository_name: kensho-technologies/graphql-compiler
func_path_in_repository: graphql_compiler/schema_generation/schema_properties.py
func_name: _parse_datetime_default_value
language: python

```python
def _parse_datetime_default_value(property_name, default_value_string):
    """Parse and return the default value for a datetime property."""
    # OrientDB doesn't use ISO-8601 datetime format, so we have to parse it manually
    # and then turn it into a python datetime object. strptime() will raise an exception
    # if the provided value cannot be parsed correctly.
    parsed_value = time.strptime(default_value_string, ORIENTDB_DATETIME_FORMAT)
    return datetime.datetime(
        parsed_value.tm_year, parsed_value.tm_mon, parsed_value.tm_mday,
        parsed_value.tm_hour, parsed_value.tm_min, parsed_value.tm_sec, 0, None)
```
split_name: train
func_code_url: https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/schema_generation/schema_properties.py#L115-L123

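`ORIENTDB_DATETIME_FORMAT` is defined elsewhere in the module; the value used below, `'%Y-%m-%d %H:%M:%S'` (OrientDB's default datetime format), is an assumption for this sketch of the same strptime-to-datetime conversion:

```python
import datetime
import time

ORIENTDB_DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'  # assumed; the real constant lives upstream

parsed_value = time.strptime('2024-01-31 17:45:00', ORIENTDB_DATETIME_FORMAT)
result = datetime.datetime(
    parsed_value.tm_year, parsed_value.tm_mon, parsed_value.tm_mday,
    parsed_value.tm_hour, parsed_value.tm_min, parsed_value.tm_sec, 0, None)
print(result)  # 2024-01-31 17:45:00
```
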
2,304
repository_name: ejeschke/ginga
func_path_in_repository: ginga/Bindings.py
func_name: ImageViewBindings.sc_cuts_coarse
language: python

```python
def sc_cuts_coarse(self, viewer, event, msg=True):
    """Adjust cuts interactively by setting the low AND high cut
    levels.  This function adjusts it coarsely.
    """
    if self.cancut:
        # adjust the cut by 10% on each end
        self._adjust_cuts(viewer, event.direction, 0.1, msg=msg)
    return True
```
split_name: train
func_code_url: https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/Bindings.py#L1878-L1885

2,305
repository_name: Contraz/pyrocket
func_path_in_repository: rocket/tracks.py
func_name: Track.delete
language: python

```python
def delete(self, row):
    """Delete a track value"""
    i = self._get_key_index(row)
    del self.keys[i]
```
split_name: train
func_code_url: https://github.com/Contraz/pyrocket/blob/97f4153c79030497b97fbaf43b1aa6dc1a6c7f7b/rocket/tracks.py#L111-L114

2,306
repository_name: jrspruitt/ubi_reader
func_path_in_repository: ubireader/ubi/block/__init__.py
func_name: extract_blocks
language: python

```python
def extract_blocks(ubi):
    """Get a list of UBI block objects from file

    Arguments:
    Obj:ubi    -- UBI object.

    Returns:
    Dict -- Of block objects keyed by PEB number.
    """
    blocks = {}
    ubi.file.seek(ubi.file.start_offset)
    peb_count = 0
    cur_offset = 0
    bad_blocks = []

    # range instead of xrange, as xrange breaks > 4GB end_offset.
    for i in range(ubi.file.start_offset, ubi.file.end_offset, ubi.file.block_size):
        try:
            buf = ubi.file.read(ubi.file.block_size)
        except Exception as e:
            if settings.warn_only_block_read_errors:
                error(extract_blocks, 'Error', 'PEB: %s: %s' % (ubi.first_peb_num + peb_count, str(e)))
                continue
            else:
                error(extract_blocks, 'Fatal', 'PEB: %s: %s' % (ubi.first_peb_num + peb_count, str(e)))

        if buf.startswith(UBI_EC_HDR_MAGIC):
            blk = description(buf)
            blk.file_offset = i
            blk.peb_num = ubi.first_peb_num + peb_count
            blk.size = ubi.file.block_size
            blocks[blk.peb_num] = blk
            peb_count += 1
            log(extract_blocks, blk)
            verbose_log(extract_blocks, 'file addr: %s' % (ubi.file.last_read_addr()))

            ec_hdr_errors = ''
            vid_hdr_errors = ''
            if blk.ec_hdr.errors:
                ec_hdr_errors = ','.join(blk.ec_hdr.errors)
            if blk.vid_hdr and blk.vid_hdr.errors:
                vid_hdr_errors = ','.join(blk.vid_hdr.errors)

            if ec_hdr_errors or vid_hdr_errors:
                if blk.peb_num not in bad_blocks:
                    bad_blocks.append(blk.peb_num)
                    log(extract_blocks, 'PEB: %s has possible issue EC_HDR [%s], VID_HDR [%s]' % (blk.peb_num, ec_hdr_errors, vid_hdr_errors))

            verbose_display(blk)

        else:
            cur_offset += ubi.file.block_size
            ubi.first_peb_num = cur_offset/ubi.file.block_size
            ubi.file.start_offset = cur_offset

    return blocks
```
split_name: train
func_code_url: https://github.com/jrspruitt/ubi_reader/blob/7079dd380c1c9896bced30d6d34e8780b9181597/ubireader/ubi/block/__init__.py#L105-L162

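The core of the loop above is a fixed-stride scan that keys blocks by PEB number whenever a block starts with the erase-counter header magic (on flash, UBI_EC_HDR_MAGIC is the four bytes b'UBI#'). A stripped-down sketch of that scan pattern, dropping the error bookkeeping; the block size here is illustrative, since real images declare their own. Note also that the record's `cur_offset/ubi.file.block_size` is float division under Python 3, so the sketch keeps everything in integers:

```python
UBI_EC_HDR_MAGIC = b'UBI#'  # magic bytes at the start of every UBI erase block
BLOCK_SIZE = 128 * 1024     # illustrative PEB size

def scan_pebs(path):
    """Return {peb_number: file_offset} for blocks starting with the EC magic."""
    blocks = {}
    with open(path, 'rb') as f:
        peb = 0
        offset = 0
        while True:
            buf = f.read(BLOCK_SIZE)
            if not buf:
                break
            if buf.startswith(UBI_EC_HDR_MAGIC):
                blocks[peb] = offset
                peb += 1
            offset += BLOCK_SIZE
    return blocks
```
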
2,307
repository_name: JdeRobot/base
func_path_in_repository: src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
func_name: MAVLink.highres_imu_encode
language: python

```python
def highres_imu_encode(self, time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag, abs_pressure, diff_pressure, pressure_alt, temperature, fields_updated):
    '''
    The IMU readings in SI units in NED body frame

    time_usec       : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t)
    xacc            : X acceleration (m/s^2) (float)
    yacc            : Y acceleration (m/s^2) (float)
    zacc            : Z acceleration (m/s^2) (float)
    xgyro           : Angular speed around X axis (rad / sec) (float)
    ygyro           : Angular speed around Y axis (rad / sec) (float)
    zgyro           : Angular speed around Z axis (rad / sec) (float)
    xmag            : X Magnetic field (Gauss) (float)
    ymag            : Y Magnetic field (Gauss) (float)
    zmag            : Z Magnetic field (Gauss) (float)
    abs_pressure    : Absolute pressure in millibar (float)
    diff_pressure   : Differential pressure in millibar (float)
    pressure_alt    : Altitude calculated from pressure (float)
    temperature     : Temperature in degrees celsius (float)
    fields_updated  : Bitmask for fields that have updated since last message, bit 0 = xacc, bit 12: temperature (uint16_t)
    '''
    return MAVLink_highres_imu_message(time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag, abs_pressure, diff_pressure, pressure_alt, temperature, fields_updated)
```
split_name: train
func_code_url: https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L11135-L11156

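The `fields_updated` parameter in the record's docstring is a bitmask with one bit per field, running from bit 0 (xacc) to bit 12 (temperature). A tiny sketch of setting and testing such a mask (the bit for yacc is an assumption by analogy; only bits 0 and 12 are stated in the record):

```python
XACC_UPDATED = 1 << 0          # bit 0 per the docstring
TEMPERATURE_UPDATED = 1 << 12  # bit 12 per the docstring

fields_updated = XACC_UPDATED | TEMPERATURE_UPDATED
print(bool(fields_updated & TEMPERATURE_UPDATED))  # True: temperature changed
print(bool(fields_updated & (1 << 1)))             # False: yacc (assumed bit 1) did not
```
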
2,308
repository_name: joke2k/django-environ
func_path_in_repository: environ/environ.py
func_name: Path.file
language: python

```python
def file(self, name, *args, **kwargs):
    """Open a file.

    :param name: Filename appended to self.root
    :param args: passed to open()
    :param kwargs: passed to open()

    :rtype: file
    """
    return open(self(name), *args, **kwargs)
```
split_name: train
func_code_url: https://github.com/joke2k/django-environ/blob/c2620021614557abe197578f99deeef42af3e082/environ/environ.py#L696-L705

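A short usage sketch for the record above; it assumes django-environ is installed, and the directory and filename are placeholders. Calling the `Path` object joins `name` onto `self.root`, so `file()` is `open()` on the joined path:

```python
import environ

root = environ.Path('/tmp')                  # becomes self.root
with root.file('notes.txt', 'w') as handle:  # open('/tmp/notes.txt', 'w')
    handle.write('hello')
```
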
2,309
repository_name: collectiveacuity/labPack
func_path_in_repository: labpack/storage/dropbox.py
func_name: dropboxClient.exists
language: python

```python
def exists(self, record_key):
    ''' a method to determine if a record exists in collection

    :param record_key: string with key of record
    :return: boolean reporting status
    '''
    title = '%s.exists' % self.__class__.__name__

    # validate inputs
    input_fields = {
        'record_key': record_key
    }
    for key, value in input_fields.items():
        object_title = '%s(%s=%s)' % (title, key, str(value))
        self.fields.validate(value, '.%s' % key, object_title)

    # send get metadata request
    file_path = '/%s' % record_key
    try:
        self.dropbox.files_get_metadata(file_path)
    except Exception as err:
        if str(err).find("LookupError('not_found'") > -1:
            return False
        else:
            raise DropboxConnectionError(title)

    return True
```
split_name: train
func_code_url: https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/dropbox.py#L298-L327

2,310
repository_name: Microsoft/azure-devops-python-api
func_path_in_repository: azure-devops/azure/devops/v5_1/maven/maven_client.py
func_name: MavenClient.get_package_version
language: python

```python
def get_package_version(self, feed, group_id, artifact_id, version, show_deleted=None):
    """GetPackageVersion.
    [Preview API] Get information about a package version.
    :param str feed: Name or ID of the feed.
    :param str group_id: Group ID of the package.
    :param str artifact_id: Artifact ID of the package.
    :param str version: Version of the package.
    :param bool show_deleted: True to show information for deleted packages.
    :rtype: :class:`<Package> <azure.devops.v5_1.maven.models.Package>`
    """
    route_values = {}
    if feed is not None:
        route_values['feed'] = self._serialize.url('feed', feed, 'str')
    if group_id is not None:
        route_values['groupId'] = self._serialize.url('group_id', group_id, 'str')
    if artifact_id is not None:
        route_values['artifactId'] = self._serialize.url('artifact_id', artifact_id, 'str')
    if version is not None:
        route_values['version'] = self._serialize.url('version', version, 'str')
    query_parameters = {}
    if show_deleted is not None:
        query_parameters['showDeleted'] = self._serialize.query('show_deleted', show_deleted, 'bool')
    response = self._send(http_method='GET',
                          location_id='180ed967-377a-4112-986b-607adb14ded4',
                          version='5.1-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('Package', response)
```
split_name: train
func_code_url: https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/maven/maven_client.py#L126-L153

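A hedged usage sketch for the record above. The connection setup follows the SDK's documented pattern, but the versioned client accessor name may differ by SDK release, and the organization URL, personal access token, and feed/package coordinates are all placeholders:

```python
from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

connection = Connection(base_url='https://dev.azure.com/yourorg',
                        creds=BasicAuthentication('', 'your-pat'))
maven_client = connection.clients_v5_1.get_maven_client()  # accessor name assumed

package = maven_client.get_package_version(feed='my-feed',
                                           group_id='com.example',
                                           artifact_id='my-lib',
                                           version='1.0.0')
print(package.name)
```
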
2,311
repository_name: signaturit/python-sdk
func_path_in_repository: signaturit_sdk/signaturit_client.py
func_name: SignaturitClient.delete_subscription
language: python

```python
def delete_subscription(self, subscription_id):
    """
    Delete single subscription
    """
    url = self.SUBSCRIPTIONS_ID_URL % subscription_id

    connection = Connection(self.token)
    connection.set_url(self.production, url)

    return connection.delete_request()
```
split_name: train
func_code_url: https://github.com/signaturit/python-sdk/blob/2419c6d9675d901244f807ae360dc58aa46109a9/signaturit_sdk/signaturit_client.py#L784-L793

2,312
repository_name: DLR-RM/RAFCON
func_path_in_repository: source/rafcon/gui/helpers/state_machine.py
func_name: save_selected_state_as
language: python

```python
def save_selected_state_as():
    """Save selected state as separate state machine

    :return True if successfully stored, False if the storing process was canceled or stopped by condition fail
    :rtype bool:
    :raises exceptions.ValueError: If dialog response ids are out of bounds
    """
    state_machine_manager_model = rafcon.gui.singleton.state_machine_manager_model
    selection = state_machine_manager_model.get_selected_state_machine_model().selection
    selected_state = selection.get_selected_state()
    state_machine_id = state_machine_manager_model.get_selected_state_machine_model().state_machine.state_machine_id
    if len(selection.states) == 1:
        state_m = copy.copy(selected_state)
        sm_m = StateMachineModel(StateMachine(root_state=state_m.state))
        sm_m.root_state = state_m
        path = interface.create_folder_func("Please choose a root folder and a folder name for the state-machine your "
                                            "state is saved in. The default folder name is the name of state.",
                                            format_default_folder_name(selected_state.state.name))
        if path:
            storage.save_state_machine_to_path(sm_m.state_machine, base_path=path)
            sm_m.store_meta_data()
        else:
            logger.warning("No valid path specified")
            return False

        def open_as_state_machine_saved_state_as_separate_state_machine():
            logger.debug("Open state machine.")
            try:
                open_state_machine(path=path, recent_opened_notification=True)
            except (ValueError, IOError) as e:
                logger.error('Error while trying to open state machine: {0}'.format(e))

        # check if state machine is in library path
        root_window = rafcon.gui.singleton.main_window_controller.get_root_window()
        if library_manager.is_os_path_within_library_root_paths(path):
            _library_path, _library_name = \
                library_manager.get_library_path_and_name_for_os_path(sm_m.state_machine.file_system_path)
            overwrote_old_lib = library_manager.is_library_in_libraries(_library_path, _library_name)

            message_string = "You stored your state machine in a path that is within the library root paths. " \
                             "Thereby your state machine can be used as a library state.\n\n" \
                             "Do you want to:"
            table_header = ["Option", "Description"]
            table_data = [(True, "Substitute the original state by this new library state."),
                          (True, "Open the newly created library state machine.")]
            if overwrote_old_lib:
                table_data.append((False, "Refresh all open state machines, as an already existing library was "
                                          "overwritten."))

            dialog = RAFCONCheckBoxTableDialog(message_string,
                                               button_texts=("Apply", "Cancel"),
                                               table_header=table_header, table_data=table_data,
                                               message_type=Gtk.MessageType.QUESTION,
                                               parent=root_window,
                                               width=800, standalone=False)
            response_id = dialog.run()

            if response_id == 1:  # Apply pressed
                if overwrote_old_lib and dialog.list_store[2][0]:  # refresh all open state machine selected
                    logger.debug("Refresh all is triggered.")
                    refresh_all()
                else:  # if not all was refreshed at least the libraries are refreshed
                    logger.debug("Library refresh is triggered.")
                    refresh_libraries()

                if dialog.list_store[0][0]:  # Substitute saved state with Library selected
                    logger.debug("Substitute saved state with Library.")
                    if dialog.list_store[0][0] or dialog.list_store[0][1]:
                        refresh_libraries()
                    state_machine_manager_model.selected_state_machine_id = state_machine_id
                    [library_path, library_name] = library_manager.get_library_path_and_name_for_os_path(path)
                    state = library_manager.get_library_instance(library_path, library_name)
                    try:
                        substitute_selected_state(state, as_template=False)
                    except ValueError as e:
                        logger.error('Error while trying to open state machine: {0}'.format(e))
                if dialog.list_store[1][0]:  # Open as state machine saved state as separate state machine selected
                    open_as_state_machine_saved_state_as_separate_state_machine()
            elif response_id in [2, -4]:  # Cancel or Close pressed
                pass
            else:
                raise ValueError("Response id: {} is not considered".format(response_id))

            dialog.destroy()
        else:
            # Offer to open saved state machine dialog
            message_string = "Should the newly created state machine be opened?"
            dialog = RAFCONButtonDialog(message_string, ["Open", "Do not open"],
                                        message_type=Gtk.MessageType.QUESTION,
                                        parent=root_window)
            response_id = dialog.run()

            if response_id == 1:  # Apply pressed
                open_as_state_machine_saved_state_as_separate_state_machine()
            elif response_id in [2, -4]:  # Cancel or Close pressed
                pass
            else:
                raise ValueError("Response id: {} is not considered".format(response_id))

            dialog.destroy()

        return True
    else:
        logger.warning("Multiple states can not be saved as state machine directly. Group them before.")
        return False
```
split_name: train
func_code_url: https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/helpers/state_machine.py#L267-L371

2,313
repository_name: Tanganelli/CoAPthon3
func_path_in_repository: coapthon/client/coap.py
func_name: CoAP._send_block_request
language: python

```python
def _send_block_request(self, transaction):
    """
    A former request resulted in a block wise transfer. With this method, the block wise transfer
    will be continued, including triggering of the retry mechanism.

    :param transaction: The former transaction including the request which should be continued.
    """
    transaction = self._messageLayer.send_request(transaction.request)
    # ... but don't forget to reset the acknowledge flag
    transaction.request.acknowledged = False
    self.send_datagram(transaction.request)
    if transaction.request.type == defines.Types["CON"]:
        self._start_retransmission(transaction, transaction.request)
```
split_name: train
func_code_url: https://github.com/Tanganelli/CoAPthon3/blob/985763bfe2eb9e00f49ec100c5b8877c2ed7d531/coapthon/client/coap.py#L131-L143

2,314
repository_name: pycontribs/jira
func_path_in_repository: jira/client.py
func_name: JIRA.issue
language: python

```python
def issue(self, id, fields=None, expand=None):
    """Get an issue Resource from the server.

    :param id: ID or key of the issue to get
    :type id: Union[Issue, str]
    :param fields: comma-separated string of issue fields to include in the results
    :type fields: Optional[str]
    :param expand: extra information to fetch inside each resource
    :type expand: Optional[str]

    :rtype: Issue
    """
    # this allows us to pass Issue objects to issue()
    if isinstance(id, Issue):
        return id

    issue = Issue(self._options, self._session)
    params = {}
    if fields is not None:
        params['fields'] = fields
    if expand is not None:
        params['expand'] = expand
    issue.find(id, params=params)
    return issue
```
split_name: train
func_code_url: https://github.com/pycontribs/jira/blob/397db5d78441ed6a680a9b7db4c62030ade1fd8a/jira/client.py#L1224-L1247

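A usage sketch for the record above, following python-jira's documented constructor; the server URL, credentials, and issue key are placeholders. Passing `fields` trims the response payload, and passing an `Issue` object back in returns it unchanged:

```python
from jira import JIRA

jira = JIRA(server='https://jira.example.com', basic_auth=('user', 'api-token'))

issue = jira.issue('PROJ-123', fields='summary,status', expand='changelog')
print(issue.key, issue.fields.summary)

assert jira.issue(issue) is issue  # Issue objects pass straight through
```
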
2,315
repository_name: axialmarket/fsq
func_path_in_repository: fsq/const.py
func_name: const
language: python

```python
def const(const):
    '''Convenience wrapper to yield the value of a constant'''
    try:
        return getattr(_c, const)
    except AttributeError:
        raise FSQEnvError(errno.EINVAL, u'No such constant:'\
                          u' {0}'.format(const))
    except TypeError:
        raise TypeError(errno.EINVAL, u'const name must be a string or'\
                        u' unicode object, not:'\
                        u' {0}'.format(const.__class__.__name__))
```
split_name: train
func_code_url: https://github.com/axialmarket/fsq/blob/43b84c292cb8a187599d86753b947cf73248f989/fsq/const.py#L19-L29

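A self-contained sketch of the lookup pattern above: `getattr` on a constants holder, with AttributeError mapped to a domain error and TypeError raised for non-string names. The `_Constants` class, its attribute, and the local `FSQEnvError` are stand-ins for fsq's real module-level objects:

```python
import errno

class _Constants(object):
    FSQ_ROOT = '/var/fsq'  # illustrative constant

_c = _Constants()  # stand-in for the module's constants object

class FSQEnvError(Exception):
    pass

def const(name):
    try:
        return getattr(_c, name)
    except AttributeError:
        raise FSQEnvError(errno.EINVAL, u'No such constant: {0}'.format(name))
    except TypeError:
        # getattr raises TypeError when name is not a string
        raise TypeError(errno.EINVAL, u'const name must be a string or'
                        u' unicode object, not: {0}'.format(name.__class__.__name__))

print(const('FSQ_ROOT'))  # /var/fsq
```
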
2,316
repository_name: ahopkins/sanic-jwt
func_path_in_repository: sanic_jwt/authentication.py
func_name: Authentication.generate_access_token
language: python

```python
async def generate_access_token(self, user):
    """
    Generate an access token for a given user.
    """
    payload = await self._get_payload(user)
    secret = self._get_secret(True)
    algorithm = self._get_algorithm()

    return jwt.encode(payload, secret, algorithm=algorithm).decode("utf-8")
```
split_name: train
func_code_url: https://github.com/ahopkins/sanic-jwt/blob/fca7750499c8cedde823d778512f613777fb5282/sanic_jwt/authentication.py#L441-L449

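The trailing `.decode("utf-8")` in the record reflects PyJWT 1.x, where `jwt.encode` returns bytes; on PyJWT 2.x it returns `str` and that decode step would raise AttributeError. A version-tolerant sketch of the same encode step (payload and secret are illustrative):

```python
import jwt  # PyJWT

payload = {'user_id': 42}
secret = 'not-a-real-secret'

token = jwt.encode(payload, secret, algorithm='HS256')
if isinstance(token, bytes):  # PyJWT 1.x returns bytes
    token = token.decode('utf-8')

print(jwt.decode(token, secret, algorithms=['HS256']))  # {'user_id': 42}
```
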
2,317
repository_name: saltstack/salt
func_path_in_repository: salt/utils/vmware.py
func_name: list_datastore_full
language: python

```python
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )

    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    items['hosts'] = []

    for host in datastore_object.host:
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)

    return items
```
['def', 'list_datastore_full', '(', 'service_instance', ',', 'datastore', ')', ':', 'datastore_object', '=', 'get_mor_by_name', '(', 'service_instance', ',', 'vim', '.', 'Datastore', ',', 'datastore', ')', 'if', 'not', 'datastore_object', ':', 'raise', 'salt', '.', 'exceptions', '.', 'VMwareObjectRetrievalError', '(', "'Datastore \\'{0}\\' does not exist.'", '.', 'format', '(', 'datastore', ')', ')', 'items', '=', '{', '}', 'items', '[', "'name'", ']', '=', 'str', '(', 'datastore_object', '.', 'summary', '.', 'name', ')', '.', 'replace', '(', '"\'"', ',', '""', ')', 'items', '[', "'type'", ']', '=', 'str', '(', 'datastore_object', '.', 'summary', '.', 'type', ')', '.', 'replace', '(', '"\'"', ',', '""', ')', 'items', '[', "'url'", ']', '=', 'str', '(', 'datastore_object', '.', 'summary', '.', 'url', ')', '.', 'replace', '(', '"\'"', ',', '""', ')', 'items', '[', "'capacity'", ']', '=', 'datastore_object', '.', 'summary', '.', 'capacity', '/', '1024', '/', '1024', 'items', '[', "'free'", ']', '=', 'datastore_object', '.', 'summary', '.', 'freeSpace', '/', '1024', '/', '1024', 'items', '[', "'used'", ']', '=', 'items', '[', "'capacity'", ']', '-', 'items', '[', "'free'", ']', 'items', '[', "'usage'", ']', '=', '(', 'float', '(', 'items', '[', "'used'", ']', ')', '/', 'float', '(', 'items', '[', "'capacity'", ']', ')', ')', '*', '100', 'items', '[', "'hosts'", ']', '=', '[', ']', 'for', 'host', 'in', 'datastore_object', '.', 'host', ':', 'host_key', '=', 'str', '(', 'host', '.', 'key', ')', '.', 'replace', '(', '"\'"', ',', '""', ')', '.', 'split', '(', '":"', ',', '1', ')', '[', '1', ']', 'host_object', '=', 'get_mor_by_moid', '(', 'service_instance', ',', 'vim', '.', 'HostSystem', ',', 'host_key', ')', 'items', '[', "'hosts'", ']', '.', 'append', '(', 'host_object', '.', 'name', ')', 'return', 'items']
Returns a dictionary with the basic information for the given datastore: name, type, url, capacity, free, used, usage, hosts service_instance The Service Instance Object from which to obtain datastores. datastore Name of the datastore.
['Returns', 'a', 'dictionary', 'with', 'the', 'basic', 'information', 'for', 'the', 'given', 'datastore', ':', 'name', 'type', 'url', 'capacity', 'free', 'used', 'usage', 'hosts']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1959-L1992
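A small wrapper sketch; obtaining the pyVmomi service instance is environment-specific, so it is left as a parameter rather than constructed here.

import salt.utils.vmware as vmware_utils

def show_datastore_usage(service_instance, name):
    # Values are in MiB, per the /1024/1024 conversions in the record above.
    info = vmware_utils.list_datastore_full(service_instance, name)
    print('{0}: {1:.0f}/{2:.0f} MiB used ({3:.1f}%), hosts: {4}'.format(
        info['name'], info['used'], info['capacity'], info['usage'], info['hosts']))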
2,318
synw/goerr
goerr/__init__.py
Err._create_err
def _create_err(self, errclass: str, *args) -> "Err": """ Create an error """ error = self._new_err(errclass, *args) self._add(error) return error
python
def _create_err(self, errclass: str, *args) -> "Err": """ Create an error """ error = self._new_err(errclass, *args) self._add(error) return error
['def', '_create_err', '(', 'self', ',', 'errclass', ':', 'str', ',', '*', 'args', ')', '->', '"Err"', ':', 'error', '=', 'self', '.', '_new_err', '(', 'errclass', ',', '*', 'args', ')', 'self', '.', '_add', '(', 'error', ')', 'return', 'error']
Create an error
['Create', 'an', 'error']
train
https://github.com/synw/goerr/blob/08b3809d6715bffe26899a769d96fa5de8573faf/goerr/__init__.py#L91-L97
2,319
a2liu/mr-clean
mr_clean/core/tools/mrc.py
clean
def clean(df,error_rate = 0):
    """ Superficially cleans data, i.e. changing simple things about formatting.
    Parameters:
    df - DataFrame
        DataFrame to clean
    error_rate - float {0 <= error_rate <= 1}, default 0
        Maximum amount of errors/inconsistencies caused explicitly by cleaning, expressed
        as a percentage of total dataframe rows (0 = 0%, .5 = 50%, etc.)
        Ex: na values from coercing a column of data to numeric"""
    df = df.copy()
    # Change colnames
    basics.clean_colnames(df) # Eventually use a more advanced function to clean colnames
    print('Changed colnames to {}'.format(df.columns))
    # Remove extra whitespace
    obj_col_list = df.select_dtypes(include = 'object').columns
    for col_name in obj_col_list:
        df[col_name] = basics.col_strip(df,col_name)
        print("Stripped extra whitespace from '{}'".format(col_name))
    # Coerce columns if possible
    for col_name in obj_col_list:
        new_dtype = coerce_col(df,col_name,error_rate)
        if new_dtype is not None:
            print("Coerced '{}' to datatype '{}'".format(col_name, new_dtype))
    # Scrub columns
    obj_col_list = df.select_dtypes(include = 'object').columns
    for col_name in obj_col_list:
        scrubf, scrubb = smart_scrub(df,col_name,1-error_rate)
        if scrubf is not None or scrubb is not None:
            print("Scrubbed '{}' from the front and '{}' from the back of column '{}'" \
                  .format(scrubf,scrubb,col_name))
    # Coerce columns if possible
    for col_name in obj_col_list:
        new_dtype = coerce_col(df,col_name,error_rate)
        if new_dtype is not None:
            print("Coerced '{}' to datatype '{}'".format(col_name, new_dtype))
    return df
python
def clean(df,error_rate = 0):
    """ Superficially cleans data, i.e. changing simple things about formatting.
    Parameters:
    df - DataFrame
        DataFrame to clean
    error_rate - float {0 <= error_rate <= 1}, default 0
        Maximum amount of errors/inconsistencies caused explicitly by cleaning, expressed
        as a percentage of total dataframe rows (0 = 0%, .5 = 50%, etc.)
        Ex: na values from coercing a column of data to numeric"""
    df = df.copy()
    # Change colnames
    basics.clean_colnames(df) # Eventually use a more advanced function to clean colnames
    print('Changed colnames to {}'.format(df.columns))
    # Remove extra whitespace
    obj_col_list = df.select_dtypes(include = 'object').columns
    for col_name in obj_col_list:
        df[col_name] = basics.col_strip(df,col_name)
        print("Stripped extra whitespace from '{}'".format(col_name))
    # Coerce columns if possible
    for col_name in obj_col_list:
        new_dtype = coerce_col(df,col_name,error_rate)
        if new_dtype is not None:
            print("Coerced '{}' to datatype '{}'".format(col_name, new_dtype))
    # Scrub columns
    obj_col_list = df.select_dtypes(include = 'object').columns
    for col_name in obj_col_list:
        scrubf, scrubb = smart_scrub(df,col_name,1-error_rate)
        if scrubf is not None or scrubb is not None:
            print("Scrubbed '{}' from the front and '{}' from the back of column '{}'" \
                  .format(scrubf,scrubb,col_name))
    # Coerce columns if possible
    for col_name in obj_col_list:
        new_dtype = coerce_col(df,col_name,error_rate)
        if new_dtype is not None:
            print("Coerced '{}' to datatype '{}'".format(col_name, new_dtype))
    return df
['def', 'clean', '(', 'df', ',', 'error_rate', '=', '0', ')', ':', 'df', '=', 'df', '.', 'copy', '(', ')', '# Change colnames', 'basics', '.', 'clean_colnames', '(', 'df', ')', '# Eventually use a more advanced function to clean colnames', 'print', '(', "'Changed colnames to {}'", '.', 'format', '(', 'df', '.', 'columns', ')', ')', '# Remove extra whitespace', 'obj_col_list', '=', 'df', '.', 'select_dtypes', '(', 'include', '=', "'object'", ')', '.', 'columns', 'for', 'col_name', 'in', 'obj_col_list', ':', 'df', '[', 'col_name', ']', '=', 'basics', '.', 'col_strip', '(', 'df', ',', 'col_name', ')', 'print', '(', '"Stripped extra whitespace from \'{}\'"', '.', 'format', '(', 'col_name', ')', ')', '# Coerce columns if possible', 'for', 'col_name', 'in', 'obj_col_list', ':', 'new_dtype', '=', 'coerce_col', '(', 'df', ',', 'col_name', ',', 'error_rate', ')', 'if', 'new_dtype', 'is', 'not', 'None', ':', 'print', '(', '"Coerced \'{}\' to datatype \'{}\'"', '.', 'format', '(', 'col_name', ',', 'new_dtype', ')', ')', '# Scrub columns', 'obj_col_list', '=', 'df', '.', 'select_dtypes', '(', 'include', '=', "'object'", ')', '.', 'columns', 'for', 'col_name', 'in', 'obj_col_list', ':', 'scrubf', ',', 'scrubb', '=', 'smart_scrub', '(', 'df', ',', 'col_name', ',', '1', '-', 'error_rate', ')', 'if', 'scrubf', 'is', 'not', 'None', 'or', 'scrubb', 'is', 'not', 'None', ':', 'print', '(', '"Scrubbed \'{}\' from the front and \'{}\' from the back of column \'{}\'"', '.', 'format', '(', 'scrubf', ',', 'scrubb', ',', 'col_name', ')', ')', '# Coerice columns if possible', 'for', 'col_name', 'in', 'obj_col_list', ':', 'new_dtype', '=', 'coerce_col', '(', 'df', ',', 'col_name', ',', 'error_rate', ')', 'if', 'new_dtype', 'is', 'not', 'None', ':', 'print', '(', '"Coerced \'{}\' to datatype \'{}\'"', '.', 'format', '(', 'col_name', ',', 'new_dtype', ')', ')', 'return', 'df']
Superficially cleans data, i.e. changing simple things about formatting. Parameters: df - DataFrame DataFrame to clean error_rate - float {0 <= error_rate <= 1}, default 0 Maximum amount of errors/inconsistencies caused explicitly by cleaning, expressed as a percentage of total dataframe rows (0 = 0%, .5 = 50%, etc.) Ex: na values from coercing a column of data to numeric
['Superficially', 'cleans', 'data', 'i', '.', 'e', '.', 'changing', 'simple', 'things', 'about', 'formatting', '.', 'Parameters', ':', 'df', '-', 'DataFrame', 'DataFrame', 'to', 'clean', 'error_rate', '-', 'float', '{', '0', '<', '=', 'error_rate', '<', '=', '1', '}', 'default', '0', 'Maximum', 'amount', 'of', 'errors', '/', 'inconsistencies', 'caused', 'explicitly', 'by', 'cleaning', 'expressed', 'as', 'a', 'percentage', 'of', 'total', 'dataframe', 'rows', '(', '0', '=', '0%', '.', '5', '=', '50%', 'etc', '.', ')', 'Ex', ':', 'na', 'values', 'from', 'coercing', 'a', 'column', 'of', 'data', 'to', 'numeric']
train
https://github.com/a2liu/mr-clean/blob/0ee4ee5639f834dec4b59b94442fa84373f3c176/mr_clean/core/tools/mrc.py#L7-L49
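A usage sketch built from the module path recorded above; the sample frame and error_rate value are illustrative only.

import pandas as pd
from mr_clean.core.tools.mrc import clean  # path from func_path_in_repository

df = pd.DataFrame({'Name ': ['  alice', 'bob '], 'Value': ['1', '2']})
# Allow up to 5% of rows to become NaN when columns are coerced to numeric.
cleaned = clean(df, error_rate=0.05)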
2,320
spotify/luigi
luigi/mock.py
MockFileSystem.copy
def copy(self, path, dest, raise_if_exists=False):
    """
    Copies the contents of a single file path to dest
    """
    if raise_if_exists and dest in self.get_all_data():
        raise RuntimeError('Destination exists: %s' % dest)
    contents = self.get_all_data()[path]
    self.get_all_data()[dest] = contents
python
def copy(self, path, dest, raise_if_exists=False):
    """
    Copies the contents of a single file path to dest
    """
    if raise_if_exists and dest in self.get_all_data():
        raise RuntimeError('Destination exists: %s' % dest)
    contents = self.get_all_data()[path]
    self.get_all_data()[dest] = contents
['def', 'copy', '(', 'self', ',', 'path', ',', 'dest', ',', 'raise_if_exists', '=', 'False', ')', ':', 'if', 'raise_if_exists', 'and', 'dest', 'in', 'self', '.', 'get_all_data', '(', ')', ':', 'raise', 'RuntimeError', '(', "'Destination exists: %s'", '%', 'path', ')', 'contents', '=', 'self', '.', 'get_all_data', '(', ')', '[', 'path', ']', 'self', '.', 'get_all_data', '(', ')', '[', 'dest', ']', '=', 'contents']
Copies the contents of a single file path to dest
['Copies', 'the', 'contents', 'of', 'a', 'single', 'file', 'path', 'to', 'dest']
train
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/mock.py#L40-L47
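A sketch exercising the in-memory store directly; MockFileSystem keeps everything in the dict returned by get_all_data(), as the method body shows.

from luigi.mock import MockFileSystem

fs = MockFileSystem()
fs.get_all_data()['a.txt'] = b'hello'   # seed the shared in-memory store
fs.copy('a.txt', 'b.txt')
assert fs.get_all_data()['b.txt'] == b'hello'
# fs.copy('a.txt', 'b.txt', raise_if_exists=True)  # would raise RuntimeError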
2,321
note35/sinon
sinon/lib/stub.py
SinonStub.__get_return_value_no_withargs
def __get_return_value_no_withargs(self, *args, **kwargs):
    """
    Pre-conditions:
        (1) The user has created a stub and specified the stub behaviour
        (2) The user has called the stub function with the specified "args" and "kwargs"
        (3) No 'withArgs' conditions were applicable in this case
    Args:
        args: tuple, the arguments inputted by the user
        kwargs: dictionary, the keyword arguments inputted by the user
    Returns:
        any type, the appropriate return value, based on the stub's behaviour setup and the user input
    """
    c = self._conditions
    call_count = self._wrapper.callCount
    # if there might be applicable onCall conditions
    if call_count in c["oncall"]:
        index_list = [i for i, x in enumerate(c["oncall"]) if x and not c["args"][i] and not c["kwargs"][i]]
        for i in reversed(index_list):
            # if the onCall condition applies
            if call_count == c["oncall"][i]:
                return c["action"][i](*args, **kwargs)
    # else all conditions did not match
    return c["default"](*args, **kwargs)
python
def __get_return_value_no_withargs(self, *args, **kwargs):
    """
    Pre-conditions:
        (1) The user has created a stub and specified the stub behaviour
        (2) The user has called the stub function with the specified "args" and "kwargs"
        (3) No 'withArgs' conditions were applicable in this case
    Args:
        args: tuple, the arguments inputted by the user
        kwargs: dictionary, the keyword arguments inputted by the user
    Returns:
        any type, the appropriate return value, based on the stub's behaviour setup and the user input
    """
    c = self._conditions
    call_count = self._wrapper.callCount
    # if there might be applicable onCall conditions
    if call_count in c["oncall"]:
        index_list = [i for i, x in enumerate(c["oncall"]) if x and not c["args"][i] and not c["kwargs"][i]]
        for i in reversed(index_list):
            # if the onCall condition applies
            if call_count == c["oncall"][i]:
                return c["action"][i](*args, **kwargs)
    # else all conditions did not match
    return c["default"](*args, **kwargs)
['def', '__get_return_value_no_withargs', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'c', '=', 'self', '.', '_conditions', 'call_count', '=', 'self', '.', '_wrapper', '.', 'callCount', '# if there might be applicable onCall conditions', 'if', 'call_count', 'in', 'c', '[', '"oncall"', ']', ':', 'index_list', '=', '[', 'i', 'for', 'i', ',', 'x', 'in', 'enumerate', '(', 'c', '[', '"oncall"', ']', ')', 'if', 'x', 'and', 'not', 'c', '[', '"args"', ']', '[', 'i', ']', 'and', 'not', 'c', '[', '"kwargs"', ']', '[', 'i', ']', ']', 'for', 'i', 'in', 'reversed', '(', 'index_list', ')', ':', '# if the onCall condition applies', 'if', 'call_count', '==', 'c', '[', '"oncall"', ']', '[', 'i', ']', ':', 'return', 'c', '[', '"action"', ']', '[', 'i', ']', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', '# else all conditions did not match', 'return', 'c', '[', '"default"', ']', '(', '*', 'args', ',', '*', '*', 'kwargs', ')']
Pre-conditions:
    (1) The user has created a stub and specified the stub behaviour
    (2) The user has called the stub function with the specified "args" and "kwargs"
    (3) No 'withArgs' conditions were applicable in this case
Args:
    args: tuple, the arguments inputted by the user
    kwargs: dictionary, the keyword arguments inputted by the user
Returns:
    any type, the appropriate return value, based on the stub's behaviour setup and the user input
['Pre', '-', 'conditions', ':', '(', '1', ')', 'The', 'user', 'has', 'created', 'a', 'stub', 'and', 'specified', 'the', 'stub', 'behaviour', '(', '2', ')', 'The', 'user', 'has', 'called', 'the', 'stub', 'function', 'with', 'the', 'specified', 'args', 'and', 'kwargs', '(', '3', ')', 'No', 'withArgs', 'conditions', 'were', 'applicable', 'in', 'this', 'case', 'Args', ':', 'args', ':', 'tuple', 'the', 'arguments', 'inputed', 'by', 'the', 'user', 'kwargs', ':', 'dictionary', 'the', 'keyword', 'arguments', 'inputed', 'by', 'the', 'user', 'Returns', ':', 'any', 'type', 'the', 'appropriate', 'return', 'value', 'based', 'on', 'the', 'stub', 's', 'behaviour', 'setup', 'and', 'the', 'user', 'input']
train
https://github.com/note35/sinon/blob/f1d551b679b393d64d926a8a279320904c38d0f5/sinon/lib/stub.py#L137-L161
2,322
SKA-ScienceDataProcessor/integration-prototype
sip/examples/flask_processing_controller/app/old.db/client.py
ConfigDb.get_sub_array_ids
def get_sub_array_ids(self): """Get list of sub array ids""" # Initialise empty list _scheduling_block_ids = [] _sub_array_ids = [] for blocks_id in self.get_sched_block_instance_ids(): _scheduling_block_ids.append(blocks_id) block_details = self.get_block_details(_scheduling_block_ids) for details in block_details: _sub_array_ids.append(details['sub_array_id']) _sub_array_ids = sorted(list(set(_sub_array_ids))) return _sub_array_ids
python
def get_sub_array_ids(self): """Get list of sub array ids""" # Initialise empty list _scheduling_block_ids = [] _sub_array_ids = [] for blocks_id in self.get_sched_block_instance_ids(): _scheduling_block_ids.append(blocks_id) block_details = self.get_block_details(_scheduling_block_ids) for details in block_details: _sub_array_ids.append(details['sub_array_id']) _sub_array_ids = sorted(list(set(_sub_array_ids))) return _sub_array_ids
['def', 'get_sub_array_ids', '(', 'self', ')', ':', '# Initialise empty list', '_scheduling_block_ids', '=', '[', ']', '_sub_array_ids', '=', '[', ']', 'for', 'blocks_id', 'in', 'self', '.', 'get_sched_block_instance_ids', '(', ')', ':', '_scheduling_block_ids', '.', 'append', '(', 'blocks_id', ')', 'block_details', '=', 'self', '.', 'get_block_details', '(', '_scheduling_block_ids', ')', 'for', 'details', 'in', 'block_details', ':', '_sub_array_ids', '.', 'append', '(', 'details', '[', "'sub_array_id'", ']', ')', '_sub_array_ids', '=', 'sorted', '(', 'list', '(', 'set', '(', '_sub_array_ids', ')', ')', ')', 'return', '_sub_array_ids']
Get list of sub array ids
['Get', 'list', 'of', 'sub', 'array', 'ids']
train
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/examples/flask_processing_controller/app/old.db/client.py#L114-L127
2,323
markfinger/assembla
assembla/api.py
Space.wiki_pages
def wiki_pages(self, extra_params=None): """ All Wiki Pages with access to this Space """ return self.api._get_json( WikiPage, space=self, rel_path=self._build_rel_path('wiki_pages'), extra_params=extra_params, )
python
def wiki_pages(self, extra_params=None): """ All Wiki Pages with access to this Space """ return self.api._get_json( WikiPage, space=self, rel_path=self._build_rel_path('wiki_pages'), extra_params=extra_params, )
['def', 'wiki_pages', '(', 'self', ',', 'extra_params', '=', 'None', ')', ':', 'return', 'self', '.', 'api', '.', '_get_json', '(', 'WikiPage', ',', 'space', '=', 'self', ',', 'rel_path', '=', 'self', '.', '_build_rel_path', '(', "'wiki_pages'", ')', ',', 'extra_params', '=', 'extra_params', ',', ')']
All Wiki Pages with access to this Space
['All', 'Wiki', 'Pages', 'with', 'access', 'to', 'this', 'Space']
train
https://github.com/markfinger/assembla/blob/967a77a5ba718df94f60e832b6e0cf14c72426aa/assembla/api.py#L394-L403
2,324
gccxml/pygccxml
pygccxml/parser/config.py
create_compiler_path
def create_compiler_path(xml_generator, compiler_path):
    """
    Try to guess a path for the compiler.

    If you want to use a specific compiler, please provide the compiler path
    manually, as the guess may not be what you are expecting. Providing the
    path can be done by passing it as an argument (compiler_path) to the
    xml_generator_configuration_t() or by defining it in your pygccxml
    configuration file.
    """
    if xml_generator == 'castxml' and compiler_path is None:
        if platform.system() == 'Windows':
            # Look for msvc
            p = subprocess.Popen(
                ['where', 'cl'],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            compiler_path = p.stdout.read().decode("utf-8").rstrip()
            p.wait()
            p.stdout.close()
            p.stderr.close()
            # No msvc found; look for mingw
            if compiler_path == '':
                p = subprocess.Popen(
                    ['where', 'mingw'],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
                compiler_path = p.stdout.read().decode("utf-8").rstrip()
                p.wait()
                p.stdout.close()
                p.stderr.close()
        else:
            # OS X or Linux
            # Look for clang first, then gcc
            p = subprocess.Popen(
                ['which', 'clang++'],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            compiler_path = p.stdout.read().decode("utf-8").rstrip()
            p.wait()
            p.stdout.close()
            p.stderr.close()
            # No clang found; use gcc
            if compiler_path == '':
                p = subprocess.Popen(
                    ['which', 'c++'],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
                compiler_path = p.stdout.read().decode("utf-8").rstrip()
                p.wait()
                p.stdout.close()
                p.stderr.close()
    if compiler_path == "":
        compiler_path = None
    return compiler_path
python
def create_compiler_path(xml_generator, compiler_path):
    """
    Try to guess a path for the compiler.

    If you want to use a specific compiler, please provide the compiler path
    manually, as the guess may not be what you are expecting. Providing the
    path can be done by passing it as an argument (compiler_path) to the
    xml_generator_configuration_t() or by defining it in your pygccxml
    configuration file.
    """
    if xml_generator == 'castxml' and compiler_path is None:
        if platform.system() == 'Windows':
            # Look for msvc
            p = subprocess.Popen(
                ['where', 'cl'],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            compiler_path = p.stdout.read().decode("utf-8").rstrip()
            p.wait()
            p.stdout.close()
            p.stderr.close()
            # No msvc found; look for mingw
            if compiler_path == '':
                p = subprocess.Popen(
                    ['where', 'mingw'],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
                compiler_path = p.stdout.read().decode("utf-8").rstrip()
                p.wait()
                p.stdout.close()
                p.stderr.close()
        else:
            # OS X or Linux
            # Look for clang first, then gcc
            p = subprocess.Popen(
                ['which', 'clang++'],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            compiler_path = p.stdout.read().decode("utf-8").rstrip()
            p.wait()
            p.stdout.close()
            p.stderr.close()
            # No clang found; use gcc
            if compiler_path == '':
                p = subprocess.Popen(
                    ['which', 'c++'],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
                compiler_path = p.stdout.read().decode("utf-8").rstrip()
                p.wait()
                p.stdout.close()
                p.stderr.close()
    if compiler_path == "":
        compiler_path = None
    return compiler_path
['def', 'create_compiler_path', '(', 'xml_generator', ',', 'compiler_path', ')', ':', 'if', 'xml_generator', '==', "'castxml'", 'and', 'compiler_path', 'is', 'None', ':', 'if', 'platform', '.', 'system', '(', ')', '==', "'Windows'", ':', '# Look for msvc', 'p', '=', 'subprocess', '.', 'Popen', '(', '[', "'where'", ',', "'cl'", ']', ',', 'stdout', '=', 'subprocess', '.', 'PIPE', ',', 'stderr', '=', 'subprocess', '.', 'PIPE', ')', 'compiler_path', '=', 'p', '.', 'stdout', '.', 'read', '(', ')', '.', 'decode', '(', '"utf-8"', ')', '.', 'rstrip', '(', ')', 'p', '.', 'wait', '(', ')', 'p', '.', 'stdout', '.', 'close', '(', ')', 'p', '.', 'stderr', '.', 'close', '(', ')', '# No msvc found; look for mingw', 'if', 'compiler_path', '==', "''", ':', 'p', '=', 'subprocess', '.', 'Popen', '(', '[', "'where'", ',', "'mingw'", ']', ',', 'stdout', '=', 'subprocess', '.', 'PIPE', ',', 'stderr', '=', 'subprocess', '.', 'PIPE', ')', 'compiler_path', '=', 'p', '.', 'stdout', '.', 'read', '(', ')', '.', 'decode', '(', '"utf-8"', ')', '.', 'rstrip', '(', ')', 'p', '.', 'wait', '(', ')', 'p', '.', 'stdout', '.', 'close', '(', ')', 'p', '.', 'stderr', '.', 'close', '(', ')', 'else', ':', '# OS X or Linux', '# Look for clang first, then gcc', 'p', '=', 'subprocess', '.', 'Popen', '(', '[', "'which'", ',', "'clang++'", ']', ',', 'stdout', '=', 'subprocess', '.', 'PIPE', ',', 'stderr', '=', 'subprocess', '.', 'PIPE', ')', 'compiler_path', '=', 'p', '.', 'stdout', '.', 'read', '(', ')', '.', 'decode', '(', '"utf-8"', ')', '.', 'rstrip', '(', ')', 'p', '.', 'wait', '(', ')', 'p', '.', 'stdout', '.', 'close', '(', ')', 'p', '.', 'stderr', '.', 'close', '(', ')', '# No clang found; use gcc', 'if', 'compiler_path', '==', "''", ':', 'p', '=', 'subprocess', '.', 'Popen', '(', '[', "'which'", ',', "'c++'", ']', ',', 'stdout', '=', 'subprocess', '.', 'PIPE', ',', 'stderr', '=', 'subprocess', '.', 'PIPE', ')', 'compiler_path', '=', 'p', '.', 'stdout', '.', 'read', '(', ')', '.', 'decode', '(', '"utf-8"', ')', '.', 'rstrip', '(', ')', 'p', '.', 'wait', '(', ')', 'p', '.', 'stdout', '.', 'close', '(', ')', 'p', '.', 'stderr', '.', 'close', '(', ')', 'if', 'compiler_path', '==', '""', ':', 'compiler_path', '=', 'None', 'return', 'compiler_path']
Try to guess a path for the compiler. If you want to use a specific compiler, please provide the compiler path manually, as the guess may not be what you are expecting. Providing the path can be done by passing it as an argument (compiler_path) to the xml_generator_configuration_t() or by defining it in your pygccxml configuration file.
['Try', 'to', 'guess', 'a', 'path', 'for', 'the', 'compiler', '.']
train
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/parser/config.py#L413-L471
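A direct-call sketch; with compiler_path=None the helper probes PATH exactly as shown above, so no setup is needed beyond having pygccxml installed.

from pygccxml.parser.config import create_compiler_path

path = create_compiler_path('castxml', None)  # probes for cl/mingw or clang++/c++
print(path or 'no compiler found')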
2,325
StackStorm/pybind
pybind/slxos/v17r_1_01a/isis_state/interface_detail/isis_intf/__init__.py
isis_intf._set_circ_chstats
def _set_circ_chstats(self, v, load=False): """ Setter method for circ_chstats, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_chstats (container) If this variable is read-only (config: false) in the source YANG file, then _set_circ_chstats is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_circ_chstats() directly. YANG Description: ISIS circuit change statistics """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=circ_chstats.circ_chstats, is_container='container', presence=False, yang_name="circ-chstats", rest_name="circ-chstats", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-circuit-change-stats', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """circ_chstats must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=circ_chstats.circ_chstats, is_container='container', presence=False, yang_name="circ-chstats", rest_name="circ-chstats", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-circuit-change-stats', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)""", }) self.__circ_chstats = t if hasattr(self, '_set'): self._set()
python
def _set_circ_chstats(self, v, load=False): """ Setter method for circ_chstats, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_chstats (container) If this variable is read-only (config: false) in the source YANG file, then _set_circ_chstats is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_circ_chstats() directly. YANG Description: ISIS circuit change statistics """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=circ_chstats.circ_chstats, is_container='container', presence=False, yang_name="circ-chstats", rest_name="circ-chstats", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-circuit-change-stats', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """circ_chstats must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=circ_chstats.circ_chstats, is_container='container', presence=False, yang_name="circ-chstats", rest_name="circ-chstats", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-circuit-change-stats', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)""", }) self.__circ_chstats = t if hasattr(self, '_set'): self._set()
['def', '_set_circ_chstats', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'circ_chstats', '.', 'circ_chstats', ',', 'is_container', '=', "'container'", ',', 'presence', '=', 'False', ',', 'yang_name', '=', '"circ-chstats"', ',', 'rest_name', '=', '"circ-chstats"', ',', 'parent', '=', 'self', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'True', ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'callpoint'", ':', "u'isis-circuit-change-stats'", ',', "u'cli-suppress-show-path'", ':', 'None', '}', '}', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-isis-operational'", ',', 'defining_module', '=', "'brocade-isis-operational'", ',', 'yang_type', '=', "'container'", ',', 'is_config', '=', 'False', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""circ_chstats must be of a type compatible with container"""', ',', "'defined-type'", ':', '"container"', ',', "'generated-type'", ':', '"""YANGDynClass(base=circ_chstats.circ_chstats, is_container=\'container\', presence=False, yang_name="circ-chstats", rest_name="circ-chstats", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'callpoint\': u\'isis-circuit-change-stats\', u\'cli-suppress-show-path\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-isis-operational\', defining_module=\'brocade-isis-operational\', yang_type=\'container\', is_config=False)"""', ',', '}', ')', 'self', '.', '__circ_chstats', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')']
Setter method for circ_chstats, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_chstats (container) If this variable is read-only (config: false) in the source YANG file, then _set_circ_chstats is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_circ_chstats() directly. YANG Description: ISIS circuit change statistics
['Setter', 'method', 'for', 'circ_chstats', 'mapped', 'from', 'YANG', 'variable', '/', 'isis_state', '/', 'interface_detail', '/', 'isis_intf', '/', 'circ_chstats', '(', 'container', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_circ_chstats', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_circ_chstats', '()', 'directly', '.']
train
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_1_01a/isis_state/interface_detail/isis_intf/__init__.py#L726-L749
2,326
instaloader/instaloader
instaloader/instaloader.py
Instaloader.load_session_from_file
def load_session_from_file(self, username: str, filename: Optional[str] = None) -> None:
    """Internally stores the :class:`requests.Session` object loaded from file.

    If filename is None, the file with the default session path is loaded.

    :raises FileNotFoundError: If the file does not exist.
    """
    if filename is None:
        filename = get_default_session_filename(username)
    with open(filename, 'rb') as sessionfile:
        self.context.load_session_from_file(username, sessionfile)
        self.context.log("Loaded session from %s." % filename)
python
def load_session_from_file(self, username: str, filename: Optional[str] = None) -> None:
    """Internally stores the :class:`requests.Session` object loaded from file.

    If filename is None, the file with the default session path is loaded.

    :raises FileNotFoundError: If the file does not exist.
    """
    if filename is None:
        filename = get_default_session_filename(username)
    with open(filename, 'rb') as sessionfile:
        self.context.load_session_from_file(username, sessionfile)
        self.context.log("Loaded session from %s." % filename)
['def', 'load_session_from_file', '(', 'self', ',', 'username', ':', 'str', ',', 'filename', ':', 'Optional', '[', 'str', ']', '=', 'None', ')', '->', 'None', ':', 'if', 'filename', 'is', 'None', ':', 'filename', '=', 'get_default_session_filename', '(', 'username', ')', 'with', 'open', '(', 'filename', ',', "'rb'", ')', 'as', 'sessionfile', ':', 'self', '.', 'context', '.', 'load_session_from_file', '(', 'username', ',', 'sessionfile', ')', 'self', '.', 'context', '.', 'log', '(', '"Loaded session from %s."', '%', 'filename', ')']
Internally stores the :class:`requests.Session` object loaded from file. If filename is None, the file with the default session path is loaded. :raises FileNotFoundError: If the file does not exist.
['Internally', 'stores', ':', 'class', ':', 'requests', '.', 'Session', 'object', 'loaded', 'from', 'file', '.']
train
https://github.com/instaloader/instaloader/blob/87d877e650cd8020b04b8b51be120599a441fd5b/instaloader/instaloader.py#L386-L397
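A usage sketch; the username is a placeholder and a previously saved session file must already exist, otherwise FileNotFoundError is raised as documented.

import instaloader

L = instaloader.Instaloader()
L.load_session_from_file('some_username')  # loads the default session file for this user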
2,327
openpermissions/perch
perch/model.py
Document.clean
def clean(self): """Remove internal fields""" doc = self._resource result = {k: v for k, v in doc.iteritems() if k not in self.internal_fields} if '_id' in doc and 'id' not in result: result['id'] = doc['_id'] return result
python
def clean(self): """Remove internal fields""" doc = self._resource result = {k: v for k, v in doc.iteritems() if k not in self.internal_fields} if '_id' in doc and 'id' not in result: result['id'] = doc['_id'] return result
['def', 'clean', '(', 'self', ')', ':', 'doc', '=', 'self', '.', '_resource', 'result', '=', '{', 'k', ':', 'v', 'for', 'k', ',', 'v', 'in', 'doc', '.', 'iteritems', '(', ')', 'if', 'k', 'not', 'in', 'self', '.', 'internal_fields', '}', 'if', "'_id'", 'in', 'doc', 'and', "'id'", 'not', 'in', 'result', ':', 'result', '[', "'id'", ']', '=', 'doc', '[', "'_id'", ']', 'return', 'result']
Remove internal fields
['Remove', 'internal', 'fields']
train
https://github.com/openpermissions/perch/blob/36d78994133918f3c52c187f19e50132960a0156/perch/model.py#L395-L404
2,328
mperlet/PyDect200
PyDect200/PyDect200.py
PyDect200.get_state_all
def get_state_all(self): """Returns all device states""" state_dict = {} for device in self.get_device_names().keys(): state_dict[device] = self.get_state(device) return state_dict
python
def get_state_all(self): """Returns all device states""" state_dict = {} for device in self.get_device_names().keys(): state_dict[device] = self.get_state(device) return state_dict
['def', 'get_state_all', '(', 'self', ')', ':', 'state_dict', '=', '{', '}', 'for', 'device', 'in', 'self', '.', 'get_device_names', '(', ')', '.', 'keys', '(', ')', ':', 'state_dict', '[', 'device', ']', '=', 'self', '.', 'get_state', '(', 'device', ')', 'return', 'state_dict']
Returns all device states
['Returns', 'all', 'device', 'states']
train
https://github.com/mperlet/PyDect200/blob/4758d80c663324a612c2772e6442db1472016913/PyDect200/PyDect200.py#L199-L204
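A sketch assuming the client is constructed with the FRITZ!Box password; the constructor arguments are not shown in the record and are an assumption.

from PyDect200.PyDect200 import PyDect200

fritz = PyDect200('my-password')  # constructor arguments are an assumption
for device, state in fritz.get_state_all().items():
    print(device, state)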
2,329
ajslater/picopt
picopt/timestamp.py
_get_timestamp
def _get_timestamp(dirname_full, remove): """ Get the timestamp from the timestamp file. Optionally mark it for removal if we're going to write another one. """ record_filename = os.path.join(dirname_full, RECORD_FILENAME) if not os.path.exists(record_filename): return None mtime = os.stat(record_filename).st_mtime mtime_str = datetime.fromtimestamp(mtime) print('Found timestamp {}:{}'.format(dirname_full, mtime_str)) if Settings.record_timestamp and remove: OLD_TIMESTAMPS.add(record_filename) return mtime
python
def _get_timestamp(dirname_full, remove): """ Get the timestamp from the timestamp file. Optionally mark it for removal if we're going to write another one. """ record_filename = os.path.join(dirname_full, RECORD_FILENAME) if not os.path.exists(record_filename): return None mtime = os.stat(record_filename).st_mtime mtime_str = datetime.fromtimestamp(mtime) print('Found timestamp {}:{}'.format(dirname_full, mtime_str)) if Settings.record_timestamp and remove: OLD_TIMESTAMPS.add(record_filename) return mtime
['def', '_get_timestamp', '(', 'dirname_full', ',', 'remove', ')', ':', 'record_filename', '=', 'os', '.', 'path', '.', 'join', '(', 'dirname_full', ',', 'RECORD_FILENAME', ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'record_filename', ')', ':', 'return', 'None', 'mtime', '=', 'os', '.', 'stat', '(', 'record_filename', ')', '.', 'st_mtime', 'mtime_str', '=', 'datetime', '.', 'fromtimestamp', '(', 'mtime', ')', 'print', '(', "'Found timestamp {}:{}'", '.', 'format', '(', 'dirname_full', ',', 'mtime_str', ')', ')', 'if', 'Settings', '.', 'record_timestamp', 'and', 'remove', ':', 'OLD_TIMESTAMPS', '.', 'add', '(', 'record_filename', ')', 'return', 'mtime']
Get the timestamp from the timestamp file. Optionally mark it for removal if we're going to write another one.
['Get', 'the', 'timestamp', 'from', 'the', 'timestamp', 'file', '.']
train
https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/timestamp.py#L16-L32
2,330
Kronuz/pyScss
scss/selector.py
SimpleSelector.replace_parent
def replace_parent(self, parent_simples): """If ``&`` (or the legacy xCSS equivalent ``self``) appears in this selector, replace it with the given iterable of parent selectors. Returns a tuple of simple selectors. """ assert parent_simples ancestors = parent_simples[:-1] parent = parent_simples[-1] did_replace = False new_tokens = [] for token in self.tokens: if not did_replace and token in ('&', 'self'): did_replace = True new_tokens.extend(parent.tokens) if token == 'self': warn(FutureWarning( "The xCSS 'self' selector is deprecated and will be " "removed in 2.0. Use & instead. ({0!r})" .format(self) )) else: new_tokens.append(token) if not did_replace: # This simple selector doesn't contain a parent reference so just # stick it on the end return parent_simples + (self,) # This simple selector was merged into the direct parent. merged_self = type(self)(parent.combinator, new_tokens) selector = ancestors + (merged_self,) # Our combinator goes on the first ancestor, i.e., substituting "foo # bar baz" into "+ &.quux" produces "+ foo bar baz.quux". This means a # potential conflict with the first ancestor's combinator! root = selector[0] if not _is_combinator_subset_of(self.combinator, root.combinator): raise ValueError( "Can't sub parent {0!r} into {1!r}: " "combinators {2!r} and {3!r} conflict!" .format( parent_simples, self, self.combinator, root.combinator)) root = type(self)(self.combinator, root.tokens) selector = (root,) + selector[1:] return tuple(selector)
python
def replace_parent(self, parent_simples): """If ``&`` (or the legacy xCSS equivalent ``self``) appears in this selector, replace it with the given iterable of parent selectors. Returns a tuple of simple selectors. """ assert parent_simples ancestors = parent_simples[:-1] parent = parent_simples[-1] did_replace = False new_tokens = [] for token in self.tokens: if not did_replace and token in ('&', 'self'): did_replace = True new_tokens.extend(parent.tokens) if token == 'self': warn(FutureWarning( "The xCSS 'self' selector is deprecated and will be " "removed in 2.0. Use & instead. ({0!r})" .format(self) )) else: new_tokens.append(token) if not did_replace: # This simple selector doesn't contain a parent reference so just # stick it on the end return parent_simples + (self,) # This simple selector was merged into the direct parent. merged_self = type(self)(parent.combinator, new_tokens) selector = ancestors + (merged_self,) # Our combinator goes on the first ancestor, i.e., substituting "foo # bar baz" into "+ &.quux" produces "+ foo bar baz.quux". This means a # potential conflict with the first ancestor's combinator! root = selector[0] if not _is_combinator_subset_of(self.combinator, root.combinator): raise ValueError( "Can't sub parent {0!r} into {1!r}: " "combinators {2!r} and {3!r} conflict!" .format( parent_simples, self, self.combinator, root.combinator)) root = type(self)(self.combinator, root.tokens) selector = (root,) + selector[1:] return tuple(selector)
['def', 'replace_parent', '(', 'self', ',', 'parent_simples', ')', ':', 'assert', 'parent_simples', 'ancestors', '=', 'parent_simples', '[', ':', '-', '1', ']', 'parent', '=', 'parent_simples', '[', '-', '1', ']', 'did_replace', '=', 'False', 'new_tokens', '=', '[', ']', 'for', 'token', 'in', 'self', '.', 'tokens', ':', 'if', 'not', 'did_replace', 'and', 'token', 'in', '(', "'&'", ',', "'self'", ')', ':', 'did_replace', '=', 'True', 'new_tokens', '.', 'extend', '(', 'parent', '.', 'tokens', ')', 'if', 'token', '==', "'self'", ':', 'warn', '(', 'FutureWarning', '(', '"The xCSS \'self\' selector is deprecated and will be "', '"removed in 2.0. Use & instead. ({0!r})"', '.', 'format', '(', 'self', ')', ')', ')', 'else', ':', 'new_tokens', '.', 'append', '(', 'token', ')', 'if', 'not', 'did_replace', ':', "# This simple selector doesn't contain a parent reference so just", '# stick it on the end', 'return', 'parent_simples', '+', '(', 'self', ',', ')', '# This simple selector was merged into the direct parent.', 'merged_self', '=', 'type', '(', 'self', ')', '(', 'parent', '.', 'combinator', ',', 'new_tokens', ')', 'selector', '=', 'ancestors', '+', '(', 'merged_self', ',', ')', '# Our combinator goes on the first ancestor, i.e., substituting "foo', '# bar baz" into "+ &.quux" produces "+ foo bar baz.quux". This means a', "# potential conflict with the first ancestor's combinator!", 'root', '=', 'selector', '[', '0', ']', 'if', 'not', '_is_combinator_subset_of', '(', 'self', '.', 'combinator', ',', 'root', '.', 'combinator', ')', ':', 'raise', 'ValueError', '(', '"Can\'t sub parent {0!r} into {1!r}: "', '"combinators {2!r} and {3!r} conflict!"', '.', 'format', '(', 'parent_simples', ',', 'self', ',', 'self', '.', 'combinator', ',', 'root', '.', 'combinator', ')', ')', 'root', '=', 'type', '(', 'self', ')', '(', 'self', '.', 'combinator', ',', 'root', '.', 'tokens', ')', 'selector', '=', '(', 'root', ',', ')', '+', 'selector', '[', '1', ':', ']', 'return', 'tuple', '(', 'selector', ')']
If ``&`` (or the legacy xCSS equivalent ``self``) appears in this selector, replace it with the given iterable of parent selectors. Returns a tuple of simple selectors.
['If', '&', '(', 'or', 'the', 'legacy', 'xCSS', 'equivalent', 'self', ')', 'appears', 'in', 'this', 'selector', 'replace', 'it', 'with', 'the', 'given', 'iterable', 'of', 'parent', 'selectors', '.']
train
https://github.com/Kronuz/pyScss/blob/fb32b317f6e2b4b4aad2b86a74844658ac4aa11e/scss/selector.py#L162-L209
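An internal-API sketch; the constructor shape SimpleSelector(combinator, tokens) is inferred from the type(self)(...) calls in the record, and the combinator values are assumptions.

from scss.selector import SimpleSelector  # internal API, subject to change

parent = (SimpleSelector(' ', ['.card']),)    # parent simples for ".card"
child = SimpleSelector(' ', ['&', ':hover'])  # "&:hover"
merged = child.replace_parent(parent)         # -> simples equivalent to ".card:hover"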
2,331
phoebe-project/phoebe2
phoebe/parameters/parameters.py
ParameterSet.get_description
def get_description(self, twig=None, **kwargs): """ TODO: add documentation """ return self.get_parameter(twig=twig, **kwargs).get_description()
python
def get_description(self, twig=None, **kwargs): """ TODO: add documentation """ return self.get_parameter(twig=twig, **kwargs).get_description()
['def', 'get_description', '(', 'self', ',', 'twig', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'return', 'self', '.', 'get_parameter', '(', 'twig', '=', 'twig', ',', '*', '*', 'kwargs', ')', '.', 'get_description', '(', ')']
TODO: add documentation
['TODO', ':', 'add', 'documentation']
train
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/parameters/parameters.py#L1944-L1948
2,332
gccxml/pygccxml
pygccxml/declarations/scopedef.py
scopedef_t.typedefs
def typedefs(
        self,
        name=None,
        function=None,
        header_dir=None,
        header_file=None,
        recursive=None,
        allow_empty=None):
    """returns a set of typedef declarations that match the defined criteria"""
    return (
        self._find_multiple(
            self._impl_matchers[scopedef_t.typedef],
            name=name,
            function=function,
            decl_type=self._impl_decl_types[
                scopedef_t.typedef],
            header_dir=header_dir,
            header_file=header_file,
            recursive=recursive,
            allow_empty=allow_empty)
    )
python
def typedefs(
        self,
        name=None,
        function=None,
        header_dir=None,
        header_file=None,
        recursive=None,
        allow_empty=None):
    """returns a set of typedef declarations that match the defined criteria"""
    return (
        self._find_multiple(
            self._impl_matchers[scopedef_t.typedef],
            name=name,
            function=function,
            decl_type=self._impl_decl_types[
                scopedef_t.typedef],
            header_dir=header_dir,
            header_file=header_file,
            recursive=recursive,
            allow_empty=allow_empty)
    )
['def', 'typedefs', '(', 'self', ',', 'name', '=', 'None', ',', 'function', '=', 'None', ',', 'header_dir', '=', 'None', ',', 'header_file', '=', 'None', ',', 'recursive', '=', 'None', ',', 'allow_empty', '=', 'None', ')', ':', 'return', '(', 'self', '.', '_find_multiple', '(', 'self', '.', '_impl_matchers', '[', 'scopedef_t', '.', 'typedef', ']', ',', 'name', '=', 'name', ',', 'function', '=', 'function', ',', 'decl_type', '=', 'self', '.', '_impl_decl_types', '[', 'scopedef_t', '.', 'typedef', ']', ',', 'header_dir', '=', 'header_dir', ',', 'header_file', '=', 'header_file', ',', 'recursive', '=', 'recursive', ',', 'allow_empty', '=', 'allow_empty', ')', ')']
returns a set of typedef declarations that match the defined criteria
['returns', 'a', 'set', 'of', 'typedef', 'declarations', 'that', 'are', 'matched', 'defined', 'criteria']
train
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/declarations/scopedef.py#L1023-L1044
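A query sketch against an already parsed scope; building global_ns (e.g. the global namespace obtained after parsing headers) is elided, and the typedef name is illustrative.

def find_size_typedefs(global_ns):
    # allow_empty=True keeps the query from raising when nothing matches.
    for td in global_ns.typedefs(name='size_type', allow_empty=True):
        print(td)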
2,333
pulumi/pulumi
sdk/python/lib/pulumi/log.py
debug
def debug(msg: str, resource: Optional['Resource'] = None, stream_id: Optional[int] = None) -> None: """ Logs a message to the Pulumi CLI's debug channel, associating it with a resource and stream_id if provided. :param str msg: The message to send to the Pulumi CLI. :param Optional[Resource] resource: If provided, associate this message with the given resource in the Pulumi CLI. :param Optional[int] stream_id: If provided, associate this message with a stream of other messages. """ engine = get_engine() if engine is not None: _log(engine, engine_pb2.DEBUG, msg, resource, stream_id) else: print("debug: " + msg, file=sys.stderr)
python
def debug(msg: str, resource: Optional['Resource'] = None, stream_id: Optional[int] = None) -> None: """ Logs a message to the Pulumi CLI's debug channel, associating it with a resource and stream_id if provided. :param str msg: The message to send to the Pulumi CLI. :param Optional[Resource] resource: If provided, associate this message with the given resource in the Pulumi CLI. :param Optional[int] stream_id: If provided, associate this message with a stream of other messages. """ engine = get_engine() if engine is not None: _log(engine, engine_pb2.DEBUG, msg, resource, stream_id) else: print("debug: " + msg, file=sys.stderr)
['def', 'debug', '(', 'msg', ':', 'str', ',', 'resource', ':', 'Optional', '[', "'Resource'", ']', '=', 'None', ',', 'stream_id', ':', 'Optional', '[', 'int', ']', '=', 'None', ')', '->', 'None', ':', 'engine', '=', 'get_engine', '(', ')', 'if', 'engine', 'is', 'not', 'None', ':', '_log', '(', 'engine', ',', 'engine_pb2', '.', 'DEBUG', ',', 'msg', ',', 'resource', ',', 'stream_id', ')', 'else', ':', 'print', '(', '"debug: "', '+', 'msg', ',', 'file', '=', 'sys', '.', 'stderr', ')']
Logs a message to the Pulumi CLI's debug channel, associating it with a resource and stream_id if provided. :param str msg: The message to send to the Pulumi CLI. :param Optional[Resource] resource: If provided, associate this message with the given resource in the Pulumi CLI. :param Optional[int] stream_id: If provided, associate this message with a stream of other messages.
['Logs', 'a', 'message', 'to', 'the', 'Pulumi', 'CLI', 's', 'debug', 'channel', 'associating', 'it', 'with', 'a', 'resource', 'and', 'stream_id', 'if', 'provided', '.']
train
https://github.com/pulumi/pulumi/blob/95d51efe6ab9a533838b6d83aa240b5f912e72aa/sdk/python/lib/pulumi/log.py#L29-L42
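A usage sketch inside a Pulumi program; outside a running engine the message falls back to stderr, per the else branch above.

from pulumi import log

log.debug('provisioning started')
# Optionally tie the message to a resource (my_bucket is hypothetical):
# log.debug('bucket created', resource=my_bucket)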
2,334
django-fluent/django-fluent-comments
fluent_comments/templatetags/fluent_comments_tags.py
AjaxCommentTags.parse
def parse(cls, parser, token): """ Custom parsing for the ``{% ajax_comment_tags for ... %}`` tag. """ # Process the template line. tag_name, args, kwargs = parse_token_kwargs( parser, token, allowed_kwargs=cls.allowed_kwargs, compile_args=False, # Only overrule here, keep at render() phase. compile_kwargs=cls.compile_kwargs ) # remove "for" keyword, so all other args can be resolved in render(). if args[0] == 'for': args.pop(0) # And apply the compilation afterwards for i in range(len(args)): args[i] = parser.compile_filter(args[i]) cls.validate_args(tag_name, *args, **kwargs) return cls(tag_name, *args, **kwargs)
python
def parse(cls, parser, token): """ Custom parsing for the ``{% ajax_comment_tags for ... %}`` tag. """ # Process the template line. tag_name, args, kwargs = parse_token_kwargs( parser, token, allowed_kwargs=cls.allowed_kwargs, compile_args=False, # Only overrule here, keep at render() phase. compile_kwargs=cls.compile_kwargs ) # remove "for" keyword, so all other args can be resolved in render(). if args[0] == 'for': args.pop(0) # And apply the compilation afterwards for i in range(len(args)): args[i] = parser.compile_filter(args[i]) cls.validate_args(tag_name, *args, **kwargs) return cls(tag_name, *args, **kwargs)
['def', 'parse', '(', 'cls', ',', 'parser', ',', 'token', ')', ':', '# Process the template line.', 'tag_name', ',', 'args', ',', 'kwargs', '=', 'parse_token_kwargs', '(', 'parser', ',', 'token', ',', 'allowed_kwargs', '=', 'cls', '.', 'allowed_kwargs', ',', 'compile_args', '=', 'False', ',', '# Only overrule here, keep at render() phase.', 'compile_kwargs', '=', 'cls', '.', 'compile_kwargs', ')', '# remove "for" keyword, so all other args can be resolved in render().', 'if', 'args', '[', '0', ']', '==', "'for'", ':', 'args', '.', 'pop', '(', '0', ')', '# And apply the compilation afterwards', 'for', 'i', 'in', 'range', '(', 'len', '(', 'args', ')', ')', ':', 'args', '[', 'i', ']', '=', 'parser', '.', 'compile_filter', '(', 'args', '[', 'i', ']', ')', 'cls', '.', 'validate_args', '(', 'tag_name', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'cls', '(', 'tag_name', ',', '*', 'args', ',', '*', '*', 'kwargs', ')']
Custom parsing for the ``{% ajax_comment_tags for ... %}`` tag.
['Custom', 'parsing', 'for', 'the', '{', '%', 'ajax_comment_tags', 'for', '...', '%', '}', 'tag', '.']
train
https://github.com/django-fluent/django-fluent-comments/blob/bfe98d55b56fedd8ca2e2659eed53a6390e53adf/fluent_comments/templatetags/fluent_comments_tags.py#L26-L47
2,335
saltstack/salt
salt/modules/panos.py
set_timezone
def set_timezone(tz=None, deploy=False): ''' Set the timezone of the Palo Alto proxy minion. A commit will be required before this is processed. CLI Example: Args: tz (str): The name of the timezone to set. deploy (bool): If true then commit the full candidate configuration, if false only set pending change. .. code-block:: bash salt '*' panos.set_timezone UTC salt '*' panos.set_timezone UTC deploy=True ''' if not tz: raise CommandExecutionError("Timezone name option must not be none.") ret = {} query = {'type': 'config', 'action': 'set', 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/timezone', 'element': '<timezone>{0}</timezone>'.format(tz)} ret.update(__proxy__['panos.call'](query)) if deploy is True: ret.update(commit()) return ret
python
def set_timezone(tz=None, deploy=False): ''' Set the timezone of the Palo Alto proxy minion. A commit will be required before this is processed. CLI Example: Args: tz (str): The name of the timezone to set. deploy (bool): If true then commit the full candidate configuration, if false only set pending change. .. code-block:: bash salt '*' panos.set_timezone UTC salt '*' panos.set_timezone UTC deploy=True ''' if not tz: raise CommandExecutionError("Timezone name option must not be none.") ret = {} query = {'type': 'config', 'action': 'set', 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/timezone', 'element': '<timezone>{0}</timezone>'.format(tz)} ret.update(__proxy__['panos.call'](query)) if deploy is True: ret.update(commit()) return ret
['def', 'set_timezone', '(', 'tz', '=', 'None', ',', 'deploy', '=', 'False', ')', ':', 'if', 'not', 'tz', ':', 'raise', 'CommandExecutionError', '(', '"Timezone name option must not be none."', ')', 'ret', '=', '{', '}', 'query', '=', '{', "'type'", ':', "'config'", ',', "'action'", ':', "'set'", ',', "'xpath'", ':', "'/config/devices/entry[@name=\\'localhost.localdomain\\']/deviceconfig/system/timezone'", ',', "'element'", ':', "'<timezone>{0}</timezone>'", '.', 'format', '(', 'tz', ')', '}', 'ret', '.', 'update', '(', '__proxy__', '[', "'panos.call'", ']', '(', 'query', ')', ')', 'if', 'deploy', 'is', 'True', ':', 'ret', '.', 'update', '(', 'commit', '(', ')', ')', 'return', 'ret']
Set the timezone of the Palo Alto proxy minion. A commit will be required before this is processed. CLI Example: Args: tz (str): The name of the timezone to set. deploy (bool): If true then commit the full candidate configuration, if false only set pending change. .. code-block:: bash salt '*' panos.set_timezone UTC salt '*' panos.set_timezone UTC deploy=True
['Set', 'the', 'timezone', 'of', 'the', 'Palo', 'Alto', 'proxy', 'minion', '.', 'A', 'commit', 'will', 'be', 'required', 'before', 'this', 'is', 'processed', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/panos.py#L2150-L2183
2,336
PythonCharmers/python-future
src/future/standard_library/__init__.py
detect_hooks
def detect_hooks(): """ Returns True if the import hooks are installed, False if not. """ flog.debug('Detecting hooks ...') present = any([hasattr(hook, 'RENAMER') for hook in sys.meta_path]) if present: flog.debug('Detected.') else: flog.debug('Not detected.') return present
python
def detect_hooks(): """ Returns True if the import hooks are installed, False if not. """ flog.debug('Detecting hooks ...') present = any([hasattr(hook, 'RENAMER') for hook in sys.meta_path]) if present: flog.debug('Detected.') else: flog.debug('Not detected.') return present
['def', 'detect_hooks', '(', ')', ':', 'flog', '.', 'debug', '(', "'Detecting hooks ...'", ')', 'present', '=', 'any', '(', '[', 'hasattr', '(', 'hook', ',', "'RENAMER'", ')', 'for', 'hook', 'in', 'sys', '.', 'meta_path', ']', ')', 'if', 'present', ':', 'flog', '.', 'debug', '(', "'Detected.'", ')', 'else', ':', 'flog', '.', 'debug', '(', "'Not detected.'", ')', 'return', 'present']
Returns True if the import hooks are installed, False if not.
['Returns', 'True', 'if', 'the', 'import', 'hooks', 'are', 'installed', 'False', 'if', 'not', '.']
train
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/standard_library/__init__.py#L579-L589
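A direct-call sketch; the function only inspects sys.meta_path, so it is safe to call at any time.

from future import standard_library

print(standard_library.detect_hooks())  # True once import hooks are on sys.meta_path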
2,337
sbg/sevenbridges-python
sevenbridges/models/project.py
Project.add_files
def add_files(self, files): """ Adds files to this project. :param files: List of files or a Collection object. """ for file in files: file.copy(project=self.id)
python
def add_files(self, files): """ Adds files to this project. :param files: List of files or a Collection object. """ for file in files: file.copy(project=self.id)
['def', 'add_files', '(', 'self', ',', 'files', ')', ':', 'for', 'file', 'in', 'files', ':', 'file', '.', 'copy', '(', 'project', '=', 'self', '.', 'id', ')']
Adds files to this project. :param files: List of files or a Collection object.
['Adds', 'files', 'to', 'this', 'project', '.', ':', 'param', 'files', ':', 'List', 'of', 'files', 'or', 'a', 'Collection', 'object', '.']
train
https://github.com/sbg/sevenbridges-python/blob/f62640d1018d959f0b686f2dbe5e183085336607/sevenbridges/models/project.py#L321-L327
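A sketch; the authenticated Api client and the projects/files lookup helpers are assumptions based on the library's usual patterns, not confirmed by the record.

def copy_inputs(api, project_id, file_ids):
    # api: an authenticated sevenbridges.Api instance (assumed)
    project = api.projects.get(id=project_id)
    files = [api.files.get(id=f) for f in file_ids]
    project.add_files(files)  # each file is copied into the project, as above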
2,338
nicolargo/glances
glances/plugins/glances_irq.py
GlancesIRQ.__header
def __header(self, line):
    """Build the header (contains the number of CPUs).

    CPU0 CPU1 CPU2 CPU3
    0: 21 0 0 0 IO-APIC 2-edge timer
    """
    self.cpu_number = len(line.split())
    return self.cpu_number
python
def __header(self, line):
    """Build the header (contains the number of CPUs).

    CPU0 CPU1 CPU2 CPU3
    0: 21 0 0 0 IO-APIC 2-edge timer
    """
    self.cpu_number = len(line.split())
    return self.cpu_number
['def', '__header', '(', 'self', ',', 'line', ')', ':', 'self', '.', 'cpu_number', '=', 'len', '(', 'line', '.', 'split', '(', ')', ')', 'return', 'self', '.', 'cpu_number']
Build the header (contains the number of CPUs). CPU0 CPU1 CPU2 CPU3 0: 21 0 0 0 IO-APIC 2-edge timer
['Build', 'the', 'header', '(', 'contain', 'the', 'number', 'of', 'CPU', ')', '.']
train
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_irq.py#L142-L149
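The header parse is nothing more than counting whitespace-separated CPU columns on the first line of /proc/interrupts; a standalone sketch:

header = "           CPU0       CPU1       CPU2       CPU3"
cpu_number = len(header.split())  # split() collapses runs of whitespace
print(cpu_number)                 # 4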
2,339
upgrad/django-deletes
djangodeletes/softdeletes/models.py
SoftDeletable.restore
def restore(self, time=None): """ Undeletes the object. Returns True if undeleted, False if it was already not deleted """ if self.deleted: time = time if time else self.deleted_at if time == self.deleted_at: self.deleted = False self.save() return True else: return False return False
python
def restore(self, time=None): """ Undeletes the object. Returns True if undeleted, False if it was already not deleted """ if self.deleted: time = time if time else self.deleted_at if time == self.deleted_at: self.deleted = False self.save() return True else: return False return False
['def', 'restore', '(', 'self', ',', 'time', '=', 'None', ')', ':', 'if', 'self', '.', 'deleted', ':', 'time', '=', 'time', 'if', 'time', 'else', 'self', '.', 'deleted_at', 'if', 'time', '==', 'self', '.', 'deleted_at', ':', 'self', '.', 'deleted', '=', 'False', 'self', '.', 'save', '(', ')', 'return', 'True', 'else', ':', 'return', 'False', 'return', 'False']
Undeletes the object. Returns True if undeleted, False if it was already not deleted
['Undeletes', 'the', 'object', '.', 'Returns', 'True', 'if', 'undeleted', 'False', 'if', 'it', 'was', 'already', 'not', 'deleted']
train
https://github.com/upgrad/django-deletes/blob/05cebc3323840badc67b926ec1ba2640d6cd12be/djangodeletes/softdeletes/models.py#L97-L109
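The restore() guard only undeletes when the supplied timestamp matches the stored deletion timestamp (or none is given, in which case it defaults to the stored one). A stub sketch with save() reduced to a no-op, since the real mixin persists through Django's Model.save():

class SoftDeletableStub:
    def __init__(self, deleted, deleted_at):
        self.deleted, self.deleted_at = deleted, deleted_at

    def save(self):
        pass  # the real mixin writes to the database here

    def restore(self, time=None):
        if self.deleted:
            time = time if time else self.deleted_at
            if time == self.deleted_at:
                self.deleted = False
                self.save()
                return True
            return False
        return False

obj = SoftDeletableStub(deleted=True, deleted_at=1700000000)
print(obj.restore(time=1))  # False: timestamp does not match
print(obj.restore())        # True: defaults to the stored deleted_at
print(obj.restore())        # False: already restored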
2,340
skulumani/kinematics
kinematics/sphere.py
perturb_vec
def perturb_vec(q, cone_half_angle=2): r"""Perturb a vector randomly qp = perturb_vec(q, cone_half_angle=2) Parameters ---------- q : (n,) numpy array Vector to perturb cone_half_angle : float Maximum angle to perturb the vector in degrees Returns ------- perturbed : (n,) numpy array Perturbed numpy array Author ------ Shankar Kulumani GWU [email protected] References ---------- .. [1] https://stackoverflow.com/questions/2659257/perturb-vector-by-some-angle """ rand_vec = tan_rand(q) cross_vector = attitude.unit_vector(np.cross(q, rand_vec)) s = np.random.uniform(0, 1, 1) r = np.random.uniform(0, 1, 1) h = np.cos(np.deg2rad(cone_half_angle)) phi = 2 * np.pi * s z = h + ( 1- h) * r sinT = np.sqrt(1 - z**2) x = np.cos(phi) * sinT y = np.sin(phi) * sinT perturbed = rand_vec * x + cross_vector * y + q * z return perturbed
python
def perturb_vec(q, cone_half_angle=2): r"""Perturb a vector randomly qp = perturb_vec(q, cone_half_angle=2) Parameters ---------- q : (n,) numpy array Vector to perturb cone_half_angle : float Maximum angle to perturb the vector in degrees Returns ------- perturbed : (n,) numpy array Perturbed numpy array Author ------ Shankar Kulumani GWU [email protected] References ---------- .. [1] https://stackoverflow.com/questions/2659257/perturb-vector-by-some-angle """ rand_vec = tan_rand(q) cross_vector = attitude.unit_vector(np.cross(q, rand_vec)) s = np.random.uniform(0, 1, 1) r = np.random.uniform(0, 1, 1) h = np.cos(np.deg2rad(cone_half_angle)) phi = 2 * np.pi * s z = h + ( 1- h) * r sinT = np.sqrt(1 - z**2) x = np.cos(phi) * sinT y = np.sin(phi) * sinT perturbed = rand_vec * x + cross_vector * y + q * z return perturbed
['def', 'perturb_vec', '(', 'q', ',', 'cone_half_angle', '=', '2', ')', ':', 'rand_vec', '=', 'tan_rand', '(', 'q', ')', 'cross_vector', '=', 'attitude', '.', 'unit_vector', '(', 'np', '.', 'cross', '(', 'q', ',', 'rand_vec', ')', ')', 's', '=', 'np', '.', 'random', '.', 'uniform', '(', '0', ',', '1', ',', '1', ')', 'r', '=', 'np', '.', 'random', '.', 'uniform', '(', '0', ',', '1', ',', '1', ')', 'h', '=', 'np', '.', 'cos', '(', 'np', '.', 'deg2rad', '(', 'cone_half_angle', ')', ')', 'phi', '=', '2', '*', 'np', '.', 'pi', '*', 's', 'z', '=', 'h', '+', '(', '1', '-', 'h', ')', '*', 'r', 'sinT', '=', 'np', '.', 'sqrt', '(', '1', '-', 'z', '**', '2', ')', 'x', '=', 'np', '.', 'cos', '(', 'phi', ')', '*', 'sinT', 'y', '=', 'np', '.', 'sin', '(', 'phi', ')', '*', 'sinT', 'perturbed', '=', 'rand_vec', '*', 'x', '+', 'cross_vector', '*', 'y', '+', 'q', '*', 'z', 'return', 'perturbed']
r"""Perturb a vector randomly qp = perturb_vec(q, cone_half_angle=2) Parameters ---------- q : (n,) numpy array Vector to perturb cone_half_angle : float Maximum angle to perturb the vector in degrees Returns ------- perturbed : (n,) numpy array Perturbed numpy array Author ------ Shankar Kulumani GWU [email protected] References ---------- .. [1] https://stackoverflow.com/questions/2659257/perturb-vector-by-some-angle
['r', 'Perturb', 'a', 'vector', 'randomly']
train
https://github.com/skulumani/kinematics/blob/e8cb45efb40539982025ed0f85d6561f9f10fef0/kinematics/sphere.py#L66-L109
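The key step in perturb_vec is drawing z = h + (1 - h) * r with h = cos(theta): surface area on the unit sphere is uniform in z, so this samples the spherical cap of half-angle theta uniformly. A numpy-only sketch in which tan_rand() and attitude.unit_vector() are replaced by a plain Gram-Schmidt tangent basis (function and variable names here are invented):

import numpy as np

def perturb(q, cone_half_angle_deg=2.0, rng=np.random.default_rng()):
    q = q / np.linalg.norm(q)
    # build a unit vector tangent to q (Gram-Schmidt against a helper axis)
    helper = np.array([1.0, 0.0, 0.0])
    if abs(q @ helper) > 0.9:
        helper = np.array([0.0, 1.0, 0.0])
    t1 = helper - (helper @ q) * q
    t1 /= np.linalg.norm(t1)
    t2 = np.cross(q, t1)
    # z uniform on [cos(theta), 1] samples the spherical cap uniformly
    h = np.cos(np.deg2rad(cone_half_angle_deg))
    z = h + (1.0 - h) * rng.uniform()
    phi = 2.0 * np.pi * rng.uniform()
    sin_t = np.sqrt(1.0 - z * z)
    return t1 * np.cos(phi) * sin_t + t2 * np.sin(phi) * sin_t + q * z

v = perturb(np.array([0.0, 0.0, 1.0]))
print(np.degrees(np.arccos(v[2])))  # always <= 2 degrees off the pole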
2,341
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Verbs.tenses
def tenses(self, verb, parse=True): """ Returns a list of possible tenses for the given inflected verb. """ verb = verb.lower() a = set() b = self.lemma(verb, parse=parse) v = [] if b in self: v = self[b] elif parse is True: # rule-based v = self.find_lexeme(b) # For each tense in the verb lexeme that matches the given tense, # 1) retrieve the tense tuple, # 2) retrieve the tense tuples for which that tense is a default. for i, tense in enumerate(v): if tense == verb: for id, index in self._format.items(): if i == index: a.add(id) for id1, id2 in self._default.items(): if id2 in a: a.add(id1) for id1, id2 in self._default.items(): if id2 in a: a.add(id1) a = (TENSES[id][:-2] for id in a) a = Tenses(sorted(a)) return a
python
def tenses(self, verb, parse=True): """ Returns a list of possible tenses for the given inflected verb. """ verb = verb.lower() a = set() b = self.lemma(verb, parse=parse) v = [] if b in self: v = self[b] elif parse is True: # rule-based v = self.find_lexeme(b) # For each tense in the verb lexeme that matches the given tense, # 1) retrieve the tense tuple, # 2) retrieve the tense tuples for which that tense is a default. for i, tense in enumerate(v): if tense == verb: for id, index in self._format.items(): if i == index: a.add(id) for id1, id2 in self._default.items(): if id2 in a: a.add(id1) for id1, id2 in self._default.items(): if id2 in a: a.add(id1) a = (TENSES[id][:-2] for id in a) a = Tenses(sorted(a)) return a
['def', 'tenses', '(', 'self', ',', 'verb', ',', 'parse', '=', 'True', ')', ':', 'verb', '=', 'verb', '.', 'lower', '(', ')', 'a', '=', 'set', '(', ')', 'b', '=', 'self', '.', 'lemma', '(', 'verb', ',', 'parse', '=', 'parse', ')', 'v', '=', '[', ']', 'if', 'b', 'in', 'self', ':', 'v', '=', 'self', '[', 'b', ']', 'elif', 'parse', 'is', 'True', ':', '# rule-based', 'v', '=', 'self', '.', 'find_lexeme', '(', 'b', ')', '# For each tense in the verb lexeme that matches the given tense,', '# 1) retrieve the tense tuple,', '# 2) retrieve the tense tuples for which that tense is a default.', 'for', 'i', ',', 'tense', 'in', 'enumerate', '(', 'v', ')', ':', 'if', 'tense', '==', 'verb', ':', 'for', 'id', ',', 'index', 'in', 'self', '.', '_format', '.', 'items', '(', ')', ':', 'if', 'i', '==', 'index', ':', 'a', '.', 'add', '(', 'id', ')', 'for', 'id1', ',', 'id2', 'in', 'self', '.', '_default', '.', 'items', '(', ')', ':', 'if', 'id2', 'in', 'a', ':', 'a', '.', 'add', '(', 'id1', ')', 'for', 'id1', ',', 'id2', 'in', 'self', '.', '_default', '.', 'items', '(', ')', ':', 'if', 'id2', 'in', 'a', ':', 'a', '.', 'add', '(', 'id1', ')', 'a', '=', '(', 'TENSES', '[', 'id', ']', '[', ':', '-', '2', ']', 'for', 'id', 'in', 'a', ')', 'a', '=', 'Tenses', '(', 'sorted', '(', 'a', ')', ')', 'return', 'a']
Returns a list of possible tenses for the given inflected verb.
['Returns', 'a', 'list', 'of', 'possible', 'tenses', 'for', 'the', 'given', 'inflected', 'verb', '.']
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L1762-L1789
2,342
Alignak-monitoring/alignak
alignak/external_command.py
ExternalCommandManager.schedule_forced_svc_check
def schedule_forced_svc_check(self, service, check_time): """Schedule a forced check on a service Format of the line that triggers function call:: SCHEDULE_FORCED_SVC_CHECK;<host_name>;<service_description>;<check_time> :param service: service to check :type service: alignak.object.service.Service :param check_time: time to check :type check_time: int :return: None """ service.schedule(self.daemon.hosts, self.daemon.services, self.daemon.timeperiods, self.daemon.macromodulations, self.daemon.checkmodulations, self.daemon.checks, force=True, force_time=check_time) self.send_an_element(service.get_update_status_brok())
python
def schedule_forced_svc_check(self, service, check_time): """Schedule a forced check on a service Format of the line that triggers function call:: SCHEDULE_FORCED_SVC_CHECK;<host_name>;<service_description>;<check_time> :param service: service to check :type service: alignak.object.service.Service :param check_time: time to check :type check_time: int :return: None """ service.schedule(self.daemon.hosts, self.daemon.services, self.daemon.timeperiods, self.daemon.macromodulations, self.daemon.checkmodulations, self.daemon.checks, force=True, force_time=check_time) self.send_an_element(service.get_update_status_brok())
['def', 'schedule_forced_svc_check', '(', 'self', ',', 'service', ',', 'check_time', ')', ':', 'service', '.', 'schedule', '(', 'self', '.', 'daemon', '.', 'hosts', ',', 'self', '.', 'daemon', '.', 'services', ',', 'self', '.', 'daemon', '.', 'timeperiods', ',', 'self', '.', 'daemon', '.', 'macromodulations', ',', 'self', '.', 'daemon', '.', 'checkmodulations', ',', 'self', '.', 'daemon', '.', 'checks', ',', 'force', '=', 'True', ',', 'force_time', '=', 'check_time', ')', 'self', '.', 'send_an_element', '(', 'service', '.', 'get_update_status_brok', '(', ')', ')']
Schedule a forced check on a service Format of the line that triggers function call:: SCHEDULE_FORCED_SVC_CHECK;<host_name>;<service_description>;<check_time> :param service: service to check :type service: alignak.object.service.Service :param check_time: time to check :type check_time: int :return: None
['Schedule', 'a', 'forced', 'check', 'on', 'a', 'service', 'Format', 'of', 'the', 'line', 'that', 'triggers', 'function', 'call', '::']
train
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L3489-L3505
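For context, the handler above is driven by a monitoring external-command line in the format the docstring gives; building one is plain string formatting (host and service names below are placeholders):

import time

host, svc = 'web01', 'HTTP'
check_time = int(time.time())  # unix epoch, the docstring's <check_time>
cmd = 'SCHEDULE_FORCED_SVC_CHECK;%s;%s;%d' % (host, svc, check_time)
print(cmd)  # e.g. SCHEDULE_FORCED_SVC_CHECK;web01;HTTP;1700000000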
2,343
fishtown-analytics/dbt
core/dbt/clients/_jinja_blocks.py
BlockIterator._process_macro_args
def _process_macro_args(self): """Macro args are pretty tricky! Arg names themselves are simple, but you can set arbitrary default values, including doing stuff like: {% macro my_macro(arg="x" + ("}% {# {% endmacro %}" * 2)) %} Which makes you a jerk, but is valid jinja. """ # we are currently after the first parenthesis (+ any whitespace) after # the macro args started. You can either have the close paren, or a # name. while self._parenthesis_stack: match = self._expect_match('macro arguments', MACRO_ARGS_END_PATTERN, MACRO_ARG_PATTERN) self.advance(match.end()) matchgroups = match.groupdict() if matchgroups.get('macro_end') is not None: self._parenthesis_stack.pop() # we got an argument. let's see what it has elif matchgroups.get('value') is not None: # we have to process a single macro argument. This mutates # the parenthesis stack! If it finds a comma, it will continue # the loop. self._process_macro_default_arg() elif matchgroups.get('more_args') is not None: continue else: raise dbt.exceptions.InternalException( 'unhandled regex in _process_macro_args(), no match: {}' .format(matchgroups) )
python
def _process_macro_args(self): """Macro args are pretty tricky! Arg names themselves are simple, but you can set arbitrary default values, including doing stuff like: {% macro my_macro(arg="x" + ("}% {# {% endmacro %}" * 2)) %} Which makes you a jerk, but is valid jinja. """ # we are currently after the first parenthesis (+ any whitespace) after # the macro args started. You can either have the close paren, or a # name. while self._parenthesis_stack: match = self._expect_match('macro arguments', MACRO_ARGS_END_PATTERN, MACRO_ARG_PATTERN) self.advance(match.end()) matchgroups = match.groupdict() if matchgroups.get('macro_end') is not None: self._parenthesis_stack.pop() # we got an argument. let's see what it has elif matchgroups.get('value') is not None: # we have to process a single macro argument. This mutates # the parenthesis stack! If it finds a comma, it will continue # the loop. self._process_macro_default_arg() elif matchgroups.get('more_args') is not None: continue else: raise dbt.exceptions.InternalException( 'unhandled regex in _process_macro_args(), no match: {}' .format(matchgroups) )
['def', '_process_macro_args', '(', 'self', ')', ':', '# we are currently after the first parenthesis (+ any whitespace) after', '# the macro args started. You can either have the close paren, or a', '# name.', 'while', 'self', '.', '_parenthesis_stack', ':', 'match', '=', 'self', '.', '_expect_match', '(', "'macro arguments'", ',', 'MACRO_ARGS_END_PATTERN', ',', 'MACRO_ARG_PATTERN', ')', 'self', '.', 'advance', '(', 'match', '.', 'end', '(', ')', ')', 'matchgroups', '=', 'match', '.', 'groupdict', '(', ')', 'if', 'matchgroups', '.', 'get', '(', "'macro_end'", ')', 'is', 'not', 'None', ':', 'self', '.', '_parenthesis_stack', '.', 'pop', '(', ')', "# we got an argument. let's see what it has", 'elif', 'matchgroups', '.', 'get', '(', "'value'", ')', 'is', 'not', 'None', ':', '# we have to process a single macro argument. This mutates', '# the parenthesis stack! If it finds a comma, it will continue', '# the loop.', 'self', '.', '_process_macro_default_arg', '(', ')', 'elif', 'matchgroups', '.', 'get', '(', "'more_args'", ')', 'is', 'not', 'None', ':', 'continue', 'else', ':', 'raise', 'dbt', '.', 'exceptions', '.', 'InternalException', '(', "'unhandled regex in _process_macro_args(), no match: {}'", '.', 'format', '(', 'matchgroups', ')', ')']
Macro args are pretty tricky! Arg names themselves are simple, but you can set arbitrary default values, including doing stuff like: {% macro my_macro(arg="x" + ("}% {# {% endmacro %}" * 2)) %} Which makes you a jerk, but is valid jinja.
['Macro', 'args', 'are', 'pretty', 'tricky!', 'Arg', 'names', 'themselves', 'are', 'simple', 'but', 'you', 'can', 'set', 'arbitrary', 'default', 'values', 'including', 'doing', 'stuff', 'like', ':', '{', '%', 'macro', 'my_macro', '(', 'arg', '=', 'x', '+', '(', '}', '%', '{', '#', '{', '%', 'endmacro', '%', '}', '*', '2', '))', '%', '}']
train
https://github.com/fishtown-analytics/dbt/blob/aa4f771df28b307af0cf9fe2fc24432f10a8236b/core/dbt/clients/_jinja_blocks.py#L437-L467
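The docstring's point is that default values may themselves contain parentheses and jinja-looking text, so the scanner keeps a parenthesis stack rather than grabbing the first ')'. A toy version of that idea (the real parser additionally has to skip quoted strings, which is exactly why the docstring's example is nasty):

def find_args_end(text):
    depth = 1  # we enter just after the opening '(' of the macro
    for i, ch in enumerate(text):
        if ch == '(':
            depth += 1
        elif ch == ')':
            depth -= 1
            if depth == 0:
                return i
    raise ValueError('unbalanced macro arguments')

print(find_args_end('arg="x" + ("y" * 2)) %}'))  # 19 -> the closing paren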
2,344
pkgw/pwkit
pwkit/environments/__init__.py
prepend_path
def prepend_path(orig, text, pathsep=os.pathsep): """Returns a $PATH-like environment variable with `text` prepended. `orig` is the original variable value, or None. `pathsep` is the character separating path elements, defaulting to `os.pathsep`. Example: newpath = cli.prepend_path(oldpath, '/mypackage/bin') See also `prepend_environ_path`. """ if orig is None: orig = '' if not len(orig): return text return ''.join([text, pathsep, orig])
python
def prepend_path(orig, text, pathsep=os.pathsep): """Returns a $PATH-like environment variable with `text` prepended. `orig` is the original variable value, or None. `pathsep` is the character separating path elements, defaulting to `os.pathsep`. Example: newpath = cli.prepend_path(oldpath, '/mypackage/bin') See also `prepend_environ_path`. """ if orig is None: orig = '' if not len(orig): return text return ''.join([text, pathsep, orig])
['def', 'prepend_path', '(', 'orig', ',', 'text', ',', 'pathsep', '=', 'os', '.', 'pathsep', ')', ':', 'if', 'orig', 'is', 'None', ':', 'orig', '=', "''", 'if', 'not', 'len', '(', 'orig', ')', ':', 'return', 'text', 'return', "''", '.', 'join', '(', '[', 'text', ',', 'pathsep', ',', 'orig', ']', ')']
Returns a $PATH-like environment variable with `text` prepended. `orig` is the original variable value, or None. `pathsep` is the character separating path elements, defaulting to `os.pathsep`. Example: newpath = cli.prepend_path(oldpath, '/mypackage/bin') See also `prepend_environ_path`.
['Returns', 'a', '$PATH', '-', 'like', 'environment', 'variable', 'with', 'text', 'prepended', '.', 'orig', 'is', 'the', 'original', 'variable', 'value', 'or', 'None', '.', 'pathsep', 'is', 'the', 'character', 'separating', 'path', 'elements', 'defaulting', 'to', 'os', '.', 'pathsep', '.']
train
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/environments/__init__.py#L94-L110
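prepend_path is small enough to exercise directly; the copy below pins pathsep to ':' for readability where the original defaults to os.pathsep:

def prepend_path(orig, text, pathsep=':'):
    if orig is None:
        orig = ''
    if not len(orig):
        return text
    return ''.join([text, pathsep, orig])

print(prepend_path(None, '/mypackage/bin'))             # /mypackage/bin
print(prepend_path('/usr/bin:/bin', '/mypackage/bin'))  # /mypackage/bin:/usr/bin:/bin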
2,345
dogoncouch/logdissect
logdissect/parsers/type.py
ParseModule.parse_line
def parse_line(self, line): """Parse a line into a dictionary""" match = re.findall(self.date_regex, line) if match: fields = self.fields elif self.backup_format_regex and not match: match = re.findall(self.backup_date_regex, line) fields = self.backup_fields if match: entry = {} entry['raw_text'] = line entry['parser'] = self.name matchlist = list(zip(fields, match[0])) for f, v in matchlist: entry[f] = v if 'date_stamp' in entry.keys(): if self.datestamp_type == 'standard': entry = logdissect.utils.convert_standard_datestamp(entry) elif self.datestamp_type == 'iso': entry = logdissect.utils.convert_iso_datestamp( entry) elif self.datestamp_type == 'webaccess': entry = logdissect.utils.convert_webaccess_datestamp( entry) elif self.datestamp_type == 'nodate': entry, self.datedata = \ logdissect.utils.convert_nodate_datestamp( entry, self.datedata) elif self.datestamp_type == 'unix': entry = logdissect.utils.convert_unix_datestamp( entry) if self.datestamp_type == 'now': entry = logdissect.utils.convert_now_datestamp( entry) entry = self.post_parse_action(entry) return entry else: return None
python
def parse_line(self, line): """Parse a line into a dictionary""" match = re.findall(self.date_regex, line) if match: fields = self.fields elif self.backup_format_regex and not match: match = re.findall(self.backup_date_regex, line) fields = self.backup_fields if match: entry = {} entry['raw_text'] = line entry['parser'] = self.name matchlist = list(zip(fields, match[0])) for f, v in matchlist: entry[f] = v if 'date_stamp' in entry.keys(): if self.datestamp_type == 'standard': entry = logdissect.utils.convert_standard_datestamp(entry) elif self.datestamp_type == 'iso': entry = logdissect.utils.convert_iso_datestamp( entry) elif self.datestamp_type == 'webaccess': entry = logdissect.utils.convert_webaccess_datestamp( entry) elif self.datestamp_type == 'nodate': entry, self.datedata = \ logdissect.utils.convert_nodate_datestamp( entry, self.datedata) elif self.datestamp_type == 'unix': entry = logdissect.utils.convert_unix_datestamp( entry) if self.datestamp_type == 'now': entry = logdissect.utils.convert_now_datestamp( entry) entry = self.post_parse_action(entry) return entry else: return None
['def', 'parse_line', '(', 'self', ',', 'line', ')', ':', 'match', '=', 're', '.', 'findall', '(', 'self', '.', 'date_regex', ',', 'line', ')', 'if', 'match', ':', 'fields', '=', 'self', '.', 'fields', 'elif', 'self', '.', 'backup_format_regex', 'and', 'not', 'match', ':', 'match', '=', 're', '.', 'findall', '(', 'self', '.', 'backup_date_regex', ',', 'line', ')', 'fields', '=', 'self', '.', 'backup_fields', 'if', 'match', ':', 'entry', '=', '{', '}', 'entry', '[', "'raw_text'", ']', '=', 'line', 'entry', '[', "'parser'", ']', '=', 'self', '.', 'name', 'matchlist', '=', 'list', '(', 'zip', '(', 'fields', ',', 'match', '[', '0', ']', ')', ')', 'for', 'f', ',', 'v', 'in', 'matchlist', ':', 'entry', '[', 'f', ']', '=', 'v', 'if', "'date_stamp'", 'in', 'entry', '.', 'keys', '(', ')', ':', 'if', 'self', '.', 'datestamp_type', '==', "'standard'", ':', 'entry', '=', 'logdissect', '.', 'utils', '.', 'convert_standard_datestamp', '(', 'entry', ')', 'elif', 'self', '.', 'datestamp_type', '==', "'iso'", ':', 'entry', '=', 'logdissect', '.', 'utils', '.', 'convert_iso_datestamp', '(', 'entry', ')', 'elif', 'self', '.', 'datestamp_type', '==', "'webaccess'", ':', 'entry', '=', 'logdissect', '.', 'utils', '.', 'convert_webaccess_datestamp', '(', 'entry', ')', 'elif', 'self', '.', 'datestamp_type', '==', "'nodate'", ':', 'entry', ',', 'self', '.', 'datedata', '=', 'logdissect', '.', 'utils', '.', 'convert_nodate_datestamp', '(', 'entry', ',', 'self', '.', 'datedata', ')', 'elif', 'self', '.', 'datestamp_type', '==', "'unix'", ':', 'entry', '=', 'logdissect', '.', 'utils', '.', 'convert_unix_datestamp', '(', 'entry', ')', 'if', 'self', '.', 'datestamp_type', '==', "'now'", ':', 'entry', '=', 'logdissect', '.', 'utils', '.', 'convert_now_datestamp', '(', 'entry', ')', 'entry', '=', 'self', '.', 'post_parse_action', '(', 'entry', ')', 'return', 'entry', 'else', ':', 'return', 'None']
Parse a line into a dictionary
['Parse', 'a', 'line', 'into', 'a', 'dictionary']
train
https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/parsers/type.py#L125-L167
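The core of parse_line is one re.findall() whose capture groups are zipped against a parallel field list; the datestamp conversion is then dispatched on datestamp_type. A simplified sketch with an invented syslog-style regex and field list (not the actual logdissect parser definitions):

import re

date_regex = r'^(\w{3} +\d+ [\d:]+) (\S+) (\S+): (.*)'
fields = ['date_stamp', 'source_host', 'source_process', 'message']

line = 'Mar  2 13:05:44 myhost sshd[411]: session opened'
match = re.findall(date_regex, line)
if match:
    entry = dict(zip(fields, match[0]))  # match[0] is the tuple of groups
    entry['raw_text'] = line
    print(entry['source_process'])  # sshd[411]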
2,346
lemieuxl/pyGenClean
pyGenClean/LaTeX/merge_reports.py
get_final_numbers
def get_final_numbers(filename, out_dir): """Copy the final_files file and get the number of markers and samples. :param filename: the name of the file. :param out_dir: the output directory. :type filename: str :type out_dir: str :returns: the final number of markers and samples :rtype: tuple """ # Copying the file shutil.copy(filename, out_dir) # Reading the number of markers and samples nb_samples = None nb_markers = None with open(filename, "r") as i_file: for line in i_file: row = line.rstrip("\r\n").split("\t") if len(row) == 1: continue path, ext = os.path.splitext(row[0]) if ext in {".bim", ".tped", ".map"}: nb_markers = row[1] elif ext in {".fam", ".ped", ".tfam"}: nb_samples = row[1] assert nb_samples assert nb_markers return nb_markers, nb_samples
python
def get_final_numbers(filename, out_dir): """Copy the final_files file and get the number of markers and samples. :param filename: the name of the file. :param out_dir: the output directory. :type filename: str :type out_dir: str :returns: the final number of markers and samples :rtype: tuple """ # Copying the file shutil.copy(filename, out_dir) # Reading the number of markers and samples nb_samples = None nb_markers = None with open(filename, "r") as i_file: for line in i_file: row = line.rstrip("\r\n").split("\t") if len(row) == 1: continue path, ext = os.path.splitext(row[0]) if ext in {".bim", ".tped", ".map"}: nb_markers = row[1] elif ext in {".fam", ".ped", ".tfam"}: nb_samples = row[1] assert nb_samples assert nb_markers return nb_markers, nb_samples
['def', 'get_final_numbers', '(', 'filename', ',', 'out_dir', ')', ':', '# Copying the file', 'shutil', '.', 'copy', '(', 'filename', ',', 'out_dir', ')', '# Reading the number of markers and samples', 'nb_samples', '=', 'None', 'nb_markers', '=', 'None', 'with', 'open', '(', 'filename', ',', '"r"', ')', 'as', 'i_file', ':', 'for', 'line', 'in', 'i_file', ':', 'row', '=', 'line', '.', 'rstrip', '(', '"\\r\\n"', ')', '.', 'split', '(', '"\\t"', ')', 'if', 'len', '(', 'row', ')', '==', '1', ':', 'continue', 'path', ',', 'ext', '=', 'os', '.', 'path', '.', 'splitext', '(', 'row', '[', '0', ']', ')', 'if', 'ext', 'in', '{', '".bim"', ',', '".tped"', ',', '".map"', '}', ':', 'nb_markers', '=', 'row', '[', '1', ']', 'elif', 'ext', 'in', '{', '".fam"', ',', '".ped"', ',', '".tfam"', '}', ':', 'nb_samples', '=', 'row', '[', '1', ']', 'assert', 'nb_samples', 'assert', 'nb_markers', 'return', 'nb_markers', ',', 'nb_samples']
Copy the final_files file and get the number of markers and samples. :param filename: the name of the file. :param out_dir: the output directory. :type filename: str :type out_dir: str :returns: the final number of markers and samples :rtype: tuple
['Copy', 'the', 'final_files', 'file', 'and', 'get', 'the', 'number', 'of', 'markers', 'and', 'samples', '.']
train
https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/LaTeX/merge_reports.py#L160-L193
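The extension dispatch is the interesting part: marker counts come from .bim/.tped/.map rows and sample counts from .fam/.ped/.tfam rows of the tab-separated summary. A self-contained sketch over in-memory rows (the row contents are invented):

import os

rows = [
    'final.bim\t123456',
    'final.fam\t5000',
    'README',                 # single-column rows are skipped
]
nb_markers = nb_samples = None
for line in rows:
    row = line.rstrip('\r\n').split('\t')
    if len(row) == 1:
        continue
    _, ext = os.path.splitext(row[0])
    if ext in {'.bim', '.tped', '.map'}:
        nb_markers = row[1]
    elif ext in {'.fam', '.ped', '.tfam'}:
        nb_samples = row[1]
print(nb_markers, nb_samples)  # 123456 5000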
2,347
bcbio/bcbio-nextgen
bcbio/variation/mutect.py
mutect_caller
def mutect_caller(align_bams, items, ref_file, assoc_files, region=None, out_file=None): """Run the MuTect paired analysis algorithm. """ config = items[0]["config"] if out_file is None: out_file = "%s-paired-variants.vcf.gz" % os.path.splitext(align_bams[0])[0] if not file_exists(out_file): base_config = items[0]["config"] broad_runner = broad.runner_from_config(base_config, "mutect") out_file_mutect = (out_file.replace(".vcf", "-mutect.vcf") if "vcf" in out_file else out_file + "-mutect.vcf") broad_runner, params = \ _mutect_call_prep(align_bams, items, ref_file, assoc_files, region, out_file_mutect) if (not isinstance(region, (list, tuple)) and not all(has_aligned_reads(x, region) for x in align_bams)): paired = vcfutils.get_paired(items) vcfutils.write_empty_vcf(out_file, samples=[x for x in (paired.tumor_name, paired.normal_name) if x]) return out_file_orig = "%s-orig%s" % utils.splitext_plus(out_file_mutect) if not file_exists(out_file_orig): with file_transaction(config, out_file_orig) as tx_out_file: # Rationale: MuTect writes another table to stdout, which we don't need params += ["--vcf", tx_out_file, "-o", os.devnull] broad_runner.run_mutect(params) is_paired = "-I:normal" in params if not utils.file_uptodate(out_file_mutect, out_file_orig): out_file_mutect = _fix_mutect_output(out_file_orig, config, out_file_mutect, is_paired) indelcaller = vcfutils.get_indelcaller(base_config) if ("scalpel" in indelcaller.lower() and region and isinstance(region, (tuple, list)) and chromhacks.is_autosomal_or_sex(region[0])): # Scalpel InDels out_file_indels = (out_file.replace(".vcf", "-somaticIndels.vcf") if "vcf" in out_file else out_file + "-somaticIndels.vcf") if scalpel.is_installed(items[0]["config"]): if not is_paired: vcfutils.check_paired_problems(items) scalpel._run_scalpel_caller(align_bams, items, ref_file, assoc_files, region=region, out_file=out_file_indels) else: scalpel._run_scalpel_paired(align_bams, items, ref_file, assoc_files, region=region, out_file=out_file_indels) out_file = vcfutils.combine_variant_files(orig_files=[out_file_mutect, out_file_indels], out_file=out_file, ref_file=items[0]["sam_ref"], config=items[0]["config"], region=region) else: utils.symlink_plus(out_file_mutect, out_file) elif "pindel" in indelcaller.lower(): from bcbio.structural import pindel out_file_indels = (out_file.replace(".vcf", "-somaticIndels.vcf") if "vcf" in out_file else out_file + "-somaticIndels.vcf") if pindel.is_installed(items[0]["config"]): pindel._run_tumor_pindel_caller(align_bams, items, ref_file, assoc_files, region=region, out_file=out_file_indels) out_file = vcfutils.combine_variant_files(orig_files=[out_file_mutect, out_file_indels], out_file=out_file, ref_file=ref_file, config=items[0]["config"], region=region) else: utils.symlink_plus(out_file_mutect, out_file) elif (("somaticindeldetector" in indelcaller.lower() or "sid" in indelcaller.lower()) and "appistry" in broad_runner.get_mutect_version()): # SomaticIndelDetector InDels out_file_indels = (out_file.replace(".vcf", "-somaticIndels.vcf") if "vcf" in out_file else out_file + "-somaticIndels.vcf") params_indels = _SID_call_prep(align_bams, items, ref_file, assoc_files, region, out_file_indels) with file_transaction(config, out_file_indels) as tx_out_file: params_indels += ["-o", tx_out_file] broad_runner.run_mutect(params_indels) out_file = vcfutils.combine_variant_files(orig_files=[out_file_mutect, out_file_indels], out_file=out_file, ref_file=items[0]["sam_ref"], config=items[0]["config"], region=region) else: utils.symlink_plus(out_file_mutect, out_file) return out_file
python
def mutect_caller(align_bams, items, ref_file, assoc_files, region=None, out_file=None): """Run the MuTect paired analysis algorithm. """ config = items[0]["config"] if out_file is None: out_file = "%s-paired-variants.vcf.gz" % os.path.splitext(align_bams[0])[0] if not file_exists(out_file): base_config = items[0]["config"] broad_runner = broad.runner_from_config(base_config, "mutect") out_file_mutect = (out_file.replace(".vcf", "-mutect.vcf") if "vcf" in out_file else out_file + "-mutect.vcf") broad_runner, params = \ _mutect_call_prep(align_bams, items, ref_file, assoc_files, region, out_file_mutect) if (not isinstance(region, (list, tuple)) and not all(has_aligned_reads(x, region) for x in align_bams)): paired = vcfutils.get_paired(items) vcfutils.write_empty_vcf(out_file, samples=[x for x in (paired.tumor_name, paired.normal_name) if x]) return out_file_orig = "%s-orig%s" % utils.splitext_plus(out_file_mutect) if not file_exists(out_file_orig): with file_transaction(config, out_file_orig) as tx_out_file: # Rationale: MuTect writes another table to stdout, which we don't need params += ["--vcf", tx_out_file, "-o", os.devnull] broad_runner.run_mutect(params) is_paired = "-I:normal" in params if not utils.file_uptodate(out_file_mutect, out_file_orig): out_file_mutect = _fix_mutect_output(out_file_orig, config, out_file_mutect, is_paired) indelcaller = vcfutils.get_indelcaller(base_config) if ("scalpel" in indelcaller.lower() and region and isinstance(region, (tuple, list)) and chromhacks.is_autosomal_or_sex(region[0])): # Scalpel InDels out_file_indels = (out_file.replace(".vcf", "-somaticIndels.vcf") if "vcf" in out_file else out_file + "-somaticIndels.vcf") if scalpel.is_installed(items[0]["config"]): if not is_paired: vcfutils.check_paired_problems(items) scalpel._run_scalpel_caller(align_bams, items, ref_file, assoc_files, region=region, out_file=out_file_indels) else: scalpel._run_scalpel_paired(align_bams, items, ref_file, assoc_files, region=region, out_file=out_file_indels) out_file = vcfutils.combine_variant_files(orig_files=[out_file_mutect, out_file_indels], out_file=out_file, ref_file=items[0]["sam_ref"], config=items[0]["config"], region=region) else: utils.symlink_plus(out_file_mutect, out_file) elif "pindel" in indelcaller.lower(): from bcbio.structural import pindel out_file_indels = (out_file.replace(".vcf", "-somaticIndels.vcf") if "vcf" in out_file else out_file + "-somaticIndels.vcf") if pindel.is_installed(items[0]["config"]): pindel._run_tumor_pindel_caller(align_bams, items, ref_file, assoc_files, region=region, out_file=out_file_indels) out_file = vcfutils.combine_variant_files(orig_files=[out_file_mutect, out_file_indels], out_file=out_file, ref_file=ref_file, config=items[0]["config"], region=region) else: utils.symlink_plus(out_file_mutect, out_file) elif (("somaticindeldetector" in indelcaller.lower() or "sid" in indelcaller.lower()) and "appistry" in broad_runner.get_mutect_version()): # SomaticIndelDetector InDels out_file_indels = (out_file.replace(".vcf", "-somaticIndels.vcf") if "vcf" in out_file else out_file + "-somaticIndels.vcf") params_indels = _SID_call_prep(align_bams, items, ref_file, assoc_files, region, out_file_indels) with file_transaction(config, out_file_indels) as tx_out_file: params_indels += ["-o", tx_out_file] broad_runner.run_mutect(params_indels) out_file = vcfutils.combine_variant_files(orig_files=[out_file_mutect, out_file_indels], out_file=out_file, ref_file=items[0]["sam_ref"], config=items[0]["config"], region=region) else: utils.symlink_plus(out_file_mutect, out_file) return out_file
['def', 'mutect_caller', '(', 'align_bams', ',', 'items', ',', 'ref_file', ',', 'assoc_files', ',', 'region', '=', 'None', ',', 'out_file', '=', 'None', ')', ':', 'config', '=', 'items', '[', '0', ']', '[', '"config"', ']', 'if', 'out_file', 'is', 'None', ':', 'out_file', '=', '"%s-paired-variants.vcf.gz"', '%', 'os', '.', 'path', '.', 'splitext', '(', 'align_bams', '[', '0', ']', ')', '[', '0', ']', 'if', 'not', 'file_exists', '(', 'out_file', ')', ':', 'base_config', '=', 'items', '[', '0', ']', '[', '"config"', ']', 'broad_runner', '=', 'broad', '.', 'runner_from_config', '(', 'base_config', ',', '"mutect"', ')', 'out_file_mutect', '=', '(', 'out_file', '.', 'replace', '(', '".vcf"', ',', '"-mutect.vcf"', ')', 'if', '"vcf"', 'in', 'out_file', 'else', 'out_file', '+', '"-mutect.vcf"', ')', 'broad_runner', ',', 'params', '=', '_mutect_call_prep', '(', 'align_bams', ',', 'items', ',', 'ref_file', ',', 'assoc_files', ',', 'region', ',', 'out_file_mutect', ')', 'if', '(', 'not', 'isinstance', '(', 'region', ',', '(', 'list', ',', 'tuple', ')', ')', 'and', 'not', 'all', '(', 'has_aligned_reads', '(', 'x', ',', 'region', ')', 'for', 'x', 'in', 'align_bams', ')', ')', ':', 'paired', '=', 'vcfutils', '.', 'get_paired', '(', 'items', ')', 'vcfutils', '.', 'write_empty_vcf', '(', 'out_file', ',', 'samples', '=', '[', 'x', 'for', 'x', 'in', '(', 'paired', '.', 'tumor_name', ',', 'paired', '.', 'normal_name', ')', 'if', 'x', ']', ')', 'return', 'out_file_orig', '=', '"%s-orig%s"', '%', 'utils', '.', 'splitext_plus', '(', 'out_file_mutect', ')', 'if', 'not', 'file_exists', '(', 'out_file_orig', ')', ':', 'with', 'file_transaction', '(', 'config', ',', 'out_file_orig', ')', 'as', 'tx_out_file', ':', "# Rationale: MuTect writes another table to stdout, which we don't need", 'params', '+=', '[', '"--vcf"', ',', 'tx_out_file', ',', '"-o"', ',', 'os', '.', 'devnull', ']', 'broad_runner', '.', 'run_mutect', '(', 'params', ')', 'is_paired', '=', '"-I:normal"', 'in', 'params', 'if', 'not', 'utils', '.', 'file_uptodate', '(', 'out_file_mutect', ',', 'out_file_orig', ')', ':', 'out_file_mutect', '=', '_fix_mutect_output', '(', 'out_file_orig', ',', 'config', ',', 'out_file_mutect', ',', 'is_paired', ')', 'indelcaller', '=', 'vcfutils', '.', 'get_indelcaller', '(', 'base_config', ')', 'if', '(', '"scalpel"', 'in', 'indelcaller', '.', 'lower', '(', ')', 'and', 'region', 'and', 'isinstance', '(', 'region', ',', '(', 'tuple', ',', 'list', ')', ')', 'and', 'chromhacks', '.', 'is_autosomal_or_sex', '(', 'region', '[', '0', ']', ')', ')', ':', '# Scalpel InDels', 'out_file_indels', '=', '(', 'out_file', '.', 'replace', '(', '".vcf"', ',', '"-somaticIndels.vcf"', ')', 'if', '"vcf"', 'in', 'out_file', 'else', 'out_file', '+', '"-somaticIndels.vcf"', ')', 'if', 'scalpel', '.', 'is_installed', '(', 'items', '[', '0', ']', '[', '"config"', ']', ')', ':', 'if', 'not', 'is_paired', ':', 'vcfutils', '.', 'check_paired_problems', '(', 'items', ')', 'scalpel', '.', '_run_scalpel_caller', '(', 'align_bams', ',', 'items', ',', 'ref_file', ',', 'assoc_files', ',', 'region', '=', 'region', ',', 'out_file', '=', 'out_file_indels', ')', 'else', ':', 'scalpel', '.', '_run_scalpel_paired', '(', 'align_bams', ',', 'items', ',', 'ref_file', ',', 'assoc_files', ',', 'region', '=', 'region', ',', 'out_file', '=', 'out_file_indels', ')', 'out_file', '=', 'vcfutils', '.', 'combine_variant_files', '(', 'orig_files', '=', '[', 'out_file_mutect', ',', 'out_file_indels', ']', ',', 'out_file', '=', 'out_file', ',', 'ref_file', '=', 'items', '[', '0', ']', '[', '"sam_ref"', ']', ',', 'config', '=', 'items', '[', '0', ']', '[', '"config"', ']', ',', 'region', '=', 'region', ')', 'else', ':', 'utils', '.', 'symlink_plus', '(', 'out_file_mutect', ',', 'out_file', ')', 'elif', '"pindel"', 'in', 'indelcaller', '.', 'lower', '(', ')', ':', 'from', 'bcbio', '.', 'structural', 'import', 'pindel', 'out_file_indels', '=', '(', 'out_file', '.', 'replace', '(', '".vcf"', ',', '"-somaticIndels.vcf"', ')', 'if', '"vcf"', 'in', 'out_file', 'else', 'out_file', '+', '"-somaticIndels.vcf"', ')', 'if', 'pindel', '.', 'is_installed', '(', 'items', '[', '0', ']', '[', '"config"', ']', ')', ':', 'pindel', '.', '_run_tumor_pindel_caller', '(', 'align_bams', ',', 'items', ',', 'ref_file', ',', 'assoc_files', ',', 'region', '=', 'region', ',', 'out_file', '=', 'out_file_indels', ')', 'out_file', '=', 'vcfutils', '.', 'combine_variant_files', '(', 'orig_files', '=', '[', 'out_file_mutect', ',', 'out_file_indels', ']', ',', 'out_file', '=', 'out_file', ',', 'ref_file', '=', 'ref_file', ',', 'config', '=', 'items', '[', '0', ']', '[', '"config"', ']', ',', 'region', '=', 'region', ')', 'else', ':', 'utils', '.', 'symlink_plus', '(', 'out_file_mutect', ',', 'out_file', ')', 'elif', '(', '(', '"somaticindeldetector"', 'in', 'indelcaller', '.', 'lower', '(', ')', 'or', '"sid"', 'in', 'indelcaller', '.', 'lower', '(', ')', ')', 'and', '"appistry"', 'in', 'broad_runner', '.', 'get_mutect_version', '(', ')', ')', ':', '# SomaticIndelDetector InDels', 'out_file_indels', '=', '(', 'out_file', '.', 'replace', '(', '".vcf"', ',', '"-somaticIndels.vcf"', ')', 'if', '"vcf"', 'in', 'out_file', 'else', 'out_file', '+', '"-somaticIndels.vcf"', ')', 'params_indels', '=', '_SID_call_prep', '(', 'align_bams', ',', 'items', ',', 'ref_file', ',', 'assoc_files', ',', 'region', ',', 'out_file_indels', ')', 'with', 'file_transaction', '(', 'config', ',', 'out_file_indels', ')', 'as', 'tx_out_file', ':', 'params_indels', '+=', '[', '"-o"', ',', 'tx_out_file', ']', 'broad_runner', '.', 'run_mutect', '(', 'params_indels', ')', 'out_file', '=', 'vcfutils', '.', 'combine_variant_files', '(', 'orig_files', '=', '[', 'out_file_mutect', ',', 'out_file_indels', ']', ',', 'out_file', '=', 'out_file', ',', 'ref_file', '=', 'items', '[', '0', ']', '[', '"sam_ref"', ']', ',', 'config', '=', 'items', '[', '0', ']', '[', '"config"', ']', ',', 'region', '=', 'region', ')', 'else', ':', 'utils', '.', 'symlink_plus', '(', 'out_file_mutect', ',', 'out_file', ')', 'return', 'out_file']
Run the MuTect paired analysis algorithm.
['Run', 'the', 'MuTect', 'paired', 'analysis', 'algorithm', '.']
train
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/mutect.py#L108-L189
2,348
hotdoc/hotdoc
hotdoc/core/formatter.py
Formatter.copy_assets
def copy_assets(self, assets_path): """Banana banana """ if not os.path.exists(assets_path): os.mkdir(assets_path) extra_files = self._get_extra_files() for ex_files in Formatter.get_extra_files_signal(self): extra_files.extend(ex_files) for src, dest in extra_files: dest = os.path.join(assets_path, dest) destdir = os.path.dirname(dest) if not os.path.exists(destdir): os.makedirs(destdir) if os.path.isfile(src): shutil.copy(src, dest) elif os.path.isdir(src): recursive_overwrite(src, dest)
python
def copy_assets(self, assets_path): """Banana banana """ if not os.path.exists(assets_path): os.mkdir(assets_path) extra_files = self._get_extra_files() for ex_files in Formatter.get_extra_files_signal(self): extra_files.extend(ex_files) for src, dest in extra_files: dest = os.path.join(assets_path, dest) destdir = os.path.dirname(dest) if not os.path.exists(destdir): os.makedirs(destdir) if os.path.isfile(src): shutil.copy(src, dest) elif os.path.isdir(src): recursive_overwrite(src, dest)
['def', 'copy_assets', '(', 'self', ',', 'assets_path', ')', ':', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'assets_path', ')', ':', 'os', '.', 'mkdir', '(', 'assets_path', ')', 'extra_files', '=', 'self', '.', '_get_extra_files', '(', ')', 'for', 'ex_files', 'in', 'Formatter', '.', 'get_extra_files_signal', '(', 'self', ')', ':', 'extra_files', '.', 'extend', '(', 'ex_files', ')', 'for', 'src', ',', 'dest', 'in', 'extra_files', ':', 'dest', '=', 'os', '.', 'path', '.', 'join', '(', 'assets_path', ',', 'dest', ')', 'destdir', '=', 'os', '.', 'path', '.', 'dirname', '(', 'dest', ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'destdir', ')', ':', 'os', '.', 'makedirs', '(', 'destdir', ')', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'src', ')', ':', 'shutil', '.', 'copy', '(', 'src', ',', 'dest', ')', 'elif', 'os', '.', 'path', '.', 'isdir', '(', 'src', ')', ':', 'recursive_overwrite', '(', 'src', ',', 'dest', ')']
Banana banana
['Banana', 'banana']
train
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/formatter.py#L269-L289
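The copy loop distinguishes files from directories and creates destination directories on demand. A rough standalone equivalent, approximating hotdoc's recursive_overwrite with shutil.copytree (dirs_exist_ok needs Python 3.8+):

import os
import shutil

def copy_asset(src, dest):
    destdir = os.path.dirname(dest)
    if destdir and not os.path.exists(destdir):
        os.makedirs(destdir)
    if os.path.isfile(src):
        shutil.copy(src, dest)
    elif os.path.isdir(src):
        # merge src into dest, overwriting existing files
        shutil.copytree(src, dest, dirs_exist_ok=True)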
2,349
apple/turicreate
deps/src/boost_1_68_0/status/boost_check_library.py
check_library.get_library_meta
def get_library_meta(self): ''' Fetches the meta data for the current library. The data could be in the superlib meta data file. If we can't find the data None is returned. ''' parent_dir = os.path.dirname(self.library_dir) if self.test_file_exists(os.path.join(self.library_dir,'meta'),['libraries.json']): with open(os.path.join(self.library_dir,'meta','libraries.json'),'r') as f: meta_data = json.load(f) if isinstance(meta_data,list): for lib in meta_data: if lib['key'] == self.library_key: return lib elif 'key' in meta_data and meta_data['key'] == self.library_key: return meta_data if not self.test_dir_exists(os.path.join(self.library_dir,'meta')) \ and self.test_file_exists(os.path.join(parent_dir,'meta'),['libraries.json']): with open(os.path.join(parent_dir,'meta','libraries.json'),'r') as f: libraries_json = json.load(f) if isinstance(libraries_json,list): for lib in libraries_json: if lib['key'] == self.library_key: return lib return None
python
def get_library_meta(self): ''' Fetches the meta data for the current library. The data could be in the superlib meta data file. If we can't find the data None is returned. ''' parent_dir = os.path.dirname(self.library_dir) if self.test_file_exists(os.path.join(self.library_dir,'meta'),['libraries.json']): with open(os.path.join(self.library_dir,'meta','libraries.json'),'r') as f: meta_data = json.load(f) if isinstance(meta_data,list): for lib in meta_data: if lib['key'] == self.library_key: return lib elif 'key' in meta_data and meta_data['key'] == self.library_key: return meta_data if not self.test_dir_exists(os.path.join(self.library_dir,'meta')) \ and self.test_file_exists(os.path.join(parent_dir,'meta'),['libraries.json']): with open(os.path.join(parent_dir,'meta','libraries.json'),'r') as f: libraries_json = json.load(f) if isinstance(libraries_json,list): for lib in libraries_json: if lib['key'] == self.library_key: return lib return None
['def', 'get_library_meta', '(', 'self', ')', ':', 'parent_dir', '=', 'os', '.', 'path', '.', 'dirname', '(', 'self', '.', 'library_dir', ')', 'if', 'self', '.', 'test_file_exists', '(', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'library_dir', ',', "'meta'", ')', ',', '[', "'libraries.json'", ']', ')', ':', 'with', 'open', '(', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'library_dir', ',', "'meta'", ',', "'libraries.json'", ')', ',', "'r'", ')', 'as', 'f', ':', 'meta_data', '=', 'json', '.', 'load', '(', 'f', ')', 'if', 'isinstance', '(', 'meta_data', ',', 'list', ')', ':', 'for', 'lib', 'in', 'meta_data', ':', 'if', 'lib', '[', "'key'", ']', '==', 'self', '.', 'library_key', ':', 'return', 'lib', 'elif', "'key'", 'in', 'meta_data', 'and', 'meta_data', '[', "'key'", ']', '==', 'self', '.', 'library_key', ':', 'return', 'meta_data', 'if', 'not', 'self', '.', 'test_dir_exists', '(', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'library_dir', ',', "'meta'", ')', ')', 'and', 'self', '.', 'test_file_exists', '(', 'os', '.', 'path', '.', 'join', '(', 'parent_dir', ',', "'meta'", ')', ',', '[', "'libraries.json'", ']', ')', ':', 'with', 'open', '(', 'os', '.', 'path', '.', 'join', '(', 'parent_dir', ',', "'meta'", ',', "'libraries.json'", ')', ',', "'r'", ')', 'as', 'f', ':', 'libraries_json', '=', 'json', '.', 'load', '(', 'f', ')', 'if', 'isinstance', '(', 'libraries_json', ',', 'list', ')', ':', 'for', 'lib', 'in', 'libraries_json', ':', 'if', 'lib', '[', "'key'", ']', '==', 'self', '.', 'library_key', ':', 'return', 'lib', 'return', 'None']
Fetches the meta data for the current library. The data could be in the superlib meta data file. If we can't find the data None is returned.
['Fetches', 'the', 'meta', 'data', 'for', 'the', 'current', 'library', '.', 'The', 'data', 'could', 'be', 'in', 'the', 'superlib', 'meta', 'data', 'file', '.', 'If', 'we', 'can', 't', 'find', 'the', 'data', 'None', 'is', 'returned', '.']
train
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/status/boost_check_library.py#L182-L205
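Stripped of the file-system probing, the lookup is a key search that copes with both a list of library entries and a single-entry dict. A sketch over in-memory JSON (the entries are invented):

import json

libraries_json = json.loads('''
[{"key": "algorithm", "name": "Algorithm"},
 {"key": "align",     "name": "Align"}]
''')

def find_meta(meta_data, library_key):
    if isinstance(meta_data, list):
        for lib in meta_data:
            if lib['key'] == library_key:
                return lib
    elif meta_data.get('key') == library_key:
        return meta_data
    return None

print(find_meta(libraries_json, 'align'))  # {'key': 'align', 'name': 'Align'}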
2,350
linkedin/Zopkio
zopkio/adhoc_deployer.py
SSHDeployer.stop
def stop(self, unique_id, configs=None): """Stop the service. If the deployer has not started a service with`unique_id` the deployer will raise an Exception There are two configs that will be considered: 'terminate_only': if this config is passed in then this method is the same as terminate(unique_id) (this is also the behavior if stop_command is None and not overridden) 'stop_command': overrides the default stop_command :param unique_id: :param configs: :return: """ # the following is necessay to set the configs for this function as the combination of the # default configurations and the parameter with the parameter superceding the defaults but # not modifying the defaults if configs is None: configs = {} tmp = self.default_configs.copy() tmp.update(configs) configs = tmp logger.debug("stopping " + unique_id) if unique_id in self.processes: hostname = self.processes[unique_id].hostname else: logger.error("Can't stop {0}: process not known".format(unique_id)) raise DeploymentError("Can't stop {0}: process not known".format(unique_id)) if configs.get('terminate_only', False): self.terminate(unique_id, configs) else: stop_command = configs.get('stop_command') or self.default_configs.get('stop_command') env = configs.get("env", {}) if stop_command is not None: install_path = self.processes[unique_id].install_path with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh: log_output(exec_with_env(ssh, "cd {0}; {1}".format(install_path, stop_command), msg="Failed to stop {0}".format(unique_id), env=env)) else: self.terminate(unique_id, configs) if 'delay' in configs: time.sleep(configs['delay'])
python
def stop(self, unique_id, configs=None): """Stop the service. If the deployer has not started a service with`unique_id` the deployer will raise an Exception There are two configs that will be considered: 'terminate_only': if this config is passed in then this method is the same as terminate(unique_id) (this is also the behavior if stop_command is None and not overridden) 'stop_command': overrides the default stop_command :param unique_id: :param configs: :return: """ # the following is necessay to set the configs for this function as the combination of the # default configurations and the parameter with the parameter superceding the defaults but # not modifying the defaults if configs is None: configs = {} tmp = self.default_configs.copy() tmp.update(configs) configs = tmp logger.debug("stopping " + unique_id) if unique_id in self.processes: hostname = self.processes[unique_id].hostname else: logger.error("Can't stop {0}: process not known".format(unique_id)) raise DeploymentError("Can't stop {0}: process not known".format(unique_id)) if configs.get('terminate_only', False): self.terminate(unique_id, configs) else: stop_command = configs.get('stop_command') or self.default_configs.get('stop_command') env = configs.get("env", {}) if stop_command is not None: install_path = self.processes[unique_id].install_path with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh: log_output(exec_with_env(ssh, "cd {0}; {1}".format(install_path, stop_command), msg="Failed to stop {0}".format(unique_id), env=env)) else: self.terminate(unique_id, configs) if 'delay' in configs: time.sleep(configs['delay'])
['def', 'stop', '(', 'self', ',', 'unique_id', ',', 'configs', '=', 'None', ')', ':', '# the following is necessay to set the configs for this function as the combination of the', '# default configurations and the parameter with the parameter superceding the defaults but', '# not modifying the defaults', 'if', 'configs', 'is', 'None', ':', 'configs', '=', '{', '}', 'tmp', '=', 'self', '.', 'default_configs', '.', 'copy', '(', ')', 'tmp', '.', 'update', '(', 'configs', ')', 'configs', '=', 'tmp', 'logger', '.', 'debug', '(', '"stopping "', '+', 'unique_id', ')', 'if', 'unique_id', 'in', 'self', '.', 'processes', ':', 'hostname', '=', 'self', '.', 'processes', '[', 'unique_id', ']', '.', 'hostname', 'else', ':', 'logger', '.', 'error', '(', '"Can\'t stop {0}: process not known"', '.', 'format', '(', 'unique_id', ')', ')', 'raise', 'DeploymentError', '(', '"Can\'t stop {0}: process not known"', '.', 'format', '(', 'unique_id', ')', ')', 'if', 'configs', '.', 'get', '(', "'terminate_only'", ',', 'False', ')', ':', 'self', '.', 'terminate', '(', 'unique_id', ',', 'configs', ')', 'else', ':', 'stop_command', '=', 'configs', '.', 'get', '(', "'stop_command'", ')', 'or', 'self', '.', 'default_configs', '.', 'get', '(', "'stop_command'", ')', 'env', '=', 'configs', '.', 'get', '(', '"env"', ',', '{', '}', ')', 'if', 'stop_command', 'is', 'not', 'None', ':', 'install_path', '=', 'self', '.', 'processes', '[', 'unique_id', ']', '.', 'install_path', 'with', 'get_ssh_client', '(', 'hostname', ',', 'username', '=', 'runtime', '.', 'get_username', '(', ')', ',', 'password', '=', 'runtime', '.', 'get_password', '(', ')', ')', 'as', 'ssh', ':', 'log_output', '(', 'exec_with_env', '(', 'ssh', ',', '"cd {0}; {1}"', '.', 'format', '(', 'install_path', ',', 'stop_command', ')', ',', 'msg', '=', '"Failed to stop {0}"', '.', 'format', '(', 'unique_id', ')', ',', 'env', '=', 'env', ')', ')', 'else', ':', 'self', '.', 'terminate', '(', 'unique_id', ',', 'configs', ')', 'if', "'delay'", 'in', 'configs', ':', 'time', '.', 'sleep', '(', 'configs', '[', "'delay'", ']', ')']
Stop the service. If the deployer has not started a service with`unique_id` the deployer will raise an Exception There are two configs that will be considered: 'terminate_only': if this config is passed in then this method is the same as terminate(unique_id) (this is also the behavior if stop_command is None and not overridden) 'stop_command': overrides the default stop_command :param unique_id: :param configs: :return:
['Stop', 'the', 'service', '.', 'If', 'the', 'deployer', 'has', 'not', 'started', 'a', 'service', 'with', 'unique_id', 'the', 'deployer', 'will', 'raise', 'an', 'Exception', 'There', 'are', 'two', 'configs', 'that', 'will', 'be', 'considered', ':', 'terminate_only', ':', 'if', 'this', 'config', 'is', 'passed', 'in', 'then', 'this', 'method', 'is', 'the', 'same', 'as', 'terminate', '(', 'unique_id', ')', '(', 'this', 'is', 'also', 'the', 'behavior', 'if', 'stop_command', 'is', 'None', 'and', 'not', 'overridden', ')', 'stop_command', ':', 'overrides', 'the', 'default', 'stop_command']
train
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/adhoc_deployer.py#L264-L306
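The opening lines of stop() show a common defaults-merge idiom: copy the default config dict, overlay the caller's dict, and never mutate the defaults. In isolation:

default_configs = {'terminate_only': False, 'delay': 0}

def merged(configs=None):
    if configs is None:
        configs = {}
    tmp = default_configs.copy()  # copy first, so defaults stay intact
    tmp.update(configs)           # caller values win
    return tmp

print(merged({'delay': 3}))  # {'terminate_only': False, 'delay': 3}
print(default_configs)       # defaults untouched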
2,351
gem/oq-engine
openquake/hazardlib/geo/polygon.py
get_resampled_coordinates
def get_resampled_coordinates(lons, lats): """ Resample polygon line segments and return the coordinates of the new vertices. This limits distortions when projecting a polygon onto a spherical surface. Parameters define longitudes and latitudes of a point collection in the form of lists or numpy arrays. :return: A tuple of two numpy arrays: longitudes and latitudes of resampled vertices. """ num_coords = len(lons) assert num_coords == len(lats) lons1 = numpy.array(lons) lats1 = numpy.array(lats) lons2 = numpy.concatenate((lons1[1:], lons1[:1])) lats2 = numpy.concatenate((lats1[1:], lats1[:1])) distances = geodetic.geodetic_distance(lons1, lats1, lons2, lats2) resampled_lons = [lons[0]] resampled_lats = [lats[0]] for i in range(num_coords): next_point = (i + 1) % num_coords lon1, lat1 = lons[i], lats[i] lon2, lat2 = lons[next_point], lats[next_point] distance = distances[i] num_points = int(distance / UPSAMPLING_STEP_KM) + 1 if num_points >= 2: # We need to increase the resolution of this arc by adding new # points. new_lons, new_lats, _ = geodetic.npoints_between( lon1, lat1, 0, lon2, lat2, 0, num_points) resampled_lons.extend(new_lons[1:]) resampled_lats.extend(new_lats[1:]) else: resampled_lons.append(lon2) resampled_lats.append(lat2) # NB: we cut off the last point because it repeats the first one return numpy.array(resampled_lons[:-1]), numpy.array(resampled_lats[:-1])
python
def get_resampled_coordinates(lons, lats): """ Resample polygon line segments and return the coordinates of the new vertices. This limits distortions when projecting a polygon onto a spherical surface. Parameters define longitudes and latitudes of a point collection in the form of lists or numpy arrays. :return: A tuple of two numpy arrays: longitudes and latitudes of resampled vertices. """ num_coords = len(lons) assert num_coords == len(lats) lons1 = numpy.array(lons) lats1 = numpy.array(lats) lons2 = numpy.concatenate((lons1[1:], lons1[:1])) lats2 = numpy.concatenate((lats1[1:], lats1[:1])) distances = geodetic.geodetic_distance(lons1, lats1, lons2, lats2) resampled_lons = [lons[0]] resampled_lats = [lats[0]] for i in range(num_coords): next_point = (i + 1) % num_coords lon1, lat1 = lons[i], lats[i] lon2, lat2 = lons[next_point], lats[next_point] distance = distances[i] num_points = int(distance / UPSAMPLING_STEP_KM) + 1 if num_points >= 2: # We need to increase the resolution of this arc by adding new # points. new_lons, new_lats, _ = geodetic.npoints_between( lon1, lat1, 0, lon2, lat2, 0, num_points) resampled_lons.extend(new_lons[1:]) resampled_lats.extend(new_lats[1:]) else: resampled_lons.append(lon2) resampled_lats.append(lat2) # NB: we cut off the last point because it repeats the first one return numpy.array(resampled_lons[:-1]), numpy.array(resampled_lats[:-1])
['def', 'get_resampled_coordinates', '(', 'lons', ',', 'lats', ')', ':', 'num_coords', '=', 'len', '(', 'lons', ')', 'assert', 'num_coords', '==', 'len', '(', 'lats', ')', 'lons1', '=', 'numpy', '.', 'array', '(', 'lons', ')', 'lats1', '=', 'numpy', '.', 'array', '(', 'lats', ')', 'lons2', '=', 'numpy', '.', 'concatenate', '(', '(', 'lons1', '[', '1', ':', ']', ',', 'lons1', '[', ':', '1', ']', ')', ')', 'lats2', '=', 'numpy', '.', 'concatenate', '(', '(', 'lats1', '[', '1', ':', ']', ',', 'lats1', '[', ':', '1', ']', ')', ')', 'distances', '=', 'geodetic', '.', 'geodetic_distance', '(', 'lons1', ',', 'lats1', ',', 'lons2', ',', 'lats2', ')', 'resampled_lons', '=', '[', 'lons', '[', '0', ']', ']', 'resampled_lats', '=', '[', 'lats', '[', '0', ']', ']', 'for', 'i', 'in', 'range', '(', 'num_coords', ')', ':', 'next_point', '=', '(', 'i', '+', '1', ')', '%', 'num_coords', 'lon1', ',', 'lat1', '=', 'lons', '[', 'i', ']', ',', 'lats', '[', 'i', ']', 'lon2', ',', 'lat2', '=', 'lons', '[', 'next_point', ']', ',', 'lats', '[', 'next_point', ']', 'distance', '=', 'distances', '[', 'i', ']', 'num_points', '=', 'int', '(', 'distance', '/', 'UPSAMPLING_STEP_KM', ')', '+', '1', 'if', 'num_points', '>=', '2', ':', '# We need to increase the resolution of this arc by adding new', '# points.', 'new_lons', ',', 'new_lats', ',', '_', '=', 'geodetic', '.', 'npoints_between', '(', 'lon1', ',', 'lat1', ',', '0', ',', 'lon2', ',', 'lat2', ',', '0', ',', 'num_points', ')', 'resampled_lons', '.', 'extend', '(', 'new_lons', '[', '1', ':', ']', ')', 'resampled_lats', '.', 'extend', '(', 'new_lats', '[', '1', ':', ']', ')', 'else', ':', 'resampled_lons', '.', 'append', '(', 'lon2', ')', 'resampled_lats', '.', 'append', '(', 'lat2', ')', '# NB: we cut off the last point because it repeats the first one', 'return', 'numpy', '.', 'array', '(', 'resampled_lons', '[', ':', '-', '1', ']', ')', ',', 'numpy', '.', 'array', '(', 'resampled_lats', '[', ':', '-', '1', ']', ')']
Resample polygon line segments and return the coordinates of the new vertices. This limits distortions when projecting a polygon onto a spherical surface. Parameters define longitudes and latitudes of a point collection in the form of lists or numpy arrays. :return: A tuple of two numpy arrays: longitudes and latitudes of resampled vertices.
['Resample', 'polygon', 'line', 'segments', 'and', 'return', 'the', 'coordinates', 'of', 'the', 'new', 'vertices', '.', 'This', 'limits', 'distortions', 'when', 'projecting', 'a', 'polygon', 'onto', 'a', 'spherical', 'surface', '.']
train
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/geo/polygon.py#L249-L291
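The resampling rule gives each edge int(length / step) + 1 interpolation points, so no resampled segment exceeds the step, and the duplicated closing vertex is trimmed at the end. A planar numpy sketch in which straight-line interpolation stands in for the great-circle interpolation of geodetic.npoints_between:

import numpy as np

def resample_closed_polyline(xy, step):
    xy = np.asarray(xy, dtype=float)
    out = [xy[0]]
    n = len(xy)
    for i in range(n):
        p1, p2 = xy[i], xy[(i + 1) % n]  # wrap around to close the polygon
        dist = np.linalg.norm(p2 - p1)
        num_points = int(dist / step) + 1
        if num_points >= 2:
            # num_points samples including both endpoints; drop the start
            ts = np.linspace(0.0, 1.0, num_points)[1:]
            out.extend(p1 + t * (p2 - p1) for t in ts)
        else:
            out.append(p2)
    return np.array(out[:-1])  # drop the repeated first vertex

square = [(0, 0), (10, 0), (10, 10), (0, 10)]
print(len(resample_closed_polyline(square, step=2.5)))  # 16 vertices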
2,352
programa-stic/barf-project
barf/analysis/codeanalyzer/codeanalyzer.py
CodeAnalyzer._get_var_name
def _get_var_name(self, register_name, mode): """Get variable name for a register considering pre and post mode. """ var_name = { "pre": self._translator.get_name_init(register_name), "post": self._translator.get_name_curr(register_name), } return var_name[mode]
python
def _get_var_name(self, register_name, mode): """Get variable name for a register considering pre and post mode. """ var_name = { "pre": self._translator.get_name_init(register_name), "post": self._translator.get_name_curr(register_name), } return var_name[mode]
['def', '_get_var_name', '(', 'self', ',', 'register_name', ',', 'mode', ')', ':', 'var_name', '=', '{', '"pre"', ':', 'self', '.', '_translator', '.', 'get_name_init', '(', 'register_name', ')', ',', '"post"', ':', 'self', '.', '_translator', '.', 'get_name_curr', '(', 'register_name', ')', ',', '}', 'return', 'var_name', '[', 'mode', ']']
Get variable name for a register considering pre and post mode.
['Get', 'variable', 'name', 'for', 'a', 'register', 'considering', 'pre', 'and', 'post', 'mode', '.']
train
https://github.com/programa-stic/barf-project/blob/18ed9e5eace55f7bf6015ec57f037c364099021c/barf/analysis/codeanalyzer/codeanalyzer.py#L141-L149
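The helper is plain dict-based dispatch on the mode string; a self-contained sketch of the pattern (the suffixes are hypothetical stand-ins for the translator calls):

    def get_var_name(register_name, mode):
        """Return the 'initial' or 'current' variable name for a register."""
        var_name = {
            "pre": register_name + "_init",   # stand-in for translator.get_name_init
            "post": register_name + "_curr",  # stand-in for translator.get_name_curr
        }
        return var_name[mode]  # an unknown mode raises KeyError, as in the original

    assert get_var_name("eax", "pre") == "eax_init"
    assert get_var_name("eax", "post") == "eax_curr"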
2,353
Gandi/gandi.cli
gandi/cli/commands/dns.py
delete
def delete(gandi, fqdn, name, type, force): """Delete record entry for a domain.""" domains = gandi.dns.list() domains = [domain['fqdn'] for domain in domains] if fqdn not in domains: gandi.echo('Sorry domain %s does not exist' % fqdn) gandi.echo('Please use one of the following: %s' % ', '.join(domains)) return if not force: if not name and not type: prompt = ("Are you sure to delete all records for domain %s ?" % fqdn) elif name and not type: prompt = ("Are you sure to delete all '%s' name records for " "domain %s ?" % (name, fqdn)) else: prompt = ("Are you sure to delete all '%s' records of type %s " "for domain %s ?" % (name, type, fqdn)) proceed = click.confirm(prompt) if not proceed: return result = gandi.dns.del_record(fqdn, name, type) gandi.echo('Delete successful.') return result
python
def delete(gandi, fqdn, name, type, force): """Delete record entry for a domain.""" domains = gandi.dns.list() domains = [domain['fqdn'] for domain in domains] if fqdn not in domains: gandi.echo('Sorry domain %s does not exist' % fqdn) gandi.echo('Please use one of the following: %s' % ', '.join(domains)) return if not force: if not name and not type: prompt = ("Are you sure to delete all records for domain %s ?" % fqdn) elif name and not type: prompt = ("Are you sure to delete all '%s' name records for " "domain %s ?" % (name, fqdn)) else: prompt = ("Are you sure to delete all '%s' records of type %s " "for domain %s ?" % (name, type, fqdn)) proceed = click.confirm(prompt) if not proceed: return result = gandi.dns.del_record(fqdn, name, type) gandi.echo('Delete successful.') return result
['def', 'delete', '(', 'gandi', ',', 'fqdn', ',', 'name', ',', 'type', ',', 'force', ')', ':', 'domains', '=', 'gandi', '.', 'dns', '.', 'list', '(', ')', 'domains', '=', '[', 'domain', '[', "'fqdn'", ']', 'for', 'domain', 'in', 'domains', ']', 'if', 'fqdn', 'not', 'in', 'domains', ':', 'gandi', '.', 'echo', '(', "'Sorry domain %s does not exist'", '%', 'fqdn', ')', 'gandi', '.', 'echo', '(', "'Please use one of the following: %s'", '%', "', '", '.', 'join', '(', 'domains', ')', ')', 'return', 'if', 'not', 'force', ':', 'if', 'not', 'name', 'and', 'not', 'type', ':', 'prompt', '=', '(', '"Are you sure to delete all records for domain %s ?"', '%', 'fqdn', ')', 'elif', 'name', 'and', 'not', 'type', ':', 'prompt', '=', '(', '"Are you sure to delete all \'%s\' name records for "', '"domain %s ?"', '%', '(', 'name', ',', 'fqdn', ')', ')', 'else', ':', 'prompt', '=', '(', '"Are you sure to delete all \'%s\' records of type %s "', '"for domain %s ?"', '%', '(', 'name', ',', 'type', ',', 'fqdn', ')', ')', 'proceed', '=', 'click', '.', 'confirm', '(', 'prompt', ')', 'if', 'not', 'proceed', ':', 'return', 'result', '=', 'gandi', '.', 'dns', '.', 'del_record', '(', 'fqdn', ',', 'name', ',', 'type', ')', 'gandi', '.', 'echo', '(', "'Delete successful.'", ')', 'return', 'result']
Delete record entry for a domain.
['Delete', 'record', 'entry', 'for', 'a', 'domain', '.']
train
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/commands/dns.py#L146-L173
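The confirmation path hinges on click.confirm; a minimal runnable sketch of the same prompt-then-act flow (command name and messages are hypothetical):

    import click

    @click.command()
    @click.argument("fqdn")
    @click.option("--force", is_flag=True, help="Skip the confirmation prompt.")
    def delete(fqdn, force):
        """Delete all records for a domain after confirming."""
        if not force:
            if not click.confirm("Are you sure you want to delete all records "
                                 "for domain %s?" % fqdn):
                return
        click.echo("Delete successful.")

    if __name__ == "__main__":
        delete()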
2,354
FNNDSC/pfmisc
pfmisc/C_snode.py
C_stree.root
def root(self): """ Reset all nodes and branches to 'root'. """ str_treeRoot = '/' self.l_cwd = [str_treeRoot] self.snode_current = self.snode_root self.sbranch_current = self.sbranch_root
python
def root(self): """ Reset all nodes and branches to 'root'. """ str_treeRoot = '/' self.l_cwd = [str_treeRoot] self.snode_current = self.snode_root self.sbranch_current = self.sbranch_root
['def', 'root', '(', 'self', ')', ':', 'str_treeRoot', '=', "'/'", 'self', '.', 'l_cwd', '=', '[', 'str_treeRoot', ']', 'self', '.', 'snode_current', '=', 'self', '.', 'snode_root', 'self', '.', 'sbranch_current', '=', 'self', '.', 'sbranch_root']
Reset all nodes and branches to 'root'.
['Reset', 'all', 'nodes', 'and', 'branches', 'to', 'root', '.']
train
https://github.com/FNNDSC/pfmisc/blob/960b4d6135fcc50bed0a8e55db2ab1ddad9b99d8/pfmisc/C_snode.py#L556-L563
2,355
googleapis/google-cloud-python
datastore/google/cloud/datastore/query.py
Query.fetch
def fetch( self, limit=None, offset=0, start_cursor=None, end_cursor=None, client=None, eventual=False, ): """Execute the Query; return an iterator for the matching entities. For example:: >>> from google.cloud import datastore >>> client = datastore.Client() >>> query = client.query(kind='Person') >>> query.add_filter('name', '=', 'Sally') >>> list(query.fetch()) [<Entity object>, <Entity object>, ...] >>> list(query.fetch(1)) [<Entity object>] :type limit: int :param limit: (Optional) limit passed through to the iterator. :type offset: int :param offset: (Optional) offset passed through to the iterator. :type start_cursor: bytes :param start_cursor: (Optional) cursor passed through to the iterator. :type end_cursor: bytes :param end_cursor: (Optional) cursor passed through to the iterator. :type client: :class:`google.cloud.datastore.client.Client` :param client: (Optional) client used to connect to datastore. If not supplied, uses the query's value. :type eventual: bool :param eventual: (Optional) Defaults to strongly consistent (False). Setting True will use eventual consistency, but cannot be used inside a transaction or will raise ValueError. :rtype: :class:`Iterator` :returns: The iterator for the query. """ if client is None: client = self._client return Iterator( self, client, limit=limit, offset=offset, start_cursor=start_cursor, end_cursor=end_cursor, eventual=eventual, )
python
def fetch( self, limit=None, offset=0, start_cursor=None, end_cursor=None, client=None, eventual=False, ): """Execute the Query; return an iterator for the matching entities. For example:: >>> from google.cloud import datastore >>> client = datastore.Client() >>> query = client.query(kind='Person') >>> query.add_filter('name', '=', 'Sally') >>> list(query.fetch()) [<Entity object>, <Entity object>, ...] >>> list(query.fetch(1)) [<Entity object>] :type limit: int :param limit: (Optional) limit passed through to the iterator. :type offset: int :param offset: (Optional) offset passed through to the iterator. :type start_cursor: bytes :param start_cursor: (Optional) cursor passed through to the iterator. :type end_cursor: bytes :param end_cursor: (Optional) cursor passed through to the iterator. :type client: :class:`google.cloud.datastore.client.Client` :param client: (Optional) client used to connect to datastore. If not supplied, uses the query's value. :type eventual: bool :param eventual: (Optional) Defaults to strongly consistent (False). Setting True will use eventual consistency, but cannot be used inside a transaction or will raise ValueError. :rtype: :class:`Iterator` :returns: The iterator for the query. """ if client is None: client = self._client return Iterator( self, client, limit=limit, offset=offset, start_cursor=start_cursor, end_cursor=end_cursor, eventual=eventual, )
['def', 'fetch', '(', 'self', ',', 'limit', '=', 'None', ',', 'offset', '=', '0', ',', 'start_cursor', '=', 'None', ',', 'end_cursor', '=', 'None', ',', 'client', '=', 'None', ',', 'eventual', '=', 'False', ',', ')', ':', 'if', 'client', 'is', 'None', ':', 'client', '=', 'self', '.', '_client', 'return', 'Iterator', '(', 'self', ',', 'client', ',', 'limit', '=', 'limit', ',', 'offset', '=', 'offset', ',', 'start_cursor', '=', 'start_cursor', ',', 'end_cursor', '=', 'end_cursor', ',', 'eventual', '=', 'eventual', ',', ')']
Execute the Query; return an iterator for the matching entities. For example:: >>> from google.cloud import datastore >>> client = datastore.Client() >>> query = client.query(kind='Person') >>> query.add_filter('name', '=', 'Sally') >>> list(query.fetch()) [<Entity object>, <Entity object>, ...] >>> list(query.fetch(1)) [<Entity object>] :type limit: int :param limit: (Optional) limit passed through to the iterator. :type offset: int :param offset: (Optional) offset passed through to the iterator. :type start_cursor: bytes :param start_cursor: (Optional) cursor passed through to the iterator. :type end_cursor: bytes :param end_cursor: (Optional) cursor passed through to the iterator. :type client: :class:`google.cloud.datastore.client.Client` :param client: (Optional) client used to connect to datastore. If not supplied, uses the query's value. :type eventual: bool :param eventual: (Optional) Defaults to strongly consistent (False). Setting True will use eventual consistency, but cannot be used inside a transaction or will raise ValueError. :rtype: :class:`Iterator` :returns: The iterator for the query.
['Execute', 'the', 'Query', ';', 'return', 'an', 'iterator', 'for', 'the', 'matching', 'entities', '.']
train
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/datastore/google/cloud/datastore/query.py#L335-L393
2,356
CamDavidsonPilon/lifelines
lifelines/utils/__init__.py
datetimes_to_durations
def datetimes_to_durations( start_times, end_times, fill_date=datetime.today(), freq="D", dayfirst=False, na_values=None ): """ This is a very flexible function for transforming arrays of start_times and end_times to the proper format for lifelines: duration and event observation arrays. Parameters ---------- start_times: an array, Series or DataFrame iterable representing start times. These can be strings, or datetime objects. end_times: an array, Series or DataFrame iterable representing end times. These can be strings, or datetimes. These values can be None, or an empty string, which corresponds to censorship. fill_date: datetime, optional (default=datetime.Today()) the date to use if end_times is a None or empty string. This corresponds to last date of observation. Anything after this date is also censored. freq: string, optional (default='D') the units of time to use. See Pandas 'freq'. Default 'D' for days. dayfirst: boolean, optional (default=False) convert assuming European-style dates, i.e. day/month/year. na_values : list, optional list of values to recognize as NA/NaN. Ex: ['', 'NaT'] Returns ------- T: numpy array array of floats representing the durations with time units given by freq. C: numpy array boolean array of event observations: 1 if death observed, 0 else. Examples -------- >>> from lifelines.utils import datetimes_to_durations >>> >>> start_dates = ['2015-01-01', '2015-04-01', '2014-04-05'] >>> end_dates = ['2016-02-02', None, '2014-05-06'] >>> >>> T, E = datetimes_to_durations(start_dates, end_dates, freq="D") >>> T # array([ 397., 1414., 31.]) >>> E # array([ True, False, True]) """ fill_date = pd.to_datetime(fill_date) freq_string = "timedelta64[%s]" % freq start_times = pd.Series(start_times).copy() end_times = pd.Series(end_times).copy() C = ~(pd.isnull(end_times).values | end_times.isin(na_values or [""])) end_times[~C] = fill_date start_times_ = pd.to_datetime(start_times, dayfirst=dayfirst) end_times_ = pd.to_datetime(end_times, dayfirst=dayfirst, errors="coerce") deaths_after_cutoff = end_times_ > fill_date C[deaths_after_cutoff] = False T = (end_times_ - start_times_).values.astype(freq_string).astype(float) if (T < 0).sum(): warnings.warn("Warning: some values of start_times are after end_times") return T, C.values
python
def datetimes_to_durations( start_times, end_times, fill_date=datetime.today(), freq="D", dayfirst=False, na_values=None ): """ This is a very flexible function for transforming arrays of start_times and end_times to the proper format for lifelines: duration and event observation arrays. Parameters ---------- start_times: an array, Series or DataFrame iterable representing start times. These can be strings, or datetime objects. end_times: an array, Series or DataFrame iterable representing end times. These can be strings, or datetimes. These values can be None, or an empty string, which corresponds to censorship. fill_date: datetime, optional (default=datetime.Today()) the date to use if end_times is a None or empty string. This corresponds to last date of observation. Anything after this date is also censored. freq: string, optional (default='D') the units of time to use. See Pandas 'freq'. Default 'D' for days. dayfirst: boolean, optional (default=False) convert assuming European-style dates, i.e. day/month/year. na_values : list, optional list of values to recognize as NA/NaN. Ex: ['', 'NaT'] Returns ------- T: numpy array array of floats representing the durations with time units given by freq. C: numpy array boolean array of event observations: 1 if death observed, 0 else. Examples -------- >>> from lifelines.utils import datetimes_to_durations >>> >>> start_dates = ['2015-01-01', '2015-04-01', '2014-04-05'] >>> end_dates = ['2016-02-02', None, '2014-05-06'] >>> >>> T, E = datetimes_to_durations(start_dates, end_dates, freq="D") >>> T # array([ 397., 1414., 31.]) >>> E # array([ True, False, True]) """ fill_date = pd.to_datetime(fill_date) freq_string = "timedelta64[%s]" % freq start_times = pd.Series(start_times).copy() end_times = pd.Series(end_times).copy() C = ~(pd.isnull(end_times).values | end_times.isin(na_values or [""])) end_times[~C] = fill_date start_times_ = pd.to_datetime(start_times, dayfirst=dayfirst) end_times_ = pd.to_datetime(end_times, dayfirst=dayfirst, errors="coerce") deaths_after_cutoff = end_times_ > fill_date C[deaths_after_cutoff] = False T = (end_times_ - start_times_).values.astype(freq_string).astype(float) if (T < 0).sum(): warnings.warn("Warning: some values of start_times are after end_times") return T, C.values
['def', 'datetimes_to_durations', '(', 'start_times', ',', 'end_times', ',', 'fill_date', '=', 'datetime', '.', 'today', '(', ')', ',', 'freq', '=', '"D"', ',', 'dayfirst', '=', 'False', ',', 'na_values', '=', 'None', ')', ':', 'fill_date', '=', 'pd', '.', 'to_datetime', '(', 'fill_date', ')', 'freq_string', '=', '"timedelta64[%s]"', '%', 'freq', 'start_times', '=', 'pd', '.', 'Series', '(', 'start_times', ')', '.', 'copy', '(', ')', 'end_times', '=', 'pd', '.', 'Series', '(', 'end_times', ')', '.', 'copy', '(', ')', 'C', '=', '~', '(', 'pd', '.', 'isnull', '(', 'end_times', ')', '.', 'values', '|', 'end_times', '.', 'isin', '(', 'na_values', 'or', '[', '""', ']', ')', ')', 'end_times', '[', '~', 'C', ']', '=', 'fill_date', 'start_times_', '=', 'pd', '.', 'to_datetime', '(', 'start_times', ',', 'dayfirst', '=', 'dayfirst', ')', 'end_times_', '=', 'pd', '.', 'to_datetime', '(', 'end_times', ',', 'dayfirst', '=', 'dayfirst', ',', 'errors', '=', '"coerce"', ')', 'deaths_after_cutoff', '=', 'end_times_', '>', 'fill_date', 'C', '[', 'deaths_after_cutoff', ']', '=', 'False', 'T', '=', '(', 'end_times_', '-', 'start_times_', ')', '.', 'values', '.', 'astype', '(', 'freq_string', ')', '.', 'astype', '(', 'float', ')', 'if', '(', 'T', '<', '0', ')', '.', 'sum', '(', ')', ':', 'warnings', '.', 'warn', '(', '"Warning: some values of start_times are after end_times"', ')', 'return', 'T', ',', 'C', '.', 'values']
This is a very flexible function for transforming arrays of start_times and end_times to the proper format for lifelines: duration and event observation arrays. Parameters ---------- start_times: an array, Series or DataFrame iterable representing start times. These can be strings, or datetime objects. end_times: an array, Series or DataFrame iterable representing end times. These can be strings, or datetimes. These values can be None, or an empty string, which corresponds to censorship. fill_date: datetime, optional (default=datetime.Today()) the date to use if end_times is a None or empty string. This corresponds to last date of observation. Anything after this date is also censored. freq: string, optional (default='D') the units of time to use. See Pandas 'freq'. Default 'D' for days. dayfirst: boolean, optional (default=False) convert assuming European-style dates, i.e. day/month/year. na_values : list, optional list of values to recognize as NA/NaN. Ex: ['', 'NaT'] Returns ------- T: numpy array array of floats representing the durations with time units given by freq. C: numpy array boolean array of event observations: 1 if death observed, 0 else. Examples -------- >>> from lifelines.utils import datetimes_to_durations >>> >>> start_dates = ['2015-01-01', '2015-04-01', '2014-04-05'] >>> end_dates = ['2016-02-02', None, '2014-05-06'] >>> >>> T, E = datetimes_to_durations(start_dates, end_dates, freq="D") >>> T # array([ 397., 1414., 31.]) >>> E # array([ True, False, True])
['This', 'is', 'a', 'very', 'flexible', 'function', 'for', 'transforming', 'arrays', 'of', 'start_times', 'and', 'end_times', 'to', 'the', 'proper', 'format', 'for', 'lifelines', ':', 'duration', 'and', 'event', 'observation', 'arrays', '.']
train
https://github.com/CamDavidsonPilon/lifelines/blob/bdf6be6f1d10eea4c46365ee0ee6a47d8c30edf8/lifelines/utils/__init__.py#L445-L503
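A pandas-only sketch of the same duration/censoring computation, matching the docstring example without importing lifelines (the fill_date here is a hypothetical cutoff):

    import pandas as pd

    starts = pd.to_datetime(pd.Series(["2015-01-01", "2015-04-01", "2014-04-05"]))
    ends = pd.Series(["2016-02-02", None, "2014-05-06"])

    E = ends.notnull().values                    # event observed where an end exists
    fill_date = pd.Timestamp("2016-03-01")       # hypothetical last observation date
    ends = pd.to_datetime(ends).fillna(fill_date)

    T = (ends - starts).dt.days.astype(float).values
    # T -> [397., 335., 31.], E -> [True, False, True]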
2,357
tBuLi/symfit
symfit/core/minimizers.py
TrustConstr._get_jacobian_hessian_strategy
def _get_jacobian_hessian_strategy(self): """ Figure out how to calculate the jacobian and hessian. Will return a tuple describing how best to calculate the jacobian and hessian, respectively. If None, it should be calculated using the available analytical method. :return: tuple of jacobian_method, hessian_method """ if self.jacobian is not None and self.hessian is None: jacobian = None hessian = 'cs' elif self.jacobian is None and self.hessian is None: jacobian = 'cs' hessian = soBFGS(exception_strategy='damp_update') else: jacobian = None hessian = None return jacobian, hessian
python
def _get_jacobian_hessian_strategy(self): """ Figure out how to calculate the jacobian and hessian. Will return a tuple describing how best to calculate the jacobian and hessian, respectively. If None, it should be calculated using the available analytical method. :return: tuple of jacobian_method, hessian_method """ if self.jacobian is not None and self.hessian is None: jacobian = None hessian = 'cs' elif self.jacobian is None and self.hessian is None: jacobian = 'cs' hessian = soBFGS(exception_strategy='damp_update') else: jacobian = None hessian = None return jacobian, hessian
['def', '_get_jacobian_hessian_strategy', '(', 'self', ')', ':', 'if', 'self', '.', 'jacobian', 'is', 'not', 'None', 'and', 'self', '.', 'hessian', 'is', 'None', ':', 'jacobian', '=', 'None', 'hessian', '=', "'cs'", 'elif', 'self', '.', 'jacobian', 'is', 'None', 'and', 'self', '.', 'hessian', 'is', 'None', ':', 'jacobian', '=', "'cs'", 'hessian', '=', 'soBFGS', '(', 'exception_strategy', '=', "'damp_update'", ')', 'else', ':', 'jacobian', '=', 'None', 'hessian', '=', 'None', 'return', 'jacobian', ',', 'hessian']
Figure out how to calculate the jacobian and hessian. Will return a tuple describing how best to calculate the jacobian and hessian, respectively. If None, it should be calculated using the available analytical method. :return: tuple of jacobian_method, hessian_method
['Figure', 'out', 'how', 'to', 'calculate', 'the', 'jacobian', 'and', 'hessian', '.', 'Will', 'return', 'a', 'tuple', 'describing', 'how', 'best', 'to', 'calculate', 'the', 'jacobian', 'and', 'hessian', 'respectively', '.', 'If', 'None', 'it', 'should', 'be', 'calculated', 'using', 'the', 'available', 'analytical', 'method', '.']
train
https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/minimizers.py#L566-L584
2,358
xmunoz/sodapy
sodapy/__init__.py
Socrata.create
def create(self, name, **kwargs): ''' Create a dataset, including the field types. Optionally, specify args such as: description : description of the dataset columns : list of columns (see docs/tests for list structure) category : must exist in /admin/metadata tags : list of tag strings row_identifier : field name of primary key new_backend : whether to create the dataset in the new backend WARNING: This api endpoint might be deprecated. ''' new_backend = kwargs.pop("new_backend", False) resource = _format_old_api_request(content_type="json") if new_backend: resource += "?nbe=true" payload = {"name": name} if "row_identifier" in kwargs: payload["metadata"] = { "rowIdentifier": kwargs.pop("row_identifier", None) } payload.update(kwargs) payload = _clear_empty_values(payload) return self._perform_update("post", resource, payload)
python
def create(self, name, **kwargs): ''' Create a dataset, including the field types. Optionally, specify args such as: description : description of the dataset columns : list of columns (see docs/tests for list structure) category : must exist in /admin/metadata tags : list of tag strings row_identifier : field name of primary key new_backend : whether to create the dataset in the new backend WARNING: This api endpoint might be deprecated. ''' new_backend = kwargs.pop("new_backend", False) resource = _format_old_api_request(content_type="json") if new_backend: resource += "?nbe=true" payload = {"name": name} if "row_identifier" in kwargs: payload["metadata"] = { "rowIdentifier": kwargs.pop("row_identifier", None) } payload.update(kwargs) payload = _clear_empty_values(payload) return self._perform_update("post", resource, payload)
['def', 'create', '(', 'self', ',', 'name', ',', '*', '*', 'kwargs', ')', ':', 'new_backend', '=', 'kwargs', '.', 'pop', '(', '"new_backend"', ',', 'False', ')', 'resource', '=', '_format_old_api_request', '(', 'content_type', '=', '"json"', ')', 'if', 'new_backend', ':', 'resource', '+=', '"?nbe=true"', 'payload', '=', '{', '"name"', ':', 'name', '}', 'if', '"row_identifier"', 'in', 'kwargs', ':', 'payload', '[', '"metadata"', ']', '=', '{', '"rowIdentifier"', ':', 'kwargs', '.', 'pop', '(', '"row_identifier"', ',', 'None', ')', '}', 'payload', '.', 'update', '(', 'kwargs', ')', 'payload', '=', '_clear_empty_values', '(', 'payload', ')', 'return', 'self', '.', '_perform_update', '(', '"post"', ',', 'resource', ',', 'payload', ')']
Create a dataset, including the field types. Optionally, specify args such as: description : description of the dataset columns : list of columns (see docs/tests for list structure) category : must exist in /admin/metadata tags : list of tag strings row_identifier : field name of primary key new_backend : whether to create the dataset in the new backend WARNING: This api endpoint might be deprecated.
['Create', 'a', 'dataset', 'including', 'the', 'field', 'types', '.', 'Optionally', 'specify', 'args', 'such', 'as', ':', 'description', ':', 'description', 'of', 'the', 'dataset', 'columns', ':', 'list', 'of', 'columns', '(', 'see', 'docs', '/', 'tests', 'for', 'list', 'structure', ')', 'category', ':', 'must', 'exist', 'in', '/', 'admin', '/', 'metadata', 'tags', ':', 'list', 'of', 'tag', 'strings', 'row_identifier', ':', 'field', 'name', 'of', 'primary', 'key', 'new_backend', ':', 'whether', 'to', 'create', 'the', 'dataset', 'in', 'the', 'new', 'backend']
train
https://github.com/xmunoz/sodapy/blob/dad2ca9560cde0acb03bdb4423260e891ca40d7b/sodapy/__init__.py#L207-L234
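A hedged usage sketch; the domain, token, and column spec below are hypothetical, and the exact column format may differ on your Socrata domain:

    from sodapy import Socrata

    client = Socrata("data.example.com", "MY_APP_TOKEN")  # hypothetical credentials
    dataset = client.create(
        "Test Dataset",
        description="Created via sodapy",
        columns=[{"fieldName": "name", "name": "Name", "dataTypeName": "text"}],
        row_identifier="name",
    )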
2,359
reiinakano/scikit-plot
scikitplot/plotters.py
plot_elbow_curve
def plot_elbow_curve(clf, X, title='Elbow Plot', cluster_ranges=None, ax=None, figsize=None, title_fontsize="large", text_fontsize="medium"): """Plots elbow curve of different values of K for KMeans clustering. Args: clf: Clusterer instance that implements ``fit`` and ``fit_predict`` methods and a ``score`` parameter. X (array-like, shape (n_samples, n_features)): Data to cluster, where n_samples is the number of samples and n_features is the number of features. title (string, optional): Title of the generated plot. Defaults to "Elbow Plot" cluster_ranges (None or :obj:`list` of int, optional): List of n_clusters for which to plot the explained variances. Defaults to ``range(1, 12, 2)``. copy (boolean, optional): Determines whether ``fit`` is used on **clf** or on a copy of **clf**. ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to plot the curve. If None, the plot is drawn on a new set of axes. figsize (2-tuple, optional): Tuple denoting figure size of the plot e.g. (6, 6). Defaults to ``None``. title_fontsize (string or int, optional): Matplotlib-style fontsizes. Use e.g. "small", "medium", "large" or integer-values. Defaults to "large". text_fontsize (string or int, optional): Matplotlib-style fontsizes. Use e.g. "small", "medium", "large" or integer-values. Defaults to "medium". Returns: ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was drawn. Example: >>> import scikitplot.plotters as skplt >>> kmeans = KMeans(random_state=1) >>> skplt.plot_elbow_curve(kmeans, cluster_ranges=range(1, 11)) <matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490> >>> plt.show() .. image:: _static/examples/plot_elbow_curve.png :align: center :alt: Elbow Curve """ if cluster_ranges is None: cluster_ranges = range(1, 12, 2) else: cluster_ranges = sorted(cluster_ranges) if not hasattr(clf, 'n_clusters'): raise TypeError('"n_clusters" attribute not in classifier. ' 'Cannot plot elbow method.') clfs = [] for i in cluster_ranges: current_clf = clone(clf) setattr(current_clf, "n_clusters", i) clfs.append(current_clf.fit(X).score(X)) if ax is None: fig, ax = plt.subplots(1, 1, figsize=figsize) ax.set_title(title, fontsize=title_fontsize) ax.plot(cluster_ranges, np.absolute(clfs), 'b*-') ax.grid(True) ax.set_xlabel('Number of clusters', fontsize=text_fontsize) ax.set_ylabel('Sum of Squared Errors', fontsize=text_fontsize) ax.tick_params(labelsize=text_fontsize) return ax
python
def plot_elbow_curve(clf, X, title='Elbow Plot', cluster_ranges=None, ax=None, figsize=None, title_fontsize="large", text_fontsize="medium"): """Plots elbow curve of different values of K for KMeans clustering. Args: clf: Clusterer instance that implements ``fit`` and ``fit_predict`` methods and a ``score`` parameter. X (array-like, shape (n_samples, n_features)): Data to cluster, where n_samples is the number of samples and n_features is the number of features. title (string, optional): Title of the generated plot. Defaults to "Elbow Plot" cluster_ranges (None or :obj:`list` of int, optional): List of n_clusters for which to plot the explained variances. Defaults to ``range(1, 12, 2)``. copy (boolean, optional): Determines whether ``fit`` is used on **clf** or on a copy of **clf**. ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to plot the curve. If None, the plot is drawn on a new set of axes. figsize (2-tuple, optional): Tuple denoting figure size of the plot e.g. (6, 6). Defaults to ``None``. title_fontsize (string or int, optional): Matplotlib-style fontsizes. Use e.g. "small", "medium", "large" or integer-values. Defaults to "large". text_fontsize (string or int, optional): Matplotlib-style fontsizes. Use e.g. "small", "medium", "large" or integer-values. Defaults to "medium". Returns: ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was drawn. Example: >>> import scikitplot.plotters as skplt >>> kmeans = KMeans(random_state=1) >>> skplt.plot_elbow_curve(kmeans, cluster_ranges=range(1, 11)) <matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490> >>> plt.show() .. image:: _static/examples/plot_elbow_curve.png :align: center :alt: Elbow Curve """ if cluster_ranges is None: cluster_ranges = range(1, 12, 2) else: cluster_ranges = sorted(cluster_ranges) if not hasattr(clf, 'n_clusters'): raise TypeError('"n_clusters" attribute not in classifier. ' 'Cannot plot elbow method.') clfs = [] for i in cluster_ranges: current_clf = clone(clf) setattr(current_clf, "n_clusters", i) clfs.append(current_clf.fit(X).score(X)) if ax is None: fig, ax = plt.subplots(1, 1, figsize=figsize) ax.set_title(title, fontsize=title_fontsize) ax.plot(cluster_ranges, np.absolute(clfs), 'b*-') ax.grid(True) ax.set_xlabel('Number of clusters', fontsize=text_fontsize) ax.set_ylabel('Sum of Squared Errors', fontsize=text_fontsize) ax.tick_params(labelsize=text_fontsize) return ax
['def', 'plot_elbow_curve', '(', 'clf', ',', 'X', ',', 'title', '=', "'Elbow Plot'", ',', 'cluster_ranges', '=', 'None', ',', 'ax', '=', 'None', ',', 'figsize', '=', 'None', ',', 'title_fontsize', '=', '"large"', ',', 'text_fontsize', '=', '"medium"', ')', ':', 'if', 'cluster_ranges', 'is', 'None', ':', 'cluster_ranges', '=', 'range', '(', '1', ',', '12', ',', '2', ')', 'else', ':', 'cluster_ranges', '=', 'sorted', '(', 'cluster_ranges', ')', 'if', 'not', 'hasattr', '(', 'clf', ',', "'n_clusters'", ')', ':', 'raise', 'TypeError', '(', '\'"n_clusters" attribute not in classifier. \'', "'Cannot plot elbow method.'", ')', 'clfs', '=', '[', ']', 'for', 'i', 'in', 'cluster_ranges', ':', 'current_clf', '=', 'clone', '(', 'clf', ')', 'setattr', '(', 'current_clf', ',', '"n_clusters"', ',', 'i', ')', 'clfs', '.', 'append', '(', 'current_clf', '.', 'fit', '(', 'X', ')', '.', 'score', '(', 'X', ')', ')', 'if', 'ax', 'is', 'None', ':', 'fig', ',', 'ax', '=', 'plt', '.', 'subplots', '(', '1', ',', '1', ',', 'figsize', '=', 'figsize', ')', 'ax', '.', 'set_title', '(', 'title', ',', 'fontsize', '=', 'title_fontsize', ')', 'ax', '.', 'plot', '(', 'cluster_ranges', ',', 'np', '.', 'absolute', '(', 'clfs', ')', ',', "'b*-'", ')', 'ax', '.', 'grid', '(', 'True', ')', 'ax', '.', 'set_xlabel', '(', "'Number of clusters'", ',', 'fontsize', '=', 'text_fontsize', ')', 'ax', '.', 'set_ylabel', '(', "'Sum of Squared Errors'", ',', 'fontsize', '=', 'text_fontsize', ')', 'ax', '.', 'tick_params', '(', 'labelsize', '=', 'text_fontsize', ')', 'return', 'ax']
Plots elbow curve of different values of K for KMeans clustering. Args: clf: Clusterer instance that implements ``fit`` and ``fit_predict`` methods and a ``score`` parameter. X (array-like, shape (n_samples, n_features)): Data to cluster, where n_samples is the number of samples and n_features is the number of features. title (string, optional): Title of the generated plot. Defaults to "Elbow Plot" cluster_ranges (None or :obj:`list` of int, optional): List of n_clusters for which to plot the explained variances. Defaults to ``range(1, 12, 2)``. copy (boolean, optional): Determines whether ``fit`` is used on **clf** or on a copy of **clf**. ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to plot the curve. If None, the plot is drawn on a new set of axes. figsize (2-tuple, optional): Tuple denoting figure size of the plot e.g. (6, 6). Defaults to ``None``. title_fontsize (string or int, optional): Matplotlib-style fontsizes. Use e.g. "small", "medium", "large" or integer-values. Defaults to "large". text_fontsize (string or int, optional): Matplotlib-style fontsizes. Use e.g. "small", "medium", "large" or integer-values. Defaults to "medium". Returns: ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was drawn. Example: >>> import scikitplot.plotters as skplt >>> kmeans = KMeans(random_state=1) >>> skplt.plot_elbow_curve(kmeans, cluster_ranges=range(1, 11)) <matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490> >>> plt.show() .. image:: _static/examples/plot_elbow_curve.png :align: center :alt: Elbow Curve
['Plots', 'elbow', 'curve', 'of', 'different', 'values', 'of', 'K', 'for', 'KMeans', 'clustering', '.']
train
https://github.com/reiinakano/scikit-plot/blob/2dd3e6a76df77edcbd724c4db25575f70abb57cb/scikitplot/plotters.py#L893-L970
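The core of the function is clone-the-clusterer, set n_clusters, and plot |score(X)|; a runnable sketch of the same elbow computation with scikit-learn and matplotlib on synthetic data:

    import numpy as np
    import matplotlib.pyplot as plt
    from sklearn.base import clone
    from sklearn.cluster import KMeans

    X = np.random.RandomState(0).rand(200, 2)
    base = KMeans(random_state=1)

    ks = list(range(1, 11))
    sse = [abs(clone(base).set_params(n_clusters=k).fit(X).score(X)) for k in ks]

    plt.plot(ks, sse, "b*-")
    plt.xlabel("Number of clusters")
    plt.ylabel("Sum of Squared Errors")
    plt.show()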
2,360
gem/oq-engine
openquake/engine/export/core.py
check_version
def check_version(dstore): """ :param dstore: a DataStore instance :returns: a message if the stored version is different from the current version """ ds_version = dstore.hdf5.attrs['engine_version'] if ds_version != __version__: return (': the datastore is at version %s, but the exporter at ' 'version %s' % (ds_version, __version__)) else: return ''
python
def check_version(dstore): """ :param dstore: a DataStore instance :returns: a message if the stored version is different from the current version """ ds_version = dstore.hdf5.attrs['engine_version'] if ds_version != __version__: return (': the datastore is at version %s, but the exporter at ' 'version %s' % (ds_version, __version__)) else: return ''
['def', 'check_version', '(', 'dstore', ')', ':', 'ds_version', '=', 'dstore', '.', 'hdf5', '.', 'attrs', '[', "'engine_version'", ']', 'if', 'ds_version', '!=', '__version__', ':', 'return', '(', "': the datastore is at version %s, but the exporter at '", "'version %s'", '%', '(', 'ds_version', ',', '__version__', ')', ')', 'else', ':', 'return', "''"]
:param dstore: a DataStore instance :returns: a message if the stored version is different from the current version
[':', 'param', 'dstore', ':', 'a', 'DataStore', 'instance', ':', 'returns', ':', 'a', 'message', 'if', 'the', 'stored', 'version', 'is', 'different', 'from', 'the', 'current', 'version']
train
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/engine/export/core.py#L37-L48
2,361
abnerjacobsen/tinydb-jsonorm
src/tinydb_jsonorm/cuid.py
CuidGenerator.counter
def counter(self): """ Rolling counter that ensures same-machine and same-time cuids don't collide. """ self._counter += 1 if self._counter >= DISCRETE_VALUES: self._counter = 0 return self._counter
python
def counter(self): """ Rolling counter that ensures same-machine and same-time cuids don't collide. """ self._counter += 1 if self._counter >= DISCRETE_VALUES: self._counter = 0 return self._counter
['def', 'counter', '(', 'self', ')', ':', 'self', '.', '_counter', '+=', '1', 'if', 'self', '.', '_counter', '>=', 'DISCRETE_VALUES', ':', 'self', '.', '_counter', '=', '0', 'return', 'self', '.', '_counter']
Rolling counter that ensures same-machine and same-time cuids don't collide.
['Rolling', 'counter', 'that', 'ensures', 'same', '-', 'machine', 'and', 'same', '-', 'time', 'cuids', 'don', 't', 'collide', '.']
train
https://github.com/abnerjacobsen/tinydb-jsonorm/blob/704d3f887cc8963769ffbb116eb7e6909deeaecd/src/tinydb_jsonorm/cuid.py#L103-L111
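A self-contained sketch of the rolling counter; the modulus is assumed (in cuid it is derived from the base and block size):

    DISCRETE_VALUES = 36 ** 4  # assumed; the real value depends on the cuid config

    class RollingCounter:
        def __init__(self):
            self._counter = 0

        def next(self):
            self._counter += 1
            if self._counter >= DISCRETE_VALUES:
                self._counter = 0  # wrap instead of growing without bound
            return self._counter

    c = RollingCounter()
    assert [c.next() for _ in range(3)] == [1, 2, 3]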
2,362
xhtml2pdf/xhtml2pdf
xhtml2pdf/w3c/cssParser.py
CSSParser.parseInline
def parseInline(self, src): """Parses CSS inline source string using the current cssBuilder. Use to parse a tag's 'style'-like attribute.""" self.cssBuilder.beginInline() try: try: src, properties = self._parseDeclarationGroup(src.strip(), braces=False) except self.ParseError as err: err.setFullCSSSource(src, inline=True) raise result = self.cssBuilder.inline(properties) finally: self.cssBuilder.endInline() return result
python
def parseInline(self, src): """Parses CSS inline source string using the current cssBuilder. Use to parse a tag's 'style'-like attribute.""" self.cssBuilder.beginInline() try: try: src, properties = self._parseDeclarationGroup(src.strip(), braces=False) except self.ParseError as err: err.setFullCSSSource(src, inline=True) raise result = self.cssBuilder.inline(properties) finally: self.cssBuilder.endInline() return result
['def', 'parseInline', '(', 'self', ',', 'src', ')', ':', 'self', '.', 'cssBuilder', '.', 'beginInline', '(', ')', 'try', ':', 'try', ':', 'src', ',', 'properties', '=', 'self', '.', '_parseDeclarationGroup', '(', 'src', '.', 'strip', '(', ')', ',', 'braces', '=', 'False', ')', 'except', 'self', '.', 'ParseError', 'as', 'err', ':', 'err', '.', 'setFullCSSSource', '(', 'src', ',', 'inline', '=', 'True', ')', 'raise', 'result', '=', 'self', '.', 'cssBuilder', '.', 'inline', '(', 'properties', ')', 'finally', ':', 'self', '.', 'cssBuilder', '.', 'endInline', '(', ')', 'return', 'result']
Parses CSS inline source string using the current cssBuilder. Use to parse a tag's 'style'-like attribute.
['Parses', 'CSS', 'inline', 'source', 'string', 'using', 'the', 'current', 'cssBuilder', '.', 'Use', 'to', 'parse', 'a', 'tag', 's', 'style', '-', 'like', 'attribute', '.']
train
https://github.com/xhtml2pdf/xhtml2pdf/blob/230357a392f48816532d3c2fa082a680b80ece48/xhtml2pdf/w3c/cssParser.py#L459-L474
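A toy sketch of inline-declaration parsing using plain string splitting (no real CSS grammar; the actual parser tokenizes properly and reports errors):

    def parse_inline(src):
        """Turn 'color: red; margin: 0' into a property -> value dict."""
        properties = {}
        for declaration in src.split(";"):
            if ":" not in declaration:
                continue  # skip empty or trailing chunks
            name, _, value = declaration.partition(":")
            properties[name.strip().lower()] = value.strip()
        return properties

    assert parse_inline("Color: red; margin: 0;") == {"color": "red", "margin": "0"}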
2,363
erm0l0v/django-fake-model
django_fake_model/models.py
FakeModel.create_table
def create_table(cls): """ create_table Manually create a temporary table for model in test data base. :return: """ schema_editor = getattr(connection, 'schema_editor', None) if schema_editor: with schema_editor() as schema_editor: schema_editor.create_model(cls) else: raw_sql, _ = connection.creation.sql_create_model( cls, no_style(), []) cls.delete_table() cursor = connection.cursor() try: cursor.execute(*raw_sql) finally: cursor.close()
python
def create_table(cls): """ create_table Manually create a temporary table for model in test data base. :return: """ schema_editor = getattr(connection, 'schema_editor', None) if schema_editor: with schema_editor() as schema_editor: schema_editor.create_model(cls) else: raw_sql, _ = connection.creation.sql_create_model( cls, no_style(), []) cls.delete_table() cursor = connection.cursor() try: cursor.execute(*raw_sql) finally: cursor.close()
['def', 'create_table', '(', 'cls', ')', ':', 'schema_editor', '=', 'getattr', '(', 'connection', ',', "'schema_editor'", ',', 'None', ')', 'if', 'schema_editor', ':', 'with', 'schema_editor', '(', ')', 'as', 'schema_editor', ':', 'schema_editor', '.', 'create_model', '(', 'cls', ')', 'else', ':', 'raw_sql', ',', '_', '=', 'connection', '.', 'creation', '.', 'sql_create_model', '(', 'cls', ',', 'no_style', '(', ')', ',', '[', ']', ')', 'cls', '.', 'delete_table', '(', ')', 'cursor', '=', 'connection', '.', 'cursor', '(', ')', 'try', ':', 'cursor', '.', 'execute', '(', '*', 'raw_sql', ')', 'finally', ':', 'cursor', '.', 'close', '(', ')']
create_table Manually create a temporary table for model in test data base. :return:
['create_table']
train
https://github.com/erm0l0v/django-fake-model/blob/42fb28ac3aa4db5f82b6cb97a7c2a92b83b36314/django_fake_model/models.py#L22-L43
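A hedged sketch of the modern branch, spelled out with Django's schema editor (the model is hypothetical and assumes a configured Django project):

    from django.db import connection, models

    class MyFakeModel(models.Model):              # hypothetical test-only model
        name = models.CharField(max_length=16)

        class Meta:
            app_label = "tests"

    with connection.schema_editor() as editor:
        editor.create_model(MyFakeModel)          # create the table...
    with connection.schema_editor() as editor:
        editor.delete_model(MyFakeModel)          # ...and drop it afterwards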
2,364
quantmind/pulsar
pulsar/apps/wsgi/formdata.py
MultipartPart.base64
def base64(self, charset=None): '''Data encoded as base 64''' return b64encode(self.bytes()).decode(charset or self.charset)
python
def base64(self, charset=None): '''Data encoded as base 64''' return b64encode(self.bytes()).decode(charset or self.charset)
['def', 'base64', '(', 'self', ',', 'charset', '=', 'None', ')', ':', 'return', 'b64encode', '(', 'self', '.', 'bytes', '(', ')', ')', '.', 'decode', '(', 'charset', 'or', 'self', '.', 'charset', ')']
Data encoded as base 64
['Data', 'encoded', 'as', 'base', '64']
train
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/apps/wsgi/formdata.py#L318-L320
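The method is a thin wrapper over the stdlib; the equivalent direct call:

    from base64 import b64encode

    payload = b"multipart bytes"
    encoded = b64encode(payload).decode("utf-8")  # charset defaults to the part's charset
    assert encoded == "bXVsdGlwYXJ0IGJ5dGVz"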
2,365
aliyun/aliyun-log-python-sdk
aliyun/log/logclient.py
LogClient.get_index_config
def get_index_config(self, project_name, logstore_name): """ get index config detail of a logstore Unsuccessful operation will cause a LogException. :type project_name: string :param project_name: the Project name :type logstore_name: string :param logstore_name: the logstore name :return: GetIndexResponse :raise: LogException """ headers = {} params = {} resource = "/logstores/" + logstore_name + "/index" (resp, header) = self._send("GET", project_name, None, resource, params, headers) return GetIndexResponse(resp, header)
python
def get_index_config(self, project_name, logstore_name): """ get index config detail of a logstore Unsuccessful operation will cause a LogException. :type project_name: string :param project_name: the Project name :type logstore_name: string :param logstore_name: the logstore name :return: GetIndexResponse :raise: LogException """ headers = {} params = {} resource = "/logstores/" + logstore_name + "/index" (resp, header) = self._send("GET", project_name, None, resource, params, headers) return GetIndexResponse(resp, header)
['def', 'get_index_config', '(', 'self', ',', 'project_name', ',', 'logstore_name', ')', ':', 'headers', '=', '{', '}', 'params', '=', '{', '}', 'resource', '=', '"/logstores/"', '+', 'logstore_name', '+', '"/index"', '(', 'resp', ',', 'header', ')', '=', 'self', '.', '_send', '(', '"GET"', ',', 'project_name', ',', 'None', ',', 'resource', ',', 'params', ',', 'headers', ')', 'return', 'GetIndexResponse', '(', 'resp', ',', 'header', ')']
get index config detail of a logstore Unsuccessful operation will cause a LogException. :type project_name: string :param project_name: the Project name :type logstore_name: string :param logstore_name: the logstore name :return: GetIndexResponse :raise: LogException
['get', 'index', 'config', 'detail', 'of', 'a', 'logstore', 'Unsuccessful', 'operation', 'will', 'cause', 'a', 'LogException', '.', ':', 'type', 'project_name', ':', 'string', ':', 'param', 'project_name', ':', 'the', 'Project', 'name', ':', 'type', 'logstore_name', ':', 'string', ':', 'param', 'logstore_name', ':', 'the', 'logstore', 'name', ':', 'return', ':', 'GetIndexResponse', ':', 'raise', ':', 'LogException']
train
https://github.com/aliyun/aliyun-log-python-sdk/blob/ac383db0a16abf1e5ef7df36074374184b43516e/aliyun/log/logclient.py#L1479-L1498
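A hedged usage sketch; the endpoint and credentials are hypothetical, and the response helper methods may differ across SDK versions:

    from aliyun.log import LogClient

    client = LogClient("cn-hangzhou.log.aliyuncs.com",
                       "YOUR_ACCESS_KEY_ID", "YOUR_ACCESS_KEY")
    res = client.get_index_config("my-project", "my-logstore")
    res.log_print()  # dump status, headers, and body for inspection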
2,366
amzn/ion-python
amazon/ion/simple_types.py
_IonNature._copy
def _copy(self): """Copies this instance. Its IonEvent (if any) is not preserved. Keeping this protected until/unless we decide there's use for it publicly. """ args, kwargs = self._to_constructor_args(self) value = self.__class__(*args, **kwargs) value.ion_event = None value.ion_type = self.ion_type value.ion_annotations = self.ion_annotations return value
python
def _copy(self): """Copies this instance. Its IonEvent (if any) is not preserved. Keeping this protected until/unless we decide there's use for it publicly. """ args, kwargs = self._to_constructor_args(self) value = self.__class__(*args, **kwargs) value.ion_event = None value.ion_type = self.ion_type value.ion_annotations = self.ion_annotations return value
['def', '_copy', '(', 'self', ')', ':', 'args', ',', 'kwargs', '=', 'self', '.', '_to_constructor_args', '(', 'self', ')', 'value', '=', 'self', '.', '__class__', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'value', '.', 'ion_event', '=', 'None', 'value', '.', 'ion_type', '=', 'self', '.', 'ion_type', 'value', '.', 'ion_annotations', '=', 'self', '.', 'ion_annotations', 'return', 'value']
Copies this instance. Its IonEvent (if any) is not preserved. Keeping this protected until/unless we decide there's use for it publicly.
['Copies', 'this', 'instance', '.', 'Its', 'IonEvent', '(', 'if', 'any', ')', 'is', 'not', 'preserved', '.']
train
https://github.com/amzn/ion-python/blob/0b21fa3ba7755f55f745e4aa970d86343b82449d/amazon/ion/simple_types.py#L57-L67
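A generic sketch of the copy-via-constructor-args pattern the method uses (the class and fields are hypothetical):

    class Annotated:
        def __init__(self, value):
            self.value = value
            self.annotations = ()

        @staticmethod
        def _to_constructor_args(instance):
            return (instance.value,), {}

        def _copy(self):
            args, kwargs = self._to_constructor_args(self)
            clone = self.__class__(*args, **kwargs)  # event deliberately not carried over
            clone.annotations = self.annotations
            return clone

    a = Annotated(3)
    a.annotations = ("tag",)
    assert a._copy().annotations == ("tag",)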
2,367
moonso/interval_tree
interval_tree/interval_tree.py
IntervalTree.insert_data
def insert_data(self, node, data, start, end): """loops through all the data and inserts them into the empty tree""" for item in data: self.recursive_insert(node, [item[0], item[1]], item[-1], start, end)
python
def insert_data(self, node, data, start, end): """loops through all the data and inserts them into the empty tree""" for item in data: self.recursive_insert(node, [item[0], item[1]], item[-1], start, end)
['def', 'insert_data', '(', 'self', ',', 'node', ',', 'data', ',', 'start', ',', 'end', ')', ':', 'for', 'item', 'in', 'data', ':', 'self', '.', 'recursive_insert', '(', 'node', ',', '[', 'item', '[', '0', ']', ',', 'item', '[', '1', ']', ']', ',', 'item', '[', '-', '1', ']', ',', 'start', ',', 'end', ')']
loops through all the data and inserts them into the empty tree
['loops', 'through', 'all', 'the', 'data', 'and', 'inserts', 'them', 'into', 'the', 'empty', 'tree']
train
https://github.com/moonso/interval_tree/blob/c588177f5bd90bd9e2f1447216c78b024353f7a1/interval_tree/interval_tree.py#L141-L144
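A minimal centered-interval-tree insertion sketch; the node layout is hypothetical (the real class recurses with explicit start/end bounds rather than child pointers alone):

    class Node:
        def __init__(self, center):
            self.center = center
            self.intervals = []            # intervals that overlap this center
            self.left = self.right = None

    def insert(node, start, end, value):
        if end < node.center and node.left is not None:
            insert(node.left, start, end, value)        # wholly left of center
        elif start > node.center and node.right is not None:
            insert(node.right, start, end, value)       # wholly right of center
        else:
            node.intervals.append((start, end, value))  # overlaps, or leaf reached

    root = Node(50)
    root.left, root.right = Node(25), Node(75)
    for start, end, value in [(10, 20, "a"), (40, 60, "b")]:
        insert(root, start, end, value)
    assert root.intervals == [(40, 60, "b")]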
2,368
wmayner/pyphi
pyphi/examples.py
blackbox_network
def blackbox_network(): """A micro-network to demonstrate blackboxing. Diagram:: +----------+ +-------------------->+ A (COPY) + <---------------+ | +----------+ | | +----------+ | | +-----------+ B (COPY) + <-------------+ | v v +----------+ | | +-+-----+-+ +-+-----+-+ | | | | | C (AND) | | F (AND) | | | | | +-+-----+-+ +-+-----+-+ | | ^ ^ | | +----------+ | | | +---------> + D (COPY) +---------------+ | | +----------+ | | +----------+ | +-------------------> + E (COPY) +-----------------+ +----------+ Connectivity Matrix: +---+---+---+---+---+---+---+ | . | A | B | C | D | E | F | +---+---+---+---+---+---+---+ | A | 0 | 0 | 1 | 0 | 0 | 0 | +---+---+---+---+---+---+---+ | B | 0 | 0 | 1 | 0 | 0 | 0 | +---+---+---+---+---+---+---+ | C | 0 | 0 | 0 | 1 | 1 | 0 | +---+---+---+---+---+---+---+ | D | 0 | 0 | 0 | 0 | 0 | 1 | +---+---+---+---+---+---+---+ | E | 0 | 0 | 0 | 0 | 0 | 1 | +---+---+---+---+---+---+---+ | F | 1 | 1 | 0 | 0 | 0 | 0 | +---+---+---+---+---+---+---+ In the documentation example, the state is (0, 0, 0, 0, 0, 0). """ num_nodes = 6 num_states = 2 ** num_nodes tpm = np.zeros((num_states, num_nodes)) for index, previous_state in enumerate(all_states(num_nodes)): current_state = [0 for i in range(num_nodes)] if previous_state[5] == 1: current_state[0] = 1 current_state[1] = 1 if previous_state[0] == 1 and previous_state[1]: current_state[2] = 1 if previous_state[2] == 1: current_state[3] = 1 current_state[4] = 1 if previous_state[3] == 1 and previous_state[4] == 1: current_state[5] = 1 tpm[index, :] = current_state cm = np.array([ [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 1, 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 1], [1, 1, 0, 0, 0, 0] ]) return Network(tpm, cm, node_labels=LABELS[:tpm.shape[1]])
python
def blackbox_network(): """A micro-network to demonstrate blackboxing. Diagram:: +----------+ +-------------------->+ A (COPY) + <---------------+ | +----------+ | | +----------+ | | +-----------+ B (COPY) + <-------------+ | v v +----------+ | | +-+-----+-+ +-+-----+-+ | | | | | C (AND) | | F (AND) | | | | | +-+-----+-+ +-+-----+-+ | | ^ ^ | | +----------+ | | | +---------> + D (COPY) +---------------+ | | +----------+ | | +----------+ | +-------------------> + E (COPY) +-----------------+ +----------+ Connectivity Matrix: +---+---+---+---+---+---+---+ | . | A | B | C | D | E | F | +---+---+---+---+---+---+---+ | A | 0 | 0 | 1 | 0 | 0 | 0 | +---+---+---+---+---+---+---+ | B | 0 | 0 | 1 | 0 | 0 | 0 | +---+---+---+---+---+---+---+ | C | 0 | 0 | 0 | 1 | 1 | 0 | +---+---+---+---+---+---+---+ | D | 0 | 0 | 0 | 0 | 0 | 1 | +---+---+---+---+---+---+---+ | E | 0 | 0 | 0 | 0 | 0 | 1 | +---+---+---+---+---+---+---+ | F | 1 | 1 | 0 | 0 | 0 | 0 | +---+---+---+---+---+---+---+ In the documentation example, the state is (0, 0, 0, 0, 0, 0). """ num_nodes = 6 num_states = 2 ** num_nodes tpm = np.zeros((num_states, num_nodes)) for index, previous_state in enumerate(all_states(num_nodes)): current_state = [0 for i in range(num_nodes)] if previous_state[5] == 1: current_state[0] = 1 current_state[1] = 1 if previous_state[0] == 1 and previous_state[1]: current_state[2] = 1 if previous_state[2] == 1: current_state[3] = 1 current_state[4] = 1 if previous_state[3] == 1 and previous_state[4] == 1: current_state[5] = 1 tpm[index, :] = current_state cm = np.array([ [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 1, 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 1], [1, 1, 0, 0, 0, 0] ]) return Network(tpm, cm, node_labels=LABELS[:tpm.shape[1]])
['def', 'blackbox_network', '(', ')', ':', 'num_nodes', '=', '6', 'num_states', '=', '2', '**', 'num_nodes', 'tpm', '=', 'np', '.', 'zeros', '(', '(', 'num_states', ',', 'num_nodes', ')', ')', 'for', 'index', ',', 'previous_state', 'in', 'enumerate', '(', 'all_states', '(', 'num_nodes', ')', ')', ':', 'current_state', '=', '[', '0', 'for', 'i', 'in', 'range', '(', 'num_nodes', ')', ']', 'if', 'previous_state', '[', '5', ']', '==', '1', ':', 'current_state', '[', '0', ']', '=', '1', 'current_state', '[', '1', ']', '=', '1', 'if', 'previous_state', '[', '0', ']', '==', '1', 'and', 'previous_state', '[', '1', ']', ':', 'current_state', '[', '2', ']', '=', '1', 'if', 'previous_state', '[', '2', ']', '==', '1', ':', 'current_state', '[', '3', ']', '=', '1', 'current_state', '[', '4', ']', '=', '1', 'if', 'previous_state', '[', '3', ']', '==', '1', 'and', 'previous_state', '[', '4', ']', '==', '1', ':', 'current_state', '[', '5', ']', '=', '1', 'tpm', '[', 'index', ',', ':', ']', '=', 'current_state', 'cm', '=', 'np', '.', 'array', '(', '[', '[', '0', ',', '0', ',', '1', ',', '0', ',', '0', ',', '0', ']', ',', '[', '0', ',', '0', ',', '1', ',', '0', ',', '0', ',', '0', ']', ',', '[', '0', ',', '0', ',', '0', ',', '1', ',', '1', ',', '0', ']', ',', '[', '0', ',', '0', ',', '0', ',', '0', ',', '0', ',', '1', ']', ',', '[', '0', ',', '0', ',', '0', ',', '0', ',', '0', ',', '1', ']', ',', '[', '1', ',', '1', ',', '0', ',', '0', ',', '0', ',', '0', ']', ']', ')', 'return', 'Network', '(', 'tpm', ',', 'cm', ',', 'node_labels', '=', 'LABELS', '[', ':', 'tpm', '.', 'shape', '[', '1', ']', ']', ')']
A micro-network to demonstrate blackboxing. Diagram:: +----------+ +-------------------->+ A (COPY) + <---------------+ | +----------+ | | +----------+ | | +-----------+ B (COPY) + <-------------+ | v v +----------+ | | +-+-----+-+ +-+-----+-+ | | | | | C (AND) | | F (AND) | | | | | +-+-----+-+ +-+-----+-+ | | ^ ^ | | +----------+ | | | +---------> + D (COPY) +---------------+ | | +----------+ | | +----------+ | +-------------------> + E (COPY) +-----------------+ +----------+ Connectivity Matrix: +---+---+---+---+---+---+---+ | . | A | B | C | D | E | F | +---+---+---+---+---+---+---+ | A | 0 | 0 | 1 | 0 | 0 | 0 | +---+---+---+---+---+---+---+ | B | 0 | 0 | 1 | 0 | 0 | 0 | +---+---+---+---+---+---+---+ | C | 0 | 0 | 0 | 1 | 1 | 0 | +---+---+---+---+---+---+---+ | D | 0 | 0 | 0 | 0 | 0 | 1 | +---+---+---+---+---+---+---+ | E | 0 | 0 | 0 | 0 | 0 | 1 | +---+---+---+---+---+---+---+ | F | 1 | 1 | 0 | 0 | 0 | 0 | +---+---+---+---+---+---+---+ In the documentation example, the state is (0, 0, 0, 0, 0, 0).
['A', 'micro', '-', 'network', 'to', 'demonstrate', 'blackboxing', '.']
train
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/examples.py#L531-L603
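The TPM is built by enumerating every binary state and writing the next state row by row; a self-contained sketch of that loop for a two-node copy loop (the little-endian state ordering is assumed to match pyphi's convention):

    import itertools
    import numpy as np

    def all_states(n):
        # little-endian binary tuples: (0,0), (1,0), (0,1), (1,1) for n == 2
        return (tuple(reversed(bits)) for bits in itertools.product((0, 1), repeat=n))

    num_nodes = 2
    tpm = np.zeros((2 ** num_nodes, num_nodes))
    for index, prev in enumerate(all_states(num_nodes)):
        tpm[index] = [prev[1], prev[0]]  # two COPY gates wired in a loop: A <- B, B <- A
    # each row is the next state of (A, B) given the previous state at that index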
2,369
pgjones/quart
quart/wrappers/response.py
Response.set_cookie
def set_cookie( # type: ignore self, key: str, value: AnyStr='', max_age: Optional[Union[int, timedelta]]=None, expires: Optional[datetime]=None, path: str='/', domain: Optional[str]=None, secure: bool=False, httponly: bool=False, ) -> None: """Set a cookie in the response headers. The arguments are the standard cookie morsels and this is a wrapper around the stdlib SimpleCookie code. """ if isinstance(value, bytes): value = value.decode() # type: ignore cookie = create_cookie(key, value, max_age, expires, path, domain, secure, httponly) # type: ignore # noqa: E501 self.headers.add('Set-Cookie', cookie.output(header=''))
python
def set_cookie( # type: ignore self, key: str, value: AnyStr='', max_age: Optional[Union[int, timedelta]]=None, expires: Optional[datetime]=None, path: str='/', domain: Optional[str]=None, secure: bool=False, httponly: bool=False, ) -> None: """Set a cookie in the response headers. The arguments are the standard cookie morsels and this is a wrapper around the stdlib SimpleCookie code. """ if isinstance(value, bytes): value = value.decode() # type: ignore cookie = create_cookie(key, value, max_age, expires, path, domain, secure, httponly) # type: ignore # noqa: E501 self.headers.add('Set-Cookie', cookie.output(header=''))
['def', 'set_cookie', '(', '# type: ignore', 'self', ',', 'key', ':', 'str', ',', 'value', ':', 'AnyStr', '=', "''", ',', 'max_age', ':', 'Optional', '[', 'Union', '[', 'int', ',', 'timedelta', ']', ']', '=', 'None', ',', 'expires', ':', 'Optional', '[', 'datetime', ']', '=', 'None', ',', 'path', ':', 'str', '=', "'/'", ',', 'domain', ':', 'Optional', '[', 'str', ']', '=', 'None', ',', 'secure', ':', 'bool', '=', 'False', ',', 'httponly', ':', 'bool', '=', 'False', ',', ')', '->', 'None', ':', 'if', 'isinstance', '(', 'value', ',', 'bytes', ')', ':', 'value', '=', 'value', '.', 'decode', '(', ')', '# type: ignore', 'cookie', '=', 'create_cookie', '(', 'key', ',', 'value', ',', 'max_age', ',', 'expires', ',', 'path', ',', 'domain', ',', 'secure', ',', 'httponly', ')', '# type: ignore # noqa: E501', 'self', '.', 'headers', '.', 'add', '(', "'Set-Cookie'", ',', 'cookie', '.', 'output', '(', 'header', '=', "''", ')', ')']
Set a cookie in the response headers. The arguments are the standard cookie morsels and this is a wrapper around the stdlib SimpleCookie code.
['Set', 'a', 'cookie', 'in', 'the', 'response', 'headers', '.']
train
https://github.com/pgjones/quart/blob/7cb2d3bd98e8746025764f2b933abc12041fa175/quart/wrappers/response.py#L341-L360
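create_cookie wraps the stdlib SimpleCookie; a sketch producing an equivalent header value with the stdlib directly:

    from http.cookies import SimpleCookie

    cookie = SimpleCookie()
    cookie["session"] = "abc123"
    cookie["session"]["path"] = "/"
    cookie["session"]["httponly"] = True

    header_value = cookie.output(header="").strip()
    # e.g. 'session=abc123; HttpOnly; Path=/' -> added under a 'Set-Cookie' header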
2,370
tensorflow/tensor2tensor
tensor2tensor/rl/evaluator.py
evaluate_world_model
def evaluate_world_model( agent_type, loop_hparams, planner_hparams, model_dir, policy_dir, random_starts_step_limit, debug_video_path, log_every_steps ): """Evaluates the world model.""" if debug_video_path: debug_video_path = os.path.join(debug_video_path, "0.avi") storage_env = rl_utils.setup_env(loop_hparams, batch_size=1, max_num_noops=0) stacked_env = rl_utils.BatchStackWrapper( storage_env, loop_hparams.frame_stack_size ) policy_hparams = trainer_lib.create_hparams(loop_hparams.base_algo_params) agent = make_agent_from_hparams( agent_type, storage_env, stacked_env, loop_hparams, policy_hparams, planner_hparams, model_dir, policy_dir, # TODO(koz4k): Loop over eval_sampling_temps? sampling_temp=loop_hparams.eval_sampling_temps[0], ) collect_frames_for_random_starts( storage_env, stacked_env, agent, loop_hparams.frame_stack_size, random_starts_step_limit, log_every_steps ) return rl_utils.evaluate_world_model( storage_env, loop_hparams, model_dir, debug_video_path, split=None )
python
def evaluate_world_model( agent_type, loop_hparams, planner_hparams, model_dir, policy_dir, random_starts_step_limit, debug_video_path, log_every_steps ): """Evaluates the world model.""" if debug_video_path: debug_video_path = os.path.join(debug_video_path, "0.avi") storage_env = rl_utils.setup_env(loop_hparams, batch_size=1, max_num_noops=0) stacked_env = rl_utils.BatchStackWrapper( storage_env, loop_hparams.frame_stack_size ) policy_hparams = trainer_lib.create_hparams(loop_hparams.base_algo_params) agent = make_agent_from_hparams( agent_type, storage_env, stacked_env, loop_hparams, policy_hparams, planner_hparams, model_dir, policy_dir, # TODO(koz4k): Loop over eval_sampling_temps? sampling_temp=loop_hparams.eval_sampling_temps[0], ) collect_frames_for_random_starts( storage_env, stacked_env, agent, loop_hparams.frame_stack_size, random_starts_step_limit, log_every_steps ) return rl_utils.evaluate_world_model( storage_env, loop_hparams, model_dir, debug_video_path, split=None )
['def', 'evaluate_world_model', '(', 'agent_type', ',', 'loop_hparams', ',', 'planner_hparams', ',', 'model_dir', ',', 'policy_dir', ',', 'random_starts_step_limit', ',', 'debug_video_path', ',', 'log_every_steps', ')', ':', 'if', 'debug_video_path', ':', 'debug_video_path', '=', 'os', '.', 'path', '.', 'join', '(', 'debug_video_path', ',', '"0.avi"', ')', 'storage_env', '=', 'rl_utils', '.', 'setup_env', '(', 'loop_hparams', ',', 'batch_size', '=', '1', ',', 'max_num_noops', '=', '0', ')', 'stacked_env', '=', 'rl_utils', '.', 'BatchStackWrapper', '(', 'storage_env', ',', 'loop_hparams', '.', 'frame_stack_size', ')', 'policy_hparams', '=', 'trainer_lib', '.', 'create_hparams', '(', 'loop_hparams', '.', 'base_algo_params', ')', 'agent', '=', 'make_agent_from_hparams', '(', 'agent_type', ',', 'storage_env', ',', 'stacked_env', ',', 'loop_hparams', ',', 'policy_hparams', ',', 'planner_hparams', ',', 'model_dir', ',', 'policy_dir', ',', '# TODO(koz4k): Loop over eval_sampling_temps?', 'sampling_temp', '=', 'loop_hparams', '.', 'eval_sampling_temps', '[', '0', ']', ',', ')', 'collect_frames_for_random_starts', '(', 'storage_env', ',', 'stacked_env', ',', 'agent', ',', 'loop_hparams', '.', 'frame_stack_size', ',', 'random_starts_step_limit', ',', 'log_every_steps', ')', 'return', 'rl_utils', '.', 'evaluate_world_model', '(', 'storage_env', ',', 'loop_hparams', ',', 'model_dir', ',', 'debug_video_path', ',', 'split', '=', 'None', ')']
Evaluates the world model.
['Evaluates', 'the', 'world', 'model', '.']
train
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/evaluator.py#L375-L400
2,371
spyder-ide/spyder
spyder/plugins/editor/widgets/editor.py
TabSwitcherWidget.keyPressEvent
def keyPressEvent(self, event): """Reimplement Qt method to allow cyclic behavior.""" if event.key() == Qt.Key_Down: self.select_row(1) elif event.key() == Qt.Key_Up: self.select_row(-1)
python
def keyPressEvent(self, event): """Reimplement Qt method to allow cyclic behavior.""" if event.key() == Qt.Key_Down: self.select_row(1) elif event.key() == Qt.Key_Up: self.select_row(-1)
['def', 'keyPressEvent', '(', 'self', ',', 'event', ')', ':', 'if', 'event', '.', 'key', '(', ')', '==', 'Qt', '.', 'Key_Down', ':', 'self', '.', 'select_row', '(', '1', ')', 'elif', 'event', '.', 'key', '(', ')', '==', 'Qt', '.', 'Key_Up', ':', 'self', '.', 'select_row', '(', '-', '1', ')']
Reimplement Qt method to allow cyclic behavior.
['Reimplement', 'Qt', 'method', 'to', 'allow', 'cyclic', 'behavior', '.']
train
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/widgets/editor.py#L384-L389
2,372
ambitioninc/django-query-builder
querybuilder/query.py
Query.select
def select(self, return_models=False, nest=False, bypass_safe_limit=False, sql=None, sql_args=None): """ Executes the SELECT statement and returns the rows as a list of dictionaries or a list of model instances :type return_models: bool :param return_models: Set to True to return a list of models instead of a list of dictionaries. Defaults to False :type nest: bool :param nest: Set to True to treat all double underscores in keynames as nested data. This will convert all keys with double underscores to dictionaries keyed off of the left side of the underscores. Ex: {"id": 1, "account__id": 1, "account__name": "Name"} becomes {"id": 1, "account": {"id": 1, "name": "Name"}} :type bypass_safe_limit: bool :param bypass_safe_limit: Ignores the safe_limit option even if the safe_limit is enabled :type sql: str or None :param sql: The sql to execute in the SELECT statement. If one is not specified, then the query will use ``self.get_sql()`` :type sql_args: str or None :param sql_args: The sql args to be used in the SELECT statement. If none are specified, then the query will use ``self.get_args()`` :rtype: list of dict :return: list of dictionaries of the rows """ # Check if we need to set a safe limit if bypass_safe_limit is False: if Query.enable_safe_limit: if self.count() > Query.safe_limit: self.limit(Query.safe_limit) # determine which sql to use if sql is None: sql = self.get_sql() # determine which sql args to use if sql_args is None: sql_args = self.get_args() # get the cursor to execute the query cursor = self.get_cursor() # execute the query cursor.execute(sql, sql_args) # get the results as a list of dictionaries rows = self._fetch_all_as_dict(cursor) # check if models should be returned instead of dictionaries if return_models: # set nesting to true, so the nested models can easily load the data nest = True # build model map of map name to model model_map = {} for join_item in self.joins: model_map[join_item.right_table.field_prefix] = join_item.right_table.model # check if results should be nested if nest: # convert keys with double underscores to dictionaries for row in rows: _row = row.copy() for key, value in _row.items(): set_value_for_keypath(row, key, value, True, '__') if '__' in key: row.pop(key) # create models if needed if return_models: model_class = self.tables[0].model new_rows = [] for row in rows: model = model_class() # assign all non-model keys first because django 1.5 requires # that the model has an id set before setting a property that is # a foreign key for key, value in row.items(): if key not in model_map: setattr(model, key, value) # assign all model instances for key, value in row.items(): if key in model_map: child_model = model_map[key]() for child_key, child_value in value.items(): setattr(child_model, child_key, child_value) value = child_model setattr(model, key, value) new_rows.append(model) rows = new_rows return rows
python
def select(self, return_models=False, nest=False, bypass_safe_limit=False, sql=None, sql_args=None): """ Executes the SELECT statement and returns the rows as a list of dictionaries or a list of model instances :type return_models: bool :param return_models: Set to True to return a list of models instead of a list of dictionaries. Defaults to False :type nest: bool :param nest: Set to True to treat all double underscores in keynames as nested data. This will convert all keys with double underscores to dictionaries keyed off of the left side of the underscores. Ex: {"id": 1, "account__id": 1, "account__name": "Name"} becomes {"id": 1, "account": {"id": 1, "name": "Name"}} :type bypass_safe_limit: bool :param bypass_safe_limit: Ignores the safe_limit option even if the safe_limit is enabled :type sql: str or None :param sql: The sql to execute in the SELECT statement. If one is not specified, then the query will use ``self.get_sql()`` :type sql_args: str or None :param sql_args: The sql args to be used in the SELECT statement. If none are specified, then the query will use ``self.get_args()`` :rtype: list of dict :return: list of dictionaries of the rows """ # Check if we need to set a safe limit if bypass_safe_limit is False: if Query.enable_safe_limit: if self.count() > Query.safe_limit: self.limit(Query.safe_limit) # determine which sql to use if sql is None: sql = self.get_sql() # determine which sql args to use if sql_args is None: sql_args = self.get_args() # get the cursor to execute the query cursor = self.get_cursor() # execute the query cursor.execute(sql, sql_args) # get the results as a list of dictionaries rows = self._fetch_all_as_dict(cursor) # check if models should be returned instead of dictionaries if return_models: # set nesting to true, so the nested models can easily load the data nest = True # build model map of map name to model model_map = {} for join_item in self.joins: model_map[join_item.right_table.field_prefix] = join_item.right_table.model # check if results should be nested if nest: # convert keys with double underscores to dictionaries for row in rows: _row = row.copy() for key, value in _row.items(): set_value_for_keypath(row, key, value, True, '__') if '__' in key: row.pop(key) # create models if needed if return_models: model_class = self.tables[0].model new_rows = [] for row in rows: model = model_class() # assign all non-model keys first because django 1.5 requires # that the model has an id set before setting a property that is # a foreign key for key, value in row.items(): if key not in model_map: setattr(model, key, value) # assign all model instances for key, value in row.items(): if key in model_map: child_model = model_map[key]() for child_key, child_value in value.items(): setattr(child_model, child_key, child_value) value = child_model setattr(model, key, value) new_rows.append(model) rows = new_rows return rows
['def', 'select', '(', 'self', ',', 'return_models', '=', 'False', ',', 'nest', '=', 'False', ',', 'bypass_safe_limit', '=', 'False', ',', 'sql', '=', 'None', ',', 'sql_args', '=', 'None', ')', ':', '# Check if we need to set a safe limit', 'if', 'bypass_safe_limit', 'is', 'False', ':', 'if', 'Query', '.', 'enable_safe_limit', ':', 'if', 'self', '.', 'count', '(', ')', '>', 'Query', '.', 'safe_limit', ':', 'self', '.', 'limit', '(', 'Query', '.', 'safe_limit', ')', '# determine which sql to use', 'if', 'sql', 'is', 'None', ':', 'sql', '=', 'self', '.', 'get_sql', '(', ')', '# determine which sql args to use', 'if', 'sql_args', 'is', 'None', ':', 'sql_args', '=', 'self', '.', 'get_args', '(', ')', '# get the cursor to execute the query', 'cursor', '=', 'self', '.', 'get_cursor', '(', ')', '# execute the query', 'cursor', '.', 'execute', '(', 'sql', ',', 'sql_args', ')', '# get the results as a list of dictionaries', 'rows', '=', 'self', '.', '_fetch_all_as_dict', '(', 'cursor', ')', '# check if models should be returned instead of dictionaries', 'if', 'return_models', ':', '# set nesting to true, so the nested models can easily load the data', 'nest', '=', 'True', '# build model map of map name to model', 'model_map', '=', '{', '}', 'for', 'join_item', 'in', 'self', '.', 'joins', ':', 'model_map', '[', 'join_item', '.', 'right_table', '.', 'field_prefix', ']', '=', 'join_item', '.', 'right_table', '.', 'model', '# check if results should be nested', 'if', 'nest', ':', '# convert keys with double underscores to dictionaries', 'for', 'row', 'in', 'rows', ':', '_row', '=', 'row', '.', 'copy', '(', ')', 'for', 'key', ',', 'value', 'in', '_row', '.', 'items', '(', ')', ':', 'set_value_for_keypath', '(', 'row', ',', 'key', ',', 'value', ',', 'True', ',', "'__'", ')', 'if', "'__'", 'in', 'key', ':', 'row', '.', 'pop', '(', 'key', ')', '# create models if needed', 'if', 'return_models', ':', 'model_class', '=', 'self', '.', 'tables', '[', '0', ']', '.', 'model', 'new_rows', '=', '[', ']', 'for', 'row', 'in', 'rows', ':', 'model', '=', 'model_class', '(', ')', '# assign all non-model keys first because django 1.5 requires', '# that the model has an id set before setting a property that is', '# a foreign key', 'for', 'key', ',', 'value', 'in', 'row', '.', 'items', '(', ')', ':', 'if', 'key', 'not', 'in', 'model_map', ':', 'setattr', '(', 'model', ',', 'key', ',', 'value', ')', '# assign all model instances', 'for', 'key', ',', 'value', 'in', 'row', '.', 'items', '(', ')', ':', 'if', 'key', 'in', 'model_map', ':', 'child_model', '=', 'model_map', '[', 'key', ']', '(', ')', 'for', 'child_key', ',', 'child_value', 'in', 'value', '.', 'items', '(', ')', ':', 'setattr', '(', 'child_model', ',', 'child_key', ',', 'child_value', ')', 'value', '=', 'child_model', 'setattr', '(', 'model', ',', 'key', ',', 'value', ')', 'new_rows', '.', 'append', '(', 'model', ')', 'rows', '=', 'new_rows', 'return', 'rows']
Executes the SELECT statement and returns the rows as a list of dictionaries or a list of model instances :type return_models: bool :param return_models: Set to True to return a list of models instead of a list of dictionaries. Defaults to False :type nest: bool :param nest: Set to True to treat all double underscores in keynames as nested data. This will convert all keys with double underscores to dictionaries keyed off of the left side of the underscores. Ex: {"id": 1, "account__id": 1, "account__name": "Name"} becomes {"id": 1, "account": {"id": 1, "name": "Name"}} :type bypass_safe_limit: bool :param bypass_safe_limit: Ignores the safe_limit option even if the safe_limit is enabled :type sql: str or None :param sql: The sql to execute in the SELECT statement. If one is not specified, then the query will use ``self.get_sql()`` :type sql_args: str or None :param sql_args: The sql args to be used in the SELECT statement. If none are specified, then the query will use ``self.get_args()`` :rtype: list of dict :return: list of dictionaries of the rows
['Executes', 'the', 'SELECT', 'statement', 'and', 'returns', 'the', 'rows', 'as', 'a', 'list', 'of', 'dictionaries', 'or', 'a', 'list', 'of', 'model', 'instances']
train
https://github.com/ambitioninc/django-query-builder/blob/113a7d845d3ddc6a45621b9880308e756f87c5bf/querybuilder/query.py#L1583-L1680
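The nest=True path of Query.select above is worth a standalone illustration. Below is a minimal sketch of the double-underscore nesting; nest_row is a hypothetical, simplified stand-in for querybuilder's set_value_for_keypath helper, not the library's actual implementation.

# Build nested dicts from keys like "account__id" (simplified stand-in,
# not querybuilder's real helper).
def nest_row(row, sep='__'):
    nested = {}
    for key, value in row.items():
        parts = key.split(sep)
        target = nested
        for part in parts[:-1]:
            target = target.setdefault(part, {})
        target[parts[-1]] = value
    return nested

row = {"id": 1, "account__id": 1, "account__name": "Name"}
print(nest_row(row))  # {'id': 1, 'account': {'id': 1, 'name': 'Name'}}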
2,373
jeffknupp/sandman
sandman/sandmanctl.py
run
def run(generate_pks, show_pks, host, port, uri): """Connect sandman to <URI> and start the API server/admin interface.""" app.config['SQLALCHEMY_DATABASE_URI'] = uri app.config['SANDMAN_GENERATE_PKS'] = generate_pks app.config['SANDMAN_SHOW_PKS'] = show_pks app.config['SERVER_HOST'] = host app.config['SERVER_PORT'] = port activate(name='sandmanctl') app.run(host=host, port=int(port), debug=True)
python
def run(generate_pks, show_pks, host, port, uri): """Connect sandman to <URI> and start the API server/admin interface.""" app.config['SQLALCHEMY_DATABASE_URI'] = uri app.config['SANDMAN_GENERATE_PKS'] = generate_pks app.config['SANDMAN_SHOW_PKS'] = show_pks app.config['SERVER_HOST'] = host app.config['SERVER_PORT'] = port activate(name='sandmanctl') app.run(host=host, port=int(port), debug=True)
['def', 'run', '(', 'generate_pks', ',', 'show_pks', ',', 'host', ',', 'port', ',', 'uri', ')', ':', 'app', '.', 'config', '[', "'SQLALCHEMY_DATABASE_URI'", ']', '=', 'uri', 'app', '.', 'config', '[', "'SANDMAN_GENERATE_PKS'", ']', '=', 'generate_pks', 'app', '.', 'config', '[', "'SANDMAN_SHOW_PKS'", ']', '=', 'show_pks', 'app', '.', 'config', '[', "'SERVER_HOST'", ']', '=', 'host', 'app', '.', 'config', '[', "'SERVER_PORT'", ']', '=', 'port', 'activate', '(', 'name', '=', "'sandmanctl'", ')', 'app', '.', 'run', '(', 'host', '=', 'host', ',', 'port', '=', 'int', '(', 'port', ')', ',', 'debug', '=', 'True', ')']
Connect sandman to <URI> and start the API server/admin interface.
['Connect', 'sandman', 'to', '<URI', '>', 'and', 'start', 'the', 'API', 'server', '/', 'admin', 'interface', '.']
train
https://github.com/jeffknupp/sandman/blob/253ea4d15cbccd9f0016d66fedd7478614cc0b2f/sandman/sandmanctl.py#L35-L44
2,374
smarie/python-valid8
valid8/validation_lib/collections.py
minlen
def minlen(min_length, strict=False # type: bool ): """ 'Minimum length' validation_function generator. Returns a validation_function to check that len(x) >= min_length (strict=False, default) or len(x) > min_length (strict=True) :param min_length: minimum length for x :param strict: Boolean flag to switch between len(x) >= min_length (strict=False) and len(x) > min_length (strict=True) :return: """ if strict: def minlen_(x): if len(x) > min_length: return True else: # raise Failure('minlen: len(x) > ' + str(min_length) + ' does not hold for x=' + str(x)) raise TooShort(wrong_value=x, min_length=min_length, strict=True) else: def minlen_(x): if len(x) >= min_length: return True else: # raise Failure('minlen: len(x) >= ' + str(min_length) + ' does not hold for x=' + str(x)) raise TooShort(wrong_value=x, min_length=min_length, strict=False) minlen_.__name__ = 'length_{}greater_than_{}'.format('strictly_' if strict else '', min_length) return minlen_
python
def minlen(min_length, strict=False # type: bool ): """ 'Minimum length' validation_function generator. Returns a validation_function to check that len(x) >= min_length (strict=False, default) or len(x) > min_length (strict=True) :param min_length: minimum length for x :param strict: Boolean flag to switch between len(x) >= min_length (strict=False) and len(x) > min_length (strict=True) :return: """ if strict: def minlen_(x): if len(x) > min_length: return True else: # raise Failure('minlen: len(x) > ' + str(min_length) + ' does not hold for x=' + str(x)) raise TooShort(wrong_value=x, min_length=min_length, strict=True) else: def minlen_(x): if len(x) >= min_length: return True else: # raise Failure('minlen: len(x) >= ' + str(min_length) + ' does not hold for x=' + str(x)) raise TooShort(wrong_value=x, min_length=min_length, strict=False) minlen_.__name__ = 'length_{}greater_than_{}'.format('strictly_' if strict else '', min_length) return minlen_
['def', 'minlen', '(', 'min_length', ',', 'strict', '=', 'False', '# type: bool', ')', ':', 'if', 'strict', ':', 'def', 'minlen_', '(', 'x', ')', ':', 'if', 'len', '(', 'x', ')', '>', 'min_length', ':', 'return', 'True', 'else', ':', "# raise Failure('minlen: len(x) > ' + str(min_length) + ' does not hold for x=' + str(x))", 'raise', 'TooShort', '(', 'wrong_value', '=', 'x', ',', 'min_length', '=', 'min_length', ',', 'strict', '=', 'True', ')', 'else', ':', 'def', 'minlen_', '(', 'x', ')', ':', 'if', 'len', '(', 'x', ')', '>=', 'min_length', ':', 'return', 'True', 'else', ':', "# raise Failure('minlen: len(x) >= ' + str(min_length) + ' does not hold for x=' + str(x))", 'raise', 'TooShort', '(', 'wrong_value', '=', 'x', ',', 'min_length', '=', 'min_length', ',', 'strict', '=', 'False', ')', 'minlen_', '.', '__name__', '=', "'length_{}greater_than_{}'", '.', 'format', '(', "'strictly_'", 'if', 'strict', 'else', "''", ',', 'min_length', ')', 'return', 'minlen_']
'Minimum length' validation_function generator. Returns a validation_function to check that len(x) >= min_length (strict=False, default) or len(x) > min_length (strict=True) :param min_length: minimum length for x :param strict: Boolean flag to switch between len(x) >= min_length (strict=False) and len(x) > min_length (strict=True) :return:
['Minimum', 'length', 'validation_function', 'generator', '.', 'Returns', 'a', 'validation_function', 'to', 'check', 'that', 'len', '(', 'x', ')', '>', '=', 'min_length', '(', 'strict', '=', 'False', 'default', ')', 'or', 'len', '(', 'x', ')', '>', 'min_length', '(', 'strict', '=', 'True', ')']
train
https://github.com/smarie/python-valid8/blob/5e15d1de11602933c5114eb9f73277ad91d97800/valid8/validation_lib/collections.py#L18-L47
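Usage of the generated validator, assuming valid8 is installed and that the import path matches the module path in the record (an assumption on my part):

# Import path assumed from the record's func_path_in_repository.
from valid8.validation_lib.collections import minlen

check = minlen(3)
print(check.__name__)    # length_greater_than_3
print(check([1, 2, 3]))  # True: len == 3 passes the non-strict check
try:
    check("ab")
except Exception as exc:  # TooShort in valid8
    print(type(exc).__name__)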
2,375
oscarlazoarjona/fast
fast/electric_field.py
electric_field_amplitude_top
def electric_field_amplitude_top(P, a, Omega=1e6, units="ad-hoc"): """Return the amplitude of the electric field for a top hat beam. This is the amplitude of a laser beam of power P (in Watts) and a top-hat\ intensity distribution of radius a (in meters). The value of E0 is given in\ rescaled units according to the frequency scale Omega (in Hertz)\ understood as absolute frequency (as opposed to angular frequency). >>> print(electric_field_amplitude_top(0.001, 0.001)) 27.8404157371 """ e0 = hbar*Omega/(e*a0) # This is the electric field scale. E0 = sqrt((c*mu0*P)/(Pi*a**2)) if units == "ad-hoc": E0 = E0/e0 return E0
python
def electric_field_amplitude_top(P, a, Omega=1e6, units="ad-hoc"): """Return the amplitude of the electric field for a top hat beam. This is the amplitude of a laser beam of power P (in Watts) and a top-hat\ intensity distribution of radius a (in meters). The value of E0 is given in\ rescaled units according to the frequency scale Omega (in Hertz)\ understood as absolute frequency (as opposed to angular frequency). >>> print(electric_field_amplitude_top(0.001, 0.001)) 27.8404157371 """ e0 = hbar*Omega/(e*a0) # This is the electric field scale. E0 = sqrt((c*mu0*P)/(Pi*a**2)) if units == "ad-hoc": E0 = E0/e0 return E0
['def', 'electric_field_amplitude_top', '(', 'P', ',', 'a', ',', 'Omega', '=', '1e6', ',', 'units', '=', '"ad-hoc"', ')', ':', 'e0', '=', 'hbar', '*', 'Omega', '/', '(', 'e', '*', 'a0', ')', '# This is the electric field scale.\r', 'E0', '=', 'sqrt', '(', '(', 'c', '*', 'mu0', '*', 'P', ')', '/', '(', 'Pi', '*', 'a', '**', '2', ')', ')', 'if', 'units', '==', '"ad-hoc"', ':', 'E0', '=', 'E0', '/', 'e0', 'return', 'E0']
Return the amplitude of the electric field for a top hat beam. This is the amplitude of a laser beam of power P (in Watts) and a top-hat\ intensity distribution of radius a (in meters). The value of E0 is given in\ rescaled units according to the frequency scale Omega (in Hertz)\ understood as absolute frequency (as opposed to angular frequency). >>> print(electric_field_amplitude_top(0.001, 0.001)) 27.8404157371
['Return', 'the', 'amplitude', 'of', 'the', 'electric', 'field', 'for', 'a', 'top', 'hat', 'beam', '.', 'This', 'is', 'the', 'amplitude', 'of', 'a', 'laser', 'beam', 'of', 'power', 'P', '(', 'in', 'Watts', ')', 'and', 'a', 'top', '-', 'hat', '\\', 'intensity', 'distribution', 'of', 'radius', 'a', '(', 'in', 'meters', ')', '.', 'The', 'value', 'of', 'E0', 'is', 'given', 'in', '\\', 'rescaled', 'units', 'according', 'to', 'the', 'frequency', 'scale', 'Omega', '(', 'in', 'Hertz', ')', '\\', 'understood', 'as', 'absolute', 'frequency', '(', 'as', 'opposed', 'to', 'angular', 'frequency', ')', '.', '>>>', 'print', '(', 'electric_field_amplitude_top', '(', '0', '.', '001', '0', '.', '001', '))', '27', '.', '8404157371']
train
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/electric_field.py#L341-L357
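The doctest value in the record can be reproduced directly from the formula E0 = sqrt(c*mu0*P/(pi*a^2)) rescaled by e0 = hbar*Omega/(e*a0). The sketch below uses scipy's CODATA constants, assuming they match the module's own hbar, e, a0, c and mu0:

from math import pi, sqrt
from scipy.constants import hbar, e, c, mu_0, physical_constants

a0 = physical_constants['Bohr radius'][0]
P, a, Omega = 0.001, 0.001, 1e6
E0_SI = sqrt(c * mu_0 * P / (pi * a ** 2))  # field amplitude in V/m
e0 = hbar * Omega / (e * a0)                # ad-hoc field scale
print(E0_SI / e0)                           # ~27.84, matching the doctest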
2,376
JukeboxPipeline/jukebox-core
src/jukeboxcore/filesys.py
TaskFileInfo.create_from_taskfile
def create_from_taskfile(self, taskfile): """Create a new TaskFileInfo and return it for the given taskfile :param taskfile: the taskfile to represent :type taskfile: :class:`jukeboxcore.djadapter.models.TaskFile` :returns: a taskfileinfo :rtype: :class:`TaskFileInfo` :raises: None """ return TaskFileInfo(task=taskfile.task, version=taskfile.version, releasetype=taskfile.releasetype, descriptor=taskfile.descriptor, typ=taskfile.typ)
python
def create_from_taskfile(self, taskfile): """Create a new TaskFileInfo and return it for the given taskfile :param taskfile: the taskfile to represent :type taskfile: :class:`jukeboxcore.djadapter.models.TaskFile` :returns: a taskfileinfo :rtype: :class:`TaskFileInfo` :raises: None """ return TaskFileInfo(task=taskfile.task, version=taskfile.version, releasetype=taskfile.releasetype, descriptor=taskfile.descriptor, typ=taskfile.typ)
['def', 'create_from_taskfile', '(', 'self', ',', 'taskfile', ')', ':', 'return', 'TaskFileInfo', '(', 'task', '=', 'taskfile', '.', 'task', ',', 'version', '=', 'taskfile', '.', 'version', ',', 'releasetype', '=', 'taskfile', '.', 'releasetype', ',', 'descriptor', '=', 'taskfile', '.', 'descriptor', ',', 'typ', '=', 'taskfile', '.', 'typ', ')']
Create a new TaskFileInfo and return it for the given taskfile :param taskfile: the taskfile to represent :type taskfile: :class:`jukeboxcore.djadapter.models.TaskFile` :returns: a taskfileinfo :rtype: :class:`TaskFileInfo` :raises: None
['Create', 'a', 'new', 'TaskFileInfo', 'and', 'return', 'it', 'for', 'the', 'given', 'taskfile']
train
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/filesys.py#L173-L183
2,377
BYU-PCCL/holodeck
example.py
editor_multi_agent_example
def editor_multi_agent_example(): """This editor example shows how to interact with holodeck worlds that have multiple agents. This is specifically for when working with UE4 directly and not a prebuilt binary. """ agent_definitions = [ AgentDefinition("uav0", agents.UavAgent, [Sensors.PIXEL_CAMERA, Sensors.LOCATION_SENSOR]), AgentDefinition("uav1", agents.UavAgent, [Sensors.LOCATION_SENSOR, Sensors.VELOCITY_SENSOR]) ] env = HolodeckEnvironment(agent_definitions, start_world=False) cmd0 = np.array([0, 0, -2, 10]) cmd1 = np.array([0, 0, 5, 10]) for i in range(10): env.reset() env.act("uav0", cmd0) env.act("uav1", cmd1) for _ in range(1000): states = env.tick() uav0_terminal = states["uav0"][Sensors.TERMINAL] uav1_reward = states["uav1"][Sensors.REWARD]
python
def editor_multi_agent_example(): """This editor example shows how to interact with holodeck worlds that have multiple agents. This is specifically for when working with UE4 directly and not a prebuilt binary. """ agent_definitions = [ AgentDefinition("uav0", agents.UavAgent, [Sensors.PIXEL_CAMERA, Sensors.LOCATION_SENSOR]), AgentDefinition("uav1", agents.UavAgent, [Sensors.LOCATION_SENSOR, Sensors.VELOCITY_SENSOR]) ] env = HolodeckEnvironment(agent_definitions, start_world=False) cmd0 = np.array([0, 0, -2, 10]) cmd1 = np.array([0, 0, 5, 10]) for i in range(10): env.reset() env.act("uav0", cmd0) env.act("uav1", cmd1) for _ in range(1000): states = env.tick() uav0_terminal = states["uav0"][Sensors.TERMINAL] uav1_reward = states["uav1"][Sensors.REWARD]
['def', 'editor_multi_agent_example', '(', ')', ':', 'agent_definitions', '=', '[', 'AgentDefinition', '(', '"uav0"', ',', 'agents', '.', 'UavAgent', ',', '[', 'Sensors', '.', 'PIXEL_CAMERA', ',', 'Sensors', '.', 'LOCATION_SENSOR', ']', ')', ',', 'AgentDefinition', '(', '"uav1"', ',', 'agents', '.', 'UavAgent', ',', '[', 'Sensors', '.', 'LOCATION_SENSOR', ',', 'Sensors', '.', 'VELOCITY_SENSOR', ']', ')', ']', 'env', '=', 'HolodeckEnvironment', '(', 'agent_definitions', ',', 'start_world', '=', 'False', ')', 'cmd0', '=', 'np', '.', 'array', '(', '[', '0', ',', '0', ',', '-', '2', ',', '10', ']', ')', 'cmd1', '=', 'np', '.', 'array', '(', '[', '0', ',', '0', ',', '5', ',', '10', ']', ')', 'for', 'i', 'in', 'range', '(', '10', ')', ':', 'env', '.', 'reset', '(', ')', 'env', '.', 'act', '(', '"uav0"', ',', 'cmd0', ')', 'env', '.', 'act', '(', '"uav1"', ',', 'cmd1', ')', 'for', '_', 'in', 'range', '(', '1000', ')', ':', 'states', '=', 'env', '.', 'tick', '(', ')', 'uav0_terminal', '=', 'states', '[', '"uav0"', ']', '[', 'Sensors', '.', 'TERMINAL', ']', 'uav1_reward', '=', 'states', '[', '"uav1"', ']', '[', 'Sensors', '.', 'REWARD', ']']
This editor example shows how to interact with holodeck worlds that have multiple agents. This is specifically for when working with UE4 directly and not a prebuilt binary.
['This', 'editor', 'example', 'shows', 'how', 'to', 'interact', 'with', 'holodeck', 'worlds', 'that', 'have', 'multiple', 'agents', '.', 'This', 'is', 'specifically', 'for', 'when', 'working', 'with', 'UE4', 'directly', 'and', 'not', 'a', 'prebuilt', 'binary', '.']
train
https://github.com/BYU-PCCL/holodeck/blob/01acd4013f5acbd9f61fbc9caaafe19975e8b121/example.py#L162-L183
2,378
AdvancedClimateSystems/uModbus
umodbus/server/serial/__init__.py
AbstractSerialServer.execute_route
def execute_route(self, meta_data, request_pdu): """ Execute configured route based on requests meta data and request PDU. :param meta_data: A dict with meta data. It must at least contain key 'unit_id'. :param request_pdu: A bytearray containing request PDU. :return: A bytearray containing response PDU. """ try: function = create_function_from_request_pdu(request_pdu) results =\ function.execute(meta_data['unit_id'], self.route_map) try: # ReadFunction's use results of callbacks to build response # PDU... return function.create_response_pdu(results) except TypeError: # ...other functions don't. return function.create_response_pdu() except ModbusError as e: function_code = get_function_code_from_request_pdu(request_pdu) return pack_exception_pdu(function_code, e.error_code) except Exception as e: log.exception('Could not handle request: {0}.'.format(e)) function_code = get_function_code_from_request_pdu(request_pdu) return pack_exception_pdu(function_code, ServerDeviceFailureError.error_code)
python
def execute_route(self, meta_data, request_pdu): """ Execute configured route based on requests meta data and request PDU. :param meta_data: A dict with meta data. It must at least contain key 'unit_id'. :param request_pdu: A bytearray containing request PDU. :return: A bytearray containing response PDU. """ try: function = create_function_from_request_pdu(request_pdu) results =\ function.execute(meta_data['unit_id'], self.route_map) try: # ReadFunction's use results of callbacks to build response # PDU... return function.create_response_pdu(results) except TypeError: # ...other functions don't. return function.create_response_pdu() except ModbusError as e: function_code = get_function_code_from_request_pdu(request_pdu) return pack_exception_pdu(function_code, e.error_code) except Exception as e: log.exception('Could not handle request: {0}.'.format(e)) function_code = get_function_code_from_request_pdu(request_pdu) return pack_exception_pdu(function_code, ServerDeviceFailureError.error_code)
['def', 'execute_route', '(', 'self', ',', 'meta_data', ',', 'request_pdu', ')', ':', 'try', ':', 'function', '=', 'create_function_from_request_pdu', '(', 'request_pdu', ')', 'results', '=', 'function', '.', 'execute', '(', 'meta_data', '[', "'unit_id'", ']', ',', 'self', '.', 'route_map', ')', 'try', ':', "# ReadFunction's use results of callbacks to build response", '# PDU...', 'return', 'function', '.', 'create_response_pdu', '(', 'results', ')', 'except', 'TypeError', ':', "# ...other functions don't.", 'return', 'function', '.', 'create_response_pdu', '(', ')', 'except', 'ModbusError', 'as', 'e', ':', 'function_code', '=', 'get_function_code_from_request_pdu', '(', 'request_pdu', ')', 'return', 'pack_exception_pdu', '(', 'function_code', ',', 'e', '.', 'error_code', ')', 'except', 'Exception', 'as', 'e', ':', 'log', '.', 'exception', '(', "'Could not handle request: {0}.'", '.', 'format', '(', 'e', ')', ')', 'function_code', '=', 'get_function_code_from_request_pdu', '(', 'request_pdu', ')', 'return', 'pack_exception_pdu', '(', 'function_code', ',', 'ServerDeviceFailureError', '.', 'error_code', ')']
Execute configured route based on requests meta data and request PDU. :param meta_data: A dict with meta data. It must at least contain key 'unit_id'. :param request_pdu: A bytearray containing request PDU. :return: A bytearray containing response PDU.
['Execute', 'configured', 'route', 'based', 'on', 'requests', 'meta', 'data', 'and', 'request', 'PDU', '.']
train
https://github.com/AdvancedClimateSystems/uModbus/blob/0560a42308003f4072d988f28042b8d55b694ad4/umodbus/server/serial/__init__.py#L88-L117
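The inner try/except TypeError above is a small dispatch trick: read functions build a response from results, write functions take no argument. A self-contained sketch with stub classes (not uModbus's real function objects):

class ReadFn:  # stub: builds its response from callback results
    def create_response_pdu(self, results):
        return b'read:' + bytes(results)

class WriteFn:  # stub: takes no results argument
    def create_response_pdu(self):
        return b'write-ack'

def respond(function, results):
    try:
        return function.create_response_pdu(results)
    except TypeError:  # signature mismatch -> retry without results
        return function.create_response_pdu()

print(respond(ReadFn(), [1, 2]))   # b'read:\x01\x02'
print(respond(WriteFn(), [1, 2]))  # b'write-ack'

One caveat of this pattern, visible in the sketch, is that a TypeError raised inside a read handler's own body would also be silently retried as a no-argument call.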
2,379
zhmcclient/python-zhmcclient
zhmcclient_mock/_urihandler.py
CpcStopHandler.post
def post(method, hmc, uri, uri_parms, body, logon_required, wait_for_completion): """Operation: Stop CPC (requires DPM mode).""" assert wait_for_completion is True # async not supported yet cpc_oid = uri_parms[0] try: cpc = hmc.cpcs.lookup_by_oid(cpc_oid) except KeyError: raise InvalidResourceError(method, uri) if not cpc.dpm_enabled: raise CpcNotInDpmError(method, uri, cpc) cpc.properties['status'] = 'not-operating'
python
def post(method, hmc, uri, uri_parms, body, logon_required, wait_for_completion): """Operation: Stop CPC (requires DPM mode).""" assert wait_for_completion is True # async not supported yet cpc_oid = uri_parms[0] try: cpc = hmc.cpcs.lookup_by_oid(cpc_oid) except KeyError: raise InvalidResourceError(method, uri) if not cpc.dpm_enabled: raise CpcNotInDpmError(method, uri, cpc) cpc.properties['status'] = 'not-operating'
['def', 'post', '(', 'method', ',', 'hmc', ',', 'uri', ',', 'uri_parms', ',', 'body', ',', 'logon_required', ',', 'wait_for_completion', ')', ':', 'assert', 'wait_for_completion', 'is', 'True', '# async not supported yet', 'cpc_oid', '=', 'uri_parms', '[', '0', ']', 'try', ':', 'cpc', '=', 'hmc', '.', 'cpcs', '.', 'lookup_by_oid', '(', 'cpc_oid', ')', 'except', 'KeyError', ':', 'raise', 'InvalidResourceError', '(', 'method', ',', 'uri', ')', 'if', 'not', 'cpc', '.', 'dpm_enabled', ':', 'raise', 'CpcNotInDpmError', '(', 'method', ',', 'uri', ',', 'cpc', ')', 'cpc', '.', 'properties', '[', "'status'", ']', '=', "'not-operating'"]
Operation: Stop CPC (requires DPM mode).
['Operation', ':', 'Stop', 'CPC', '(', 'requires', 'DPM', 'mode', ')', '.']
train
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient_mock/_urihandler.py#L1110-L1121
2,380
aloetesting/aloe_webdriver
aloe_webdriver/css.py
check_no_element_by_selector
def check_no_element_by_selector(self, selector): """Assert an element does not exist matching the given selector.""" elems = find_elements_by_jquery(world.browser, selector) if elems: raise AssertionError("Expected no matching elements, found {}.".format( len(elems)))
python
def check_no_element_by_selector(self, selector): """Assert an element does not exist matching the given selector.""" elems = find_elements_by_jquery(world.browser, selector) if elems: raise AssertionError("Expected no matching elements, found {}.".format( len(elems)))
['def', 'check_no_element_by_selector', '(', 'self', ',', 'selector', ')', ':', 'elems', '=', 'find_elements_by_jquery', '(', 'world', '.', 'browser', ',', 'selector', ')', 'if', 'elems', ':', 'raise', 'AssertionError', '(', '"Expected no matching elements, found {}."', '.', 'format', '(', 'len', '(', 'elems', ')', ')', ')']
Assert an element does not exist matching the given selector.
['Assert', 'an', 'element', 'does', 'not', 'exist', 'matching', 'the', 'given', 'selector', '.']
train
https://github.com/aloetesting/aloe_webdriver/blob/65d847da4bdc63f9c015cb19d4efdee87df8ffad/aloe_webdriver/css.py#L142-L147
2,381
lowandrew/OLCTools
spadespipeline/skesa.py
Skesa.best_assemblyfile
def best_assemblyfile(self): """ Determine whether the contigs.fasta output file from the assembler is present. If not, set the .bestassembly attribute to 'NA' """ for sample in self.metadata: try: # Set the name of the filtered assembly file filtered_outputfile = os.path.join(self.path, 'raw_assemblies', '{}.fasta'.format(sample.name)) # Set the name of the unfiltered spades assembly output file if os.path.isfile(sample.general.assemblyfile): size = os.path.getsize(sample.general.assemblyfile) # Ensure that the assembly isn't just an empty file if size == 0: sample.general.bestassemblyfile = 'NA' else: sample.general.bestassemblyfile = sample.general.assemblyfile shutil.copyfile(sample.general.bestassemblyfile, filtered_outputfile) else: sample.general.bestassemblyfile = 'NA' # Add the name and path of the filtered file to the metadata sample.general.filteredfile = filtered_outputfile except AttributeError: sample.general.assemblyfile = 'NA' sample.general.bestassemblyfile = 'NA'
python
def best_assemblyfile(self): """ Determine whether the contigs.fasta output file from the assembler is present. If not, set the .bestassembly attribute to 'NA' """ for sample in self.metadata: try: # Set the name of the filtered assembly file filtered_outputfile = os.path.join(self.path, 'raw_assemblies', '{}.fasta'.format(sample.name)) # Set the name of the unfiltered spades assembly output file if os.path.isfile(sample.general.assemblyfile): size = os.path.getsize(sample.general.assemblyfile) # Ensure that the assembly isn't just an empty file if size == 0: sample.general.bestassemblyfile = 'NA' else: sample.general.bestassemblyfile = sample.general.assemblyfile shutil.copyfile(sample.general.bestassemblyfile, filtered_outputfile) else: sample.general.bestassemblyfile = 'NA' # Add the name and path of the filtered file to the metadata sample.general.filteredfile = filtered_outputfile except AttributeError: sample.general.assemblyfile = 'NA' sample.general.bestassemblyfile = 'NA'
['def', 'best_assemblyfile', '(', 'self', ')', ':', 'for', 'sample', 'in', 'self', '.', 'metadata', ':', 'try', ':', '# Set the name of the filtered assembly file', 'filtered_outputfile', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'path', ',', "'raw_assemblies'", ',', "'{}.fasta'", '.', 'format', '(', 'sample', '.', 'name', ')', ')', '# Set the name of the unfiltered spades assembly output file', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'sample', '.', 'general', '.', 'assemblyfile', ')', ':', 'size', '=', 'os', '.', 'path', '.', 'getsize', '(', 'sample', '.', 'general', '.', 'assemblyfile', ')', "# Ensure that the assembly isn't just an empty file", 'if', 'size', '==', '0', ':', 'sample', '.', 'general', '.', 'bestassemblyfile', '=', "'NA'", 'else', ':', 'sample', '.', 'general', '.', 'bestassemblyfile', '=', 'sample', '.', 'general', '.', 'assemblyfile', 'shutil', '.', 'copyfile', '(', 'sample', '.', 'general', '.', 'bestassemblyfile', ',', 'filtered_outputfile', ')', 'else', ':', 'sample', '.', 'general', '.', 'bestassemblyfile', '=', "'NA'", '# Add the name and path of the filtered file to the metadata', 'sample', '.', 'general', '.', 'filteredfile', '=', 'filtered_outputfile', 'except', 'AttributeError', ':', 'sample', '.', 'general', '.', 'assemblyfile', '=', "'NA'", 'sample', '.', 'general', '.', 'bestassemblyfile', '=', "'NA'"]
Determine whether the contigs.fasta output file from the assembler is present. If not, set the .bestassembly attribute to 'NA'
['Determine', 'whether', 'the', 'contigs', '.', 'fasta', 'output', 'file', 'from', 'the', 'assembler', 'is', 'present', '.', 'If', 'not', 'set', 'the', '.', 'bestassembly', 'attribute', 'to', 'NA']
train
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/skesa.py#L179-L203
2,382
treycucco/bidon
bidon/db/model/model_base.py
ModelBase._other_to_dict
def _other_to_dict(self, other): """When serializing models, this allows attached models (children, parents, etc.) to also be serialized. """ if isinstance(other, ModelBase): return other.to_dict() elif isinstance(other, list): # TODO: what if it's not a list? return [self._other_to_dict(i) for i in other] else: return other
python
def _other_to_dict(self, other): """When serializing models, this allows attached models (children, parents, etc.) to also be serialized. """ if isinstance(other, ModelBase): return other.to_dict() elif isinstance(other, list): # TODO: what if it's not a list? return [self._other_to_dict(i) for i in other] else: return other
['def', '_other_to_dict', '(', 'self', ',', 'other', ')', ':', 'if', 'isinstance', '(', 'other', ',', 'ModelBase', ')', ':', 'return', 'other', '.', 'to_dict', '(', ')', 'elif', 'isinstance', '(', 'other', ',', 'list', ')', ':', "# TODO: what if it's not a list?", 'return', '[', 'self', '.', '_other_to_dict', '(', 'i', ')', 'for', 'i', 'in', 'other', ']', 'else', ':', 'return', 'other']
When serializing models, this allows attached models (children, parents, etc.) to also be serialized.
['When', 'serializing', 'models', 'this', 'allows', 'attached', 'models', '(', 'children', 'parents', 'etc', '.', ')', 'to', 'also', 'be', 'serialized', '.']
train
https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/db/model/model_base.py#L192-L202
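A standalone sketch of the recursive serialization pattern, with a stub class standing in for bidon's ModelBase:

class Stub:  # stand-in for ModelBase, not bidon's real class
    def __init__(self, **attrs):
        self.attrs = attrs
    def to_dict(self):
        return {k: other_to_dict(v) for k, v in self.attrs.items()}

def other_to_dict(other):
    if isinstance(other, Stub):
        return other.to_dict()
    if isinstance(other, list):
        return [other_to_dict(i) for i in other]
    return other

parent = Stub(id=1, children=[Stub(id=2), Stub(id=3)])
print(parent.to_dict())  # {'id': 1, 'children': [{'id': 2}, {'id': 3}]}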
2,383
GoogleCloudPlatform/datastore-ndb-python
ndb/model.py
Property._apply_list
def _apply_list(self, methods): """Return a single callable that applies a list of methods to a value. If a method returns None, the last value is kept; if it returns some other value, that replaces the last value. Exceptions are not caught. """ def call(value): for method in methods: newvalue = method(self, value) if newvalue is not None: value = newvalue return value return call
python
def _apply_list(self, methods): """Return a single callable that applies a list of methods to a value. If a method returns None, the last value is kept; if it returns some other value, that replaces the last value. Exceptions are not caught. """ def call(value): for method in methods: newvalue = method(self, value) if newvalue is not None: value = newvalue return value return call
['def', '_apply_list', '(', 'self', ',', 'methods', ')', ':', 'def', 'call', '(', 'value', ')', ':', 'for', 'method', 'in', 'methods', ':', 'newvalue', '=', 'method', '(', 'self', ',', 'value', ')', 'if', 'newvalue', 'is', 'not', 'None', ':', 'value', '=', 'newvalue', 'return', 'value', 'return', 'call']
Return a single callable that applies a list of methods to a value. If a method returns None, the last value is kept; if it returns some other value, that replaces the last value. Exceptions are not caught.
['Return', 'a', 'single', 'callable', 'that', 'applies', 'a', 'list', 'of', 'methods', 'to', 'a', 'value', '.']
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/model.py#L1322-L1335
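The None-means-keep chaining above is self-contained enough to demo directly; this sketch drops the self argument but is otherwise the same logic:

def apply_list(methods):
    def call(value):
        for method in methods:
            newvalue = method(value)
            if newvalue is not None:
                value = newvalue  # a non-None return replaces the value
        return value
    return call

chain = apply_list([str.strip, lambda v: None, str.upper])
print(chain("  hello  "))  # HELLO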
2,384
rshk/python-libxdo
xdo/__init__.py
Xdo.get_symbol_map
def get_symbol_map(self): """ If you need the symbol map, use this method. The symbol map is an array of string pairs mapping common tokens to X Keysym strings, such as "alt" to "Alt_L" :return: array of strings. """ # todo: make sure we return a list of strings! sm = _libxdo.xdo_get_symbol_map() # Return value is like: # ['alt', 'Alt_L', ..., None, None, None, ...] # We want to return only values up to the first None. # todo: any better solution than this? i = 0 ret = [] while True: c = sm[i] if c is None: return ret ret.append(c) i += 1
python
def get_symbol_map(self): """ If you need the symbol map, use this method. The symbol map is an array of string pairs mapping common tokens to X Keysym strings, such as "alt" to "Alt_L" :return: array of strings. """ # todo: make sure we return a list of strings! sm = _libxdo.xdo_get_symbol_map() # Return value is like: # ['alt', 'Alt_L', ..., None, None, None, ...] # We want to return only values up to the first None. # todo: any better solution than this? i = 0 ret = [] while True: c = sm[i] if c is None: return ret ret.append(c) i += 1
['def', 'get_symbol_map', '(', 'self', ')', ':', '# todo: make sure we return a list of strings!', 'sm', '=', '_libxdo', '.', 'xdo_get_symbol_map', '(', ')', '# Return value is like:', "# ['alt', 'Alt_L', ..., None, None, None, ...]", '# We want to return only values up to the first None.', '# todo: any better solution than this?', 'i', '=', '0', 'ret', '=', '[', ']', 'while', 'True', ':', 'c', '=', 'sm', '[', 'i', ']', 'if', 'c', 'is', 'None', ':', 'return', 'ret', 'ret', '.', 'append', '(', 'c', ')', 'i', '+=', '1']
If you need the symbol map, use this method. The symbol map is an array of string pairs mapping common tokens to X Keysym strings, such as "alt" to "Alt_L" :return: array of strings.
['If', 'you', 'need', 'the', 'symbol', 'map', 'use', 'this', 'method', '.']
train
https://github.com/rshk/python-libxdo/blob/84cafa5943b005bc423edd28203a5266b3579ac3/xdo/__init__.py#L782-L805
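The loop above is just a walk over a NULL-terminated C array; a pure-Python illustration with a made-up symbol map (a real call goes through libxdo via ctypes):

sm = ['alt', 'Alt_L', 'ctrl', 'Control_L', None, None]  # fake libxdo result
ret = []
for c in sm:
    if c is None:  # NULL terminator: stop collecting
        break
    ret.append(c)
print(dict(zip(ret[::2], ret[1::2])))  # {'alt': 'Alt_L', 'ctrl': 'Control_L'}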
2,385
rackerlabs/rackspace-python-neutronclient
neutronclient/v2_0/client.py
Client.update_security_group
def update_security_group(self, security_group, body=None): """Updates a security group.""" return self.put(self.security_group_path % security_group, body=body)
python
def update_security_group(self, security_group, body=None): """Updates a security group.""" return self.put(self.security_group_path % security_group, body=body)
['def', 'update_security_group', '(', 'self', ',', 'security_group', ',', 'body', '=', 'None', ')', ':', 'return', 'self', '.', 'put', '(', 'self', '.', 'security_group_path', '%', 'security_group', ',', 'body', '=', 'body', ')']
Updates a security group.
['Updates', 'a', 'security', 'group', '.']
train
https://github.com/rackerlabs/rackspace-python-neutronclient/blob/5a5009a8fe078e3aa1d582176669f1b28ab26bef/neutronclient/v2_0/client.py#L801-L804
2,386
PmagPy/PmagPy
pmagpy/pmag.py
execute
def execute(st, **kwargs): """ Work around for Python3 exec function which doesn't allow changes to the local namespace because of scope. This breaks a lot of the old functionality in the code which was originally in Python2. So this function runs just like exec except that it returns the output of the input statement to the local namespace. It may break if you start feeding it multiline monoliths of statements (haven't tested) but you shouldn't do that anyway (bad programming). Parameters ----------- st : the statement you want executed and for which you want the return kwargs : anything that may need to be in this namespace to execute st Returns ------- The return value of executing the input statement """ namespace = kwargs exec("b = {}".format(st), namespace) return namespace['b']
python
def execute(st, **kwargs): """ Work around for Python3 exec function which doesn't allow changes to the local namespace because of scope. This breaks a lot of the old functionality in the code which was originally in Python2. So this function runs just like exec except that it returns the output of the input statement to the local namespace. It may break if you start feeding it multiline monoliths of statements (haven't tested) but you shouldn't do that anyway (bad programming). Parameters ----------- st : the statement you want executed and for which you want the return kwargs : anything that may need to be in this namespace to execute st Returns ------- The return value of executing the input statement """ namespace = kwargs exec("b = {}".format(st), namespace) return namespace['b']
['def', 'execute', '(', 'st', ',', '*', '*', 'kwargs', ')', ':', 'namespace', '=', 'kwargs', 'exec', '(', '"b = {}"', '.', 'format', '(', 'st', ')', ',', 'namespace', ')', 'return', 'namespace', '[', "'b'", ']']
Work around for Python3 exec function which doesn't allow changes to the local namespace because of scope. This breaks a lot of the old functionality in the code which was originally in Python2. So this function runs just like exec except that it returns the output of the input statement to the local namespace. It may break if you start feeding it multiline monoliths of statements (haven't tested) but you shouldn't do that anyway (bad programming). Parameters ----------- st : the statement you want executed and for which you want the return kwargs : anything that may need to be in this namespace to execute st Returns ------- The return value of executing the input statement
['Work', 'around', 'for', 'Python3', 'exec', 'function', 'which', 'doesn', 't', 'allow', 'changes', 'to', 'the', 'local', 'namespace', 'because', 'of', 'scope', '.', 'This', 'breaks', 'a', 'lot', 'of', 'the', 'old', 'functionality', 'in', 'the', 'code', 'which', 'was', 'originally', 'in', 'Python2', '.', 'So', 'this', 'function', 'runs', 'just', 'like', 'exec', 'except', 'that', 'it', 'returns', 'the', 'output', 'of', 'the', 'input', 'statement', 'to', 'the', 'local', 'namespace', '.', 'It', 'may', 'break', 'if', 'you', 'start', 'feeding', 'it', 'multiline', 'monoliths', 'of', 'statements', '(', 'haven', 't', 'tested', ')', 'but', 'you', 'shouldn', 't', 'do', 'that', 'anyway', '(', 'bad', 'programming', ')', '.']
train
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L9684-L9703
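execute() above has no external dependencies, so its behaviour can be checked directly:

def execute(st, **kwargs):
    namespace = kwargs
    exec("b = {}".format(st), namespace)
    return namespace['b']

print(execute("x + y", x=1, y=2))            # 3
print(execute("sorted(vals)", vals=[3, 1]))  # [1, 3]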
2,387
SKA-ScienceDataProcessor/integration-prototype
sip/tango_control/tango_master/app/sdp_master_ds.py
main
def main(args=None, **kwargs): """Run the Tango SDP Master device server.""" LOG.info('Starting %s', __service_id__) return run([SDPMasterDevice], verbose=True, msg_stream=sys.stdout, args=args, **kwargs)
python
def main(args=None, **kwargs): """Run the Tango SDP Master device server.""" LOG.info('Starting %s', __service_id__) return run([SDPMasterDevice], verbose=True, msg_stream=sys.stdout, args=args, **kwargs)
['def', 'main', '(', 'args', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'LOG', '.', 'info', '(', "'Starting %s'", ',', '__service_id__', ')', 'return', 'run', '(', '[', 'SDPMasterDevice', ']', ',', 'verbose', '=', 'True', ',', 'msg_stream', '=', 'sys', '.', 'stdout', ',', 'args', '=', 'args', ',', '*', '*', 'kwargs', ')']
Run the Tango SDP Master device server.
['Run', 'the', 'Tango', 'SDP', 'Master', 'device', 'server', '.']
train
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/tango_control/tango_master/app/sdp_master_ds.py#L36-L40
2,388
horejsek/python-fastjsonschema
fastjsonschema/generator.py
CodeGenerator.global_state_code
def global_state_code(self): """ Returns global variables for generating function from ``func_code`` as code. Includes compiled regular expressions and imports. """ self._generate_func_code() if not self._compile_regexps: return '\n'.join( [ 'from fastjsonschema import JsonSchemaException', '', '', ] ) regexs = ['"{}": re.compile(r"{}")'.format(key, value.pattern) for key, value in self._compile_regexps.items()] return '\n'.join( [ 'import re', 'from fastjsonschema import JsonSchemaException', '', '', 'REGEX_PATTERNS = {', ' ' + ',\n '.join(regexs), '}', '', ] )
python
def global_state_code(self): """ Returns global variables for generating function from ``func_code`` as code. Includes compiled regular expressions and imports. """ self._generate_func_code() if not self._compile_regexps: return '\n'.join( [ 'from fastjsonschema import JsonSchemaException', '', '', ] ) regexs = ['"{}": re.compile(r"{}")'.format(key, value.pattern) for key, value in self._compile_regexps.items()] return '\n'.join( [ 'import re', 'from fastjsonschema import JsonSchemaException', '', '', 'REGEX_PATTERNS = {', ' ' + ',\n '.join(regexs), '}', '', ] )
['def', 'global_state_code', '(', 'self', ')', ':', 'self', '.', '_generate_func_code', '(', ')', 'if', 'not', 'self', '.', '_compile_regexps', ':', 'return', "'\\n'", '.', 'join', '(', '[', "'from fastjsonschema import JsonSchemaException'", ',', "''", ',', "''", ',', ']', ')', 'regexs', '=', '[', '\'"{}": re.compile(r"{}")\'', '.', 'format', '(', 'key', ',', 'value', '.', 'pattern', ')', 'for', 'key', ',', 'value', 'in', 'self', '.', '_compile_regexps', '.', 'items', '(', ')', ']', 'return', "'\\n'", '.', 'join', '(', '[', "'import re'", ',', "'from fastjsonschema import JsonSchemaException'", ',', "''", ',', "''", ',', "'REGEX_PATTERNS = {'", ',', "' '", '+', "',\\n '", '.', 'join', '(', 'regexs', ')', ',', "'}'", ',', "''", ',', ']', ')']
Returns global variables for generating function from ``func_code`` as code. Includes compiled regular expressions and imports.
['Returns', 'global', 'variables', 'for', 'generating', 'function', 'from', 'func_code', 'as', 'code', '.', 'Includes', 'compiled', 'regular', 'expressions', 'and', 'imports', '.']
train
https://github.com/horejsek/python-fastjsonschema/blob/8c38d0f91fa5d928ff629080cdb75ab23f96590f/fastjsonschema/generator.py#L81-L108
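The assembled prelude can be previewed without fastjsonschema by replaying the string-building logic on a made-up compiled pattern:

import re

compile_regexps = {'^[a-z]+$': re.compile(r'^[a-z]+$')}  # made-up pattern
regexs = ['"{}": re.compile(r"{}")'.format(key, value.pattern)
          for key, value in compile_regexps.items()]
print('\n'.join([
    'import re',
    'from fastjsonschema import JsonSchemaException',
    '',
    '',
    'REGEX_PATTERNS = {',
    '    ' + ',\n    '.join(regexs),
    '}',
    '',
]))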
2,389
teepark/greenhouse
greenhouse/scheduler.py
handle_exception
def handle_exception(klass, exc, tb, coro=None): """run all the registered exception handlers the first 3 arguments to this function match the output of ``sys.exc_info()`` :param klass: the exception klass :type klass: type :param exc: the exception instance :type exc: Exception :param tb: the traceback object :type tb: Traceback :param coro: behave as though the exception occurred in this coroutine (defaults to the current coroutine) :type coro: greenlet exception handlers run would be all those added with :func:`global_exception_handler`, and any added for the relevant coroutine with :func:`local_exception_handler`. """ if coro is None: coro = compat.getcurrent() replacement = [] for weak in state.local_exception_handlers.get(coro, ()): func = weak() if func is None: continue try: func(klass, exc, tb) except Exception: continue replacement.append(weak) if replacement: state.local_exception_handlers[coro][:] = replacement replacement = [] for weak in state.global_exception_handlers: func = weak() if func is None: continue try: func(klass, exc, tb) except Exception: continue replacement.append(weak) state.global_exception_handlers[:] = replacement
python
def handle_exception(klass, exc, tb, coro=None): """run all the registered exception handlers the first 3 arguments to this function match the output of ``sys.exc_info()`` :param klass: the exception klass :type klass: type :param exc: the exception instance :type exc: Exception :param tb: the traceback object :type tb: Traceback :param coro: behave as though the exception occurred in this coroutine (defaults to the current coroutine) :type coro: greenlet exception handlers run would be all those added with :func:`global_exception_handler`, and any added for the relevant coroutine with :func:`local_exception_handler`. """ if coro is None: coro = compat.getcurrent() replacement = [] for weak in state.local_exception_handlers.get(coro, ()): func = weak() if func is None: continue try: func(klass, exc, tb) except Exception: continue replacement.append(weak) if replacement: state.local_exception_handlers[coro][:] = replacement replacement = [] for weak in state.global_exception_handlers: func = weak() if func is None: continue try: func(klass, exc, tb) except Exception: continue replacement.append(weak) state.global_exception_handlers[:] = replacement
['def', 'handle_exception', '(', 'klass', ',', 'exc', ',', 'tb', ',', 'coro', '=', 'None', ')', ':', 'if', 'coro', 'is', 'None', ':', 'coro', '=', 'compat', '.', 'getcurrent', '(', ')', 'replacement', '=', '[', ']', 'for', 'weak', 'in', 'state', '.', 'local_exception_handlers', '.', 'get', '(', 'coro', ',', '(', ')', ')', ':', 'func', '=', 'weak', '(', ')', 'if', 'func', 'is', 'None', ':', 'continue', 'try', ':', 'func', '(', 'klass', ',', 'exc', ',', 'tb', ')', 'except', 'Exception', ':', 'continue', 'replacement', '.', 'append', '(', 'weak', ')', 'if', 'replacement', ':', 'state', '.', 'local_exception_handlers', '[', 'coro', ']', '[', ':', ']', '=', 'replacement', 'replacement', '=', '[', ']', 'for', 'weak', 'in', 'state', '.', 'global_exception_handlers', ':', 'func', '=', 'weak', '(', ')', 'if', 'func', 'is', 'None', ':', 'continue', 'try', ':', 'func', '(', 'klass', ',', 'exc', ',', 'tb', ')', 'except', 'Exception', ':', 'continue', 'replacement', '.', 'append', '(', 'weak', ')', 'state', '.', 'global_exception_handlers', '[', ':', ']', '=', 'replacement']
run all the registered exception handlers the first 3 arguments to this function match the output of ``sys.exc_info()`` :param klass: the exception klass :type klass: type :param exc: the exception instance :type exc: Exception :param tb: the traceback object :type tb: Traceback :param coro: behave as though the exception occurred in this coroutine (defaults to the current coroutine) :type coro: greenlet exception handlers run would be all those added with :func:`global_exception_handler`, and any added for the relevant coroutine with :func:`local_exception_handler`.
['run', 'all', 'the', 'registered', 'exception', 'handlers']
train
https://github.com/teepark/greenhouse/blob/8fd1be4f5443ba090346b5ec82fdbeb0a060d956/greenhouse/scheduler.py#L606-L659
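The handler lists above hold weak references, so a handler disappears when its last strong reference does, and a handler that raises is dropped from the list. A minimal self-contained sketch of that registry pattern:

import weakref

handlers = []

def add_handler(func):
    handlers.append(weakref.ref(func))

def fire(exc):
    alive = []
    for weak in handlers:
        func = weak()
        if func is None:   # referent was garbage-collected
            continue
        try:
            func(exc)
        except Exception:  # misbehaving handler is dropped
            continue
        alive.append(weak)
    handlers[:] = alive    # rebuild the list in place, as above

def log_handler(exc):
    print("handled:", exc)

add_handler(log_handler)
fire(ValueError("boom"))  # handled: boom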
2,390
inveniosoftware/invenio-db
invenio_db/utils.py
versioning_model_classname
def versioning_model_classname(manager, model): """Get the name of the versioned model class.""" if manager.options.get('use_module_name', True): return '%s%sVersion' % ( model.__module__.title().replace('.', ''), model.__name__) else: return '%sVersion' % (model.__name__,)
python
def versioning_model_classname(manager, model): """Get the name of the versioned model class.""" if manager.options.get('use_module_name', True): return '%s%sVersion' % ( model.__module__.title().replace('.', ''), model.__name__) else: return '%sVersion' % (model.__name__,)
['def', 'versioning_model_classname', '(', 'manager', ',', 'model', ')', ':', 'if', 'manager', '.', 'options', '.', 'get', '(', "'use_module_name'", ',', 'True', ')', ':', 'return', "'%s%sVersion'", '%', '(', 'model', '.', '__module__', '.', 'title', '(', ')', '.', 'replace', '(', "'.'", ',', "''", ')', ',', 'model', '.', '__name__', ')', 'else', ':', 'return', "'%sVersion'", '%', '(', 'model', '.', '__name__', ',', ')']
Get the name of the versioned model class.
['Get', 'the', 'name', 'of', 'the', 'versioned', 'model', 'class', '.']
train
https://github.com/inveniosoftware/invenio-db/blob/9009a4cf79574083e129909cf3d2656568550184/invenio_db/utils.py#L78-L84
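The naming rule is easiest to see on plain strings (stand-ins for a real Invenio manager/model pair):

def version_classname(module, name, use_module_name=True):
    if use_module_name:
        return '%s%sVersion' % (module.title().replace('.', ''), name)
    return '%sVersion' % (name,)

print(version_classname('myapp.models', 'Record'))         # MyappModelsRecordVersion
print(version_classname('myapp.models', 'Record', False))  # RecordVersion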
2,391
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/type.py
generated_target_ps
def generated_target_ps(is_suffix, type, prop_set): """ Returns suffix that should be used when generating target of 'type', with the specified properties. If no suffix was specified for 'type', returns suffix for base type, if any. """ if __debug__: from .property_set import PropertySet assert isinstance(is_suffix, (int, bool)) assert isinstance(type, basestring) assert isinstance(prop_set, PropertySet) key = (is_suffix, type, prop_set) v = __target_suffixes_cache.get(key, None) if not v: v = generated_target_ps_real(is_suffix, type, prop_set.raw()) __target_suffixes_cache[key] = v return v
python
def generated_target_ps(is_suffix, type, prop_set): """ Returns suffix that should be used when generating target of 'type', with the specified properties. If no suffix was specified for 'type', returns suffix for base type, if any. """ if __debug__: from .property_set import PropertySet assert isinstance(is_suffix, (int, bool)) assert isinstance(type, basestring) assert isinstance(prop_set, PropertySet) key = (is_suffix, type, prop_set) v = __target_suffixes_cache.get(key, None) if not v: v = generated_target_ps_real(is_suffix, type, prop_set.raw()) __target_suffixes_cache[key] = v return v
['def', 'generated_target_ps', '(', 'is_suffix', ',', 'type', ',', 'prop_set', ')', ':', 'if', '__debug__', ':', 'from', '.', 'property_set', 'import', 'PropertySet', 'assert', 'isinstance', '(', 'is_suffix', ',', '(', 'int', ',', 'bool', ')', ')', 'assert', 'isinstance', '(', 'type', ',', 'basestring', ')', 'assert', 'isinstance', '(', 'prop_set', ',', 'PropertySet', ')', 'key', '=', '(', 'is_suffix', ',', 'type', ',', 'prop_set', ')', 'v', '=', '__target_suffixes_cache', '.', 'get', '(', 'key', ',', 'None', ')', 'if', 'not', 'v', ':', 'v', '=', 'generated_target_ps_real', '(', 'is_suffix', ',', 'type', ',', 'prop_set', '.', 'raw', '(', ')', ')', '__target_suffixes_cache', '[', 'key', ']', '=', 'v', 'return', 'v']
Returns suffix that should be used when generating target of 'type', with the specified properties. If no suffix was specified for 'type', returns suffix for base type, if any.
['Returns', 'suffix', 'that', 'should', 'be', 'used', 'when', 'generating', 'target', 'of', 'type', 'with', 'the', 'specified', 'properties', '.', 'If', 'no', 'suffix', 'was', 'specified', 'for', 'type', 'returns', 'suffix', 'for', 'base', 'type', 'if', 'any', '.']
train
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/type.py#L334-L351
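The tuple-keyed memoisation above is a common Boost.Build idiom; below is a self-contained sketch with a stub in place of generated_target_ps_real (note that, as in the original, a falsy cached value would be recomputed):

_cache = {}

def expensive_lookup(is_suffix, type_, props):  # stub for the real lookup
    print("computing", (is_suffix, type_, props))
    return type_ + ('-suffix' if is_suffix else '-prefix')

def cached_lookup(is_suffix, type_, props):
    key = (is_suffix, type_, props)  # all parts must be hashable
    v = _cache.get(key, None)
    if not v:
        v = expensive_lookup(is_suffix, type_, props)
        _cache[key] = v
    return v

cached_lookup(True, 'EXE', ('<variant>debug',))  # computes
cached_lookup(True, 'EXE', ('<variant>debug',))  # cache hit, no print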
2,392
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/quaternion.py
Quaternion._q_to_dcm
def _q_to_dcm(self, q): """ Create DCM (Matrix3) from q :param q: array q which represents a quaternion [w, x, y, z] :returns: Matrix3 """ assert(len(q) == 4) arr = super(Quaternion, self)._q_to_dcm(q) return self._dcm_array_to_matrix3(arr)
python
def _q_to_dcm(self, q): """ Create DCM (Matrix3) from q :param q: array q which represents a quaternion [w, x, y, z] :returns: Matrix3 """ assert(len(q) == 4) arr = super(Quaternion, self)._q_to_dcm(q) return self._dcm_array_to_matrix3(arr)
['def', '_q_to_dcm', '(', 'self', ',', 'q', ')', ':', 'assert', '(', 'len', '(', 'q', ')', '==', '4', ')', 'arr', '=', 'super', '(', 'Quaternion', ',', 'self', ')', '.', '_q_to_dcm', '(', 'q', ')', 'return', 'self', '.', '_dcm_array_to_matrix3', '(', 'arr', ')']
Create DCM (Matrix3) from q :param q: array q which represents a quaternion [w, x, y, z] :returns: Matrix3
['Create', 'DCM', '(', 'Matrix3', ')', 'from', 'q', ':', 'param', 'q', ':', 'array', 'q', 'which', 'represents', 'a', 'quaternion', '[', 'w', 'x', 'y', 'z', ']', ':', 'returns', ':', 'Matrix3']
train
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/quaternion.py#L574-L582
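A quick sanity check, assuming pymavlink's `Quaternion` accepts a `[w, x, y, z]` sequence and exposes the components as `.q` (both assumptions about the surrounding class, not shown in this record); the identity quaternion should map to the identity rotation matrix:

```python
from pymavlink.quaternion import Quaternion

q = Quaternion([1.0, 0.0, 0.0, 0.0])  # w=1: no rotation
dcm = q._q_to_dcm(q.q)                # Matrix3 ~ identity, up to float error
print(dcm)
```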
2,393
nvbn/thefuck
thefuck/ui.py
read_actions
def read_actions(): """Yields actions for pressed keys.""" while True: key = get_key() # Handle arrows, j/k (qwerty), and n/e (colemak) if key in (const.KEY_UP, const.KEY_CTRL_N, 'k', 'e'): yield const.ACTION_PREVIOUS elif key in (const.KEY_DOWN, const.KEY_CTRL_P, 'j', 'n'): yield const.ACTION_NEXT elif key in (const.KEY_CTRL_C, 'q'): yield const.ACTION_ABORT elif key in ('\n', '\r'): yield const.ACTION_SELECT
python
def read_actions(): """Yields actions for pressed keys.""" while True: key = get_key() # Handle arrows, j/k (qwerty), and n/e (colemak) if key in (const.KEY_UP, const.KEY_CTRL_N, 'k', 'e'): yield const.ACTION_PREVIOUS elif key in (const.KEY_DOWN, const.KEY_CTRL_P, 'j', 'n'): yield const.ACTION_NEXT elif key in (const.KEY_CTRL_C, 'q'): yield const.ACTION_ABORT elif key in ('\n', '\r'): yield const.ACTION_SELECT
['def', 'read_actions', '(', ')', ':', 'while', 'True', ':', 'key', '=', 'get_key', '(', ')', '# Handle arrows, j/k (qwerty), and n/e (colemak)', 'if', 'key', 'in', '(', 'const', '.', 'KEY_UP', ',', 'const', '.', 'KEY_CTRL_N', ',', "'k'", ',', "'e'", ')', ':', 'yield', 'const', '.', 'ACTION_PREVIOUS', 'elif', 'key', 'in', '(', 'const', '.', 'KEY_DOWN', ',', 'const', '.', 'KEY_CTRL_P', ',', "'j'", ',', "'n'", ')', ':', 'yield', 'const', '.', 'ACTION_NEXT', 'elif', 'key', 'in', '(', 'const', '.', 'KEY_CTRL_C', ',', "'q'", ')', ':', 'yield', 'const', '.', 'ACTION_ABORT', 'elif', 'key', 'in', '(', "'\\n'", ',', "'\\r'", ')', ':', 'yield', 'const', '.', 'ACTION_SELECT']
Yields actions for pressed keys.
['Yields', 'actions', 'for', 'pressed', 'keys', '.']
train
https://github.com/nvbn/thefuck/blob/40ab4eb62db57627bff10cf029d29c94704086a2/thefuck/ui.py#L11-L24
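A minimal consumption sketch for the generator above; it assumes `const` and `read_actions` are importable as in the module shown:

```python
for action in read_actions():
    if action == const.ACTION_SELECT:      # Enter / Return
        print('run the selected command')
        break
    elif action == const.ACTION_ABORT:     # Ctrl+C or q
        print('aborted')
        break
    # ACTION_PREVIOUS / ACTION_NEXT would move the highlighted suggestion
```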
2,394
MacHu-GWU/angora-project
angora/dtypes/dicttree.py
DictTree.stats_on_depth
def stats_on_depth(d, depth): """Display the node stats info at a specific depth in this dict """ root_nodes, leaf_nodes = 0, 0 for _, node in DictTree.kv_depth(d, depth): if DictTree.length(node) == 0: leaf_nodes += 1 else: root_nodes += 1 total = root_nodes + leaf_nodes print("On depth %s, having %s root nodes, %s leaf nodes. " "%s nodes in total." % (depth, root_nodes, leaf_nodes, total))
python
def stats_on_depth(d, depth): """Display the node stats info at a specific depth in this dict """ root_nodes, leaf_nodes = 0, 0 for _, node in DictTree.kv_depth(d, depth): if DictTree.length(node) == 0: leaf_nodes += 1 else: root_nodes += 1 total = root_nodes + leaf_nodes print("On depth %s, having %s root nodes, %s leaf nodes. " "%s nodes in total." % (depth, root_nodes, leaf_nodes, total))
['def', 'stats_on_depth', '(', 'd', ',', 'depth', ')', ':', 'root_nodes', ',', 'leaf_nodes', '=', '0', ',', '0', 'for', '_', ',', 'node', 'in', 'DictTree', '.', 'kv_depth', '(', 'd', ',', 'depth', ')', ':', 'if', 'DictTree', '.', 'length', '(', 'node', ')', '==', '0', ':', 'leaf_nodes', '+=', '1', 'else', ':', 'root_nodes', '+=', '1', 'total', '=', 'root_nodes', '+', 'leaf_nodes', 'print', '(', '"On depth %s, having %s root nodes, %s leaf nodes. "', '"%s nodes in total."', '%', '(', 'depth', ',', 'root_nodes', ',', 'leaf_nodes', ',', 'total', ')', ')']
Display the node stats info at a specific depth in this dict
['Display', 'the', 'node', 'stats', 'info', 'at', 'a', 'specific', 'depth', 'in', 'this', 'dict']
train
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/dtypes/dicttree.py#L385-L396
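A hedged usage sketch; the exact node layout `DictTree.kv_depth` expects depends on angora's conventions, so the input and output below are illustrative only:

```python
d = {'root': {'a': {}, 'b': {'c': {}}}}
DictTree.stats_on_depth(d, 2)
# Illustrative output, assuming kv_depth yields the nodes two levels down:
# On depth 2, having 1 root nodes, 1 leaf nodes. 2 nodes in total.
```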
2,395
ethereum/py-evm
eth/vm/computation.py
BaseComputation._get_log_entries
def _get_log_entries(self) -> List[Tuple[int, bytes, List[int], bytes]]: """ Return the log entries for this computation and its children. They are sorted in the same order they were emitted during the transaction processing, and include the sequential counter as the first element of the tuple representing every entry. """ if self.is_error: return [] else: return sorted(itertools.chain( self._log_entries, *(child._get_log_entries() for child in self.children) ))
python
def _get_log_entries(self) -> List[Tuple[int, bytes, List[int], bytes]]: """ Return the log entries for this computation and its children. They are sorted in the same order they were emitted during the transaction processing, and include the sequential counter as the first element of the tuple representing every entry. """ if self.is_error: return [] else: return sorted(itertools.chain( self._log_entries, *(child._get_log_entries() for child in self.children) ))
['def', '_get_log_entries', '(', 'self', ')', '->', 'List', '[', 'Tuple', '[', 'int', ',', 'bytes', ',', 'List', '[', 'int', ']', ',', 'bytes', ']', ']', ':', 'if', 'self', '.', 'is_error', ':', 'return', '[', ']', 'else', ':', 'return', 'sorted', '(', 'itertools', '.', 'chain', '(', 'self', '.', '_log_entries', ',', '*', '(', 'child', '.', '_get_log_entries', '(', ')', 'for', 'child', 'in', 'self', '.', 'children', ')', ')', ')']
Return the log entries for this computation and its children. They are sorted in the same order they were emitted during the transaction processing, and include the sequential counter as the first element of the tuple representing every entry.
['Return', 'the', 'log', 'entries', 'for', 'this', 'computation', 'and', 'its', 'children', '.']
train
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/computation.py#L463-L476
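The merge step is plain tuple sorting: entries compare element-wise, so the leading counter restores global emission order across parent and child computations. A self-contained sketch of the same idiom, with made-up log data:

```python
import itertools

parent_logs = [(0, b'\x01' * 20, [1], b'data0'), (3, b'\x01' * 20, [], b'data3')]
child_logs = [(1, b'\x02' * 20, [2], b'data1'), (2, b'\x02' * 20, [], b'data2')]

# Distinct counters mean the comparison never falls through to later elements.
merged = sorted(itertools.chain(parent_logs, child_logs))
assert [counter for counter, *_ in merged] == [0, 1, 2, 3]
```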
2,396
aouyar/PyMunin
pymunin/plugins/apachestats.py
MuninApachePlugin.autoconf
def autoconf(self): """Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise. """ apacheInfo = ApacheInfo(self._host, self._port, self._user, self._password, self._statuspath, self._ssl) return apacheInfo is not None
python
def autoconf(self): """Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise. """ apacheInfo = ApacheInfo(self._host, self._port, self._user, self._password, self._statuspath, self._ssl) return apacheInfo is not None
['def', 'autoconf', '(', 'self', ')', ':', 'apacheInfo', '=', 'ApacheInfo', '(', 'self', '.', '_host', ',', 'self', '.', '_port', ',', 'self', '.', '_user', ',', 'self', '.', '_password', ',', 'self', '.', '_statuspath', ',', 'self', '.', '_ssl', ')', 'return', 'apacheInfo', 'is', 'not', 'None']
Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise.
['Implements', 'Munin', 'Plugin', 'Auto', '-', 'Configuration', 'Option', '.']
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/apachestats.py#L140-L149
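Worth noting: the constructor call always yields an object, so `apacheInfo is not None` can only return True here; a construction failure would propagate as an exception rather than yield False. A stricter probe might look like the sketch below, where `getServerStats()` is an assumed method name, not a verified PyMunin API:

```python
def autoconf(self):
    # Hypothetical stricter check: actually try to fetch the status page.
    try:
        apacheInfo = ApacheInfo(self._host, self._port, self._user,
                                self._password, self._statuspath, self._ssl)
        return apacheInfo.getServerStats() is not None  # assumed probe call
    except Exception:
        return False
```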
2,397
Scoppio/RagnarokEngine3
RagnarokEngine3/RE3.py
World.__sort_up
def __sort_up(self): """Sort the updatable objects in ascending order""" if self.__do_need_sort_up: self.__up_objects.sort(key=cmp_to_key(self.__up_cmp)) self.__do_need_sort_up = False
python
def __sort_up(self): """Sort the updatable objects in ascending order""" if self.__do_need_sort_up: self.__up_objects.sort(key=cmp_to_key(self.__up_cmp)) self.__do_need_sort_up = False
['def', '__sort_up', '(', 'self', ')', ':', 'if', 'self', '.', '__do_need_sort_up', ':', 'self', '.', '__up_objects', '.', 'sort', '(', 'key', '=', 'cmp_to_key', '(', 'self', '.', '__up_cmp', ')', ')', 'self', '.', '__do_need_sort_up', '=', 'False']
Sort the updatable objects in ascending order
['Sort', 'the', 'updatable', 'objects', 'in', 'ascending', 'order']
train
https://github.com/Scoppio/RagnarokEngine3/blob/4395d419ccd64fe9327c41f200b72ee0176ad896/RagnarokEngine3/RE3.py#L3214-L3219
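The method combines a dirty flag (sort only when membership changed) with `functools.cmp_to_key`, which adapts an old-style comparator for Python 3's key-based sort. A standalone sketch of both pieces; `update_order` is an assumed attribute name, not Ragnarok's actual field:

```python
from functools import cmp_to_key

class Updatable:
    def __init__(self, update_order):
        self.update_order = update_order

def up_cmp(a, b):
    return a.update_order - b.update_order   # negative / zero / positive

objects, dirty = [Updatable(3), Updatable(1), Updatable(2)], True
if dirty:                                     # pay for sorting only when needed
    objects.sort(key=cmp_to_key(up_cmp))
    dirty = False
print([o.update_order for o in objects])      # [1, 2, 3]
```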
2,398
ethereum/py-evm
eth/db/chain.py
ChainDB.get_transaction_by_index
def get_transaction_by_index( self, block_number: BlockNumber, transaction_index: int, transaction_class: Type['BaseTransaction']) -> 'BaseTransaction': """ Returns the transaction at the specified `transaction_index` from the block specified by `block_number` from the canonical chain. Raises TransactionNotFound if the block is not in the canonical chain or no transaction exists at the given index. """ try: block_header = self.get_canonical_block_header_by_number(block_number) except HeaderNotFound: raise TransactionNotFound("Block {} is not in the canonical chain".format(block_number)) transaction_db = HexaryTrie(self.db, root_hash=block_header.transaction_root) encoded_index = rlp.encode(transaction_index) if encoded_index in transaction_db: encoded_transaction = transaction_db[encoded_index] return rlp.decode(encoded_transaction, sedes=transaction_class) else: raise TransactionNotFound( "No transaction is at index {} of block {}".format(transaction_index, block_number))
python
def get_transaction_by_index( self, block_number: BlockNumber, transaction_index: int, transaction_class: Type['BaseTransaction']) -> 'BaseTransaction': """ Returns the transaction at the specified `transaction_index` from the block specified by `block_number` from the canonical chain. Raises TransactionNotFound if the block is not in the canonical chain or no transaction exists at the given index. """ try: block_header = self.get_canonical_block_header_by_number(block_number) except HeaderNotFound: raise TransactionNotFound("Block {} is not in the canonical chain".format(block_number)) transaction_db = HexaryTrie(self.db, root_hash=block_header.transaction_root) encoded_index = rlp.encode(transaction_index) if encoded_index in transaction_db: encoded_transaction = transaction_db[encoded_index] return rlp.decode(encoded_transaction, sedes=transaction_class) else: raise TransactionNotFound( "No transaction is at index {} of block {}".format(transaction_index, block_number))
['def', 'get_transaction_by_index', '(', 'self', ',', 'block_number', ':', 'BlockNumber', ',', 'transaction_index', ':', 'int', ',', 'transaction_class', ':', 'Type', '[', "'BaseTransaction'", ']', ')', '->', "'BaseTransaction'", ':', 'try', ':', 'block_header', '=', 'self', '.', 'get_canonical_block_header_by_number', '(', 'block_number', ')', 'except', 'HeaderNotFound', ':', 'raise', 'TransactionNotFound', '(', '"Block {} is not in the canonical chain"', '.', 'format', '(', 'block_number', ')', ')', 'transaction_db', '=', 'HexaryTrie', '(', 'self', '.', 'db', ',', 'root_hash', '=', 'block_header', '.', 'transaction_root', ')', 'encoded_index', '=', 'rlp', '.', 'encode', '(', 'transaction_index', ')', 'if', 'encoded_index', 'in', 'transaction_db', ':', 'encoded_transaction', '=', 'transaction_db', '[', 'encoded_index', ']', 'return', 'rlp', '.', 'decode', '(', 'encoded_transaction', ',', 'sedes', '=', 'transaction_class', ')', 'else', ':', 'raise', 'TransactionNotFound', '(', '"No transaction is at index {} of block {}"', '.', 'format', '(', 'transaction_index', ',', 'block_number', ')', ')']
Returns the transaction at the specified `transaction_index` from the block specified by `block_number` from the canonical chain. Raises TransactionNotFound if the block is not in the canonical chain or no transaction exists at the given index.
['Returns', 'the', 'transaction', 'at', 'the', 'specified', 'transaction_index', 'from', 'the', 'block', 'specified', 'by', 'block_number', 'from', 'the', 'canonical', 'chain', '.']
train
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/db/chain.py#L362-L384
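A hedged call sketch; `chaindb` stands for an already-initialised ChainDB, and the fork-specific transaction class shown is an assumption about the chain being queried:

```python
from eth.exceptions import TransactionNotFound
from eth.vm.forks.byzantium.transactions import ByzantiumTransaction  # assumed fork

try:
    tx = chaindb.get_transaction_by_index(
        block_number=4_000_000,
        transaction_index=0,
        transaction_class=ByzantiumTransaction,
    )
except TransactionNotFound:
    tx = None  # block not canonical, or index out of range
```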
2,399
orbingol/NURBS-Python
geomdl/linalg.py
vector_angle_between
def vector_angle_between(vector1, vector2, **kwargs): """ Computes the angle between the two input vectors. If the keyword argument ``degrees`` is set to *True*, then the angle will be in degrees. Otherwise, it will be in radians. By default, ``degrees`` is set to *True*. :param vector1: vector :type vector1: list, tuple :param vector2: vector :type vector2: list, tuple :return: angle between the vectors :rtype: float """ degrees = kwargs.get('degrees', True) magn1 = vector_magnitude(vector1) magn2 = vector_magnitude(vector2) acos_val = vector_dot(vector1, vector2) / (magn1 * magn2) angle_radians = math.acos(acos_val) if degrees: return math.degrees(angle_radians) else: return angle_radians
python
def vector_angle_between(vector1, vector2, **kwargs): """ Computes the angle between the two input vectors. If the keyword argument ``degrees`` is set to *True*, then the angle will be in degrees. Otherwise, it will be in radians. By default, ``degrees`` is set to *True*. :param vector1: vector :type vector1: list, tuple :param vector2: vector :type vector2: list, tuple :return: angle between the vectors :rtype: float """ degrees = kwargs.get('degrees', True) magn1 = vector_magnitude(vector1) magn2 = vector_magnitude(vector2) acos_val = vector_dot(vector1, vector2) / (magn1 * magn2) angle_radians = math.acos(acos_val) if degrees: return math.degrees(angle_radians) else: return angle_radians
['def', 'vector_angle_between', '(', 'vector1', ',', 'vector2', ',', '*', '*', 'kwargs', ')', ':', 'degrees', '=', 'kwargs', '.', 'get', '(', "'degrees'", ',', 'True', ')', 'magn1', '=', 'vector_magnitude', '(', 'vector1', ')', 'magn2', '=', 'vector_magnitude', '(', 'vector2', ')', 'acos_val', '=', 'vector_dot', '(', 'vector1', ',', 'vector2', ')', '/', '(', 'magn1', '*', 'magn2', ')', 'angle_radians', '=', 'math', '.', 'acos', '(', 'acos_val', ')', 'if', 'degrees', ':', 'return', 'math', '.', 'degrees', '(', 'angle_radians', ')', 'else', ':', 'return', 'angle_radians']
Computes the angle between the two input vectors. If the keyword argument ``degrees`` is set to *True*, then the angle will be in degrees. Otherwise, it will be in radians. By default, ``degrees`` is set to *True*. :param vector1: vector :type vector1: list, tuple :param vector2: vector :type vector2: list, tuple :return: angle between the vectors :rtype: float
['Computes', 'the', 'angle', 'between', 'the', 'two', 'input', 'vectors', '.']
train
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/linalg.py#L237-L258
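A quick check against a known case: perpendicular unit vectors are 90 degrees apart, or pi/2 radians with `degrees=False`:

```python
from geomdl import linalg

print(linalg.vector_angle_between((1, 0, 0), (0, 1, 0)))                 # 90.0
print(linalg.vector_angle_between((1, 0, 0), (0, 1, 0), degrees=False))  # ~1.5708
```

One caveat visible in the code: floating-point error can push the dot-product ratio marginally outside [-1, 1] for nearly parallel vectors, making `math.acos` raise ValueError; clamping the ratio to that interval first guards against it.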