Unnamed: 0 (int64, 0-10k) | repository_name (string, 7-54 chars) | func_path_in_repository (string, 5-223 chars) | func_name (string, 1-134 chars) | whole_func_string (string, 100-30.3k chars) | language (string, 1 class) | func_code_string (string, 100-30.3k chars) | func_code_tokens (string, 138-33.2k chars) | func_documentation_string (string, 1-15k chars) | func_documentation_tokens (string, 5-5.14k chars) | split_name (string, 1 class) | func_code_url (string, 91-315 chars)
---|---|---|---|---|---|---|---|---|---|---|---
9,600 | ninuxorg/nodeshot | nodeshot/community/profiles/social_auth_extra/pipeline.py | create_user | def create_user(backend, details, response, uid, username, user=None, *args, **kwargs):
"""
Creates user. Depends on get_username pipeline.
"""
if user:
return {'user': user}
if not username:
return None
email = details.get('email')
original_email = None
# email is required
if not email:
message = _("""your social account needs to have a verified email address in order to proceed.""")
raise AuthFailed(backend, message)
# Avoid hitting field max length
if email and len(email) > 75:
original_email = email
email = ''
return {
'user': UserSocialAuth.create_user(username=username,
email=email,
sync_emailaddress=False),
'original_email': original_email,
'is_new': True
} | python | def create_user(backend, details, response, uid, username, user=None, *args, **kwargs):
"""
Creates user. Depends on get_username pipeline.
"""
if user:
return {'user': user}
if not username:
return None
email = details.get('email')
original_email = None
# email is required
if not email:
message = _("""your social account needs to have a verified email address in order to proceed.""")
raise AuthFailed(backend, message)
# Avoid hitting field max length
if email and len(email) > 75:
original_email = email
email = ''
return {
'user': UserSocialAuth.create_user(username=username,
email=email,
sync_emailaddress=False),
'original_email': original_email,
'is_new': True
} | ['def', 'create_user', '(', 'backend', ',', 'details', ',', 'response', ',', 'uid', ',', 'username', ',', 'user', '=', 'None', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'if', 'user', ':', 'return', '{', "'user'", ':', 'user', '}', 'if', 'not', 'username', ':', 'return', 'None', 'email', '=', 'details', '.', 'get', '(', "'email'", ')', 'original_email', '=', 'None', '# email is required', 'if', 'not', 'email', ':', 'message', '=', '_', '(', '"""your social account needs to have a verified email address in order to proceed."""', ')', 'raise', 'AuthFailed', '(', 'backend', ',', 'message', ')', '# Avoid hitting field max length', 'if', 'email', 'and', 'len', '(', 'email', ')', '>', '75', ':', 'original_email', '=', 'email', 'email', '=', "''", 'return', '{', "'user'", ':', 'UserSocialAuth', '.', 'create_user', '(', 'username', '=', 'username', ',', 'email', '=', 'email', ',', 'sync_emailaddress', '=', 'False', ')', ',', "'original_email'", ':', 'original_email', ',', "'is_new'", ':', 'True', '}'] | Creates user. Depends on get_username pipeline. | ['Creates', 'user', '.', 'Depends', 'on', 'get_username', 'pipeline', '.'] | train | https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/community/profiles/social_auth_extra/pipeline.py#L13-L37 |
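The `create_user` step above only makes sense as one stage of a python-social-auth pipeline, where it must run after `get_username`. A hypothetical Django settings sketch follows; only the final entry comes from this repository, and the preceding step paths vary between social-auth versions, so treat them as assumptions.

```python
# Hypothetical pipeline wiring; only the last entry is this repository's module,
# the earlier step paths are assumptions and differ across social-auth releases.
SOCIAL_AUTH_PIPELINE = (
    'social.pipeline.social_auth.social_details',
    'social.pipeline.social_auth.social_uid',
    'social.pipeline.user.get_username',  # create_user depends on this step
    'nodeshot.community.profiles.social_auth_extra.pipeline.create_user',
)
```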
9,601 | daviddrysdale/python-phonenumbers | python/phonenumbers/phonenumberutil.py | _region_code_for_number_from_list | def _region_code_for_number_from_list(numobj, regions):
"""Find the region in a list that matches a number"""
national_number = national_significant_number(numobj)
for region_code in regions:
# If leading_digits is present, use this. Otherwise, do full
# validation.
# Metadata cannot be None because the region codes come from
# the country calling code map.
metadata = PhoneMetadata.metadata_for_region(region_code.upper(), None)
if metadata is None:
continue
if metadata.leading_digits is not None:
leading_digit_re = re.compile(metadata.leading_digits)
match = leading_digit_re.match(national_number)
if match:
return region_code
elif _number_type_helper(national_number, metadata) != PhoneNumberType.UNKNOWN:
return region_code
return None | python | def _region_code_for_number_from_list(numobj, regions):
"""Find the region in a list that matches a number"""
national_number = national_significant_number(numobj)
for region_code in regions:
# If leading_digits is present, use this. Otherwise, do full
# validation.
# Metadata cannot be None because the region codes come from
# the country calling code map.
metadata = PhoneMetadata.metadata_for_region(region_code.upper(), None)
if metadata is None:
continue
if metadata.leading_digits is not None:
leading_digit_re = re.compile(metadata.leading_digits)
match = leading_digit_re.match(national_number)
if match:
return region_code
elif _number_type_helper(national_number, metadata) != PhoneNumberType.UNKNOWN:
return region_code
return None | ['def', '_region_code_for_number_from_list', '(', 'numobj', ',', 'regions', ')', ':', 'national_number', '=', 'national_significant_number', '(', 'numobj', ')', 'for', 'region_code', 'in', 'regions', ':', '# If leading_digits is present, use this. Otherwise, do full', '# validation.', '# Metadata cannot be None because the region codes come from', '# the country calling code map.', 'metadata', '=', 'PhoneMetadata', '.', 'metadata_for_region', '(', 'region_code', '.', 'upper', '(', ')', ',', 'None', ')', 'if', 'metadata', 'is', 'None', ':', 'continue', 'if', 'metadata', '.', 'leading_digits', 'is', 'not', 'None', ':', 'leading_digit_re', '=', 're', '.', 'compile', '(', 'metadata', '.', 'leading_digits', ')', 'match', '=', 'leading_digit_re', '.', 'match', '(', 'national_number', ')', 'if', 'match', ':', 'return', 'region_code', 'elif', '_number_type_helper', '(', 'national_number', ',', 'metadata', ')', '!=', 'PhoneNumberType', '.', 'UNKNOWN', ':', 'return', 'region_code', 'return', 'None'] | Find the region in a list that matches a number | ['Find', 'the', 'region', 'in', 'a', 'list', 'that', 'matches', 'a', 'number'] | train | https://github.com/daviddrysdale/python-phonenumbers/blob/9cc5bb4ab5e661e70789b4c64bf7a9383c7bdc20/python/phonenumbers/phonenumberutil.py#L2047-L2065 |
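`_region_code_for_number_from_list` is a private helper; the same behaviour is reachable through the public phonenumbers API. A minimal sketch, assuming the `phonenumbers` package is installed and using an illustrative sample number:

```python
import phonenumbers

# +1 maps to several regions (US, CA, ...); region_code_for_number() narrows the
# candidate list using the leading-digits / number-type checks shown above.
numobj = phonenumbers.parse("+16502530000", None)
print(phonenumbers.region_code_for_number(numobj))  # "US"
```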
9,602 | hannorein/rebound | rebound/simulation.py | Simulation.configure_ghostboxes | def configure_ghostboxes(self, nghostx=0, nghosty=0, nghostz=0):
"""
Initialize the ghost boxes.
This function only needs to be called it boundary conditions other than "none" or
"open" are used. In such a case the number of ghostboxes must be known and is set
with this function.
Parameters
----------
nghostx, nghosty, nghostz : int
The number of ghost boxes in each direction. All values default to 0 (no ghost boxes).
"""
clibrebound.nghostx = c_int(nghostx)
clibrebound.nghosty = c_int(nghosty)
clibrebound.nghostz = c_int(nghostz)
return | python | def configure_ghostboxes(self, nghostx=0, nghosty=0, nghostz=0):
"""
Initialize the ghost boxes.
This function only needs to be called it boundary conditions other than "none" or
"open" are used. In such a case the number of ghostboxes must be known and is set
with this function.
Parameters
----------
nghostx, nghosty, nghostz : int
The number of ghost boxes in each direction. All values default to 0 (no ghost boxes).
"""
clibrebound.nghostx = c_int(nghostx)
clibrebound.nghosty = c_int(nghosty)
clibrebound.nghostz = c_int(nghostz)
return | ['def', 'configure_ghostboxes', '(', 'self', ',', 'nghostx', '=', '0', ',', 'nghosty', '=', '0', ',', 'nghostz', '=', '0', ')', ':', 'clibrebound', '.', 'nghostx', '=', 'c_int', '(', 'nghostx', ')', 'clibrebound', '.', 'nghosty', '=', 'c_int', '(', 'nghosty', ')', 'clibrebound', '.', 'nghostz', '=', 'c_int', '(', 'nghostz', ')', 'return'] | Initialize the ghost boxes.
This function only needs to be called it boundary conditions other than "none" or
"open" are used. In such a case the number of ghostboxes must be known and is set
with this function.
Parameters
----------
nghostx, nghosty, nghostz : int
The number of ghost boxes in each direction. All values default to 0 (no ghost boxes). | ['Initialize', 'the', 'ghost', 'boxes', '.'] | train | https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulation.py#L1474-L1490 |
9,603 | rwl/pylon | contrib/public/services/jsonrpc/__init__.py | ResponseEvent.waitForResponse | def waitForResponse(self, timeOut=None):
"""blocks until the response arrived or timeout is reached."""
self.__evt.wait(timeOut)
if self.waiting():
raise Timeout()
else:
if self.response["error"]:
raise Exception(self.response["error"])
else:
return self.response["result"] | python | def waitForResponse(self, timeOut=None):
"""blocks until the response arrived or timeout is reached."""
self.__evt.wait(timeOut)
if self.waiting():
raise Timeout()
else:
if self.response["error"]:
raise Exception(self.response["error"])
else:
return self.response["result"] | ['def', 'waitForResponse', '(', 'self', ',', 'timeOut', '=', 'None', ')', ':', 'self', '.', '__evt', '.', 'wait', '(', 'timeOut', ')', 'if', 'self', '.', 'waiting', '(', ')', ':', 'raise', 'Timeout', '(', ')', 'else', ':', 'if', 'self', '.', 'response', '[', '"error"', ']', ':', 'raise', 'Exception', '(', 'self', '.', 'response', '[', '"error"', ']', ')', 'else', ':', 'return', 'self', '.', 'response', '[', '"result"', ']'] | blocks until the response arrived or timeout is reached. | ['blocks', 'until', 'the', 'response', 'arrived', 'or', 'timeout', 'is', 'reached', '.'] | train | https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/contrib/public/services/jsonrpc/__init__.py#L51-L60 |
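A self-contained sketch of the same wait-with-timeout pattern built on `threading.Event`; the names mirror `waitForResponse` above, but this is not the pylon implementation.

```python
import threading

class Timeout(Exception):
    pass

class MiniResponseEvent:
    def __init__(self):
        self._evt = threading.Event()
        self.response = None

    def handle(self, response):
        self.response = response
        self._evt.set()

    def waitForResponse(self, timeOut=None):
        # Event.wait() returns False on timeout, mirroring the waiting() check above
        if not self._evt.wait(timeOut):
            raise Timeout()
        if self.response["error"]:
            raise Exception(self.response["error"])
        return self.response["result"]

evt = MiniResponseEvent()
threading.Timer(0.1, evt.handle, [{"error": None, "result": 42}]).start()
print(evt.waitForResponse(timeOut=1.0))  # -> 42
```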
9,604 | 7sDream/zhihu-py3 | zhihu/answer.py | Answer.upvoters | def upvoters(self):
"""获取答案点赞用户,返回生成器.
:return: 点赞用户
:rtype: Author.Iterable
"""
self._make_soup()
next_req = '/answer/' + str(self.aid) + '/voters_profile'
while next_req != '':
data = self._session.get(Zhihu_URL + next_req).json()
next_req = data['paging']['next']
for html in data['payload']:
soup = BeautifulSoup(html)
yield self._parse_author_soup(soup) | python | def upvoters(self):
"""获取答案点赞用户,返回生成器.
:return: 点赞用户
:rtype: Author.Iterable
"""
self._make_soup()
next_req = '/answer/' + str(self.aid) + '/voters_profile'
while next_req != '':
data = self._session.get(Zhihu_URL + next_req).json()
next_req = data['paging']['next']
for html in data['payload']:
soup = BeautifulSoup(html)
yield self._parse_author_soup(soup) | ['def', 'upvoters', '(', 'self', ')', ':', 'self', '.', '_make_soup', '(', ')', 'next_req', '=', "'/answer/'", '+', 'str', '(', 'self', '.', 'aid', ')', '+', "'/voters_profile'", 'while', 'next_req', '!=', "''", ':', 'data', '=', 'self', '.', '_session', '.', 'get', '(', 'Zhihu_URL', '+', 'next_req', ')', '.', 'json', '(', ')', 'next_req', '=', 'data', '[', "'paging'", ']', '[', "'next'", ']', 'for', 'html', 'in', 'data', '[', "'payload'", ']', ':', 'soup', '=', 'BeautifulSoup', '(', 'html', ')', 'yield', 'self', '.', '_parse_author_soup', '(', 'soup', ')'] | 获取答案点赞用户,返回生成器.
:return: 点赞用户
:rtype: Author.Iterable | ['获取答案点赞用户,返回生成器', '.'] | train | https://github.com/7sDream/zhihu-py3/blob/bcb4aa8325f8b54d3b44bd0bdc959edd9761fcfc/zhihu/answer.py#L128-L141 |
9,605 | pazz/alot | alot/settings/utils.py | resolve_att | def resolve_att(a, fallback):
""" replace '' and 'default' by fallback values """
if a is None:
return fallback
if a.background in ['default', '']:
bg = fallback.background
else:
bg = a.background
if a.foreground in ['default', '']:
fg = fallback.foreground
else:
fg = a.foreground
return AttrSpec(fg, bg) | python | def resolve_att(a, fallback):
""" replace '' and 'default' by fallback values """
if a is None:
return fallback
if a.background in ['default', '']:
bg = fallback.background
else:
bg = a.background
if a.foreground in ['default', '']:
fg = fallback.foreground
else:
fg = a.foreground
return AttrSpec(fg, bg) | ['def', 'resolve_att', '(', 'a', ',', 'fallback', ')', ':', 'if', 'a', 'is', 'None', ':', 'return', 'fallback', 'if', 'a', '.', 'background', 'in', '[', "'default'", ',', "''", ']', ':', 'bg', '=', 'fallback', '.', 'background', 'else', ':', 'bg', '=', 'a', '.', 'background', 'if', 'a', '.', 'foreground', 'in', '[', "'default'", ',', "''", ']', ':', 'fg', '=', 'fallback', '.', 'foreground', 'else', ':', 'fg', '=', 'a', '.', 'foreground', 'return', 'AttrSpec', '(', 'fg', ',', 'bg', ')'] | replace '' and 'default' by fallback values | ['replace', 'and', 'default', 'by', 'fallback', 'values'] | train | https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/settings/utils.py#L85-L97 |
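A small illustration of the fallback behaviour of `resolve_att`, assuming urwid and alot are importable; the import path is the one shown in this row.

```python
from urwid import AttrSpec
from alot.settings.utils import resolve_att

fallback = AttrSpec('light gray', 'dark blue')
partial = AttrSpec('yellow', 'default')      # background left at terminal default

merged = resolve_att(partial, fallback)
print(merged.foreground, merged.background)  # yellow dark blue
```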
9,606 | hobson/pug-dj | pug/dj/db.py | Columns.pierson | def pierson(self, ddof=0):
"""Matrix of pierson linear correlation coefficients (rho values) for each pair of columns
https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient
>>> Columns([[1, 2, 3], [4, 5, 6]]).pierson()
[[1.0, 1.0], [1.0, 1.0]]
>>> Columns([[1, 2, 3], [2.5, 3.5, 4.5]], transpose=True).pierson()
[[1.0, 1.0], [1.0, 1.0]]
>>> Columns([[1, 3, 2], [4, 5, 7]], transpose=1).pierson() # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[[1.0, 0.327326835...], [0.327326835..., 1.0]]
"""
C = self.cov(ddof=ddof)
rho = []
N = len(C)
for i in range(N):
rho += [[1.] * N]
for i in range(N):
for j in range(i + 1, N):
rho[i][j] = C[i][j] / (C[i][i] * C[j][j] or 1.) ** 0.5
rho[j][i] = rho[i][j]
return rho | python | def pierson(self, ddof=0):
"""Matrix of pierson linear correlation coefficients (rho values) for each pair of columns
https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient
>>> Columns([[1, 2, 3], [4, 5, 6]]).pierson()
[[1.0, 1.0], [1.0, 1.0]]
>>> Columns([[1, 2, 3], [2.5, 3.5, 4.5]], transpose=True).pierson()
[[1.0, 1.0], [1.0, 1.0]]
>>> Columns([[1, 3, 2], [4, 5, 7]], transpose=1).pierson() # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[[1.0, 0.327326835...], [0.327326835..., 1.0]]
"""
C = self.cov(ddof=ddof)
rho = []
N = len(C)
for i in range(N):
rho += [[1.] * N]
for i in range(N):
for j in range(i + 1, N):
rho[i][j] = C[i][j] / (C[i][i] * C[j][j] or 1.) ** 0.5
rho[j][i] = rho[i][j]
return rho | ['def', 'pierson', '(', 'self', ',', 'ddof', '=', '0', ')', ':', 'C', '=', 'self', '.', 'cov', '(', 'ddof', '=', 'ddof', ')', 'rho', '=', '[', ']', 'N', '=', 'len', '(', 'C', ')', 'for', 'i', 'in', 'range', '(', 'N', ')', ':', 'rho', '+=', '[', '[', '1.', ']', '*', 'N', ']', 'for', 'i', 'in', 'range', '(', 'N', ')', ':', 'for', 'j', 'in', 'range', '(', 'i', '+', '1', ',', 'N', ')', ':', 'rho', '[', 'i', ']', '[', 'j', ']', '=', 'C', '[', 'i', ']', '[', 'j', ']', '/', '(', 'C', '[', 'i', ']', '[', 'i', ']', '*', 'C', '[', 'j', ']', '[', 'j', ']', 'or', '1.', ')', '**', '0.5', 'rho', '[', 'j', ']', '[', 'i', ']', '=', 'rho', '[', 'i', ']', '[', 'j', ']', 'return', 'rho'] | Matrix of pierson linear correlation coefficients (rho values) for each pair of columns
https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient
>>> Columns([[1, 2, 3], [4, 5, 6]]).pierson()
[[1.0, 1.0], [1.0, 1.0]]
>>> Columns([[1, 2, 3], [2.5, 3.5, 4.5]], transpose=True).pierson()
[[1.0, 1.0], [1.0, 1.0]]
>>> Columns([[1, 3, 2], [4, 5, 7]], transpose=1).pierson() # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[[1.0, 0.327326835...], [0.327326835..., 1.0]] | ['Matrix', 'of', 'pierson', 'linear', 'correlation', 'coefficients', '(', 'rho', 'values', ')', 'for', 'each', 'pair', 'of', 'columns'] | train | https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L1294-L1314 |
9,607 | fermiPy/fermipy | fermipy/jobs/link.py | Link._latch_file_info | def _latch_file_info(self):
"""Internal function to update the dictionaries
keeping track of input and output files
"""
self.files.file_dict.clear()
self.files.latch_file_info(self.args) | python | def _latch_file_info(self):
"""Internal function to update the dictionaries
keeping track of input and output files
"""
self.files.file_dict.clear()
self.files.latch_file_info(self.args) | ['def', '_latch_file_info', '(', 'self', ')', ':', 'self', '.', 'files', '.', 'file_dict', '.', 'clear', '(', ')', 'self', '.', 'files', '.', 'latch_file_info', '(', 'self', '.', 'args', ')'] | Internal function to update the dictionaries
keeping track of input and output files | ['Internal', 'function', 'to', 'update', 'the', 'dictionaries', 'keeping', 'track', 'of', 'input', 'and', 'output', 'files'] | train | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/link.py#L361-L366 |
9,608 | sernst/cauldron | cauldron/cli/threads.py | abort_thread | def abort_thread():
"""
This function checks to see if the user has indicated that they want the
currently running execution to stop prematurely by marking the running
thread as aborted. It only applies to operations that are run within
CauldronThreads and not the main thread.
"""
thread = threading.current_thread()
if not isinstance(thread, CauldronThread):
return
if thread.is_executing and thread.abort:
raise ThreadAbortError('User Aborted Execution') | python | def abort_thread():
"""
This function checks to see if the user has indicated that they want the
currently running execution to stop prematurely by marking the running
thread as aborted. It only applies to operations that are run within
CauldronThreads and not the main thread.
"""
thread = threading.current_thread()
if not isinstance(thread, CauldronThread):
return
if thread.is_executing and thread.abort:
raise ThreadAbortError('User Aborted Execution') | ['def', 'abort_thread', '(', ')', ':', 'thread', '=', 'threading', '.', 'current_thread', '(', ')', 'if', 'not', 'isinstance', '(', 'thread', ',', 'CauldronThread', ')', ':', 'return', 'if', 'thread', '.', 'is_executing', 'and', 'thread', '.', 'abort', ':', 'raise', 'ThreadAbortError', '(', "'User Aborted Execution'", ')'] | This function checks to see if the user has indicated that they want the
currently running execution to stop prematurely by marking the running
thread as aborted. It only applies to operations that are run within
CauldronThreads and not the main thread. | ['This', 'function', 'checks', 'to', 'see', 'if', 'the', 'user', 'has', 'indicated', 'that', 'they', 'want', 'the', 'currently', 'running', 'execution', 'to', 'stop', 'prematurely', 'by', 'marking', 'the', 'running', 'thread', 'as', 'aborted', '.', 'It', 'only', 'applies', 'to', 'operations', 'that', 'are', 'run', 'within', 'CauldronThreads', 'and', 'not', 'the', 'main', 'thread', '.'] | train | https://github.com/sernst/cauldron/blob/4086aec9c038c402ea212c79fe8bd0d27104f9cf/cauldron/cli/threads.py#L105-L119 |
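A hedged sketch of the intended call pattern for `abort_thread`: long-running work executed on a CauldronThread calls it periodically so a user-requested stop surfaces as `ThreadAbortError` instead of the step running to completion. The loop body is a hypothetical stand-in.

```python
from cauldron.cli.threads import abort_thread

def crunch(chunk):
    return chunk * chunk           # hypothetical per-chunk work

results = []
for chunk in range(10000):
    abort_thread()                 # no-op on the main thread; raises ThreadAbortError
    results.append(crunch(chunk))  # when the owning CauldronThread was marked aborted
```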
9,609 | jim-easterbrook/pywws | src/pywws/device_pyusb1.py | USBDevice.write_data | def write_data(self, buf):
"""Send data to the device.
If the write fails for any reason, an :obj:`IOError` exception
is raised.
:param buf: the data to send.
:type buf: list(int)
:return: success status.
:rtype: bool
"""
bmRequestType = usb.util.build_request_type(
usb.util.ENDPOINT_OUT,
usb.util.CTRL_TYPE_CLASS,
usb.util.CTRL_RECIPIENT_INTERFACE
)
result = self.dev.ctrl_transfer(
bmRequestType=bmRequestType,
bRequest=usb.REQ_SET_CONFIGURATION,
data_or_wLength=buf,
wValue=0x200,
timeout=50)
if result != len(buf):
raise IOError('pywws.device_pyusb1.USBDevice.write_data failed')
return True | python | def write_data(self, buf):
"""Send data to the device.
If the write fails for any reason, an :obj:`IOError` exception
is raised.
:param buf: the data to send.
:type buf: list(int)
:return: success status.
:rtype: bool
"""
bmRequestType = usb.util.build_request_type(
usb.util.ENDPOINT_OUT,
usb.util.CTRL_TYPE_CLASS,
usb.util.CTRL_RECIPIENT_INTERFACE
)
result = self.dev.ctrl_transfer(
bmRequestType=bmRequestType,
bRequest=usb.REQ_SET_CONFIGURATION,
data_or_wLength=buf,
wValue=0x200,
timeout=50)
if result != len(buf):
raise IOError('pywws.device_pyusb1.USBDevice.write_data failed')
return True | ['def', 'write_data', '(', 'self', ',', 'buf', ')', ':', 'bmRequestType', '=', 'usb', '.', 'util', '.', 'build_request_type', '(', 'usb', '.', 'util', '.', 'ENDPOINT_OUT', ',', 'usb', '.', 'util', '.', 'CTRL_TYPE_CLASS', ',', 'usb', '.', 'util', '.', 'CTRL_RECIPIENT_INTERFACE', ')', 'result', '=', 'self', '.', 'dev', '.', 'ctrl_transfer', '(', 'bmRequestType', '=', 'bmRequestType', ',', 'bRequest', '=', 'usb', '.', 'REQ_SET_CONFIGURATION', ',', 'data_or_wLength', '=', 'buf', ',', 'wValue', '=', '0x200', ',', 'timeout', '=', '50', ')', 'if', 'result', '!=', 'len', '(', 'buf', ')', ':', 'raise', 'IOError', '(', "'pywws.device_pyusb1.USBDevice.write_data failed'", ')', 'return', 'True'] | Send data to the device.
If the write fails for any reason, an :obj:`IOError` exception
is raised.
:param buf: the data to send.
:type buf: list(int)
:return: success status.
:rtype: bool | ['Send', 'data', 'to', 'the', 'device', '.'] | train | https://github.com/jim-easterbrook/pywws/blob/4e4d74cee5a3ac5bf42286feaa251cd2ffcaf02c/src/pywws/device_pyusb1.py#L108-L136 |
9,610 | clld/clldutils | src/clldutils/sfm.py | Entry.get | def get(self, key, default=None):
"""Retrieve the first value for a marker or None."""
for k, v in self:
if k == key:
return v
return default | python | def get(self, key, default=None):
"""Retrieve the first value for a marker or None."""
for k, v in self:
if k == key:
return v
return default | ['def', 'get', '(', 'self', ',', 'key', ',', 'default', '=', 'None', ')', ':', 'for', 'k', ',', 'v', 'in', 'self', ':', 'if', 'k', '==', 'key', ':', 'return', 'v', 'return', 'default'] | Retrieve the first value for a marker or None. | ['Retrieve', 'the', 'first', 'value', 'for', 'a', 'marker', 'or', 'None', '.'] | train | https://github.com/clld/clldutils/blob/7b8587ef5b56a2fc6cafaff90bc5004355c2b13f/src/clldutils/sfm.py#L71-L76 |
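A hedged usage sketch for `Entry.get`: elsewhere in `clldutils.sfm` an `Entry` behaves as a list of `(marker, value)` pairs, so constructing one directly from tuples here is an assumption.

```python
from clldutils.sfm import Entry

entry = Entry([('lx', 'kitab'), ('ge', 'book'), ('ge', 'volume')])
print(entry.get('ge'))             # 'book'  -- first value for the marker wins
print(entry.get('ps', 'UNKNOWN'))  # 'UNKNOWN' -- default when the marker is absent
```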
9,611 | jssimporter/python-jss | jss/jssobjects.py | LDAPServer.is_user_in_group | def is_user_in_group(self, user, group):
"""Test for whether a user is in a group.
There is also the ability in the API to test for whether
multiple users are members of an LDAP group, but you should just
call is_user_in_group over an enumerated list of users.
Args:
user: String username.
group: String group name.
Returns bool.
"""
search_url = "%s/%s/%s/%s/%s" % (self.url, "group", group,
"user", user)
response = self.jss.get(search_url)
# Sanity check
length = len(response)
result = False
if length == 1:
# User doesn't exist. Use default False value.
pass
elif length == 2:
if response.findtext("ldap_user/username") == user:
if response.findtext("ldap_user/is_member") == "Yes":
result = True
elif len(response) >= 2:
raise JSSGetError("Unexpected response.")
return result | python | def is_user_in_group(self, user, group):
"""Test for whether a user is in a group.
There is also the ability in the API to test for whether
multiple users are members of an LDAP group, but you should just
call is_user_in_group over an enumerated list of users.
Args:
user: String username.
group: String group name.
Returns bool.
"""
search_url = "%s/%s/%s/%s/%s" % (self.url, "group", group,
"user", user)
response = self.jss.get(search_url)
# Sanity check
length = len(response)
result = False
if length == 1:
# User doesn't exist. Use default False value.
pass
elif length == 2:
if response.findtext("ldap_user/username") == user:
if response.findtext("ldap_user/is_member") == "Yes":
result = True
elif len(response) >= 2:
raise JSSGetError("Unexpected response.")
return result | ['def', 'is_user_in_group', '(', 'self', ',', 'user', ',', 'group', ')', ':', 'search_url', '=', '"%s/%s/%s/%s/%s"', '%', '(', 'self', '.', 'url', ',', '"group"', ',', 'group', ',', '"user"', ',', 'user', ')', 'response', '=', 'self', '.', 'jss', '.', 'get', '(', 'search_url', ')', '# Sanity check', 'length', '=', 'len', '(', 'response', ')', 'result', '=', 'False', 'if', 'length', '==', '1', ':', "# User doesn't exist. Use default False value.", 'pass', 'elif', 'length', '==', '2', ':', 'if', 'response', '.', 'findtext', '(', '"ldap_user/username"', ')', '==', 'user', ':', 'if', 'response', '.', 'findtext', '(', '"ldap_user/is_member"', ')', '==', '"Yes"', ':', 'result', '=', 'True', 'elif', 'len', '(', 'response', ')', '>=', '2', ':', 'raise', 'JSSGetError', '(', '"Unexpected response."', ')', 'return', 'result'] | Test for whether a user is in a group.
There is also the ability in the API to test for whether
multiple users are members of an LDAP group, but you should just
call is_user_in_group over an enumerated list of users.
Args:
user: String username.
group: String group name.
Returns bool. | ['Test', 'for', 'whether', 'a', 'user', 'is', 'in', 'a', 'group', '.'] | train | https://github.com/jssimporter/python-jss/blob/b95185d74e0c0531b0b563f280d4129e21d5fe5d/jss/jssobjects.py#L522-L550 |
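A hedged sketch of calling `is_user_in_group` against a live Jamf/JSS server; the `JSS()` constructor arguments and the `LDAPServer` lookup follow python-jss conventions, and the hostname, user, and group names are placeholders.

```python
import jss

server = jss.JSS(url="https://jss.example.com:8443", user="api", password="secret")
ldap = server.LDAPServer("Corp LDAP")               # fetch an existing LDAP server object
print(ldap.is_user_in_group("jdoe", "mac-admins"))  # True / False
```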
9,612 | swistakm/graceful | src/graceful/authentication.py | DummyUserStorage.get_user | def get_user(
self, identified_with, identifier, req, resp, resource, uri_kwargs
):
"""Return default user object."""
return self.user | python | def get_user(
self, identified_with, identifier, req, resp, resource, uri_kwargs
):
"""Return default user object."""
return self.user | ['def', 'get_user', '(', 'self', ',', 'identified_with', ',', 'identifier', ',', 'req', ',', 'resp', ',', 'resource', ',', 'uri_kwargs', ')', ':', 'return', 'self', '.', 'user'] | Return default user object. | ['Return', 'default', 'user', 'object', '.'] | train | https://github.com/swistakm/graceful/blob/d4678cb6349a5c843a5e58002fc80140821609e4/src/graceful/authentication.py#L77-L81 |
9,613 | Murali-group/halp | halp/directed_hypergraph.py | DirectedHypergraph.get_symmetric_image | def get_symmetric_image(self):
"""Creates a new DirectedHypergraph object that is the symmetric
image of this hypergraph (i.e., identical hypergraph with all
edge directions reversed).
Copies of each of the nodes' and hyperedges' attributes are stored
and used in the new hypergraph.
:returns: DirectedHypergraph -- a new hypergraph that is the symmetric
image of the current hypergraph.
"""
new_H = self.copy()
# No change to _node_attributes necessary, as nodes remain the same
# Reverse the tail and head (and __frozen_tail and __frozen_head) for
# every hyperedge
for hyperedge_id in self.get_hyperedge_id_set():
attr_dict = new_H._hyperedge_attributes[hyperedge_id]
attr_dict["tail"], attr_dict["head"] = \
attr_dict["head"], attr_dict["tail"]
attr_dict["__frozen_tail"], attr_dict["__frozen_head"] = \
attr_dict["__frozen_head"], attr_dict["__frozen_tail"]
# Reverse the definition of forward star and backward star
new_H._forward_star, new_H._backward_star = \
new_H._backward_star, new_H._forward_star
# Reverse the definition of successor and predecessor
new_H._successors, new_H._predecessors = \
new_H._predecessors, new_H._successors
return new_H | python | def get_symmetric_image(self):
"""Creates a new DirectedHypergraph object that is the symmetric
image of this hypergraph (i.e., identical hypergraph with all
edge directions reversed).
Copies of each of the nodes' and hyperedges' attributes are stored
and used in the new hypergraph.
:returns: DirectedHypergraph -- a new hypergraph that is the symmetric
image of the current hypergraph.
"""
new_H = self.copy()
# No change to _node_attributes necessary, as nodes remain the same
# Reverse the tail and head (and __frozen_tail and __frozen_head) for
# every hyperedge
for hyperedge_id in self.get_hyperedge_id_set():
attr_dict = new_H._hyperedge_attributes[hyperedge_id]
attr_dict["tail"], attr_dict["head"] = \
attr_dict["head"], attr_dict["tail"]
attr_dict["__frozen_tail"], attr_dict["__frozen_head"] = \
attr_dict["__frozen_head"], attr_dict["__frozen_tail"]
# Reverse the definition of forward star and backward star
new_H._forward_star, new_H._backward_star = \
new_H._backward_star, new_H._forward_star
# Reverse the definition of successor and predecessor
new_H._successors, new_H._predecessors = \
new_H._predecessors, new_H._successors
return new_H | ['def', 'get_symmetric_image', '(', 'self', ')', ':', 'new_H', '=', 'self', '.', 'copy', '(', ')', '# No change to _node_attributes necessary, as nodes remain the same', '# Reverse the tail and head (and __frozen_tail and __frozen_head) for', '# every hyperedge', 'for', 'hyperedge_id', 'in', 'self', '.', 'get_hyperedge_id_set', '(', ')', ':', 'attr_dict', '=', 'new_H', '.', '_hyperedge_attributes', '[', 'hyperedge_id', ']', 'attr_dict', '[', '"tail"', ']', ',', 'attr_dict', '[', '"head"', ']', '=', 'attr_dict', '[', '"head"', ']', ',', 'attr_dict', '[', '"tail"', ']', 'attr_dict', '[', '"__frozen_tail"', ']', ',', 'attr_dict', '[', '"__frozen_head"', ']', '=', 'attr_dict', '[', '"__frozen_head"', ']', ',', 'attr_dict', '[', '"__frozen_tail"', ']', '# Reverse the definition of forward star and backward star', 'new_H', '.', '_forward_star', ',', 'new_H', '.', '_backward_star', '=', 'new_H', '.', '_backward_star', ',', 'new_H', '.', '_forward_star', '# Reverse the definition of successor and predecessor', 'new_H', '.', '_successors', ',', 'new_H', '.', '_predecessors', '=', 'new_H', '.', '_predecessors', ',', 'new_H', '.', '_successors', 'return', 'new_H'] | Creates a new DirectedHypergraph object that is the symmetric
image of this hypergraph (i.e., identical hypergraph with all
edge directions reversed).
Copies of each of the nodes' and hyperedges' attributes are stored
and used in the new hypergraph.
:returns: DirectedHypergraph -- a new hypergraph that is the symmetric
image of the current hypergraph. | ['Creates', 'a', 'new', 'DirectedHypergraph', 'object', 'that', 'is', 'the', 'symmetric', 'image', 'of', 'this', 'hypergraph', '(', 'i', '.', 'e', '.', 'identical', 'hypergraph', 'with', 'all', 'edge', 'directions', 'reversed', ')', '.', 'Copies', 'of', 'each', 'of', 'the', 'nodes', 'and', 'hyperedges', 'attributes', 'are', 'stored', 'and', 'used', 'in', 'the', 'new', 'hypergraph', '.'] | train | https://github.com/Murali-group/halp/blob/6eb27466ba84e2281e18f93b62aae5efb21ef8b3/halp/directed_hypergraph.py#L1012-L1044 |
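A hedged usage sketch for `get_symmetric_image`; `add_nodes`, `add_hyperedge`, and the tail/head getters follow halp's documented API but are assumptions relative to this row.

```python
from halp.directed_hypergraph import DirectedHypergraph

H = DirectedHypergraph()
H.add_nodes(['a', 'b', 'c'])
e1 = H.add_hyperedge({'a', 'b'}, {'c'}, weight=2)

H_rev = H.get_symmetric_image()
# the hyperedge now points the other way: tail {'c'}, head {'a', 'b'}
print(H_rev.get_hyperedge_tail(e1), H_rev.get_hyperedge_head(e1))
```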
9,614 | tanghaibao/jcvi | jcvi/formats/fastq.py | some | def some(args):
"""
%prog some idsfile afastq [bfastq]
Select a subset of the reads with ids present in the idsfile.
`bfastq` is optional (only if reads are paired)
"""
p = OptionParser(some.__doc__)
opts, args = p.parse_args(args)
if len(args) not in (2, 3):
sys.exit(not p.print_help())
idsfile, afastq, = args[:2]
bfastq = args[2] if len(args) == 3 else None
ids = DictFile(idsfile, valuepos=None)
ai = iter_fastq(open(afastq))
arec = next(ai)
if bfastq:
bi = iter_fastq(open(bfastq))
brec = next(bi)
while arec:
if arec.name[1:] in ids:
print(arec)
if bfastq:
print(brec)
arec = next(ai)
if bfastq:
brec = next(bi) | python | def some(args):
"""
%prog some idsfile afastq [bfastq]
Select a subset of the reads with ids present in the idsfile.
`bfastq` is optional (only if reads are paired)
"""
p = OptionParser(some.__doc__)
opts, args = p.parse_args(args)
if len(args) not in (2, 3):
sys.exit(not p.print_help())
idsfile, afastq, = args[:2]
bfastq = args[2] if len(args) == 3 else None
ids = DictFile(idsfile, valuepos=None)
ai = iter_fastq(open(afastq))
arec = next(ai)
if bfastq:
bi = iter_fastq(open(bfastq))
brec = next(bi)
while arec:
if arec.name[1:] in ids:
print(arec)
if bfastq:
print(brec)
arec = next(ai)
if bfastq:
brec = next(bi) | ['def', 'some', '(', 'args', ')', ':', 'p', '=', 'OptionParser', '(', 'some', '.', '__doc__', ')', 'opts', ',', 'args', '=', 'p', '.', 'parse_args', '(', 'args', ')', 'if', 'len', '(', 'args', ')', 'not', 'in', '(', '2', ',', '3', ')', ':', 'sys', '.', 'exit', '(', 'not', 'p', '.', 'print_help', '(', ')', ')', 'idsfile', ',', 'afastq', ',', '=', 'args', '[', ':', '2', ']', 'bfastq', '=', 'args', '[', '2', ']', 'if', 'len', '(', 'args', ')', '==', '3', 'else', 'None', 'ids', '=', 'DictFile', '(', 'idsfile', ',', 'valuepos', '=', 'None', ')', 'ai', '=', 'iter_fastq', '(', 'open', '(', 'afastq', ')', ')', 'arec', '=', 'next', '(', 'ai', ')', 'if', 'bfastq', ':', 'bi', '=', 'iter_fastq', '(', 'open', '(', 'bfastq', ')', ')', 'brec', '=', 'next', '(', 'bi', ')', 'while', 'arec', ':', 'if', 'arec', '.', 'name', '[', '1', ':', ']', 'in', 'ids', ':', 'print', '(', 'arec', ')', 'if', 'bfastq', ':', 'print', '(', 'brec', ')', 'arec', '=', 'next', '(', 'ai', ')', 'if', 'bfastq', ':', 'brec', '=', 'next', '(', 'bi', ')'] | %prog some idsfile afastq [bfastq]
Select a subset of the reads with ids present in the idsfile.
`bfastq` is optional (only if reads are paired) | ['%prog', 'some', 'idsfile', 'afastq', '[', 'bfastq', ']'] | train | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/fastq.py#L686-L718 |
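A hedged sketch of driving the `some` subcommand directly from Python (it is normally run from the jcvi command line); the file names are placeholders.

```python
from jcvi.formats.fastq import some

# keep only reads whose ids (without the leading '@') are listed in ids.txt
some(["ids.txt", "reads_1.fastq", "reads_2.fastq"])
```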
9,615 | msoulier/tftpy | tftpy/TftpStates.py | TftpState.handleOACK | def handleOACK(self, pkt):
"""This method handles an OACK from the server, syncing any accepted
options."""
if len(pkt.options.keys()) > 0:
if pkt.match_options(self.context.options):
log.info("Successful negotiation of options")
# Set options to OACK options
self.context.options = pkt.options
for key in self.context.options:
log.info(" %s = %s" % (key, self.context.options[key]))
else:
log.error("Failed to negotiate options")
raise TftpException("Failed to negotiate options")
else:
raise TftpException("No options found in OACK") | python | def handleOACK(self, pkt):
"""This method handles an OACK from the server, syncing any accepted
options."""
if len(pkt.options.keys()) > 0:
if pkt.match_options(self.context.options):
log.info("Successful negotiation of options")
# Set options to OACK options
self.context.options = pkt.options
for key in self.context.options:
log.info(" %s = %s" % (key, self.context.options[key]))
else:
log.error("Failed to negotiate options")
raise TftpException("Failed to negotiate options")
else:
raise TftpException("No options found in OACK") | ['def', 'handleOACK', '(', 'self', ',', 'pkt', ')', ':', 'if', 'len', '(', 'pkt', '.', 'options', '.', 'keys', '(', ')', ')', '>', '0', ':', 'if', 'pkt', '.', 'match_options', '(', 'self', '.', 'context', '.', 'options', ')', ':', 'log', '.', 'info', '(', '"Successful negotiation of options"', ')', '# Set options to OACK options', 'self', '.', 'context', '.', 'options', '=', 'pkt', '.', 'options', 'for', 'key', 'in', 'self', '.', 'context', '.', 'options', ':', 'log', '.', 'info', '(', '" %s = %s"', '%', '(', 'key', ',', 'self', '.', 'context', '.', 'options', '[', 'key', ']', ')', ')', 'else', ':', 'log', '.', 'error', '(', '"Failed to negotiate options"', ')', 'raise', 'TftpException', '(', '"Failed to negotiate options"', ')', 'else', ':', 'raise', 'TftpException', '(', '"No options found in OACK"', ')'] | This method handles an OACK from the server, syncing any accepted
options. | ['This', 'method', 'handles', 'an', 'OACK', 'from', 'the', 'server', 'syncing', 'any', 'accepted', 'options', '.'] | train | https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpStates.py#L39-L53 |
9,616 | bcbio/bcbio-nextgen | bcbio/broad/__init__.py | _clean_java_out | def _clean_java_out(version_str):
"""Remove extra environmental information reported in java when querying for versions.
Java will report information like _JAVA_OPTIONS environmental variables in the output.
"""
out = []
for line in version_str.decode().split("\n"):
if line.startswith("Picked up"):
pass
if line.find("setlocale") > 0:
pass
else:
out.append(line)
return "\n".join(out) | python | def _clean_java_out(version_str):
"""Remove extra environmental information reported in java when querying for versions.
Java will report information like _JAVA_OPTIONS environmental variables in the output.
"""
out = []
for line in version_str.decode().split("\n"):
if line.startswith("Picked up"):
pass
if line.find("setlocale") > 0:
pass
else:
out.append(line)
return "\n".join(out) | ['def', '_clean_java_out', '(', 'version_str', ')', ':', 'out', '=', '[', ']', 'for', 'line', 'in', 'version_str', '.', 'decode', '(', ')', '.', 'split', '(', '"\\n"', ')', ':', 'if', 'line', '.', 'startswith', '(', '"Picked up"', ')', ':', 'pass', 'if', 'line', '.', 'find', '(', '"setlocale"', ')', '>', '0', ':', 'pass', 'else', ':', 'out', '.', 'append', '(', 'line', ')', 'return', '"\\n"', '.', 'join', '(', 'out', ')'] | Remove extra environmental information reported in java when querying for versions.
Java will report information like _JAVA_OPTIONS environmental variables in the output. | ['Remove', 'extra', 'environmental', 'information', 'reported', 'in', 'java', 'when', 'querying', 'for', 'versions', '.'] | train | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/broad/__init__.py#L69-L82 |
9,617 | tgbugs/pyontutils | ilxutils/ilxutils/simple_scicrunch_client.py | Client.custom_update | def custom_update(self, data, pred, obj):
''' Updates existing entity proprty based on the predicate input '''
if isinstance(data[pred], str): # for all simple properties of str value
data[pred] = str(obj)
else: # synonyms, superclasses, and existing_ids have special requirements
if pred == 'synonyms':
literals = [d['literal'] for d in data[pred]]
if obj not in literals:
data[pred].append({'literal': obj}) # synonyms req for post
elif pred == 'superclasses':
ilx_ids = [d['ilx'] for d in data[pred]]
if obj not in ilx_ids:
_obj = obj.replace('ILX:', 'ilx_')
super_data, success = self.get_data_from_ilx(ilx_id=_obj)
super_data = super_data['data']
if success:
# superclass req post
data[pred].append({'id': super_data['id'], 'ilx': _obj})
else:
return self.test_check('Your superclass ILX ID '
+ _obj + ' does not exist.')
elif pred == 'existing_ids': # FIXME need to autogenerate curies from a map
iris = [d['iri'] for d in data[pred]]
if obj not in iris:
if 'http' not in obj:
return self.test_check('exisiting id value must \
be a uri containing "http"')
data[pred].append({
'curie': self.qname(obj),
'iri': obj,
'preferred': '0' # preferred is auto generated by preferred_change
})
#data[pred] = []
data = self.preferred_change(data) # One ex id is determined to be preferred
else:
# Somehow broke this code
return self.test_check(pred + ' Has slipped through the cracks')
return data | python | def custom_update(self, data, pred, obj):
''' Updates existing entity proprty based on the predicate input '''
if isinstance(data[pred], str): # for all simple properties of str value
data[pred] = str(obj)
else: # synonyms, superclasses, and existing_ids have special requirements
if pred == 'synonyms':
literals = [d['literal'] for d in data[pred]]
if obj not in literals:
data[pred].append({'literal': obj}) # synonyms req for post
elif pred == 'superclasses':
ilx_ids = [d['ilx'] for d in data[pred]]
if obj not in ilx_ids:
_obj = obj.replace('ILX:', 'ilx_')
super_data, success = self.get_data_from_ilx(ilx_id=_obj)
super_data = super_data['data']
if success:
# superclass req post
data[pred].append({'id': super_data['id'], 'ilx': _obj})
else:
return self.test_check('Your superclass ILX ID '
+ _obj + ' does not exist.')
elif pred == 'existing_ids': # FIXME need to autogenerate curies from a map
iris = [d['iri'] for d in data[pred]]
if obj not in iris:
if 'http' not in obj:
return self.test_check('exisiting id value must \
be a uri containing "http"')
data[pred].append({
'curie': self.qname(obj),
'iri': obj,
'preferred': '0' # preferred is auto generated by preferred_change
})
#data[pred] = []
data = self.preferred_change(data) # One ex id is determined to be preferred
else:
# Somehow broke this code
return self.test_check(pred + ' Has slipped through the cracks')
return data | ['def', 'custom_update', '(', 'self', ',', 'data', ',', 'pred', ',', 'obj', ')', ':', 'if', 'isinstance', '(', 'data', '[', 'pred', ']', ',', 'str', ')', ':', '# for all simple properties of str value', 'data', '[', 'pred', ']', '=', 'str', '(', 'obj', ')', 'else', ':', '# synonyms, superclasses, and existing_ids have special requirements', 'if', 'pred', '==', "'synonyms'", ':', 'literals', '=', '[', 'd', '[', "'literal'", ']', 'for', 'd', 'in', 'data', '[', 'pred', ']', ']', 'if', 'obj', 'not', 'in', 'literals', ':', 'data', '[', 'pred', ']', '.', 'append', '(', '{', "'literal'", ':', 'obj', '}', ')', '# synonyms req for post', 'elif', 'pred', '==', "'superclasses'", ':', 'ilx_ids', '=', '[', 'd', '[', "'ilx'", ']', 'for', 'd', 'in', 'data', '[', 'pred', ']', ']', 'if', 'obj', 'not', 'in', 'ilx_ids', ':', '_obj', '=', 'obj', '.', 'replace', '(', "'ILX:'", ',', "'ilx_'", ')', 'super_data', ',', 'success', '=', 'self', '.', 'get_data_from_ilx', '(', 'ilx_id', '=', '_obj', ')', 'super_data', '=', 'super_data', '[', "'data'", ']', 'if', 'success', ':', '# superclass req post', 'data', '[', 'pred', ']', '.', 'append', '(', '{', "'id'", ':', 'super_data', '[', "'id'", ']', ',', "'ilx'", ':', '_obj', '}', ')', 'else', ':', 'return', 'self', '.', 'test_check', '(', "'Your superclass ILX ID '", '+', '_obj', '+', "' does not exist.'", ')', 'elif', 'pred', '==', "'existing_ids'", ':', '# FIXME need to autogenerate curies from a map', 'iris', '=', '[', 'd', '[', "'iri'", ']', 'for', 'd', 'in', 'data', '[', 'pred', ']', ']', 'if', 'obj', 'not', 'in', 'iris', ':', 'if', "'http'", 'not', 'in', 'obj', ':', 'return', 'self', '.', 'test_check', '(', '\'exisiting id value must \\\n be a uri containing "http"\'', ')', 'data', '[', 'pred', ']', '.', 'append', '(', '{', "'curie'", ':', 'self', '.', 'qname', '(', 'obj', ')', ',', "'iri'", ':', 'obj', ',', "'preferred'", ':', "'0'", '# preferred is auto generated by preferred_change', '}', ')', '#data[pred] = []', 'data', '=', 'self', '.', 'preferred_change', '(', 'data', ')', '# One ex id is determined to be preferred', 'else', ':', '# Somehow broke this code', 'return', 'self', '.', 'test_check', '(', 'pred', '+', "' Has slipped through the cracks'", ')', 'return', 'data'] | Updates existing entity proprty based on the predicate input | ['Updates', 'existing', 'entity', 'proprty', 'based', 'on', 'the', 'predicate', 'input'] | train | https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/simple_scicrunch_client.py#L237-L274 |
9,618 | allenai/allennlp | allennlp/modules/conditional_random_field.py | ConditionalRandomField._input_likelihood | def _input_likelihood(self, logits: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
"""
Computes the (batch_size,) denominator term for the log-likelihood, which is the
sum of the likelihoods across all possible state sequences.
"""
batch_size, sequence_length, num_tags = logits.size()
# Transpose batch size and sequence dimensions
mask = mask.float().transpose(0, 1).contiguous()
logits = logits.transpose(0, 1).contiguous()
# Initial alpha is the (batch_size, num_tags) tensor of likelihoods combining the
# transitions to the initial states and the logits for the first timestep.
if self.include_start_end_transitions:
alpha = self.start_transitions.view(1, num_tags) + logits[0]
else:
alpha = logits[0]
# For each i we compute logits for the transitions from timestep i-1 to timestep i.
# We do so in a (batch_size, num_tags, num_tags) tensor where the axes are
# (instance, current_tag, next_tag)
for i in range(1, sequence_length):
# The emit scores are for time i ("next_tag") so we broadcast along the current_tag axis.
emit_scores = logits[i].view(batch_size, 1, num_tags)
# Transition scores are (current_tag, next_tag) so we broadcast along the instance axis.
transition_scores = self.transitions.view(1, num_tags, num_tags)
# Alpha is for the current_tag, so we broadcast along the next_tag axis.
broadcast_alpha = alpha.view(batch_size, num_tags, 1)
# Add all the scores together and logexp over the current_tag axis
inner = broadcast_alpha + emit_scores + transition_scores
# In valid positions (mask == 1) we want to take the logsumexp over the current_tag dimension
# of ``inner``. Otherwise (mask == 0) we want to retain the previous alpha.
alpha = (util.logsumexp(inner, 1) * mask[i].view(batch_size, 1) +
alpha * (1 - mask[i]).view(batch_size, 1))
# Every sequence needs to end with a transition to the stop_tag.
if self.include_start_end_transitions:
stops = alpha + self.end_transitions.view(1, num_tags)
else:
stops = alpha
# Finally we log_sum_exp along the num_tags dim, result is (batch_size,)
return util.logsumexp(stops) | python | def _input_likelihood(self, logits: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
"""
Computes the (batch_size,) denominator term for the log-likelihood, which is the
sum of the likelihoods across all possible state sequences.
"""
batch_size, sequence_length, num_tags = logits.size()
# Transpose batch size and sequence dimensions
mask = mask.float().transpose(0, 1).contiguous()
logits = logits.transpose(0, 1).contiguous()
# Initial alpha is the (batch_size, num_tags) tensor of likelihoods combining the
# transitions to the initial states and the logits for the first timestep.
if self.include_start_end_transitions:
alpha = self.start_transitions.view(1, num_tags) + logits[0]
else:
alpha = logits[0]
# For each i we compute logits for the transitions from timestep i-1 to timestep i.
# We do so in a (batch_size, num_tags, num_tags) tensor where the axes are
# (instance, current_tag, next_tag)
for i in range(1, sequence_length):
# The emit scores are for time i ("next_tag") so we broadcast along the current_tag axis.
emit_scores = logits[i].view(batch_size, 1, num_tags)
# Transition scores are (current_tag, next_tag) so we broadcast along the instance axis.
transition_scores = self.transitions.view(1, num_tags, num_tags)
# Alpha is for the current_tag, so we broadcast along the next_tag axis.
broadcast_alpha = alpha.view(batch_size, num_tags, 1)
# Add all the scores together and logexp over the current_tag axis
inner = broadcast_alpha + emit_scores + transition_scores
# In valid positions (mask == 1) we want to take the logsumexp over the current_tag dimension
# of ``inner``. Otherwise (mask == 0) we want to retain the previous alpha.
alpha = (util.logsumexp(inner, 1) * mask[i].view(batch_size, 1) +
alpha * (1 - mask[i]).view(batch_size, 1))
# Every sequence needs to end with a transition to the stop_tag.
if self.include_start_end_transitions:
stops = alpha + self.end_transitions.view(1, num_tags)
else:
stops = alpha
# Finally we log_sum_exp along the num_tags dim, result is (batch_size,)
return util.logsumexp(stops) | ['def', '_input_likelihood', '(', 'self', ',', 'logits', ':', 'torch', '.', 'Tensor', ',', 'mask', ':', 'torch', '.', 'Tensor', ')', '->', 'torch', '.', 'Tensor', ':', 'batch_size', ',', 'sequence_length', ',', 'num_tags', '=', 'logits', '.', 'size', '(', ')', '# Transpose batch size and sequence dimensions', 'mask', '=', 'mask', '.', 'float', '(', ')', '.', 'transpose', '(', '0', ',', '1', ')', '.', 'contiguous', '(', ')', 'logits', '=', 'logits', '.', 'transpose', '(', '0', ',', '1', ')', '.', 'contiguous', '(', ')', '# Initial alpha is the (batch_size, num_tags) tensor of likelihoods combining the', '# transitions to the initial states and the logits for the first timestep.', 'if', 'self', '.', 'include_start_end_transitions', ':', 'alpha', '=', 'self', '.', 'start_transitions', '.', 'view', '(', '1', ',', 'num_tags', ')', '+', 'logits', '[', '0', ']', 'else', ':', 'alpha', '=', 'logits', '[', '0', ']', '# For each i we compute logits for the transitions from timestep i-1 to timestep i.', '# We do so in a (batch_size, num_tags, num_tags) tensor where the axes are', '# (instance, current_tag, next_tag)', 'for', 'i', 'in', 'range', '(', '1', ',', 'sequence_length', ')', ':', '# The emit scores are for time i ("next_tag") so we broadcast along the current_tag axis.', 'emit_scores', '=', 'logits', '[', 'i', ']', '.', 'view', '(', 'batch_size', ',', '1', ',', 'num_tags', ')', '# Transition scores are (current_tag, next_tag) so we broadcast along the instance axis.', 'transition_scores', '=', 'self', '.', 'transitions', '.', 'view', '(', '1', ',', 'num_tags', ',', 'num_tags', ')', '# Alpha is for the current_tag, so we broadcast along the next_tag axis.', 'broadcast_alpha', '=', 'alpha', '.', 'view', '(', 'batch_size', ',', 'num_tags', ',', '1', ')', '# Add all the scores together and logexp over the current_tag axis', 'inner', '=', 'broadcast_alpha', '+', 'emit_scores', '+', 'transition_scores', '# In valid positions (mask == 1) we want to take the logsumexp over the current_tag dimension', '# of ``inner``. Otherwise (mask == 0) we want to retain the previous alpha.', 'alpha', '=', '(', 'util', '.', 'logsumexp', '(', 'inner', ',', '1', ')', '*', 'mask', '[', 'i', ']', '.', 'view', '(', 'batch_size', ',', '1', ')', '+', 'alpha', '*', '(', '1', '-', 'mask', '[', 'i', ']', ')', '.', 'view', '(', 'batch_size', ',', '1', ')', ')', '# Every sequence needs to end with a transition to the stop_tag.', 'if', 'self', '.', 'include_start_end_transitions', ':', 'stops', '=', 'alpha', '+', 'self', '.', 'end_transitions', '.', 'view', '(', '1', ',', 'num_tags', ')', 'else', ':', 'stops', '=', 'alpha', '# Finally we log_sum_exp along the num_tags dim, result is (batch_size,)', 'return', 'util', '.', 'logsumexp', '(', 'stops', ')'] | Computes the (batch_size,) denominator term for the log-likelihood, which is the
sum of the likelihoods across all possible state sequences. | ['Computes', 'the', '(', 'batch_size', ')', 'denominator', 'term', 'for', 'the', 'log', '-', 'likelihood', 'which', 'is', 'the', 'sum', 'of', 'the', 'likelihoods', 'across', 'all', 'possible', 'state', 'sequences', '.'] | train | https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/modules/conditional_random_field.py#L207-L251 |
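`_input_likelihood` is the forward-algorithm denominator; it is exercised indirectly through the module's `forward()`, which returns the summed log-likelihood (joint score minus this denominator). A hedged sketch, assuming AllenNLP and PyTorch are installed:

```python
import torch
from allennlp.modules.conditional_random_field import ConditionalRandomField

crf = ConditionalRandomField(num_tags=5)
logits = torch.randn(2, 7, 5)                # (batch, seq_len, num_tags)
tags = torch.randint(0, 5, (2, 7))
mask = torch.ones(2, 7, dtype=torch.long)

log_likelihood = crf(logits, tags, mask)     # numerator - _input_likelihood(...)
print(log_likelihood)
```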
9,619 | honzamach/pynspect | pynspect/filters.py | DataObjectFilter.prepare | def prepare(self, rule):
"""
Parse and/or compile given rule into rule tree.
:param rule: Filtering grammar rule.
:return: Parsed and/or compiled rule.
"""
if self.parser:
rule = self.parser.parse(rule)
if self.compiler:
rule = self.compiler.compile(rule)
return rule | python | def prepare(self, rule):
"""
Parse and/or compile given rule into rule tree.
:param rule: Filtering grammar rule.
:return: Parsed and/or compiled rule.
"""
if self.parser:
rule = self.parser.parse(rule)
if self.compiler:
rule = self.compiler.compile(rule)
return rule | ['def', 'prepare', '(', 'self', ',', 'rule', ')', ':', 'if', 'self', '.', 'parser', ':', 'rule', '=', 'self', '.', 'parser', '.', 'parse', '(', 'rule', ')', 'if', 'self', '.', 'compiler', ':', 'rule', '=', 'self', '.', 'compiler', '.', 'compile', '(', 'rule', ')', 'return', 'rule'] | Parse and/or compile given rule into rule tree.
:param rule: Filtering grammar rule.
:return: Parsed and/or compiled rule. | ['Parse', 'and', '/', 'or', 'compile', 'given', 'rule', 'into', 'rule', 'tree', '.'] | train | https://github.com/honzamach/pynspect/blob/0582dcc1f7aafe50e25a21c792ea1b3367ea5881/pynspect/filters.py#L209-L220 |
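A hedged sketch of the parse-then-filter flow around `prepare()`; the parser module path and the rule grammar follow pynspect's documentation and should be treated as assumptions, as should whether a plain dict is accepted as the filtered record.

```python
from pynspect.gparser import PynspectFilterParser
from pynspect.filters import DataObjectFilter

parser = PynspectFilterParser()
parser.build()

flt = DataObjectFilter(parser=parser)
rule = flt.prepare('Source.Port == 22')
# `rule` is now a parsed rule tree that can be passed to flt.filter(rule, record)
```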
9,620 | sffjunkie/astral | src/astral.py | Astral.solar_zenith | def solar_zenith(self, dateandtime, latitude, longitude):
"""Calculates the solar zenith angle.
:param dateandtime: The date and time for which to calculate
the angle.
:type dateandtime: :class:`~datetime.datetime`
:param latitude: Latitude - Northern latitudes should be positive
:type latitude: float
:param longitude: Longitude - Eastern longitudes should be positive
:type longitude: float
:return: The zenith angle in degrees from vertical.
:rtype: float
If `dateandtime` is a naive Python datetime then it is assumed to be
in the UTC timezone.
"""
return 90.0 - self.solar_elevation(dateandtime, latitude, longitude) | python | def solar_zenith(self, dateandtime, latitude, longitude):
"""Calculates the solar zenith angle.
:param dateandtime: The date and time for which to calculate
the angle.
:type dateandtime: :class:`~datetime.datetime`
:param latitude: Latitude - Northern latitudes should be positive
:type latitude: float
:param longitude: Longitude - Eastern longitudes should be positive
:type longitude: float
:return: The zenith angle in degrees from vertical.
:rtype: float
If `dateandtime` is a naive Python datetime then it is assumed to be
in the UTC timezone.
"""
return 90.0 - self.solar_elevation(dateandtime, latitude, longitude) | ['def', 'solar_zenith', '(', 'self', ',', 'dateandtime', ',', 'latitude', ',', 'longitude', ')', ':', 'return', '90.0', '-', 'self', '.', 'solar_elevation', '(', 'dateandtime', ',', 'latitude', ',', 'longitude', ')'] | Calculates the solar zenith angle.
:param dateandtime: The date and time for which to calculate
the angle.
:type dateandtime: :class:`~datetime.datetime`
:param latitude: Latitude - Northern latitudes should be positive
:type latitude: float
:param longitude: Longitude - Eastern longitudes should be positive
:type longitude: float
:return: The zenith angle in degrees from vertical.
:rtype: float
If `dateandtime` is a naive Python datetime then it is assumed to be
in the UTC timezone. | ['Calculates', 'the', 'solar', 'zenith', 'angle', '.'] | train | https://github.com/sffjunkie/astral/blob/b0aa63fce692357cd33c2bf36c69ed5b6582440c/src/astral.py#L2482-L2500 |
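A hedged usage sketch for `solar_zenith` against the pre-2.0 astral API that this row comes from (the module-level `Astral` class was removed in astral 2.x):

```python
import datetime
from astral import Astral

a = Astral()
noon_utc = datetime.datetime(2019, 6, 21, 12, 0, 0)  # naive datetime -> treated as UTC
print(a.solar_zenith(noon_utc, 51.5, -0.12))         # degrees from vertical, near London
```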
9,621 | santoshphilip/eppy | eppy/hvacbuilder.py | replacebranch | def replacebranch(idf, loop, branch,
listofcomponents, fluid=None,
debugsave=False,
testing=None):
"""It will replace the components in the branch with components in
listofcomponents"""
if fluid is None:
fluid = ''
# -------- testing ---------
testn = 0
# -------- testing ---------
# join them into a branch
# -----------------------
# np1_inlet -> np1 -> np1_np2_node -> np2 -> np2_outlet
# change the node names in the component
# empty the old branch
# fill in the new components with the node names into this branch
listofcomponents = _clean_listofcomponents(listofcomponents)
components = [item[0] for item in listofcomponents]
connectcomponents(idf, listofcomponents, fluid=fluid)
if debugsave:
idf.savecopy("hhh3.idf")
# -------- testing ---------
testn = doingtesting(testing, testn)
if testn == None:
returnnone()
# -------- testing ---------
fields = SomeFields.a_fields
thebranch = branch
componentsintobranch(idf, thebranch, listofcomponents, fluid=fluid)
if debugsave:
idf.savecopy("hhh4.idf")
# -------- testing ---------
testn = doingtesting(testing, testn)
if testn == None:
returnnone()
# -------- testing ---------
# # gather all renamed nodes
# # do the renaming
renamenodes(idf, 'node')
if debugsave:
idf.savecopy("hhh7.idf")
# -------- testing ---------
testn = doingtesting(testing, testn)
if testn == None:
returnnone()
# -------- testing ---------
# check for the end nodes of the loop
if loop.key == 'AIRLOOPHVAC':
fields = SomeFields.a_fields
if loop.key == 'PLANTLOOP':
fields = SomeFields.p_fields
if loop.key == 'CONDENSERLOOP':
fields = SomeFields.c_fields
# for use in bunch
flnames = [field.replace(' ', '_') for field in fields]
if fluid.upper() == 'WATER':
supplyconlistname = loop[flnames[3]]
# Plant_Side_Connector_List_Name or Condenser_Side_Connector_List_Name
elif fluid.upper() == 'AIR':
supplyconlistname = loop[flnames[1]] # Connector_List_Name'
supplyconlist = idf.getobject('CONNECTORLIST', supplyconlistname)
for i in range(1, 100000): # large range to hit end
try:
fieldname = 'Connector_%s_Object_Type' % (i,)
ctype = supplyconlist[fieldname]
except bunch_subclass.BadEPFieldError:
break
if ctype.strip() == '':
break
fieldname = 'Connector_%s_Name' % (i,)
cname = supplyconlist[fieldname]
connector = idf.getobject(ctype.upper(), cname)
if connector.key == 'CONNECTOR:SPLITTER':
firstbranchname = connector.Inlet_Branch_Name
cbranchname = firstbranchname
isfirst = True
if connector.key == 'CONNECTOR:MIXER':
lastbranchname = connector.Outlet_Branch_Name
cbranchname = lastbranchname
isfirst = False
if cbranchname == thebranch.Name:
# rename end nodes
comps = getbranchcomponents(idf, thebranch)
if isfirst:
comp = comps[0]
inletnodename = getnodefieldname(
comp,
"Inlet_Node_Name", fluid)
comp[inletnodename] = [
comp[inletnodename],
loop[flnames[0]]] # Plant_Side_Inlet_Node_Name
else:
comp = comps[-1]
outletnodename = getnodefieldname(
comp,
"Outlet_Node_Name", fluid)
comp[outletnodename] = [
comp[outletnodename],
loop[flnames[1]]] # .Plant_Side_Outlet_Node_Name
# -------- testing ---------
testn = doingtesting(testing, testn)
if testn == None:
returnnone()
# -------- testing ---------
if fluid.upper() == 'WATER':
demandconlistname = loop[flnames[7]] # .Demand_Side_Connector_List_Name
demandconlist = idf.getobject('CONNECTORLIST', demandconlistname)
for i in range(1, 100000): # large range to hit end
try:
fieldname = 'Connector_%s_Object_Type' % (i,)
ctype = demandconlist[fieldname]
except bunch_subclass.BadEPFieldError:
break
if ctype.strip() == '':
break
fieldname = 'Connector_%s_Name' % (i,)
cname = demandconlist[fieldname]
connector = idf.getobject(ctype.upper(), cname)
if connector.key == 'CONNECTOR:SPLITTER':
firstbranchname = connector.Inlet_Branch_Name
cbranchname = firstbranchname
isfirst = True
if connector.key == 'CONNECTOR:MIXER':
lastbranchname = connector.Outlet_Branch_Name
cbranchname = lastbranchname
isfirst = False
if cbranchname == thebranch.Name:
# rename end nodes
comps = getbranchcomponents(idf, thebranch)
if isfirst:
comp = comps[0]
inletnodename = getnodefieldname(
comp,
"Inlet_Node_Name", fluid)
comp[inletnodename] = [
comp[inletnodename],
loop[flnames[4]]] # .Demand_Side_Inlet_Node_Name
if not isfirst:
comp = comps[-1]
outletnodename = getnodefieldname(
comp,
"Outlet_Node_Name", fluid)
comp[outletnodename] = [
comp[outletnodename],
loop[flnames[5]]] # .Demand_Side_Outlet_Node_Name
# -------- testing ---------
testn = doingtesting(testing, testn)
if testn == None:
returnnone()
# -------- testing ---------
if debugsave:
idf.savecopy("hhh8.idf")
# # gather all renamed nodes
# # do the renaming
renamenodes(idf, 'node')
# -------- testing ---------
testn = doingtesting(testing, testn)
if testn == None:
returnnone()
# -------- testing ---------
if debugsave:
idf.savecopy("hhh9.idf")
return thebranch | python | def replacebranch(idf, loop, branch,
listofcomponents, fluid=None,
debugsave=False,
testing=None):
"""It will replace the components in the branch with components in
listofcomponents"""
if fluid is None:
fluid = ''
# -------- testing ---------
testn = 0
# -------- testing ---------
# join them into a branch
# -----------------------
# np1_inlet -> np1 -> np1_np2_node -> np2 -> np2_outlet
# change the node names in the component
# empty the old branch
# fill in the new components with the node names into this branch
listofcomponents = _clean_listofcomponents(listofcomponents)
components = [item[0] for item in listofcomponents]
connectcomponents(idf, listofcomponents, fluid=fluid)
if debugsave:
idf.savecopy("hhh3.idf")
# -------- testing ---------
testn = doingtesting(testing, testn)
if testn == None:
returnnone()
# -------- testing ---------
fields = SomeFields.a_fields
thebranch = branch
componentsintobranch(idf, thebranch, listofcomponents, fluid=fluid)
if debugsave:
idf.savecopy("hhh4.idf")
# -------- testing ---------
testn = doingtesting(testing, testn)
if testn == None:
returnnone()
# -------- testing ---------
# # gather all renamed nodes
# # do the renaming
renamenodes(idf, 'node')
if debugsave:
idf.savecopy("hhh7.idf")
# -------- testing ---------
testn = doingtesting(testing, testn)
if testn == None:
returnnone()
# -------- testing ---------
# check for the end nodes of the loop
if loop.key == 'AIRLOOPHVAC':
fields = SomeFields.a_fields
if loop.key == 'PLANTLOOP':
fields = SomeFields.p_fields
if loop.key == 'CONDENSERLOOP':
fields = SomeFields.c_fields
# for use in bunch
flnames = [field.replace(' ', '_') for field in fields]
if fluid.upper() == 'WATER':
supplyconlistname = loop[flnames[3]]
# Plant_Side_Connector_List_Name or Condenser_Side_Connector_List_Name
elif fluid.upper() == 'AIR':
supplyconlistname = loop[flnames[1]] # Connector_List_Name'
supplyconlist = idf.getobject('CONNECTORLIST', supplyconlistname)
for i in range(1, 100000): # large range to hit end
try:
fieldname = 'Connector_%s_Object_Type' % (i,)
ctype = supplyconlist[fieldname]
except bunch_subclass.BadEPFieldError:
break
if ctype.strip() == '':
break
fieldname = 'Connector_%s_Name' % (i,)
cname = supplyconlist[fieldname]
connector = idf.getobject(ctype.upper(), cname)
if connector.key == 'CONNECTOR:SPLITTER':
firstbranchname = connector.Inlet_Branch_Name
cbranchname = firstbranchname
isfirst = True
if connector.key == 'CONNECTOR:MIXER':
lastbranchname = connector.Outlet_Branch_Name
cbranchname = lastbranchname
isfirst = False
if cbranchname == thebranch.Name:
# rename end nodes
comps = getbranchcomponents(idf, thebranch)
if isfirst:
comp = comps[0]
inletnodename = getnodefieldname(
comp,
"Inlet_Node_Name", fluid)
comp[inletnodename] = [
comp[inletnodename],
loop[flnames[0]]] # Plant_Side_Inlet_Node_Name
else:
comp = comps[-1]
outletnodename = getnodefieldname(
comp,
"Outlet_Node_Name", fluid)
comp[outletnodename] = [
comp[outletnodename],
loop[flnames[1]]] # .Plant_Side_Outlet_Node_Name
# -------- testing ---------
testn = doingtesting(testing, testn)
if testn == None:
returnnone()
# -------- testing ---------
if fluid.upper() == 'WATER':
demandconlistname = loop[flnames[7]] # .Demand_Side_Connector_List_Name
demandconlist = idf.getobject('CONNECTORLIST', demandconlistname)
for i in range(1, 100000): # large range to hit end
try:
fieldname = 'Connector_%s_Object_Type' % (i,)
ctype = demandconlist[fieldname]
except bunch_subclass.BadEPFieldError:
break
if ctype.strip() == '':
break
fieldname = 'Connector_%s_Name' % (i,)
cname = demandconlist[fieldname]
connector = idf.getobject(ctype.upper(), cname)
if connector.key == 'CONNECTOR:SPLITTER':
firstbranchname = connector.Inlet_Branch_Name
cbranchname = firstbranchname
isfirst = True
if connector.key == 'CONNECTOR:MIXER':
lastbranchname = connector.Outlet_Branch_Name
cbranchname = lastbranchname
isfirst = False
if cbranchname == thebranch.Name:
# rename end nodes
comps = getbranchcomponents(idf, thebranch)
if isfirst:
comp = comps[0]
inletnodename = getnodefieldname(
comp,
"Inlet_Node_Name", fluid)
comp[inletnodename] = [
comp[inletnodename],
loop[flnames[4]]] # .Demand_Side_Inlet_Node_Name
if not isfirst:
comp = comps[-1]
outletnodename = getnodefieldname(
comp,
"Outlet_Node_Name", fluid)
comp[outletnodename] = [
comp[outletnodename],
loop[flnames[5]]] # .Demand_Side_Outlet_Node_Name
# -------- testing ---------
testn = doingtesting(testing, testn)
if testn == None:
returnnone()
# -------- testing ---------
if debugsave:
idf.savecopy("hhh8.idf")
# # gather all renamed nodes
# # do the renaming
renamenodes(idf, 'node')
# -------- testing ---------
testn = doingtesting(testing, testn)
if testn == None:
returnnone()
# -------- testing ---------
if debugsave:
idf.savecopy("hhh9.idf")
return thebranch | ['def', 'replacebranch', '(', 'idf', ',', 'loop', ',', 'branch', ',', 'listofcomponents', ',', 'fluid', '=', 'None', ',', 'debugsave', '=', 'False', ',', 'testing', '=', 'None', ')', ':', 'if', 'fluid', 'is', 'None', ':', 'fluid', '=', "''", '# -------- testing ---------', 'testn', '=', '0', '# -------- testing ---------', '# join them into a branch', '# -----------------------', '# np1_inlet -> np1 -> np1_np2_node -> np2 -> np2_outlet', '# change the node names in the component', '# empty the old branch', '# fill in the new components with the node names into this branch', 'listofcomponents', '=', '_clean_listofcomponents', '(', 'listofcomponents', ')', 'components', '=', '[', 'item', '[', '0', ']', 'for', 'item', 'in', 'listofcomponents', ']', 'connectcomponents', '(', 'idf', ',', 'listofcomponents', ',', 'fluid', '=', 'fluid', ')', 'if', 'debugsave', ':', 'idf', '.', 'savecopy', '(', '"hhh3.idf"', ')', '# -------- testing ---------', 'testn', '=', 'doingtesting', '(', 'testing', ',', 'testn', ')', 'if', 'testn', '==', 'None', ':', 'returnnone', '(', ')', '# -------- testing ---------', 'fields', '=', 'SomeFields', '.', 'a_fields', 'thebranch', '=', 'branch', 'componentsintobranch', '(', 'idf', ',', 'thebranch', ',', 'listofcomponents', ',', 'fluid', '=', 'fluid', ')', 'if', 'debugsave', ':', 'idf', '.', 'savecopy', '(', '"hhh4.idf"', ')', '# -------- testing ---------', 'testn', '=', 'doingtesting', '(', 'testing', ',', 'testn', ')', 'if', 'testn', '==', 'None', ':', 'returnnone', '(', ')', '# -------- testing ---------', '# # gather all renamed nodes', '# # do the renaming', 'renamenodes', '(', 'idf', ',', "'node'", ')', 'if', 'debugsave', ':', 'idf', '.', 'savecopy', '(', '"hhh7.idf"', ')', '# -------- testing ---------', 'testn', '=', 'doingtesting', '(', 'testing', ',', 'testn', ')', 'if', 'testn', '==', 'None', ':', 'returnnone', '(', ')', '# -------- testing ---------', '# check for the end nodes of the loop', 'if', 'loop', '.', 'key', '==', "'AIRLOOPHVAC'", ':', 'fields', '=', 'SomeFields', '.', 'a_fields', 'if', 'loop', '.', 'key', '==', "'PLANTLOOP'", ':', 'fields', '=', 'SomeFields', '.', 'p_fields', 'if', 'loop', '.', 'key', '==', "'CONDENSERLOOP'", ':', 'fields', '=', 'SomeFields', '.', 'c_fields', '# for use in bunch', 'flnames', '=', '[', 'field', '.', 'replace', '(', "' '", ',', "'_'", ')', 'for', 'field', 'in', 'fields', ']', 'if', 'fluid', '.', 'upper', '(', ')', '==', "'WATER'", ':', 'supplyconlistname', '=', 'loop', '[', 'flnames', '[', '3', ']', ']', '# Plant_Side_Connector_List_Name or Condenser_Side_Connector_List_Name', 'elif', 'fluid', '.', 'upper', '(', ')', '==', "'AIR'", ':', 'supplyconlistname', '=', 'loop', '[', 'flnames', '[', '1', ']', ']', "# Connector_List_Name'", 'supplyconlist', '=', 'idf', '.', 'getobject', '(', "'CONNECTORLIST'", ',', 'supplyconlistname', ')', 'for', 'i', 'in', 'range', '(', '1', ',', '100000', ')', ':', '# large range to hit end', 'try', ':', 'fieldname', '=', "'Connector_%s_Object_Type'", '%', '(', 'i', ',', ')', 'ctype', '=', 'supplyconlist', '[', 'fieldname', ']', 'except', 'bunch_subclass', '.', 'BadEPFieldError', ':', 'break', 'if', 'ctype', '.', 'strip', '(', ')', '==', "''", ':', 'break', 'fieldname', '=', "'Connector_%s_Name'", '%', '(', 'i', ',', ')', 'cname', '=', 'supplyconlist', '[', 'fieldname', ']', 'connector', '=', 'idf', '.', 'getobject', '(', 'ctype', '.', 'upper', '(', ')', ',', 'cname', ')', 'if', 'connector', '.', 'key', '==', "'CONNECTOR:SPLITTER'", ':', 'firstbranchname', '=', 'connector', 
'.', 'Inlet_Branch_Name', 'cbranchname', '=', 'firstbranchname', 'isfirst', '=', 'True', 'if', 'connector', '.', 'key', '==', "'CONNECTOR:MIXER'", ':', 'lastbranchname', '=', 'connector', '.', 'Outlet_Branch_Name', 'cbranchname', '=', 'lastbranchname', 'isfirst', '=', 'False', 'if', 'cbranchname', '==', 'thebranch', '.', 'Name', ':', '# rename end nodes', 'comps', '=', 'getbranchcomponents', '(', 'idf', ',', 'thebranch', ')', 'if', 'isfirst', ':', 'comp', '=', 'comps', '[', '0', ']', 'inletnodename', '=', 'getnodefieldname', '(', 'comp', ',', '"Inlet_Node_Name"', ',', 'fluid', ')', 'comp', '[', 'inletnodename', ']', '=', '[', 'comp', '[', 'inletnodename', ']', ',', 'loop', '[', 'flnames', '[', '0', ']', ']', ']', '# Plant_Side_Inlet_Node_Name', 'else', ':', 'comp', '=', 'comps', '[', '-', '1', ']', 'outletnodename', '=', 'getnodefieldname', '(', 'comp', ',', '"Outlet_Node_Name"', ',', 'fluid', ')', 'comp', '[', 'outletnodename', ']', '=', '[', 'comp', '[', 'outletnodename', ']', ',', 'loop', '[', 'flnames', '[', '1', ']', ']', ']', '# .Plant_Side_Outlet_Node_Name', '# -------- testing ---------', 'testn', '=', 'doingtesting', '(', 'testing', ',', 'testn', ')', 'if', 'testn', '==', 'None', ':', 'returnnone', '(', ')', '# -------- testing ---------', 'if', 'fluid', '.', 'upper', '(', ')', '==', "'WATER'", ':', 'demandconlistname', '=', 'loop', '[', 'flnames', '[', '7', ']', ']', '# .Demand_Side_Connector_List_Name', 'demandconlist', '=', 'idf', '.', 'getobject', '(', "'CONNECTORLIST'", ',', 'demandconlistname', ')', 'for', 'i', 'in', 'range', '(', '1', ',', '100000', ')', ':', '# large range to hit end', 'try', ':', 'fieldname', '=', "'Connector_%s_Object_Type'", '%', '(', 'i', ',', ')', 'ctype', '=', 'demandconlist', '[', 'fieldname', ']', 'except', 'bunch_subclass', '.', 'BadEPFieldError', ':', 'break', 'if', 'ctype', '.', 'strip', '(', ')', '==', "''", ':', 'break', 'fieldname', '=', "'Connector_%s_Name'", '%', '(', 'i', ',', ')', 'cname', '=', 'demandconlist', '[', 'fieldname', ']', 'connector', '=', 'idf', '.', 'getobject', '(', 'ctype', '.', 'upper', '(', ')', ',', 'cname', ')', 'if', 'connector', '.', 'key', '==', "'CONNECTOR:SPLITTER'", ':', 'firstbranchname', '=', 'connector', '.', 'Inlet_Branch_Name', 'cbranchname', '=', 'firstbranchname', 'isfirst', '=', 'True', 'if', 'connector', '.', 'key', '==', "'CONNECTOR:MIXER'", ':', 'lastbranchname', '=', 'connector', '.', 'Outlet_Branch_Name', 'cbranchname', '=', 'lastbranchname', 'isfirst', '=', 'False', 'if', 'cbranchname', '==', 'thebranch', '.', 'Name', ':', '# rename end nodes', 'comps', '=', 'getbranchcomponents', '(', 'idf', ',', 'thebranch', ')', 'if', 'isfirst', ':', 'comp', '=', 'comps', '[', '0', ']', 'inletnodename', '=', 'getnodefieldname', '(', 'comp', ',', '"Inlet_Node_Name"', ',', 'fluid', ')', 'comp', '[', 'inletnodename', ']', '=', '[', 'comp', '[', 'inletnodename', ']', ',', 'loop', '[', 'flnames', '[', '4', ']', ']', ']', '# .Demand_Side_Inlet_Node_Name', 'if', 'not', 'isfirst', ':', 'comp', '=', 'comps', '[', '-', '1', ']', 'outletnodename', '=', 'getnodefieldname', '(', 'comp', ',', '"Outlet_Node_Name"', ',', 'fluid', ')', 'comp', '[', 'outletnodename', ']', '=', '[', 'comp', '[', 'outletnodename', ']', ',', 'loop', '[', 'flnames', '[', '5', ']', ']', ']', '# .Demand_Side_Outlet_Node_Name', '# -------- testing ---------', 'testn', '=', 'doingtesting', '(', 'testing', ',', 'testn', ')', 'if', 'testn', '==', 'None', ':', 'returnnone', '(', ')', '# -------- testing ---------', 'if', 'debugsave', ':', 'idf', '.', 
'savecopy', '(', '"hhh8.idf"', ')', '# # gather all renamed nodes', '# # do the renaming', 'renamenodes', '(', 'idf', ',', "'node'", ')', '# -------- testing ---------', 'testn', '=', 'doingtesting', '(', 'testing', ',', 'testn', ')', 'if', 'testn', '==', 'None', ':', 'returnnone', '(', ')', '# -------- testing ---------', 'if', 'debugsave', ':', 'idf', '.', 'savecopy', '(', '"hhh9.idf"', ')', 'return', 'thebranch'] | It will replace the components in the branch with components in
listofcomponents | ['It', 'will', 'replace', 'the', 'components', 'in', 'the', 'branch', 'with', 'components', 'in', 'listofcomponents'] | train | https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/hvacbuilder.py#L1005-L1178 |
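A usage sketch for replacebranch above, loosely following eppy's HVAC-builder tutorial; the idf object, the loop built by makeplantloop, and every object name here are illustrative assumptions rather than values taken from this record:

    from eppy import hvacbuilder
    # assumes `idf` is an eppy IDF and `loop` was created earlier, e.g. with
    # hvacbuilder.makeplantloop(idf, "p_loop", sloop, dloop)
    pipe1 = idf.newidfobject("PIPE:ADIABATIC", Name="np1")
    chiller = idf.newidfobject("CHILLER:ELECTRIC", Name="chiller")
    pipe2 = idf.newidfobject("PIPE:ADIABATIC", Name="np2")
    branch = idf.getobject("BRANCH", "sb1")  # an existing supply branch of the loop
    newbranch = hvacbuilder.replacebranch(idf, loop, branch,
                                          [pipe1, chiller, pipe2], fluid="Water")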
9,622 | aliyun/aliyun-log-python-sdk | aliyun/log/logclient.py | LogClient.get_logs | def get_logs(self, request):
""" Get logs from log service.
Unsuccessful operation will cause a LogException.
Note: for larger volume of data (e.g. > 1 million logs), use get_log_all
:type request: GetLogsRequest
:param request: the GetLogs request parameters class.
:return: GetLogsResponse
:raise: LogException
"""
project = request.get_project()
logstore = request.get_logstore()
from_time = request.get_from()
to_time = request.get_to()
topic = request.get_topic()
query = request.get_query()
reverse = request.get_reverse()
offset = request.get_offset()
size = request.get_line()
return self.get_log(project, logstore, from_time, to_time, topic,
query, reverse, offset, size) | python | def get_logs(self, request):
""" Get logs from log service.
Unsuccessful operation will cause a LogException.
Note: for larger volume of data (e.g. > 1 million logs), use get_log_all
:type request: GetLogsRequest
:param request: the GetLogs request parameters class.
:return: GetLogsResponse
:raise: LogException
"""
project = request.get_project()
logstore = request.get_logstore()
from_time = request.get_from()
to_time = request.get_to()
topic = request.get_topic()
query = request.get_query()
reverse = request.get_reverse()
offset = request.get_offset()
size = request.get_line()
return self.get_log(project, logstore, from_time, to_time, topic,
query, reverse, offset, size) | ['def', 'get_logs', '(', 'self', ',', 'request', ')', ':', 'project', '=', 'request', '.', 'get_project', '(', ')', 'logstore', '=', 'request', '.', 'get_logstore', '(', ')', 'from_time', '=', 'request', '.', 'get_from', '(', ')', 'to_time', '=', 'request', '.', 'get_to', '(', ')', 'topic', '=', 'request', '.', 'get_topic', '(', ')', 'query', '=', 'request', '.', 'get_query', '(', ')', 'reverse', '=', 'request', '.', 'get_reverse', '(', ')', 'offset', '=', 'request', '.', 'get_offset', '(', ')', 'size', '=', 'request', '.', 'get_line', '(', ')', 'return', 'self', '.', 'get_log', '(', 'project', ',', 'logstore', ',', 'from_time', ',', 'to_time', ',', 'topic', ',', 'query', ',', 'reverse', ',', 'offset', ',', 'size', ')'] | Get logs from log service.
Unsuccessful operation will cause a LogException.
Note: for larger volume of data (e.g. > 1 million logs), use get_log_all
:type request: GetLogsRequest
:param request: the GetLogs request parameters class.
:return: GetLogsResponse
:raise: LogException | ['Get', 'logs', 'from', 'log', 'service', '.', 'Unsuccessful', 'opertaion', 'will', 'cause', 'an', 'LogException', '.', 'Note', ':', 'for', 'larger', 'volume', 'of', 'data', '(', 'e', '.', 'g', '.', '>', '1', 'million', 'logs', ')', 'use', 'get_log_all', ':', 'type', 'request', ':', 'GetLogsRequest', ':', 'param', 'request', ':', 'the', 'GetLogs', 'request', 'parameters', 'class', '.', ':', 'return', ':', 'GetLogsResponse', ':', 'raise', ':', 'LogException'] | train | https://github.com/aliyun/aliyun-log-python-sdk/blob/ac383db0a16abf1e5ef7df36074374184b43516e/aliyun/log/logclient.py#L547-L570 |
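A usage sketch for LogClient.get_logs above; the endpoint, credentials, project and logstore names and the time window are placeholders, not values from this record:

    from aliyun.log import LogClient, GetLogsRequest
    client = LogClient('cn-hangzhou.log.aliyuncs.com', '<access_key_id>', '<access_key_secret>')
    request = GetLogsRequest(project='my-project', logstore='my-logstore',
                             fromTime=1546272000, toTime=1546275600, query='*', line=100)
    response = client.get_logs(request)
    response.log_print()  # print the returned logs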
9,623 | manahl/arctic | arctic/tickstore/toplevel.py | TopLevelTickStore.add | def add(self, date_range, library_name):
"""
Adds the library with the given date range to the underlying collection of libraries used by this store.
The underlying libraries should not overlap as the date ranges are assumed to be CLOSED_CLOSED by this function
and the rest of the class.
Arguments:
date_range: A date range provided on the assumption that it is CLOSED_CLOSED. If for example the underlying
libraries were split by year, the start of the date range would be datetime.datetime(year, 1, 1) and the end
would be datetime.datetime(year, 12, 31, 23, 59, 59, 999000). The date range must fall on UTC day boundaries,
that is the start must be at midnight and the end must be 1 millisecond before midnight.
library_name: The name of the underlying library. This must be the name of a valid Arctic library
"""
# check that the library is valid
try:
self._arctic_lib.arctic[library_name]
except Exception as e:
logger.error("Could not load library")
raise e
assert date_range.start and date_range.end, "Date range should have start and end properties {}".format(date_range)
start = date_range.start.astimezone(mktz('UTC')) if date_range.start.tzinfo is not None else date_range.start.replace(tzinfo=mktz('UTC'))
end = date_range.end.astimezone(mktz('UTC')) if date_range.end.tzinfo is not None else date_range.end.replace(tzinfo=mktz('UTC'))
assert start.time() == time.min and end.time() == end_time_min, "Date range should fall on UTC day boundaries {}".format(date_range)
# check that the date range does not overlap
library_metadata = self._get_library_metadata(date_range)
if len(library_metadata) > 1 or (len(library_metadata) == 1 and library_metadata[0] != library_name):
raise OverlappingDataException("""There are libraries that overlap with the date range:
library: {}
overlapping libraries: {}""".format(library_name, [l.library for l in library_metadata]))
self._collection.update_one({'library_name': library_name},
{'$set': {'start': start, 'end': end}}, upsert=True) | python | def add(self, date_range, library_name):
"""
Adds the library with the given date range to the underlying collection of libraries used by this store.
The underlying libraries should not overlap as the date ranges are assumed to be CLOSED_CLOSED by this function
and the rest of the class.
Arguments:
date_range: A date range provided on the assumption that it is CLOSED_CLOSED. If for example the underlying
libraries were split by year, the start of the date range would be datetime.datetime(year, 1, 1) and the end
would be datetime.datetime(year, 12, 31, 23, 59, 59, 999000). The date range must fall on UTC day boundaries,
that is the start must be at midnight and the end must be 1 millisecond before midnight.
library_name: The name of the underlying library. This must be the name of a valid Arctic library
"""
# check that the library is valid
try:
self._arctic_lib.arctic[library_name]
except Exception as e:
logger.error("Could not load library")
raise e
assert date_range.start and date_range.end, "Date range should have start and end properties {}".format(date_range)
start = date_range.start.astimezone(mktz('UTC')) if date_range.start.tzinfo is not None else date_range.start.replace(tzinfo=mktz('UTC'))
end = date_range.end.astimezone(mktz('UTC')) if date_range.end.tzinfo is not None else date_range.end.replace(tzinfo=mktz('UTC'))
assert start.time() == time.min and end.time() == end_time_min, "Date range should fall on UTC day boundaries {}".format(date_range)
# check that the date range does not overlap
library_metadata = self._get_library_metadata(date_range)
if len(library_metadata) > 1 or (len(library_metadata) == 1 and library_metadata[0] != library_name):
raise OverlappingDataException("""There are libraries that overlap with the date range:
library: {}
overlapping libraries: {}""".format(library_name, [l.library for l in library_metadata]))
self._collection.update_one({'library_name': library_name},
{'$set': {'start': start, 'end': end}}, upsert=True) | ['def', 'add', '(', 'self', ',', 'date_range', ',', 'library_name', ')', ':', '# check that the library is valid', 'try', ':', 'self', '.', '_arctic_lib', '.', 'arctic', '[', 'library_name', ']', 'except', 'Exception', 'as', 'e', ':', 'logger', '.', 'error', '(', '"Could not load library"', ')', 'raise', 'e', 'assert', 'date_range', '.', 'start', 'and', 'date_range', '.', 'end', ',', '"Date range should have start and end properties {}"', '.', 'format', '(', 'date_range', ')', 'start', '=', 'date_range', '.', 'start', '.', 'astimezone', '(', 'mktz', '(', "'UTC'", ')', ')', 'if', 'date_range', '.', 'start', '.', 'tzinfo', 'is', 'not', 'None', 'else', 'date_range', '.', 'start', '.', 'replace', '(', 'tzinfo', '=', 'mktz', '(', "'UTC'", ')', ')', 'end', '=', 'date_range', '.', 'end', '.', 'astimezone', '(', 'mktz', '(', "'UTC'", ')', ')', 'if', 'date_range', '.', 'end', '.', 'tzinfo', 'is', 'not', 'None', 'else', 'date_range', '.', 'end', '.', 'replace', '(', 'tzinfo', '=', 'mktz', '(', "'UTC'", ')', ')', 'assert', 'start', '.', 'time', '(', ')', '==', 'time', '.', 'min', 'and', 'end', '.', 'time', '(', ')', '==', 'end_time_min', ',', '"Date range should fall on UTC day boundaries {}"', '.', 'format', '(', 'date_range', ')', '# check that the date range does not overlap', 'library_metadata', '=', 'self', '.', '_get_library_metadata', '(', 'date_range', ')', 'if', 'len', '(', 'library_metadata', ')', '>', '1', 'or', '(', 'len', '(', 'library_metadata', ')', '==', '1', 'and', 'library_metadata', '[', '0', ']', '!=', 'library_name', ')', ':', 'raise', 'OverlappingDataException', '(', '"""There are libraries that overlap with the date range:\nlibrary: {}\noverlapping libraries: {}"""', '.', 'format', '(', 'library_name', ',', '[', 'l', '.', 'library', 'for', 'l', 'in', 'library_metadata', ']', ')', ')', 'self', '.', '_collection', '.', 'update_one', '(', '{', "'library_name'", ':', 'library_name', '}', ',', '{', "'$set'", ':', '{', "'start'", ':', 'start', ',', "'end'", ':', 'end', '}', '}', ',', 'upsert', '=', 'True', ')'] | Adds the library with the given date range to the underlying collection of libraries used by this store.
The underlying libraries should not overlap as the date ranges are assumed to be CLOSED_CLOSED by this function
and the rest of the class.
Arguments:
date_range: A date range provided on the assumption that it is CLOSED_CLOSED. If for example the underlying
libraries were split by year, the start of the date range would be datetime.datetime(year, 1, 1) and the end
would be datetime.datetime(year, 12, 31, 23, 59, 59, 999000). The date range must fall on UTC day boundaries,
that is the start must be at midnight and the end must be 1 millisecond before midnight.
library_name: The name of the underlying library. This must be the name of a valid Arctic library | ['Adds', 'the', 'library', 'with', 'the', 'given', 'date', 'range', 'to', 'the', 'underlying', 'collection', 'of', 'libraries', 'used', 'by', 'this', 'store', '.', 'The', 'underlying', 'libraries', 'should', 'not', 'overlap', 'as', 'the', 'date', 'ranges', 'are', 'assumed', 'to', 'be', 'CLOSED_CLOSED', 'by', 'this', 'function', 'and', 'the', 'rest', 'of', 'the', 'class', '.'] | train | https://github.com/manahl/arctic/blob/57e110b6e182dbab00e7e214dc26f7d9ec47c120/arctic/tickstore/toplevel.py#L71-L103 |
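A usage sketch for TopLevelTickStore.add above, mirroring the year-per-library layout described in the docstring; the toplevel library handle and the library name are illustrative assumptions:

    from datetime import datetime as dt
    from arctic.date import DateRange
    # assumes `toplevel` is a TopLevelTickStore obtained from an Arctic store
    date_range = DateRange(start=dt(2016, 1, 1), end=dt(2016, 12, 31, 23, 59, 59, 999000))
    toplevel.add(date_range, 'FEED_TICKS_2016')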
9,624 | user-cont/conu | conu/backend/k8s/backend.py | K8sBackend.cleanup_deployments | def cleanup_deployments(self):
"""
Delete all deployments created in namespaces associated with this backend
:return: None
"""
deployments = self.list_deployments()
for deployment in deployments:
if deployment.namespace in self.managed_namespaces:
deployment.delete() | python | def cleanup_deployments(self):
"""
Delete all deployments created in namespaces associated with this backend
:return: None
"""
deployments = self.list_deployments()
for deployment in deployments:
if deployment.namespace in self.managed_namespaces:
deployment.delete() | ['def', 'cleanup_deployments', '(', 'self', ')', ':', 'deployments', '=', 'self', '.', 'list_deployments', '(', ')', 'for', 'deployment', 'in', 'deployments', ':', 'if', 'deployment', '.', 'namespace', 'in', 'self', '.', 'managed_namespaces', ':', 'deployment', '.', 'delete', '(', ')'] | Delete all deployments created in namespaces associated with this backend
:return: None | ['Delete', 'all', 'deployments', 'created', 'in', 'namespaces', 'associated', 'with', 'this', 'backend', ':', 'return', ':', 'None'] | train | https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/backend/k8s/backend.py#L228-L237 |
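A usage sketch for K8sBackend.cleanup_deployments above; cluster access and the api_key handling are assumptions, and the deployment-creation step is elided:

    from conu.backend.k8s.backend import K8sBackend
    with K8sBackend(api_key='<cluster token>') as backend:
        # ... create deployments inside a namespace managed by this backend ...
        backend.cleanup_deployments()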
9,625 | mitsei/dlkit | dlkit/json_/assessment/sessions.py | BankHierarchySession.get_parent_bank_ids | def get_parent_bank_ids(self, bank_id):
"""Gets the parent ``Ids`` of the given bank.
arg: bank_id (osid.id.Id): a bank ``Id``
return: (osid.id.IdList) - the parent ``Ids`` of the bank
raise: NotFound - ``bank_id`` is not found
raise: NullArgument - ``bank_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_parent_bin_ids
if self._catalog_session is not None:
return self._catalog_session.get_parent_catalog_ids(catalog_id=bank_id)
return self._hierarchy_session.get_parents(id_=bank_id) | python | def get_parent_bank_ids(self, bank_id):
"""Gets the parent ``Ids`` of the given bank.
arg: bank_id (osid.id.Id): a bank ``Id``
return: (osid.id.IdList) - the parent ``Ids`` of the bank
raise: NotFound - ``bank_id`` is not found
raise: NullArgument - ``bank_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_parent_bin_ids
if self._catalog_session is not None:
return self._catalog_session.get_parent_catalog_ids(catalog_id=bank_id)
return self._hierarchy_session.get_parents(id_=bank_id) | ['def', 'get_parent_bank_ids', '(', 'self', ',', 'bank_id', ')', ':', '# Implemented from template for', '# osid.resource.BinHierarchySession.get_parent_bin_ids', 'if', 'self', '.', '_catalog_session', 'is', 'not', 'None', ':', 'return', 'self', '.', '_catalog_session', '.', 'get_parent_catalog_ids', '(', 'catalog_id', '=', 'bank_id', ')', 'return', 'self', '.', '_hierarchy_session', '.', 'get_parents', '(', 'id_', '=', 'bank_id', ')'] | Gets the parent ``Ids`` of the given bank.
arg: bank_id (osid.id.Id): a bank ``Id``
return: (osid.id.IdList) - the parent ``Ids`` of the bank
raise: NotFound - ``bank_id`` is not found
raise: NullArgument - ``bank_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.* | ['Gets', 'the', 'parent', 'Ids', 'of', 'the', 'given', 'bank', '.'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/sessions.py#L9075-L9091 |
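A usage sketch for get_parent_bank_ids above; the session and the bank Id are assumed to come from an already configured assessment service:

    # assumes `session` is a BankHierarchySession and `bank_id` the Id of an existing bank
    parent_ids = session.get_parent_bank_ids(bank_id)
    while parent_ids.has_next():
        print(parent_ids.get_next_id())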
9,626 | flowersteam/explauto | explauto/utils/utils.py | discrete_random_draw | def discrete_random_draw(data, nb=1):
''' Code from Steve Nguyen'''
data = np.array(data)
if not data.any():
data = np.ones_like(data)
data = data/data.sum()
xk = np.arange(len(data))
custm = stats.rv_discrete(name='custm', values=(xk, data))
return custm.rvs(size=nb) | python | def discrete_random_draw(data, nb=1):
''' Code from Steve Nguyen'''
data = np.array(data)
if not data.any():
data = np.ones_like(data)
data = data/data.sum()
xk = np.arange(len(data))
custm = stats.rv_discrete(name='custm', values=(xk, data))
return custm.rvs(size=nb) | ['def', 'discrete_random_draw', '(', 'data', ',', 'nb', '=', '1', ')', ':', 'data', '=', 'np', '.', 'array', '(', 'data', ')', 'if', 'not', 'data', '.', 'any', '(', ')', ':', 'data', '=', 'np', '.', 'ones_like', '(', 'data', ')', 'data', '=', 'data', '/', 'data', '.', 'sum', '(', ')', 'xk', '=', 'np', '.', 'arange', '(', 'len', '(', 'data', ')', ')', 'custm', '=', 'stats', '.', 'rv_discrete', '(', 'name', '=', "'custm'", ',', 'values', '=', '(', 'xk', ',', 'data', ')', ')', 'return', 'custm', '.', 'rvs', '(', 'size', '=', 'nb', ')'] | Code from Steve Nguyen | ['Code', 'from', 'Steve', 'Nguyen'] | train | https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/utils/utils.py#L46-L54 |
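A usage sketch for discrete_random_draw above; the weights need not be normalised because the function normalises them itself:

    from explauto.utils.utils import discrete_random_draw
    samples = discrete_random_draw([1, 2, 7], nb=5)
    # e.g. array([2, 2, 1, 2, 0]); index 2 dominates because its weight is largest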
9,627 | exa-analytics/exa | exa/core/editor.py | Editor.head | def head(self, n=10):
"""
Display the top of the file.
Args:
n (int): Number of lines to display
"""
r = self.__repr__().split('\n')
print('\n'.join(r[:n]), end=' ') | python | def head(self, n=10):
"""
Display the top of the file.
Args:
n (int): Number of lines to display
"""
r = self.__repr__().split('\n')
print('\n'.join(r[:n]), end=' ') | ['def', 'head', '(', 'self', ',', 'n', '=', '10', ')', ':', 'r', '=', 'self', '.', '__repr__', '(', ')', '.', 'split', '(', "'\\n'", ')', 'print', '(', "'\\n'", '.', 'join', '(', 'r', '[', ':', 'n', ']', ')', ',', 'end', '=', "' '", ')'] | Display the top of the file.
Args:
n (int): Number of lines to display | ['Display', 'the', 'top', 'of', 'the', 'file', '.'] | train | https://github.com/exa-analytics/exa/blob/40fb3c22b531d460dbc51e603de75b856cc28f0d/exa/core/editor.py#L88-L96 |
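A usage sketch for Editor.head above; constructing the editor from an in-memory string is an assumption (the class also accepts file paths in this codebase):

    from exa.core.editor import Editor
    editor = Editor("line 1\nline 2\nline 3\nline 4")
    editor.head(2)  # display only the first two lines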
9,628 | pywbem/pywbem | pywbem/_recorder.py | _represent_undefined | def _represent_undefined(self, data):
"""Raises flag for objects that cannot be represented"""
raise RepresenterError(
_format("Cannot represent an object: {0!A} of type: {1}; "
"yaml_representers: {2!A}, "
"yaml_multi_representers: {3!A}",
data, type(data), self.yaml_representers.keys(),
self.yaml_multi_representers.keys())) | python | def _represent_undefined(self, data):
"""Raises flag for objects that cannot be represented"""
raise RepresenterError(
_format("Cannot represent an object: {0!A} of type: {1}; "
"yaml_representers: {2!A}, "
"yaml_multi_representers: {3!A}",
data, type(data), self.yaml_representers.keys(),
self.yaml_multi_representers.keys())) | ['def', '_represent_undefined', '(', 'self', ',', 'data', ')', ':', 'raise', 'RepresenterError', '(', '_format', '(', '"Cannot represent an object: {0!A} of type: {1}; "', '"yaml_representers: {2!A}, "', '"yaml_multi_representers: {3!A}"', ',', 'data', ',', 'type', '(', 'data', ')', ',', 'self', '.', 'yaml_representers', '.', 'keys', '(', ')', ',', 'self', '.', 'yaml_multi_representers', '.', 'keys', '(', ')', ')', ')'] | Raises flag for objects that cannot be represented | ['Raises', 'flag', 'for', 'objects', 'that', 'cannot', 'be', 'represented'] | train | https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/_recorder.py#L133-L140 |
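A sketch of how a strict fallback representer such as _represent_undefined is typically installed with PyYAML; the dumper subclass and the registration under None (the catch-all slot) are illustrative, not pywbem's actual wiring:

    import yaml

    class StrictSafeDumper(yaml.SafeDumper):
        pass

    # route any type without an explicit representer to the strict handler above
    StrictSafeDumper.add_representer(None, _represent_undefined)
    # dumping an unsupported type through this dumper now fails with the detailed message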
9,629 | insightindustry/validator-collection | validator_collection/checkers.py | is_uuid | def is_uuid(value, **kwargs):
"""Indicate whether ``value`` contains a :class:`UUID <python:uuid.UUID>`
:param value: The value to evaluate.
:returns: ``True`` if ``value`` is valid, ``False`` if it is not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator
"""
try:
validators.uuid(value, **kwargs)
except SyntaxError as error:
raise error
except Exception:
return False
return True | python | def is_uuid(value, **kwargs):
"""Indicate whether ``value`` contains a :class:`UUID <python:uuid.UUID>`
:param value: The value to evaluate.
:returns: ``True`` if ``value`` is valid, ``False`` if it is not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator
"""
try:
validators.uuid(value, **kwargs)
except SyntaxError as error:
raise error
except Exception:
return False
return True | ['def', 'is_uuid', '(', 'value', ',', '*', '*', 'kwargs', ')', ':', 'try', ':', 'validators', '.', 'uuid', '(', 'value', ',', '*', '*', 'kwargs', ')', 'except', 'SyntaxError', 'as', 'error', ':', 'raise', 'error', 'except', 'Exception', ':', 'return', 'False', 'return', 'True'] | Indicate whether ``value`` contains a :class:`UUID <python:uuid.UUID>`
:param value: The value to evaluate.
:returns: ``True`` if ``value`` is valid, ``False`` if it is not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator | ['Indicate', 'whether', 'value', 'contains', 'a', ':', 'class', ':', 'UUID', '<python', ':', 'uuid', '.', 'UUID', '>'] | train | https://github.com/insightindustry/validator-collection/blob/8c8047a0fa36cc88a021771279898278c4cc98e3/validator_collection/checkers.py#L585-L604 |
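A usage sketch for is_uuid above:

    from validator_collection import checkers
    checkers.is_uuid('123e4567-e89b-12d3-a456-426655440000')  # True
    checkers.is_uuid('not-a-uuid')                            # False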
9,630 | bapakode/OmMongo | ommongo/query.py | Query.fields | def fields(self, *fields):
''' Only return the specified fields from the object. Accessing a \
field that was not specified in ``fields`` will result in a \
:class:``ommongo.document.FieldNotRetrieved`` exception being \
raised
:param fields: Instances of :class:``ommongo.query.QueryField`` specifying \
which fields to return
'''
if self._fields is None:
self._fields = set()
for f in fields:
f = resolve_name(self.type, f)
self._fields.add(f)
self._fields.add(self.type.mongo_id)
return self | python | def fields(self, *fields):
''' Only return the specified fields from the object. Accessing a \
field that was not specified in ``fields`` will result in a \
:class:``ommongo.document.FieldNotRetrieved`` exception being \
raised
:param fields: Instances of :class:``ommongo.query.QueryField`` specifying \
which fields to return
'''
if self._fields is None:
self._fields = set()
for f in fields:
f = resolve_name(self.type, f)
self._fields.add(f)
self._fields.add(self.type.mongo_id)
return self | ['def', 'fields', '(', 'self', ',', '*', 'fields', ')', ':', 'if', 'self', '.', '_fields', 'is', 'None', ':', 'self', '.', '_fields', '=', 'set', '(', ')', 'for', 'f', 'in', 'fields', ':', 'f', '=', 'resolve_name', '(', 'self', '.', 'type', ',', 'f', ')', 'self', '.', '_fields', '.', 'add', '(', 'f', ')', 'self', '.', '_fields', '.', 'add', '(', 'self', '.', 'type', '.', 'mongo_id', ')', 'return', 'self'] | Only return the specified fields from the object. Accessing a \
field that was not specified in ``fields`` will result in a \
:class:``ommongo.document.FieldNotRetrieved`` exception being \
raised
:param fields: Instances of :class:``ommongo.query.QueryField`` specifying \
which fields to return | ['Only', 'return', 'the', 'specified', 'fields', 'from', 'the', 'object', '.', 'Accessing', 'a', '\\', 'field', 'that', 'was', 'not', 'specified', 'in', 'fields', 'will', 'result', 'in', 'a', '\\', ':', 'class', ':', 'ommongo', '.', 'document', '.', 'FieldNotRetrieved', 'exception', 'being', '\\', 'raised'] | train | https://github.com/bapakode/OmMongo/blob/52b5a5420516dc709f2d2eb065818c7973991ce3/ommongo/query.py#L230-L245 |
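A usage sketch for Query.fields above; the session and the User document class (with name and age fields) are illustrative assumptions:

    # only name and age (plus mongo_id) are fetched for each matching document
    query = session.query(User).fields(User.name, User.age)
    for user in query:
        print(user.name)  # reading an unselected field would raise FieldNotRetrieved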
9,631 | genialis/resolwe | resolwe/process/runtime.py | Process.run_process | def run_process(self, slug, inputs):
"""Run a new process from a running process."""
def export_files(value):
"""Export input files of spawned process."""
if isinstance(value, str) and os.path.isfile(value):
# TODO: Use the protocol to export files and get the
# process schema to check field type.
print("export {}".format(value))
elif isinstance(value, dict):
for item in value.values():
export_files(item)
elif isinstance(value, list):
for item in value:
export_files(item)
export_files(inputs)
print('run {}'.format(json.dumps({'process': slug, 'input': inputs}, separators=(',', ':')))) | python | def run_process(self, slug, inputs):
"""Run a new process from a running process."""
def export_files(value):
"""Export input files of spawned process."""
if isinstance(value, str) and os.path.isfile(value):
# TODO: Use the protocol to export files and get the
# process schema to check field type.
print("export {}".format(value))
elif isinstance(value, dict):
for item in value.values():
export_files(item)
elif isinstance(value, list):
for item in value:
export_files(item)
export_files(inputs)
print('run {}'.format(json.dumps({'process': slug, 'input': inputs}, separators=(',', ':')))) | ['def', 'run_process', '(', 'self', ',', 'slug', ',', 'inputs', ')', ':', 'def', 'export_files', '(', 'value', ')', ':', '"""Export input files of spawned process."""', 'if', 'isinstance', '(', 'value', ',', 'str', ')', 'and', 'os', '.', 'path', '.', 'isfile', '(', 'value', ')', ':', '# TODO: Use the protocol to export files and get the', '# process schema to check field type.', 'print', '(', '"export {}"', '.', 'format', '(', 'value', ')', ')', 'elif', 'isinstance', '(', 'value', ',', 'dict', ')', ':', 'for', 'item', 'in', 'value', '.', 'values', '(', ')', ':', 'export_files', '(', 'item', ')', 'elif', 'isinstance', '(', 'value', ',', 'list', ')', ':', 'for', 'item', 'in', 'value', ':', 'export_files', '(', 'item', ')', 'export_files', '(', 'inputs', ')', 'print', '(', "'run {}'", '.', 'format', '(', 'json', '.', 'dumps', '(', '{', "'process'", ':', 'slug', ',', "'input'", ':', 'inputs', '}', ',', 'separators', '=', '(', "','", ',', "':'", ')', ')', ')', ')'] | Run a new process from a running process. | ['Run', 'a', 'new', 'process', 'from', 'a', 'running', 'process', '.'] | train | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/process/runtime.py#L177-L193 |
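A usage sketch for run_process above; the child process slug and the input names are illustrative, and the surrounding Process subclass is reduced to the relevant method:

    from resolwe.process import Process

    class MyAnalysis(Process):
        # slug, version and requirements omitted for brevity
        def run(self, inputs, outputs):
            self.run_process('alignment-bwa-mem', {
                'genome': inputs.genome,
                'reads': inputs.reads,
            })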
9,632 | LuqueDaniel/pybooru | pybooru/moebooru.py | Moebooru._build_hash_string | def _build_hash_string(self):
"""Function for build password hash string.
Raises:
PybooruError: When isn't provide hash string.
PybooruError: When aren't provide username or password.
PybooruError: When Pybooru can't add password to hash strring.
"""
# Build AUTENTICATION hash_string
# Check if hash_string exists
if self.site_name in SITE_LIST or self.hash_string:
if self.username and self.password:
try:
hash_string = self.hash_string.format(self.password)
except TypeError:
raise PybooruError("Pybooru can't add 'password' "
"to 'hash_string'")
# encrypt hashed_string to SHA1 and return hexdigest string
self.password_hash = hashlib.sha1(
hash_string.encode('utf-8')).hexdigest()
else:
raise PybooruError("Specify the 'username' and 'password' "
"parameters of the Pybooru object, for "
"setting 'password_hash' attribute.")
else:
raise PybooruError(
"Specify the 'hash_string' parameter of the Pybooru"
" object, for the functions that requires login.") | python | def _build_hash_string(self):
"""Function for build password hash string.
Raises:
PybooruError: When isn't provide hash string.
PybooruError: When aren't provide username or password.
PybooruError: When Pybooru can't add password to hash strring.
"""
# Build AUTENTICATION hash_string
# Check if hash_string exists
if self.site_name in SITE_LIST or self.hash_string:
if self.username and self.password:
try:
hash_string = self.hash_string.format(self.password)
except TypeError:
raise PybooruError("Pybooru can't add 'password' "
"to 'hash_string'")
# encrypt hashed_string to SHA1 and return hexdigest string
self.password_hash = hashlib.sha1(
hash_string.encode('utf-8')).hexdigest()
else:
raise PybooruError("Specify the 'username' and 'password' "
"parameters of the Pybooru object, for "
"setting 'password_hash' attribute.")
else:
raise PybooruError(
"Specify the 'hash_string' parameter of the Pybooru"
" object, for the functions that requires login.") | ['def', '_build_hash_string', '(', 'self', ')', ':', '# Build AUTENTICATION hash_string', '# Check if hash_string exists', 'if', 'self', '.', 'site_name', 'in', 'SITE_LIST', 'or', 'self', '.', 'hash_string', ':', 'if', 'self', '.', 'username', 'and', 'self', '.', 'password', ':', 'try', ':', 'hash_string', '=', 'self', '.', 'hash_string', '.', 'format', '(', 'self', '.', 'password', ')', 'except', 'TypeError', ':', 'raise', 'PybooruError', '(', '"Pybooru can\'t add \'password\' "', '"to \'hash_string\'"', ')', '# encrypt hashed_string to SHA1 and return hexdigest string', 'self', '.', 'password_hash', '=', 'hashlib', '.', 'sha1', '(', 'hash_string', '.', 'encode', '(', "'utf-8'", ')', ')', '.', 'hexdigest', '(', ')', 'else', ':', 'raise', 'PybooruError', '(', '"Specify the \'username\' and \'password\' "', '"parameters of the Pybooru object, for "', '"setting \'password_hash\' attribute."', ')', 'else', ':', 'raise', 'PybooruError', '(', '"Specify the \'hash_string\' parameter of the Pybooru"', '" object, for the functions that requires login."', ')'] | Function for build password hash string.
Raises:
PybooruError: When no hash string is provided.
PybooruError: When username or password is not provided.
PybooruError: When Pybooru can't add password to hash strring. | ['Function', 'for', 'build', 'password', 'hash', 'string', '.'] | train | https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/moebooru.py#L104-L131 |
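A usage sketch for the record above; _build_hash_string runs internally when credentials are supplied, so only client construction is shown, and the site name, user and password are placeholders:

    from pybooru import Moebooru
    client = Moebooru('konachan', username='my_user', password='my_password')
    # for a site outside the built-in list, a hash_string template such as
    # 'some-prefix--{0}--' has to be passed so the password hash can be built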
9,633 | yyuu/botornado | boto/ec2/volume.py | Volume.snapshots | def snapshots(self, owner=None, restorable_by=None):
"""
Get all snapshots related to this volume. Note that this requires
that all available snapshots for the account be retrieved from EC2
first and then the list is filtered client-side to contain only
those for this volume.
:type owner: str
:param owner: If present, only the snapshots owned by the specified user
will be returned. Valid values are:
self | amazon | AWS Account ID
:type restorable_by: str
:param restorable_by: If present, only the snapshots that are restorable
by the specified account id will be returned.
:rtype: list of L{boto.ec2.snapshot.Snapshot}
:return: The requested Snapshot objects
"""
rs = self.connection.get_all_snapshots(owner=owner,
restorable_by=restorable_by)
mine = []
for snap in rs:
if snap.volume_id == self.id:
mine.append(snap)
return mine | python | def snapshots(self, owner=None, restorable_by=None):
"""
Get all snapshots related to this volume. Note that this requires
that all available snapshots for the account be retrieved from EC2
first and then the list is filtered client-side to contain only
those for this volume.
:type owner: str
:param owner: If present, only the snapshots owned by the specified user
will be returned. Valid values are:
self | amazon | AWS Account ID
:type restorable_by: str
:param restorable_by: If present, only the snapshots that are restorable
by the specified account id will be returned.
:rtype: list of L{boto.ec2.snapshot.Snapshot}
:return: The requested Snapshot objects
"""
rs = self.connection.get_all_snapshots(owner=owner,
restorable_by=restorable_by)
mine = []
for snap in rs:
if snap.volume_id == self.id:
mine.append(snap)
return mine | ['def', 'snapshots', '(', 'self', ',', 'owner', '=', 'None', ',', 'restorable_by', '=', 'None', ')', ':', 'rs', '=', 'self', '.', 'connection', '.', 'get_all_snapshots', '(', 'owner', '=', 'owner', ',', 'restorable_by', '=', 'restorable_by', ')', 'mine', '=', '[', ']', 'for', 'snap', 'in', 'rs', ':', 'if', 'snap', '.', 'volume_id', '==', 'self', '.', 'id', ':', 'mine', '.', 'append', '(', 'snap', ')', 'return', 'mine'] | Get all snapshots related to this volume. Note that this requires
that all available snapshots for the account be retrieved from EC2
first and then the list is filtered client-side to contain only
those for this volume.
:type owner: str
:param owner: If present, only the snapshots owned by the specified user
will be returned. Valid values are:
self | amazon | AWS Account ID
:type restorable_by: str
:param restorable_by: If present, only the snapshots that are restorable
by the specified account id will be returned.
:rtype: list of L{boto.ec2.snapshot.Snapshot}
:return: The requested Snapshot objects | ['Get', 'all', 'snapshots', 'related', 'to', 'this', 'volume', '.', 'Note', 'that', 'this', 'requires', 'that', 'all', 'available', 'snapshots', 'for', 'the', 'account', 'be', 'retrieved', 'from', 'EC2', 'first', 'and', 'then', 'the', 'list', 'is', 'filtered', 'client', '-', 'side', 'to', 'contain', 'only', 'those', 'for', 'this', 'volume', '.'] | train | https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/ec2/volume.py#L173-L199 |
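A usage sketch for Volume.snapshots above; the region, volume id and implicit credentials are placeholders:

    import boto.ec2
    conn = boto.ec2.connect_to_region('us-east-1')
    volume = conn.get_all_volumes(['vol-12345678'])[0]
    own_snapshots = volume.snapshots(owner='self')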
9,634 | polyaxon/polyaxon | polyaxon/scheduler/spawners/templates/experiment_jobs/manager.py | ResourceManager.get_init_container | def get_init_container(self,
init_command,
init_args,
env_vars,
context_mounts,
persistence_outputs,
persistence_data):
"""Pod init container for setting outputs path."""
env_vars = to_list(env_vars, check_none=True)
if self.original_name is not None and self.cloning_strategy == CloningStrategy.RESUME:
return []
if self.original_name is not None and self.cloning_strategy == CloningStrategy.COPY:
command = InitCommands.COPY
original_outputs_path = stores.get_experiment_outputs_path(
persistence=persistence_outputs,
experiment_name=self.original_name)
else:
command = InitCommands.CREATE
original_outputs_path = None
outputs_path = stores.get_experiment_outputs_path(
persistence=persistence_outputs,
experiment_name=self.experiment_name)
_, outputs_volume_mount = get_pod_outputs_volume(persistence_outputs=persistence_outputs)
volume_mounts = outputs_volume_mount + to_list(context_mounts, check_none=True)
init_command = init_command or ["/bin/sh", "-c"]
init_args = init_args or to_list(
get_output_args(command=command,
outputs_path=outputs_path,
original_outputs_path=original_outputs_path))
init_args += to_list(get_auth_context_args(entity='experiment',
entity_name=self.experiment_name))
return [
client.V1Container(
name=self.init_container_name,
image=self.init_docker_image,
image_pull_policy=self.init_docker_image_pull_policy,
command=init_command,
args=[''.join(init_args)],
env=env_vars,
volume_mounts=volume_mounts)
] | python | def get_init_container(self,
init_command,
init_args,
env_vars,
context_mounts,
persistence_outputs,
persistence_data):
"""Pod init container for setting outputs path."""
env_vars = to_list(env_vars, check_none=True)
if self.original_name is not None and self.cloning_strategy == CloningStrategy.RESUME:
return []
if self.original_name is not None and self.cloning_strategy == CloningStrategy.COPY:
command = InitCommands.COPY
original_outputs_path = stores.get_experiment_outputs_path(
persistence=persistence_outputs,
experiment_name=self.original_name)
else:
command = InitCommands.CREATE
original_outputs_path = None
outputs_path = stores.get_experiment_outputs_path(
persistence=persistence_outputs,
experiment_name=self.experiment_name)
_, outputs_volume_mount = get_pod_outputs_volume(persistence_outputs=persistence_outputs)
volume_mounts = outputs_volume_mount + to_list(context_mounts, check_none=True)
init_command = init_command or ["/bin/sh", "-c"]
init_args = init_args or to_list(
get_output_args(command=command,
outputs_path=outputs_path,
original_outputs_path=original_outputs_path))
init_args += to_list(get_auth_context_args(entity='experiment',
entity_name=self.experiment_name))
return [
client.V1Container(
name=self.init_container_name,
image=self.init_docker_image,
image_pull_policy=self.init_docker_image_pull_policy,
command=init_command,
args=[''.join(init_args)],
env=env_vars,
volume_mounts=volume_mounts)
] | ['def', 'get_init_container', '(', 'self', ',', 'init_command', ',', 'init_args', ',', 'env_vars', ',', 'context_mounts', ',', 'persistence_outputs', ',', 'persistence_data', ')', ':', 'env_vars', '=', 'to_list', '(', 'env_vars', ',', 'check_none', '=', 'True', ')', 'if', 'self', '.', 'original_name', 'is', 'not', 'None', 'and', 'self', '.', 'cloning_strategy', '==', 'CloningStrategy', '.', 'RESUME', ':', 'return', '[', ']', 'if', 'self', '.', 'original_name', 'is', 'not', 'None', 'and', 'self', '.', 'cloning_strategy', '==', 'CloningStrategy', '.', 'COPY', ':', 'command', '=', 'InitCommands', '.', 'COPY', 'original_outputs_path', '=', 'stores', '.', 'get_experiment_outputs_path', '(', 'persistence', '=', 'persistence_outputs', ',', 'experiment_name', '=', 'self', '.', 'original_name', ')', 'else', ':', 'command', '=', 'InitCommands', '.', 'CREATE', 'original_outputs_path', '=', 'None', 'outputs_path', '=', 'stores', '.', 'get_experiment_outputs_path', '(', 'persistence', '=', 'persistence_outputs', ',', 'experiment_name', '=', 'self', '.', 'experiment_name', ')', '_', ',', 'outputs_volume_mount', '=', 'get_pod_outputs_volume', '(', 'persistence_outputs', '=', 'persistence_outputs', ')', 'volume_mounts', '=', 'outputs_volume_mount', '+', 'to_list', '(', 'context_mounts', ',', 'check_none', '=', 'True', ')', 'init_command', '=', 'init_command', 'or', '[', '"/bin/sh"', ',', '"-c"', ']', 'init_args', '=', 'init_args', 'or', 'to_list', '(', 'get_output_args', '(', 'command', '=', 'command', ',', 'outputs_path', '=', 'outputs_path', ',', 'original_outputs_path', '=', 'original_outputs_path', ')', ')', 'init_args', '+=', 'to_list', '(', 'get_auth_context_args', '(', 'entity', '=', "'experiment'", ',', 'entity_name', '=', 'self', '.', 'experiment_name', ')', ')', 'return', '[', 'client', '.', 'V1Container', '(', 'name', '=', 'self', '.', 'init_container_name', ',', 'image', '=', 'self', '.', 'init_docker_image', ',', 'image_pull_policy', '=', 'self', '.', 'init_docker_image_pull_policy', ',', 'command', '=', 'init_command', ',', 'args', '=', '[', "''", '.', 'join', '(', 'init_args', ')', ']', ',', 'env', '=', 'env_vars', ',', 'volume_mounts', '=', 'volume_mounts', ')', ']'] | Pod init container for setting outputs path. | ['Pod', 'init', 'container', 'for', 'setting', 'outputs', 'path', '.'] | train | https://github.com/polyaxon/polyaxon/blob/e1724f0756b1a42f9e7aa08a976584a84ef7f016/polyaxon/scheduler/spawners/templates/experiment_jobs/manager.py#L169-L210 |
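A sketch of how the list returned by get_init_container above is typically consumed; the resource manager instance, the main container and the volumes are illustrative assumptions:

    from kubernetes import client
    init_containers = resource_manager.get_init_container(
        init_command=None, init_args=None, env_vars=[],
        context_mounts=[], persistence_outputs=None, persistence_data=None)
    pod_spec = client.V1PodSpec(containers=[main_container],
                                init_containers=init_containers,
                                volumes=volumes)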
9,635 | Kronuz/pyScss | scss/extension/compass/sprites.py | sprite | def sprite(map, sprite, offset_x=None, offset_y=None, cache_buster=True):
"""
Returns the image and background position for use in a single shorthand
property
"""
map = map.render()
sprite_maps = _get_cache('sprite_maps')
sprite_map = sprite_maps.get(map)
sprite_name = String.unquoted(sprite).value
sprite = sprite_map and sprite_map.get(sprite_name)
if not sprite_map:
log.error("No sprite map found: %s", map, extra={'stack': True})
elif not sprite:
log.error("No sprite found: %s in %s", sprite_name, sprite_map['*n*'], extra={'stack': True})
if sprite:
url = '%s%s' % (config.ASSETS_URL, sprite_map['*f*'])
if cache_buster:
url += '?_=%s' % sprite_map['*t*']
x = Number(offset_x or 0, 'px')
y = Number(offset_y or 0, 'px')
if not x.value or (x.value <= -1 or x.value >= 1) and not x.is_simple_unit('%'):
x -= Number(sprite[2], 'px')
if not y.value or (y.value <= -1 or y.value >= 1) and not y.is_simple_unit('%'):
y -= Number(sprite[3], 'px')
url = "url(%s)" % escape(url)
return List([String.unquoted(url), x, y])
return List([Number(0), Number(0)]) | python | def sprite(map, sprite, offset_x=None, offset_y=None, cache_buster=True):
"""
Returns the image and background position for use in a single shorthand
property
"""
map = map.render()
sprite_maps = _get_cache('sprite_maps')
sprite_map = sprite_maps.get(map)
sprite_name = String.unquoted(sprite).value
sprite = sprite_map and sprite_map.get(sprite_name)
if not sprite_map:
log.error("No sprite map found: %s", map, extra={'stack': True})
elif not sprite:
log.error("No sprite found: %s in %s", sprite_name, sprite_map['*n*'], extra={'stack': True})
if sprite:
url = '%s%s' % (config.ASSETS_URL, sprite_map['*f*'])
if cache_buster:
url += '?_=%s' % sprite_map['*t*']
x = Number(offset_x or 0, 'px')
y = Number(offset_y or 0, 'px')
if not x.value or (x.value <= -1 or x.value >= 1) and not x.is_simple_unit('%'):
x -= Number(sprite[2], 'px')
if not y.value or (y.value <= -1 or y.value >= 1) and not y.is_simple_unit('%'):
y -= Number(sprite[3], 'px')
url = "url(%s)" % escape(url)
return List([String.unquoted(url), x, y])
return List([Number(0), Number(0)]) | ['def', 'sprite', '(', 'map', ',', 'sprite', ',', 'offset_x', '=', 'None', ',', 'offset_y', '=', 'None', ',', 'cache_buster', '=', 'True', ')', ':', 'map', '=', 'map', '.', 'render', '(', ')', 'sprite_maps', '=', '_get_cache', '(', "'sprite_maps'", ')', 'sprite_map', '=', 'sprite_maps', '.', 'get', '(', 'map', ')', 'sprite_name', '=', 'String', '.', 'unquoted', '(', 'sprite', ')', '.', 'value', 'sprite', '=', 'sprite_map', 'and', 'sprite_map', '.', 'get', '(', 'sprite_name', ')', 'if', 'not', 'sprite_map', ':', 'log', '.', 'error', '(', '"No sprite map found: %s"', ',', 'map', ',', 'extra', '=', '{', "'stack'", ':', 'True', '}', ')', 'elif', 'not', 'sprite', ':', 'log', '.', 'error', '(', '"No sprite found: %s in %s"', ',', 'sprite_name', ',', 'sprite_map', '[', "'*n*'", ']', ',', 'extra', '=', '{', "'stack'", ':', 'True', '}', ')', 'if', 'sprite', ':', 'url', '=', "'%s%s'", '%', '(', 'config', '.', 'ASSETS_URL', ',', 'sprite_map', '[', "'*f*'", ']', ')', 'if', 'cache_buster', ':', 'url', '+=', "'?_=%s'", '%', 'sprite_map', '[', "'*t*'", ']', 'x', '=', 'Number', '(', 'offset_x', 'or', '0', ',', "'px'", ')', 'y', '=', 'Number', '(', 'offset_y', 'or', '0', ',', "'px'", ')', 'if', 'not', 'x', '.', 'value', 'or', '(', 'x', '.', 'value', '<=', '-', '1', 'or', 'x', '.', 'value', '>=', '1', ')', 'and', 'not', 'x', '.', 'is_simple_unit', '(', "'%'", ')', ':', 'x', '-=', 'Number', '(', 'sprite', '[', '2', ']', ',', "'px'", ')', 'if', 'not', 'y', '.', 'value', 'or', '(', 'y', '.', 'value', '<=', '-', '1', 'or', 'y', '.', 'value', '>=', '1', ')', 'and', 'not', 'y', '.', 'is_simple_unit', '(', "'%'", ')', ':', 'y', '-=', 'Number', '(', 'sprite', '[', '3', ']', ',', "'px'", ')', 'url', '=', '"url(%s)"', '%', 'escape', '(', 'url', ')', 'return', 'List', '(', '[', 'String', '.', 'unquoted', '(', 'url', ')', ',', 'x', ',', 'y', ']', ')', 'return', 'List', '(', '[', 'Number', '(', '0', ')', ',', 'Number', '(', '0', ')', ']', ')'] | Returns the image and background position for use in a single shorthand
property | ['Returns', 'the', 'image', 'and', 'background', 'position', 'for', 'use', 'in', 'a', 'single', 'shorthand', 'property'] | train | https://github.com/Kronuz/pyScss/blob/fb32b317f6e2b4b4aad2b86a74844658ac4aa11e/scss/extension/compass/sprites.py#L479-L505 |
9,636 | dslackw/slpkg | slpkg/sbo/dependency.py | Requires.sbo | def sbo(self, name):
"""Build all dependencies of a package
"""
if (self.meta.rsl_deps in ["on", "ON"] and
"--resolve-off" not in self.flag):
sys.setrecursionlimit(10000)
dependencies = []
requires = SBoGrep(name).requires()
if requires:
for req in requires:
status(0.03)
# toolbar_width = status(index, toolbar_width, 1)
# avoid to add %README% as dependency and
# if require in blacklist
if "%README%" not in req and req not in self.blacklist:
dependencies.append(req)
if dependencies:
self.dep_results.append(dependencies)
for dep in dependencies:
self.sbo(dep)
return self.dep_results
else:
return [] | python | def sbo(self, name):
"""Build all dependencies of a package
"""
if (self.meta.rsl_deps in ["on", "ON"] and
"--resolve-off" not in self.flag):
sys.setrecursionlimit(10000)
dependencies = []
requires = SBoGrep(name).requires()
if requires:
for req in requires:
status(0.03)
# toolbar_width = status(index, toolbar_width, 1)
# avoid to add %README% as dependency and
# if require in blacklist
if "%README%" not in req and req not in self.blacklist:
dependencies.append(req)
if dependencies:
self.dep_results.append(dependencies)
for dep in dependencies:
self.sbo(dep)
return self.dep_results
else:
return [] | ['def', 'sbo', '(', 'self', ',', 'name', ')', ':', 'if', '(', 'self', '.', 'meta', '.', 'rsl_deps', 'in', '[', '"on"', ',', '"ON"', ']', 'and', '"--resolve-off"', 'not', 'in', 'self', '.', 'flag', ')', ':', 'sys', '.', 'setrecursionlimit', '(', '10000', ')', 'dependencies', '=', '[', ']', 'requires', '=', 'SBoGrep', '(', 'name', ')', '.', 'requires', '(', ')', 'if', 'requires', ':', 'for', 'req', 'in', 'requires', ':', 'status', '(', '0.03', ')', '# toolbar_width = status(index, toolbar_width, 1)', '# avoid to add %README% as dependency and', '# if require in blacklist', 'if', '"%README%"', 'not', 'in', 'req', 'and', 'req', 'not', 'in', 'self', '.', 'blacklist', ':', 'dependencies', '.', 'append', '(', 'req', ')', 'if', 'dependencies', ':', 'self', '.', 'dep_results', '.', 'append', '(', 'dependencies', ')', 'for', 'dep', 'in', 'dependencies', ':', 'self', '.', 'sbo', '(', 'dep', ')', 'return', 'self', '.', 'dep_results', 'else', ':', 'return', '[', ']'] | Build all dependencies of a package | ['Build', 'all', 'dependencies', 'of', 'a', 'package'] | train | https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/sbo/dependency.py#L45-L67 |
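A usage sketch for Requires.sbo above; the constructor argument (the list of CLI flags) and the package name are illustrative assumptions:

    deps_tree = Requires([]).sbo('ffmpeg')
    # deps_tree is a list of dependency lists, one entry per resolution step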
9,637 | aiidateam/aiida-codtools | aiida_codtools/workflows/cif_clean.py | CifCleanWorkChain.results | def results(self):
"""If successfully created, add the cleaned `CifData` and `StructureData` as output nodes to the workchain.
The filter and select calculations were successful, so we return the cleaned CifData node. If the `group_cif`
was defined in the inputs, the node is added to it. If the structure should have been parsed, verify that it
was put in the context by the `parse_cif_structure` step and add it to the group and outputs, otherwise
return the finish status that should correspond to the exit code of the `primitive_structure_from_cif` function.
"""
self.out('cif', self.ctx.cif)
if 'group_cif' in self.inputs:
self.inputs.group_cif.add_nodes([self.ctx.cif])
if 'group_structure' in self.inputs:
try:
structure = self.ctx.structure
except AttributeError:
return self.ctx.exit_code
else:
self.inputs.group_structure.add_nodes([structure])
self.out('structure', structure)
self.report('workchain finished successfully') | python | def results(self):
"""If successfully created, add the cleaned `CifData` and `StructureData` as output nodes to the workchain.
The filter and select calculations were successful, so we return the cleaned CifData node. If the `group_cif`
was defined in the inputs, the node is added to it. If the structure should have been parsed, verify that it
was put in the context by the `parse_cif_structure` step and add it to the group and outputs, otherwise
return the finish status that should correspond to the exit code of the `primitive_structure_from_cif` function.
"""
self.out('cif', self.ctx.cif)
if 'group_cif' in self.inputs:
self.inputs.group_cif.add_nodes([self.ctx.cif])
if 'group_structure' in self.inputs:
try:
structure = self.ctx.structure
except AttributeError:
return self.ctx.exit_code
else:
self.inputs.group_structure.add_nodes([structure])
self.out('structure', structure)
self.report('workchain finished successfully') | ['def', 'results', '(', 'self', ')', ':', 'self', '.', 'out', '(', "'cif'", ',', 'self', '.', 'ctx', '.', 'cif', ')', 'if', "'group_cif'", 'in', 'self', '.', 'inputs', ':', 'self', '.', 'inputs', '.', 'group_cif', '.', 'add_nodes', '(', '[', 'self', '.', 'ctx', '.', 'cif', ']', ')', 'if', "'group_structure'", 'in', 'self', '.', 'inputs', ':', 'try', ':', 'structure', '=', 'self', '.', 'ctx', '.', 'structure', 'except', 'AttributeError', ':', 'return', 'self', '.', 'ctx', '.', 'exit_code', 'else', ':', 'self', '.', 'inputs', '.', 'group_structure', '.', 'add_nodes', '(', '[', 'structure', ']', ')', 'self', '.', 'out', '(', "'structure'", ',', 'structure', ')', 'self', '.', 'report', '(', "'workchain finished successfully'", ')'] | If successfully created, add the cleaned `CifData` and `StructureData` as output nodes to the workchain.
The filter and select calculations were successful, so we return the cleaned CifData node. If the `group_cif`
was defined in the inputs, the node is added to it. If the structure should have been parsed, verify that it
was put in the context by the `parse_cif_structure` step and add it to the group and outputs, otherwise
return the finish status that should correspond to the exit code of the `primitive_structure_from_cif` function. | ['If', 'successfully', 'created', 'add', 'the', 'cleaned', 'CifData', 'and', 'StructureData', 'as', 'output', 'nodes', 'to', 'the', 'workchain', '.'] | train | https://github.com/aiidateam/aiida-codtools/blob/da5e4259b7a2e86cf0cc3f997e11dd36d445fa94/aiida_codtools/workflows/cif_clean.py#L180-L202 |
9,638 | acutesoftware/AIKIF | aikif/cls_log.py | Log.get_session_id | def get_session_id(self):
"""
get a unique id (shortish string) to allow simple aggregation
of log records from multiple sources. This id is used for the
life of the running program to allow extraction from all logs.
WARNING - this can give duplicate sessions when 2 apps hit it
at the same time.
"""
max_session = '0'
try:
with open(self.log_folder + os.sep + '_sessions.txt', 'r') as f:
for _ in f:
txt = f.readline()
if txt.strip('\n') != '':
max_session = txt
except Exception:
max_session = '1'
this_session = str(int(max_session) + random.randint(9,100)).zfill(9) # not a great way to ensure uniqueness - TODO FIX
with open(self.log_folder + os.sep + '_sessions.txt', 'a') as f2:
f2.write(this_session + '\n')
return this_session | python | def get_session_id(self):
"""
get a unique id (shortish string) to allow simple aggregation
of log records from multiple sources. This id is used for the
life of the running program to allow extraction from all logs.
WARNING - this can give duplicate sessions when 2 apps hit it
at the same time.
"""
max_session = '0'
try:
with open(self.log_folder + os.sep + '_sessions.txt', 'r') as f:
for _ in f:
txt = f.readline()
if txt.strip('\n') != '':
max_session = txt
except Exception:
max_session = '1'
this_session = str(int(max_session) + random.randint(9,100)).zfill(9) # not a great way to ensure uniqueness - TODO FIX
with open(self.log_folder + os.sep + '_sessions.txt', 'a') as f2:
f2.write(this_session + '\n')
return this_session | ['def', 'get_session_id', '(', 'self', ')', ':', 'max_session', '=', "'0'", 'try', ':', 'with', 'open', '(', 'self', '.', 'log_folder', '+', 'os', '.', 'sep', '+', "'_sessions.txt'", ',', "'r'", ')', 'as', 'f', ':', 'for', '_', 'in', 'f', ':', 'txt', '=', 'f', '.', 'readline', '(', ')', 'if', 'txt', '.', 'strip', '(', "'\\n'", ')', '!=', "''", ':', 'max_session', '=', 'txt', 'except', 'Exception', ':', 'max_session', '=', "'1'", 'this_session', '=', 'str', '(', 'int', '(', 'max_session', ')', '+', 'random', '.', 'randint', '(', '9', ',', '100', ')', ')', '.', 'zfill', '(', '9', ')', '# not a great way to ensure uniqueness - TODO FIX ', 'with', 'open', '(', 'self', '.', 'log_folder', '+', 'os', '.', 'sep', '+', "'_sessions.txt'", ',', "'a'", ')', 'as', 'f2', ':', 'f2', '.', 'write', '(', 'this_session', '+', "'\\n'", ')', 'return', 'this_session'] | get a unique id (shortish string) to allow simple aggregation
of log records from multiple sources. This id is used for the
life of the running program to allow extraction from all logs.
WARNING - this can give duplicate sessions when 2 apps hit it
at the same time. | ['get', 'a', 'unique', 'id', '(', 'shortish', 'string', ')', 'to', 'allow', 'simple', 'aggregation', 'of', 'log', 'records', 'from', 'multiple', 'sources', '.', 'This', 'id', 'is', 'used', 'for', 'the', 'life', 'of', 'the', 'running', 'program', 'to', 'allow', 'extraction', 'from', 'all', 'logs', '.', 'WARING', '-', 'this', 'can', 'give', 'duplicate', 'sessions', 'when', '2', 'apps', 'hit', 'it', 'at', 'the', 'same', 'time', '.'] | train | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_log.py#L64-L85 |
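The get_session_id docstring itself flags the race: two processes reading and appending _sessions.txt at the same moment can end up with the same id. One common alternative, shown purely as an illustration and not as what AIKIF does, is to derive the id from uuid4 so no shared counter file is involved:

import uuid

def make_session_id():
    # Each call draws fresh randomness, so concurrent processes will not collide
    # in practice; slicing keeps the id a shortish string as in the original.
    return uuid.uuid4().hex[:9]

print(make_session_id())   # e.g. '3f2a9c1d0' (different on every call)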
9,639 | buriburisuri/sugartensor | sugartensor/sg_layer.py | sg_aconv | def sg_aconv(tensor, opt):
r"""Applies a 2-D atrous (or dilated) convolution.
Args:
tensor: A 4-D `Tensor` (automatically passed by decorator).
opt:
size: A tuple/list of positive integers of length 2 representing `[kernel height, kernel width]`.
Can be an integer if both values are the same.
If not specified, (3, 3) is set automatically.
rate: A positive integer. The stride with which we sample input values across
the `height` and `width` dimensions. Default is 2.
in_dim: A positive `integer`. The size of input dimension.
dim: A positive `integer`. The size of output dimension.
pad: Either `SAME` (Default) or `VALID`.
bias: Boolean. If True, biases are added.
regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable
will be added to the collection tf.GraphKeys.REGULARIZATION_LOSSES and can be used for regularization
summary: If True, summaries are added. The default is True.
Returns:
A `Tensor` with the same type as `tensor`.
"""
# default options
opt += tf.sg_opt(size=(3, 3), rate=2, pad='SAME')
opt.size = opt.size if isinstance(opt.size, (tuple, list)) else [opt.size, opt.size]
# parameter tf.sg_initializer
w = tf.sg_initializer.he_uniform('W', (opt.size[0], opt.size[1], opt.in_dim, opt.dim),
regularizer=opt.regularizer, summary=opt.summary)
b = tf.sg_initializer.constant('b', opt.dim, summary=opt.summary) if opt.bias else 0
# apply convolution
out = tf.nn.atrous_conv2d(tensor, w, rate=opt.rate, padding=opt.pad) + b
return out | python | def sg_aconv(tensor, opt):
r"""Applies a 2-D atrous (or dilated) convolution.
Args:
tensor: A 4-D `Tensor` (automatically passed by decorator).
opt:
size: A tuple/list of positive integers of length 2 representing `[kernel height, kernel width]`.
Can be an integer if both values are the same.
If not specified, (3, 3) is set automatically.
rate: A positive integer. The stride with which we sample input values across
the `height` and `width` dimensions. Default is 2.
in_dim: A positive `integer`. The size of input dimension.
dim: A positive `integer`. The size of output dimension.
pad: Either `SAME` (Default) or `VALID`.
bias: Boolean. If True, biases are added.
regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable
will be added to the collection tf.GraphKeys.REGULARIZATION_LOSSES and can be used for regularization
summary: If True, summaries are added. The default is True.
Returns:
A `Tensor` with the same type as `tensor`.
"""
# default options
opt += tf.sg_opt(size=(3, 3), rate=2, pad='SAME')
opt.size = opt.size if isinstance(opt.size, (tuple, list)) else [opt.size, opt.size]
# parameter tf.sg_initializer
w = tf.sg_initializer.he_uniform('W', (opt.size[0], opt.size[1], opt.in_dim, opt.dim),
regularizer=opt.regularizer, summary=opt.summary)
b = tf.sg_initializer.constant('b', opt.dim, summary=opt.summary) if opt.bias else 0
# apply convolution
out = tf.nn.atrous_conv2d(tensor, w, rate=opt.rate, padding=opt.pad) + b
return out | ['def', 'sg_aconv', '(', 'tensor', ',', 'opt', ')', ':', '# default options', 'opt', '+=', 'tf', '.', 'sg_opt', '(', 'size', '=', '(', '3', ',', '3', ')', ',', 'rate', '=', '2', ',', 'pad', '=', "'SAME'", ')', 'opt', '.', 'size', '=', 'opt', '.', 'size', 'if', 'isinstance', '(', 'opt', '.', 'size', ',', '(', 'tuple', ',', 'list', ')', ')', 'else', '[', 'opt', '.', 'size', ',', 'opt', '.', 'size', ']', '# parameter tf.sg_initializer', 'w', '=', 'tf', '.', 'sg_initializer', '.', 'he_uniform', '(', "'W'", ',', '(', 'opt', '.', 'size', '[', '0', ']', ',', 'opt', '.', 'size', '[', '1', ']', ',', 'opt', '.', 'in_dim', ',', 'opt', '.', 'dim', ')', ',', 'regularizer', '=', 'opt', '.', 'regularizer', ',', 'summary', '=', 'opt', '.', 'summary', ')', 'b', '=', 'tf', '.', 'sg_initializer', '.', 'constant', '(', "'b'", ',', 'opt', '.', 'dim', ',', 'summary', '=', 'opt', '.', 'summary', ')', 'if', 'opt', '.', 'bias', 'else', '0', '# apply convolution', 'out', '=', 'tf', '.', 'nn', '.', 'atrous_conv2d', '(', 'tensor', ',', 'w', ',', 'rate', '=', 'opt', '.', 'rate', ',', 'padding', '=', 'opt', '.', 'pad', ')', '+', 'b', 'return', 'out'] | r"""Applies a 2-D atrous (or dilated) convolution.
Args:
tensor: A 4-D `Tensor` (automatically passed by decorator).
opt:
size: A tuple/list of positive integers of length 2 representing `[kernel height, kernel width]`.
Can be an integer if both values are the same.
If not specified, (3, 3) is set automatically.
rate: A positive integer. The stride with which we sample input values across
the `height` and `width` dimensions. Default is 2.
in_dim: A positive `integer`. The size of input dimension.
dim: A positive `integer`. The size of output dimension.
pad: Either `SAME` (Default) or `VALID`.
bias: Boolean. If True, biases are added.
regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable
will be added to the collection tf.GraphKeys.REGULARIZATION_LOSSES and can be used for regularization
summary: If True, summaries are added. The default is True.
Returns:
A `Tensor` with the same type as `tensor`. | ['r', 'Applies', 'a', '2', '-', 'D', 'atrous', '(', 'or', 'dilated', ')', 'convolution', '.', 'Args', ':', 'tensor', ':', 'A', '4', '-', 'D', 'Tensor', '(', 'automatically', 'passed', 'by', 'decorator', ')', '.', 'opt', ':', 'size', ':', 'A', 'tuple', '/', 'list', 'of', 'positive', 'integers', 'of', 'length', '2', 'representing', '[', 'kernel', 'height', 'kernel', 'width', ']', '.', 'Can', 'be', 'an', 'integer', 'if', 'both', 'values', 'are', 'the', 'same', '.', 'If', 'not', 'specified', '(', '3', '3', ')', 'is', 'set', 'automatically', '.', 'rate', ':', 'A', 'positive', 'integer', '.', 'The', 'stride', 'with', 'which', 'we', 'sample', 'input', 'values', 'across', 'the', 'height', 'and', 'width', 'dimensions', '.', 'Default', 'is', '2', '.', 'in_dim', ':', 'A', 'positive', 'integer', '.', 'The', 'size', 'of', 'input', 'dimension', '.', 'dim', ':', 'A', 'positive', 'integer', '.', 'The', 'size', 'of', 'output', 'dimension', '.', 'pad', ':', 'Either', 'SAME', '(', 'Default', ')', 'or', 'VALID', '.', 'bias', ':', 'Boolean', '.', 'If', 'True', 'biases', 'are', 'added', '.', 'regularizer', ':', 'A', '(', 'Tensor', '-', '>', 'Tensor', 'or', 'None', ')', 'function', ';', 'the', 'result', 'of', 'applying', 'it', 'on', 'a', 'newly', 'created', 'variable', 'will', 'be', 'added', 'to', 'the', 'collection', 'tf', '.', 'GraphKeys', '.', 'REGULARIZATION_LOSSES', 'and', 'can', 'be', 'used', 'for', 'regularization', 'summary', ':', 'If', 'True', 'summaries', 'are', 'added', '.', 'The', 'default', 'is', 'True', '.'] | train | https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_layer.py#L141-L175 |
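sg_aconv hands the work to tf.nn.atrous_conv2d, where rate controls how far apart the kernel taps are sampled. The NumPy sketch below is not part of sugartensor; it only illustrates what that dilation means by explicitly inserting zeros between the taps of a kernel:

import numpy as np

def dilate_kernel(kernel, rate):
    # Insert rate-1 zeros between taps; atrous convolution achieves the same
    # receptive-field growth without materialising the zeros.
    h, w = kernel.shape
    out = np.zeros(((h - 1) * rate + 1, (w - 1) * rate + 1), dtype=kernel.dtype)
    out[::rate, ::rate] = kernel
    return out

k = np.ones((3, 3))
print(dilate_kernel(k, rate=2).shape)   # (5, 5): same nine weights, wider coverage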
9,640 | bram85/topydo | topydo/lib/TodoListBase.py | TodoListBase.number | def number(self, p_todo):
"""
Returns the line number or text ID of a todo (depends on the
configuration).
"""
if config().identifiers() == "text":
return self.uid(p_todo)
else:
return self.linenumber(p_todo) | python | def number(self, p_todo):
"""
Returns the line number or text ID of a todo (depends on the
configuration).
"""
if config().identifiers() == "text":
return self.uid(p_todo)
else:
return self.linenumber(p_todo) | ['def', 'number', '(', 'self', ',', 'p_todo', ')', ':', 'if', 'config', '(', ')', '.', 'identifiers', '(', ')', '==', '"text"', ':', 'return', 'self', '.', 'uid', '(', 'p_todo', ')', 'else', ':', 'return', 'self', '.', 'linenumber', '(', 'p_todo', ')'] | Returns the line number or text ID of a todo (depends on the
configuration. | ['Returns', 'the', 'line', 'number', 'or', 'text', 'ID', 'of', 'a', 'todo', '(', 'depends', 'on', 'the', 'configuration', '.'] | train | https://github.com/bram85/topydo/blob/b59fcfca5361869a6b78d4c9808c7c6cd0a18b58/topydo/lib/TodoListBase.py#L269-L277 |
9,641 | pandas-dev/pandas | pandas/io/html.py | _LxmlFrameParser._build_doc | def _build_doc(self):
"""
Raises
------
ValueError
* If a URL that lxml cannot parse is passed.
Exception
* Any other ``Exception`` thrown. For example, trying to parse a
URL that is syntactically correct on a machine with no internet
connection will fail.
See Also
--------
pandas.io.html._HtmlFrameParser._build_doc
"""
from lxml.html import parse, fromstring, HTMLParser
from lxml.etree import XMLSyntaxError
parser = HTMLParser(recover=True, encoding=self.encoding)
try:
if _is_url(self.io):
with urlopen(self.io) as f:
r = parse(f, parser=parser)
else:
# try to parse the input in the simplest way
r = parse(self.io, parser=parser)
try:
r = r.getroot()
except AttributeError:
pass
except (UnicodeDecodeError, IOError) as e:
# if the input is a blob of html goop
if not _is_url(self.io):
r = fromstring(self.io, parser=parser)
try:
r = r.getroot()
except AttributeError:
pass
else:
raise e
else:
if not hasattr(r, 'text_content'):
raise XMLSyntaxError("no text parsed from document", 0, 0, 0)
return r | python | def _build_doc(self):
"""
Raises
------
ValueError
* If a URL that lxml cannot parse is passed.
Exception
* Any other ``Exception`` thrown. For example, trying to parse a
URL that is syntactically correct on a machine with no internet
connection will fail.
See Also
--------
pandas.io.html._HtmlFrameParser._build_doc
"""
from lxml.html import parse, fromstring, HTMLParser
from lxml.etree import XMLSyntaxError
parser = HTMLParser(recover=True, encoding=self.encoding)
try:
if _is_url(self.io):
with urlopen(self.io) as f:
r = parse(f, parser=parser)
else:
# try to parse the input in the simplest way
r = parse(self.io, parser=parser)
try:
r = r.getroot()
except AttributeError:
pass
except (UnicodeDecodeError, IOError) as e:
# if the input is a blob of html goop
if not _is_url(self.io):
r = fromstring(self.io, parser=parser)
try:
r = r.getroot()
except AttributeError:
pass
else:
raise e
else:
if not hasattr(r, 'text_content'):
raise XMLSyntaxError("no text parsed from document", 0, 0, 0)
return r | ['def', '_build_doc', '(', 'self', ')', ':', 'from', 'lxml', '.', 'html', 'import', 'parse', ',', 'fromstring', ',', 'HTMLParser', 'from', 'lxml', '.', 'etree', 'import', 'XMLSyntaxError', 'parser', '=', 'HTMLParser', '(', 'recover', '=', 'True', ',', 'encoding', '=', 'self', '.', 'encoding', ')', 'try', ':', 'if', '_is_url', '(', 'self', '.', 'io', ')', ':', 'with', 'urlopen', '(', 'self', '.', 'io', ')', 'as', 'f', ':', 'r', '=', 'parse', '(', 'f', ',', 'parser', '=', 'parser', ')', 'else', ':', '# try to parse the input in the simplest way', 'r', '=', 'parse', '(', 'self', '.', 'io', ',', 'parser', '=', 'parser', ')', 'try', ':', 'r', '=', 'r', '.', 'getroot', '(', ')', 'except', 'AttributeError', ':', 'pass', 'except', '(', 'UnicodeDecodeError', ',', 'IOError', ')', 'as', 'e', ':', '# if the input is a blob of html goop', 'if', 'not', '_is_url', '(', 'self', '.', 'io', ')', ':', 'r', '=', 'fromstring', '(', 'self', '.', 'io', ',', 'parser', '=', 'parser', ')', 'try', ':', 'r', '=', 'r', '.', 'getroot', '(', ')', 'except', 'AttributeError', ':', 'pass', 'else', ':', 'raise', 'e', 'else', ':', 'if', 'not', 'hasattr', '(', 'r', ',', "'text_content'", ')', ':', 'raise', 'XMLSyntaxError', '(', '"no text parsed from document"', ',', '0', ',', '0', ',', '0', ')', 'return', 'r'] | Raises
------
ValueError
* If a URL that lxml cannot parse is passed.
Exception
* Any other ``Exception`` thrown. For example, trying to parse a
URL that is syntactically correct on a machine with no internet
connection will fail.
See Also
--------
pandas.io.html._HtmlFrameParser._build_doc | ['Raises', '------', 'ValueError', '*', 'If', 'a', 'URL', 'that', 'lxml', 'cannot', 'parse', 'is', 'passed', '.'] | train | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/html.py#L690-L735 |
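The pandas reader above leans on lxml's recovering HTMLParser, trying parse() for URLs and file-like input and fromstring() for raw markup. A small self-contained example of the recovering-parser part, independent of pandas:

from lxml.html import fromstring, HTMLParser

broken_html = b"<table><tr><td>cell one<td>cell two</table"   # deliberately malformed
parser = HTMLParser(recover=True, encoding="utf-8")
root = fromstring(broken_html, parser=parser)

# recover=True lets lxml repair the markup instead of raising a parse error
print(len(root.xpath("//td")))   # both cells are recovered
print(root.text_content())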
9,642 | Hackerfleet/hfos | hfos/database.py | _build_model_factories | def _build_model_factories(store):
"""Generate factories to construct objects from schemata"""
result = {}
for schemaname in store:
schema = None
try:
schema = store[schemaname]['schema']
except KeyError:
schemata_log("No schema found for ", schemaname, lvl=critical, exc=True)
try:
result[schemaname] = warmongo.model_factory(schema)
except Exception as e:
schemata_log("Could not create factory for schema ", schemaname, schema, lvl=critical, exc=True)
return result | python | def _build_model_factories(store):
"""Generate factories to construct objects from schemata"""
result = {}
for schemaname in store:
schema = None
try:
schema = store[schemaname]['schema']
except KeyError:
schemata_log("No schema found for ", schemaname, lvl=critical, exc=True)
try:
result[schemaname] = warmongo.model_factory(schema)
except Exception as e:
schemata_log("Could not create factory for schema ", schemaname, schema, lvl=critical, exc=True)
return result | ['def', '_build_model_factories', '(', 'store', ')', ':', 'result', '=', '{', '}', 'for', 'schemaname', 'in', 'store', ':', 'schema', '=', 'None', 'try', ':', 'schema', '=', 'store', '[', 'schemaname', ']', '[', "'schema'", ']', 'except', 'KeyError', ':', 'schemata_log', '(', '"No schema found for "', ',', 'schemaname', ',', 'lvl', '=', 'critical', ',', 'exc', '=', 'True', ')', 'try', ':', 'result', '[', 'schemaname', ']', '=', 'warmongo', '.', 'model_factory', '(', 'schema', ')', 'except', 'Exception', 'as', 'e', ':', 'schemata_log', '(', '"Could not create factory for schema "', ',', 'schemaname', ',', 'schema', ',', 'lvl', '=', 'critical', ',', 'exc', '=', 'True', ')', 'return', 'result'] | Generate factories to construct objects from schemata | ['Generate', 'factories', 'to', 'construct', 'objects', 'from', 'schemata'] | train | https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/hfos/database.py#L257-L276 |
9,643 | chaoss/grimoirelab-elk | grimoire_elk/enriched/study_ceres_onion.py | ESOnionConnector.__list_uniques | def __list_uniques(self, date_range, field_name):
"""Retrieve a list of unique values in a given field within a date range.
:param date_range:
:param field_name:
:return: list of unique values.
"""
# Get project list
s = Search(using=self._es_conn, index=self._es_index)
s = s.filter('range', **date_range)
# from:to parameters (=> from: 0, size: 0)
s = s[0:0]
s.aggs.bucket('uniques', 'terms', field=field_name, size=1000)
response = s.execute()
uniques_list = []
for item in response.aggregations.uniques.buckets:
uniques_list.append(item.key)
return uniques_list | python | def __list_uniques(self, date_range, field_name):
"""Retrieve a list of unique values in a given field within a date range.
:param date_range:
:param field_name:
:return: list of unique values.
"""
# Get project list
s = Search(using=self._es_conn, index=self._es_index)
s = s.filter('range', **date_range)
# from:to parameters (=> from: 0, size: 0)
s = s[0:0]
s.aggs.bucket('uniques', 'terms', field=field_name, size=1000)
response = s.execute()
uniques_list = []
for item in response.aggregations.uniques.buckets:
uniques_list.append(item.key)
return uniques_list | ['def', '__list_uniques', '(', 'self', ',', 'date_range', ',', 'field_name', ')', ':', '# Get project list', 's', '=', 'Search', '(', 'using', '=', 'self', '.', '_es_conn', ',', 'index', '=', 'self', '.', '_es_index', ')', 's', '=', 's', '.', 'filter', '(', "'range'", ',', '*', '*', 'date_range', ')', '# from:to parameters (=> from: 0, size: 0)', 's', '=', 's', '[', '0', ':', '0', ']', 's', '.', 'aggs', '.', 'bucket', '(', "'uniques'", ',', "'terms'", ',', 'field', '=', 'field_name', ',', 'size', '=', '1000', ')', 'response', '=', 's', '.', 'execute', '(', ')', 'uniques_list', '=', '[', ']', 'for', 'item', 'in', 'response', '.', 'aggregations', '.', 'uniques', '.', 'buckets', ':', 'uniques_list', '.', 'append', '(', 'item', '.', 'key', ')', 'return', 'uniques_list'] | Retrieve a list of unique values in a given field within a date range.
:param date_range:
:param field_name:
:return: list of unique values. | ['Retrieve', 'a', 'list', 'of', 'unique', 'values', 'in', 'a', 'given', 'field', 'within', 'a', 'date', 'range', '.'] | train | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/study_ceres_onion.py#L241-L259 |
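__list_uniques is a standard elasticsearch_dsl pattern: ask for zero hits with s[0:0] and let a terms aggregation return the distinct values of a field. A stand-alone version of the same query, assuming a reachable Elasticsearch node; the index and field names here are hypothetical:

from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search

def list_uniques(es, index, field, date_range):
    s = Search(using=es, index=index)
    s = s.filter('range', **date_range)
    s = s[0:0]                                  # no documents, only the aggregation
    s.aggs.bucket('uniques', 'terms', field=field, size=1000)
    response = s.execute()
    return [bucket.key for bucket in response.aggregations.uniques.buckets]

es = Elasticsearch(['http://localhost:9200'])
authors = list_uniques(es, 'git_enriched', 'author_uuid',
                       {'grimoire_creation_date': {'gte': '2019-01-01', 'lt': '2019-02-01'}})
print(authors)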
9,644 | refnode/liquid | src/liquid/strscan.py | get_regex | def get_regex(regex):
"""
Ensure we have a compiled regular expression object.
>>> import re
>>> get_regex('string') # doctest: +ELLIPSIS
<_sre.SRE_Pattern object at 0x...>
>>> pattern = re.compile(r'string')
>>> get_regex(pattern) is pattern
True
>>> get_regex(3) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: Invalid regex type: 3
"""
if isinstance(regex, basestring):
return re.compile(regex)
elif not isinstance(regex, re._pattern_type):
raise TypeError("Invalid regex type: %r" % (regex,))
return regex | python | def get_regex(regex):
"""
Ensure we have a compiled regular expression object.
>>> import re
>>> get_regex('string') # doctest: +ELLIPSIS
<_sre.SRE_Pattern object at 0x...>
>>> pattern = re.compile(r'string')
>>> get_regex(pattern) is pattern
True
>>> get_regex(3) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: Invalid regex type: 3
"""
if isinstance(regex, basestring):
return re.compile(regex)
elif not isinstance(regex, re._pattern_type):
raise TypeError("Invalid regex type: %r" % (regex,))
return regex | ['def', 'get_regex', '(', 'regex', ')', ':', 'if', 'isinstance', '(', 'regex', ',', 'basestring', ')', ':', 'return', 're', '.', 'compile', '(', 'regex', ')', 'elif', 'not', 'isinstance', '(', 'regex', ',', 're', '.', '_pattern_type', ')', ':', 'raise', 'TypeError', '(', '"Invalid regex type: %r"', '%', '(', 'regex', ',', ')', ')', 'return', 'regex'] | Ensure we have a compiled regular expression object.
>>> import re
>>> get_regex('string') # doctest: +ELLIPSIS
<_sre.SRE_Pattern object at 0x...>
>>> pattern = re.compile(r'string')
>>> get_regex(pattern) is pattern
True
>>> get_regex(3) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: Invalid regex type: 3 | ['Ensure', 'we', 'have', 'a', 'compiled', 'regular', 'expression', 'object', '.'] | train | https://github.com/refnode/liquid/blob/8b2b5efc635b0dbfe610db9036fdb4ae3e3d5439/src/liquid/strscan.py#L516-L535 |
9,645 | unt-libraries/edtf-validate | edtf_validate/valid_edtf.py | replace_u_end_month | def replace_u_end_month(month):
"""Find the latest legitimate month."""
month = month.lstrip('-')
if month == 'uu' or month == '1u':
return '12'
if month == 'u0':
return '10'
if month == '0u':
return '09'
if month[1] in ['1', '2']:
# 'u1' or 'u2'
return month.replace('u', '1')
# Otherwise it should match r'u[3-9]'.
return month.replace('u', '0') | python | def replace_u_end_month(month):
"""Find the latest legitimate month."""
month = month.lstrip('-')
if month == 'uu' or month == '1u':
return '12'
if month == 'u0':
return '10'
if month == '0u':
return '09'
if month[1] in ['1', '2']:
# 'u1' or 'u2'
return month.replace('u', '1')
# Otherwise it should match r'u[3-9]'.
return month.replace('u', '0') | ['def', 'replace_u_end_month', '(', 'month', ')', ':', 'month', '=', 'month', '.', 'lstrip', '(', "'-'", ')', 'if', 'month', '==', "'uu'", 'or', 'month', '==', "'1u'", ':', 'return', "'12'", 'if', 'month', '==', "'u0'", ':', 'return', "'10'", 'if', 'month', '==', "'0u'", ':', 'return', "'09'", 'if', 'month', '[', '1', ']', 'in', '[', "'1'", ',', "'2'", ']', ':', "# 'u1' or 'u2'", 'return', 'month', '.', 'replace', '(', "'u'", ',', "'1'", ')', "# Otherwise it should match r'u[3-9]'.", 'return', 'month', '.', 'replace', '(', "'u'", ',', "'0'", ')'] | Find the latest legitimate month. | ['Find', 'the', 'latest', 'legitimate', 'month', '.'] | train | https://github.com/unt-libraries/edtf-validate/blob/d6d63141919a66aea4ff1c31fa0cb8ff744ef9d9/edtf_validate/valid_edtf.py#L264-L277 |
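Each branch of replace_u_end_month resolves one way an unspecified ('u') digit can appear in a month. The checks below exercise every branch; they assume the package is installed so the function above can be imported:

from edtf_validate.valid_edtf import replace_u_end_month

cases = {'uu': '12', '1u': '12', 'u0': '10', '0u': '09',
         'u1': '11', 'u2': '12', 'u3': '03', '-u9': '09'}
for month, expected in cases.items():
    assert replace_u_end_month(month) == expected, (month, expected)
print('every unspecified-month pattern resolves to its latest legitimate month')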
9,646 | tmr232/Sark | sark/data.py | get_string | def get_string(ea):
"""Read the string at the given ea.
This function uses IDA's string APIs and does not implement any special logic.
"""
# We get the item-head because the `GetStringType` function only works on the head of an item.
string_type = idc.GetStringType(idaapi.get_item_head(ea))
if string_type is None:
raise exceptions.SarkNoString("No string at 0x{:08X}".format(ea))
string = idc.GetString(ea, strtype=string_type)
if not string:
raise exceptions.SarkNoString("No string at 0x{:08X}".format(ea))
return string | python | def get_string(ea):
"""Read the string at the given ea.
This function uses IDA's string APIs and does not implement any special logic.
"""
# We get the item-head because the `GetStringType` function only works on the head of an item.
string_type = idc.GetStringType(idaapi.get_item_head(ea))
if string_type is None:
raise exceptions.SarkNoString("No string at 0x{:08X}".format(ea))
string = idc.GetString(ea, strtype=string_type)
if not string:
raise exceptions.SarkNoString("No string at 0x{:08X}".format(ea))
return string | ['def', 'get_string', '(', 'ea', ')', ':', '# We get the item-head because the `GetStringType` function only works on the head of an item.', 'string_type', '=', 'idc', '.', 'GetStringType', '(', 'idaapi', '.', 'get_item_head', '(', 'ea', ')', ')', 'if', 'string_type', 'is', 'None', ':', 'raise', 'exceptions', '.', 'SarkNoString', '(', '"No string at 0x{:08X}"', '.', 'format', '(', 'ea', ')', ')', 'string', '=', 'idc', '.', 'GetString', '(', 'ea', ',', 'strtype', '=', 'string_type', ')', 'if', 'not', 'string', ':', 'raise', 'exceptions', '.', 'SarkNoString', '(', '"No string at 0x{:08X}"', '.', 'format', '(', 'ea', ')', ')', 'return', 'string'] | Read the string at the given ea.
This function uses IDA's string APIs and does not implement any special logic. | ['Read', 'the', 'string', 'at', 'the', 'given', 'ea', '.'] | train | https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/data.py#L147-L163 |
9,647 | jacebrowning/comparable | comparable/base.py | _Base._repr | def _repr(self, *args, **kwargs):
"""Return a __repr__ string from the arguments provided to __init__.
@param args: list of arguments to __init__
@param kwargs: dictionary of keyword arguments to __init__
@return: __repr__ string
"""
# Remove unnecessary empty keyword arguments and sort the arguments
kwargs = {k: v for k, v in kwargs.items() if v is not None}
kwargs = OrderedDict(sorted(kwargs.items()))
# Build the __repr__ string pieces
args_repr = ', '.join(repr(arg) for arg in args)
kwargs_repr = ', '.join(k + '=' + repr(v) for k, v in kwargs.items())
if args_repr and kwargs_repr:
kwargs_repr = ', ' + kwargs_repr
name = self.__class__.__name__
return "{}({}{})".format(name, args_repr, kwargs_repr) | python | def _repr(self, *args, **kwargs):
"""Return a __repr__ string from the arguments provided to __init__.
@param args: list of arguments to __init__
@param kwargs: dictionary of keyword arguments to __init__
@return: __repr__ string
"""
# Remove unnecessary empty keyword arguments and sort the arguments
kwargs = {k: v for k, v in kwargs.items() if v is not None}
kwargs = OrderedDict(sorted(kwargs.items()))
# Build the __repr__ string pieces
args_repr = ', '.join(repr(arg) for arg in args)
kwargs_repr = ', '.join(k + '=' + repr(v) for k, v in kwargs.items())
if args_repr and kwargs_repr:
kwargs_repr = ', ' + kwargs_repr
name = self.__class__.__name__
return "{}({}{})".format(name, args_repr, kwargs_repr) | ['def', '_repr', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', '# Remove unnecessary empty keywords arguments and sort the arguments', 'kwargs', '=', '{', 'k', ':', 'v', 'for', 'k', ',', 'v', 'in', 'kwargs', '.', 'items', '(', ')', 'if', 'v', 'is', 'not', 'None', '}', 'kwargs', '=', 'OrderedDict', '(', 'sorted', '(', 'kwargs', '.', 'items', '(', ')', ')', ')', '# Build the __repr__ string pieces', 'args_repr', '=', "', '", '.', 'join', '(', 'repr', '(', 'arg', ')', 'for', 'arg', 'in', 'args', ')', 'kwargs_repr', '=', "', '", '.', 'join', '(', 'k', '+', "'='", '+', 'repr', '(', 'v', ')', 'for', 'k', ',', 'v', 'in', 'kwargs', '.', 'items', '(', ')', ')', 'if', 'args_repr', 'and', 'kwargs_repr', ':', 'kwargs_repr', '=', "', '", '+', 'kwargs_repr', 'name', '=', 'self', '.', '__class__', '.', '__name__', 'return', '"{}({}{})"', '.', 'format', '(', 'name', ',', 'args_repr', ',', 'kwargs_repr', ')'] | Return a __repr__ string from the arguments provided to __init__.
@param args: list of arguments to __init__
@param kwargs: dictionary of keyword arguments to __init__
@return: __repr__ string | ['Return', 'a', '__repr__', 'string', 'from', 'the', 'arguments', 'provided', 'to', '__init__', '.'] | train | https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/base.py#L12-L31 |
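_repr reconstructs a constructor call from whatever was passed to __init__, dropping empty keyword arguments and sorting the rest. A self-contained sketch of a class using that pattern (the Point class is invented for illustration, not part of the comparable package):

from collections import OrderedDict

class Point:
    def __init__(self, x, y, label=None):
        self.x, self.y, self.label = x, y, label
        self._init_args, self._init_kwargs = (x, y), {'label': label}

    def __repr__(self):
        # Same steps as _repr above: filter empty kwargs, sort, join the pieces.
        kwargs = OrderedDict(sorted((k, v) for k, v in self._init_kwargs.items()
                                    if v is not None))
        args_repr = ', '.join(repr(a) for a in self._init_args)
        kwargs_repr = ', '.join('{}={!r}'.format(k, v) for k, v in kwargs.items())
        if args_repr and kwargs_repr:
            kwargs_repr = ', ' + kwargs_repr
        return '{}({}{})'.format(type(self).__name__, args_repr, kwargs_repr)

print(repr(Point(1, 2)))                  # Point(1, 2)
print(repr(Point(1, 2, label='origin')))  # Point(1, 2, label='origin')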
9,648 | openid/python-openid | openid/server/server.py | Decoder.decode | def decode(self, query):
"""I transform query parameters into an L{OpenIDRequest}.
If the query does not seem to be an OpenID request at all, I return
C{None}.
@param query: The query parameters as a dictionary with each
key mapping to one value.
@type query: dict
@raises ProtocolError: When the query does not seem to be a valid
OpenID request.
@returntype: L{OpenIDRequest}
"""
if not query:
return None
try:
message = Message.fromPostArgs(query)
except InvalidOpenIDNamespace, err:
# It's useful to have a Message attached to a ProtocolError, so we
# override the bad ns value to build a Message out of it. Kinda
# kludgy, since it's made of lies, but the parts that aren't lies
# are more useful than a 'None'.
query = query.copy()
query['openid.ns'] = OPENID2_NS
message = Message.fromPostArgs(query)
raise ProtocolError(message, str(err))
mode = message.getArg(OPENID_NS, 'mode')
if not mode:
fmt = "No mode value in message %s"
raise ProtocolError(message, text=fmt % (message,))
handler = self._handlers.get(mode, self.defaultDecoder)
return handler(message, self.server.op_endpoint) | python | def decode(self, query):
"""I transform query parameters into an L{OpenIDRequest}.
If the query does not seem to be an OpenID request at all, I return
C{None}.
@param query: The query parameters as a dictionary with each
key mapping to one value.
@type query: dict
@raises ProtocolError: When the query does not seem to be a valid
OpenID request.
@returntype: L{OpenIDRequest}
"""
if not query:
return None
try:
message = Message.fromPostArgs(query)
except InvalidOpenIDNamespace, err:
# It's useful to have a Message attached to a ProtocolError, so we
# override the bad ns value to build a Message out of it. Kinda
# kludgy, since it's made of lies, but the parts that aren't lies
# are more useful than a 'None'.
query = query.copy()
query['openid.ns'] = OPENID2_NS
message = Message.fromPostArgs(query)
raise ProtocolError(message, str(err))
mode = message.getArg(OPENID_NS, 'mode')
if not mode:
fmt = "No mode value in message %s"
raise ProtocolError(message, text=fmt % (message,))
handler = self._handlers.get(mode, self.defaultDecoder)
return handler(message, self.server.op_endpoint) | ['def', 'decode', '(', 'self', ',', 'query', ')', ':', 'if', 'not', 'query', ':', 'return', 'None', 'try', ':', 'message', '=', 'Message', '.', 'fromPostArgs', '(', 'query', ')', 'except', 'InvalidOpenIDNamespace', ',', 'err', ':', "# It's useful to have a Message attached to a ProtocolError, so we", '# override the bad ns value to build a Message out of it. Kinda', "# kludgy, since it's made of lies, but the parts that aren't lies", "# are more useful than a 'None'.", 'query', '=', 'query', '.', 'copy', '(', ')', 'query', '[', "'openid.ns'", ']', '=', 'OPENID2_NS', 'message', '=', 'Message', '.', 'fromPostArgs', '(', 'query', ')', 'raise', 'ProtocolError', '(', 'message', ',', 'str', '(', 'err', ')', ')', 'mode', '=', 'message', '.', 'getArg', '(', 'OPENID_NS', ',', "'mode'", ')', 'if', 'not', 'mode', ':', 'fmt', '=', '"No mode value in message %s"', 'raise', 'ProtocolError', '(', 'message', ',', 'text', '=', 'fmt', '%', '(', 'message', ',', ')', ')', 'handler', '=', 'self', '.', '_handlers', '.', 'get', '(', 'mode', ',', 'self', '.', 'defaultDecoder', ')', 'return', 'handler', '(', 'message', ',', 'self', '.', 'server', '.', 'op_endpoint', ')'] | I transform query parameters into an L{OpenIDRequest}.
If the query does not seem to be an OpenID request at all, I return
C{None}.
@param query: The query parameters as a dictionary with each
key mapping to one value.
@type query: dict
@raises ProtocolError: When the query does not seem to be a valid
OpenID request.
@returntype: L{OpenIDRequest} | ['I', 'transform', 'query', 'parameters', 'into', 'an', 'L', '{', 'OpenIDRequest', '}', '.'] | train | https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/server/server.py#L1409-L1445 |
9,649 | JukeboxPipeline/jukebox-core | src/jukeboxcore/addons/guerilla/guerillamgmt.py | GuerillaMGMTWin.prj_create_user | def prj_create_user(self, *args, **kwargs):
"""Create a new project
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_prj:
return
user = self.create_user(projects=[self.cur_prj])
if user:
userdata = djitemdata.UserItemData(user)
treemodel.TreeItem(userdata, self.prj_user_model.root) | python | def prj_create_user(self, *args, **kwargs):
"""Create a new project
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_prj:
return
user = self.create_user(projects=[self.cur_prj])
if user:
userdata = djitemdata.UserItemData(user)
treemodel.TreeItem(userdata, self.prj_user_model.root) | ['def', 'prj_create_user', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'if', 'not', 'self', '.', 'cur_prj', ':', 'return', 'user', '=', 'self', '.', 'create_user', '(', 'projects', '=', '[', 'self', '.', 'cur_prj', ']', ')', 'if', 'user', ':', 'userdata', '=', 'djitemdata', '.', 'UserItemData', '(', 'user', ')', 'treemodel', '.', 'TreeItem', '(', 'userdata', ',', 'self', '.', 'prj_user_model', '.', 'root', ')'] | Create a new project
:returns: None
:rtype: None
:raises: None | ['Create', 'a', 'new', 'project'] | train | https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/addons/guerilla/guerillamgmt.py#L1412-L1424 |
9,650 | davidfokkema/artist | artist/multi_plot.py | MultiPlot.set_ytick_labels | def set_ytick_labels(self, row, column, labels):
"""Manually specify the y-axis tick labels.
:param row,column: specify the subplot.
:param labels: list of tick labels.
"""
subplot = self.get_subplot_at(row, column)
subplot.set_ytick_labels(labels) | python | def set_ytick_labels(self, row, column, labels):
"""Manually specify the y-axis tick labels.
:param row,column: specify the subplot.
:param labels: list of tick labels.
"""
subplot = self.get_subplot_at(row, column)
subplot.set_ytick_labels(labels) | ['def', 'set_ytick_labels', '(', 'self', ',', 'row', ',', 'column', ',', 'labels', ')', ':', 'subplot', '=', 'self', '.', 'get_subplot_at', '(', 'row', ',', 'column', ')', 'subplot', '.', 'set_ytick_labels', '(', 'labels', ')'] | Manually specify the y-axis tick labels.
:param row,column: specify the subplot.
:param labels: list of tick labels. | ['Manually', 'specify', 'the', 'y', '-', 'axis', 'tick', 'labels', '.'] | train | https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/multi_plot.py#L456-L464 |
9,651 | jtwhite79/pyemu | pyemu/pst/pst_handler.py | Pst.add_transform_columns | def add_transform_columns(self):
""" add transformed values to the Pst.parameter_data attribute
"""
for col in ["parval1","parlbnd","parubnd","increment"]:
if col not in self.parameter_data.columns:
continue
self.parameter_data.loc[:,col+"_trans"] = (self.parameter_data.loc[:,col] *
self.parameter_data.scale) +\
self.parameter_data.offset
#isnotfixed = self.parameter_data.partrans != "fixed"
islog = self.parameter_data.partrans == "log"
self.parameter_data.loc[islog,col+"_trans"] = \
self.parameter_data.loc[islog,col+"_trans"].\
apply(lambda x:np.log10(x)) | python | def add_transform_columns(self):
""" add transformed values to the Pst.parameter_data attribute
"""
for col in ["parval1","parlbnd","parubnd","increment"]:
if col not in self.parameter_data.columns:
continue
self.parameter_data.loc[:,col+"_trans"] = (self.parameter_data.loc[:,col] *
self.parameter_data.scale) +\
self.parameter_data.offset
#isnotfixed = self.parameter_data.partrans != "fixed"
islog = self.parameter_data.partrans == "log"
self.parameter_data.loc[islog,col+"_trans"] = \
self.parameter_data.loc[islog,col+"_trans"].\
apply(lambda x:np.log10(x)) | ['def', 'add_transform_columns', '(', 'self', ')', ':', 'for', 'col', 'in', '[', '"parval1"', ',', '"parlbnd"', ',', '"parubnd"', ',', '"increment"', ']', ':', 'if', 'col', 'not', 'in', 'self', '.', 'parameter_data', '.', 'columns', ':', 'continue', 'self', '.', 'parameter_data', '.', 'loc', '[', ':', ',', 'col', '+', '"_trans"', ']', '=', '(', 'self', '.', 'parameter_data', '.', 'loc', '[', ':', ',', 'col', ']', '*', 'self', '.', 'parameter_data', '.', 'scale', ')', '+', 'self', '.', 'parameter_data', '.', 'offset', '#isnotfixed = self.parameter_data.partrans != "fixed"', 'islog', '=', 'self', '.', 'parameter_data', '.', 'partrans', '==', '"log"', 'self', '.', 'parameter_data', '.', 'loc', '[', 'islog', ',', 'col', '+', '"_trans"', ']', '=', 'self', '.', 'parameter_data', '.', 'loc', '[', 'islog', ',', 'col', '+', '"_trans"', ']', '.', 'apply', '(', 'lambda', 'x', ':', 'np', '.', 'log10', '(', 'x', ')', ')'] | add transformed values to the Pst.parameter_data attribute | ['add', 'transformed', 'values', 'to', 'the', 'Pst', '.', 'parameter_data', 'attribute'] | train | https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/pst/pst_handler.py#L2035-L2049 |
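add_transform_columns applies scale and offset and then log10 only for rows whose partrans is 'log'. The same arithmetic on a plain pandas frame, with made-up parameter values:

import numpy as np
import pandas as pd

par = pd.DataFrame({
    'parval1':  [10.0, 0.5],
    'scale':    [1.0, 1.0],
    'offset':   [0.0, 0.0],
    'partrans': ['log', 'none'],
}, index=['hk1', 'rch1'])

par['parval1_trans'] = par['parval1'] * par['scale'] + par['offset']
islog = par['partrans'] == 'log'
par.loc[islog, 'parval1_trans'] = np.log10(par.loc[islog, 'parval1_trans'])

print(par[['parval1', 'parval1_trans']])   # hk1 becomes 1.0 (= log10(10)), rch1 stays 0.5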
9,652 | praekeltfoundation/seaworthy | seaworthy/client.py | ContainerHttpClient.put | def put(self, path=None, url_kwargs=None, **kwargs):
"""
Sends a PUT request.
:param path:
The HTTP path (either absolute or relative).
:param url_kwargs:
Parameters to override in the generated URL. See `~hyperlink.URL`.
:param **kwargs:
Optional arguments that ``request`` takes.
:return: response object
"""
return self._session.put(self._url(path, url_kwargs), **kwargs) | python | def put(self, path=None, url_kwargs=None, **kwargs):
"""
Sends a PUT request.
:param path:
The HTTP path (either absolute or relative).
:param url_kwargs:
Parameters to override in the generated URL. See `~hyperlink.URL`.
:param **kwargs:
Optional arguments that ``request`` takes.
:return: response object
"""
return self._session.put(self._url(path, url_kwargs), **kwargs) | ['def', 'put', '(', 'self', ',', 'path', '=', 'None', ',', 'url_kwargs', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'return', 'self', '.', '_session', '.', 'put', '(', 'self', '.', '_url', '(', 'path', ',', 'url_kwargs', ')', ',', '*', '*', 'kwargs', ')'] | Sends a PUT request.
:param path:
The HTTP path (either absolute or relative).
:param url_kwargs:
Parameters to override in the generated URL. See `~hyperlink.URL`.
:param **kwargs:
Optional arguments that ``request`` takes.
:return: response object | ['Sends', 'a', 'PUT', 'request', '.'] | train | https://github.com/praekeltfoundation/seaworthy/blob/6f10a19b45d4ea1dc3bd0553cc4d0438696c079c/seaworthy/client.py#L162-L174 |
9,653 | icgood/pymap | pymap/backend/dict/__init__.py | Session.login | async def login(cls, credentials: AuthenticationCredentials,
config: Config) -> 'Session':
"""Checks the given credentials for a valid login and returns a new
session. The mailbox data is shared between concurrent and future
sessions, but only for the lifetime of the process.
"""
user = credentials.authcid
password = cls._get_password(config, user)
if user != credentials.identity:
raise InvalidAuth()
elif not credentials.check_secret(password):
raise InvalidAuth()
mailbox_set, filter_set = config.set_cache.get(user, (None, None))
if not mailbox_set or not filter_set:
mailbox_set = MailboxSet()
filter_set = FilterSet()
if config.demo_data:
await cls._load_demo(mailbox_set, filter_set)
config.set_cache[user] = (mailbox_set, filter_set)
return cls(credentials.identity, config, mailbox_set, filter_set) | python | async def login(cls, credentials: AuthenticationCredentials,
config: Config) -> 'Session':
"""Checks the given credentials for a valid login and returns a new
session. The mailbox data is shared between concurrent and future
sessions, but only for the lifetime of the process.
"""
user = credentials.authcid
password = cls._get_password(config, user)
if user != credentials.identity:
raise InvalidAuth()
elif not credentials.check_secret(password):
raise InvalidAuth()
mailbox_set, filter_set = config.set_cache.get(user, (None, None))
if not mailbox_set or not filter_set:
mailbox_set = MailboxSet()
filter_set = FilterSet()
if config.demo_data:
await cls._load_demo(mailbox_set, filter_set)
config.set_cache[user] = (mailbox_set, filter_set)
return cls(credentials.identity, config, mailbox_set, filter_set) | ['async', 'def', 'login', '(', 'cls', ',', 'credentials', ':', 'AuthenticationCredentials', ',', 'config', ':', 'Config', ')', '->', "'Session'", ':', 'user', '=', 'credentials', '.', 'authcid', 'password', '=', 'cls', '.', '_get_password', '(', 'config', ',', 'user', ')', 'if', 'user', '!=', 'credentials', '.', 'identity', ':', 'raise', 'InvalidAuth', '(', ')', 'elif', 'not', 'credentials', '.', 'check_secret', '(', 'password', ')', ':', 'raise', 'InvalidAuth', '(', ')', 'mailbox_set', ',', 'filter_set', '=', 'config', '.', 'set_cache', '.', 'get', '(', 'user', ',', '(', 'None', ',', 'None', ')', ')', 'if', 'not', 'mailbox_set', 'or', 'not', 'filter_set', ':', 'mailbox_set', '=', 'MailboxSet', '(', ')', 'filter_set', '=', 'FilterSet', '(', ')', 'if', 'config', '.', 'demo_data', ':', 'await', 'cls', '.', '_load_demo', '(', 'mailbox_set', ',', 'filter_set', ')', 'config', '.', 'set_cache', '[', 'user', ']', '=', '(', 'mailbox_set', ',', 'filter_set', ')', 'return', 'cls', '(', 'credentials', '.', 'identity', ',', 'config', ',', 'mailbox_set', ',', 'filter_set', ')'] | Checks the given credentials for a valid login and returns a new
session. The mailbox data is shared between concurrent and future
sessions, but only for the lifetime of the process. | ['Checks', 'the', 'given', 'credentials', 'for', 'a', 'valid', 'login', 'and', 'returns', 'a', 'new', 'session', '.', 'The', 'mailbox', 'data', 'is', 'shared', 'between', 'concurrent', 'and', 'future', 'sessions', 'but', 'only', 'for', 'the', 'lifetime', 'of', 'the', 'process', '.'] | train | https://github.com/icgood/pymap/blob/e77d9a54d760e3cbe044a548883bb4299ed61dc2/pymap/backend/dict/__init__.py#L125-L145 |
9,654 | viniciuschiele/flask-io | flask_io/io.py | FlaskIO.permissions | def permissions(self, perms):
"""
A decorator that sets a list of permissions for a function.
:param perms: The list of permission instances or classes.
:return: A function
"""
if not isinstance(perms, (list, tuple)):
perms = [perms]
instances = []
for perm in perms:
if isclass(perm):
instances.append(perm())
else:
instances.append(perm)
def decorator(func):
func.permissions = instances
return func
return decorator | python | def permissions(self, perms):
"""
A decorator that sets a list of permissions for a function.
:param perms: The list of permission instances or classes.
:return: A function
"""
if not isinstance(perms, (list, tuple)):
perms = [perms]
instances = []
for perm in perms:
if isclass(perm):
instances.append(perm())
else:
instances.append(perm)
def decorator(func):
func.permissions = instances
return func
return decorator | ['def', 'permissions', '(', 'self', ',', 'perms', ')', ':', 'if', 'not', 'isinstance', '(', 'perms', ',', '(', 'list', ',', 'tuple', ')', ')', ':', 'perms', '=', '[', 'perms', ']', 'instances', '=', '[', ']', 'for', 'perm', 'in', 'perms', ':', 'if', 'isclass', '(', 'perm', ')', ':', 'instances', '.', 'append', '(', 'perm', '(', ')', ')', 'else', ':', 'instances', '.', 'append', '(', 'perm', ')', 'def', 'decorator', '(', 'func', ')', ':', 'func', '.', 'permissions', '=', 'instances', 'return', 'func', 'return', 'decorator'] | A decorator that sets a list of permissions for a function.
:param perms: The list of permission instances or classes.
:return: A function | ['A', 'decorator', 'that', 'sets', 'a', 'list', 'of', 'permissions', 'for', 'a', 'function', '.'] | train | https://github.com/viniciuschiele/flask-io/blob/4e559419b3d8e6859f83fa16557b00542d5f3aa7/flask_io/io.py#L166-L188 |
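The permissions decorator only normalizes classes to instances and attaches the list to the view function; nothing in it depends on Flask. A framework-free sketch of the same pattern, with an invented IsAuthenticated permission class:

from inspect import isclass

def permissions(perms):
    # Accept a single permission or a list, instantiate any classes, and attach
    # the instances to the decorated function, as FlaskIO.permissions does.
    if not isinstance(perms, (list, tuple)):
        perms = [perms]
    instances = [perm() if isclass(perm) else perm for perm in perms]
    def decorator(func):
        func.permissions = instances
        return func
    return decorator

class IsAuthenticated:            # hypothetical permission class
    def has_permission(self, request):
        return getattr(request, 'user', None) is not None

@permissions(IsAuthenticated)     # a class works as well as an instance
def delete_account():
    return 'deleted'

print([type(p).__name__ for p in delete_account.permissions])   # ['IsAuthenticated']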
9,655 | Microsoft/azure-devops-python-api | azure-devops/azure/devops/v5_1/profile_regions/profile_regions_client.py | ProfileRegionsClient.get_regions | def get_regions(self):
"""GetRegions.
[Preview API]
:rtype: :class:`<ProfileRegions> <azure.devops.v5_1.profile-regions.models.ProfileRegions>`
"""
response = self._send(http_method='GET',
location_id='b129ca90-999d-47bb-ab37-0dcf784ee633',
version='5.1-preview.1')
return self._deserialize('ProfileRegions', response) | python | def get_regions(self):
"""GetRegions.
[Preview API]
:rtype: :class:`<ProfileRegions> <azure.devops.v5_1.profile-regions.models.ProfileRegions>`
"""
response = self._send(http_method='GET',
location_id='b129ca90-999d-47bb-ab37-0dcf784ee633',
version='5.1-preview.1')
return self._deserialize('ProfileRegions', response) | ['def', 'get_regions', '(', 'self', ')', ':', 'response', '=', 'self', '.', '_send', '(', 'http_method', '=', "'GET'", ',', 'location_id', '=', "'b129ca90-999d-47bb-ab37-0dcf784ee633'", ',', 'version', '=', "'5.1-preview.1'", ')', 'return', 'self', '.', '_deserialize', '(', "'ProfileRegions'", ',', 'response', ')'] | GetRegions.
[Preview API]
:rtype: :class:`<ProfileRegions> <azure.devops.v5_1.profile-regions.models.ProfileRegions>` | ['GetRegions', '.', '[', 'Preview', 'API', ']', ':', 'rtype', ':', ':', 'class', ':', '<ProfileRegions', '>', '<azure', '.', 'devops', '.', 'v5_1', '.', 'profile', '-', 'regions', '.', 'models', '.', 'ProfileRegions', '>'] | train | https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/profile_regions/profile_regions_client.py#L43-L51 |
9,656 | gwastro/pycbc | pycbc/types/optparse.py | required_opts | def required_opts(opt, parser, opt_list, required_by=None):
"""Check that all the opts are defined
Parameters
----------
opt : object
Result of option parsing
parser : object
OptionParser instance.
opt_list : list of strings
required_by : string, optional
the option that requires these options (if applicable)
"""
for name in opt_list:
attr = name[2:].replace('-', '_')
if not hasattr(opt, attr) or (getattr(opt, attr) is None):
err_str = "%s is missing " % name
if required_by is not None:
err_str += ", required by %s" % required_by
parser.error(err_str) | python | def required_opts(opt, parser, opt_list, required_by=None):
"""Check that all the opts are defined
Parameters
----------
opt : object
Result of option parsing
parser : object
OptionParser instance.
opt_list : list of strings
required_by : string, optional
the option that requires these options (if applicable)
"""
for name in opt_list:
attr = name[2:].replace('-', '_')
if not hasattr(opt, attr) or (getattr(opt, attr) is None):
err_str = "%s is missing " % name
if required_by is not None:
err_str += ", required by %s" % required_by
parser.error(err_str) | ['def', 'required_opts', '(', 'opt', ',', 'parser', ',', 'opt_list', ',', 'required_by', '=', 'None', ')', ':', 'for', 'name', 'in', 'opt_list', ':', 'attr', '=', 'name', '[', '2', ':', ']', '.', 'replace', '(', "'-'", ',', "'_'", ')', 'if', 'not', 'hasattr', '(', 'opt', ',', 'attr', ')', 'or', '(', 'getattr', '(', 'opt', ',', 'attr', ')', 'is', 'None', ')', ':', 'err_str', '=', '"%s is missing "', '%', 'name', 'if', 'required_by', 'is', 'not', 'None', ':', 'err_str', '+=', '", required by %s"', '%', 'required_by', 'parser', '.', 'error', '(', 'err_str', ')'] | Check that all the opts are defined
Parameters
----------
opt : object
Result of option parsing
parser : object
OptionParser instance.
opt_list : list of strings
required_by : string, optional
the option that requires these options (if applicable) | ['Check', 'that', 'all', 'the', 'opts', 'are', 'defined'] | train | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/types/optparse.py#L190-L209 |
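required_opts maps each '--long-option' back to the attribute the parser would have created and calls parser.error for any that is unset. A small sketch of the same check against a plain argparse parser; the option names are invented, not pycbc's real ones:

import argparse

def required_opts(opt, parser, opt_list, required_by=None):
    for name in opt_list:
        attr = name[2:].replace('-', '_')
        if getattr(opt, attr, None) is None:
            err_str = "%s is missing" % name
            if required_by is not None:
                err_str += ", required by %s" % required_by
            parser.error(err_str)

parser = argparse.ArgumentParser()
parser.add_argument('--psd-model')
parser.add_argument('--psd-file')
opt = parser.parse_args(['--psd-model', 'flat'])

required_opts(opt, parser, ['--psd-model'])          # passes silently
# required_opts(opt, parser, ['--psd-file'], required_by='--psd-model')
# would exit with: "--psd-file is missing, required by --psd-model"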
9,657 | MartinThoma/hwrt | hwrt/handwritten_data.py | HandwrittenData.preprocessing | def preprocessing(self, algorithms):
"""Apply preprocessing algorithms.
Parameters
----------
algorithms : a list of objects
Preprocessing algorithms which get applied in order.
Examples
--------
>>> import preprocessing
>>> a = HandwrittenData(...)
>>> preprocessing_queue = [(preprocessing.scale_and_shift, []),
... (preprocessing.connect_strokes, []),
... (preprocessing.douglas_peucker,
... {'EPSILON': 0.2}),
... (preprocessing.space_evenly,
... {'number': 100,
... 'KIND': 'cubic'})]
>>> a.preprocessing(preprocessing_queue)
"""
assert type(algorithms) is list
for algorithm in algorithms:
algorithm(self) | python | def preprocessing(self, algorithms):
"""Apply preprocessing algorithms.
Parameters
----------
algorithms : a list of objects
Preprocessing algorithms which get applied in order.
Examples
--------
>>> import preprocessing
>>> a = HandwrittenData(...)
>>> preprocessing_queue = [(preprocessing.scale_and_shift, []),
... (preprocessing.connect_strokes, []),
... (preprocessing.douglas_peucker,
... {'EPSILON': 0.2}),
... (preprocessing.space_evenly,
... {'number': 100,
... 'KIND': 'cubic'})]
>>> a.preprocessing(preprocessing_queue)
"""
assert type(algorithms) is list
for algorithm in algorithms:
algorithm(self) | ['def', 'preprocessing', '(', 'self', ',', 'algorithms', ')', ':', 'assert', 'type', '(', 'algorithms', ')', 'is', 'list', 'for', 'algorithm', 'in', 'algorithms', ':', 'algorithm', '(', 'self', ')'] | Apply preprocessing algorithms.
Parameters
----------
algorithms : a list of objects
Preprocessing algorithms which get applied in order.
Examples
--------
>>> import preprocessing
>>> a = HandwrittenData(...)
>>> preprocessing_queue = [(preprocessing.scale_and_shift, []),
... (preprocessing.connect_strokes, []),
... (preprocessing.douglas_peucker,
... {'EPSILON': 0.2}),
... (preprocessing.space_evenly,
... {'number': 100,
... 'KIND': 'cubic'})]
>>> a.preprocessing(preprocessing_queue) | ['Apply', 'preprocessing', 'algorithms', '.'] | train | https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/handwritten_data.py#L196-L219 |
9,658 | spyder-ide/spyder | spyder/preferences/languageserver.py | LSPServerTable.previous_row | def previous_row(self):
"""Move to previous row from currently selected row."""
row = self.currentIndex().row()
rows = self.source_model.rowCount()
if row == 0:
row = rows
self.selectRow(row - 1) | python | def previous_row(self):
"""Move to previous row from currently selected row."""
row = self.currentIndex().row()
rows = self.source_model.rowCount()
if row == 0:
row = rows
self.selectRow(row - 1) | ['def', 'previous_row', '(', 'self', ')', ':', 'row', '=', 'self', '.', 'currentIndex', '(', ')', '.', 'row', '(', ')', 'rows', '=', 'self', '.', 'source_model', '.', 'rowCount', '(', ')', 'if', 'row', '==', '0', ':', 'row', '=', 'rows', 'self', '.', 'selectRow', '(', 'row', '-', '1', ')'] | Move to previous row from currently selected row. | ['Move', 'to', 'previous', 'row', 'from', 'currently', 'selected', 'row', '.'] | train | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/preferences/languageserver.py#L581-L587 |
9,659 | onicagroup/runway | runway/tfenv.py | get_available_tf_versions | def get_available_tf_versions(include_prerelease=False):
"""Return available Terraform versions."""
tf_releases = json.loads(
requests.get('https://releases.hashicorp.com/index.json').text
)['terraform']
tf_versions = sorted([k # descending
for k, _v in tf_releases['versions'].items()],
key=LooseVersion,
reverse=True)
if include_prerelease:
return tf_versions
return [i for i in tf_versions if '-' not in i] | python | def get_available_tf_versions(include_prerelease=False):
"""Return available Terraform versions."""
tf_releases = json.loads(
requests.get('https://releases.hashicorp.com/index.json').text
)['terraform']
tf_versions = sorted([k # descending
for k, _v in tf_releases['versions'].items()],
key=LooseVersion,
reverse=True)
if include_prerelease:
return tf_versions
return [i for i in tf_versions if '-' not in i] | ['def', 'get_available_tf_versions', '(', 'include_prerelease', '=', 'False', ')', ':', 'tf_releases', '=', 'json', '.', 'loads', '(', 'requests', '.', 'get', '(', "'https://releases.hashicorp.com/index.json'", ')', '.', 'text', ')', '[', "'terraform'", ']', 'tf_versions', '=', 'sorted', '(', '[', 'k', '# descending', 'for', 'k', ',', '_v', 'in', 'tf_releases', '[', "'versions'", ']', '.', 'items', '(', ')', ']', ',', 'key', '=', 'LooseVersion', ',', 'reverse', '=', 'True', ')', 'if', 'include_prerelease', ':', 'return', 'tf_versions', 'return', '[', 'i', 'for', 'i', 'in', 'tf_versions', 'if', "'-'", 'not', 'in', 'i', ']'] | Return available Terraform versions. | ['Return', 'available', 'Terraform', 'versions', '.'] | train | https://github.com/onicagroup/runway/blob/3f3549ec3bf6e39b9f27d9738a1847f3a4369e7f/runway/tfenv.py#L76-L87 |
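get_available_tf_versions reads Hashicorp's public release index and sorts with LooseVersion so that, for example, 0.12.10 orders above 0.12.9 where a plain string sort would not. A trimmed-down, network-dependent sketch of the same lookup:

import json
from distutils.version import LooseVersion

import requests

def available_tf_versions(include_prerelease=False):
    index = json.loads(requests.get('https://releases.hashicorp.com/index.json').text)
    versions = sorted(index['terraform']['versions'],
                      key=LooseVersion, reverse=True)        # newest first
    if include_prerelease:
        return versions
    return [v for v in versions if '-' not in v]             # drop -beta/-rc builds

print(available_tf_versions()[:3])   # the three most recent stable releases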
9,660 | svven/summary | summary/filters.py | AdblockURLFilterMeta.load_raw_rules | def load_raw_rules(cls, url):
"Load raw rules from url or package file."
raw_rules = []
filename = url.split('/')[-1] # e.g.: easylist.txt
try:
with closing(request.get(url, stream=True)) as file:
file.raise_for_status()
# lines = 0 # to be removed
for rule in file.iter_lines():
raw_rules.append(rule.strip())
# lines += 1 # tbr
# if lines == 2500: break # tbr, only for windoze with no re2
logger.info("Adblock online %s: %d", filename, len(raw_rules))
except: # file server down or bad url
with open(resource_filename('summary', filename), 'r') as file:
for rule in file:
raw_rules.append(rule.strip())
logger.info("Adblock offline %s: %d", filename, len(raw_rules))
return raw_rules | python | def load_raw_rules(cls, url):
"Load raw rules from url or package file."
raw_rules = []
filename = url.split('/')[-1] # e.g.: easylist.txt
try:
with closing(request.get(url, stream=True)) as file:
file.raise_for_status()
# lines = 0 # to be removed
for rule in file.iter_lines():
raw_rules.append(rule.strip())
# lines += 1 # tbr
# if lines == 2500: break # tbr, only for windoze with no re2
logger.info("Adblock online %s: %d", filename, len(raw_rules))
except: # file server down or bad url
with open(resource_filename('summary', filename), 'r') as file:
for rule in file:
raw_rules.append(rule.strip())
logger.info("Adblock offline %s: %d", filename, len(raw_rules))
return raw_rules | ['def', 'load_raw_rules', '(', 'cls', ',', 'url', ')', ':', 'raw_rules', '=', '[', ']', 'filename', '=', 'url', '.', 'split', '(', "'/'", ')', '[', '-', '1', ']', '# e.g.: easylist.txt\r', 'try', ':', 'with', 'closing', '(', 'request', '.', 'get', '(', 'url', ',', 'stream', '=', 'True', ')', ')', 'as', 'file', ':', 'file', '.', 'raise_for_status', '(', ')', '# lines = 0 # to be removed\r', 'for', 'rule', 'in', 'file', '.', 'iter_lines', '(', ')', ':', 'raw_rules', '.', 'append', '(', 'rule', '.', 'strip', '(', ')', ')', '# lines += 1 # tbr\r', '# if lines == 2500: break # tbr, only for windoze with no re2\r', 'logger', '.', 'info', '(', '"Adblock online %s: %d"', ',', 'filename', ',', 'len', '(', 'raw_rules', ')', ')', 'except', ':', '# file server down or bad url\r', 'with', 'open', '(', 'resource_filename', '(', "'summary'", ',', 'filename', ')', ',', "'r'", ')', 'as', 'file', ':', 'for', 'rule', 'in', 'file', ':', 'raw_rules', '.', 'append', '(', 'rule', '.', 'strip', '(', ')', ')', 'logger', '.', 'info', '(', '"Adblock offline %s: %d"', ',', 'filename', ',', 'len', '(', 'raw_rules', ')', ')', 'return', 'raw_rules'] | Load raw rules from url or package file. | ['Load', 'raw', 'rules', 'from', 'url', 'or', 'package', 'file', '.'] | train | https://github.com/svven/summary/blob/3a6c43d2da08a7452f6b76d1813aa70ba36b8a54/summary/filters.py#L38-L56 |
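
The download-with-local-fallback pattern can be sketched apart from the Adblock specifics; the URL and fallback path below are placeholders and the `requests` package is assumed for the HTTP call:

from contextlib import closing
import requests

def fetch_lines(url, fallback_path):
    try:
        with closing(requests.get(url, stream=True, timeout=10)) as resp:
            resp.raise_for_status()
            return [line.strip() for line in resp.iter_lines(decode_unicode=True)]
    except requests.RequestException:
        # Server down or bad URL: fall back to the bundled copy.
        with open(fallback_path, 'r') as handle:
            return [line.strip() for line in handle]
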
9,661 | Mindwerks/worldengine | worldengine/generation.py | initialize_ocean_and_thresholds | def initialize_ocean_and_thresholds(world, ocean_level=1.0):
"""
Calculate the ocean, the sea depth and the elevation thresholds
:param world: a world having elevation but not thresholds
:param ocean_level: the elevation representing the ocean level
:return: nothing, the world will be changed
"""
e = world.layers['elevation'].data
ocean = fill_ocean(e, ocean_level)
hl = find_threshold_f(e, 0.10) # the highest 10% of all (!) land are declared hills
ml = find_threshold_f(e, 0.03) # the highest 3% are declared mountains
e_th = [('sea', ocean_level),
('plain', hl),
('hill', ml),
('mountain', None)]
harmonize_ocean(ocean, e, ocean_level)
world.ocean = ocean
world.elevation = (e, e_th)
world.sea_depth = sea_depth(world, ocean_level) | python | def initialize_ocean_and_thresholds(world, ocean_level=1.0):
"""
Calculate the ocean, the sea depth and the elevation thresholds
:param world: a world having elevation but not thresholds
:param ocean_level: the elevation representing the ocean level
:return: nothing, the world will be changed
"""
e = world.layers['elevation'].data
ocean = fill_ocean(e, ocean_level)
hl = find_threshold_f(e, 0.10) # the highest 10% of all (!) land are declared hills
ml = find_threshold_f(e, 0.03) # the highest 3% are declared mountains
e_th = [('sea', ocean_level),
('plain', hl),
('hill', ml),
('mountain', None)]
harmonize_ocean(ocean, e, ocean_level)
world.ocean = ocean
world.elevation = (e, e_th)
world.sea_depth = sea_depth(world, ocean_level) | ['def', 'initialize_ocean_and_thresholds', '(', 'world', ',', 'ocean_level', '=', '1.0', ')', ':', 'e', '=', 'world', '.', 'layers', '[', "'elevation'", ']', '.', 'data', 'ocean', '=', 'fill_ocean', '(', 'e', ',', 'ocean_level', ')', 'hl', '=', 'find_threshold_f', '(', 'e', ',', '0.10', ')', '# the highest 10% of all (!) land are declared hills', 'ml', '=', 'find_threshold_f', '(', 'e', ',', '0.03', ')', '# the highest 3% are declared mountains', 'e_th', '=', '[', '(', "'sea'", ',', 'ocean_level', ')', ',', '(', "'plain'", ',', 'hl', ')', ',', '(', "'hill'", ',', 'ml', ')', ',', '(', "'mountain'", ',', 'None', ')', ']', 'harmonize_ocean', '(', 'ocean', ',', 'e', ',', 'ocean_level', ')', 'world', '.', 'ocean', '=', 'ocean', 'world', '.', 'elevation', '=', '(', 'e', ',', 'e_th', ')', 'world', '.', 'sea_depth', '=', 'sea_depth', '(', 'world', ',', 'ocean_level', ')'] | Calculate the ocean, the sea depth and the elevation thresholds
:param world: a world having elevation but not thresholds
:param ocean_level: the elevation representing the ocean level
:return: nothing, the world will be changed | ['Calculate', 'the', 'ocean', 'the', 'sea', 'depth', 'and', 'the', 'elevation', 'thresholds', ':', 'param', 'world', ':', 'a', 'world', 'having', 'elevation', 'but', 'not', 'thresholds', ':', 'param', 'ocean_level', ':', 'the', 'elevation', 'representing', 'the', 'ocean', 'level', ':', 'return', ':', 'nothing', 'the', 'world', 'will', 'be', 'changed'] | train | https://github.com/Mindwerks/worldengine/blob/64dff8eb7824ce46b5b6cb8006bcef21822ef144/worldengine/generation.py#L101-L119 |
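
`find_threshold_f` is not part of this row, so its exact behaviour is an assumption, but the stated intent (the elevation above which the top 10% and top 3% of cells lie) maps onto a plain percentile; a small numpy sketch:

import numpy as np

elevation = np.random.default_rng(0).random((256, 256))
hill_level = np.percentile(elevation, 90)       # highest 10% of cells are hills
mountain_level = np.percentile(elevation, 97)   # highest 3% are mountains
print(hill_level < mountain_level)              # True
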
9,662 | boriel/zxbasic | zxb.py | output | def output(memory, ofile=None):
""" Filters the output removing useless preprocessor #directives
and writes it to the given file or to the screen if no file is passed
"""
for m in memory:
        m = m.rstrip('\r\n\t ')  # Ensures no trailing newlines (might happen upon includes)
if m and m[0] == '#': # Preprocessor directive?
if ofile is None:
print(m)
else:
ofile.write('%s\n' % m)
continue
# Prints a 4 spaces "tab" for non labels
if m and ':' not in m:
if ofile is None:
print(' '),
else:
ofile.write('\t')
if ofile is None:
print(m)
else:
ofile.write('%s\n' % m) | python | def output(memory, ofile=None):
""" Filters the output removing useless preprocessor #directives
and writes it to the given file or to the screen if no file is passed
"""
for m in memory:
        m = m.rstrip('\r\n\t ')  # Ensures no trailing newlines (might happen upon includes)
if m and m[0] == '#': # Preprocessor directive?
if ofile is None:
print(m)
else:
ofile.write('%s\n' % m)
continue
# Prints a 4 spaces "tab" for non labels
if m and ':' not in m:
if ofile is None:
print(' '),
else:
ofile.write('\t')
if ofile is None:
print(m)
else:
ofile.write('%s\n' % m) | ['def', 'output', '(', 'memory', ',', 'ofile', '=', 'None', ')', ':', 'for', 'm', 'in', 'memory', ':', 'm', '=', 'm', '.', 'rstrip', '(', "'\\r\\n\\t '", ')', '# Ensures no trailing newlines (might with upon includes)', 'if', 'm', 'and', 'm', '[', '0', ']', '==', "'#'", ':', '# Preprocessor directive?', 'if', 'ofile', 'is', 'None', ':', 'print', '(', 'm', ')', 'else', ':', 'ofile', '.', 'write', '(', "'%s\\n'", '%', 'm', ')', 'continue', '# Prints a 4 spaces "tab" for non labels', 'if', 'm', 'and', "':'", 'not', 'in', 'm', ':', 'if', 'ofile', 'is', 'None', ':', 'print', '(', "' '", ')', ',', 'else', ':', 'ofile', '.', 'write', '(', "'\\t'", ')', 'if', 'ofile', 'is', 'None', ':', 'print', '(', 'm', ')', 'else', ':', 'ofile', '.', 'write', '(', "'%s\\n'", '%', 'm', ')'] | Filters the output removing useless preprocessor #directives
and writes it to the given file or to the screen if no file is passed | ['Filters', 'the', 'output', 'removing', 'useless', 'preprocessor', '#directives', 'and', 'writes', 'it', 'to', 'the', 'given', 'file', 'or', 'to', 'the', 'screen', 'if', 'no', 'file', 'is', 'passed'] | train | https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/zxb.py#L48-L71 |
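
The formatting rule (preprocessor directives and labels flush left, everything else indented by one tab) can be exercised with an in-memory buffer; a minimal sketch of the idea, not the zxbasic implementation:

import io

def emit(lines, out):
    for line in lines:
        line = line.rstrip()
        if line and not line.startswith('#') and ':' not in line:
            out.write('\t')
        out.write(line + '\n')

buf = io.StringIO()
emit(['#directive', 'start:', 'ld a, 0'], buf)
print(buf.getvalue())   # '#directive\nstart:\n\tld a, 0\n'
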
9,663 | dmwm/DBS | Server/Python/src/dbs/business/DBSProcessingEra.py | DBSProcessingEra.listProcessingEras | def listProcessingEras(self, processing_version=''):
"""
Returns all processing eras in dbs
"""
conn = self.dbi.connection()
try:
result = self.pelst.execute(conn, processing_version)
return result
finally:
if conn:
conn.close() | python | def listProcessingEras(self, processing_version=''):
"""
Returns all processing eras in dbs
"""
conn = self.dbi.connection()
try:
result = self.pelst.execute(conn, processing_version)
return result
finally:
if conn:
conn.close() | ['def', 'listProcessingEras', '(', 'self', ',', 'processing_version', '=', "''", ')', ':', 'conn', '=', 'self', '.', 'dbi', '.', 'connection', '(', ')', 'try', ':', 'result', '=', 'self', '.', 'pelst', '.', 'execute', '(', 'conn', ',', 'processing_version', ')', 'return', 'result', 'finally', ':', 'if', 'conn', ':', 'conn', '.', 'close', '(', ')'] | Returns all processing eras in dbs | ['Returns', 'all', 'processing', 'eras', 'in', 'dbs'] | train | https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/business/DBSProcessingEra.py#L25-L35 |
9,664 | DLR-RM/RAFCON | source/rafcon/gui/mygaphas/constraint.py | PortRectConstraint.limit_pos | def limit_pos(p, se_pos, nw_pos):
"""
Limits position p to stay inside containing state
:param p: Position to limit
:param se_pos: Bottom/Right boundary
:param nw_pos: Top/Left boundary
:return:
"""
if p > se_pos:
_update(p, se_pos)
elif p < nw_pos:
_update(p, nw_pos) | python | def limit_pos(p, se_pos, nw_pos):
"""
Limits position p to stay inside containing state
:param p: Position to limit
:param se_pos: Bottom/Right boundary
:param nw_pos: Top/Left boundary
:return:
"""
if p > se_pos:
_update(p, se_pos)
elif p < nw_pos:
_update(p, nw_pos) | ['def', 'limit_pos', '(', 'p', ',', 'se_pos', ',', 'nw_pos', ')', ':', 'if', 'p', '>', 'se_pos', ':', '_update', '(', 'p', ',', 'se_pos', ')', 'elif', 'p', '<', 'nw_pos', ':', '_update', '(', 'p', ',', 'nw_pos', ')'] | Limits position p to stay inside containing state
:param p: Position to limit
:param se_pos: Bottom/Right boundary
:param nw_pos: Top/Left boundary
:return: | ['Limits', 'position', 'p', 'to', 'stay', 'inside', 'containing', 'state', ':', 'param', 'p', ':', 'Position', 'to', 'limit', ':', 'param', 'se_pos', ':', 'Bottom', '/', 'Right', 'boundary', ':', 'param', 'nw_pos', ':', 'Top', '/', 'Left', 'boundary', ':', 'return', ':'] | train | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/mygaphas/constraint.py#L302-L313 |
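
The same idea as a plain clamp on numbers (the original works through gaphas position objects and `_update`, which are outside this row); a minimal sketch:

def clamp(value, lower, upper):
    # Keep value inside [lower, upper], like limiting a port to its state's border.
    return max(lower, min(value, upper))

assert clamp(12, 0, 10) == 10
assert clamp(-3, 0, 10) == 0
assert clamp(5, 0, 10) == 5
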
9,665 | Erotemic/utool | utool/util_autogen.py | dump_autogen_code | def dump_autogen_code(fpath, autogen_text, codetype='python', fullprint=None,
show_diff=None, dowrite=None):
"""
    Helper that writes a file if -w is given on the command line, otherwise
    it just prints it out. It has the option of comparing a diff to the file.
"""
import utool as ut
if dowrite is None:
dowrite = ut.get_argflag(('-w', '--write'))
if show_diff is None:
show_diff = ut.get_argflag('--diff')
num_context_lines = ut.get_argval('--diff', type_=int, default=None)
show_diff = show_diff or num_context_lines is not None
num_context_lines = ut.get_argval('--diff', type_=int, default=None)
if fullprint is None:
fullprint = True
if fullprint is False:
fullprint = ut.get_argflag('--print')
print('[autogen] Autogenerated %s...\n+---\n' % (fpath,))
if not dowrite:
if fullprint:
ut.print_code(autogen_text, lexer_name=codetype)
print('\nL___')
else:
print('specify --print to write to stdout')
pass
print('specify -w to write, or --diff to compare')
print('...would write to: %s' % fpath)
if show_diff:
if ut.checkpath(fpath, verbose=True):
prev_text = ut.read_from(fpath)
textdiff = ut.get_textdiff(prev_text, autogen_text,
num_context_lines=num_context_lines)
try:
ut.print_difftext(textdiff)
except UnicodeDecodeError:
import unicodedata
textdiff = unicodedata.normalize('NFKD', textdiff).encode('ascii', 'ignore')
ut.print_difftext(textdiff)
if dowrite:
print('WARNING: Not writing. Remove --diff from command line')
elif dowrite:
ut.write_to(fpath, autogen_text) | python | def dump_autogen_code(fpath, autogen_text, codetype='python', fullprint=None,
show_diff=None, dowrite=None):
"""
    Helper that writes a file if -w is given on the command line, otherwise
    it just prints it out. It has the option of comparing a diff to the file.
"""
import utool as ut
if dowrite is None:
dowrite = ut.get_argflag(('-w', '--write'))
if show_diff is None:
show_diff = ut.get_argflag('--diff')
num_context_lines = ut.get_argval('--diff', type_=int, default=None)
show_diff = show_diff or num_context_lines is not None
num_context_lines = ut.get_argval('--diff', type_=int, default=None)
if fullprint is None:
fullprint = True
if fullprint is False:
fullprint = ut.get_argflag('--print')
print('[autogen] Autogenerated %s...\n+---\n' % (fpath,))
if not dowrite:
if fullprint:
ut.print_code(autogen_text, lexer_name=codetype)
print('\nL___')
else:
print('specify --print to write to stdout')
pass
print('specify -w to write, or --diff to compare')
print('...would write to: %s' % fpath)
if show_diff:
if ut.checkpath(fpath, verbose=True):
prev_text = ut.read_from(fpath)
textdiff = ut.get_textdiff(prev_text, autogen_text,
num_context_lines=num_context_lines)
try:
ut.print_difftext(textdiff)
except UnicodeDecodeError:
import unicodedata
textdiff = unicodedata.normalize('NFKD', textdiff).encode('ascii', 'ignore')
ut.print_difftext(textdiff)
if dowrite:
print('WARNING: Not writing. Remove --diff from command line')
elif dowrite:
ut.write_to(fpath, autogen_text) | ['def', 'dump_autogen_code', '(', 'fpath', ',', 'autogen_text', ',', 'codetype', '=', "'python'", ',', 'fullprint', '=', 'None', ',', 'show_diff', '=', 'None', ',', 'dowrite', '=', 'None', ')', ':', 'import', 'utool', 'as', 'ut', 'if', 'dowrite', 'is', 'None', ':', 'dowrite', '=', 'ut', '.', 'get_argflag', '(', '(', "'-w'", ',', "'--write'", ')', ')', 'if', 'show_diff', 'is', 'None', ':', 'show_diff', '=', 'ut', '.', 'get_argflag', '(', "'--diff'", ')', 'num_context_lines', '=', 'ut', '.', 'get_argval', '(', "'--diff'", ',', 'type_', '=', 'int', ',', 'default', '=', 'None', ')', 'show_diff', '=', 'show_diff', 'or', 'num_context_lines', 'is', 'not', 'None', 'num_context_lines', '=', 'ut', '.', 'get_argval', '(', "'--diff'", ',', 'type_', '=', 'int', ',', 'default', '=', 'None', ')', 'if', 'fullprint', 'is', 'None', ':', 'fullprint', '=', 'True', 'if', 'fullprint', 'is', 'False', ':', 'fullprint', '=', 'ut', '.', 'get_argflag', '(', "'--print'", ')', 'print', '(', "'[autogen] Autogenerated %s...\\n+---\\n'", '%', '(', 'fpath', ',', ')', ')', 'if', 'not', 'dowrite', ':', 'if', 'fullprint', ':', 'ut', '.', 'print_code', '(', 'autogen_text', ',', 'lexer_name', '=', 'codetype', ')', 'print', '(', "'\\nL___'", ')', 'else', ':', 'print', '(', "'specify --print to write to stdout'", ')', 'pass', 'print', '(', "'specify -w to write, or --diff to compare'", ')', 'print', '(', "'...would write to: %s'", '%', 'fpath', ')', 'if', 'show_diff', ':', 'if', 'ut', '.', 'checkpath', '(', 'fpath', ',', 'verbose', '=', 'True', ')', ':', 'prev_text', '=', 'ut', '.', 'read_from', '(', 'fpath', ')', 'textdiff', '=', 'ut', '.', 'get_textdiff', '(', 'prev_text', ',', 'autogen_text', ',', 'num_context_lines', '=', 'num_context_lines', ')', 'try', ':', 'ut', '.', 'print_difftext', '(', 'textdiff', ')', 'except', 'UnicodeDecodeError', ':', 'import', 'unicodedata', 'textdiff', '=', 'unicodedata', '.', 'normalize', '(', "'NFKD'", ',', 'textdiff', ')', '.', 'encode', '(', "'ascii'", ',', "'ignore'", ')', 'ut', '.', 'print_difftext', '(', 'textdiff', ')', 'if', 'dowrite', ':', 'print', '(', "'WARNING: Not writing. Remove --diff from command line'", ')', 'elif', 'dowrite', ':', 'ut', '.', 'write_to', '(', 'fpath', ',', 'autogen_text', ')'] | Helper that write a file if -w is given on command line, otherwise
it just prints it out. It has the opption of comparing a diff to the file. | ['Helper', 'that', 'write', 'a', 'file', 'if', '-', 'w', 'is', 'given', 'on', 'command', 'line', 'otherwise', 'it', 'just', 'prints', 'it', 'out', '.', 'It', 'has', 'the', 'opption', 'of', 'comparing', 'a', 'diff', 'to', 'the', 'file', '.'] | train | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_autogen.py#L22-L69 |
9,666 | hammerlab/cohorts | cohorts/utils.py | _strip_column_name | def _strip_column_name(col_name, keep_paren_contents=True):
"""
Utility script applying several regexs to a string.
Intended to be used by `strip_column_names`.
This function will:
1. replace informative punctuation components with text
2. (optionally) remove text within parentheses
3. replace remaining punctuation/whitespace with _
4. strip leading/trailing punctuation/whitespace
Parameters
----------
col_name (str): input character string
keep_paren_contents (logical):
controls behavior of within-paren elements of text
- if True, (the default) all text within parens retained
- if False, text within parens will be removed from the field name
Returns
--------
modified string for new field name
Examples
--------
> print([_strip_column_name(col) for col in ['PD-L1','PD L1','PD L1_']])
"""
# start with input
new_col_name = col_name
# replace meaningful punctuation with text equivalents
# surround each with whitespace to enforce consistent use of _
punctuation_to_text = {
'<=': 'le',
'>=': 'ge',
'=<': 'le',
'=>': 'ge',
'<': 'lt',
'>': 'gt',
'#': 'num'
}
for punctuation, punctuation_text in punctuation_to_text.items():
new_col_name = new_col_name.replace(punctuation, punctuation_text)
# remove contents within ()
if not(keep_paren_contents):
new_col_name = re.sub('\([^)]*\)', '', new_col_name)
# replace remaining punctuation/whitespace with _
punct_pattern = '[\W_]+'
punct_replacement = '_'
new_col_name = re.sub(punct_pattern, punct_replacement, new_col_name)
# remove leading/trailing _ if it exists (if last char was punctuation)
new_col_name = new_col_name.strip("_")
# TODO: check for empty string
# return lower-case version of column name
return new_col_name.lower() | python | def _strip_column_name(col_name, keep_paren_contents=True):
"""
Utility script applying several regexs to a string.
Intended to be used by `strip_column_names`.
This function will:
1. replace informative punctuation components with text
2. (optionally) remove text within parentheses
3. replace remaining punctuation/whitespace with _
4. strip leading/trailing punctuation/whitespace
Parameters
----------
col_name (str): input character string
keep_paren_contents (logical):
controls behavior of within-paren elements of text
- if True, (the default) all text within parens retained
- if False, text within parens will be removed from the field name
Returns
--------
modified string for new field name
Examples
--------
> print([_strip_column_name(col) for col in ['PD-L1','PD L1','PD L1_']])
"""
# start with input
new_col_name = col_name
# replace meaningful punctuation with text equivalents
# surround each with whitespace to enforce consistent use of _
punctuation_to_text = {
'<=': 'le',
'>=': 'ge',
'=<': 'le',
'=>': 'ge',
'<': 'lt',
'>': 'gt',
'#': 'num'
}
for punctuation, punctuation_text in punctuation_to_text.items():
new_col_name = new_col_name.replace(punctuation, punctuation_text)
# remove contents within ()
if not(keep_paren_contents):
new_col_name = re.sub('\([^)]*\)', '', new_col_name)
# replace remaining punctuation/whitespace with _
punct_pattern = '[\W_]+'
punct_replacement = '_'
new_col_name = re.sub(punct_pattern, punct_replacement, new_col_name)
# remove leading/trailing _ if it exists (if last char was punctuation)
new_col_name = new_col_name.strip("_")
# TODO: check for empty string
# return lower-case version of column name
return new_col_name.lower() | ['def', '_strip_column_name', '(', 'col_name', ',', 'keep_paren_contents', '=', 'True', ')', ':', '# start with input', 'new_col_name', '=', 'col_name', '# replace meaningful punctuation with text equivalents', '# surround each with whitespace to enforce consistent use of _', 'punctuation_to_text', '=', '{', "'<='", ':', "'le'", ',', "'>='", ':', "'ge'", ',', "'=<'", ':', "'le'", ',', "'=>'", ':', "'ge'", ',', "'<'", ':', "'lt'", ',', "'>'", ':', "'gt'", ',', "'#'", ':', "'num'", '}', 'for', 'punctuation', ',', 'punctuation_text', 'in', 'punctuation_to_text', '.', 'items', '(', ')', ':', 'new_col_name', '=', 'new_col_name', '.', 'replace', '(', 'punctuation', ',', 'punctuation_text', ')', '# remove contents within ()', 'if', 'not', '(', 'keep_paren_contents', ')', ':', 'new_col_name', '=', 're', '.', 'sub', '(', "'\\([^)]*\\)'", ',', "''", ',', 'new_col_name', ')', '# replace remaining punctuation/whitespace with _', 'punct_pattern', '=', "'[\\W_]+'", 'punct_replacement', '=', "'_'", 'new_col_name', '=', 're', '.', 'sub', '(', 'punct_pattern', ',', 'punct_replacement', ',', 'new_col_name', ')', '# remove leading/trailing _ if it exists (if last char was punctuation)', 'new_col_name', '=', 'new_col_name', '.', 'strip', '(', '"_"', ')', '# TODO: check for empty string', '# return lower-case version of column name', 'return', 'new_col_name', '.', 'lower', '(', ')'] | Utility script applying several regexs to a string.
Intended to be used by `strip_column_names`.
This function will:
1. replace informative punctuation components with text
2. (optionally) remove text within parentheses
3. replace remaining punctuation/whitespace with _
4. strip leading/trailing punctuation/whitespace
Parameters
----------
col_name (str): input character string
keep_paren_contents (logical):
controls behavior of within-paren elements of text
- if True, (the default) all text within parens retained
- if False, text within parens will be removed from the field name
Returns
--------
modified string for new field name
Examples
--------
> print([_strip_column_name(col) for col in ['PD-L1','PD L1','PD L1_']]) | ['Utility', 'script', 'applying', 'several', 'regexs', 'to', 'a', 'string', '.', 'Intended', 'to', 'be', 'used', 'by', 'strip_column_names', '.'] | train | https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/utils.py#L84-L142 |
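
Illustrative calls, with outputs traced from the code above (the docstring's own example plus one with a parenthesised suffix):

print([_strip_column_name(col) for col in ['PD-L1', 'PD L1', 'PD L1_']])
# ['pd_l1', 'pd_l1', 'pd_l1']
print(_strip_column_name('PD-L1 (>= 1%)'))
# 'pd_l1_ge_1'  (with keep_paren_contents=False the paren contents are dropped, giving 'pd_l1')
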
9,667 | vtkiorg/vtki | vtki/utilities.py | numpy_to_texture | def numpy_to_texture(image):
"""Convert a NumPy image array to a vtk.vtkTexture"""
if not isinstance(image, np.ndarray):
raise TypeError('Unknown input type ({})'.format(type(image)))
if image.ndim != 3 or image.shape[2] != 3:
raise AssertionError('Input image must be nn by nm by RGB')
grid = vtki.UniformGrid((image.shape[1], image.shape[0], 1))
grid.point_arrays['Image'] = np.flip(image.swapaxes(0,1), axis=1).reshape((-1, 3), order='F')
grid.set_active_scalar('Image')
return image_to_texture(grid) | python | def numpy_to_texture(image):
"""Convert a NumPy image array to a vtk.vtkTexture"""
if not isinstance(image, np.ndarray):
raise TypeError('Unknown input type ({})'.format(type(image)))
if image.ndim != 3 or image.shape[2] != 3:
raise AssertionError('Input image must be nn by nm by RGB')
grid = vtki.UniformGrid((image.shape[1], image.shape[0], 1))
grid.point_arrays['Image'] = np.flip(image.swapaxes(0,1), axis=1).reshape((-1, 3), order='F')
grid.set_active_scalar('Image')
return image_to_texture(grid) | ['def', 'numpy_to_texture', '(', 'image', ')', ':', 'if', 'not', 'isinstance', '(', 'image', ',', 'np', '.', 'ndarray', ')', ':', 'raise', 'TypeError', '(', "'Unknown input type ({})'", '.', 'format', '(', 'type', '(', 'image', ')', ')', ')', 'if', 'image', '.', 'ndim', '!=', '3', 'or', 'image', '.', 'shape', '[', '2', ']', '!=', '3', ':', 'raise', 'AssertionError', '(', "'Input image must be nn by nm by RGB'", ')', 'grid', '=', 'vtki', '.', 'UniformGrid', '(', '(', 'image', '.', 'shape', '[', '1', ']', ',', 'image', '.', 'shape', '[', '0', ']', ',', '1', ')', ')', 'grid', '.', 'point_arrays', '[', "'Image'", ']', '=', 'np', '.', 'flip', '(', 'image', '.', 'swapaxes', '(', '0', ',', '1', ')', ',', 'axis', '=', '1', ')', '.', 'reshape', '(', '(', '-', '1', ',', '3', ')', ',', 'order', '=', "'F'", ')', 'grid', '.', 'set_active_scalar', '(', "'Image'", ')', 'return', 'image_to_texture', '(', 'grid', ')'] | Convert a NumPy image array to a vtk.vtkTexture | ['Convert', 'a', 'NumPy', 'image', 'array', 'to', 'a', 'vtk', '.', 'vtkTexture'] | train | https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/utilities.py#L251-L260 |
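
The array handling in the middle of that function (swap axes, flip, then flatten to an N-by-3 RGB list in Fortran order) can be checked without VTK; a numpy-only sketch:

import numpy as np

image = np.arange(2 * 3 * 3).reshape((2, 3, 3))   # 2 rows, 3 columns, RGB
flat = np.flip(image.swapaxes(0, 1), axis=1).reshape((-1, 3), order='F')
print(flat.shape)   # (6, 3): one RGB triple per pixel
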
9,668 | seleniumbase/SeleniumBase | seleniumbase/fixtures/base_case.py | BaseCase.click_chain | def click_chain(self, selectors_list, by=By.CSS_SELECTOR,
timeout=settings.SMALL_TIMEOUT, spacing=0):
""" This method clicks on a list of elements in succession.
'spacing' is the amount of time to wait between clicks. (sec) """
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
for selector in selectors_list:
self.click(selector, by=by, timeout=timeout)
if spacing > 0:
time.sleep(spacing) | python | def click_chain(self, selectors_list, by=By.CSS_SELECTOR,
timeout=settings.SMALL_TIMEOUT, spacing=0):
""" This method clicks on a list of elements in succession.
'spacing' is the amount of time to wait between clicks. (sec) """
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
for selector in selectors_list:
self.click(selector, by=by, timeout=timeout)
if spacing > 0:
time.sleep(spacing) | ['def', 'click_chain', '(', 'self', ',', 'selectors_list', ',', 'by', '=', 'By', '.', 'CSS_SELECTOR', ',', 'timeout', '=', 'settings', '.', 'SMALL_TIMEOUT', ',', 'spacing', '=', '0', ')', ':', 'if', 'self', '.', 'timeout_multiplier', 'and', 'timeout', '==', 'settings', '.', 'SMALL_TIMEOUT', ':', 'timeout', '=', 'self', '.', '__get_new_timeout', '(', 'timeout', ')', 'for', 'selector', 'in', 'selectors_list', ':', 'self', '.', 'click', '(', 'selector', ',', 'by', '=', 'by', ',', 'timeout', '=', 'timeout', ')', 'if', 'spacing', '>', '0', ':', 'time', '.', 'sleep', '(', 'spacing', ')'] | This method clicks on a list of elements in succession.
'spacing' is the amount of time to wait between clicks. (sec) | ['This', 'method', 'clicks', 'on', 'a', 'list', 'of', 'elements', 'in', 'succession', '.', 'spacing', 'is', 'the', 'amount', 'of', 'time', 'to', 'wait', 'between', 'clicks', '.', '(', 'sec', ')'] | train | https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/fixtures/base_case.py#L181-L190 |
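
A hypothetical call from inside a SeleniumBase test class; the URL and CSS selectors are placeholders, not taken from the project:

from seleniumbase import BaseCase

class MenuTest(BaseCase):
    def test_walk_through_menu(self):
        self.open('https://example.com')   # placeholder URL
        self.click_chain(['#open-menu', '#settings', 'button.save'], spacing=0.5)
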
9,669 | materialsproject/pymatgen | pymatgen/io/abinit/utils.py | abi_splitext | def abi_splitext(filename):
"""
Split the ABINIT extension from a filename.
"Extension" are found by searching in an internal database.
Returns "(root, ext)" where ext is the registered ABINIT extension
The final ".nc" is included (if any)
>>> assert abi_splitext("foo_WFK") == ('foo_', 'WFK')
>>> assert abi_splitext("/home/guido/foo_bar_WFK.nc") == ('foo_bar_', 'WFK.nc')
"""
filename = os.path.basename(filename)
is_ncfile = False
if filename.endswith(".nc"):
is_ncfile = True
filename = filename[:-3]
known_extensions = abi_extensions()
    # This algorithm fails if we have two files
# e.g. HAYDR_SAVE, ANOTHER_HAYDR_SAVE
for i in range(len(filename)-1, -1, -1):
ext = filename[i:]
if ext in known_extensions:
break
else:
raise ValueError("Cannot find a registered extension in %s" % filename)
root = filename[:i]
if is_ncfile:
ext += ".nc"
return root, ext | python | def abi_splitext(filename):
"""
Split the ABINIT extension from a filename.
"Extension" are found by searching in an internal database.
Returns "(root, ext)" where ext is the registered ABINIT extension
The final ".nc" is included (if any)
>>> assert abi_splitext("foo_WFK") == ('foo_', 'WFK')
>>> assert abi_splitext("/home/guido/foo_bar_WFK.nc") == ('foo_bar_', 'WFK.nc')
"""
filename = os.path.basename(filename)
is_ncfile = False
if filename.endswith(".nc"):
is_ncfile = True
filename = filename[:-3]
known_extensions = abi_extensions()
# This algorith fails if we have two files
# e.g. HAYDR_SAVE, ANOTHER_HAYDR_SAVE
for i in range(len(filename)-1, -1, -1):
ext = filename[i:]
if ext in known_extensions:
break
else:
raise ValueError("Cannot find a registered extension in %s" % filename)
root = filename[:i]
if is_ncfile:
ext += ".nc"
return root, ext | ['def', 'abi_splitext', '(', 'filename', ')', ':', 'filename', '=', 'os', '.', 'path', '.', 'basename', '(', 'filename', ')', 'is_ncfile', '=', 'False', 'if', 'filename', '.', 'endswith', '(', '".nc"', ')', ':', 'is_ncfile', '=', 'True', 'filename', '=', 'filename', '[', ':', '-', '3', ']', 'known_extensions', '=', 'abi_extensions', '(', ')', '# This algorith fails if we have two files', '# e.g. HAYDR_SAVE, ANOTHER_HAYDR_SAVE', 'for', 'i', 'in', 'range', '(', 'len', '(', 'filename', ')', '-', '1', ',', '-', '1', ',', '-', '1', ')', ':', 'ext', '=', 'filename', '[', 'i', ':', ']', 'if', 'ext', 'in', 'known_extensions', ':', 'break', 'else', ':', 'raise', 'ValueError', '(', '"Cannot find a registered extension in %s"', '%', 'filename', ')', 'root', '=', 'filename', '[', ':', 'i', ']', 'if', 'is_ncfile', ':', 'ext', '+=', '".nc"', 'return', 'root', ',', 'ext'] | Split the ABINIT extension from a filename.
"Extension" are found by searching in an internal database.
Returns "(root, ext)" where ext is the registered ABINIT extension
The final ".nc" is included (if any)
>>> assert abi_splitext("foo_WFK") == ('foo_', 'WFK')
>>> assert abi_splitext("/home/guido/foo_bar_WFK.nc") == ('foo_bar_', 'WFK.nc') | ['Split', 'the', 'ABINIT', 'extension', 'from', 'a', 'filename', '.', 'Extension', 'are', 'found', 'by', 'searching', 'in', 'an', 'internal', 'database', '.'] | train | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/utils.py#L495-L528 |
9,670 | summanlp/textrank | summa/preprocessing/snowball.py | RussianStemmer.__cyrillic_to_roman | def __cyrillic_to_roman(self, word):
"""
Transliterate a Russian word into the Roman alphabet.
A Russian word whose letters consist of the Cyrillic
alphabet are transliterated into the Roman alphabet
in order to ease the forthcoming stemming process.
:param word: The word that is transliterated.
:type word: unicode
:return: the transliterated word.
:rtype: unicode
:note: This helper method is invoked by the stem method of the subclass
RussianStemmer. It is not to be invoked directly!
"""
word = (word.replace("\u0410", "a").replace("\u0430", "a")
.replace("\u0411", "b").replace("\u0431", "b")
.replace("\u0412", "v").replace("\u0432", "v")
.replace("\u0413", "g").replace("\u0433", "g")
.replace("\u0414", "d").replace("\u0434", "d")
.replace("\u0415", "e").replace("\u0435", "e")
.replace("\u0401", "e").replace("\u0451", "e")
.replace("\u0416", "zh").replace("\u0436", "zh")
.replace("\u0417", "z").replace("\u0437", "z")
.replace("\u0418", "i").replace("\u0438", "i")
.replace("\u0419", "i`").replace("\u0439", "i`")
.replace("\u041A", "k").replace("\u043A", "k")
.replace("\u041B", "l").replace("\u043B", "l")
.replace("\u041C", "m").replace("\u043C", "m")
.replace("\u041D", "n").replace("\u043D", "n")
.replace("\u041E", "o").replace("\u043E", "o")
.replace("\u041F", "p").replace("\u043F", "p")
.replace("\u0420", "r").replace("\u0440", "r")
.replace("\u0421", "s").replace("\u0441", "s")
.replace("\u0422", "t").replace("\u0442", "t")
.replace("\u0423", "u").replace("\u0443", "u")
.replace("\u0424", "f").replace("\u0444", "f")
.replace("\u0425", "kh").replace("\u0445", "kh")
.replace("\u0426", "t^s").replace("\u0446", "t^s")
.replace("\u0427", "ch").replace("\u0447", "ch")
.replace("\u0428", "sh").replace("\u0448", "sh")
.replace("\u0429", "shch").replace("\u0449", "shch")
.replace("\u042A", "''").replace("\u044A", "''")
.replace("\u042B", "y").replace("\u044B", "y")
.replace("\u042C", "'").replace("\u044C", "'")
.replace("\u042D", "e`").replace("\u044D", "e`")
.replace("\u042E", "i^u").replace("\u044E", "i^u")
.replace("\u042F", "i^a").replace("\u044F", "i^a"))
return word | python | def __cyrillic_to_roman(self, word):
"""
Transliterate a Russian word into the Roman alphabet.
A Russian word whose letters consist of the Cyrillic
alphabet are transliterated into the Roman alphabet
in order to ease the forthcoming stemming process.
:param word: The word that is transliterated.
:type word: unicode
:return: the transliterated word.
:rtype: unicode
:note: This helper method is invoked by the stem method of the subclass
RussianStemmer. It is not to be invoked directly!
"""
word = (word.replace("\u0410", "a").replace("\u0430", "a")
.replace("\u0411", "b").replace("\u0431", "b")
.replace("\u0412", "v").replace("\u0432", "v")
.replace("\u0413", "g").replace("\u0433", "g")
.replace("\u0414", "d").replace("\u0434", "d")
.replace("\u0415", "e").replace("\u0435", "e")
.replace("\u0401", "e").replace("\u0451", "e")
.replace("\u0416", "zh").replace("\u0436", "zh")
.replace("\u0417", "z").replace("\u0437", "z")
.replace("\u0418", "i").replace("\u0438", "i")
.replace("\u0419", "i`").replace("\u0439", "i`")
.replace("\u041A", "k").replace("\u043A", "k")
.replace("\u041B", "l").replace("\u043B", "l")
.replace("\u041C", "m").replace("\u043C", "m")
.replace("\u041D", "n").replace("\u043D", "n")
.replace("\u041E", "o").replace("\u043E", "o")
.replace("\u041F", "p").replace("\u043F", "p")
.replace("\u0420", "r").replace("\u0440", "r")
.replace("\u0421", "s").replace("\u0441", "s")
.replace("\u0422", "t").replace("\u0442", "t")
.replace("\u0423", "u").replace("\u0443", "u")
.replace("\u0424", "f").replace("\u0444", "f")
.replace("\u0425", "kh").replace("\u0445", "kh")
.replace("\u0426", "t^s").replace("\u0446", "t^s")
.replace("\u0427", "ch").replace("\u0447", "ch")
.replace("\u0428", "sh").replace("\u0448", "sh")
.replace("\u0429", "shch").replace("\u0449", "shch")
.replace("\u042A", "''").replace("\u044A", "''")
.replace("\u042B", "y").replace("\u044B", "y")
.replace("\u042C", "'").replace("\u044C", "'")
.replace("\u042D", "e`").replace("\u044D", "e`")
.replace("\u042E", "i^u").replace("\u044E", "i^u")
.replace("\u042F", "i^a").replace("\u044F", "i^a"))
return word | ['def', '__cyrillic_to_roman', '(', 'self', ',', 'word', ')', ':', 'word', '=', '(', 'word', '.', 'replace', '(', '"\\u0410"', ',', '"a"', ')', '.', 'replace', '(', '"\\u0430"', ',', '"a"', ')', '.', 'replace', '(', '"\\u0411"', ',', '"b"', ')', '.', 'replace', '(', '"\\u0431"', ',', '"b"', ')', '.', 'replace', '(', '"\\u0412"', ',', '"v"', ')', '.', 'replace', '(', '"\\u0432"', ',', '"v"', ')', '.', 'replace', '(', '"\\u0413"', ',', '"g"', ')', '.', 'replace', '(', '"\\u0433"', ',', '"g"', ')', '.', 'replace', '(', '"\\u0414"', ',', '"d"', ')', '.', 'replace', '(', '"\\u0434"', ',', '"d"', ')', '.', 'replace', '(', '"\\u0415"', ',', '"e"', ')', '.', 'replace', '(', '"\\u0435"', ',', '"e"', ')', '.', 'replace', '(', '"\\u0401"', ',', '"e"', ')', '.', 'replace', '(', '"\\u0451"', ',', '"e"', ')', '.', 'replace', '(', '"\\u0416"', ',', '"zh"', ')', '.', 'replace', '(', '"\\u0436"', ',', '"zh"', ')', '.', 'replace', '(', '"\\u0417"', ',', '"z"', ')', '.', 'replace', '(', '"\\u0437"', ',', '"z"', ')', '.', 'replace', '(', '"\\u0418"', ',', '"i"', ')', '.', 'replace', '(', '"\\u0438"', ',', '"i"', ')', '.', 'replace', '(', '"\\u0419"', ',', '"i`"', ')', '.', 'replace', '(', '"\\u0439"', ',', '"i`"', ')', '.', 'replace', '(', '"\\u041A"', ',', '"k"', ')', '.', 'replace', '(', '"\\u043A"', ',', '"k"', ')', '.', 'replace', '(', '"\\u041B"', ',', '"l"', ')', '.', 'replace', '(', '"\\u043B"', ',', '"l"', ')', '.', 'replace', '(', '"\\u041C"', ',', '"m"', ')', '.', 'replace', '(', '"\\u043C"', ',', '"m"', ')', '.', 'replace', '(', '"\\u041D"', ',', '"n"', ')', '.', 'replace', '(', '"\\u043D"', ',', '"n"', ')', '.', 'replace', '(', '"\\u041E"', ',', '"o"', ')', '.', 'replace', '(', '"\\u043E"', ',', '"o"', ')', '.', 'replace', '(', '"\\u041F"', ',', '"p"', ')', '.', 'replace', '(', '"\\u043F"', ',', '"p"', ')', '.', 'replace', '(', '"\\u0420"', ',', '"r"', ')', '.', 'replace', '(', '"\\u0440"', ',', '"r"', ')', '.', 'replace', '(', '"\\u0421"', ',', '"s"', ')', '.', 'replace', '(', '"\\u0441"', ',', '"s"', ')', '.', 'replace', '(', '"\\u0422"', ',', '"t"', ')', '.', 'replace', '(', '"\\u0442"', ',', '"t"', ')', '.', 'replace', '(', '"\\u0423"', ',', '"u"', ')', '.', 'replace', '(', '"\\u0443"', ',', '"u"', ')', '.', 'replace', '(', '"\\u0424"', ',', '"f"', ')', '.', 'replace', '(', '"\\u0444"', ',', '"f"', ')', '.', 'replace', '(', '"\\u0425"', ',', '"kh"', ')', '.', 'replace', '(', '"\\u0445"', ',', '"kh"', ')', '.', 'replace', '(', '"\\u0426"', ',', '"t^s"', ')', '.', 'replace', '(', '"\\u0446"', ',', '"t^s"', ')', '.', 'replace', '(', '"\\u0427"', ',', '"ch"', ')', '.', 'replace', '(', '"\\u0447"', ',', '"ch"', ')', '.', 'replace', '(', '"\\u0428"', ',', '"sh"', ')', '.', 'replace', '(', '"\\u0448"', ',', '"sh"', ')', '.', 'replace', '(', '"\\u0429"', ',', '"shch"', ')', '.', 'replace', '(', '"\\u0449"', ',', '"shch"', ')', '.', 'replace', '(', '"\\u042A"', ',', '"\'\'"', ')', '.', 'replace', '(', '"\\u044A"', ',', '"\'\'"', ')', '.', 'replace', '(', '"\\u042B"', ',', '"y"', ')', '.', 'replace', '(', '"\\u044B"', ',', '"y"', ')', '.', 'replace', '(', '"\\u042C"', ',', '"\'"', ')', '.', 'replace', '(', '"\\u044C"', ',', '"\'"', ')', '.', 'replace', '(', '"\\u042D"', ',', '"e`"', ')', '.', 'replace', '(', '"\\u044D"', ',', '"e`"', ')', '.', 'replace', '(', '"\\u042E"', ',', '"i^u"', ')', '.', 'replace', '(', '"\\u044E"', ',', '"i^u"', ')', '.', 'replace', '(', '"\\u042F"', ',', '"i^a"', ')', '.', 'replace', '(', '"\\u044F"', ',', '"i^a"', ')', ')', 'return', 'word'] | Transliterate a 
Russian word into the Roman alphabet.
A Russian word whose letters consist of the Cyrillic
alphabet are transliterated into the Roman alphabet
in order to ease the forthcoming stemming process.
:param word: The word that is transliterated.
:type word: unicode
:return: the transliterated word.
:rtype: unicode
:note: This helper method is invoked by the stem method of the subclass
RussianStemmer. It is not to be invoked directly! | ['Transliterate', 'a', 'Russian', 'word', 'into', 'the', 'Roman', 'alphabet', '.'] | train | https://github.com/summanlp/textrank/blob/6844bbe8c4b2b468020ae0dfd6574a743f9ad442/summa/preprocessing/snowball.py#L3183-L3234 |
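
A dictionary-based sketch of the same transliteration idea (only a handful of letters shown; the full table is the chain of replace calls above):

ROMAN = {'а': 'a', 'б': 'b', 'в': 'v', 'ж': 'zh', 'щ': 'shch'}

def to_roman(word):
    return ''.join(ROMAN.get(ch, ch) for ch in word.lower())

print(to_roman('жаба'))   # zhaba
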
9,671 | matousc89/padasip | padasip/filters/rls.py | FilterRLS.adapt | def adapt(self, d, x):
"""
        Adapt weights according to one desired value and its input.
**Args:**
* `d` : desired value (float)
* `x` : input array (1-dimensional array)
"""
y = np.dot(self.w, x)
e = d - y
R1 = np.dot(np.dot(np.dot(self.R,x),x.T),self.R)
R2 = self.mu + np.dot(np.dot(x,self.R),x.T)
self.R = 1/self.mu * (self.R - R1/R2)
dw = np.dot(self.R, x.T) * e
self.w += dw | python | def adapt(self, d, x):
"""
        Adapt weights according to one desired value and its input.
**Args:**
* `d` : desired value (float)
* `x` : input array (1-dimensional array)
"""
y = np.dot(self.w, x)
e = d - y
R1 = np.dot(np.dot(np.dot(self.R,x),x.T),self.R)
R2 = self.mu + np.dot(np.dot(x,self.R),x.T)
self.R = 1/self.mu * (self.R - R1/R2)
dw = np.dot(self.R, x.T) * e
self.w += dw | ['def', 'adapt', '(', 'self', ',', 'd', ',', 'x', ')', ':', 'y', '=', 'np', '.', 'dot', '(', 'self', '.', 'w', ',', 'x', ')', 'e', '=', 'd', '-', 'y', 'R1', '=', 'np', '.', 'dot', '(', 'np', '.', 'dot', '(', 'np', '.', 'dot', '(', 'self', '.', 'R', ',', 'x', ')', ',', 'x', '.', 'T', ')', ',', 'self', '.', 'R', ')', 'R2', '=', 'self', '.', 'mu', '+', 'np', '.', 'dot', '(', 'np', '.', 'dot', '(', 'x', ',', 'self', '.', 'R', ')', ',', 'x', '.', 'T', ')', 'self', '.', 'R', '=', '1', '/', 'self', '.', 'mu', '*', '(', 'self', '.', 'R', '-', 'R1', '/', 'R2', ')', 'dw', '=', 'np', '.', 'dot', '(', 'self', '.', 'R', ',', 'x', '.', 'T', ')', '*', 'e', 'self', '.', 'w', '+=', 'dw'] | Adapt weights according one desired value and its input.
**Args:**
* `d` : desired value (float)
* `x` : input array (1-dimensional array) | ['Adapt', 'weights', 'according', 'one', 'desired', 'value', 'and', 'its', 'input', '.'] | train | https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/filters/rls.py#L208-L224 |
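
A self-contained numpy version of the same recursive least squares update, identifying a 2-tap filter from noiseless samples; the forgetting factor and initial inverse-correlation scaling are arbitrary choices for the sketch:

import numpy as np

def rls_step(w, R, x, d, mu):
    e = d - w @ x
    R1 = R @ np.outer(x, x) @ R
    R2 = mu + x @ R @ x
    R = (R - R1 / R2) / mu
    return w + R @ x * e, R

mu, target = 0.99, np.array([0.5, -0.3])
w, R = np.zeros(2), np.eye(2) / 0.1
rng = np.random.default_rng(1)
for _ in range(200):
    x = rng.standard_normal(2)
    w, R = rls_step(w, R, x, target @ x, mu)
print(np.round(w, 3))   # approaches [ 0.5 -0.3]
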
9,672 | Nachtfeuer/pipeline | spline/tools/event.py | Event.delegate | def delegate(self, success, **kwargs):
"""Delegate success/failure to the right method."""
if success:
self.succeeded(**kwargs)
else:
self.failed(**kwargs) | python | def delegate(self, success, **kwargs):
"""Delegate success/failure to the right method."""
if success:
self.succeeded(**kwargs)
else:
self.failed(**kwargs) | ['def', 'delegate', '(', 'self', ',', 'success', ',', '*', '*', 'kwargs', ')', ':', 'if', 'success', ':', 'self', '.', 'succeeded', '(', '*', '*', 'kwargs', ')', 'else', ':', 'self', '.', 'failed', '(', '*', '*', 'kwargs', ')'] | Delegate success/failure to the right method. | ['Delegate', 'success', '/', 'failure', 'to', 'the', 'right', 'method', '.'] | train | https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/tools/event.py#L62-L67 |
9,673 | malramsay64/experi | src/experi/commands.py | Command.get_variables | def get_variables(self) -> Set[str]:
"""Find all the variables specified in a format string.
This returns a list of all the different variables specified in a format string,
that is the variables inside the braces.
"""
variables = set()
for cmd in self._cmd:
for var in self.__formatter.parse(cmd):
logger.debug("Checking variable: %s", var)
# creates and requires are special class values
if var[1] is not None and var[1] not in ["creates", "requires"]:
variables.add(var[1])
return variables | python | def get_variables(self) -> Set[str]:
"""Find all the variables specified in a format string.
This returns a list of all the different variables specified in a format string,
that is the variables inside the braces.
"""
variables = set()
for cmd in self._cmd:
for var in self.__formatter.parse(cmd):
logger.debug("Checking variable: %s", var)
# creates and requires are special class values
if var[1] is not None and var[1] not in ["creates", "requires"]:
variables.add(var[1])
return variables | ['def', 'get_variables', '(', 'self', ')', '->', 'Set', '[', 'str', ']', ':', 'variables', '=', 'set', '(', ')', 'for', 'cmd', 'in', 'self', '.', '_cmd', ':', 'for', 'var', 'in', 'self', '.', '__formatter', '.', 'parse', '(', 'cmd', ')', ':', 'logger', '.', 'debug', '(', '"Checking variable: %s"', ',', 'var', ')', '# creates and requires are special class values', 'if', 'var', '[', '1', ']', 'is', 'not', 'None', 'and', 'var', '[', '1', ']', 'not', 'in', '[', '"creates"', ',', '"requires"', ']', ':', 'variables', '.', 'add', '(', 'var', '[', '1', ']', ')', 'return', 'variables'] | Find all the variables specified in a format string.
This returns a list of all the different variables specified in a format string,
that is the variables inside the braces. | ['Find', 'all', 'the', 'variables', 'specified', 'in', 'a', 'format', 'string', '.'] | train | https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/commands.py#L53-L67 |
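
The standard-library call doing the work can be exercised directly; `string.Formatter().parse` yields `(literal_text, field_name, format_spec, conversion)` tuples, which is what the loop above filters:

from string import Formatter

cmd = 'mpirun -np {cores} ./sim --temp {temperature} --out {creates}'
fields = {name for _, name, _, _ in Formatter().parse(cmd)
          if name is not None and name not in ('creates', 'requires')}
print(sorted(fields))   # ['cores', 'temperature']
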
9,674 | ARMmbed/mbed-cloud-sdk-python | src/mbed_cloud/certificates/certificates.py | CertificatesAPI.list_certificates | def list_certificates(self, **kwargs):
"""List certificates registered to organisation.
Currently returns partially populated certificates. To obtain the full certificate object:
`[get_certificate(certificate_id=cert['id']) for cert in list_certificates]`
:param int limit: The number of certificates to retrieve.
:param str order: The ordering direction, ascending (asc) or
descending (desc).
:param str after: Get certificates after/starting at given `certificate_id`.
:param dict filters: Dictionary of filters to apply: type (eq), expire (eq), owner (eq)
:return: list of :py:class:`Certificate` objects
:rtype: Certificate
"""
kwargs = self._verify_sort_options(kwargs)
kwargs = self._verify_filters(kwargs, Certificate)
if "service__eq" in kwargs:
if kwargs["service__eq"] == CertificateType.bootstrap:
pass
elif kwargs["service__eq"] == CertificateType.developer:
kwargs["device_execution_mode__eq"] = 1
kwargs.pop("service__eq")
elif kwargs["service__eq"] == CertificateType.lwm2m:
pass
else:
raise CloudValueError(
"Incorrect value for CertificateType filter: %s" % (kwargs["service__eq"])
)
owner = kwargs.pop('owner_id__eq', None)
if owner is not None:
kwargs['owner__eq'] = owner
api = self._get_api(iam.DeveloperApi)
return PaginatedResponse(api.get_all_certificates, lwrap_type=Certificate, **kwargs) | python | def list_certificates(self, **kwargs):
"""List certificates registered to organisation.
Currently returns partially populated certificates. To obtain the full certificate object:
`[get_certificate(certificate_id=cert['id']) for cert in list_certificates]`
:param int limit: The number of certificates to retrieve.
:param str order: The ordering direction, ascending (asc) or
descending (desc).
:param str after: Get certificates after/starting at given `certificate_id`.
:param dict filters: Dictionary of filters to apply: type (eq), expire (eq), owner (eq)
:return: list of :py:class:`Certificate` objects
:rtype: Certificate
"""
kwargs = self._verify_sort_options(kwargs)
kwargs = self._verify_filters(kwargs, Certificate)
if "service__eq" in kwargs:
if kwargs["service__eq"] == CertificateType.bootstrap:
pass
elif kwargs["service__eq"] == CertificateType.developer:
kwargs["device_execution_mode__eq"] = 1
kwargs.pop("service__eq")
elif kwargs["service__eq"] == CertificateType.lwm2m:
pass
else:
raise CloudValueError(
"Incorrect value for CertificateType filter: %s" % (kwargs["service__eq"])
)
owner = kwargs.pop('owner_id__eq', None)
if owner is not None:
kwargs['owner__eq'] = owner
api = self._get_api(iam.DeveloperApi)
return PaginatedResponse(api.get_all_certificates, lwrap_type=Certificate, **kwargs) | ['def', 'list_certificates', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'kwargs', '=', 'self', '.', '_verify_sort_options', '(', 'kwargs', ')', 'kwargs', '=', 'self', '.', '_verify_filters', '(', 'kwargs', ',', 'Certificate', ')', 'if', '"service__eq"', 'in', 'kwargs', ':', 'if', 'kwargs', '[', '"service__eq"', ']', '==', 'CertificateType', '.', 'bootstrap', ':', 'pass', 'elif', 'kwargs', '[', '"service__eq"', ']', '==', 'CertificateType', '.', 'developer', ':', 'kwargs', '[', '"device_execution_mode__eq"', ']', '=', '1', 'kwargs', '.', 'pop', '(', '"service__eq"', ')', 'elif', 'kwargs', '[', '"service__eq"', ']', '==', 'CertificateType', '.', 'lwm2m', ':', 'pass', 'else', ':', 'raise', 'CloudValueError', '(', '"Incorrect value for CertificateType filter: %s"', '%', '(', 'kwargs', '[', '"service__eq"', ']', ')', ')', 'owner', '=', 'kwargs', '.', 'pop', '(', "'owner_id__eq'", ',', 'None', ')', 'if', 'owner', 'is', 'not', 'None', ':', 'kwargs', '[', "'owner__eq'", ']', '=', 'owner', 'api', '=', 'self', '.', '_get_api', '(', 'iam', '.', 'DeveloperApi', ')', 'return', 'PaginatedResponse', '(', 'api', '.', 'get_all_certificates', ',', 'lwrap_type', '=', 'Certificate', ',', '*', '*', 'kwargs', ')'] | List certificates registered to organisation.
Currently returns partially populated certificates. To obtain the full certificate object:
`[get_certificate(certificate_id=cert['id']) for cert in list_certificates]`
:param int limit: The number of certificates to retrieve.
:param str order: The ordering direction, ascending (asc) or
descending (desc).
:param str after: Get certificates after/starting at given `certificate_id`.
:param dict filters: Dictionary of filters to apply: type (eq), expire (eq), owner (eq)
:return: list of :py:class:`Certificate` objects
:rtype: Certificate | ['List', 'certificates', 'registered', 'to', 'organisation', '.'] | train | https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/certificates/certificates.py#L52-L84 |
9,675 | 20c/xbahn | xbahn/connection/__init__.py | Connection.make_data | def make_data(self, message):
"""
make data string from message according to transport_content_type
Returns:
str: message data
"""
if not isinstance(message, Message):
return message
return message.export(self.transport_content_type) | python | def make_data(self, message):
"""
make data string from message according to transport_content_type
Returns:
str: message data
"""
if not isinstance(message, Message):
return message
return message.export(self.transport_content_type) | ['def', 'make_data', '(', 'self', ',', 'message', ')', ':', 'if', 'not', 'isinstance', '(', 'message', ',', 'Message', ')', ':', 'return', 'message', 'return', 'message', '.', 'export', '(', 'self', '.', 'transport_content_type', ')'] | make data string from message according to transport_content_type
Returns:
str: message data | ['make', 'data', 'string', 'from', 'message', 'according', 'to', 'transport_content_type'] | train | https://github.com/20c/xbahn/blob/afb27b0576841338a366d7cac0200a782bd84be6/xbahn/connection/__init__.py#L122-L134 |
9,676 | genialis/resolwe | resolwe/flow/utils/stats.py | SimpleLoadAvg.add | def add(self, count, timestamp=None):
"""Add a value at the specified time to the series.
:param count: The number of work items ready at the specified
time.
:param timestamp: The timestamp to add. Defaults to None,
meaning current time. It should be strictly greater (newer)
than the last added timestamp.
"""
if timestamp is None:
timestamp = time.time()
if self.last_data >= timestamp:
raise ValueError("Time {} >= {} in load average calculation".format(self.last_data, timestamp))
self.last_data = timestamp
for meta in self.intervals.values():
meta.push(count, timestamp) | python | def add(self, count, timestamp=None):
"""Add a value at the specified time to the series.
:param count: The number of work items ready at the specified
time.
:param timestamp: The timestamp to add. Defaults to None,
meaning current time. It should be strictly greater (newer)
than the last added timestamp.
"""
if timestamp is None:
timestamp = time.time()
if self.last_data >= timestamp:
raise ValueError("Time {} >= {} in load average calculation".format(self.last_data, timestamp))
self.last_data = timestamp
for meta in self.intervals.values():
meta.push(count, timestamp) | ['def', 'add', '(', 'self', ',', 'count', ',', 'timestamp', '=', 'None', ')', ':', 'if', 'timestamp', 'is', 'None', ':', 'timestamp', '=', 'time', '.', 'time', '(', ')', 'if', 'self', '.', 'last_data', '>=', 'timestamp', ':', 'raise', 'ValueError', '(', '"Time {} >= {} in load average calculation"', '.', 'format', '(', 'self', '.', 'last_data', ',', 'timestamp', ')', ')', 'self', '.', 'last_data', '=', 'timestamp', 'for', 'meta', 'in', 'self', '.', 'intervals', '.', 'values', '(', ')', ':', 'meta', '.', 'push', '(', 'count', ',', 'timestamp', ')'] | Add a value at the specified time to the series.
:param count: The number of work items ready at the specified
time.
:param timestamp: The timestamp to add. Defaults to None,
meaning current time. It should be strictly greater (newer)
than the last added timestamp. | ['Add', 'a', 'value', 'at', 'the', 'specified', 'time', 'to', 'the', 'series', '.'] | train | https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/utils/stats.py#L146-L162 |
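
The strictly-increasing timestamp guard can be sketched on its own (the surrounding class and its per-interval bookkeeping are outside this row):

last_data = 0.0

def add_point(timestamp):
    global last_data
    if last_data >= timestamp:
        raise ValueError("Time {} >= {} in load average calculation".format(last_data, timestamp))
    last_data = timestamp

add_point(1000.0)
add_point(1001.5)
# add_point(1001.0) would raise ValueError: each point must be newer than the last
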
9,677 | watson-developer-cloud/python-sdk | ibm_watson/discovery_v1.py | QueryEvidence._to_dict | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'document_id') and self.document_id is not None:
_dict['document_id'] = self.document_id
if hasattr(self, 'field') and self.field is not None:
_dict['field'] = self.field
if hasattr(self, 'start_offset') and self.start_offset is not None:
_dict['start_offset'] = self.start_offset
if hasattr(self, 'end_offset') and self.end_offset is not None:
_dict['end_offset'] = self.end_offset
if hasattr(self, 'entities') and self.entities is not None:
_dict['entities'] = [x._to_dict() for x in self.entities]
return _dict | python | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'document_id') and self.document_id is not None:
_dict['document_id'] = self.document_id
if hasattr(self, 'field') and self.field is not None:
_dict['field'] = self.field
if hasattr(self, 'start_offset') and self.start_offset is not None:
_dict['start_offset'] = self.start_offset
if hasattr(self, 'end_offset') and self.end_offset is not None:
_dict['end_offset'] = self.end_offset
if hasattr(self, 'entities') and self.entities is not None:
_dict['entities'] = [x._to_dict() for x in self.entities]
return _dict | ['def', '_to_dict', '(', 'self', ')', ':', '_dict', '=', '{', '}', 'if', 'hasattr', '(', 'self', ',', "'document_id'", ')', 'and', 'self', '.', 'document_id', 'is', 'not', 'None', ':', '_dict', '[', "'document_id'", ']', '=', 'self', '.', 'document_id', 'if', 'hasattr', '(', 'self', ',', "'field'", ')', 'and', 'self', '.', 'field', 'is', 'not', 'None', ':', '_dict', '[', "'field'", ']', '=', 'self', '.', 'field', 'if', 'hasattr', '(', 'self', ',', "'start_offset'", ')', 'and', 'self', '.', 'start_offset', 'is', 'not', 'None', ':', '_dict', '[', "'start_offset'", ']', '=', 'self', '.', 'start_offset', 'if', 'hasattr', '(', 'self', ',', "'end_offset'", ')', 'and', 'self', '.', 'end_offset', 'is', 'not', 'None', ':', '_dict', '[', "'end_offset'", ']', '=', 'self', '.', 'end_offset', 'if', 'hasattr', '(', 'self', ',', "'entities'", ')', 'and', 'self', '.', 'entities', 'is', 'not', 'None', ':', '_dict', '[', "'entities'", ']', '=', '[', 'x', '.', '_to_dict', '(', ')', 'for', 'x', 'in', 'self', '.', 'entities', ']', 'return', '_dict'] | Return a json dictionary representing this model. | ['Return', 'a', 'json', 'dictionary', 'representing', 'this', 'model', '.'] | train | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/discovery_v1.py#L9012-L9025 |
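
The guarded attribute-to-dict pattern generalizes; a minimal sketch with an unrelated toy class rather than the Watson model:

class Span:
    def __init__(self, start=None, end=None):
        self.start = start
        self.end = end

    def to_dict(self):
        out = {}
        for field in ('start', 'end'):
            value = getattr(self, field, None)
            if value is not None:
                out[field] = value
        return out

print(Span(start=3).to_dict())   # {'start': 3}
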
9,678 | taborlab/FlowCal | setup.py | find_version | def find_version(file_path):
"""
Scrape version information from specified file path.
"""
with open(file_path, 'r') as f:
file_contents = f.read()
version_match = re.search(r"^__version__\s*=\s*['\"]([^'\"]*)['\"]",
file_contents, re.M)
if version_match:
return version_match.group(1)
else:
raise RuntimeError("unable to find version string") | python | def find_version(file_path):
"""
Scrape version information from specified file path.
"""
with open(file_path, 'r') as f:
file_contents = f.read()
version_match = re.search(r"^__version__\s*=\s*['\"]([^'\"]*)['\"]",
file_contents, re.M)
if version_match:
return version_match.group(1)
else:
raise RuntimeError("unable to find version string") | ['def', 'find_version', '(', 'file_path', ')', ':', 'with', 'open', '(', 'file_path', ',', "'r'", ')', 'as', 'f', ':', 'file_contents', '=', 'f', '.', 'read', '(', ')', 'version_match', '=', 're', '.', 'search', '(', 'r"^__version__\\s*=\\s*[\'\\"]([^\'\\"]*)[\'\\"]"', ',', 'file_contents', ',', 're', '.', 'M', ')', 'if', 'version_match', ':', 'return', 'version_match', '.', 'group', '(', '1', ')', 'else', ':', 'raise', 'RuntimeError', '(', '"unable to find version string"', ')'] | Scrape version information from specified file path. | ['Scrape', 'version', 'information', 'from', 'specified', 'file', 'path', '.'] | train | https://github.com/taborlab/FlowCal/blob/031a7af82acb1d46879a8e384a1a00f27f0bdc7a/setup.py#L14-L26 |
9,679 | ArduPilot/MAVProxy | MAVProxy/modules/mavproxy_output.py | OutputModule.cmd_output | def cmd_output(self, args):
'''handle output commands'''
if len(args) < 1 or args[0] == "list":
self.cmd_output_list()
elif args[0] == "add":
if len(args) != 2:
print("Usage: output add OUTPUT")
return
self.cmd_output_add(args[1:])
elif args[0] == "remove":
if len(args) != 2:
print("Usage: output remove OUTPUT")
return
self.cmd_output_remove(args[1:])
elif args[0] == "sysid":
if len(args) != 3:
print("Usage: output sysid SYSID OUTPUT")
return
self.cmd_output_sysid(args[1:])
else:
print("usage: output <list|add|remove|sysid>") | python | def cmd_output(self, args):
'''handle output commands'''
if len(args) < 1 or args[0] == "list":
self.cmd_output_list()
elif args[0] == "add":
if len(args) != 2:
print("Usage: output add OUTPUT")
return
self.cmd_output_add(args[1:])
elif args[0] == "remove":
if len(args) != 2:
print("Usage: output remove OUTPUT")
return
self.cmd_output_remove(args[1:])
elif args[0] == "sysid":
if len(args) != 3:
print("Usage: output sysid SYSID OUTPUT")
return
self.cmd_output_sysid(args[1:])
else:
print("usage: output <list|add|remove|sysid>") | ['def', 'cmd_output', '(', 'self', ',', 'args', ')', ':', 'if', 'len', '(', 'args', ')', '<', '1', 'or', 'args', '[', '0', ']', '==', '"list"', ':', 'self', '.', 'cmd_output_list', '(', ')', 'elif', 'args', '[', '0', ']', '==', '"add"', ':', 'if', 'len', '(', 'args', ')', '!=', '2', ':', 'print', '(', '"Usage: output add OUTPUT"', ')', 'return', 'self', '.', 'cmd_output_add', '(', 'args', '[', '1', ':', ']', ')', 'elif', 'args', '[', '0', ']', '==', '"remove"', ':', 'if', 'len', '(', 'args', ')', '!=', '2', ':', 'print', '(', '"Usage: output remove OUTPUT"', ')', 'return', 'self', '.', 'cmd_output_remove', '(', 'args', '[', '1', ':', ']', ')', 'elif', 'args', '[', '0', ']', '==', '"sysid"', ':', 'if', 'len', '(', 'args', ')', '!=', '3', ':', 'print', '(', '"Usage: output sysid SYSID OUTPUT"', ')', 'return', 'self', '.', 'cmd_output_sysid', '(', 'args', '[', '1', ':', ']', ')', 'else', ':', 'print', '(', '"usage: output <list|add|remove|sysid>"', ')'] | handle output commands | ['handle', 'output', 'commands'] | train | https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_output.py#L21-L41 |
9,680 | srsudar/eg | eg/config.py | _inform_if_path_does_not_exist | def _inform_if_path_does_not_exist(path):
"""
If the path does not exist, print a message saying so. This is intended to
be helpful to users if they specify a custom path that eg cannot find.
"""
expanded_path = get_expanded_path(path)
if not os.path.exists(expanded_path):
print('Could not find custom path at: {}'.format(expanded_path)) | python | def _inform_if_path_does_not_exist(path):
"""
If the path does not exist, print a message saying so. This is intended to
be helpful to users if they specify a custom path that eg cannot find.
"""
expanded_path = get_expanded_path(path)
if not os.path.exists(expanded_path):
print('Could not find custom path at: {}'.format(expanded_path)) | ['def', '_inform_if_path_does_not_exist', '(', 'path', ')', ':', 'expanded_path', '=', 'get_expanded_path', '(', 'path', ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'expanded_path', ')', ':', 'print', '(', "'Could not find custom path at: {}'", '.', 'format', '(', 'expanded_path', ')', ')'] | If the path does not exist, print a message saying so. This is intended to
be helpful to users if they specify a custom path that eg cannot find. | ['If', 'the', 'path', 'does', 'not', 'exist', 'print', 'a', 'message', 'saying', 'so', '.', 'This', 'is', 'intended', 'to', 'be', 'helpful', 'to', 'users', 'if', 'they', 'specify', 'a', 'custom', 'path', 'that', 'eg', 'cannot', 'find', '.'] | train | https://github.com/srsudar/eg/blob/96142a74f4416b4a7000c85032c070df713b849e/eg/config.py#L383-L390 |
9,681 | torre76/gd_shortener | gdshortener/gdshortener.py | GDBaseShortener.lookup | def lookup(self, short_url):
'''
Lookup an URL shortened with `is.gd - v.gd url service <http://is.gd/developers.php>`_ and return the real url
:param short_url: the url shortened with .gd service
:type short_url: str.
:returns: str. -- The original url that was shortened with .gd service
:raises: **IOError** when timeout with .gd service occurs
**ValueError** if .gd response is malformed
:class:`gdshortener.GDMalformedURLError` if the previously shortened URL provided is malformed
:class:`gdshortener.GDShortURLError` if the custom URL requested is not available or disabled by .gd service
:class:`gdshortener.GDRateLimitError` if the request rate is exceeded for .gd service
             :class:`gdshortener.GDGenericError` in case of generic error from .gd service (maintenance)
'''
if short_url is None or not isinstance(short_url, basestring) or len(short_url.strip()) == 0:
raise GDMalformedURLError('The shortened URL must be a non empty string')
        # Build data for post
data = {
'format': 'json',
'shorturl': short_url
}
opener = urllib2.build_opener()
headers = { 'User-Agent' : self._user_agent }
req = urllib2.Request("{0}/forward.php".format(self.shortener_url), urllib.urlencode(data), headers)
f_desc = opener.open(req, timeout = self._timeout)
response = json.loads(f_desc.read())
if 'url' in response:
# Success!
return HTMLParser.HTMLParser().unescape(urllib.unquote(response['url']))
else:
# Error
error_code = int(response['errorcode'])
error_description = str(response['errormessage'])
if error_code == 1:
raise GDMalformedURLError(error_description)
if error_code == 2:
raise GDShortURLError(error_description)
if error_code == 3:
raise GDRateLimitError(error_description)
if error_code == 4:
raise GDGenericError(error_description) | python | def lookup(self, short_url):
'''
Lookup an URL shortened with `is.gd - v.gd url service <http://is.gd/developers.php>`_ and return the real url
:param short_url: the url shortened with .gd service
:type short_url: str.
:returns: str. -- The original url that was shortened with .gd service
:raises: **IOError** when timeout with .gd service occurs
**ValueError** if .gd response is malformed
:class:`gdshortener.GDMalformedURLError` if the previously shortened URL provided is malformed
:class:`gdshortener.GDShortURLError` if the custom URL requested is not available or disabled by .gd service
:class:`gdshortener.GDRateLimitError` if the request rate is exceeded for .gd service
             :class:`gdshortener.GDGenericError` in case of generic error from .gd service (maintenance)
'''
if short_url is None or not isinstance(short_url, basestring) or len(short_url.strip()) == 0:
raise GDMalformedURLError('The shortened URL must be a non empty string')
        # Build data for post
data = {
'format': 'json',
'shorturl': short_url
}
opener = urllib2.build_opener()
headers = { 'User-Agent' : self._user_agent }
req = urllib2.Request("{0}/forward.php".format(self.shortener_url), urllib.urlencode(data), headers)
f_desc = opener.open(req, timeout = self._timeout)
response = json.loads(f_desc.read())
if 'url' in response:
# Success!
return HTMLParser.HTMLParser().unescape(urllib.unquote(response['url']))
else:
# Error
error_code = int(response['errorcode'])
error_description = str(response['errormessage'])
if error_code == 1:
raise GDMalformedURLError(error_description)
if error_code == 2:
raise GDShortURLError(error_description)
if error_code == 3:
raise GDRateLimitError(error_description)
if error_code == 4:
raise GDGenericError(error_description) | ['def', 'lookup', '(', 'self', ',', 'short_url', ')', ':', 'if', 'short_url', 'is', 'None', 'or', 'not', 'isinstance', '(', 'short_url', ',', 'basestring', ')', 'or', 'len', '(', 'short_url', '.', 'strip', '(', ')', ')', '==', '0', ':', 'raise', 'GDMalformedURLError', '(', "'The shortened URL must be a non empty string'", ')', '# Build data for porst', 'data', '=', '{', "'format'", ':', "'json'", ',', "'shorturl'", ':', 'short_url', '}', 'opener', '=', 'urllib2', '.', 'build_opener', '(', ')', 'headers', '=', '{', "'User-Agent'", ':', 'self', '.', '_user_agent', '}', 'req', '=', 'urllib2', '.', 'Request', '(', '"{0}/forward.php"', '.', 'format', '(', 'self', '.', 'shortener_url', ')', ',', 'urllib', '.', 'urlencode', '(', 'data', ')', ',', 'headers', ')', 'f_desc', '=', 'opener', '.', 'open', '(', 'req', ',', 'timeout', '=', 'self', '.', '_timeout', ')', 'response', '=', 'json', '.', 'loads', '(', 'f_desc', '.', 'read', '(', ')', ')', 'if', "'url'", 'in', 'response', ':', '# Success!', 'return', 'HTMLParser', '.', 'HTMLParser', '(', ')', '.', 'unescape', '(', 'urllib', '.', 'unquote', '(', 'response', '[', "'url'", ']', ')', ')', 'else', ':', '# Error', 'error_code', '=', 'int', '(', 'response', '[', "'errorcode'", ']', ')', 'error_description', '=', 'str', '(', 'response', '[', "'errormessage'", ']', ')', 'if', 'error_code', '==', '1', ':', 'raise', 'GDMalformedURLError', '(', 'error_description', ')', 'if', 'error_code', '==', '2', ':', 'raise', 'GDShortURLError', '(', 'error_description', ')', 'if', 'error_code', '==', '3', ':', 'raise', 'GDRateLimitError', '(', 'error_description', ')', 'if', 'error_code', '==', '4', ':', 'raise', 'GDGenericError', '(', 'error_description', ')'] | Lookup an URL shortened with `is.gd - v.gd url service <http://is.gd/developers.php>`_ and return the real url
:param short_url: the url shortened with .gd service
:type short_url: str.
:returns: str. -- The original url that was shortened with .gd service
:raises: **IOError** when timeout with .gd service occurs
**ValueError** if .gd response is malformed
:class:`gdshortener.GDMalformedURLError` if the previously shortened URL provided is malformed
:class:`gdshortener.GDShortURLError` if the custom URL requested is not available or disabled by .gd service
:class:`gdshortener.GDRateLimitError` if the request rate is exceeded for .gd service
:class:`gdshortener.GDGenericError` in case of generic error from .gd service (mainteinance) | ['Lookup', 'an', 'URL', 'shortened', 'with', 'is', '.', 'gd', '-', 'v', '.', 'gd', 'url', 'service', '<http', ':', '//', 'is', '.', 'gd', '/', 'developers', '.', 'php', '>', '_', 'and', 'return', 'the', 'real', 'url', ':', 'param', 'short_url', ':', 'the', 'url', 'shortened', 'with', '.', 'gd', 'service', ':', 'type', 'short_url', ':', 'str', '.', ':', 'returns', ':', 'str', '.', '--', 'The', 'original', 'url', 'that', 'was', 'shortened', 'with', '.', 'gd', 'service', ':', 'raises', ':', '**', 'IOError', '**', 'when', 'timeout', 'with', '.', 'gd', 'service', 'occurs', '**', 'ValueError', '**', 'if', '.', 'gd', 'response', 'is', 'malformed', ':', 'class', ':', 'gdshortener', '.', 'GDMalformedURLError', 'if', 'the', 'previously', 'shortened', 'URL', 'provided', 'is', 'malformed', ':', 'class', ':', 'gdshortener', '.', 'GDShortURLError', 'if', 'the', 'custom', 'URL', 'requested', 'is', 'not', 'available', 'or', 'disabled', 'by', '.', 'gd', 'service', ':', 'class', ':', 'gdshortener', '.', 'GDRateLimitError', 'if', 'the', 'request', 'rate', 'is', 'exceeded', 'for', '.', 'gd', 'service', ':', 'class', ':', 'gdshortener', '.', 'GDGenericError', 'in', 'case', 'of', 'generic', 'error', 'from', '.', 'gd', 'service', '(', 'mainteinance', ')'] | train | https://github.com/torre76/gd_shortener/blob/a34becf15512e6193960c93edad6258928705bfa/gdshortener/gdshortener.py#L161-L208 |
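A minimal usage sketch for lookup(); the concrete shortener class name and the short URL below are assumptions, not taken from the row above:
import gdshortener

# assumes the package exposes an is.gd shortener class wrapping this lookup() method
shortener = gdshortener.ISGDShortener()
original_url = shortener.lookup('http://is.gd/example')  # assumed short URL
print(original_url)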
9,682 | log2timeline/plaso | plaso/cli/log2timeline_tool.py | Log2TimelineTool.ShowInfo | def ShowInfo(self):
"""Shows information about available hashers, parsers, plugins, etc."""
self._output_writer.Write(
'{0:=^80s}\n'.format(' log2timeline/plaso information '))
plugin_list = self._GetPluginData()
for header, data in plugin_list.items():
table_view = views.ViewsFactory.GetTableView(
self._views_format_type, column_names=['Name', 'Description'],
title=header)
for entry_header, entry_data in sorted(data):
table_view.AddRow([entry_header, entry_data])
table_view.Write(self._output_writer) | python | def ShowInfo(self):
"""Shows information about available hashers, parsers, plugins, etc."""
self._output_writer.Write(
'{0:=^80s}\n'.format(' log2timeline/plaso information '))
plugin_list = self._GetPluginData()
for header, data in plugin_list.items():
table_view = views.ViewsFactory.GetTableView(
self._views_format_type, column_names=['Name', 'Description'],
title=header)
for entry_header, entry_data in sorted(data):
table_view.AddRow([entry_header, entry_data])
table_view.Write(self._output_writer) | ['def', 'ShowInfo', '(', 'self', ')', ':', 'self', '.', '_output_writer', '.', 'Write', '(', "'{0:=^80s}\\n'", '.', 'format', '(', "' log2timeline/plaso information '", ')', ')', 'plugin_list', '=', 'self', '.', '_GetPluginData', '(', ')', 'for', 'header', ',', 'data', 'in', 'plugin_list', '.', 'items', '(', ')', ':', 'table_view', '=', 'views', '.', 'ViewsFactory', '.', 'GetTableView', '(', 'self', '.', '_views_format_type', ',', 'column_names', '=', '[', "'Name'", ',', "'Description'", ']', ',', 'title', '=', 'header', ')', 'for', 'entry_header', ',', 'entry_data', 'in', 'sorted', '(', 'data', ')', ':', 'table_view', '.', 'AddRow', '(', '[', 'entry_header', ',', 'entry_data', ']', ')', 'table_view', '.', 'Write', '(', 'self', '.', '_output_writer', ')'] | Shows information about available hashers, parsers, plugins, etc. | ['Shows', 'information', 'about', 'available', 'hashers', 'parsers', 'plugins', 'etc', '.'] | train | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/cli/log2timeline_tool.py#L454-L466 |
9,683 | cmbruns/pyopenvr | src/openvr/__init__.py | IVRSystem.triggerHapticPulse | def triggerHapticPulse(self, unControllerDeviceIndex, unAxisId, usDurationMicroSec):
"""
Trigger a single haptic pulse on a controller. After this call the application may not trigger another haptic pulse on this controller
and axis combination for 5ms. This function is deprecated in favor of the new IVRInput system.
"""
fn = self.function_table.triggerHapticPulse
fn(unControllerDeviceIndex, unAxisId, usDurationMicroSec) | python | def triggerHapticPulse(self, unControllerDeviceIndex, unAxisId, usDurationMicroSec):
"""
Trigger a single haptic pulse on a controller. After this call the application may not trigger another haptic pulse on this controller
and axis combination for 5ms. This function is deprecated in favor of the new IVRInput system.
"""
fn = self.function_table.triggerHapticPulse
fn(unControllerDeviceIndex, unAxisId, usDurationMicroSec) | ['def', 'triggerHapticPulse', '(', 'self', ',', 'unControllerDeviceIndex', ',', 'unAxisId', ',', 'usDurationMicroSec', ')', ':', 'fn', '=', 'self', '.', 'function_table', '.', 'triggerHapticPulse', 'fn', '(', 'unControllerDeviceIndex', ',', 'unAxisId', ',', 'usDurationMicroSec', ')'] | Trigger a single haptic pulse on a controller. After this call the application may not trigger another haptic pulse on this controller
and axis combination for 5ms. This function is deprecated in favor of the new IVRInput system. | ['Trigger', 'a', 'single', 'haptic', 'pulse', 'on', 'a', 'controller', '.', 'After', 'this', 'call', 'the', 'application', 'may', 'not', 'trigger', 'another', 'haptic', 'pulse', 'on', 'this', 'controller', 'and', 'axis', 'combination', 'for', '5ms', '.', 'This', 'function', 'is', 'deprecated', 'in', 'favor', 'of', 'the', 'new', 'IVRInput', 'system', '.'] | train | https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L3042-L3049 |
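A minimal sketch of calling triggerHapticPulse() through pyopenvr; it assumes a running SteamVR setup with at least one tracked controller, and the device index, axis id, and pulse length below are assumptions chosen for illustration:
import openvr

vr_system = openvr.init(openvr.VRApplication_Other)  # returns an IVRSystem instance
try:
    controller_index = 1  # assumed index of a tracked controller
    vr_system.triggerHapticPulse(controller_index, 0, 3000)  # 3000 microsecond pulse on axis 0
finally:
    openvr.shutdown()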
9,684 | saltstack/salt | salt/modules/boto_iam.py | delete_server_cert | def delete_server_cert(cert_name, region=None, key=None, keyid=None, profile=None):
'''
Deletes a certificate from Amazon.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt myminion boto_iam.delete_server_cert mycert_name
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
return conn.delete_server_cert(cert_name)
except boto.exception.BotoServerError as e:
log.debug(e)
log.error('Failed to delete certificate %s.', cert_name)
return False | python | def delete_server_cert(cert_name, region=None, key=None, keyid=None, profile=None):
'''
Deletes a certificate from Amazon.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt myminion boto_iam.delete_server_cert mycert_name
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
return conn.delete_server_cert(cert_name)
except boto.exception.BotoServerError as e:
log.debug(e)
log.error('Failed to delete certificate %s.', cert_name)
return False | ['def', 'delete_server_cert', '(', 'cert_name', ',', 'region', '=', 'None', ',', 'key', '=', 'None', ',', 'keyid', '=', 'None', ',', 'profile', '=', 'None', ')', ':', 'conn', '=', '_get_conn', '(', 'region', '=', 'region', ',', 'key', '=', 'key', ',', 'keyid', '=', 'keyid', ',', 'profile', '=', 'profile', ')', 'try', ':', 'return', 'conn', '.', 'delete_server_cert', '(', 'cert_name', ')', 'except', 'boto', '.', 'exception', '.', 'BotoServerError', 'as', 'e', ':', 'log', '.', 'debug', '(', 'e', ')', 'log', '.', 'error', '(', "'Failed to delete certificate %s.'", ',', 'cert_name', ')', 'return', 'False'] | Deletes a certificate from Amazon.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt myminion boto_iam.delete_server_cert mycert_name | ['Deletes', 'a', 'certificate', 'from', 'Amazon', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_iam.py#L1599-L1617 |
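A sketch of invoking the same execution module from Python via Salt's LocalClient, mirroring the CLI example in the docstring; it assumes a running Salt master/minion pair with configured AWS credentials, and the minion id and certificate name are placeholders:
import salt.client

local = salt.client.LocalClient()
# placeholder minion id and certificate name
result = local.cmd('myminion', 'boto_iam.delete_server_cert', ['mycert_name'])
print(result)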
9,685 | web-push-libs/pywebpush | pywebpush/__init__.py | WebPusher.send | def send(self, data=None, headers=None, ttl=0, gcm_key=None, reg_id=None,
content_encoding="aes128gcm", curl=False, timeout=None):
"""Encode and send the data to the Push Service.
:param data: A serialized block of data (see encode() ).
:type data: str
:param headers: A dictionary containing any additional HTTP headers.
:type headers: dict
:param ttl: The Time To Live in seconds for this message if the
recipient is not online. (Defaults to "0", which discards the
message immediately if the recipient is unavailable.)
:type ttl: int
:param gcm_key: API key obtained from the Google Developer Console.
Needed if endpoint is https://android.googleapis.com/gcm/send
:type gcm_key: string
:param reg_id: registration id of the recipient. If not provided,
it will be extracted from the endpoint.
:type reg_id: str
:param content_encoding: ECE content encoding (defaults to "aes128gcm")
:type content_encoding: str
:param curl: Display output as `curl` command instead of sending
:type curl: bool
:param timeout: POST requests timeout
:type timeout: float or tuple
"""
# Encode the data.
if headers is None:
headers = dict()
encoded = {}
headers = CaseInsensitiveDict(headers)
if data:
encoded = self.encode(data, content_encoding)
if "crypto_key" in encoded:
# Append the p256dh to the end of any existing crypto-key
crypto_key = headers.get("crypto-key", "")
if crypto_key:
# due to some confusion by a push service provider, we
# should use ';' instead of ',' to append the headers.
# see
# https://github.com/webpush-wg/webpush-encryption/issues/6
crypto_key += ';'
crypto_key += (
"dh=" + encoded["crypto_key"].decode('utf8'))
headers.update({
'crypto-key': crypto_key
})
if "salt" in encoded:
headers.update({
'encryption': "salt=" + encoded['salt'].decode('utf8')
})
headers.update({
'content-encoding': content_encoding,
})
if gcm_key:
# guess if it is a legacy GCM project key or actual FCM key
# gcm keys are all about 40 chars (use 100 for confidence),
# fcm keys are 153-175 chars
if len(gcm_key) < 100:
endpoint = 'https://android.googleapis.com/gcm/send'
else:
endpoint = 'https://fcm.googleapis.com/fcm/send'
reg_ids = []
if not reg_id:
reg_id = self.subscription_info['endpoint'].rsplit('/', 1)[-1]
reg_ids.append(reg_id)
gcm_data = dict()
gcm_data['registration_ids'] = reg_ids
if data:
gcm_data['raw_data'] = base64.b64encode(
encoded.get('body')).decode('utf8')
gcm_data['time_to_live'] = int(
headers['ttl'] if 'ttl' in headers else ttl)
encoded_data = json.dumps(gcm_data)
headers.update({
'Authorization': 'key='+gcm_key,
'Content-Type': 'application/json',
})
else:
encoded_data = encoded.get('body')
endpoint = self.subscription_info['endpoint']
if 'ttl' not in headers or ttl:
headers['ttl'] = str(ttl or 0)
# Additionally useful headers:
# Authorization / Crypto-Key (VAPID headers)
if curl:
return self.as_curl(endpoint, encoded_data, headers)
return self.requests_method.post(endpoint,
data=encoded_data,
headers=headers,
timeout=timeout) | python | def send(self, data=None, headers=None, ttl=0, gcm_key=None, reg_id=None,
content_encoding="aes128gcm", curl=False, timeout=None):
"""Encode and send the data to the Push Service.
:param data: A serialized block of data (see encode() ).
:type data: str
:param headers: A dictionary containing any additional HTTP headers.
:type headers: dict
:param ttl: The Time To Live in seconds for this message if the
recipient is not online. (Defaults to "0", which discards the
message immediately if the recipient is unavailable.)
:type ttl: int
:param gcm_key: API key obtained from the Google Developer Console.
Needed if endpoint is https://android.googleapis.com/gcm/send
:type gcm_key: string
:param reg_id: registration id of the recipient. If not provided,
it will be extracted from the endpoint.
:type reg_id: str
:param content_encoding: ECE content encoding (defaults to "aes128gcm")
:type content_encoding: str
:param curl: Display output as `curl` command instead of sending
:type curl: bool
:param timeout: POST requests timeout
:type timeout: float or tuple
"""
# Encode the data.
if headers is None:
headers = dict()
encoded = {}
headers = CaseInsensitiveDict(headers)
if data:
encoded = self.encode(data, content_encoding)
if "crypto_key" in encoded:
# Append the p256dh to the end of any existing crypto-key
crypto_key = headers.get("crypto-key", "")
if crypto_key:
# due to some confusion by a push service provider, we
# should use ';' instead of ',' to append the headers.
# see
# https://github.com/webpush-wg/webpush-encryption/issues/6
crypto_key += ';'
crypto_key += (
"dh=" + encoded["crypto_key"].decode('utf8'))
headers.update({
'crypto-key': crypto_key
})
if "salt" in encoded:
headers.update({
'encryption': "salt=" + encoded['salt'].decode('utf8')
})
headers.update({
'content-encoding': content_encoding,
})
if gcm_key:
# guess if it is a legacy GCM project key or actual FCM key
# gcm keys are all about 40 chars (use 100 for confidence),
# fcm keys are 153-175 chars
if len(gcm_key) < 100:
endpoint = 'https://android.googleapis.com/gcm/send'
else:
endpoint = 'https://fcm.googleapis.com/fcm/send'
reg_ids = []
if not reg_id:
reg_id = self.subscription_info['endpoint'].rsplit('/', 1)[-1]
reg_ids.append(reg_id)
gcm_data = dict()
gcm_data['registration_ids'] = reg_ids
if data:
gcm_data['raw_data'] = base64.b64encode(
encoded.get('body')).decode('utf8')
gcm_data['time_to_live'] = int(
headers['ttl'] if 'ttl' in headers else ttl)
encoded_data = json.dumps(gcm_data)
headers.update({
'Authorization': 'key='+gcm_key,
'Content-Type': 'application/json',
})
else:
encoded_data = encoded.get('body')
endpoint = self.subscription_info['endpoint']
if 'ttl' not in headers or ttl:
headers['ttl'] = str(ttl or 0)
# Additionally useful headers:
# Authorization / Crypto-Key (VAPID headers)
if curl:
return self.as_curl(endpoint, encoded_data, headers)
return self.requests_method.post(endpoint,
data=encoded_data,
headers=headers,
timeout=timeout) | ['def', 'send', '(', 'self', ',', 'data', '=', 'None', ',', 'headers', '=', 'None', ',', 'ttl', '=', '0', ',', 'gcm_key', '=', 'None', ',', 'reg_id', '=', 'None', ',', 'content_encoding', '=', '"aes128gcm"', ',', 'curl', '=', 'False', ',', 'timeout', '=', 'None', ')', ':', '# Encode the data.', 'if', 'headers', 'is', 'None', ':', 'headers', '=', 'dict', '(', ')', 'encoded', '=', '{', '}', 'headers', '=', 'CaseInsensitiveDict', '(', 'headers', ')', 'if', 'data', ':', 'encoded', '=', 'self', '.', 'encode', '(', 'data', ',', 'content_encoding', ')', 'if', '"crypto_key"', 'in', 'encoded', ':', '# Append the p256dh to the end of any existing crypto-key', 'crypto_key', '=', 'headers', '.', 'get', '(', '"crypto-key"', ',', '""', ')', 'if', 'crypto_key', ':', '# due to some confusion by a push service provider, we', "# should use ';' instead of ',' to append the headers.", '# see', '# https://github.com/webpush-wg/webpush-encryption/issues/6', 'crypto_key', '+=', "';'", 'crypto_key', '+=', '(', '"dh="', '+', 'encoded', '[', '"crypto_key"', ']', '.', 'decode', '(', "'utf8'", ')', ')', 'headers', '.', 'update', '(', '{', "'crypto-key'", ':', 'crypto_key', '}', ')', 'if', '"salt"', 'in', 'encoded', ':', 'headers', '.', 'update', '(', '{', "'encryption'", ':', '"salt="', '+', 'encoded', '[', "'salt'", ']', '.', 'decode', '(', "'utf8'", ')', '}', ')', 'headers', '.', 'update', '(', '{', "'content-encoding'", ':', 'content_encoding', ',', '}', ')', 'if', 'gcm_key', ':', '# guess if it is a legacy GCM project key or actual FCM key', '# gcm keys are all about 40 chars (use 100 for confidence),', '# fcm keys are 153-175 chars', 'if', 'len', '(', 'gcm_key', ')', '<', '100', ':', 'endpoint', '=', "'https://android.googleapis.com/gcm/send'", 'else', ':', 'endpoint', '=', "'https://fcm.googleapis.com/fcm/send'", 'reg_ids', '=', '[', ']', 'if', 'not', 'reg_id', ':', 'reg_id', '=', 'self', '.', 'subscription_info', '[', "'endpoint'", ']', '.', 'rsplit', '(', "'/'", ',', '1', ')', '[', '-', '1', ']', 'reg_ids', '.', 'append', '(', 'reg_id', ')', 'gcm_data', '=', 'dict', '(', ')', 'gcm_data', '[', "'registration_ids'", ']', '=', 'reg_ids', 'if', 'data', ':', 'gcm_data', '[', "'raw_data'", ']', '=', 'base64', '.', 'b64encode', '(', 'encoded', '.', 'get', '(', "'body'", ')', ')', '.', 'decode', '(', "'utf8'", ')', 'gcm_data', '[', "'time_to_live'", ']', '=', 'int', '(', 'headers', '[', "'ttl'", ']', 'if', "'ttl'", 'in', 'headers', 'else', 'ttl', ')', 'encoded_data', '=', 'json', '.', 'dumps', '(', 'gcm_data', ')', 'headers', '.', 'update', '(', '{', "'Authorization'", ':', "'key='", '+', 'gcm_key', ',', "'Content-Type'", ':', "'application/json'", ',', '}', ')', 'else', ':', 'encoded_data', '=', 'encoded', '.', 'get', '(', "'body'", ')', 'endpoint', '=', 'self', '.', 'subscription_info', '[', "'endpoint'", ']', 'if', "'ttl'", 'not', 'in', 'headers', 'or', 'ttl', ':', 'headers', '[', "'ttl'", ']', '=', 'str', '(', 'ttl', 'or', '0', ')', '# Additionally useful headers:', '# Authorization / Crypto-Key (VAPID headers)', 'if', 'curl', ':', 'return', 'self', '.', 'as_curl', '(', 'endpoint', ',', 'encoded_data', ',', 'headers', ')', 'return', 'self', '.', 'requests_method', '.', 'post', '(', 'endpoint', ',', 'data', '=', 'encoded_data', ',', 'headers', '=', 'headers', ',', 'timeout', '=', 'timeout', ')'] | Encode and send the data to the Push Service.
:param data: A serialized block of data (see encode() ).
:type data: str
:param headers: A dictionary containing any additional HTTP headers.
:type headers: dict
:param ttl: The Time To Live in seconds for this message if the
recipient is not online. (Defaults to "0", which discards the
message immediately if the recipient is unavailable.)
:type ttl: int
:param gcm_key: API key obtained from the Google Developer Console.
Needed if endpoint is https://android.googleapis.com/gcm/send
:type gcm_key: string
:param reg_id: registration id of the recipient. If not provided,
it will be extracted from the endpoint.
:type reg_id: str
:param content_encoding: ECE content encoding (defaults to "aes128gcm")
:type content_encoding: str
:param curl: Display output as `curl` command instead of sending
:type curl: bool
:param timeout: POST requests timeout
:type timeout: float or tuple | ['Encode', 'and', 'send', 'the', 'data', 'to', 'the', 'Push', 'Service', '.'] | train | https://github.com/web-push-libs/pywebpush/blob/2a23f45b7819e31bd030de9fe1357a1cf7dcfdc4/pywebpush/__init__.py#L256-L347 |
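A minimal sketch of driving WebPusher.send(); the subscription_info values are placeholders that must be replaced with a real browser PushSubscription (and are typically paired with VAPID headers), which is an assumption not covered by the row above:
from pywebpush import WebPusher

subscription_info = {
    'endpoint': 'https://updates.push.services.mozilla.com/wpush/v2/...',  # placeholder endpoint
    'keys': {
        'p256dh': '<client public key from the browser subscription>',  # placeholder
        'auth': '<client auth secret from the browser subscription>',   # placeholder
    },
}

# with real keys this encrypts the payload and POSTs it to the push service;
# curl=True would instead return an equivalent curl command string
response = WebPusher(subscription_info).send(data='{"msg": "hello"}', ttl=30)
print(response.status_code)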
9,686 | d0c-s4vage/pfp | pfp/bitwrap.py | BitwrappedStream.read | def read(self, num):
"""Read ``num`` number of bytes from the stream. Note that this will
        automatically reset/end the current bit-reading if it does not
end on an even byte AND ``self.padded`` is True. If ``self.padded`` is
True, then the entire stream is treated as a bitstream.
:num: number of bytes to read
:returns: the read bytes, or empty string if EOF has been reached
"""
start_pos = self.tell()
if self.padded:
# we toss out any uneven bytes
self._bits.clear()
res = utils.binary(self._stream.read(num))
else:
bits = self.read_bits(num * 8)
res = bits_to_bytes(bits)
res = utils.binary(res)
end_pos = self.tell()
self._update_consumed_ranges(start_pos, end_pos)
return res | python | def read(self, num):
"""Read ``num`` number of bytes from the stream. Note that this will
        automatically reset/end the current bit-reading if it does not
end on an even byte AND ``self.padded`` is True. If ``self.padded`` is
True, then the entire stream is treated as a bitstream.
:num: number of bytes to read
:returns: the read bytes, or empty string if EOF has been reached
"""
start_pos = self.tell()
if self.padded:
# we toss out any uneven bytes
self._bits.clear()
res = utils.binary(self._stream.read(num))
else:
bits = self.read_bits(num * 8)
res = bits_to_bytes(bits)
res = utils.binary(res)
end_pos = self.tell()
self._update_consumed_ranges(start_pos, end_pos)
return res | ['def', 'read', '(', 'self', ',', 'num', ')', ':', 'start_pos', '=', 'self', '.', 'tell', '(', ')', 'if', 'self', '.', 'padded', ':', '# we toss out any uneven bytes', 'self', '.', '_bits', '.', 'clear', '(', ')', 'res', '=', 'utils', '.', 'binary', '(', 'self', '.', '_stream', '.', 'read', '(', 'num', ')', ')', 'else', ':', 'bits', '=', 'self', '.', 'read_bits', '(', 'num', '*', '8', ')', 'res', '=', 'bits_to_bytes', '(', 'bits', ')', 'res', '=', 'utils', '.', 'binary', '(', 'res', ')', 'end_pos', '=', 'self', '.', 'tell', '(', ')', 'self', '.', '_update_consumed_ranges', '(', 'start_pos', ',', 'end_pos', ')', 'return', 'res'] | Read ``num`` number of bytes from the stream. Note that this will
        automatically reset/end the current bit-reading if it does not
end on an even byte AND ``self.padded`` is True. If ``self.padded`` is
True, then the entire stream is treated as a bitstream.
:num: number of bytes to read
:returns: the read bytes, or empty string if EOF has been reached | ['Read', 'num', 'number', 'of', 'bytes', 'from', 'the', 'stream', '.', 'Note', 'that', 'this', 'will', 'automatically', 'resets', '/', 'ends', 'the', 'current', 'bit', '-', 'reading', 'if', 'it', 'does', 'not', 'end', 'on', 'an', 'even', 'byte', 'AND', 'self', '.', 'padded', 'is', 'True', '.', 'If', 'self', '.', 'padded', 'is', 'True', 'then', 'the', 'entire', 'stream', 'is', 'treated', 'as', 'a', 'bitstream', '.'] | train | https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/bitwrap.py#L100-L123 |
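A minimal sketch of mixing bit-level and byte-level reads on a BitwrappedStream; the byte values are arbitrary, and the single-argument constructor plus the explicit padded=True assignment are assumptions made for clarity rather than taken from the row above:
import io
from pfp.bitwrap import BitwrappedStream

stream = BitwrappedStream(io.BytesIO(b"\xab\xcd\xef"))
stream.padded = True          # byte-aligned reads: leftover bits are discarded on read()
nibble = stream.read_bits(4)  # top four bits of 0xab -> [1, 0, 1, 0]
rest = stream.read(1)         # drops the remaining bits of 0xab and returns the next full byte, b"\xcd"
print(nibble, rest)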
9,687 | onnx/onnxmltools | onnxmltools/convert/coreml/shape_calculators/TensorToProbabilityMap.py | calculate_tensor_to_probability_map_output_shapes | def calculate_tensor_to_probability_map_output_shapes(operator):
'''
Allowed input/output patterns are
ONNX < 1.2
1. [1, C] ---> ---> A map
2. [1, C_1, ..., C_n] ---> A map
ONNX >= 1.2
1. [N, C] ---> ---> A sequence of maps
2. [N, C_1, ..., C_n] ---> A sequence of maps
    Note that N must be 1 currently if you're using ONNX<1.2 because old ZipMap doesn't produce a sequence of maps. If the
input is not [N, C], it will be reshaped into [N, C_1 x C_2, x ... x C_n] before being fed into ONNX ZipMap.
'''
check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)
check_input_and_output_types(operator, good_input_types=[FloatTensorType])
model_type = operator.raw_operator.WhichOneof('Type')
if model_type == 'neuralNetworkClassifier':
class_label_type = operator.raw_operator.neuralNetworkClassifier.WhichOneof('ClassLabels')
else:
raise TypeError('%s has no class label' % model_type)
N = operator.inputs[0].type.shape[0]
doc_string = operator.outputs[0].type.doc_string
if class_label_type == 'stringClassLabels':
if operator.target_opset < 7:
operator.outputs[0].type = DictionaryType(StringTensorType([1]), FloatTensorType([1]), doc_string)
else:
operator.outputs[0].type = \
SequenceType(DictionaryType(StringTensorType([]), FloatTensorType([])), N, doc_string)
elif class_label_type == 'int64ClassLabels':
if operator.target_opset < 7:
operator.outputs[0].type = DictionaryType(Int64TensorType([1]), FloatTensorType([1]), doc_string)
else:
operator.outputs[0].type = \
SequenceType(DictionaryType(Int64TensorType([]), FloatTensorType([])), N, doc_string)
else:
raise ValueError('Unsupported label type') | python | def calculate_tensor_to_probability_map_output_shapes(operator):
'''
Allowed input/output patterns are
ONNX < 1.2
1. [1, C] ---> ---> A map
2. [1, C_1, ..., C_n] ---> A map
ONNX >= 1.2
1. [N, C] ---> ---> A sequence of maps
2. [N, C_1, ..., C_n] ---> A sequence of maps
    Note that N must be 1 currently if you're using ONNX<1.2 because old ZipMap doesn't produce a sequence of maps. If the
input is not [N, C], it will be reshaped into [N, C_1 x C_2, x ... x C_n] before being fed into ONNX ZipMap.
'''
check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)
check_input_and_output_types(operator, good_input_types=[FloatTensorType])
model_type = operator.raw_operator.WhichOneof('Type')
if model_type == 'neuralNetworkClassifier':
class_label_type = operator.raw_operator.neuralNetworkClassifier.WhichOneof('ClassLabels')
else:
raise TypeError('%s has no class label' % model_type)
N = operator.inputs[0].type.shape[0]
doc_string = operator.outputs[0].type.doc_string
if class_label_type == 'stringClassLabels':
if operator.target_opset < 7:
operator.outputs[0].type = DictionaryType(StringTensorType([1]), FloatTensorType([1]), doc_string)
else:
operator.outputs[0].type = \
SequenceType(DictionaryType(StringTensorType([]), FloatTensorType([])), N, doc_string)
elif class_label_type == 'int64ClassLabels':
if operator.target_opset < 7:
operator.outputs[0].type = DictionaryType(Int64TensorType([1]), FloatTensorType([1]), doc_string)
else:
operator.outputs[0].type = \
SequenceType(DictionaryType(Int64TensorType([]), FloatTensorType([])), N, doc_string)
else:
raise ValueError('Unsupported label type') | ['def', 'calculate_tensor_to_probability_map_output_shapes', '(', 'operator', ')', ':', 'check_input_and_output_numbers', '(', 'operator', ',', 'input_count_range', '=', '1', ',', 'output_count_range', '=', '1', ')', 'check_input_and_output_types', '(', 'operator', ',', 'good_input_types', '=', '[', 'FloatTensorType', ']', ')', 'model_type', '=', 'operator', '.', 'raw_operator', '.', 'WhichOneof', '(', "'Type'", ')', 'if', 'model_type', '==', "'neuralNetworkClassifier'", ':', 'class_label_type', '=', 'operator', '.', 'raw_operator', '.', 'neuralNetworkClassifier', '.', 'WhichOneof', '(', "'ClassLabels'", ')', 'else', ':', 'raise', 'TypeError', '(', "'%s has no class label'", '%', 'model_type', ')', 'N', '=', 'operator', '.', 'inputs', '[', '0', ']', '.', 'type', '.', 'shape', '[', '0', ']', 'doc_string', '=', 'operator', '.', 'outputs', '[', '0', ']', '.', 'type', '.', 'doc_string', 'if', 'class_label_type', '==', "'stringClassLabels'", ':', 'if', 'operator', '.', 'target_opset', '<', '7', ':', 'operator', '.', 'outputs', '[', '0', ']', '.', 'type', '=', 'DictionaryType', '(', 'StringTensorType', '(', '[', '1', ']', ')', ',', 'FloatTensorType', '(', '[', '1', ']', ')', ',', 'doc_string', ')', 'else', ':', 'operator', '.', 'outputs', '[', '0', ']', '.', 'type', '=', 'SequenceType', '(', 'DictionaryType', '(', 'StringTensorType', '(', '[', ']', ')', ',', 'FloatTensorType', '(', '[', ']', ')', ')', ',', 'N', ',', 'doc_string', ')', 'elif', 'class_label_type', '==', "'int64ClassLabels'", ':', 'if', 'operator', '.', 'target_opset', '<', '7', ':', 'operator', '.', 'outputs', '[', '0', ']', '.', 'type', '=', 'DictionaryType', '(', 'Int64TensorType', '(', '[', '1', ']', ')', ',', 'FloatTensorType', '(', '[', '1', ']', ')', ',', 'doc_string', ')', 'else', ':', 'operator', '.', 'outputs', '[', '0', ']', '.', 'type', '=', 'SequenceType', '(', 'DictionaryType', '(', 'Int64TensorType', '(', '[', ']', ')', ',', 'FloatTensorType', '(', '[', ']', ')', ')', ',', 'N', ',', 'doc_string', ')', 'else', ':', 'raise', 'ValueError', '(', "'Unsupported label type'", ')'] | Allowed input/output patterns are
ONNX < 1.2
1. [1, C] ---> ---> A map
2. [1, C_1, ..., C_n] ---> A map
ONNX >= 1.2
1. [N, C] ---> ---> A sequence of maps
2. [N, C_1, ..., C_n] ---> A sequence of maps
    Note that N must be 1 currently if you're using ONNX<1.2 because old ZipMap doesn't produce a sequence of maps. If the
input is not [N, C], it will be reshaped into [N, C_1 x C_2, x ... x C_n] before being fed into ONNX ZipMap. | ['Allowed', 'input', '/', 'output', 'patterns', 'are', 'ONNX', '<', '1', '.', '2', '1', '.', '[', '1', 'C', ']', '---', '>', '---', '>', 'A', 'map', '2', '.', '[', '1', 'C_1', '...', 'C_n', ']', '---', '>', 'A', 'map', 'ONNX', '>', '=', '1', '.', '2', '1', '.', '[', 'N', 'C', ']', '---', '>', '---', '>', 'A', 'sequence', 'of', 'maps', '2', '.', '[', 'N', 'C_1', '...', 'C_n', ']', '---', '>', 'A', 'sequence', 'of', 'maps'] | train | https://github.com/onnx/onnxmltools/blob/d4e4c31990fc2d9fd1f92139f497d360914c9df2/onnxmltools/convert/coreml/shape_calculators/TensorToProbabilityMap.py#L12-L49 |
9,688 | astropy/photutils | photutils/psf/matching/fourier.py | create_matching_kernel | def create_matching_kernel(source_psf, target_psf, window=None):
"""
Create a kernel to match 2D point spread functions (PSF) using the
ratio of Fourier transforms.
Parameters
----------
source_psf : 2D `~numpy.ndarray`
The source PSF. The source PSF should have higher resolution
(i.e. narrower) than the target PSF. ``source_psf`` and
``target_psf`` must have the same shape and pixel scale.
target_psf : 2D `~numpy.ndarray`
The target PSF. The target PSF should have lower resolution
(i.e. broader) than the source PSF. ``source_psf`` and
``target_psf`` must have the same shape and pixel scale.
window : callable, optional
The window (or taper) function or callable class instance used
to remove high frequency noise from the PSF matching kernel.
Some examples include:
* `~photutils.psf.matching.HanningWindow`
* `~photutils.psf.matching.TukeyWindow`
* `~photutils.psf.matching.CosineBellWindow`
* `~photutils.psf.matching.SplitCosineBellWindow`
* `~photutils.psf.matching.TopHatWindow`
For more information on window functions and example usage, see
:ref:`psf_matching`.
Returns
-------
kernel : 2D `~numpy.ndarray`
The matching kernel to go from ``source_psf`` to ``target_psf``.
The output matching kernel is normalized such that it sums to 1.
"""
# inputs are copied so that they are not changed when normalizing
source_psf = np.copy(np.asanyarray(source_psf))
target_psf = np.copy(np.asanyarray(target_psf))
if source_psf.shape != target_psf.shape:
raise ValueError('source_psf and target_psf must have the same shape '
'(i.e. registered with the same pixel scale).')
# ensure input PSFs are normalized
source_psf /= source_psf.sum()
target_psf /= target_psf.sum()
source_otf = fftshift(fft2(source_psf))
target_otf = fftshift(fft2(target_psf))
ratio = target_otf / source_otf
# apply a window function in frequency space
if window is not None:
ratio *= window(target_psf.shape)
kernel = np.real(fftshift((ifft2(ifftshift(ratio)))))
return kernel / kernel.sum() | python | def create_matching_kernel(source_psf, target_psf, window=None):
"""
Create a kernel to match 2D point spread functions (PSF) using the
ratio of Fourier transforms.
Parameters
----------
source_psf : 2D `~numpy.ndarray`
The source PSF. The source PSF should have higher resolution
(i.e. narrower) than the target PSF. ``source_psf`` and
``target_psf`` must have the same shape and pixel scale.
target_psf : 2D `~numpy.ndarray`
The target PSF. The target PSF should have lower resolution
(i.e. broader) than the source PSF. ``source_psf`` and
``target_psf`` must have the same shape and pixel scale.
window : callable, optional
The window (or taper) function or callable class instance used
to remove high frequency noise from the PSF matching kernel.
Some examples include:
* `~photutils.psf.matching.HanningWindow`
* `~photutils.psf.matching.TukeyWindow`
* `~photutils.psf.matching.CosineBellWindow`
* `~photutils.psf.matching.SplitCosineBellWindow`
* `~photutils.psf.matching.TopHatWindow`
For more information on window functions and example usage, see
:ref:`psf_matching`.
Returns
-------
kernel : 2D `~numpy.ndarray`
The matching kernel to go from ``source_psf`` to ``target_psf``.
The output matching kernel is normalized such that it sums to 1.
"""
# inputs are copied so that they are not changed when normalizing
source_psf = np.copy(np.asanyarray(source_psf))
target_psf = np.copy(np.asanyarray(target_psf))
if source_psf.shape != target_psf.shape:
raise ValueError('source_psf and target_psf must have the same shape '
'(i.e. registered with the same pixel scale).')
# ensure input PSFs are normalized
source_psf /= source_psf.sum()
target_psf /= target_psf.sum()
source_otf = fftshift(fft2(source_psf))
target_otf = fftshift(fft2(target_psf))
ratio = target_otf / source_otf
# apply a window function in frequency space
if window is not None:
ratio *= window(target_psf.shape)
kernel = np.real(fftshift((ifft2(ifftshift(ratio)))))
return kernel / kernel.sum() | ['def', 'create_matching_kernel', '(', 'source_psf', ',', 'target_psf', ',', 'window', '=', 'None', ')', ':', '# inputs are copied so that they are not changed when normalizing', 'source_psf', '=', 'np', '.', 'copy', '(', 'np', '.', 'asanyarray', '(', 'source_psf', ')', ')', 'target_psf', '=', 'np', '.', 'copy', '(', 'np', '.', 'asanyarray', '(', 'target_psf', ')', ')', 'if', 'source_psf', '.', 'shape', '!=', 'target_psf', '.', 'shape', ':', 'raise', 'ValueError', '(', "'source_psf and target_psf must have the same shape '", "'(i.e. registered with the same pixel scale).'", ')', '# ensure input PSFs are normalized', 'source_psf', '/=', 'source_psf', '.', 'sum', '(', ')', 'target_psf', '/=', 'target_psf', '.', 'sum', '(', ')', 'source_otf', '=', 'fftshift', '(', 'fft2', '(', 'source_psf', ')', ')', 'target_otf', '=', 'fftshift', '(', 'fft2', '(', 'target_psf', ')', ')', 'ratio', '=', 'target_otf', '/', 'source_otf', '# apply a window function in frequency space', 'if', 'window', 'is', 'not', 'None', ':', 'ratio', '*=', 'window', '(', 'target_psf', '.', 'shape', ')', 'kernel', '=', 'np', '.', 'real', '(', 'fftshift', '(', '(', 'ifft2', '(', 'ifftshift', '(', 'ratio', ')', ')', ')', ')', ')', 'return', 'kernel', '/', 'kernel', '.', 'sum', '(', ')'] | Create a kernel to match 2D point spread functions (PSF) using the
ratio of Fourier transforms.
Parameters
----------
source_psf : 2D `~numpy.ndarray`
The source PSF. The source PSF should have higher resolution
(i.e. narrower) than the target PSF. ``source_psf`` and
``target_psf`` must have the same shape and pixel scale.
target_psf : 2D `~numpy.ndarray`
The target PSF. The target PSF should have lower resolution
(i.e. broader) than the source PSF. ``source_psf`` and
``target_psf`` must have the same shape and pixel scale.
window : callable, optional
The window (or taper) function or callable class instance used
to remove high frequency noise from the PSF matching kernel.
Some examples include:
* `~photutils.psf.matching.HanningWindow`
* `~photutils.psf.matching.TukeyWindow`
* `~photutils.psf.matching.CosineBellWindow`
* `~photutils.psf.matching.SplitCosineBellWindow`
* `~photutils.psf.matching.TopHatWindow`
For more information on window functions and example usage, see
:ref:`psf_matching`.
Returns
-------
kernel : 2D `~numpy.ndarray`
The matching kernel to go from ``source_psf`` to ``target_psf``.
The output matching kernel is normalized such that it sums to 1. | ['Create', 'a', 'kernel', 'to', 'match', '2D', 'point', 'spread', 'functions', '(', 'PSF', ')', 'using', 'the', 'ratio', 'of', 'Fourier', 'transforms', '.'] | train | https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/matching/fourier.py#L45-L104 |
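A minimal sketch of matching two Gaussian PSFs with create_matching_kernel(); the kernel sizes, widths, and window parameter are assumptions chosen for illustration:
import numpy as np
from astropy.convolution import Gaussian2DKernel
from photutils.psf.matching import create_matching_kernel, TopHatWindow

source_psf = Gaussian2DKernel(3, x_size=51, y_size=51).array  # narrower (higher resolution) PSF
target_psf = Gaussian2DKernel(5, x_size=51, y_size=51).array  # broader (lower resolution) PSF
kernel = create_matching_kernel(source_psf, target_psf, window=TopHatWindow(0.35))
print(kernel.shape, np.isclose(kernel.sum(), 1.0))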
9,689 | PyCQA/astroid | astroid/node_classes.py | UnaryOp.type_errors | def type_errors(self, context=None):
"""Get a list of type errors which can occur during inference.
Each TypeError is represented by a :class:`BadBinaryOperationMessage`,
which holds the original exception.
:returns: The list of possible type errors.
:rtype: list(BadBinaryOperationMessage)
"""
try:
results = self._infer_unaryop(context=context)
return [
result
for result in results
if isinstance(result, util.BadUnaryOperationMessage)
]
except exceptions.InferenceError:
return [] | python | def type_errors(self, context=None):
"""Get a list of type errors which can occur during inference.
Each TypeError is represented by a :class:`BadBinaryOperationMessage`,
which holds the original exception.
:returns: The list of possible type errors.
:rtype: list(BadBinaryOperationMessage)
"""
try:
results = self._infer_unaryop(context=context)
return [
result
for result in results
if isinstance(result, util.BadUnaryOperationMessage)
]
except exceptions.InferenceError:
return [] | ['def', 'type_errors', '(', 'self', ',', 'context', '=', 'None', ')', ':', 'try', ':', 'results', '=', 'self', '.', '_infer_unaryop', '(', 'context', '=', 'context', ')', 'return', '[', 'result', 'for', 'result', 'in', 'results', 'if', 'isinstance', '(', 'result', ',', 'util', '.', 'BadUnaryOperationMessage', ')', ']', 'except', 'exceptions', '.', 'InferenceError', ':', 'return', '[', ']'] | Get a list of type errors which can occur during inference.
Each TypeError is represented by a :class:`BadBinaryOperationMessage`,
which holds the original exception.
:returns: The list of possible type errors.
:rtype: list(BadBinaryOperationMessage) | ['Get', 'a', 'list', 'of', 'type', 'errors', 'which', 'can', 'occur', 'during', 'inference', '.'] | train | https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/node_classes.py#L4309-L4326 |
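A minimal sketch of asking an inferred unary operation for its type errors; the snippet passed to extract_node is an arbitrary example:
import astroid

node = astroid.extract_node('-"not a number"')  # a UnaryOp node applying - to a str
for message in node.type_errors():
    print(message)  # e.g. bad operand type for unary -: str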
9,690 | jtwhite79/pyemu | pyemu/plot/plot_utils.py | plot_summary_distributions | def plot_summary_distributions(df,ax=None,label_post=False,label_prior=False,
subplots=False,figsize=(11,8.5),pt_color='b'):
""" helper function to plot gaussian distrbutions from prior and posterior
means and standard deviations
Parameters
----------
df : pandas.DataFrame
        a dataframe or csv file. Must have columns named:
        'prior_mean','prior_stdev','post_mean','post_stdev'. If loaded
        from a csv file, column 0 is assumed to be the index
ax: matplotlib.pyplot.axis
If None, and not subplots, then one is created
and all distributions are plotted on a single plot
label_post: bool
flag to add text labels to the peak of the posterior
label_prior: bool
flag to add text labels to the peak of the prior
subplots: (boolean)
flag to use subplots. If True, then 6 axes per page
are used and a single prior and posterior is plotted on each
figsize: tuple
matplotlib figure size
Returns
-------
figs : list
list of figures
axes : list
list of axes
Note
----
This is useful for demystifying FOSM results
if subplots is False, a single axis is returned
Example
-------
``>>>import matplotlib.pyplot as plt``
``>>>import pyemu``
``>>>pyemu.helpers.plot_summary_distributions("pest.par.usum.csv")``
``>>>plt.show()``
"""
import matplotlib.pyplot as plt
if isinstance(df,str):
df = pd.read_csv(df,index_col=0)
if ax is None and not subplots:
fig = plt.figure(figsize=figsize)
ax = plt.subplot(111)
ax.grid()
if "post_stdev" not in df.columns and "post_var" in df.columns:
df.loc[:,"post_stdev"] = df.post_var.apply(np.sqrt)
if "prior_stdev" not in df.columns and "prior_var" in df.columns:
df.loc[:,"prior_stdev"] = df.prior_var.apply(np.sqrt)
if "prior_expt" not in df.columns and "prior_mean" in df.columns:
df.loc[:,"prior_expt"] = df.prior_mean
if "post_expt" not in df.columns and "post_mean" in df.columns:
df.loc[:,"post_expt"] = df.post_mean
if subplots:
fig = plt.figure(figsize=figsize)
ax = plt.subplot(2,3,1)
ax_per_page = 6
ax_count = 0
axes = []
figs = []
for name in df.index:
x,y = gaussian_distribution(df.loc[name,"post_expt"],
df.loc[name,"post_stdev"])
ax.fill_between(x,0,y,facecolor=pt_color,edgecolor="none",alpha=0.25)
if label_post:
mx_idx = np.argmax(y)
xtxt,ytxt = x[mx_idx],y[mx_idx] * 1.001
ax.text(xtxt,ytxt,name,ha="center",alpha=0.5)
x,y = gaussian_distribution(df.loc[name,"prior_expt"],
df.loc[name,"prior_stdev"])
ax.plot(x,y,color='0.5',lw=3.0,dashes=(2,1))
if label_prior:
mx_idx = np.argmax(y)
xtxt,ytxt = x[mx_idx],y[mx_idx] * 1.001
ax.text(xtxt,ytxt,name,ha="center",alpha=0.5)
#ylim = list(ax.get_ylim())
#ylim[1] *= 1.2
#ylim[0] = 0.0
#ax.set_ylim(ylim)
if subplots:
ax.set_title(name)
ax_count += 1
ax.set_yticklabels([])
axes.append(ax)
if name == df.index[-1]:
break
if ax_count >= ax_per_page:
figs.append(fig)
fig = plt.figure(figsize=figsize)
ax_count = 0
ax = plt.subplot(2,3,ax_count+1)
if subplots:
figs.append(fig)
return figs, axes
ylim = list(ax.get_ylim())
ylim[1] *= 1.2
ylim[0] = 0.0
ax.set_ylim(ylim)
ax.set_yticklabels([])
return ax | python | def plot_summary_distributions(df,ax=None,label_post=False,label_prior=False,
subplots=False,figsize=(11,8.5),pt_color='b'):
""" helper function to plot gaussian distrbutions from prior and posterior
means and standard deviations
Parameters
----------
df : pandas.DataFrame
        a dataframe or csv file. Must have columns named:
        'prior_mean','prior_stdev','post_mean','post_stdev'. If loaded
        from a csv file, column 0 is assumed to be the index
ax: matplotlib.pyplot.axis
If None, and not subplots, then one is created
and all distributions are plotted on a single plot
label_post: bool
flag to add text labels to the peak of the posterior
label_prior: bool
flag to add text labels to the peak of the prior
subplots: (boolean)
flag to use subplots. If True, then 6 axes per page
are used and a single prior and posterior is plotted on each
figsize: tuple
matplotlib figure size
Returns
-------
figs : list
list of figures
axes : list
list of axes
Note
----
This is useful for demystifying FOSM results
if subplots is False, a single axis is returned
Example
-------
``>>>import matplotlib.pyplot as plt``
``>>>import pyemu``
``>>>pyemu.helpers.plot_summary_distributions("pest.par.usum.csv")``
``>>>plt.show()``
"""
import matplotlib.pyplot as plt
if isinstance(df,str):
df = pd.read_csv(df,index_col=0)
if ax is None and not subplots:
fig = plt.figure(figsize=figsize)
ax = plt.subplot(111)
ax.grid()
if "post_stdev" not in df.columns and "post_var" in df.columns:
df.loc[:,"post_stdev"] = df.post_var.apply(np.sqrt)
if "prior_stdev" not in df.columns and "prior_var" in df.columns:
df.loc[:,"prior_stdev"] = df.prior_var.apply(np.sqrt)
if "prior_expt" not in df.columns and "prior_mean" in df.columns:
df.loc[:,"prior_expt"] = df.prior_mean
if "post_expt" not in df.columns and "post_mean" in df.columns:
df.loc[:,"post_expt"] = df.post_mean
if subplots:
fig = plt.figure(figsize=figsize)
ax = plt.subplot(2,3,1)
ax_per_page = 6
ax_count = 0
axes = []
figs = []
for name in df.index:
x,y = gaussian_distribution(df.loc[name,"post_expt"],
df.loc[name,"post_stdev"])
ax.fill_between(x,0,y,facecolor=pt_color,edgecolor="none",alpha=0.25)
if label_post:
mx_idx = np.argmax(y)
xtxt,ytxt = x[mx_idx],y[mx_idx] * 1.001
ax.text(xtxt,ytxt,name,ha="center",alpha=0.5)
x,y = gaussian_distribution(df.loc[name,"prior_expt"],
df.loc[name,"prior_stdev"])
ax.plot(x,y,color='0.5',lw=3.0,dashes=(2,1))
if label_prior:
mx_idx = np.argmax(y)
xtxt,ytxt = x[mx_idx],y[mx_idx] * 1.001
ax.text(xtxt,ytxt,name,ha="center",alpha=0.5)
#ylim = list(ax.get_ylim())
#ylim[1] *= 1.2
#ylim[0] = 0.0
#ax.set_ylim(ylim)
if subplots:
ax.set_title(name)
ax_count += 1
ax.set_yticklabels([])
axes.append(ax)
if name == df.index[-1]:
break
if ax_count >= ax_per_page:
figs.append(fig)
fig = plt.figure(figsize=figsize)
ax_count = 0
ax = plt.subplot(2,3,ax_count+1)
if subplots:
figs.append(fig)
return figs, axes
ylim = list(ax.get_ylim())
ylim[1] *= 1.2
ylim[0] = 0.0
ax.set_ylim(ylim)
ax.set_yticklabels([])
return ax | ['def', 'plot_summary_distributions', '(', 'df', ',', 'ax', '=', 'None', ',', 'label_post', '=', 'False', ',', 'label_prior', '=', 'False', ',', 'subplots', '=', 'False', ',', 'figsize', '=', '(', '11', ',', '8.5', ')', ',', 'pt_color', '=', "'b'", ')', ':', 'import', 'matplotlib', '.', 'pyplot', 'as', 'plt', 'if', 'isinstance', '(', 'df', ',', 'str', ')', ':', 'df', '=', 'pd', '.', 'read_csv', '(', 'df', ',', 'index_col', '=', '0', ')', 'if', 'ax', 'is', 'None', 'and', 'not', 'subplots', ':', 'fig', '=', 'plt', '.', 'figure', '(', 'figsize', '=', 'figsize', ')', 'ax', '=', 'plt', '.', 'subplot', '(', '111', ')', 'ax', '.', 'grid', '(', ')', 'if', '"post_stdev"', 'not', 'in', 'df', '.', 'columns', 'and', '"post_var"', 'in', 'df', '.', 'columns', ':', 'df', '.', 'loc', '[', ':', ',', '"post_stdev"', ']', '=', 'df', '.', 'post_var', '.', 'apply', '(', 'np', '.', 'sqrt', ')', 'if', '"prior_stdev"', 'not', 'in', 'df', '.', 'columns', 'and', '"prior_var"', 'in', 'df', '.', 'columns', ':', 'df', '.', 'loc', '[', ':', ',', '"prior_stdev"', ']', '=', 'df', '.', 'prior_var', '.', 'apply', '(', 'np', '.', 'sqrt', ')', 'if', '"prior_expt"', 'not', 'in', 'df', '.', 'columns', 'and', '"prior_mean"', 'in', 'df', '.', 'columns', ':', 'df', '.', 'loc', '[', ':', ',', '"prior_expt"', ']', '=', 'df', '.', 'prior_mean', 'if', '"post_expt"', 'not', 'in', 'df', '.', 'columns', 'and', '"post_mean"', 'in', 'df', '.', 'columns', ':', 'df', '.', 'loc', '[', ':', ',', '"post_expt"', ']', '=', 'df', '.', 'post_mean', 'if', 'subplots', ':', 'fig', '=', 'plt', '.', 'figure', '(', 'figsize', '=', 'figsize', ')', 'ax', '=', 'plt', '.', 'subplot', '(', '2', ',', '3', ',', '1', ')', 'ax_per_page', '=', '6', 'ax_count', '=', '0', 'axes', '=', '[', ']', 'figs', '=', '[', ']', 'for', 'name', 'in', 'df', '.', 'index', ':', 'x', ',', 'y', '=', 'gaussian_distribution', '(', 'df', '.', 'loc', '[', 'name', ',', '"post_expt"', ']', ',', 'df', '.', 'loc', '[', 'name', ',', '"post_stdev"', ']', ')', 'ax', '.', 'fill_between', '(', 'x', ',', '0', ',', 'y', ',', 'facecolor', '=', 'pt_color', ',', 'edgecolor', '=', '"none"', ',', 'alpha', '=', '0.25', ')', 'if', 'label_post', ':', 'mx_idx', '=', 'np', '.', 'argmax', '(', 'y', ')', 'xtxt', ',', 'ytxt', '=', 'x', '[', 'mx_idx', ']', ',', 'y', '[', 'mx_idx', ']', '*', '1.001', 'ax', '.', 'text', '(', 'xtxt', ',', 'ytxt', ',', 'name', ',', 'ha', '=', '"center"', ',', 'alpha', '=', '0.5', ')', 'x', ',', 'y', '=', 'gaussian_distribution', '(', 'df', '.', 'loc', '[', 'name', ',', '"prior_expt"', ']', ',', 'df', '.', 'loc', '[', 'name', ',', '"prior_stdev"', ']', ')', 'ax', '.', 'plot', '(', 'x', ',', 'y', ',', 'color', '=', "'0.5'", ',', 'lw', '=', '3.0', ',', 'dashes', '=', '(', '2', ',', '1', ')', ')', 'if', 'label_prior', ':', 'mx_idx', '=', 'np', '.', 'argmax', '(', 'y', ')', 'xtxt', ',', 'ytxt', '=', 'x', '[', 'mx_idx', ']', ',', 'y', '[', 'mx_idx', ']', '*', '1.001', 'ax', '.', 'text', '(', 'xtxt', ',', 'ytxt', ',', 'name', ',', 'ha', '=', '"center"', ',', 'alpha', '=', '0.5', ')', '#ylim = list(ax.get_ylim())', '#ylim[1] *= 1.2', '#ylim[0] = 0.0', '#ax.set_ylim(ylim)', 'if', 'subplots', ':', 'ax', '.', 'set_title', '(', 'name', ')', 'ax_count', '+=', '1', 'ax', '.', 'set_yticklabels', '(', '[', ']', ')', 'axes', '.', 'append', '(', 'ax', ')', 'if', 'name', '==', 'df', '.', 'index', '[', '-', '1', ']', ':', 'break', 'if', 'ax_count', '>=', 'ax_per_page', ':', 'figs', '.', 'append', '(', 'fig', ')', 'fig', '=', 'plt', '.', 'figure', '(', 'figsize', '=', 'figsize', ')', 
'ax_count', '=', '0', 'ax', '=', 'plt', '.', 'subplot', '(', '2', ',', '3', ',', 'ax_count', '+', '1', ')', 'if', 'subplots', ':', 'figs', '.', 'append', '(', 'fig', ')', 'return', 'figs', ',', 'axes', 'ylim', '=', 'list', '(', 'ax', '.', 'get_ylim', '(', ')', ')', 'ylim', '[', '1', ']', '*=', '1.2', 'ylim', '[', '0', ']', '=', '0.0', 'ax', '.', 'set_ylim', '(', 'ylim', ')', 'ax', '.', 'set_yticklabels', '(', '[', ']', ')', 'return', 'ax'] | helper function to plot gaussian distrbutions from prior and posterior
means and standard deviations
Parameters
----------
df : pandas.DataFrame
        a dataframe or csv file. Must have columns named:
        'prior_mean','prior_stdev','post_mean','post_stdev'. If loaded
        from a csv file, column 0 is assumed to be the index
ax: matplotlib.pyplot.axis
If None, and not subplots, then one is created
and all distributions are plotted on a single plot
label_post: bool
flag to add text labels to the peak of the posterior
label_prior: bool
flag to add text labels to the peak of the prior
subplots: (boolean)
flag to use subplots. If True, then 6 axes per page
are used and a single prior and posterior is plotted on each
figsize: tuple
matplotlib figure size
Returns
-------
figs : list
list of figures
axes : list
list of axes
Note
----
This is useful for demystifying FOSM results
if subplots is False, a single axis is returned
Example
-------
``>>>import matplotlib.pyplot as plt``
``>>>import pyemu``
``>>>pyemu.helpers.plot_summary_distributions("pest.par.usum.csv")``
``>>>plt.show()`` | ['helper', 'function', 'to', 'plot', 'gaussian', 'distrbutions', 'from', 'prior', 'and', 'posterior', 'means', 'and', 'standard', 'deviations'] | train | https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/plot/plot_utils.py#L27-L139 |
9,691 | jantman/awslimitchecker | awslimitchecker/services/vpc.py | _VpcService._update_limits_from_api | def _update_limits_from_api(self):
"""
Query EC2's DescribeAccountAttributes API action and
update the network interface limit, as needed. Updates ``self.limits``.
More info on the network interface limit, from the docs:
'This limit is the greater of either the default limit (350) or your
On-Demand Instance limit multiplied by 5.
The default limit for On-Demand Instances is 20.'
"""
self.connect()
self.connect_resource()
logger.info("Querying EC2 DescribeAccountAttributes for limits")
attribs = self.conn.describe_account_attributes()
for attrib in attribs['AccountAttributes']:
if attrib['AttributeName'] == 'max-instances':
val = attrib['AttributeValues'][0]['AttributeValue']
if int(val) * 5 > DEFAULT_ENI_LIMIT:
limit_name = 'Network interfaces per Region'
self.limits[limit_name]._set_api_limit(int(val) * 5)
logger.debug("Done setting limits from API") | python | def _update_limits_from_api(self):
"""
Query EC2's DescribeAccountAttributes API action and
update the network interface limit, as needed. Updates ``self.limits``.
More info on the network interface limit, from the docs:
'This limit is the greater of either the default limit (350) or your
On-Demand Instance limit multiplied by 5.
The default limit for On-Demand Instances is 20.'
"""
self.connect()
self.connect_resource()
logger.info("Querying EC2 DescribeAccountAttributes for limits")
attribs = self.conn.describe_account_attributes()
for attrib in attribs['AccountAttributes']:
if attrib['AttributeName'] == 'max-instances':
val = attrib['AttributeValues'][0]['AttributeValue']
if int(val) * 5 > DEFAULT_ENI_LIMIT:
limit_name = 'Network interfaces per Region'
self.limits[limit_name]._set_api_limit(int(val) * 5)
logger.debug("Done setting limits from API") | ['def', '_update_limits_from_api', '(', 'self', ')', ':', 'self', '.', 'connect', '(', ')', 'self', '.', 'connect_resource', '(', ')', 'logger', '.', 'info', '(', '"Querying EC2 DescribeAccountAttributes for limits"', ')', 'attribs', '=', 'self', '.', 'conn', '.', 'describe_account_attributes', '(', ')', 'for', 'attrib', 'in', 'attribs', '[', "'AccountAttributes'", ']', ':', 'if', 'attrib', '[', "'AttributeName'", ']', '==', "'max-instances'", ':', 'val', '=', 'attrib', '[', "'AttributeValues'", ']', '[', '0', ']', '[', "'AttributeValue'", ']', 'if', 'int', '(', 'val', ')', '*', '5', '>', 'DEFAULT_ENI_LIMIT', ':', 'limit_name', '=', "'Network interfaces per Region'", 'self', '.', 'limits', '[', 'limit_name', ']', '.', '_set_api_limit', '(', 'int', '(', 'val', ')', '*', '5', ')', 'logger', '.', 'debug', '(', '"Done setting limits from API"', ')'] | Query EC2's DescribeAccountAttributes API action and
update the network interface limit, as needed. Updates ``self.limits``.
More info on the network interface limit, from the docs:
'This limit is the greater of either the default limit (350) or your
On-Demand Instance limit multiplied by 5.
The default limit for On-Demand Instances is 20.' | ['Query', 'EC2', 's', 'DescribeAccountAttributes', 'API', 'action', 'and', 'update', 'the', 'network', 'interface', 'limit', 'as', 'needed', '.', 'Updates', 'self', '.', 'limits', '.'] | train | https://github.com/jantman/awslimitchecker/blob/e50197f70f3d0abcc5cfc7fde6336f548b790e34/awslimitchecker/services/vpc.py#L339-L359 |
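The limit rule quoted in the docstring above reduces to a one-line calculation. The sketch below restates it as a standalone helper; `eni_limit` is a hypothetical function for illustration and is not part of awslimitchecker.

```python
DEFAULT_ENI_LIMIT = 350  # per-region default quoted in the docstring

def eni_limit(max_instances):
    # the limit is the greater of the default (350) or the
    # On-Demand instance limit multiplied by 5
    return max(DEFAULT_ENI_LIMIT, int(max_instances) * 5)

assert eni_limit(20) == 350   # default On-Demand limit: 20 * 5 = 100 < 350
assert eni_limit(100) == 500  # raised instance limit: 100 * 5 > 350
```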
9,692 | getsentry/sentry-python | sentry_sdk/scope.py | Scope.apply_to_event | def apply_to_event(self, event, hint=None):
# type: (Dict[str, Any], Dict[str, Any]) -> Optional[Dict[str, Any]]
"""Applies the information contained on the scope to the given event."""
def _drop(event, cause, ty):
# type: (Dict[str, Any], Callable, str) -> Optional[Any]
logger.info("%s (%s) dropped event (%s)", ty, cause, event)
return None
if self._level is not None:
event["level"] = self._level
event.setdefault("breadcrumbs", []).extend(self._breadcrumbs)
if event.get("user") is None and self._user is not None:
event["user"] = self._user
if event.get("transaction") is None and self._transaction is not None:
event["transaction"] = self._transaction
if event.get("fingerprint") is None and self._fingerprint is not None:
event["fingerprint"] = self._fingerprint
if self._extras:
event.setdefault("extra", {}).update(object_to_json(self._extras))
if self._tags:
event.setdefault("tags", {}).update(self._tags)
if self._contexts:
event.setdefault("contexts", {}).update(self._contexts)
if self._span is not None:
event.setdefault("contexts", {})["trace"] = {
"trace_id": self._span.trace_id,
"span_id": self._span.span_id,
}
exc_info = hint.get("exc_info") if hint is not None else None
if exc_info is not None:
for processor in self._error_processors:
new_event = processor(event, exc_info)
if new_event is None:
return _drop(event, processor, "error processor")
event = new_event
for processor in chain(global_event_processors, self._event_processors):
new_event = event
with capture_internal_exceptions():
new_event = processor(event, hint)
if new_event is None:
return _drop(event, processor, "event processor")
event = new_event
return event | python | def apply_to_event(self, event, hint=None):
# type: (Dict[str, Any], Dict[str, Any]) -> Optional[Dict[str, Any]]
"""Applies the information contained on the scope to the given event."""
def _drop(event, cause, ty):
# type: (Dict[str, Any], Callable, str) -> Optional[Any]
logger.info("%s (%s) dropped event (%s)", ty, cause, event)
return None
if self._level is not None:
event["level"] = self._level
event.setdefault("breadcrumbs", []).extend(self._breadcrumbs)
if event.get("user") is None and self._user is not None:
event["user"] = self._user
if event.get("transaction") is None and self._transaction is not None:
event["transaction"] = self._transaction
if event.get("fingerprint") is None and self._fingerprint is not None:
event["fingerprint"] = self._fingerprint
if self._extras:
event.setdefault("extra", {}).update(object_to_json(self._extras))
if self._tags:
event.setdefault("tags", {}).update(self._tags)
if self._contexts:
event.setdefault("contexts", {}).update(self._contexts)
if self._span is not None:
event.setdefault("contexts", {})["trace"] = {
"trace_id": self._span.trace_id,
"span_id": self._span.span_id,
}
exc_info = hint.get("exc_info") if hint is not None else None
if exc_info is not None:
for processor in self._error_processors:
new_event = processor(event, exc_info)
if new_event is None:
return _drop(event, processor, "error processor")
event = new_event
for processor in chain(global_event_processors, self._event_processors):
new_event = event
with capture_internal_exceptions():
new_event = processor(event, hint)
if new_event is None:
return _drop(event, processor, "event processor")
event = new_event
return event | ['def', 'apply_to_event', '(', 'self', ',', 'event', ',', 'hint', '=', 'None', ')', ':', '# type: (Dict[str, Any], Dict[str, Any]) -> Optional[Dict[str, Any]]', 'def', '_drop', '(', 'event', ',', 'cause', ',', 'ty', ')', ':', '# type: (Dict[str, Any], Callable, str) -> Optional[Any]', 'logger', '.', 'info', '(', '"%s (%s) dropped event (%s)"', ',', 'ty', ',', 'cause', ',', 'event', ')', 'return', 'None', 'if', 'self', '.', '_level', 'is', 'not', 'None', ':', 'event', '[', '"level"', ']', '=', 'self', '.', '_level', 'event', '.', 'setdefault', '(', '"breadcrumbs"', ',', '[', ']', ')', '.', 'extend', '(', 'self', '.', '_breadcrumbs', ')', 'if', 'event', '.', 'get', '(', '"user"', ')', 'is', 'None', 'and', 'self', '.', '_user', 'is', 'not', 'None', ':', 'event', '[', '"user"', ']', '=', 'self', '.', '_user', 'if', 'event', '.', 'get', '(', '"transaction"', ')', 'is', 'None', 'and', 'self', '.', '_transaction', 'is', 'not', 'None', ':', 'event', '[', '"transaction"', ']', '=', 'self', '.', '_transaction', 'if', 'event', '.', 'get', '(', '"fingerprint"', ')', 'is', 'None', 'and', 'self', '.', '_fingerprint', 'is', 'not', 'None', ':', 'event', '[', '"fingerprint"', ']', '=', 'self', '.', '_fingerprint', 'if', 'self', '.', '_extras', ':', 'event', '.', 'setdefault', '(', '"extra"', ',', '{', '}', ')', '.', 'update', '(', 'object_to_json', '(', 'self', '.', '_extras', ')', ')', 'if', 'self', '.', '_tags', ':', 'event', '.', 'setdefault', '(', '"tags"', ',', '{', '}', ')', '.', 'update', '(', 'self', '.', '_tags', ')', 'if', 'self', '.', '_contexts', ':', 'event', '.', 'setdefault', '(', '"contexts"', ',', '{', '}', ')', '.', 'update', '(', 'self', '.', '_contexts', ')', 'if', 'self', '.', '_span', 'is', 'not', 'None', ':', 'event', '.', 'setdefault', '(', '"contexts"', ',', '{', '}', ')', '[', '"trace"', ']', '=', '{', '"trace_id"', ':', 'self', '.', '_span', '.', 'trace_id', ',', '"span_id"', ':', 'self', '.', '_span', '.', 'span_id', ',', '}', 'exc_info', '=', 'hint', '.', 'get', '(', '"exc_info"', ')', 'if', 'hint', 'is', 'not', 'None', 'else', 'None', 'if', 'exc_info', 'is', 'not', 'None', ':', 'for', 'processor', 'in', 'self', '.', '_error_processors', ':', 'new_event', '=', 'processor', '(', 'event', ',', 'exc_info', ')', 'if', 'new_event', 'is', 'None', ':', 'return', '_drop', '(', 'event', ',', 'processor', ',', '"error processor"', ')', 'event', '=', 'new_event', 'for', 'processor', 'in', 'chain', '(', 'global_event_processors', ',', 'self', '.', '_event_processors', ')', ':', 'new_event', '=', 'event', 'with', 'capture_internal_exceptions', '(', ')', ':', 'new_event', '=', 'processor', '(', 'event', ',', 'hint', ')', 'if', 'new_event', 'is', 'None', ':', 'return', '_drop', '(', 'event', ',', 'processor', ',', '"event processor"', ')', 'event', '=', 'new_event', 'return', 'event'] | Applies the information contained on the scope to the given event. | ['Applies', 'the', 'information', 'contained', 'on', 'the', 'scope', 'to', 'the', 'given', 'event', '.'] | train | https://github.com/getsentry/sentry-python/blob/a1d77722bdce0b94660ebf50b5c4a4645916d084/sentry_sdk/scope.py#L172-L225 |
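To make the processor chain in the row above concrete, here is a small self-contained sketch of the same pattern. It uses plain dicts and made-up processor names rather than the sentry_sdk API: each processor may rewrite the event or drop it by returning None.

```python
def scrub_password(event, hint):
    # rewrite the event in place and pass it along
    event.get("extra", {}).pop("password", None)
    return event

def drop_health_checks(event, hint):
    # returning None drops the event entirely
    return None if event.get("transaction") == "/healthz" else event

def run_processors(event, processors, hint=None):
    for processor in processors:
        event = processor(event, hint)
        if event is None:
            return None  # dropped by a processor
    return event

event = {"transaction": "/healthz", "extra": {"password": "hunter2"}}
print(run_processors(event, [scrub_password, drop_health_checks]))  # None
```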
9,693 | MIT-LCP/wfdb-python | wfdb/io/_header.py | _parse_signal_lines | def _parse_signal_lines(signal_lines):
"""
Extract fields from a list of signal line strings into a dictionary.
"""
n_sig = len(signal_lines)
# Dictionary for signal fields
signal_fields = {}
# Each dictionary field is a list
for field in SIGNAL_SPECS.index:
signal_fields[field] = n_sig * [None]
# Read string fields from signal line
for ch in range(n_sig):
(signal_fields['file_name'][ch], signal_fields['fmt'][ch],
signal_fields['samps_per_frame'][ch], signal_fields['skew'][ch],
signal_fields['byte_offset'][ch], signal_fields['adc_gain'][ch],
signal_fields['baseline'][ch], signal_fields['units'][ch],
signal_fields['adc_res'][ch], signal_fields['adc_zero'][ch],
signal_fields['init_value'][ch], signal_fields['checksum'][ch],
signal_fields['block_size'][ch],
signal_fields['sig_name'][ch]) = _rx_signal.findall(signal_lines[ch])[0]
for field in SIGNAL_SPECS.index:
# Replace empty strings with their read defaults (which are mostly None)
# Note: Never set a field to None. [None]* n_sig is accurate, indicating
# that different channels can be present or missing.
if signal_fields[field][ch] == '':
signal_fields[field][ch] = SIGNAL_SPECS.loc[field, 'read_default']
# Special case: missing baseline defaults to ADCzero if present
if field == 'baseline' and signal_fields['adc_zero'][ch] != '':
signal_fields['baseline'][ch] = int(signal_fields['adc_zero'][ch])
# Typecast non-empty strings for numerical fields
else:
if SIGNAL_SPECS.loc[field, 'allowed_types'] is int_types:
signal_fields[field][ch] = int(signal_fields[field][ch])
elif SIGNAL_SPECS.loc[field, 'allowed_types'] is float_types:
signal_fields[field][ch] = float(signal_fields[field][ch])
# Special case: adc_gain of 0 means 200
if field == 'adc_gain' and signal_fields['adc_gain'][ch] == 0:
signal_fields['adc_gain'][ch] = 200.
return signal_fields | python | def _parse_signal_lines(signal_lines):
"""
Extract fields from a list of signal line strings into a dictionary.
"""
n_sig = len(signal_lines)
# Dictionary for signal fields
signal_fields = {}
# Each dictionary field is a list
for field in SIGNAL_SPECS.index:
signal_fields[field] = n_sig * [None]
# Read string fields from signal line
for ch in range(n_sig):
(signal_fields['file_name'][ch], signal_fields['fmt'][ch],
signal_fields['samps_per_frame'][ch], signal_fields['skew'][ch],
signal_fields['byte_offset'][ch], signal_fields['adc_gain'][ch],
signal_fields['baseline'][ch], signal_fields['units'][ch],
signal_fields['adc_res'][ch], signal_fields['adc_zero'][ch],
signal_fields['init_value'][ch], signal_fields['checksum'][ch],
signal_fields['block_size'][ch],
signal_fields['sig_name'][ch]) = _rx_signal.findall(signal_lines[ch])[0]
for field in SIGNAL_SPECS.index:
# Replace empty strings with their read defaults (which are mostly None)
# Note: Never set a field to None. [None]* n_sig is accurate, indicating
# that different channels can be present or missing.
if signal_fields[field][ch] == '':
signal_fields[field][ch] = SIGNAL_SPECS.loc[field, 'read_default']
# Special case: missing baseline defaults to ADCzero if present
if field == 'baseline' and signal_fields['adc_zero'][ch] != '':
signal_fields['baseline'][ch] = int(signal_fields['adc_zero'][ch])
# Typecast non-empty strings for numerical fields
else:
if SIGNAL_SPECS.loc[field, 'allowed_types'] is int_types:
signal_fields[field][ch] = int(signal_fields[field][ch])
elif SIGNAL_SPECS.loc[field, 'allowed_types'] is float_types:
signal_fields[field][ch] = float(signal_fields[field][ch])
# Special case: adc_gain of 0 means 200
if field == 'adc_gain' and signal_fields['adc_gain'][ch] == 0:
signal_fields['adc_gain'][ch] = 200.
return signal_fields | ['def', '_parse_signal_lines', '(', 'signal_lines', ')', ':', 'n_sig', '=', 'len', '(', 'signal_lines', ')', '# Dictionary for signal fields', 'signal_fields', '=', '{', '}', '# Each dictionary field is a list', 'for', 'field', 'in', 'SIGNAL_SPECS', '.', 'index', ':', 'signal_fields', '[', 'field', ']', '=', 'n_sig', '*', '[', 'None', ']', '# Read string fields from signal line', 'for', 'ch', 'in', 'range', '(', 'n_sig', ')', ':', '(', 'signal_fields', '[', "'file_name'", ']', '[', 'ch', ']', ',', 'signal_fields', '[', "'fmt'", ']', '[', 'ch', ']', ',', 'signal_fields', '[', "'samps_per_frame'", ']', '[', 'ch', ']', ',', 'signal_fields', '[', "'skew'", ']', '[', 'ch', ']', ',', 'signal_fields', '[', "'byte_offset'", ']', '[', 'ch', ']', ',', 'signal_fields', '[', "'adc_gain'", ']', '[', 'ch', ']', ',', 'signal_fields', '[', "'baseline'", ']', '[', 'ch', ']', ',', 'signal_fields', '[', "'units'", ']', '[', 'ch', ']', ',', 'signal_fields', '[', "'adc_res'", ']', '[', 'ch', ']', ',', 'signal_fields', '[', "'adc_zero'", ']', '[', 'ch', ']', ',', 'signal_fields', '[', "'init_value'", ']', '[', 'ch', ']', ',', 'signal_fields', '[', "'checksum'", ']', '[', 'ch', ']', ',', 'signal_fields', '[', "'block_size'", ']', '[', 'ch', ']', ',', 'signal_fields', '[', "'sig_name'", ']', '[', 'ch', ']', ')', '=', '_rx_signal', '.', 'findall', '(', 'signal_lines', '[', 'ch', ']', ')', '[', '0', ']', 'for', 'field', 'in', 'SIGNAL_SPECS', '.', 'index', ':', '# Replace empty strings with their read defaults (which are mostly None)', '# Note: Never set a field to None. [None]* n_sig is accurate, indicating', '# that different channels can be present or missing.', 'if', 'signal_fields', '[', 'field', ']', '[', 'ch', ']', '==', "''", ':', 'signal_fields', '[', 'field', ']', '[', 'ch', ']', '=', 'SIGNAL_SPECS', '.', 'loc', '[', 'field', ',', "'read_default'", ']', '# Special case: missing baseline defaults to ADCzero if present', 'if', 'field', '==', "'baseline'", 'and', 'signal_fields', '[', "'adc_zero'", ']', '[', 'ch', ']', '!=', "''", ':', 'signal_fields', '[', "'baseline'", ']', '[', 'ch', ']', '=', 'int', '(', 'signal_fields', '[', "'adc_zero'", ']', '[', 'ch', ']', ')', '# Typecast non-empty strings for numerical fields', 'else', ':', 'if', 'SIGNAL_SPECS', '.', 'loc', '[', 'field', ',', "'allowed_types'", ']', 'is', 'int_types', ':', 'signal_fields', '[', 'field', ']', '[', 'ch', ']', '=', 'int', '(', 'signal_fields', '[', 'field', ']', '[', 'ch', ']', ')', 'elif', 'SIGNAL_SPECS', '.', 'loc', '[', 'field', ',', "'allowed_types'", ']', 'is', 'float_types', ':', 'signal_fields', '[', 'field', ']', '[', 'ch', ']', '=', 'float', '(', 'signal_fields', '[', 'field', ']', '[', 'ch', ']', ')', '# Special case: adc_gain of 0 means 200', 'if', 'field', '==', "'adc_gain'", 'and', 'signal_fields', '[', "'adc_gain'", ']', '[', 'ch', ']', '==', '0', ':', 'signal_fields', '[', "'adc_gain'", ']', '[', 'ch', ']', '=', '200.', 'return', 'signal_fields'] | Extract fields from a list of signal line strings into a dictionary. | ['Extract', 'fields', 'from', 'a', 'list', 'of', 'signal', 'line', 'strings', 'into', 'a', 'dictionary', '.'] | train | https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/_header.py#L780-L824 |
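Two of the special cases handled in the parser above are easy to miss: a stored gain of 0 means 200, and a missing baseline falls back to the ADC zero. The sketch below restates just that defaulting logic; the `raw` dict is a made-up fragment, not real output of the wfdb regex.

```python
# string values as they might be pulled from one signal line of a .hea header
raw = {'adc_gain': '0', 'baseline': '', 'adc_zero': '1024'}

adc_gain = float(raw['adc_gain'])
if adc_gain == 0:
    adc_gain = 200.0  # WFDB convention: a gain of 0 means 200
baseline = int(raw['adc_zero']) if raw['baseline'] == '' else int(raw['baseline'])

print(adc_gain, baseline)  # 200.0 1024
```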
9,694 | googledatalab/pydatalab | datalab/bigquery/_dataset.py | Dataset.create | def create(self, friendly_name=None, description=None):
"""Creates the Dataset with the specified friendly name and description.
Args:
friendly_name: (optional) the friendly name for the dataset if it is being created.
description: (optional) a description for the dataset if it is being created.
Returns:
The Dataset.
Raises:
Exception if the Dataset could not be created.
"""
if not self.exists():
try:
response = self._api.datasets_insert(self._name_parts,
friendly_name=friendly_name,
description=description)
except Exception as e:
raise e
if 'selfLink' not in response:
raise Exception("Could not create dataset %s" % self._full_name)
return self | python | def create(self, friendly_name=None, description=None):
"""Creates the Dataset with the specified friendly name and description.
Args:
friendly_name: (optional) the friendly name for the dataset if it is being created.
description: (optional) a description for the dataset if it is being created.
Returns:
The Dataset.
Raises:
Exception if the Dataset could not be created.
"""
if not self.exists():
try:
response = self._api.datasets_insert(self._name_parts,
friendly_name=friendly_name,
description=description)
except Exception as e:
raise e
if 'selfLink' not in response:
raise Exception("Could not create dataset %s" % self._full_name)
return self | ['def', 'create', '(', 'self', ',', 'friendly_name', '=', 'None', ',', 'description', '=', 'None', ')', ':', 'if', 'not', 'self', '.', 'exists', '(', ')', ':', 'try', ':', 'response', '=', 'self', '.', '_api', '.', 'datasets_insert', '(', 'self', '.', '_name_parts', ',', 'friendly_name', '=', 'friendly_name', ',', 'description', '=', 'description', ')', 'except', 'Exception', 'as', 'e', ':', 'raise', 'e', 'if', "'selfLink'", 'not', 'in', 'response', ':', 'raise', 'Exception', '(', '"Could not create dataset %s"', '%', 'self', '.', '_full_name', ')', 'return', 'self'] | Creates the Dataset with the specified friendly name and description.
Args:
friendly_name: (optional) the friendly name for the dataset if it is being created.
description: (optional) a description for the dataset if it is being created.
Returns:
The Dataset.
Raises:
Exception if the Dataset could not be created. | ['Creates', 'the', 'Dataset', 'with', 'the', 'specified', 'friendly', 'name', 'and', 'description', '.'] | train | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_dataset.py#L121-L141 |
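A minimal usage sketch for the method in the row above, assuming the usual `datalab.bigquery.Dataset('<name>')` constructor; the dataset name, friendly name, and description are placeholders. Because `create()` returns the Dataset and only creates it when it does not already exist, the call can be chained safely.

```python
import datalab.bigquery as bq

# placeholder dataset name; create() is a no-op if the dataset already exists
ds = bq.Dataset('scratch_dataset').create(
    friendly_name='Scratch dataset',
    description='Temporary tables for ad-hoc analysis')
```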
9,695 | fermiPy/fermipy | fermipy/gtanalysis.py | GTAnalysis.clone | def clone(self, config, **kwargs):
"""Make a clone of this analysis instance."""
gta = GTAnalysis(config, **kwargs)
gta._roi = copy.deepcopy(self.roi)
return gta | python | def clone(self, config, **kwargs):
"""Make a clone of this analysis instance."""
gta = GTAnalysis(config, **kwargs)
gta._roi = copy.deepcopy(self.roi)
return gta | ['def', 'clone', '(', 'self', ',', 'config', ',', '*', '*', 'kwargs', ')', ':', 'gta', '=', 'GTAnalysis', '(', 'config', ',', '*', '*', 'kwargs', ')', 'gta', '.', '_roi', '=', 'copy', '.', 'deepcopy', '(', 'self', '.', 'roi', ')', 'return', 'gta'] | Make a clone of this analysis instance. | ['Make', 'a', 'clone', 'of', 'this', 'analysis', 'instance', '.'] | train | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/gtanalysis.py#L551-L555 |
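A brief sketch of how the clone in the row above might be used: because the ROI is deep-copied, the clone can be modified or refit without disturbing the original analysis. The configuration filename and source name are placeholders, and `gta.config` is assumed to hold the merged configuration accepted by the constructor.

```python
from fermipy.gtanalysis import GTAnalysis

gta = GTAnalysis('config.yaml')  # placeholder configuration file
gta.setup()

# independent copy: same configuration, deep-copied ROI model
gta_copy = gta.clone(gta.config)
gta_copy.free_source('mkn421')   # does not touch sources in `gta`
```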
9,696 | SheffieldML/GPy | GPy/examples/classification.py | crescent_data | def crescent_data(model_type='Full', num_inducing=10, seed=default_seed, kernel=None, optimize=True, plot=True):
"""
Run a Gaussian process classification on the crescent data. The demonstration calls the basic GP classification model and uses EP to approximate the likelihood.
:param model_type: type of model to fit ['Full', 'FITC', 'DTC'].
:param inducing: number of inducing variables (only used for 'FITC' or 'DTC').
:type inducing: int
:param seed: seed value for data generation.
:type seed: int
:param kernel: kernel to use in the model
:type kernel: a GPy kernel
"""
try:import pods
except ImportError:print('pods unavailable, see https://github.com/sods/ods for example datasets')
data = pods.datasets.crescent_data(seed=seed)
Y = data['Y']
Y[Y.flatten()==-1] = 0
if model_type == 'Full':
m = GPy.models.GPClassification(data['X'], Y, kernel=kernel)
elif model_type == 'DTC':
m = GPy.models.SparseGPClassification(data['X'], Y, kernel=kernel, num_inducing=num_inducing)
m['.*len'] = 10.
elif model_type == 'FITC':
m = GPy.models.FITCClassification(data['X'], Y, kernel=kernel, num_inducing=num_inducing)
m['.*len'] = 3.
if optimize:
m.optimize(messages=1)
if plot:
m.plot()
print(m)
return m | python | def crescent_data(model_type='Full', num_inducing=10, seed=default_seed, kernel=None, optimize=True, plot=True):
"""
Run a Gaussian process classification on the crescent data. The demonstration calls the basic GP classification model and uses EP to approximate the likelihood.
:param model_type: type of model to fit ['Full', 'FITC', 'DTC'].
:param inducing: number of inducing variables (only used for 'FITC' or 'DTC').
:type inducing: int
:param seed: seed value for data generation.
:type seed: int
:param kernel: kernel to use in the model
:type kernel: a GPy kernel
"""
try:import pods
except ImportError:print('pods unavailable, see https://github.com/sods/ods for example datasets')
data = pods.datasets.crescent_data(seed=seed)
Y = data['Y']
Y[Y.flatten()==-1] = 0
if model_type == 'Full':
m = GPy.models.GPClassification(data['X'], Y, kernel=kernel)
elif model_type == 'DTC':
m = GPy.models.SparseGPClassification(data['X'], Y, kernel=kernel, num_inducing=num_inducing)
m['.*len'] = 10.
elif model_type == 'FITC':
m = GPy.models.FITCClassification(data['X'], Y, kernel=kernel, num_inducing=num_inducing)
m['.*len'] = 3.
if optimize:
m.optimize(messages=1)
if plot:
m.plot()
print(m)
return m | ['def', 'crescent_data', '(', 'model_type', '=', "'Full'", ',', 'num_inducing', '=', '10', ',', 'seed', '=', 'default_seed', ',', 'kernel', '=', 'None', ',', 'optimize', '=', 'True', ',', 'plot', '=', 'True', ')', ':', 'try', ':', 'import', 'pods', 'except', 'ImportError', ':', 'print', '(', "'pods unavailable, see https://github.com/sods/ods for example datasets'", ')', 'data', '=', 'pods', '.', 'datasets', '.', 'crescent_data', '(', 'seed', '=', 'seed', ')', 'Y', '=', 'data', '[', "'Y'", ']', 'Y', '[', 'Y', '.', 'flatten', '(', ')', '==', '-', '1', ']', '=', '0', 'if', 'model_type', '==', "'Full'", ':', 'm', '=', 'GPy', '.', 'models', '.', 'GPClassification', '(', 'data', '[', "'X'", ']', ',', 'Y', ',', 'kernel', '=', 'kernel', ')', 'elif', 'model_type', '==', "'DTC'", ':', 'm', '=', 'GPy', '.', 'models', '.', 'SparseGPClassification', '(', 'data', '[', "'X'", ']', ',', 'Y', ',', 'kernel', '=', 'kernel', ',', 'num_inducing', '=', 'num_inducing', ')', 'm', '[', "'.*len'", ']', '=', '10.', 'elif', 'model_type', '==', "'FITC'", ':', 'm', '=', 'GPy', '.', 'models', '.', 'FITCClassification', '(', 'data', '[', "'X'", ']', ',', 'Y', ',', 'kernel', '=', 'kernel', ',', 'num_inducing', '=', 'num_inducing', ')', 'm', '[', "'.*len'", ']', '=', '3.', 'if', 'optimize', ':', 'm', '.', 'optimize', '(', 'messages', '=', '1', ')', 'if', 'plot', ':', 'm', '.', 'plot', '(', ')', 'print', '(', 'm', ')', 'return', 'm'] | Run a Gaussian process classification on the crescent data. The demonstration calls the basic GP classification model and uses EP to approximate the likelihood.
:param model_type: type of model to fit ['Full', 'FITC', 'DTC'].
:param inducing: number of inducing variables (only used for 'FITC' or 'DTC').
:type inducing: int
:param seed: seed value for data generation.
:type seed: int
:param kernel: kernel to use in the model
:type kernel: a GPy kernel | ['Run', 'a', 'Gaussian', 'process', 'classification', 'on', 'the', 'crescent', 'data', '.', 'The', 'demonstration', 'calls', 'the', 'basic', 'GP', 'classification', 'model', 'and', 'uses', 'EP', 'to', 'approximate', 'the', 'likelihood', '.'] | train | https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/examples/classification.py#L225-L260 |
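A usage sketch for the example function in the row above. It assumes the optional `pods` datasets package is installed; the RBF kernel and the sparse (DTC) settings are just one illustrative choice.

```python
import GPy
from GPy.examples import classification

# sparse (DTC) EP classification with 10 inducing points and an RBF kernel
kern = GPy.kern.RBF(input_dim=2)
m = classification.crescent_data(model_type='DTC', num_inducing=10,
                                 kernel=kern, optimize=True, plot=False)
print(m.log_likelihood())
```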
9,697 | emc-openstack/storops | storops/lib/parser.py | ParserConfigFactory._get_converter | def _get_converter(self, converter_str):
"""find converter function reference by name
find converter by name, converter name follows this convention:
Class.method
or:
method
The first type of converter class/function must be available in
current module.
The second type of converter must be available in `__builtin__`
(or `builtins` in python3) module.
:param converter_str: string representation of the converter func
:return: function reference
"""
ret = None
if converter_str is not None:
converter_desc_list = converter_str.split('.')
if len(converter_desc_list) == 1:
converter = converter_desc_list[0]
# default to `converter`
ret = getattr(cvt, converter, None)
if ret is None:
# try module converter
ret = self.get_converter(converter)
if ret is None:
ret = self.get_resource_clz_by_name(converter)
if ret is None:
ret = self.get_enum_by_name(converter)
if ret is None:
# try parser config
ret = self.get(converter)
if ret is None and converter_str is not None:
raise ValueError(
'Specified converter not supported: {}'.format(
converter_str))
return ret | python | def _get_converter(self, converter_str):
"""find converter function reference by name
find converter by name, converter name follows this convention:
Class.method
or:
method
The first type of converter class/function must be available in
current module.
The second type of converter must be available in `__builtin__`
(or `builtins` in python3) module.
:param converter_str: string representation of the converter func
:return: function reference
"""
ret = None
if converter_str is not None:
converter_desc_list = converter_str.split('.')
if len(converter_desc_list) == 1:
converter = converter_desc_list[0]
# default to `converter`
ret = getattr(cvt, converter, None)
if ret is None:
# try module converter
ret = self.get_converter(converter)
if ret is None:
ret = self.get_resource_clz_by_name(converter)
if ret is None:
ret = self.get_enum_by_name(converter)
if ret is None:
# try parser config
ret = self.get(converter)
if ret is None and converter_str is not None:
raise ValueError(
'Specified converter not supported: {}'.format(
converter_str))
return ret | ['def', '_get_converter', '(', 'self', ',', 'converter_str', ')', ':', 'ret', '=', 'None', 'if', 'converter_str', 'is', 'not', 'None', ':', 'converter_desc_list', '=', 'converter_str', '.', 'split', '(', "'.'", ')', 'if', 'len', '(', 'converter_desc_list', ')', '==', '1', ':', 'converter', '=', 'converter_desc_list', '[', '0', ']', '# default to `converter`', 'ret', '=', 'getattr', '(', 'cvt', ',', 'converter', ',', 'None', ')', 'if', 'ret', 'is', 'None', ':', '# try module converter', 'ret', '=', 'self', '.', 'get_converter', '(', 'converter', ')', 'if', 'ret', 'is', 'None', ':', 'ret', '=', 'self', '.', 'get_resource_clz_by_name', '(', 'converter', ')', 'if', 'ret', 'is', 'None', ':', 'ret', '=', 'self', '.', 'get_enum_by_name', '(', 'converter', ')', 'if', 'ret', 'is', 'None', ':', '# try parser config', 'ret', '=', 'self', '.', 'get', '(', 'converter', ')', 'if', 'ret', 'is', 'None', 'and', 'converter_str', 'is', 'not', 'None', ':', 'raise', 'ValueError', '(', "'Specified converter not supported: {}'", '.', 'format', '(', 'converter_str', ')', ')', 'return', 'ret'] | find converter function reference by name
find converter by name, converter name follows this convention:
Class.method
or:
method
The first type of converter class/function must be available in
current module.
The second type of converter must be available in `__builtin__`
(or `builtins` in python3) module.
:param converter_str: string representation of the converter func
:return: function reference | ['find', 'converter', 'function', 'reference', 'by', 'name'] | train | https://github.com/emc-openstack/storops/blob/24b4b13bf065c0ef0538dd0b5ebb8f25d24176bd/storops/lib/parser.py#L398-L443 |
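The method in the row above resolves a converter name by trying several registries in priority order and failing loudly when nothing matches. The snippet below restates that first-match-wins pattern with plain dicts; it is an illustration, not storops code.

```python
def resolve(name, *registries):
    for registry in registries:
        hit = registry.get(name)
        if hit is not None:
            return hit
    raise ValueError('Specified converter not supported: {}'.format(name))

builtin_converters = {'to_int': int, 'to_float': float}
resource_classes = {}

print(resolve('to_int', builtin_converters, resource_classes))  # <class 'int'>
# resolve('to_widget', builtin_converters, resource_classes) would raise ValueError
```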
9,698 | sbg/sevenbridges-python | sevenbridges/models/storage_export.py | Export.query | def query(cls, volume=None, state=None, offset=None,
limit=None, api=None):
"""
Query (List) exports.
:param volume: Optional volume identifier.
:param state: Optional export state.
:param api: Api instance.
:return: Collection object.
"""
api = api or cls._API
if volume:
volume = Transform.to_volume(volume)
return super(Export, cls)._query(
url=cls._URL['query'], volume=volume, state=state, offset=offset,
limit=limit, fields='_all', api=api
) | python | def query(cls, volume=None, state=None, offset=None,
limit=None, api=None):
"""
Query (List) exports.
:param volume: Optional volume identifier.
:param state: Optional export state.
:param api: Api instance.
:return: Collection object.
"""
api = api or cls._API
if volume:
volume = Transform.to_volume(volume)
return super(Export, cls)._query(
url=cls._URL['query'], volume=volume, state=state, offset=offset,
limit=limit, fields='_all', api=api
) | ['def', 'query', '(', 'cls', ',', 'volume', '=', 'None', ',', 'state', '=', 'None', ',', 'offset', '=', 'None', ',', 'limit', '=', 'None', ',', 'api', '=', 'None', ')', ':', 'api', '=', 'api', 'or', 'cls', '.', '_API', 'if', 'volume', ':', 'volume', '=', 'Transform', '.', 'to_volume', '(', 'volume', ')', 'return', 'super', '(', 'Export', ',', 'cls', ')', '.', '_query', '(', 'url', '=', 'cls', '.', '_URL', '[', "'query'", ']', ',', 'volume', '=', 'volume', ',', 'state', '=', 'state', ',', 'offset', '=', 'offset', ',', 'limit', '=', 'limit', ',', 'fields', '=', "'_all'", ',', 'api', '=', 'api', ')'] | Query (List) exports.
:param volume: Optional volume identifier.
:param state: Optional export state.
:param api: Api instance.
:return: Collection object. | ['Query', '(', 'List', ')', 'exports', '.', ':', 'param', 'volume', ':', 'Optional', 'volume', 'identifier', '.', ':', 'param', 'state', ':', 'Optional', 'import', 'sate', '.', ':', 'param', 'api', ':', 'Api', 'instance', '.', ':', 'return', ':', 'Collection', 'object', '.'] | train | https://github.com/sbg/sevenbridges-python/blob/f62640d1018d959f0b686f2dbe5e183085336607/sevenbridges/models/storage_export.py#L125-L143 |
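A usage sketch for the class method in the row above, called through the public client; the API URL, token, volume id, and the 'COMPLETED' state value are placeholders or assumptions rather than verified values.

```python
import sevenbridges as sbg

api = sbg.Api(url='https://api.sbgenomics.com/v2', token='<AUTH_TOKEN>')

# list up to 50 exports for one volume, filtered by state
exports = api.exports.query(volume='my-division/my-volume',
                            state='COMPLETED', limit=50)
for export in exports:
    print(export.id, export.state)
```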
9,699 | 20c/vodka | vodka/config/validators.py | host | def host(value):
""" Validates that the value is a valid network location """
if not value:
return (True, "")
try:
host,port = value.split(":")
except ValueError as _:
return (False, "value needs to be <host>:<port>")
try:
int(port)
except ValueError as _:
return (False, "port component of the host address needs to be a number")
return (True, "") | python | def host(value):
""" Validates that the value is a valid network location """
if not value:
return (True, "")
try:
host,port = value.split(":")
except ValueError as _:
return (False, "value needs to be <host>:<port>")
try:
int(port)
except ValueError as _:
return (False, "port component of the host address needs to be a number")
return (True, "") | ['def', 'host', '(', 'value', ')', ':', 'if', 'not', 'value', ':', 'return', '(', 'True', ',', '""', ')', 'try', ':', 'host', ',', 'port', '=', 'value', '.', 'split', '(', '":"', ')', 'except', 'ValueError', 'as', '_', ':', 'return', '(', 'False', ',', '"value needs to be <host>:<port>"', ')', 'try', ':', 'int', '(', 'port', ')', 'except', 'ValueError', 'as', '_', ':', 'return', '(', 'False', ',', '"port component of the host address needs to be a number"', ')', 'return', '(', 'True', ',', '""', ')'] | Validates that the value is a valid network location | ['Validates', 'that', 'the', 'value', 'is', 'a', 'valid', 'network', 'location'] | train | https://github.com/20c/vodka/blob/9615148ac6560298453704bb5246b35b66b3339c/vodka/config/validators.py#L19-L33 |
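A few example inputs and the (valid, message) tuples the validator in the row above returns, assuming `host` is imported from vodka.config.validators.

```python
from vodka.config.validators import host

print(host("127.0.0.1:8080"))  # (True, "")
print(host(""))                # (True, "") -- empty values are accepted
print(host("localhost"))       # (False, "value needs to be <host>:<port>")
print(host("localhost:http"))  # (False, "port component of the host address needs to be a number")
```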