code | docstring | text
---|---|---|
def authenticate(self, username, password, priv_lvl=TAC_PLUS_PRIV_LVL_MIN,
authen_type=TAC_PLUS_AUTHEN_TYPE_ASCII,
chap_ppp_id=None, chap_challenge=None,
rem_addr=TAC_PLUS_VIRTUAL_REM_ADDR, port=TAC_PLUS_VIRTUAL_PORT):
"""
Authenticate to a TACACS+ server with a username and password.
:param username:
:param password:
:param priv_lvl:
:param authen_type: TAC_PLUS_AUTHEN_TYPE_ASCII,
TAC_PLUS_AUTHEN_TYPE_PAP,
TAC_PLUS_AUTHEN_TYPE_CHAP
:param chap_ppp_id: PPP ID when authen_type == 'chap'
:param chap_challenge: challenge value when authen_type == 'chap'
:param rem_addr: AAA request source, default to TAC_PLUS_VIRTUAL_REM_ADDR
:param port: AAA port, default to TAC_PLUS_VIRTUAL_PORT
:return: TACACSAuthenticationReply
:raises: socket.timeout, socket.error
"""
start_data = six.b('')
if authen_type in (TAC_PLUS_AUTHEN_TYPE_PAP,
TAC_PLUS_AUTHEN_TYPE_CHAP):
self.version_min = TAC_PLUS_MINOR_VER_ONE
if authen_type == TAC_PLUS_AUTHEN_TYPE_PAP:
start_data = six.b(password)
if authen_type == TAC_PLUS_AUTHEN_TYPE_CHAP:
if not isinstance(chap_ppp_id, six.string_types):
raise ValueError('chap_ppp_id must be a string')
if len(chap_ppp_id) != 1:
raise ValueError('chap_ppp_id must be a 1-byte string')
if not isinstance(chap_challenge, six.string_types):
raise ValueError('chap_challenge must be a string')
if len(chap_challenge) > 255:
raise ValueError('chap_challenge may not be more than 255 bytes')
start_data = (
six.b(chap_ppp_id) +
six.b(chap_challenge) +
md5(six.b(
chap_ppp_id + password + chap_challenge
)).digest()
)
with self.closing():
packet = self.send(
TACACSAuthenticationStart(username, authen_type, priv_lvl,
start_data, rem_addr=rem_addr, port=port),
TAC_PLUS_AUTHEN
)
reply = TACACSAuthenticationReply.unpacked(packet.body)
logger.debug('\n'.join([
reply.__class__.__name__,
'recv header <%s>' % packet.header,
'recv body <%s>' % reply
]))
if authen_type == TAC_PLUS_AUTHEN_TYPE_ASCII and reply.getpass:
packet = self.send(TACACSAuthenticationContinue(password),
TAC_PLUS_AUTHEN,
packet.seq_no + 1)
reply = TACACSAuthenticationReply.unpacked(packet.body)
logger.debug('\n'.join([
reply.__class__.__name__,
'recv header <%s>' % packet.header,
'recv body <%s>' % reply
]))
if reply.flags == TAC_PLUS_CONTINUE_FLAG_ABORT:
reply.status = TAC_PLUS_AUTHEN_STATUS_FAIL
return reply | Authenticate to a TACACS+ server with a username and password.
:param username:
:param password:
:param priv_lvl:
:param authen_type: TAC_PLUS_AUTHEN_TYPE_ASCII,
TAC_PLUS_AUTHEN_TYPE_PAP,
TAC_PLUS_AUTHEN_TYPE_CHAP
:param chap_ppp_id: PPP ID when authen_type == 'chap'
:param chap_challenge: challenge value when authen_type == 'chap'
:param rem_addr: AAA request source, default to TAC_PLUS_VIRTUAL_REM_ADDR
:param port: AAA port, default to TAC_PLUS_VIRTUAL_PORT
:return: TACACSAuthenticationReply
:raises: socket.timeout, socket.error | Below is the instruction that describes the task:
### Input:
Authenticate to a TACACS+ server with a username and password.
:param username:
:param password:
:param priv_lvl:
:param authen_type: TAC_PLUS_AUTHEN_TYPE_ASCII,
TAC_PLUS_AUTHEN_TYPE_PAP,
TAC_PLUS_AUTHEN_TYPE_CHAP
:param chap_ppp_id: PPP ID when authen_type == 'chap'
:param chap_challenge: challenge value when authen_type == 'chap'
:param rem_addr: AAA request source, default to TAC_PLUS_VIRTUAL_REM_ADDR
:param port: AAA port, default to TAC_PLUS_VIRTUAL_PORT
:return: TACACSAuthenticationReply
:raises: socket.timeout, socket.error
### Response:
def authenticate(self, username, password, priv_lvl=TAC_PLUS_PRIV_LVL_MIN,
authen_type=TAC_PLUS_AUTHEN_TYPE_ASCII,
chap_ppp_id=None, chap_challenge=None,
rem_addr=TAC_PLUS_VIRTUAL_REM_ADDR, port=TAC_PLUS_VIRTUAL_PORT):
"""
Authenticate to a TACACS+ server with a username and password.
:param username:
:param password:
:param priv_lvl:
:param authen_type: TAC_PLUS_AUTHEN_TYPE_ASCII,
TAC_PLUS_AUTHEN_TYPE_PAP,
TAC_PLUS_AUTHEN_TYPE_CHAP
:param chap_ppp_id: PPP ID when authen_type == 'chap'
:param chap_challenge: challenge value when authen_type == 'chap'
:param rem_addr: AAA request source, default to TAC_PLUS_VIRTUAL_REM_ADDR
:param port: AAA port, default to TAC_PLUS_VIRTUAL_PORT
:return: TACACSAuthenticationReply
:raises: socket.timeout, socket.error
"""
start_data = six.b('')
if authen_type in (TAC_PLUS_AUTHEN_TYPE_PAP,
TAC_PLUS_AUTHEN_TYPE_CHAP):
self.version_min = TAC_PLUS_MINOR_VER_ONE
if authen_type == TAC_PLUS_AUTHEN_TYPE_PAP:
start_data = six.b(password)
if authen_type == TAC_PLUS_AUTHEN_TYPE_CHAP:
if not isinstance(chap_ppp_id, six.string_types):
raise ValueError('chap_ppp_id must be a string')
if len(chap_ppp_id) != 1:
raise ValueError('chap_ppp_id must be a 1-byte string')
if not isinstance(chap_challenge, six.string_types):
raise ValueError('chap_challenge must be a string')
if len(chap_challenge) > 255:
raise ValueError('chap_challenge may not be more than 255 bytes')
start_data = (
six.b(chap_ppp_id) +
six.b(chap_challenge) +
md5(six.b(
chap_ppp_id + password + chap_challenge
)).digest()
)
with self.closing():
packet = self.send(
TACACSAuthenticationStart(username, authen_type, priv_lvl,
start_data, rem_addr=rem_addr, port=port),
TAC_PLUS_AUTHEN
)
reply = TACACSAuthenticationReply.unpacked(packet.body)
logger.debug('\n'.join([
reply.__class__.__name__,
'recv header <%s>' % packet.header,
'recv body <%s>' % reply
]))
if authen_type == TAC_PLUS_AUTHEN_TYPE_ASCII and reply.getpass:
packet = self.send(TACACSAuthenticationContinue(password),
TAC_PLUS_AUTHEN,
packet.seq_no + 1)
reply = TACACSAuthenticationReply.unpacked(packet.body)
logger.debug('\n'.join([
reply.__class__.__name__,
'recv header <%s>' % packet.header,
'recv body <%s>' % reply
]))
if reply.flags == TAC_PLUS_CONTINUE_FLAG_ABORT:
reply.status = TAC_PLUS_AUTHEN_STATUS_FAIL
return reply |
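
For orientation, a minimal client-side sketch of calling an authenticate method like the one above; it assumes the tacacs_plus package's TACACSClient, and the host, port, and shared secret are placeholders:

import socket
from tacacs_plus.client import TACACSClient
from tacacs_plus.flags import TAC_PLUS_AUTHEN_TYPE_PAP

client = TACACSClient('127.0.0.1', 49, 'shared_secret', timeout=10)  # placeholder server
try:
    reply = client.authenticate('bob', 's3cr3t', authen_type=TAC_PLUS_AUTHEN_TYPE_PAP)
    print('pass' if reply.valid else 'fail')  # assumes the reply exposes a .valid flag
except (socket.timeout, socket.error) as exc:
    print('transport error: %s' % exc)
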
def get_timestamp(timezone_name, year, month, day, hour=0, minute=0):
"""Epoch timestamp from timezone, year, month, day, hour and minute."""
tz = pytz.timezone(timezone_name)
tz_datetime = tz.localize(datetime(year, month, day, hour, minute))
timestamp = calendar.timegm(tz_datetime.utctimetuple())
return timestamp | Epoch timestamp from timezone, year, month, day, hour and minute. | Below is the instruction that describes the task:
### Input:
Epoch timestamp from timezone, year, month, day, hour and minute.
### Response:
def get_timestamp(timezone_name, year, month, day, hour=0, minute=0):
"""Epoch timestamp from timezone, year, month, day, hour and minute."""
tz = pytz.timezone(timezone_name)
tz_datetime = tz.localize(datetime(year, month, day, hour, minute))
timestamp = calendar.timegm(tz_datetime.utctimetuple())
return timestamp |
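
A quick self-contained check of the helper above; it only needs pytz, and the dates are arbitrary:

import calendar
from datetime import datetime
import pytz

def get_timestamp(timezone_name, year, month, day, hour=0, minute=0):
    """Epoch timestamp from timezone, year, month, day, hour and minute."""
    tz = pytz.timezone(timezone_name)
    tz_datetime = tz.localize(datetime(year, month, day, hour, minute))
    return calendar.timegm(tz_datetime.utctimetuple())

print(get_timestamp('UTC', 2021, 1, 1))           # 1609459200
print(get_timestamp('Europe/Paris', 2021, 1, 1))  # 1609455600, one hour earlier in UTC
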
def set_scroll_callback(window, cbfun):
"""
Sets the scroll callback.
Wrapper for:
GLFWscrollfun glfwSetScrollCallback(GLFWwindow* window, GLFWscrollfun cbfun);
"""
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_long)).contents.value
if window_addr in _scroll_callback_repository:
previous_callback = _scroll_callback_repository[window_addr]
else:
previous_callback = None
if cbfun is None:
cbfun = 0
c_cbfun = _GLFWscrollfun(cbfun)
_scroll_callback_repository[window_addr] = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetScrollCallback(window, cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0] | Sets the scroll callback.
Wrapper for:
GLFWscrollfun glfwSetScrollCallback(GLFWwindow* window, GLFWscrollfun cbfun); | Below is the instruction that describes the task:
### Input:
Sets the scroll callback.
Wrapper for:
GLFWscrollfun glfwSetScrollCallback(GLFWwindow* window, GLFWscrollfun cbfun);
### Response:
def set_scroll_callback(window, cbfun):
"""
Sets the scroll callback.
Wrapper for:
GLFWscrollfun glfwSetScrollCallback(GLFWwindow* window, GLFWscrollfun cbfun);
"""
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_long)).contents.value
if window_addr in _scroll_callback_repository:
previous_callback = _scroll_callback_repository[window_addr]
else:
previous_callback = None
if cbfun is None:
cbfun = 0
c_cbfun = _GLFWscrollfun(cbfun)
_scroll_callback_repository[window_addr] = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetScrollCallback(window, cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0] |
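
For context, the same registration is normally done through pyGLFW's public wrapper; a rough sketch (window size, title, and loop are illustrative only):

import glfw

def on_scroll(window, xoffset, yoffset):
    # xoffset/yoffset are the scroll deltas reported by GLFW
    print('scroll', xoffset, yoffset)

if glfw.init():
    window = glfw.create_window(640, 480, 'scroll demo', None, None)
    glfw.make_context_current(window)
    glfw.set_scroll_callback(window, on_scroll)
    while not glfw.window_should_close(window):
        glfw.poll_events()
    glfw.terminate()
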
def openAnything(source, searchpaths=None):
"""URI, filename, or string --> stream
This function lets you define parsers that take any input source
(URL, pathname to local or network file, or actual data as a string)
and deal with it in a uniform manner. Returned object is guaranteed
to have all the basic stdio read methods (read, readline, readlines).
Just .close() the object when you're done with it.
Examples:
>>> from xml.dom import minidom
>>> sock = openAnything("http://localhost/kant.xml")
>>> doc = minidom.parse(sock)
>>> sock.close()
>>> sock = openAnything("c:\\inetpub\\wwwroot\\kant.xml")
>>> doc = minidom.parse(sock)
>>> sock.close()
>>> sock = openAnything("<ref id='conjunction'><text>and</text><text>or</text></ref>")
>>> doc = minidom.parse(sock)
>>> sock.close()
"""
if hasattr(source, "read"):
return source
if source == "-":
import sys
return sys.stdin
# try to open with urllib (if source is http, ftp, or file URL)
import urllib
try:
return urllib.urlopen(source)
except (IOError, OSError):
pass
# try to open with native open function (if source is pathname)
for path in searchpaths or ['.']:
try:
return open(os.path.join(path, source))
except (IOError, OSError):
pass
# treat source as string
import StringIO
return StringIO.StringIO(str(source)) | URI, filename, or string --> stream
This function lets you define parsers that take any input source
(URL, pathname to local or network file, or actual data as a string)
and deal with it in a uniform manner. Returned object is guaranteed
to have all the basic stdio read methods (read, readline, readlines).
Just .close() the object when you're done with it.
Examples:
>>> from xml.dom import minidom
>>> sock = openAnything("http://localhost/kant.xml")
>>> doc = minidom.parse(sock)
>>> sock.close()
>>> sock = openAnything("c:\\inetpub\\wwwroot\\kant.xml")
>>> doc = minidom.parse(sock)
>>> sock.close()
>>> sock = openAnything("<ref id='conjunction'><text>and</text><text>or</text></ref>")
>>> doc = minidom.parse(sock)
>>> sock.close() | Below is the instruction that describes the task:
### Input:
URI, filename, or string --> stream
This function lets you define parsers that take any input source
(URL, pathname to local or network file, or actual data as a string)
and deal with it in a uniform manner. Returned object is guaranteed
to have all the basic stdio read methods (read, readline, readlines).
Just .close() the object when you're done with it.
Examples:
>>> from xml.dom import minidom
>>> sock = openAnything("http://localhost/kant.xml")
>>> doc = minidom.parse(sock)
>>> sock.close()
>>> sock = openAnything("c:\\inetpub\\wwwroot\\kant.xml")
>>> doc = minidom.parse(sock)
>>> sock.close()
>>> sock = openAnything("<ref id='conjunction'><text>and</text><text>or</text></ref>")
>>> doc = minidom.parse(sock)
>>> sock.close()
### Response:
def openAnything(source, searchpaths=None):
"""URI, filename, or string --> stream
This function lets you define parsers that take any input source
(URL, pathname to local or network file, or actual data as a string)
and deal with it in a uniform manner. Returned object is guaranteed
to have all the basic stdio read methods (read, readline, readlines).
Just .close() the object when you're done with it.
Examples:
>>> from xml.dom import minidom
>>> sock = openAnything("http://localhost/kant.xml")
>>> doc = minidom.parse(sock)
>>> sock.close()
>>> sock = openAnything("c:\\inetpub\\wwwroot\\kant.xml")
>>> doc = minidom.parse(sock)
>>> sock.close()
>>> sock = openAnything("<ref id='conjunction'><text>and</text><text>or</text></ref>")
>>> doc = minidom.parse(sock)
>>> sock.close()
"""
if hasattr(source, "read"):
return source
if source == "-":
import sys
return sys.stdin
# try to open with urllib (if source is http, ftp, or file URL)
import urllib
try:
return urllib.urlopen(source)
except (IOError, OSError):
pass
# try to open with native open function (if source is pathname)
for path in searchpaths or ['.']:
try:
return open(os.path.join(path, source))
except (IOError, OSError):
pass
# treat source as string
import StringIO
return StringIO.StringIO(str(source)) |
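
The function above is Python 2 only (urllib.urlopen, the StringIO module, and an os import it never states). A rough Python 3 equivalent of the same fallback chain, offered as a sketch rather than a drop-in replacement:

import io
import os
import sys
import urllib.request

def open_anything(source, searchpaths=None):
    """URI, filename, or string --> stream (Python 3 sketch)."""
    if hasattr(source, "read"):
        return source
    if source == "-":
        return sys.stdin
    try:
        return urllib.request.urlopen(source)        # http/ftp/file URL?
    except (OSError, ValueError):
        pass
    for path in searchpaths or ['.']:
        try:
            return open(os.path.join(path, source))  # local file?
        except OSError:
            pass
    return io.StringIO(str(source))                  # fall back: treat it as literal data
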
def touidref(src, dst, src_relation, src_portal_type, fieldname):
"""Convert an archetypes reference in src/src_relation to a UIDReference
in dst/fieldname.
"""
field = dst.getField(fieldname)
refs = src.getRefs(relationship=src_relation)
if len(refs) == 1:
value = get_uid(refs[0])
elif len(refs) > 1:
value = filter(lambda x: x, [get_uid(ref) for ref in refs])
else:
value = field.get(src)
if not value:
value = ''
if not field:
raise RuntimeError('Cannot find field %s/%s' % (fieldname, src))
if field.required and not value:
logger.exception('Required %s field %s/%s has no value' %
(src.portal_type, src, fieldname))
field.set(src, value) | Convert an archetypes reference in src/src_relation to a UIDReference
in dst/fieldname. | Below is the instruction that describes the task:
### Input:
Convert an archetypes reference in src/src_relation to a UIDReference
in dst/fieldname.
### Response:
def touidref(src, dst, src_relation, src_portal_type, fieldname):
"""Convert an archetypes reference in src/src_relation to a UIDReference
in dst/fieldname.
"""
field = dst.getField(fieldname)
refs = src.getRefs(relationship=src_relation)
if len(refs) == 1:
value = get_uid(refs[0])
elif len(refs) > 1:
value = filter(lambda x: x, [get_uid(ref) for ref in refs])
else:
value = field.get(src)
if not value:
value = ''
if not field:
raise RuntimeError('Cannot find field %s/%s' % (fieldname, src))
if field.required and not value:
logger.exception('Required %s field %s/%s has no value' %
(src.portal_type, src, fieldname))
field.set(src, value) |
def auth(alias=None, url=None, cfg="~/.xnat_auth"):
'''
Read connection details from an xnat_auth XML file
Example:
>>> import yaxil
>>> auth = yaxil.auth('xnatastic')
>>> auth.url, auth.username, auth.password
('https://www.xnatastic.org/', 'username', '********')
:param alias: XNAT alias
:type alias: str
:param url: XNAT URL
:type url: str
:param cfg: Configuration file
:type cfg: str
:returns: Named tuple of (url, username, password)
:rtype: :mod:`yaxil.XnatAuth`
'''
if not alias and not url:
raise ValueError('you must provide an alias or url argument')
if alias and url:
raise ValueError('cannot provide both alias and url arguments')
# check and parse config file
cfg = os.path.expanduser(cfg)
if not os.path.exists(cfg):
raise AuthError("could not locate auth file %s" % cfg)
tree = etree.parse(os.path.expanduser(cfg))
# search by alias or url
res = None
if alias:
res = tree.findall("./%s" % alias)
if url:
res = tree.findall("./*/[url='%s']" % url)
if not res:
raise AuthError("failed to locate xnat credentials within %s" % cfg)
elif len(res) > 1:
raise AuthError("found too many sets of credentials within %s" % cfg)
res = res.pop()
# get url
url = res.findall("url")
if not url:
raise AuthError("no url for %s in %s" % (alias, cfg))
elif len(url) > 1:
raise AuthError("too many urls for %s in %s" % (alias, cfg))
# get username
username = res.findall("username")
if not username:
raise AuthError("no username for %s in %s" % (alias, cfg))
elif len(username) > 1:
raise AuthError("too many usernames for %s in %s" % (alias, cfg))
# get password
password = res.findall("password")
if not password:
raise AuthError("no password for %s in %s" % (alias, cfg))
elif len(password) > 1:
raise AuthError("too many passwords for %s in %s" % (alias, cfg))
return XnatAuth(url=url.pop().text, username=username.pop().text,
password=password.pop().text) | Read connection details from an xnat_auth XML file
Example:
>>> import yaxil
>>> auth = yaxil.auth('xnatastic')
>>> auth.url, auth.username, auth.password
('https://www.xnatastic.org/', 'username', '********')
:param alias: XNAT alias
:type alias: str
:param url: XNAT URL
:type url: str
:param cfg: Configuration file
:type cfg: str
:returns: Named tuple of (url, username, password)
:rtype: :mod:`yaxil.XnatAuth` | Below is the instruction that describes the task:
### Input:
Read connection details from an xnat_auth XML file
Example:
>>> import yaxil
>>> auth = yaxil.auth('xnatastic')
>>> auth.url, auth.username, auth.password
('https://www.xnatastic.org/', 'username', '********')
:param alias: XNAT alias
:type alias: str
:param url: XNAT URL
:type url: str
:param cfg: Configuration file
:type cfg: str
:returns: Named tuple of (url, username, password)
:rtype: :mod:`yaxil.XnatAuth`
### Response:
def auth(alias=None, url=None, cfg="~/.xnat_auth"):
'''
Read connection details from an xnat_auth XML file
Example:
>>> import yaxil
>>> auth = yaxil.auth('xnatastic')
>>> auth.url, auth.username, auth.password
('https://www.xnatastic.org/', 'username', '********')
:param alias: XNAT alias
:type alias: str
:param url: XNAT URL
:type url: str
:param cfg: Configuration file
:type cfg: str
:returns: Named tuple of (url, username, password)
:rtype: :mod:`yaxil.XnatAuth`
'''
if not alias and not url:
raise ValueError('you must provide an alias or url argument')
if alias and url:
raise ValueError('cannot provide both alias and url arguments')
# check and parse config file
cfg = os.path.expanduser(cfg)
if not os.path.exists(cfg):
raise AuthError("could not locate auth file %s" % cfg)
tree = etree.parse(os.path.expanduser(cfg))
# search by alias or url
res = None
if alias:
res = tree.findall("./%s" % alias)
if url:
res = tree.findall("./*/[url='%s']" % url)
if not res:
raise AuthError("failed to locate xnat credentials within %s" % cfg)
elif len(res) > 1:
raise AuthError("found too many sets of credentials within %s" % cfg)
res = res.pop()
# get url
url = res.findall("url")
if not url:
raise AuthError("no url for %s in %s" % (alias, cfg))
elif len(url) > 1:
raise AuthError("too many urls for %s in %s" % (alias, cfg))
# get username
username = res.findall("username")
if not username:
raise AuthError("no username for %s in %s" % (alias, cfg))
elif len(username) > 1:
raise AuthError("too many usernames for %s in %s" % (alias, cfg))
# get password
password = res.findall("password")
if not password:
raise AuthError("no password for %s in %s" % (alias, cfg))
elif len(password) > 1:
raise AuthError("too many passwords for %s in %s" % (alias, cfg))
return XnatAuth(url=url.pop().text, username=username.pop().text,
password=password.pop().text) |
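
From the findall calls above, the ~/.xnat_auth file is expected to hold one element per alias, each with url, username, and password children. A hypothetical layout and the two lookup styles (alias name and URL are placeholders):

import yaxil

# Hypothetical ~/.xnat_auth contents; only the per-alias children are checked:
#
#   <xnat>
#     <xnatastic>
#       <url>https://www.xnatastic.org/</url>
#       <username>username</username>
#       <password>********</password>
#     </xnatastic>
#   </xnat>

auth = yaxil.auth('xnatastic')                        # look up by alias (element tag)
auth = yaxil.auth(url='https://www.xnatastic.org/')   # or by the <url> child value
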
def wrap_class(cls, error_threshold=None):
''' Wraps a class with reporting to errors backend by decorating each function of the class.
Decorators are injected under the classmethod decorator if they exist.
'''
methods = inspect.getmembers(cls, inspect.ismethod) + inspect.getmembers(cls, inspect.isfunction)
for method_name, method in methods:
wrapped_method = flawless.client.client._wrap_function_with_error_decorator(
method if not im_self(method) else im_func(method),
save_current_stack_trace=False,
error_threshold=error_threshold,
)
if im_self(method):
wrapped_method = classmethod(wrapped_method)
setattr(cls, method_name, wrapped_method)
return cls | Wraps a class with reporting to errors backend by decorating each function of the class.
Decorators are injected under the classmethod decorator if they exist. | Below is the instruction that describes the task:
### Input:
Wraps a class with reporting to errors backend by decorating each function of the class.
Decorators are injected under the classmethod decorator if they exist.
### Response:
def wrap_class(cls, error_threshold=None):
''' Wraps a class with reporting to errors backend by decorating each function of the class.
Decorators are injected under the classmethod decorator if they exist.
'''
methods = inspect.getmembers(cls, inspect.ismethod) + inspect.getmembers(cls, inspect.isfunction)
for method_name, method in methods:
wrapped_method = flawless.client.client._wrap_function_with_error_decorator(
method if not im_self(method) else im_func(method),
save_current_stack_trace=False,
error_threshold=error_threshold,
)
if im_self(method):
wrapped_method = classmethod(wrapped_method)
setattr(cls, method_name, wrapped_method)
return cls |
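
For illustration, applying the function above to a small class; the class and threshold are made up, and flawless itself must already be configured with a reporting backend:

class MyService(object):
    def handle(self, request):
        return request['payload']      # any exception raised here gets reported

    @classmethod
    def from_config(cls, config):      # classmethods are re-wrapped under @classmethod
        return cls()

MyService = wrap_class(MyService, error_threshold=5)  # same as decorating each method by hand
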
def _completion_checker(async_id, context_id):
"""Check if all Async jobs within a Context have been run."""
if not context_id:
logging.debug("Context for async %s does not exist", async_id)
return
context = FuriousContext.from_id(context_id)
marker = FuriousCompletionMarker.get_by_id(context_id)
if marker and marker.complete:
logging.info("Context %s already complete" % context_id)
return True
task_ids = context.task_ids
if async_id in task_ids:
task_ids.remove(async_id)
logging.debug("Loaded context.")
logging.debug(task_ids)
done, has_errors = _check_markers(task_ids)
if not done:
return False
_mark_context_complete(marker, context, has_errors)
return True | Check if all Async jobs within a Context have been run. | Below is the instruction that describes the task:
### Input:
Check if all Async jobs within a Context have been run.
### Response:
def _completion_checker(async_id, context_id):
"""Check if all Async jobs within a Context have been run."""
if not context_id:
logging.debug("Context for async %s does not exist", async_id)
return
context = FuriousContext.from_id(context_id)
marker = FuriousCompletionMarker.get_by_id(context_id)
if marker and marker.complete:
logging.info("Context %s already complete" % context_id)
return True
task_ids = context.task_ids
if async_id in task_ids:
task_ids.remove(async_id)
logging.debug("Loaded context.")
logging.debug(task_ids)
done, has_errors = _check_markers(task_ids)
if not done:
return False
_mark_context_complete(marker, context, has_errors)
return True |
def add_user(self, user_name, role='user'):
"""
Calls CF's associate user with org. Valid roles include `user`, `auditor`,
`manager`,`billing_manager`
"""
role_uri = self._get_role_uri(role=role)
return self.api.put(path=role_uri, data={'username': user_name}) | Calls CF's associate user with org. Valid roles include `user`, `auditor`,
`manager`,`billing_manager` | Below is the instruction that describes the task:
### Input:
Calls CF's associate user with org. Valid roles include `user`, `auditor`,
`manager`,`billing_manager`
### Response:
def add_user(self, user_name, role='user'):
"""
Calls CF's associate user with org. Valid roles include `user`, `auditor`,
`manager`,`billing_manager`
"""
role_uri = self._get_role_uri(role=role)
return self.api.put(path=role_uri, data={'username': user_name}) |
def add_html_link(app, pagename, templatename, context, doctree):
"""As each page is built, collect page names for the sitemap"""
base_url = app.config['html_theme_options'].get('base_url', '')
if base_url:
app.sitemap_links.append(base_url + pagename + ".html") | As each page is built, collect page names for the sitemap | Below is the instruction that describes the task:
### Input:
As each page is built, collect page names for the sitemap
### Response:
def add_html_link(app, pagename, templatename, context, doctree):
"""As each page is built, collect page names for the sitemap"""
base_url = app.config['html_theme_options'].get('base_url', '')
if base_url:
app.sitemap_links.append(base_url + pagename + ".html") |
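
This is a Sphinx event handler; in an extension it would typically be connected to the html-page-context event and the collected links consumed once the build finishes. A hedged wiring sketch (the build-finished step is only a placeholder):

def setup(app):
    app.sitemap_links = []                            # filled by add_html_link()
    app.connect('html-page-context', add_html_link)
    app.connect('build-finished',
                lambda app, exc: print('%d sitemap links collected' % len(app.sitemap_links)))
    return {'parallel_read_safe': True}
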
def get_lat_variable(nc):
'''
Returns the variable for latitude
:param netcdf4.dataset nc: an open netcdf dataset object
'''
if 'latitude' in nc.variables:
return 'latitude'
latitudes = nc.get_variables_by_attributes(standard_name="latitude")
if latitudes:
return latitudes[0].name
return None | Returns the variable for latitude
:param netcdf4.dataset nc: an open netcdf dataset object | Below is the instruction that describes the task:
### Input:
Returns the variable for latitude
:param netcdf4.dataset nc: an open netcdf dataset object
### Response:
def get_lat_variable(nc):
'''
Returns the variable for latitude
:param netcdf4.dataset nc: an open netcdf dataset object
'''
if 'latitude' in nc.variables:
return 'latitude'
latitudes = nc.get_variables_by_attributes(standard_name="latitude")
if latitudes:
return latitudes[0].name
return None |
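
A small usage sketch, assuming the netCDF4 package and a hypothetical file path:

from netCDF4 import Dataset

nc = Dataset('some_ocean_profile.nc')    # hypothetical file
lat_name = get_lat_variable(nc)
if lat_name is not None:
    lats = nc.variables[lat_name][:]
    print(lat_name, float(lats.min()), float(lats.max()))
nc.close()
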
def reset_for_retry(self):
"""Reset self for shard retry."""
self.retries += 1
self.last_work_item = ""
self.active = True
self.result_status = None
self.input_finished = False
self.counters_map = CountersMap()
self.slice_id = 0
self.slice_start_time = None
self.slice_request_id = None
self.slice_retries = 0
self.acquired_once = False | Reset self for shard retry. | Below is the instruction that describes the task:
### Input:
Reset self for shard retry.
### Response:
def reset_for_retry(self):
"""Reset self for shard retry."""
self.retries += 1
self.last_work_item = ""
self.active = True
self.result_status = None
self.input_finished = False
self.counters_map = CountersMap()
self.slice_id = 0
self.slice_start_time = None
self.slice_request_id = None
self.slice_retries = 0
self.acquired_once = False |
def to_utc_datetime(self, value):
"""
from value to datetime with tzinfo format (datetime.datetime instance)
"""
if isinstance(value, (six.integer_types, float, six.string_types)):
value = self.to_naive_datetime(value)
if isinstance(value, datetime.datetime):
if timezone.is_naive(value):
value = timezone.make_aware(value, timezone.utc)
else:
value = timezone.localtime(value, timezone.utc)
return value
raise exceptions.ValidationError(
"Unable to convert value: '%s' to python data type" % value,
code="invalid_datetime"
from value to datetime with tzinfo format (datetime.datetime instance) | Below is the instruction that describes the task:
### Input:
from value to datetime with tzinfo format (datetime.datetime instance)
### Response:
def to_utc_datetime(self, value):
"""
from value to datetime with tzinfo format (datetime.datetime instance)
"""
if isinstance(value, (six.integer_types, float, six.string_types)):
value = self.to_naive_datetime(value)
if isinstance(value, datetime.datetime):
if timezone.is_naive(value):
value = timezone.make_aware(value, timezone.utc)
else:
value = timezone.localtime(value, timezone.utc)
return value
raise exceptions.ValidationError(
"Unable to convert value: '%s' to python data type" % value,
code="invalid_datetime"
) |
def get_changes(self, remove=True, only_current=False, resources=None,
task_handle=taskhandle.NullTaskHandle()):
"""Get the changes this refactoring makes
If `remove` is `False` the definition will not be removed. If
`only_current` is `True`, only the current occurrence will be
inlined.
"""
changes = ChangeSet('Inline method <%s>' % self.name)
if resources is None:
resources = self.project.get_python_files()
if only_current:
resources = [self.original]
if remove:
resources.append(self.resource)
job_set = task_handle.create_jobset('Collecting Changes',
len(resources))
for file in resources:
job_set.started_job(file.path)
if file == self.resource:
changes.add_change(self._defining_file_changes(
changes, remove=remove, only_current=only_current))
else:
aim = None
if only_current and self.original == file:
aim = self.offset
handle = _InlineFunctionCallsForModuleHandle(
self.project, file, self.others_generator, aim)
result = move.ModuleSkipRenamer(
self.occurrence_finder, file, handle).get_changed_module()
if result is not None:
result = _add_imports(self.project, result,
file, self.imports)
if remove:
result = _remove_from(self.project, self.pyname,
result, file)
changes.add_change(ChangeContents(file, result))
job_set.finished_job()
return changes | Get the changes this refactoring makes
If `remove` is `False` the definition will not be removed. If
`only_current` is `True`, only the current occurrence will be
inlined. | Below is the instruction that describes the task:
### Input:
Get the changes this refactoring makes
If `remove` is `False` the definition will not be removed. If
`only_current` is `True`, only the current occurrence will be
inlined.
### Response:
def get_changes(self, remove=True, only_current=False, resources=None,
task_handle=taskhandle.NullTaskHandle()):
"""Get the changes this refactoring makes
If `remove` is `False` the definition will not be removed. If
`only_current` is `True`, only the current occurrence will be
inlined.
"""
changes = ChangeSet('Inline method <%s>' % self.name)
if resources is None:
resources = self.project.get_python_files()
if only_current:
resources = [self.original]
if remove:
resources.append(self.resource)
job_set = task_handle.create_jobset('Collecting Changes',
len(resources))
for file in resources:
job_set.started_job(file.path)
if file == self.resource:
changes.add_change(self._defining_file_changes(
changes, remove=remove, only_current=only_current))
else:
aim = None
if only_current and self.original == file:
aim = self.offset
handle = _InlineFunctionCallsForModuleHandle(
self.project, file, self.others_generator, aim)
result = move.ModuleSkipRenamer(
self.occurrence_finder, file, handle).get_changed_module()
if result is not None:
result = _add_imports(self.project, result,
file, self.imports)
if remove:
result = _remove_from(self.project, self.pyname,
result, file)
changes.add_change(ChangeContents(file, result))
job_set.finished_job()
return changes |
def parse_files(self, req, name, field):
"""Pull a file from the request."""
files = ((k, v) for k, v in req.POST.items() if hasattr(v, "file"))
return core.get_value(MultiDict(files), name, field) | Pull a file from the request. | Below is the instruction that describes the task:
### Input:
Pull a file from the request.
### Response:
def parse_files(self, req, name, field):
"""Pull a file from the request."""
files = ((k, v) for k, v in req.POST.items() if hasattr(v, "file"))
return core.get_value(MultiDict(files), name, field) |
def write_multiple_registers(slave_id, starting_address, values):
""" Return ADU for Modbus function code 16: Write Multiple Registers.
:param slave_id: Number of slave.
:return: Byte array with ADU.
"""
function = WriteMultipleRegisters()
function.starting_address = starting_address
function.values = values
return _create_request_adu(slave_id, function.request_pdu) | Return ADU for Modbus function code 16: Write Multiple Registers.
:param slave_id: Number of slave.
:return: Byte array with ADU. | Below is the instruction that describes the task:
### Input:
Return ADU for Modbus function code 16: Write Multiple Registers.
:param slave_id: Number of slave.
:return: Byte array with ADU.
### Response:
def write_multiple_registers(slave_id, starting_address, values):
""" Return ADU for Modbus function code 16: Write Multiple Registers.
:param slave_id: Number of slave.
:return: Byte array with ADU.
"""
function = WriteMultipleRegisters()
function.starting_address = starting_address
function.values = values
return _create_request_adu(slave_id, function.request_pdu) |
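
A hedged sketch of how such a request builder is used with uModbus over TCP; the module path, host, and register values are assumptions:

import socket
from umodbus.client import tcp   # assumed location of write_multiple_registers

with socket.create_connection(('192.168.1.10', 502), timeout=5) as sock:  # placeholder device
    adu = tcp.write_multiple_registers(slave_id=1, starting_address=100, values=[3, 8, 7])
    response = tcp.send_message(adu, sock)
    print(response)
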
def check_order(self, order):
"""
order must be a subset of self.order
"""
own_order = self.order
for item in order:
if item not in own_order:
raise ValueError(f'Order item {item} not found.')
return order | order must be a subset of self.order | Below is the the instruction that describes the task:
### Input:
order must be a subset of self.order
### Response:
def check_order(self, order):
"""
order must be a subset of self.order
"""
own_order = self.order
for item in order:
if item not in own_order:
raise ValueError(f'Order item {item} not found.')
return order |
def _prepare_configs(configs, requires_filters, temporal_timeouts):
"""
Overrides the filters specified in the decorator with the given ones
:param configs: Field → (Requirement, key, allow_none) dictionary
:param requires_filters: Content of the 'requires.filter' component
property (field → string)
:param temporal_timeouts: Content of the 'temporal.timeouts' component
property (field → float)
:return: The new configuration dictionary
"""
if not isinstance(requires_filters, dict):
requires_filters = {}
if not isinstance(temporal_timeouts, dict):
temporal_timeouts = {}
if not requires_filters and not temporal_timeouts:
# No explicit configuration given
return configs
# We need to change a part of the requirements
new_configs = {}
for field, config in configs.items():
# Extract values from tuple
requirement, timeout = config
explicit_filter = requires_filters.get(field)
explicit_timeout = temporal_timeouts.get(field)
# Convert the timeout value
try:
explicit_timeout = int(explicit_timeout)
if explicit_timeout <= 0:
explicit_timeout = timeout
except (ValueError, TypeError):
explicit_timeout = timeout
if not explicit_filter and not explicit_timeout:
# Nothing to do
new_configs[field] = config
else:
try:
# Store an updated copy of the requirement
requirement_copy = requirement.copy()
if explicit_filter:
requirement_copy.set_filter(explicit_filter)
new_configs[field] = (requirement_copy, explicit_timeout)
except (TypeError, ValueError):
# No information for this one, or invalid filter:
# keep the factory requirement
new_configs[field] = config
return new_configs | Overrides the filters specified in the decorator with the given ones
:param configs: Field → (Requirement, key, allow_none) dictionary
:param requires_filters: Content of the 'requires.filter' component
property (field → string)
:param temporal_timeouts: Content of the 'temporal.timeouts' component
property (field → float)
:return: The new configuration dictionary | Below is the instruction that describes the task:
### Input:
Overrides the filters specified in the decorator with the given ones
:param configs: Field → (Requirement, key, allow_none) dictionary
:param requires_filters: Content of the 'requires.filter' component
property (field → string)
:param temporal_timeouts: Content of the 'temporal.timeouts' component
property (field → float)
:return: The new configuration dictionary
### Response:
def _prepare_configs(configs, requires_filters, temporal_timeouts):
"""
Overrides the filters specified in the decorator with the given ones
:param configs: Field → (Requirement, key, allow_none) dictionary
:param requires_filters: Content of the 'requires.filter' component
property (field → string)
:param temporal_timeouts: Content of the 'temporal.timeouts' component
property (field → float)
:return: The new configuration dictionary
"""
if not isinstance(requires_filters, dict):
requires_filters = {}
if not isinstance(temporal_timeouts, dict):
temporal_timeouts = {}
if not requires_filters and not temporal_timeouts:
# No explicit configuration given
return configs
# We need to change a part of the requirements
new_configs = {}
for field, config in configs.items():
# Extract values from tuple
requirement, timeout = config
explicit_filter = requires_filters.get(field)
explicit_timeout = temporal_timeouts.get(field)
# Convert the timeout value
try:
explicit_timeout = int(explicit_timeout)
if explicit_timeout <= 0:
explicit_timeout = timeout
except (ValueError, TypeError):
explicit_timeout = timeout
if not explicit_filter and not explicit_timeout:
# Nothing to do
new_configs[field] = config
else:
try:
# Store an updated copy of the requirement
requirement_copy = requirement.copy()
if explicit_filter:
requirement_copy.set_filter(explicit_filter)
new_configs[field] = (requirement_copy, explicit_timeout)
except (TypeError, ValueError):
# No information for this one, or invalid filter:
# keep the factory requirement
new_configs[field] = config
return new_configs |
def codepoint_included(self, codepoint):
"""Check if codepoint matches any of the defined codepoints."""
if self.codepoints == None:
return True
for cp in self.codepoints:
mismatch = False
for i in range(len(cp)):
if (cp[i] is not None) and (cp[i] != codepoint[i]):
mismatch = True
break
if not mismatch:
return True
return False | Check if codepoint matches any of the defined codepoints. | Below is the instruction that describes the task:
### Input:
Check if codepoint matches any of the defined codepoints.
### Response:
def codepoint_included(self, codepoint):
"""Check if codepoint matches any of the defined codepoints."""
if self.codepoints == None:
return True
for cp in self.codepoints:
mismatch = False
for i in range(len(cp)):
if (cp[i] is not None) and (cp[i] != codepoint[i]):
mismatch = True
break
if not mismatch:
return True
return False |
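
Reading the loop above, None entries act as wildcards inside each stored codepoint pattern; a tiny illustration using a stand-in object:

import types

flt = types.SimpleNamespace(codepoints=[(1, None), (2, 3)])   # None matches anything in that slot
print(codepoint_included(flt, (1, 7)))    # True: (1, None) ignores the second position
print(codepoint_included(flt, (2, 4)))    # False: no pattern matches
print(codepoint_included(types.SimpleNamespace(codepoints=None), (9, 9)))  # True: no filter set
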
def create_args(line, namespace):
""" Expand any meta-variable references in the argument list. """
args = []
# Using shlex.split handles quotes args and escape characters.
for arg in shlex.split(line):
if not arg:
continue
if arg[0] == '$':
var_name = arg[1:]
if var_name in namespace:
args.append((namespace[var_name]))
else:
raise Exception('Undefined variable referenced in command line: %s' % arg)
else:
args.append(arg)
return args | Expand any meta-variable references in the argument list. | Below is the instruction that describes the task:
### Input:
Expand any meta-variable references in the argument list.
### Response:
def create_args(line, namespace):
""" Expand any meta-variable references in the argument list. """
args = []
# Using shlex.split handles quotes args and escape characters.
for arg in shlex.split(line):
if not arg:
continue
if arg[0] == '$':
var_name = arg[1:]
if var_name in namespace:
args.append((namespace[var_name]))
else:
raise Exception('Undefined variable referenced in command line: %s' % arg)
else:
args.append(arg)
return args |
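
Concretely, the expansion behaves like this (variable names and values are arbitrary):

namespace = {'infile': 'data.csv'}
print(create_args('load $infile --limit 10', namespace))
# ['load', 'data.csv', '--limit', '10']
print(create_args('plot "$infile" -o out.png', namespace))
# ['plot', 'data.csv', '-o', 'out.png']  (shlex strips the quotes before the $ check)
print(create_args('load $missing', namespace))
# raises Exception: Undefined variable referenced in command line: $missing
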
def save(self):
"""
:return: save this OS instance on Ariane server (create or update)
"""
LOGGER.debug("OSInstance.save")
post_payload = {}
consolidated_osi_id = []
consolidated_ipa_id = []
consolidated_nic_id = []
consolidated_app_id = []
consolidated_env_id = []
consolidated_snet_id = []
consolidated_team_id = []
if self.id is not None:
post_payload['osInstanceID'] = self.id
if self.name is not None:
post_payload['osInstanceName'] = self.name
if self.description is not None:
post_payload['osInstanceDescription'] = self.description
if self.admin_gate_uri is not None:
post_payload['osInstanceAdminGateURI'] = self.admin_gate_uri
if self.embedding_osi_id is not None:
post_payload['osInstanceEmbeddingOSInstanceID'] = self.embedding_osi_id
if self.ost_id is not None:
post_payload['osInstanceOSTypeID'] = self.ost_id
if self.embedded_osi_ids is not None:
consolidated_osi_id = copy.deepcopy(self.embedded_osi_ids)
if self.embedded_osi_2_rm is not None:
for osi_2_rm in self.embedded_osi_2_rm:
if osi_2_rm.id is None:
osi_2_rm.sync()
consolidated_osi_id.remove(osi_2_rm.id)
if self.embedded_osi_2_add is not None:
for osi_id_2_add in self.embedded_osi_2_add:
if osi_id_2_add.id is None:
osi_id_2_add.save()
consolidated_osi_id.append(osi_id_2_add.id)
post_payload['osInstanceEmbeddedOSInstancesID'] = consolidated_osi_id
if self.ip_address_ids is not None:
consolidated_ipa_id = copy.deepcopy(self.ip_address_ids)
if self.ip_address_2_rm is not None:
for ipa_2_rm in self.ip_address_2_rm:
if ipa_2_rm.id is None:
ipa_2_rm.sync()
consolidated_ipa_id.remove(ipa_2_rm.id)
if self.ip_address_2_add is not None:
for ipa_2_add in self.ip_address_2_add:
if ipa_2_add.id is None:
ipa_2_add.save()
consolidated_ipa_id.append(ipa_2_add.id)
post_payload['osInstanceIPAddressesID'] = consolidated_ipa_id
if self.nic_ids is not None:
consolidated_nic_id = copy.deepcopy(self.nic_ids)
if self.nic_2_rm is not None:
for nic_2_rm in self.nic_2_rm:
if nic_2_rm.id is None:
nic_2_rm.sync()
consolidated_nic_id.remove(nic_2_rm.id)
if self.nic_2_add is not None:
for nic_2_add in self.nic_2_add:
if nic_2_add.id is None:
nic_2_add.save()
consolidated_nic_id.append(nic_2_add.id)
post_payload['osInstanceNICsID'] = consolidated_nic_id
if self.subnet_ids is not None:
consolidated_snet_id = copy.deepcopy(self.subnet_ids)
if self.subnets_2_rm is not None:
for snet_2_rm in self.subnets_2_rm:
if snet_2_rm.id is None:
snet_2_rm.sync()
consolidated_snet_id.remove(snet_2_rm.id)
if self.subnets_2_add is not None:
for snet_2_add in self.subnets_2_add:
if snet_2_add.id is None:
snet_2_add.save()
consolidated_snet_id.append(snet_2_add.id)
post_payload['osInstanceSubnetsID'] = consolidated_snet_id
if self.application_ids is not None:
consolidated_app_id = copy.deepcopy(self.application_ids)
if self.application_2_rm is not None:
for app_2_rm in self.application_2_rm:
if app_2_rm.id is None:
app_2_rm.sync()
consolidated_app_id.remove(app_2_rm.id)
if self.application_2_add is not None:
for app_2_add in self.application_2_add:
if app_2_add.id is None:
app_2_add.save()
consolidated_app_id.append(app_2_add.id)
post_payload['osInstanceApplicationsID'] = consolidated_app_id
if self.environment_ids is not None:
consolidated_env_id = copy.deepcopy(self.environment_ids)
if self.environment_2_rm is not None:
for env_2_rm in self.environment_2_rm:
if env_2_rm.id is None:
env_2_rm.sync()
consolidated_env_id.remove(env_2_rm.id)
if self.environment_2_add is not None:
for env_2_add in self.environment_2_add:
if env_2_add.id is None:
env_2_add.save()
consolidated_env_id.append(env_2_add.id)
post_payload['osInstanceEnvironmentsID'] = consolidated_env_id
if self.team_ids is not None:
consolidated_team_id = copy.deepcopy(self.team_ids)
if self.team_2_rm is not None:
for team_2_rm in self.team_2_rm:
if team_2_rm.id is None:
team_2_rm.sync()
consolidated_team_id.remove(team_2_rm.id)
if self.team_2_add is not None:
for team_2_add in self.team_2_add:
if team_2_add.id is None:
team_2_add.save()
consolidated_team_id.append(team_2_add.id)
post_payload['osInstanceTeamsID'] = consolidated_team_id
args = {'http_operation': 'POST', 'operation_path': '', 'parameters': {'payload': json.dumps(post_payload)}}
response = OSInstanceService.requester.call(args)
if response.rc != 0:
LOGGER.warning(
'OSInstance.save - Problem while saving OS instance ' + self.name +
'. Reason: ' + str(response.response_content) + '-' + str(response.error_message) +
" (" + str(response.rc) + ")"
)
else:
self.id = response.response_content['osInstanceID']
if self.embedded_osi_2_add is not None:
for osi_2_add in self.embedded_osi_2_add:
osi_2_add.sync()
if self.embedded_osi_2_rm is not None:
for osi_2_rm in self.embedded_osi_2_rm:
osi_2_rm.sync()
if self.ip_address_2_add is not None:
for ipa_2_add in self.ip_address_2_add:
ipa_2_add.sync()
if self.ip_address_2_rm is not None:
for ipa_2_rm in self.ip_address_2_rm:
ipa_2_rm.sync()
if self.nic_2_add is not None:
for nic_2_add in self.nic_2_add:
nic_2_add.sync()
if self.nic_2_rm is not None:
for nic_2_rm in self.nic_2_rm:
nic_2_rm.sync()
if self.subnets_2_add is not None:
for snet_2_add in self.subnets_2_add:
snet_2_add.sync()
if self.subnets_2_rm is not None:
for snet_2_rm in self.subnets_2_rm:
snet_2_rm.sync()
if self.application_2_add is not None:
for app_2_add in self.application_2_add:
app_2_add.sync()
if self.application_2_rm is not None:
for app_2_rm in self.application_2_rm:
app_2_rm.sync()
if self.environment_2_add is not None:
for env_2_add in self.environment_2_add:
env_2_add.sync()
if self.environment_2_rm is not None:
for env_2_rm in self.environment_2_rm:
env_2_rm.sync()
if self.team_2_add is not None:
for team_2_add in self.team_2_add:
team_2_add.sync()
if self.team_2_rm is not None:
for team_2_rm in self.team_2_rm:
team_2_rm.sync()
self.embedded_osi_2_add.clear()
self.embedded_osi_2_rm.clear()
self.ip_address_2_add.clear()
self.ip_address_2_rm.clear()
self.nic_2_add.clear()
self.nic_2_rm.clear()
self.subnets_2_add.clear()
self.subnets_2_rm.clear()
self.application_2_add.clear()
self.application_2_rm.clear()
self.environment_2_add.clear()
self.environment_2_rm.clear()
self.team_2_add.clear()
self.team_2_rm.clear()
self.sync()
return self | :return: save this OS instance on Ariane server (create or update) | Below is the instruction that describes the task:
### Input:
:return: save this OS instance on Ariane server (create or update)
### Response:
def save(self):
"""
:return: save this OS instance on Ariane server (create or update)
"""
LOGGER.debug("OSInstance.save")
post_payload = {}
consolidated_osi_id = []
consolidated_ipa_id = []
consolidated_nic_id = []
consolidated_app_id = []
consolidated_env_id = []
consolidated_snet_id = []
consolidated_team_id = []
if self.id is not None:
post_payload['osInstanceID'] = self.id
if self.name is not None:
post_payload['osInstanceName'] = self.name
if self.description is not None:
post_payload['osInstanceDescription'] = self.description
if self.admin_gate_uri is not None:
post_payload['osInstanceAdminGateURI'] = self.admin_gate_uri
if self.embedding_osi_id is not None:
post_payload['osInstanceEmbeddingOSInstanceID'] = self.embedding_osi_id
if self.ost_id is not None:
post_payload['osInstanceOSTypeID'] = self.ost_id
if self.embedded_osi_ids is not None:
consolidated_osi_id = copy.deepcopy(self.embedded_osi_ids)
if self.embedded_osi_2_rm is not None:
for osi_2_rm in self.embedded_osi_2_rm:
if osi_2_rm.id is None:
osi_2_rm.sync()
consolidated_osi_id.remove(osi_2_rm.id)
if self.embedded_osi_2_add is not None:
for osi_id_2_add in self.embedded_osi_2_add:
if osi_id_2_add.id is None:
osi_id_2_add.save()
consolidated_osi_id.append(osi_id_2_add.id)
post_payload['osInstanceEmbeddedOSInstancesID'] = consolidated_osi_id
if self.ip_address_ids is not None:
consolidated_ipa_id = copy.deepcopy(self.ip_address_ids)
if self.ip_address_2_rm is not None:
for ipa_2_rm in self.ip_address_2_rm:
if ipa_2_rm.id is None:
ipa_2_rm.sync()
consolidated_ipa_id.remove(ipa_2_rm.id)
if self.ip_address_2_add is not None:
for ipa_2_add in self.ip_address_2_add:
if ipa_2_add.id is None:
ipa_2_add.save()
consolidated_ipa_id.append(ipa_2_add.id)
post_payload['osInstanceIPAddressesID'] = consolidated_ipa_id
if self.nic_ids is not None:
consolidated_nic_id = copy.deepcopy(self.nic_ids)
if self.nic_2_rm is not None:
for nic_2_rm in self.nic_2_rm:
if nic_2_rm.id is None:
nic_2_rm.sync()
consolidated_nic_id.remove(nic_2_rm.id)
if self.nic_2_add is not None:
for nic_2_add in self.nic_2_add:
if nic_2_add.id is None:
nic_2_add.save()
consolidated_nic_id.append(nic_2_add.id)
post_payload['osInstanceNICsID'] = consolidated_nic_id
if self.subnet_ids is not None:
consolidated_snet_id = copy.deepcopy(self.subnet_ids)
if self.subnets_2_rm is not None:
for snet_2_rm in self.subnets_2_rm:
if snet_2_rm.id is None:
snet_2_rm.sync()
consolidated_snet_id.remove(snet_2_rm.id)
if self.subnets_2_add is not None:
for snet_2_add in self.subnets_2_add:
if snet_2_add.id is None:
snet_2_add.save()
consolidated_snet_id.append(snet_2_add.id)
post_payload['osInstanceSubnetsID'] = consolidated_snet_id
if self.application_ids is not None:
consolidated_app_id = copy.deepcopy(self.application_ids)
if self.application_2_rm is not None:
for app_2_rm in self.application_2_rm:
if app_2_rm.id is None:
app_2_rm.sync()
consolidated_app_id.remove(app_2_rm.id)
if self.application_2_add is not None:
for app_2_add in self.application_2_add:
if app_2_add.id is None:
app_2_add.save()
consolidated_app_id.append(app_2_add.id)
post_payload['osInstanceApplicationsID'] = consolidated_app_id
if self.environment_ids is not None:
consolidated_env_id = copy.deepcopy(self.environment_ids)
if self.environment_2_rm is not None:
for env_2_rm in self.environment_2_rm:
if env_2_rm.id is None:
env_2_rm.sync()
consolidated_env_id.remove(env_2_rm.id)
if self.environment_2_add is not None:
for env_2_add in self.environment_2_add:
if env_2_add.id is None:
env_2_add.save()
consolidated_env_id.append(env_2_add.id)
post_payload['osInstanceEnvironmentsID'] = consolidated_env_id
if self.team_ids is not None:
consolidated_team_id = copy.deepcopy(self.team_ids)
if self.team_2_rm is not None:
for team_2_rm in self.team_2_rm:
if team_2_rm.id is None:
team_2_rm.sync()
consolidated_team_id.remove(team_2_rm.id)
if self.team_2_add is not None:
for team_2_add in self.team_2_add:
if team_2_add.id is None:
team_2_add.save()
consolidated_team_id.append(team_2_add.id)
post_payload['osInstanceTeamsID'] = consolidated_team_id
args = {'http_operation': 'POST', 'operation_path': '', 'parameters': {'payload': json.dumps(post_payload)}}
response = OSInstanceService.requester.call(args)
if response.rc != 0:
LOGGER.warning(
'OSInstance.save - Problem while saving OS instance ' + self.name +
'. Reason: ' + str(response.response_content) + '-' + str(response.error_message) +
" (" + str(response.rc) + ")"
)
else:
self.id = response.response_content['osInstanceID']
if self.embedded_osi_2_add is not None:
for osi_2_add in self.embedded_osi_2_add:
osi_2_add.sync()
if self.embedded_osi_2_rm is not None:
for osi_2_rm in self.embedded_osi_2_rm:
osi_2_rm.sync()
if self.ip_address_2_add is not None:
for ipa_2_add in self.ip_address_2_add:
ipa_2_add.sync()
if self.ip_address_2_rm is not None:
for ipa_2_rm in self.ip_address_2_rm:
ipa_2_rm.sync()
if self.nic_2_add is not None:
for nic_2_add in self.nic_2_add:
nic_2_add.sync()
if self.nic_2_rm is not None:
for nic_2_rm in self.nic_2_rm:
nic_2_rm.sync()
if self.subnets_2_add is not None:
for snet_2_add in self.subnets_2_add:
snet_2_add.sync()
if self.subnets_2_rm is not None:
for snet_2_rm in self.subnets_2_rm:
snet_2_rm.sync()
if self.application_2_add is not None:
for app_2_add in self.application_2_add:
app_2_add.sync()
if self.application_2_rm is not None:
for app_2_rm in self.application_2_rm:
app_2_rm.sync()
if self.environment_2_add is not None:
for env_2_add in self.environment_2_add:
env_2_add.sync()
if self.environment_2_rm is not None:
for env_2_rm in self.environment_2_rm:
env_2_rm.sync()
if self.team_2_add is not None:
for team_2_add in self.team_2_add:
team_2_add.sync()
if self.team_2_rm is not None:
for team_2_rm in self.team_2_rm:
team_2_rm.sync()
self.embedded_osi_2_add.clear()
self.embedded_osi_2_rm.clear()
self.ip_address_2_add.clear()
self.ip_address_2_rm.clear()
self.nic_2_add.clear()
self.nic_2_rm.clear()
self.subnets_2_add.clear()
self.subnets_2_rm.clear()
self.application_2_add.clear()
self.application_2_rm.clear()
self.environment_2_add.clear()
self.environment_2_rm.clear()
self.team_2_add.clear()
self.team_2_rm.clear()
self.sync()
return self |
def finding_path(cls, project, scan_config, scan_run, finding):
"""Return a fully-qualified finding string."""
return google.api_core.path_template.expand(
"projects/{project}/scanConfigs/{scan_config}/scanRuns/{scan_run}/findings/{finding}",
project=project,
scan_config=scan_config,
scan_run=scan_run,
finding=finding,
) | Return a fully-qualified finding string. | Below is the instruction that describes the task:
### Input:
Return a fully-qualified finding string.
### Response:
def finding_path(cls, project, scan_config, scan_run, finding):
"""Return a fully-qualified finding string."""
return google.api_core.path_template.expand(
"projects/{project}/scanConfigs/{scan_config}/scanRuns/{scan_run}/findings/{finding}",
project=project,
scan_config=scan_config,
scan_run=scan_run,
finding=finding,
) |
def _init_oauth(self, oauth_token, oauth_token_secret):
"Store and initialize a verified set of OAuth credentials"
self.oauth_token = oauth_token
self.oauth_token_secret = oauth_token_secret
self._oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.oauth_token,
resource_owner_secret=self.oauth_token_secret,
rsa_key=self.rsa_key,
signature_method=self._signature_method
) | Store and initialize a verified set of OAuth credentials | Below is the instruction that describes the task:
### Input:
Store and initialize a verified set of OAuth credentials
### Response:
def _init_oauth(self, oauth_token, oauth_token_secret):
"Store and initialize a verified set of OAuth credentials"
self.oauth_token = oauth_token
self.oauth_token_secret = oauth_token_secret
self._oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.oauth_token,
resource_owner_secret=self.oauth_token_secret,
rsa_key=self.rsa_key,
signature_method=self._signature_method
) |
def cancelTask(self, *args, **kwargs):
"""
Cancel Task
This method will cancel a task that is either `unscheduled`, `pending` or
`running`. It will resolve the current run as `exception` with
`reasonResolved` set to `canceled`. If the task isn't scheduled yet, ie.
it doesn't have any runs, an initial run will be added and resolved as
described above. Hence, after canceling a task, it cannot be scheduled
with `queue.scheduleTask`, but a new run can be created with
`queue.rerun`. These semantics are equivalent to calling
`queue.scheduleTask` immediately followed by `queue.cancelTask`.
**Remark** this operation is idempotent, if you try to cancel a task that
isn't `unscheduled`, `pending` or `running`, this operation will just
return the current task status.
This method gives output: ``v1/task-status-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["cancelTask"], *args, **kwargs) | Cancel Task
This method will cancel a task that is either `unscheduled`, `pending` or
`running`. It will resolve the current run as `exception` with
`reasonResolved` set to `canceled`. If the task isn't scheduled yet, ie.
it doesn't have any runs, an initial run will be added and resolved as
described above. Hence, after canceling a task, it cannot be scheduled
with `queue.scheduleTask`, but a new run can be created with
`queue.rerun`. These semantics are equivalent to calling
`queue.scheduleTask` immediately followed by `queue.cancelTask`.
**Remark** this operation is idempotent, if you try to cancel a task that
isn't `unscheduled`, `pending` or `running`, this operation will just
return the current task status.
This method gives output: ``v1/task-status-response.json#``
This method is ``stable`` | Below is the instruction that describes the task:
### Input:
Cancel Task
This method will cancel a task that is either `unscheduled`, `pending` or
`running`. It will resolve the current run as `exception` with
`reasonResolved` set to `canceled`. If the task isn't scheduled yet, ie.
it doesn't have any runs, an initial run will be added and resolved as
described above. Hence, after canceling a task, it cannot be scheduled
with `queue.scheduleTask`, but a new run can be created with
`queue.rerun`. These semantics are equivalent to calling
`queue.scheduleTask` immediately followed by `queue.cancelTask`.
**Remark** this operation is idempotent, if you try to cancel a task that
isn't `unscheduled`, `pending` or `running`, this operation will just
return the current task status.
This method gives output: ``v1/task-status-response.json#``
This method is ``stable``
### Response:
def cancelTask(self, *args, **kwargs):
"""
Cancel Task
This method will cancel a task that is either `unscheduled`, `pending` or
`running`. It will resolve the current run as `exception` with
`reasonResolved` set to `canceled`. If the task isn't scheduled yet, ie.
it doesn't have any runs, an initial run will be added and resolved as
described above. Hence, after canceling a task, it cannot be scheduled
with `queue.scheduleTask`, but a new run can be created with
`queue.rerun`. These semantics is equivalent to calling
`queue.scheduleTask` immediately followed by `queue.cancelTask`.
**Remark** this operation is idempotent, if you try to cancel a task that
isn't `unscheduled`, `pending` or `running`, this operation will just
return the current task status.
This method gives output: ``v1/task-status-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["cancelTask"], *args, **kwargs) |
def match(self, pattern, screen=None, rect=None, offset=None, threshold=None, method=None):
"""Check if image position in screen
Args:
- pattern: Image file name or opencv image object
- screen (PIL.Image): optional, if not None, screenshot method will be called
- threshold (float): it depends on the image match method
- method (string): choices on <template | sift>
Returns:
None or FindPoint, For example:
FindPoint(pos=(20, 30), method='tmpl', confidence=0.801, matched=True)
Only when confidence > self.image_match_threshold, matched will be True
Raises:
TypeError: when image_match_method is invalid
"""
pattern = self.pattern_open(pattern)
search_img = pattern.image
pattern_scale = self._cal_scale(pattern)
if pattern_scale != 1.0:
search_img = cv2.resize(search_img, (0, 0),
fx=pattern_scale, fy=pattern_scale,
interpolation=cv2.INTER_CUBIC)
screen = screen or self.region_screenshot()
threshold = threshold or pattern.threshold or self.image_match_threshold
# handle offset if percent, ex (0.2, 0.8)
dx, dy = offset or pattern.offset or (0, 0)
dx = pattern.image.shape[1] * dx # opencv object width
dy = pattern.image.shape[0] * dy # opencv object height
dx, dy = int(dx*pattern_scale), int(dy*pattern_scale)
# image match
screen = imutils.from_pillow(screen) # convert to opencv image
if rect and isinstance(rect, tuple) and len(rect) == 4:
(x0, y0, x1, y1) = [int(v*pattern_scale) for v in rect]
(dx, dy) = dx+x0, dy+y0
screen = imutils.crop(screen, x0, y0, x1, y1)
#cv2.imwrite('cc.png', screen)
match_method = method or self.image_match_method
ret = None
confidence = None
matched = False
position = None
if match_method == consts.IMAGE_MATCH_METHOD_TMPL: #IMG_METHOD_TMPL
ret = ac.find_template(screen, search_img)
if ret is None:
return None
confidence = ret['confidence']
if confidence > threshold:
matched = True
(x, y) = ret['result']
position = (x+dx, y+dy) # fix by offset
elif match_method == consts.IMAGE_MATCH_METHOD_SIFT:
ret = ac.find_sift(screen, search_img, min_match_count=10)
if ret is None:
return None
confidence = ret['confidence']
matches, total = confidence
if 1.0*matches/total > 0.5: # FIXME(ssx): sift just write here
matched = True
(x, y) = ret['result']
position = (x+dx, y+dy) # fix by offset
elif match_method == consts.IMAGE_MATCH_METHOD_AUTO:
fp = self._match_auto(screen, search_img, threshold)
if fp is None:
return None
(x, y) = fp.pos
position = (x+dx, y+dy)
return FindPoint(position, fp.confidence, fp.method, fp.matched)
else:
raise TypeError("Invalid image match method: %s" %(match_method,))
(x, y) = ret['result']
position = (x+dx, y+dy) # fix by offset
if self.bounds:
x, y = position
position = (x+self.bounds.left, y+self.bounds.top)
return FindPoint(position, confidence, match_method, matched=matched) | Check if image position in screen
Args:
- pattern: Image file name or opencv image object
- screen (PIL.Image): optional, if not None, screenshot method will be called
- threshold (float): it depends on the image match method
- method (string): choices on <template | sift>
Returns:
None or FindPoint, For example:
FindPoint(pos=(20, 30), method='tmpl', confidence=0.801, matched=True)
Only when confidence > self.image_match_threshold, matched will be True
Raises:
TypeError: when image_match_method is invalid | Below is the the instruction that describes the task:
### Input:
Check if image position in screen
Args:
- pattern: Image file name or opencv image object
- screen (PIL.Image): optional, if not None, screenshot method will be called
- threshold (float): it depends on the image match method
- method (string): choices on <template | sift>
Returns:
None or FindPoint, For example:
FindPoint(pos=(20, 30), method='tmpl', confidence=0.801, matched=True)
Only when confidence > self.image_match_threshold, matched will be True
Raises:
TypeError: when image_match_method is invalid
### Response:
def match(self, pattern, screen=None, rect=None, offset=None, threshold=None, method=None):
"""Check if image position in screen
Args:
- pattern: Image file name or opencv image object
- screen (PIL.Image): optional, if not None, screenshot method will be called
- threshold (float): it depends on the image match method
- method (string): choices on <template | sift>
Returns:
None or FindPoint, For example:
FindPoint(pos=(20, 30), method='tmpl', confidence=0.801, matched=True)
Only when confidence > self.image_match_threshold, matched will be True
Raises:
TypeError: when image_match_method is invalid
"""
pattern = self.pattern_open(pattern)
search_img = pattern.image
pattern_scale = self._cal_scale(pattern)
if pattern_scale != 1.0:
search_img = cv2.resize(search_img, (0, 0),
fx=pattern_scale, fy=pattern_scale,
interpolation=cv2.INTER_CUBIC)
screen = screen or self.region_screenshot()
threshold = threshold or pattern.threshold or self.image_match_threshold
# handle offset if percent, ex (0.2, 0.8)
dx, dy = offset or pattern.offset or (0, 0)
dx = pattern.image.shape[1] * dx # opencv object width
dy = pattern.image.shape[0] * dy # opencv object height
dx, dy = int(dx*pattern_scale), int(dy*pattern_scale)
# image match
screen = imutils.from_pillow(screen) # convert to opencv image
if rect and isinstance(rect, tuple) and len(rect) == 4:
(x0, y0, x1, y1) = [int(v*pattern_scale) for v in rect]
(dx, dy) = dx+x0, dy+y0
screen = imutils.crop(screen, x0, y0, x1, y1)
#cv2.imwrite('cc.png', screen)
match_method = method or self.image_match_method
ret = None
confidence = None
matched = False
position = None
if match_method == consts.IMAGE_MATCH_METHOD_TMPL: #IMG_METHOD_TMPL
ret = ac.find_template(screen, search_img)
if ret is None:
return None
confidence = ret['confidence']
if confidence > threshold:
matched = True
(x, y) = ret['result']
position = (x+dx, y+dy) # fix by offset
elif match_method == consts.IMAGE_MATCH_METHOD_SIFT:
ret = ac.find_sift(screen, search_img, min_match_count=10)
if ret is None:
return None
confidence = ret['confidence']
matches, total = confidence
if 1.0*matches/total > 0.5: # FIXME(ssx): sift just write here
matched = True
(x, y) = ret['result']
position = (x+dx, y+dy) # fix by offset
elif match_method == consts.IMAGE_MATCH_METHOD_AUTO:
fp = self._match_auto(screen, search_img, threshold)
if fp is None:
return None
(x, y) = fp.pos
position = (x+dx, y+dy)
return FindPoint(position, fp.confidence, fp.method, fp.matched)
else:
raise TypeError("Invalid image match method: %s" %(match_method,))
(x, y) = ret['result']
position = (x+dx, y+dy) # fix by offset
if self.bounds:
x, y = position
position = (x+self.bounds.left, y+self.bounds.top)
return FindPoint(position, confidence, match_method, matched=matched) |
def get(self, server):
""" Retrieve credentials for `server`. If no credentials are found,
a `StoreError` will be raised.
"""
if not isinstance(server, six.binary_type):
server = server.encode('utf-8')
data = self._execute('get', server)
result = json.loads(data.decode('utf-8'))
# docker-credential-pass will return an object for inexistent servers
# whereas other helpers will exit with returncode != 0. For
# consistency, if no significant data is returned,
# raise CredentialsNotFound
if result['Username'] == '' and result['Secret'] == '':
raise errors.CredentialsNotFound(
'No matching credentials in {}'.format(self.program)
)
return result | Retrieve credentials for `server`. If no credentials are found,
a `StoreError` will be raised. | Below is the the instruction that describes the task:
### Input:
Retrieve credentials for `server`. If no credentials are found,
a `StoreError` will be raised.
### Response:
def get(self, server):
""" Retrieve credentials for `server`. If no credentials are found,
a `StoreError` will be raised.
"""
if not isinstance(server, six.binary_type):
server = server.encode('utf-8')
data = self._execute('get', server)
result = json.loads(data.decode('utf-8'))
# docker-credential-pass will return an object for inexistent servers
# whereas other helpers will exit with returncode != 0. For
# consistency, if no significant data is returned,
# raise CredentialsNotFound
if result['Username'] == '' and result['Secret'] == '':
raise errors.CredentialsNotFound(
'No matching credentials in {}'.format(self.program)
)
return result |
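A short usage sketch, assuming `store` is an already-constructed instance of the credential-store class above and that the docker-credential helper it wraps is installed; the registry URL is only an example.
# Hypothetical usage; `store` wraps an installed docker-credential helper.
try:
    creds = store.get('https://index.docker.io/v1/')
    print(creds['Username'], creds['Secret'])
except errors.CredentialsNotFound:
    print('no credentials stored for this registry')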
def full_s(self):
""" Get the full singular value matrix of self
Returns
-------
Matrix : Matrix
"""
x = np.zeros((self.shape),dtype=np.float32)
x[:self.s.shape[0],:self.s.shape[0]] = self.s.as_2d
s = Matrix(x=x, row_names=self.row_names,
col_names=self.col_names, isdiagonal=False,
autoalign=False)
return s | Get the full singular value matrix of self
Returns
-------
Matrix : Matrix | Below is the the instruction that describes the task:
### Input:
Get the full singular value matrix of self
Returns
-------
Matrix : Matrix
### Response:
def full_s(self):
""" Get the full singular value matrix of self
Returns
-------
Matrix : Matrix
"""
x = np.zeros((self.shape),dtype=np.float32)
x[:self.s.shape[0],:self.s.shape[0]] = self.s.as_2d
s = Matrix(x=x, row_names=self.row_names,
col_names=self.col_names, isdiagonal=False,
autoalign=False)
return s |
def push_front(self, value):
'''Appends a copy of ``value`` to the beginning of the list.'''
self.cache.push_front(self.value_pickler.dumps(value)) | Appends a copy of ``value`` to the beginning of the list. | Below is the the instruction that describes the task:
### Input:
Appends a copy of ``value`` to the beginning of the list.
### Response:
def push_front(self, value):
'''Appends a copy of ``value`` to the beginning of the list.'''
self.cache.push_front(self.value_pickler.dumps(value)) |
def tv2(data,weight,Niter=50):
"""
chambolles tv regularized denoising
weight should be around 2+1.5*noise_sigma
"""
prog = OCLProgram(abspath("kernels/tv2.cl"))
data_im = OCLImage.from_array(data.astype(np.float32, copy=False))
pImgs = [ dev.createImage(data.shape[::-1],
mem_flags = cl.mem_flags.READ_WRITE,
dtype= np.float32,
channel_order = cl.channel_order.RGBA)
for i in range(2)]
outImg = dev.createImage(data.shape[::-1],
dtype = np.float32,
mem_flags = cl.mem_flags.READ_WRITE)
dev.writeImage(inImg,data.astype(np.float32));
dev.writeImage(pImgs[0],np.zeros((4,)+data.shape,dtype=np.float32));
dev.writeImage(pImgs[1],np.zeros((4,)+data.shape,dtype=np.float32));
for i in range(Niter):
proc.runKernel("div_step",inImg.shape,None,
inImg,pImgs[i%2],outImg)
proc.runKernel("grad_step",inImg.shape,None,
outImg,pImgs[i%2],pImgs[1-i%2],
np.float32(weight))
return dev.readImage(outImg,dtype=np.float32) | chambolles tv regularized denoising
weight should be around 2+1.5*noise_sigma | Below is the the instruction that describes the task:
### Input:
chambolles tv regularized denoising
weight should be around 2+1.5*noise_sigma
### Response:
def tv2(data,weight,Niter=50):
"""
chambolles tv regularized denoising
weight should be around 2+1.5*noise_sigma
"""
prog = OCLProgram(abspath("kernels/tv2.cl"))
data_im = OCLImage.from_array(data.astype(np.float32, copy=False))
pImgs = [ dev.createImage(data.shape[::-1],
mem_flags = cl.mem_flags.READ_WRITE,
dtype= np.float32,
channel_order = cl.channel_order.RGBA)
for i in range(2)]
outImg = dev.createImage(data.shape[::-1],
dtype = np.float32,
mem_flags = cl.mem_flags.READ_WRITE)
dev.writeImage(inImg,data.astype(np.float32));
dev.writeImage(pImgs[0],np.zeros((4,)+data.shape,dtype=np.float32));
dev.writeImage(pImgs[1],np.zeros((4,)+data.shape,dtype=np.float32));
for i in range(Niter):
proc.runKernel("div_step",inImg.shape,None,
inImg,pImgs[i%2],outImg)
proc.runKernel("grad_step",inImg.shape,None,
outImg,pImgs[i%2],pImgs[1-i%2],
np.float32(weight))
return dev.readImage(outImg,dtype=np.float32) |
def _arith_method_SPARSE_SERIES(cls, op, special):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
op_name = _get_op_name(op, special)
def wrapper(self, other):
if isinstance(other, ABCDataFrame):
return NotImplemented
elif isinstance(other, ABCSeries):
if not isinstance(other, ABCSparseSeries):
other = other.to_sparse(fill_value=self.fill_value)
return _sparse_series_op(self, other, op, op_name)
elif is_scalar(other):
with np.errstate(all='ignore'):
new_values = op(self.values, other)
return self._constructor(new_values,
index=self.index,
name=self.name)
else: # pragma: no cover
raise TypeError('operation with {other} not supported'
.format(other=type(other)))
wrapper.__name__ = op_name
return wrapper | Wrapper function for Series arithmetic operations, to avoid
code duplication. | Below is the the instruction that describes the task:
### Input:
Wrapper function for Series arithmetic operations, to avoid
code duplication.
### Response:
def _arith_method_SPARSE_SERIES(cls, op, special):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
op_name = _get_op_name(op, special)
def wrapper(self, other):
if isinstance(other, ABCDataFrame):
return NotImplemented
elif isinstance(other, ABCSeries):
if not isinstance(other, ABCSparseSeries):
other = other.to_sparse(fill_value=self.fill_value)
return _sparse_series_op(self, other, op, op_name)
elif is_scalar(other):
with np.errstate(all='ignore'):
new_values = op(self.values, other)
return self._constructor(new_values,
index=self.index,
name=self.name)
else: # pragma: no cover
raise TypeError('operation with {other} not supported'
.format(other=type(other)))
wrapper.__name__ = op_name
return wrapper |
def _update_record(self, record_id, name, address, ttl):
"""Updates an existing record."""
data = json.dumps({'record': {'name': name,
'content': address,
'ttl': ttl}})
headers = {'Content-Type': 'application/json'}
request = self._session.put(self._baseurl + '/%d' % record_id,
data=data, headers=headers)
if not request.ok:
raise RuntimeError('Failed to update record: %s - %s' %
(self._format_hostname(name), request.json()))
record = request.json()
if 'record' not in record or 'id' not in record['record']:
raise RuntimeError('Invalid record JSON format: %s - %s' %
(self._format_hostname(name), request.json()))
return record['record'] | Updates an existing record. | Below is the the instruction that describes the task:
### Input:
Updates an existing record.
### Response:
def _update_record(self, record_id, name, address, ttl):
"""Updates an existing record."""
data = json.dumps({'record': {'name': name,
'content': address,
'ttl': ttl}})
headers = {'Content-Type': 'application/json'}
request = self._session.put(self._baseurl + '/%d' % record_id,
data=data, headers=headers)
if not request.ok:
raise RuntimeError('Failed to update record: %s - %s' %
(self._format_hostname(name), request.json()))
record = request.json()
if 'record' not in record or 'id' not in record['record']:
raise RuntimeError('Invalid record JSON format: %s - %s' %
(self._format_hostname(name), request.json()))
return record['record'] |
def clean_description(self):
"""
Text content validation
"""
description = self.cleaned_data.get("description")
validation_helper = safe_import_module(settings.FORUM_TEXT_VALIDATOR_HELPER_PATH)
if validation_helper is not None:
return validation_helper(self, description)
else:
return description | Text content validation | Below is the the instruction that describes the task:
### Input:
Text content validation
### Response:
def clean_description(self):
"""
Text content validation
"""
description = self.cleaned_data.get("description")
validation_helper = safe_import_module(settings.FORUM_TEXT_VALIDATOR_HELPER_PATH)
if validation_helper is not None:
return validation_helper(self, description)
else:
return description |
def get_item_admin_session(self, proxy):
"""Gets the ``OsidSession`` associated with the item administration service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.ItemAdminSession) - an
``ItemAdminSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_item_admin()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_item_admin()`` is ``true``.*
"""
if not self.supports_item_admin():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.ItemAdminSession(proxy=proxy, runtime=self._runtime) | Gets the ``OsidSession`` associated with the item administration service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.ItemAdminSession) - an
``ItemAdminSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_item_admin()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_item_admin()`` is ``true``.* | Below is the the instruction that describes the task:
### Input:
Gets the ``OsidSession`` associated with the item administration service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.ItemAdminSession) - an
``ItemAdminSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_item_admin()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_item_admin()`` is ``true``.*
### Response:
def get_item_admin_session(self, proxy):
"""Gets the ``OsidSession`` associated with the item administration service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.assessment.ItemAdminSession) - an
``ItemAdminSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_item_admin()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_item_admin()`` is ``true``.*
"""
if not self.supports_item_admin():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.ItemAdminSession(proxy=proxy, runtime=self._runtime) |
def particle_covariance_mtx(weights,locations):
"""
Returns an estimate of the covariance of a distribution
represented by a given set of SMC particles.
:param weights: An array containing the weights of each
particle.
:param locations: An array containing the locations of
each particle.
:rtype: :class:`numpy.ndarray`, shape
``(n_modelparams, n_modelparams)``.
:returns: An array containing the estimated covariance matrix.
"""
# TODO: add shapes to docstring.
warnings.warn('particle_covariance_mtx is deprecated, please use distributions.ParticleDistribution',
DeprecationWarning)
# Find the mean model vector, shape (n_modelparams, ).
mu = particle_meanfn(weights, locations)
# Transpose the particle locations to have shape
# (n_modelparams, n_particles).
xs = locations.transpose([1, 0])
# Give a shorter name to the particle weights, shape (n_particles, ).
ws = weights
cov = (
# This sum is a reduction over the particle index, chosen to be
# axis=2. Thus, the sum represents an expectation value over the
# outer product $x . x^T$.
#
# All three factors have the particle index as the rightmost
# index, axis=2. Using the Einstein summation convention (ESC),
# we can reduce over the particle index easily while leaving
# the model parameter index to vary between the two factors
# of xs.
#
# This corresponds to evaluating A_{m,n} = w_{i} x_{m,i} x_{n,i}
# using the ESC, where A_{m,n} is the temporary array created.
np.einsum('i,mi,ni', ws, xs, xs)
# We finish by subtracting from the above expectation value
# the outer product $mu . mu^T$.
- np.dot(mu[..., np.newaxis], mu[np.newaxis, ...])
)
# The SMC approximation is not guaranteed to produce a
# positive-semidefinite covariance matrix. If a negative eigenvalue
# is produced, we should warn the caller of this.
assert np.all(np.isfinite(cov))
if not np.all(la.eig(cov)[0] >= 0):
warnings.warn('Numerical error in covariance estimation causing positive semidefinite violation.', ApproximationWarning)
return cov | Returns an estimate of the covariance of a distribution
represented by a given set of SMC particles.
:param weights: An array containing the weights of each
particle.
:param locations: An array containing the locations of
each particle.
:rtype: :class:`numpy.ndarray`, shape
``(n_modelparams, n_modelparams)``.
:returns: An array containing the estimated covariance matrix. | Below is the the instruction that describes the task:
### Input:
Returns an estimate of the covariance of a distribution
represented by a given set of SMC particles.
:param weights: An array containing the weights of each
particle.
:param locations: An array containing the locations of
each particle.
:rtype: :class:`numpy.ndarray`, shape
``(n_modelparams, n_modelparams)``.
:returns: An array containing the estimated covariance matrix.
### Response:
def particle_covariance_mtx(weights,locations):
"""
Returns an estimate of the covariance of a distribution
represented by a given set of SMC particles.
:param weights: An array containing the weights of each
particle.
:param locations: An array containing the locations of
each particle.
:rtype: :class:`numpy.ndarray`, shape
``(n_modelparams, n_modelparams)``.
:returns: An array containing the estimated covariance matrix.
"""
# TODO: add shapes to docstring.
warnings.warn('particle_covariance_mtx is deprecated, please use distributions.ParticleDistribution',
DeprecationWarning)
# Find the mean model vector, shape (n_modelparams, ).
mu = particle_meanfn(weights, locations)
# Transpose the particle locations to have shape
# (n_modelparams, n_particles).
xs = locations.transpose([1, 0])
# Give a shorter name to the particle weights, shape (n_particles, ).
ws = weights
cov = (
# This sum is a reduction over the particle index, chosen to be
# axis=2. Thus, the sum represents an expectation value over the
# outer product $x . x^T$.
#
# All three factors have the particle index as the rightmost
# index, axis=2. Using the Einstein summation convention (ESC),
# we can reduce over the particle index easily while leaving
# the model parameter index to vary between the two factors
# of xs.
#
# This corresponds to evaluating A_{m,n} = w_{i} x_{m,i} x_{n,i}
# using the ESC, where A_{m,n} is the temporary array created.
np.einsum('i,mi,ni', ws, xs, xs)
# We finish by subtracting from the above expectation value
# the outer product $mu . mu^T$.
- np.dot(mu[..., np.newaxis], mu[np.newaxis, ...])
)
# The SMC approximation is not guaranteed to produce a
# positive-semidefinite covariance matrix. If a negative eigenvalue
# is produced, we should warn the caller of this.
assert np.all(np.isfinite(cov))
if not np.all(la.eig(cov)[0] >= 0):
warnings.warn('Numerical error in covariance estimation causing positive semidefinite violation.', ApproximationWarning)
return cov |
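A self-contained numpy sketch of the same weighted-covariance formula, using made-up particle data; shapes follow the function above, with locations of shape (n_particles, n_modelparams) and normalised weights.
import numpy as np

rng = np.random.default_rng(0)
locations = rng.normal(size=(1000, 2))   # n_particles x n_modelparams
weights = np.full(1000, 1.0 / 1000)      # normalised SMC weights

mu = weights @ locations                 # weighted mean, shape (2,)
xs = locations.T                         # n_modelparams x n_particles
cov = np.einsum('i,mi,ni', weights, xs, xs) - np.outer(mu, mu)
print(cov)                               # close to the identity for standard-normal particles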
def android_setup_view(request):
"""Set up a GCM session.
This does *not* require a valid login session. Instead, a token from the client
session is sent to the Android backend, which queries a POST request to this view.
The "android_gcm_rand" is randomly set when the Android app is detected through
the user agent. If it has the same value, it is assumed to be correct.
"""
logger.debug(request.POST)
if request.method == "POST":
if "user_token" in request.POST and "gcm_token" in request.POST:
user_token = request.POST.get("user_token")
gcm_token = request.POST.get("gcm_token")
logger.debug(user_token)
logger.debug(gcm_token)
try:
ncfg = NotificationConfig.objects.get(android_gcm_rand=user_token)
except NotificationConfig.DoesNotExist:
logger.debug("No pair")
return HttpResponse('{"error":"Invalid data."}', content_type="text/json")
ncfg.gcm_token = gcm_token
ncfg.android_gcm_rand = None
ncfg.android_gcm_date = None
ncfg.save()
return HttpResponse('{"success":"Now registered."}', content_type="text/json")
return HttpResponse('{"error":"Invalid arguments."}', content_type="text/json") | Set up a GCM session.
This does *not* require a valid login session. Instead, a token from the client
session is sent to the Android backend, which queries a POST request to this view.
The "android_gcm_rand" is randomly set when the Android app is detected through
the user agent. If it has the same value, it is assumed to be correct. | Below is the the instruction that describes the task:
### Input:
Set up a GCM session.
This does *not* require a valid login session. Instead, a token from the client
session is sent to the Android backend, which queries a POST request to this view.
The "android_gcm_rand" is randomly set when the Android app is detected through
the user agent. If it has the same value, it is assumed to be correct.
### Response:
def android_setup_view(request):
"""Set up a GCM session.
This does *not* require a valid login session. Instead, a token from the client
session is sent to the Android backend, which queries a POST request to this view.
The "android_gcm_rand" is randomly set when the Android app is detected through
the user agent. If it has the same value, it is assumed to be correct.
"""
logger.debug(request.POST)
if request.method == "POST":
if "user_token" in request.POST and "gcm_token" in request.POST:
user_token = request.POST.get("user_token")
gcm_token = request.POST.get("gcm_token")
logger.debug(user_token)
logger.debug(gcm_token)
try:
ncfg = NotificationConfig.objects.get(android_gcm_rand=user_token)
except NotificationConfig.DoesNotExist:
logger.debug("No pair")
return HttpResponse('{"error":"Invalid data."}', content_type="text/json")
ncfg.gcm_token = gcm_token
ncfg.android_gcm_rand = None
ncfg.android_gcm_date = None
ncfg.save()
return HttpResponse('{"success":"Now registered."}', content_type="text/json")
return HttpResponse('{"error":"Invalid arguments."}', content_type="text/json") |
def clear(self):
"""Clears the line and returns cursor to the start.
of line
Returns
-------
self
"""
if not self._enabled:
return self
self._stream.write('\r')
self._stream.write(self.CLEAR_LINE)
return self | Clears the line and returns cursor to the start.
of line
Returns
-------
self | Below is the the instruction that describes the task:
### Input:
Clears the line and returns cursor to the start.
of line
Returns
-------
self
### Response:
def clear(self):
"""Clears the line and returns cursor to the start.
of line
Returns
-------
self
"""
if not self._enabled:
return self
self._stream.write('\r')
self._stream.write(self.CLEAR_LINE)
return self |
def index_nearest(array, value):
"""
Finds index of nearest value in array.
Args:
array: numpy array
value:
Returns:
int
http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
"""
idx = (np.abs(array-value)).argmin()
return idx | Finds index of nearest value in array.
Args:
array: numpy array
value:
Returns:
int
http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array | Below is the the instruction that describes the task:
### Input:
Finds index of nearest value in array.
Args:
array: numpy array
value:
Returns:
int
http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
### Response:
def index_nearest(array, value):
"""
Finds index of nearest value in array.
Args:
array: numpy array
value:
Returns:
int
http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
"""
idx = (np.abs(array-value)).argmin()
return idx |
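A quick illustration of the nearest-index lookup defined above with a toy array.
import numpy as np
arr = np.array([0.0, 0.5, 1.0, 1.5])
print(index_nearest(arr, 0.7))   # 1, because 0.5 is the closest entry
print(index_nearest(arr, 10.0))  # 3, the last element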
def _IsUnparsedFlagAccessAllowed(self, name):
"""Determine whether to allow unparsed flag access or not."""
if _UNPARSED_FLAG_ACCESS_ENV_NAME in os.environ:
# We've been told explicitly what to do.
allow_unparsed_flag_access = (
os.getenv(_UNPARSED_FLAG_ACCESS_ENV_NAME) == '1')
elif self.__dict__['__reset_called']:
# Raise exception if .Reset() was called. This mostly happens in tests.
allow_unparsed_flag_access = False
elif _helpers.IsRunningTest():
# Staged "rollout", based on name of the flag so that we don't break
# everyone. Hashing the flag is a way of choosing a random but
# consistent subset of flags to lock down which we can make larger
# over time.
name_bytes = name.encode('utf8') if not isinstance(name, bytes) else name
flag_percentile = (
struct.unpack('<I', hashlib.md5(name_bytes).digest()[:4])[0] % 100)
allow_unparsed_flag_access = (
_UNPARSED_ACCESS_DISABLED_PERCENT <= flag_percentile)
else:
allow_unparsed_flag_access = True
return allow_unparsed_flag_access | Determine whether to allow unparsed flag access or not. | Below is the the instruction that describes the task:
### Input:
Determine whether to allow unparsed flag access or not.
### Response:
def _IsUnparsedFlagAccessAllowed(self, name):
"""Determine whether to allow unparsed flag access or not."""
if _UNPARSED_FLAG_ACCESS_ENV_NAME in os.environ:
# We've been told explicitly what to do.
allow_unparsed_flag_access = (
os.getenv(_UNPARSED_FLAG_ACCESS_ENV_NAME) == '1')
elif self.__dict__['__reset_called']:
# Raise exception if .Reset() was called. This mostly happens in tests.
allow_unparsed_flag_access = False
elif _helpers.IsRunningTest():
# Staged "rollout", based on name of the flag so that we don't break
# everyone. Hashing the flag is a way of choosing a random but
# consistent subset of flags to lock down which we can make larger
# over time.
name_bytes = name.encode('utf8') if not isinstance(name, bytes) else name
flag_percentile = (
struct.unpack('<I', hashlib.md5(name_bytes).digest()[:4])[0] % 100)
allow_unparsed_flag_access = (
_UNPARSED_ACCESS_DISABLED_PERCENT <= flag_percentile)
else:
allow_unparsed_flag_access = True
return allow_unparsed_flag_access |
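A standalone sketch of the percentile bucketing used above: hashing the flag name with MD5 and reducing it to 0-99 gives every flag a stable bucket, so the locked-down fraction can be widened over time without changing which flags are already locked.
import hashlib
import struct

def flag_percentile(name):
    name_bytes = name.encode('utf8') if not isinstance(name, bytes) else name
    return struct.unpack('<I', hashlib.md5(name_bytes).digest()[:4])[0] % 100

print(flag_percentile('verbosity'))                                   # same value on every run
print(flag_percentile('verbosity') == flag_percentile(b'verbosity'))  # True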
def __read_frame(self):
"""*Attempt* to read a frame. If we get an EAGAIN on the frame header,
it'll raise to our caller. If we get it *after* we already got the
header, wait-out the rest of the frame.
"""
if self.__frame_header_cache is None:
_logger.debug("Reading frame header.")
(length, frame_type) = struct.unpack('!II', self.__read(8))
self.__frame_header_cache = (length, frame_type)
else:
(length, frame_type) = self.__frame_header_cache
try:
data = self.__read(length - 4)
except errno.EAGAIN:
self.__frame_header_cache = (length, frame_type)
raise
self.__frame_header_cache = None
self.__process_message(frame_type, data) | *Attempt* to read a frame. If we get an EAGAIN on the frame header,
it'll raise to our caller. If we get it *after* we already got the
header, wait-out the rest of the frame. | Below is the the instruction that describes the task:
### Input:
*Attempt* to read a frame. If we get an EAGAIN on the frame header,
it'll raise to our caller. If we get it *after* we already got the
header, wait-out the rest of the frame.
### Response:
def __read_frame(self):
"""*Attempt* to read a frame. If we get an EAGAIN on the frame header,
it'll raise to our caller. If we get it *after* we already got the
header, wait-out the rest of the frame.
"""
if self.__frame_header_cache is None:
_logger.debug("Reading frame header.")
(length, frame_type) = struct.unpack('!II', self.__read(8))
self.__frame_header_cache = (length, frame_type)
else:
(length, frame_type) = self.__frame_header_cache
try:
data = self.__read(length - 4)
except errno.EAGAIN:
self.__frame_header_cache = (length, frame_type)
raise
self.__frame_header_cache = None
self.__process_message(frame_type, data) |
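A small sketch of the wire format the reader above assumes: a 4-byte big-endian length covering the frame type plus payload, a 4-byte frame type, then the payload. The frame-type constant below is an assumption for illustration.
import struct

FRAME_TYPE_MESSAGE = 2                    # assumed constant, for illustration only
payload = b'hello'
frame = struct.pack('!II', len(payload) + 4, FRAME_TYPE_MESSAGE) + payload

length, frame_type = struct.unpack('!II', frame[:8])
data = frame[8:8 + (length - 4)]
print(frame_type, data)                   # 2 b'hello'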
def sunset(self, date=None, zenith=None):
"""Calculate sunset times for locations.
Args:
date (datetime.date): Calculate rise or set for given date
zenith (str): Calculate sunset events, or start of twilight times
Returns:
list of list of datetime.datetime: The time for the sunset for each
point in each segment
"""
return (segment.sunset(date, zenith) for segment in self) | Calculate sunset times for locations.
Args:
date (datetime.date): Calculate rise or set for given date
zenith (str): Calculate sunset events, or start of twilight times
Returns:
list of list of datetime.datetime: The time for the sunset for each
point in each segment | Below is the the instruction that describes the task:
### Input:
Calculate sunset times for locations.
Args:
date (datetime.date): Calculate rise or set for given date
zenith (str): Calculate sunset events, or start of twilight times
Returns:
list of list of datetime.datetime: The time for the sunset for each
point in each segment
### Response:
def sunset(self, date=None, zenith=None):
"""Calculate sunset times for locations.
Args:
date (datetime.date): Calculate rise or set for given date
zenith (str): Calculate sunset events, or start of twilight times
Returns:
list of list of datetime.datetime: The time for the sunset for each
point in each segment
"""
return (segment.sunset(date, zenith) for segment in self) |
def metric_history(slug, granularity="daily", since=None, to=None,
with_data_table=False):
"""Template Tag to display a metric's history.
* ``slug`` -- the metric's unique slug
* ``granularity`` -- the granularity: daily, hourly, weekly, monthly, yearly
* ``since`` -- a datetime object or a string string matching one of the
following patterns: "YYYY-mm-dd" for a date or "YYYY-mm-dd HH:MM:SS" for
a date & time.
* ``to`` -- the date until which we start pulling metrics
* ``with_data_table`` -- if True, prints the raw data in a table.
"""
r = get_r()
try:
if since and len(since) == 10: # yyyy-mm-dd
since = datetime.strptime(since, "%Y-%m-%d")
elif since and len(since) == 19: # yyyy-mm-dd HH:MM:ss
since = datetime.strptime(since, "%Y-%m-%d %H:%M:%S")
if to and len(to) == 10: # yyyy-mm-dd
to = datetime.strptime(to, "%Y-%m-%d")
elif to and len(to) == 19: # yyyy-mm-dd HH:MM:ss
to = datetime.strptime(to, "%Y-%m-%d %H:%M:%S")
except (TypeError, ValueError):
# assume we got a datetime object or leave since = None
pass
metric_history = r.get_metric_history(
slugs=slug,
since=since,
to=to,
granularity=granularity
)
return {
'since': since,
'to': to,
'slug': slug,
'granularity': granularity,
'metric_history': metric_history,
'with_data_table': with_data_table,
} | Template Tag to display a metric's history.
* ``slug`` -- the metric's unique slug
* ``granularity`` -- the granularity: daily, hourly, weekly, monthly, yearly
* ``since`` -- a datetime object or a string string matching one of the
following patterns: "YYYY-mm-dd" for a date or "YYYY-mm-dd HH:MM:SS" for
a date & time.
* ``to`` -- the date until which we start pulling metrics
* ``with_data_table`` -- if True, prints the raw data in a table. | Below is the the instruction that describes the task:
### Input:
Template Tag to display a metric's history.
* ``slug`` -- the metric's unique slug
* ``granularity`` -- the granularity: daily, hourly, weekly, monthly, yearly
* ``since`` -- a datetime object or a string string matching one of the
following patterns: "YYYY-mm-dd" for a date or "YYYY-mm-dd HH:MM:SS" for
a date & time.
* ``to`` -- the date until which we start pulling metrics
* ``with_data_table`` -- if True, prints the raw data in a table.
### Response:
def metric_history(slug, granularity="daily", since=None, to=None,
with_data_table=False):
"""Template Tag to display a metric's history.
* ``slug`` -- the metric's unique slug
* ``granularity`` -- the granularity: daily, hourly, weekly, monthly, yearly
* ``since`` -- a datetime object or a string string matching one of the
following patterns: "YYYY-mm-dd" for a date or "YYYY-mm-dd HH:MM:SS" for
a date & time.
* ``to`` -- the date until which we start pulling metrics
* ``with_data_table`` -- if True, prints the raw data in a table.
"""
r = get_r()
try:
if since and len(since) == 10: # yyyy-mm-dd
since = datetime.strptime(since, "%Y-%m-%d")
elif since and len(since) == 19: # yyyy-mm-dd HH:MM:ss
since = datetime.strptime(since, "%Y-%m-%d %H:%M:%S")
if to and len(to) == 10: # yyyy-mm-dd
to = datetime.strptime(to, "%Y-%m-%d")
elif to and len(to) == 19: # yyyy-mm-dd HH:MM:ss
to = datetime.strptime(to, "%Y-%m-%d %H:%M:%S")
except (TypeError, ValueError):
# assume we got a datetime object or leave since = None
pass
metric_history = r.get_metric_history(
slugs=slug,
since=since,
to=to,
granularity=granularity
)
return {
'since': since,
'to': to,
'slug': slug,
'granularity': granularity,
'metric_history': metric_history,
'with_data_table': with_data_table,
} |
def crossJoin(self, other):
"""Returns the cartesian product with another :class:`DataFrame`.
:param other: Right side of the cartesian product.
>>> df.select("age", "name").collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df2.select("name", "height").collect()
[Row(name=u'Tom', height=80), Row(name=u'Bob', height=85)]
>>> df.crossJoin(df2.select("height")).select("age", "name", "height").collect()
[Row(age=2, name=u'Alice', height=80), Row(age=2, name=u'Alice', height=85),
Row(age=5, name=u'Bob', height=80), Row(age=5, name=u'Bob', height=85)]
"""
jdf = self._jdf.crossJoin(other._jdf)
return DataFrame(jdf, self.sql_ctx) | Returns the cartesian product with another :class:`DataFrame`.
:param other: Right side of the cartesian product.
>>> df.select("age", "name").collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df2.select("name", "height").collect()
[Row(name=u'Tom', height=80), Row(name=u'Bob', height=85)]
>>> df.crossJoin(df2.select("height")).select("age", "name", "height").collect()
[Row(age=2, name=u'Alice', height=80), Row(age=2, name=u'Alice', height=85),
Row(age=5, name=u'Bob', height=80), Row(age=5, name=u'Bob', height=85)] | Below is the the instruction that describes the task:
### Input:
Returns the cartesian product with another :class:`DataFrame`.
:param other: Right side of the cartesian product.
>>> df.select("age", "name").collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df2.select("name", "height").collect()
[Row(name=u'Tom', height=80), Row(name=u'Bob', height=85)]
>>> df.crossJoin(df2.select("height")).select("age", "name", "height").collect()
[Row(age=2, name=u'Alice', height=80), Row(age=2, name=u'Alice', height=85),
Row(age=5, name=u'Bob', height=80), Row(age=5, name=u'Bob', height=85)]
### Response:
def crossJoin(self, other):
"""Returns the cartesian product with another :class:`DataFrame`.
:param other: Right side of the cartesian product.
>>> df.select("age", "name").collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df2.select("name", "height").collect()
[Row(name=u'Tom', height=80), Row(name=u'Bob', height=85)]
>>> df.crossJoin(df2.select("height")).select("age", "name", "height").collect()
[Row(age=2, name=u'Alice', height=80), Row(age=2, name=u'Alice', height=85),
Row(age=5, name=u'Bob', height=80), Row(age=5, name=u'Bob', height=85)]
"""
jdf = self._jdf.crossJoin(other._jdf)
return DataFrame(jdf, self.sql_ctx) |
def assertFileSizeAlmostEqual(
self, filename, size, places=None, msg=None, delta=None):
'''Fail if ``filename`` does not have the given ``size`` as
determined by their difference rounded to the given number of
decimal ``places`` (default 7) and comparing to zero, or if
their difference is greater than a given ``delta``.
Parameters
----------
filename : str, bytes, file-like
size : int, float
places : int
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
delta : int, float
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fsize = self._get_file_size(filename)
self.assertAlmostEqual(
fsize, size, places=places, msg=msg, delta=delta) | Fail if ``filename`` does not have the given ``size`` as
determined by their difference rounded to the given number of
decimal ``places`` (default 7) and comparing to zero, or if
their difference is greater than a given ``delta``.
Parameters
----------
filename : str, bytes, file-like
size : int, float
places : int
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
delta : int, float
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like. | Below is the the instruction that describes the task:
### Input:
Fail if ``filename`` does not have the given ``size`` as
determined by their difference rounded to the given number of
decimal ``places`` (default 7) and comparing to zero, or if
their difference is greater than a given ``delta``.
Parameters
----------
filename : str, bytes, file-like
size : int, float
places : int
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
delta : int, float
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
### Response:
def assertFileSizeAlmostEqual(
self, filename, size, places=None, msg=None, delta=None):
'''Fail if ``filename`` does not have the given ``size`` as
determined by their difference rounded to the given number of
decimal ``places`` (default 7) and comparing to zero, or if
their difference is greater than a given ``delta``.
Parameters
----------
filename : str, bytes, file-like
size : int, float
places : int
msg : str
If not provided, the :mod:`marbles.mixins` or
:mod:`unittest` standard message will be used.
delta : int, float
Raises
------
TypeError
If ``filename`` is not a str or bytes object and is not
file-like.
'''
fsize = self._get_file_size(filename)
self.assertAlmostEqual(
fsize, size, places=places, msg=msg, delta=delta) |
def compile_template(template,
renderers,
default,
blacklist,
whitelist,
saltenv='base',
sls='',
input_data='',
**kwargs):
'''
Take the path to a template and return the high data structure
derived from the template.
Helpers:
:param mask_value:
Mask value for debugging purposes (prevent sensitive information etc)
example: "mask_value="pass*". All "passwd", "password", "pass" will
be masked (as text).
'''
# if any error occurs, we return an empty dictionary
ret = {}
log.debug('compile template: %s', template)
if 'env' in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop('env')
if template != ':string:':
# Template was specified incorrectly
if not isinstance(template, six.string_types):
log.error('Template was specified incorrectly: %s', template)
return ret
# Template does not exist
if not os.path.isfile(template):
log.error('Template does not exist: %s', template)
return ret
# Template is an empty file
if salt.utils.files.is_empty(template):
log.debug('Template is an empty file: %s', template)
return ret
with codecs.open(template, encoding=SLS_ENCODING) as ifile:
# data input to the first render function in the pipe
input_data = ifile.read()
if not input_data.strip():
# Template is nothing but whitespace
log.error('Template is nothing but whitespace: %s', template)
return ret
# Get the list of render funcs in the render pipe line.
render_pipe = template_shebang(template, renderers, default, blacklist, whitelist, input_data)
windows_newline = '\r\n' in input_data
input_data = StringIO(input_data)
for render, argline in render_pipe:
if salt.utils.stringio.is_readable(input_data):
input_data.seek(0) # pylint: disable=no-member
render_kwargs = dict(renderers=renderers, tmplpath=template)
render_kwargs.update(kwargs)
if argline:
render_kwargs['argline'] = argline
start = time.time()
ret = render(input_data, saltenv, sls, **render_kwargs)
log.profile(
'Time (in seconds) to render \'%s\' using \'%s\' renderer: %s',
template,
render.__module__.split('.')[-1],
time.time() - start
)
if ret is None:
# The file is empty or is being written elsewhere
time.sleep(0.01)
ret = render(input_data, saltenv, sls, **render_kwargs)
input_data = ret
if log.isEnabledFor(logging.GARBAGE): # pylint: disable=no-member
# If ret is not a StringIO (which means it was rendered using
# yaml, mako, or another engine which renders to a data
# structure) we don't want to log this.
if salt.utils.stringio.is_readable(ret):
log.debug('Rendered data from file: %s:\n%s', template,
salt.utils.sanitizers.mask_args_value(salt.utils.data.decode(ret.read()),
kwargs.get('mask_value'))) # pylint: disable=no-member
ret.seek(0) # pylint: disable=no-member
# Preserve newlines from original template
if windows_newline:
if salt.utils.stringio.is_readable(ret):
is_stringio = True
contents = ret.read()
else:
is_stringio = False
contents = ret
if isinstance(contents, six.string_types):
if '\r\n' not in contents:
contents = contents.replace('\n', '\r\n')
ret = StringIO(contents) if is_stringio else contents
else:
if is_stringio:
ret.seek(0)
return ret | Take the path to a template and return the high data structure
derived from the template.
Helpers:
:param mask_value:
Mask value for debugging purposes (prevent sensitive information etc)
example: "mask_value="pass*". All "passwd", "password", "pass" will
be masked (as text). | Below is the the instruction that describes the task:
### Input:
Take the path to a template and return the high data structure
derived from the template.
Helpers:
:param mask_value:
Mask value for debugging purposes (prevent sensitive information etc)
example: "mask_value="pass*". All "passwd", "password", "pass" will
be masked (as text).
### Response:
def compile_template(template,
renderers,
default,
blacklist,
whitelist,
saltenv='base',
sls='',
input_data='',
**kwargs):
'''
Take the path to a template and return the high data structure
derived from the template.
Helpers:
:param mask_value:
Mask value for debugging purposes (prevent sensitive information etc)
example: "mask_value="pass*". All "passwd", "password", "pass" will
be masked (as text).
'''
# if any error occurs, we return an empty dictionary
ret = {}
log.debug('compile template: %s', template)
if 'env' in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop('env')
if template != ':string:':
# Template was specified incorrectly
if not isinstance(template, six.string_types):
log.error('Template was specified incorrectly: %s', template)
return ret
# Template does not exist
if not os.path.isfile(template):
log.error('Template does not exist: %s', template)
return ret
# Template is an empty file
if salt.utils.files.is_empty(template):
log.debug('Template is an empty file: %s', template)
return ret
with codecs.open(template, encoding=SLS_ENCODING) as ifile:
# data input to the first render function in the pipe
input_data = ifile.read()
if not input_data.strip():
# Template is nothing but whitespace
log.error('Template is nothing but whitespace: %s', template)
return ret
# Get the list of render funcs in the render pipe line.
render_pipe = template_shebang(template, renderers, default, blacklist, whitelist, input_data)
windows_newline = '\r\n' in input_data
input_data = StringIO(input_data)
for render, argline in render_pipe:
if salt.utils.stringio.is_readable(input_data):
input_data.seek(0) # pylint: disable=no-member
render_kwargs = dict(renderers=renderers, tmplpath=template)
render_kwargs.update(kwargs)
if argline:
render_kwargs['argline'] = argline
start = time.time()
ret = render(input_data, saltenv, sls, **render_kwargs)
log.profile(
'Time (in seconds) to render \'%s\' using \'%s\' renderer: %s',
template,
render.__module__.split('.')[-1],
time.time() - start
)
if ret is None:
# The file is empty or is being written elsewhere
time.sleep(0.01)
ret = render(input_data, saltenv, sls, **render_kwargs)
input_data = ret
if log.isEnabledFor(logging.GARBAGE): # pylint: disable=no-member
# If ret is not a StringIO (which means it was rendered using
# yaml, mako, or another engine which renders to a data
# structure) we don't want to log this.
if salt.utils.stringio.is_readable(ret):
log.debug('Rendered data from file: %s:\n%s', template,
salt.utils.sanitizers.mask_args_value(salt.utils.data.decode(ret.read()),
kwargs.get('mask_value'))) # pylint: disable=no-member
ret.seek(0) # pylint: disable=no-member
# Preserve newlines from original template
if windows_newline:
if salt.utils.stringio.is_readable(ret):
is_stringio = True
contents = ret.read()
else:
is_stringio = False
contents = ret
if isinstance(contents, six.string_types):
if '\r\n' not in contents:
contents = contents.replace('\n', '\r\n')
ret = StringIO(contents) if is_stringio else contents
else:
if is_stringio:
ret.seek(0)
return ret |
def set_attr(self, attr_id, value):
"""Calls SQLSetConnectAttr with the given values.
:param attr_id: the attribute ID (integer) to set. These are ODBC or
driver constants.
:param value: the connection attribute value to set. At this time
only integer values are supported.
"""
fut = self._execute(self._conn.set_attr, attr_id, value)
return fut | Calls SQLSetConnectAttr with the given values.
:param attr_id: the attribute ID (integer) to set. These are ODBC or
driver constants.
:param value: the connection attribute value to set. At this time
only integer values are supported. | Below is the the instruction that describes the task:
### Input:
Calls SQLSetConnectAttr with the given values.
:param attr_id: the attribute ID (integer) to set. These are ODBC or
driver constants.
:param value: the connection attribute value to set. At this time
only integer values are supported.
### Response:
def set_attr(self, attr_id, value):
"""Calls SQLSetConnectAttr with the given values.
:param attr_id: the attribute ID (integer) to set. These are ODBC or
driver constants.
:param value: the connection attribute value to set. At this time
only integer values are supported.
"""
fut = self._execute(self._conn.set_attr, attr_id, value)
return fut |
def poll(self, id):
"""Poll with a given id.
Parameters
----------
id : int
Poll id.
Returns
-------
an :class:`ApiQuery` of :class:`Poll`
Raises
------
:class:`NotFound`
If a poll with the requested id doesn't exist.
"""
@api_query('poll', pollid=str(id))
async def result(_, root):
elem = root.find('POLL')
if not elem:
raise NotFound(f'No poll found with id {id}')
return Poll(elem)
return result(self) | Poll with a given id.
Parameters
----------
id : int
Poll id.
Returns
-------
an :class:`ApiQuery` of :class:`Poll`
Raises
------
:class:`NotFound`
If a poll with the requested id doesn't exist. | Below is the the instruction that describes the task:
### Input:
Poll with a given id.
Parameters
----------
id : int
Poll id.
Returns
-------
an :class:`ApiQuery` of :class:`Poll`
Raises
------
:class:`NotFound`
If a poll with the requested id doesn't exist.
### Response:
def poll(self, id):
"""Poll with a given id.
Parameters
----------
id : int
Poll id.
Returns
-------
an :class:`ApiQuery` of :class:`Poll`
Raises
------
:class:`NotFound`
If a poll with the requested id doesn't exist.
"""
@api_query('poll', pollid=str(id))
async def result(_, root):
elem = root.find('POLL')
if not elem:
raise NotFound(f'No poll found with id {id}')
return Poll(elem)
return result(self) |
def _normalized_cookie_tuples(self, attrs_set):
"""Return list of tuples containing normalised cookie information.
attrs_set is the list of lists of key,value pairs extracted from
the Set-Cookie or Set-Cookie2 headers.
Tuples are name, value, standard, rest, where name and value are the
cookie name and value, standard is a dictionary containing the standard
cookie-attributes (discard, secure, version, expires or max-age,
domain, path and port) and rest is a dictionary containing the rest of
the cookie-attributes.
"""
cookie_tuples = []
boolean_attrs = "discard", "secure"
value_attrs = ("version",
"expires", "max-age",
"domain", "path", "port",
"comment", "commenturl")
for cookie_attrs in attrs_set:
name, value = cookie_attrs[0]
# Build dictionary of standard cookie-attributes (standard) and
# dictionary of other cookie-attributes (rest).
# Note: expiry time is normalised to seconds since epoch. V0
# cookies should have the Expires cookie-attribute, and V1 cookies
# should have Max-Age, but since V1 includes RFC 2109 cookies (and
# since V0 cookies may be a mish-mash of Netscape and RFC 2109), we
# accept either (but prefer Max-Age).
max_age_set = False
bad_cookie = False
standard = {}
rest = {}
for k, v in cookie_attrs[1:]:
lc = k.lower()
# don't lose case distinction for unknown fields
if lc in value_attrs or lc in boolean_attrs:
k = lc
if k in boolean_attrs and v is None:
# boolean cookie-attribute is present, but has no value
# (like "discard", rather than "port=80")
v = True
if k in standard:
# only first value is significant
continue
if k == "domain":
if v is None:
_debug(" missing value for domain attribute")
bad_cookie = True
break
# RFC 2965 section 3.3.3
v = v.lower()
if k == "expires":
if max_age_set:
# Prefer max-age to expires (like Mozilla)
continue
if v is None:
_debug(" missing or invalid value for expires "
"attribute: treating as session cookie")
continue
if k == "max-age":
max_age_set = True
try:
v = int(v)
except ValueError:
_debug(" missing or invalid (non-numeric) value for "
"max-age attribute")
bad_cookie = True
break
# convert RFC 2965 Max-Age to seconds since epoch
# XXX Strictly you're supposed to follow RFC 2616
# age-calculation rules. Remember that zero Max-Age is a
# is a request to discard (old and new) cookie, though.
k = "expires"
v = self._now + v
if (k in value_attrs) or (k in boolean_attrs):
if (v is None and
k not in ("port", "comment", "commenturl")):
_debug(" missing value for %s attribute" % k)
bad_cookie = True
break
standard[k] = v
else:
rest[k] = v
if bad_cookie:
continue
cookie_tuples.append((name, value, standard, rest))
return cookie_tuples | Return list of tuples containing normalised cookie information.
attrs_set is the list of lists of key,value pairs extracted from
the Set-Cookie or Set-Cookie2 headers.
Tuples are name, value, standard, rest, where name and value are the
cookie name and value, standard is a dictionary containing the standard
cookie-attributes (discard, secure, version, expires or max-age,
domain, path and port) and rest is a dictionary containing the rest of
the cookie-attributes. | Below is the the instruction that describes the task:
### Input:
Return list of tuples containing normalised cookie information.
attrs_set is the list of lists of key,value pairs extracted from
the Set-Cookie or Set-Cookie2 headers.
Tuples are name, value, standard, rest, where name and value are the
cookie name and value, standard is a dictionary containing the standard
cookie-attributes (discard, secure, version, expires or max-age,
domain, path and port) and rest is a dictionary containing the rest of
the cookie-attributes.
### Response:
def _normalized_cookie_tuples(self, attrs_set):
"""Return list of tuples containing normalised cookie information.
attrs_set is the list of lists of key,value pairs extracted from
the Set-Cookie or Set-Cookie2 headers.
Tuples are name, value, standard, rest, where name and value are the
cookie name and value, standard is a dictionary containing the standard
cookie-attributes (discard, secure, version, expires or max-age,
domain, path and port) and rest is a dictionary containing the rest of
the cookie-attributes.
"""
cookie_tuples = []
boolean_attrs = "discard", "secure"
value_attrs = ("version",
"expires", "max-age",
"domain", "path", "port",
"comment", "commenturl")
for cookie_attrs in attrs_set:
name, value = cookie_attrs[0]
# Build dictionary of standard cookie-attributes (standard) and
# dictionary of other cookie-attributes (rest).
# Note: expiry time is normalised to seconds since epoch. V0
# cookies should have the Expires cookie-attribute, and V1 cookies
# should have Max-Age, but since V1 includes RFC 2109 cookies (and
# since V0 cookies may be a mish-mash of Netscape and RFC 2109), we
# accept either (but prefer Max-Age).
max_age_set = False
bad_cookie = False
standard = {}
rest = {}
for k, v in cookie_attrs[1:]:
lc = k.lower()
# don't lose case distinction for unknown fields
if lc in value_attrs or lc in boolean_attrs:
k = lc
if k in boolean_attrs and v is None:
# boolean cookie-attribute is present, but has no value
# (like "discard", rather than "port=80")
v = True
if k in standard:
# only first value is significant
continue
if k == "domain":
if v is None:
_debug(" missing value for domain attribute")
bad_cookie = True
break
# RFC 2965 section 3.3.3
v = v.lower()
if k == "expires":
if max_age_set:
# Prefer max-age to expires (like Mozilla)
continue
if v is None:
_debug(" missing or invalid value for expires "
"attribute: treating as session cookie")
continue
if k == "max-age":
max_age_set = True
try:
v = int(v)
except ValueError:
_debug(" missing or invalid (non-numeric) value for "
"max-age attribute")
bad_cookie = True
break
# convert RFC 2965 Max-Age to seconds since epoch
# XXX Strictly you're supposed to follow RFC 2616
# age-calculation rules. Remember that zero Max-Age is a
# is a request to discard (old and new) cookie, though.
k = "expires"
v = self._now + v
if (k in value_attrs) or (k in boolean_attrs):
if (v is None and
k not in ("port", "comment", "commenturl")):
_debug(" missing value for %s attribute" % k)
bad_cookie = True
break
standard[k] = v
else:
rest[k] = v
if bad_cookie:
continue
cookie_tuples.append((name, value, standard, rest))
return cookie_tuples |
def reduce(self, body):
'''
remove nodes from a list
'''
i = 0
while i < len(body):
stmnt = body[i]
if self.visit(stmnt):
body.pop(i)
else:
i += 1 | remove nodes from a list | Below is the the instruction that describes the task:
### Input:
remove nodes from a list
### Response:
def reduce(self, body):
'''
remove nodes from a list
'''
i = 0
while i < len(body):
stmnt = body[i]
if self.visit(stmnt):
body.pop(i)
else:
i += 1 |
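A minimal sketch of the same in-place filtering pattern, with a hypothetical visitor whose visit() returns True for nodes that should be dropped.
class DropStrings:
    def visit(self, node):
        return isinstance(node, str)   # drop string nodes, keep the rest

body = [1, 'a', 2, 'b', 3]
reducer = DropStrings()
i = 0
while i < len(body):
    if reducer.visit(body[i]):
        body.pop(i)
    else:
        i += 1
print(body)   # [1, 2, 3]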
def season_id(x):
"""takes in 4-digit years and returns API formatted seasonID
Input Values: YYYY
Used in:
"""
if len(str(x)) == 4:
try:
return "".join(["2", str(x)])
except ValueError:
raise ValueError("Enter the four digit year for the first half of the desired season")
else:
raise ValueError("Enter the four digit year for the first half of the desired season") | takes in 4-digit years and returns API formatted seasonID
Input Values: YYYY
Used in: | Below is the the instruction that describes the task:
### Input:
takes in 4-digit years and returns API formatted seasonID
Input Values: YYYY
Used in:
### Response:
def season_id(x):
"""takes in 4-digit years and returns API formatted seasonID
Input Values: YYYY
Used in:
"""
if len(str(x)) == 4:
try:
return "".join(["2", str(x)])
except ValueError:
raise ValueError("Enter the four digit year for the first half of the desired season")
else:
raise ValueError("Enter the four digit year for the first half of the desired season") |
def scopes(self, name=None, pk=None, status=ScopeStatus.ACTIVE, **kwargs):
# type: (Optional[str], Optional[str], Optional[str], **Any) -> List[Scope]
"""Return all scopes visible / accessible for the logged in user.
If additional `keyword=value` arguments are provided, these are added to the request parameters. Please
refer to the documentation of the KE-chain API for additional query parameters.
:param name: if provided, filter the search for a scope/project by name
:type name: basestring or None
:param pk: if provided, filter the search by scope_id
:type pk: basestring or None
:param status: if provided, filter the search for the status. eg. 'ACTIVE', 'TEMPLATE', 'LIBRARY'
:type status: basestring or None
:param kwargs: optional additional search arguments
:type kwargs: dict or None
:return: list of `Scopes`
:rtype: list(:class:`models.Scope`)
    :raises NotFoundError: if no scopes are found.
Example
-------
>>> client = Client(url='https://default.localhost:9443', verify=False)
>>> client.login('admin','pass')
>>> client.scopes() # doctest: Ellipsis
...
>>> client.scopes(name="Bike Project") # doctest: Ellipsis
...
>>> last_request = client.last_request # doctest: Ellipsis
...
"""
request_params = {
'name': name,
'id': pk,
'status': status,
}
if kwargs:
request_params.update(**kwargs)
response = self._request('GET', self._build_url('scopes'), params=request_params)
if response.status_code != requests.codes.ok: # pragma: no cover
raise NotFoundError("Could not retrieve scopes")
data = response.json()
return [Scope(s, client=self) for s in data['results']] | Return all scopes visible / accessible for the logged in user.
If additional `keyword=value` arguments are provided, these are added to the request parameters. Please
refer to the documentation of the KE-chain API for additional query parameters.
:param name: if provided, filter the search for a scope/project by name
:type name: basestring or None
:param pk: if provided, filter the search by scope_id
:type pk: basestring or None
:param status: if provided, filter the search for the status. eg. 'ACTIVE', 'TEMPLATE', 'LIBRARY'
:type status: basestring or None
:param kwargs: optional additional search arguments
:type kwargs: dict or None
:return: list of `Scopes`
:rtype: list(:class:`models.Scope`)
:raises NotFoundError: if no scopes are found.
Example
-------
>>> client = Client(url='https://default.localhost:9443', verify=False)
>>> client.login('admin','pass')
>>> client.scopes() # doctest: Ellipsis
...
>>> client.scopes(name="Bike Project") # doctest: Ellipsis
...
>>> last_request = client.last_request # doctest: Ellipsis
... | Below is the the instruction that describes the task:
### Input:
Return all scopes visible / accessible for the logged in user.
If additional `keyword=value` arguments are provided, these are added to the request parameters. Please
refer to the documentation of the KE-chain API for additional query parameters.
:param name: if provided, filter the search for a scope/project by name
:type name: basestring or None
:param pk: if provided, filter the search by scope_id
:type pk: basestring or None
:param status: if provided, filter the search for the status. eg. 'ACTIVE', 'TEMPLATE', 'LIBRARY'
:type status: basestring or None
:param kwargs: optional additional search arguments
:type kwargs: dict or None
:return: list of `Scopes`
:rtype: list(:class:`models.Scope`)
:raises NotFoundError: if no scopes are found.
Example
-------
>>> client = Client(url='https://default.localhost:9443', verify=False)
>>> client.login('admin','pass')
>>> client.scopes() # doctest: Ellipsis
...
>>> client.scopes(name="Bike Project") # doctest: Ellipsis
...
>>> last_request = client.last_request # doctest: Ellipsis
...
### Response:
def scopes(self, name=None, pk=None, status=ScopeStatus.ACTIVE, **kwargs):
# type: (Optional[str], Optional[str], Optional[str], **Any) -> List[Scope]
"""Return all scopes visible / accessible for the logged in user.
If additional `keyword=value` arguments are provided, these are added to the request parameters. Please
refer to the documentation of the KE-chain API for additional query parameters.
:param name: if provided, filter the search for a scope/project by name
:type name: basestring or None
:param pk: if provided, filter the search by scope_id
:type pk: basestring or None
:param status: if provided, filter the search for the status. eg. 'ACTIVE', 'TEMPLATE', 'LIBRARY'
:type status: basestring or None
:param kwargs: optional additional search arguments
:type kwargs: dict or None
:return: list of `Scopes`
:rtype: list(:class:`models.Scope`)
    :raises NotFoundError: if no scopes are found.
Example
-------
>>> client = Client(url='https://default.localhost:9443', verify=False)
>>> client.login('admin','pass')
>>> client.scopes() # doctest: Ellipsis
...
>>> client.scopes(name="Bike Project") # doctest: Ellipsis
...
>>> last_request = client.last_request # doctest: Ellipsis
...
"""
request_params = {
'name': name,
'id': pk,
'status': status,
}
if kwargs:
request_params.update(**kwargs)
response = self._request('GET', self._build_url('scopes'), params=request_params)
if response.status_code != requests.codes.ok: # pragma: no cover
raise NotFoundError("Could not retrieve scopes")
data = response.json()
return [Scope(s, client=self) for s in data['results']] |
def read_all(self):
"""Returns the 2 byte Header ROM and all 120 byte static memory.
"""
log.debug("read all static memory")
cmd = "\x00\x00\x00" + self.uid
return self.transceive(cmd) | Returns the 2 byte Header ROM and all 120 byte static memory. | Below is the the instruction that describes the task:
### Input:
Returns the 2 byte Header ROM and all 120 byte static memory.
### Response:
def read_all(self):
"""Returns the 2 byte Header ROM and all 120 byte static memory.
"""
log.debug("read all static memory")
cmd = "\x00\x00\x00" + self.uid
return self.transceive(cmd) |
def drawPoint(self, x, y, silent=True):
"""
Draws a point on the current :py:class:`Layer` with the current :py:class:`Brush`.
Coordinates are relative to the original layer size WITHOUT downsampling applied.
        :param x: Point X coordinate.
        :param y: Point Y coordinate.
:rtype: Nothing.
"""
start = time.time()
#Downsample the coordinates
x = int(x/config.DOWNSAMPLING)
y = int(y/config.DOWNSAMPLING)
#Apply the dab with or without source caching
if self.brush.usesSourceCaching:
applyMirroredDab_jit(self.mirrorMode, self.image.getActiveLayer().data, int(x-self.brush.brushSize*0.5), int(y-self.brush.brushSize*0.5), self.brush.coloredBrushSource.copy(), config.CANVAS_SIZE, self.brush.brushMask)
else:
self.brush.makeDab(self.image.getActiveLayer(), int(x), int(y), self.color, self.secondColor, mirror=self.mirrorMode)
config.AVGTIME.append(time.time()-start) | Draws a point on the current :py:class:`Layer` with the current :py:class:`Brush`.
Coordinates are relative to the original layer size WITHOUT downsampling applied.
:param x: Point X coordinate.
:param y: Point Y coordinate.
:rtype: Nothing. | Below is the the instruction that describes the task:
### Input:
Draws a point on the current :py:class:`Layer` with the current :py:class:`Brush`.
Coordinates are relative to the original layer size WITHOUT downsampling applied.
:param x: Point X coordinate.
:param y: Point Y coordinate.
:rtype: Nothing.
### Response:
def drawPoint(self, x, y, silent=True):
"""
Draws a point on the current :py:class:`Layer` with the current :py:class:`Brush`.
Coordinates are relative to the original layer size WITHOUT downsampling applied.
        :param x: Point X coordinate.
        :param y: Point Y coordinate.
:rtype: Nothing.
"""
start = time.time()
#Downsample the coordinates
x = int(x/config.DOWNSAMPLING)
y = int(y/config.DOWNSAMPLING)
#Apply the dab with or without source caching
if self.brush.usesSourceCaching:
applyMirroredDab_jit(self.mirrorMode, self.image.getActiveLayer().data, int(x-self.brush.brushSize*0.5), int(y-self.brush.brushSize*0.5), self.brush.coloredBrushSource.copy(), config.CANVAS_SIZE, self.brush.brushMask)
else:
self.brush.makeDab(self.image.getActiveLayer(), int(x), int(y), self.color, self.secondColor, mirror=self.mirrorMode)
config.AVGTIME.append(time.time()-start) |
def compute_Pi_V_given_J(self, CDR3_seq, V_usage_mask, J_usage_mask):
"""Compute Pi_V conditioned on J.
This function returns the Pi array from the model factors of the V genomic
contributions, P(V, J)*P(delV|V). This corresponds to V(J)_{x_1}.
For clarity in parsing the algorithm implementation, we include which
instance attributes are used in the method as 'parameters.'
Parameters
----------
CDR3_seq : str
CDR3 sequence composed of 'amino acids' (single character symbols
each corresponding to a collection of codons as given by codons_dict).
V_usage_mask : list
Indices of the V alleles to be considered in the Pgen computation
J_usage_mask : list
Indices of the J alleles to be considered in the Pgen computation
self.cutV_genomic_CDR3_segs : list of strings
List of all the V genomic nucleotide sequences trimmed to begin at the
conserved C residue and with the maximum number of palindromic
insertions appended.
self.PVdelV_nt_pos_vec : list of ndarrays
For each V allele, format P(delV|V) into the correct form for a Pi
array or V(J)_{x_1}. This is only done for the first and last
position in each codon.
self.PVdelV_2nd_nt_pos_per_aa_vec : list of dicts
For each V allele, and each 'amino acid', format P(V)*P(delV|V) for
positions in the middle of a codon into the correct form for a Pi
array or V(J)_{x_1} given the 'amino acid'.
self.PVJ : ndarray
Joint probability distribution of V and J, P(V, J).
Returns
-------
Pi_V_given_J : list
List of (4, 3L) ndarrays corresponding to V(J)_{x_1}.
max_V_align: int
Maximum alignment of the CDR3_seq to any genomic V allele allowed by
V_usage_mask.
"""
#Note, the cutV_genomic_CDR3_segs INCLUDE the palindromic insertions and thus are max_palindrome nts longer than the template.
#furthermore, the genomic sequence should be pruned to start at the conserved C
    Pi_V_given_J = [np.zeros((4, len(CDR3_seq)*3)) for i in J_usage_mask] #Holds the aggregate weight for each nt possibility and position
alignment_lengths = []
for V_in in V_usage_mask:
try:
cutV_gen_seg = self.cutV_genomic_CDR3_segs[V_in]
except IndexError:
            print 'Check provided V usage mask. Contains indices out of allowed range.'
continue
current_alignment_length = self.max_nt_to_aa_alignment_left(CDR3_seq, cutV_gen_seg)
alignment_lengths += [current_alignment_length]
current_Pi_V = np.zeros((4, len(CDR3_seq)*3))
if current_alignment_length > 0:
#For first and last nt in a codon use PVdelV_nt_pos_vec
current_Pi_V[:, :current_alignment_length] = self.PVdelV_nt_pos_vec[V_in][:, :current_alignment_length]
for pos in range(1, current_alignment_length, 3): #for middle nt use PVdelV_2nd_nt_pos_per_aa_vec
current_Pi_V[:, pos] = self.PVdelV_2nd_nt_pos_per_aa_vec[V_in][CDR3_seq[pos/3]][:, pos]
for j, J_in in enumerate(J_usage_mask):
Pi_V_given_J[j][:, :current_alignment_length] += self.PVJ[V_in, J_in]*current_Pi_V[:, :current_alignment_length]
return Pi_V_given_J, max(alignment_lengths) | Compute Pi_V conditioned on J.
This function returns the Pi array from the model factors of the V genomic
contributions, P(V, J)*P(delV|V). This corresponds to V(J)_{x_1}.
For clarity in parsing the algorithm implementation, we include which
instance attributes are used in the method as 'parameters.'
Parameters
----------
CDR3_seq : str
CDR3 sequence composed of 'amino acids' (single character symbols
each corresponding to a collection of codons as given by codons_dict).
V_usage_mask : list
Indices of the V alleles to be considered in the Pgen computation
J_usage_mask : list
Indices of the J alleles to be considered in the Pgen computation
self.cutV_genomic_CDR3_segs : list of strings
List of all the V genomic nucleotide sequences trimmed to begin at the
conserved C residue and with the maximum number of palindromic
insertions appended.
self.PVdelV_nt_pos_vec : list of ndarrays
For each V allele, format P(delV|V) into the correct form for a Pi
array or V(J)_{x_1}. This is only done for the first and last
position in each codon.
self.PVdelV_2nd_nt_pos_per_aa_vec : list of dicts
For each V allele, and each 'amino acid', format P(V)*P(delV|V) for
positions in the middle of a codon into the correct form for a Pi
array or V(J)_{x_1} given the 'amino acid'.
self.PVJ : ndarray
Joint probability distribution of V and J, P(V, J).
Returns
-------
Pi_V_given_J : list
List of (4, 3L) ndarrays corresponding to V(J)_{x_1}.
max_V_align: int
Maximum alignment of the CDR3_seq to any genomic V allele allowed by
V_usage_mask. | Below is the the instruction that describes the task:
### Input:
Compute Pi_V conditioned on J.
This function returns the Pi array from the model factors of the V genomic
contributions, P(V, J)*P(delV|V). This corresponds to V(J)_{x_1}.
For clarity in parsing the algorithm implementation, we include which
instance attributes are used in the method as 'parameters.'
Parameters
----------
CDR3_seq : str
CDR3 sequence composed of 'amino acids' (single character symbols
each corresponding to a collection of codons as given by codons_dict).
V_usage_mask : list
Indices of the V alleles to be considered in the Pgen computation
J_usage_mask : list
Indices of the J alleles to be considered in the Pgen computation
self.cutV_genomic_CDR3_segs : list of strings
List of all the V genomic nucleotide sequences trimmed to begin at the
conserved C residue and with the maximum number of palindromic
insertions appended.
self.PVdelV_nt_pos_vec : list of ndarrays
For each V allele, format P(delV|V) into the correct form for a Pi
array or V(J)_{x_1}. This is only done for the first and last
position in each codon.
self.PVdelV_2nd_nt_pos_per_aa_vec : list of dicts
For each V allele, and each 'amino acid', format P(V)*P(delV|V) for
positions in the middle of a codon into the correct form for a Pi
array or V(J)_{x_1} given the 'amino acid'.
self.PVJ : ndarray
Joint probability distribution of V and J, P(V, J).
Returns
-------
Pi_V_given_J : list
List of (4, 3L) ndarrays corresponding to V(J)_{x_1}.
max_V_align: int
Maximum alignment of the CDR3_seq to any genomic V allele allowed by
V_usage_mask.
### Response:
def compute_Pi_V_given_J(self, CDR3_seq, V_usage_mask, J_usage_mask):
"""Compute Pi_V conditioned on J.
This function returns the Pi array from the model factors of the V genomic
contributions, P(V, J)*P(delV|V). This corresponds to V(J)_{x_1}.
For clarity in parsing the algorithm implementation, we include which
instance attributes are used in the method as 'parameters.'
Parameters
----------
CDR3_seq : str
CDR3 sequence composed of 'amino acids' (single character symbols
each corresponding to a collection of codons as given by codons_dict).
V_usage_mask : list
Indices of the V alleles to be considered in the Pgen computation
J_usage_mask : list
Indices of the J alleles to be considered in the Pgen computation
self.cutV_genomic_CDR3_segs : list of strings
List of all the V genomic nucleotide sequences trimmed to begin at the
conserved C residue and with the maximum number of palindromic
insertions appended.
self.PVdelV_nt_pos_vec : list of ndarrays
For each V allele, format P(delV|V) into the correct form for a Pi
array or V(J)_{x_1}. This is only done for the first and last
position in each codon.
self.PVdelV_2nd_nt_pos_per_aa_vec : list of dicts
For each V allele, and each 'amino acid', format P(V)*P(delV|V) for
positions in the middle of a codon into the correct form for a Pi
array or V(J)_{x_1} given the 'amino acid'.
self.PVJ : ndarray
Joint probability distribution of V and J, P(V, J).
Returns
-------
Pi_V_given_J : list
List of (4, 3L) ndarrays corresponding to V(J)_{x_1}.
max_V_align: int
Maximum alignment of the CDR3_seq to any genomic V allele allowed by
V_usage_mask.
"""
#Note, the cutV_genomic_CDR3_segs INCLUDE the palindromic insertions and thus are max_palindrome nts longer than the template.
#furthermore, the genomic sequence should be pruned to start at the conserved C
    Pi_V_given_J = [np.zeros((4, len(CDR3_seq)*3)) for i in J_usage_mask] #Holds the aggregate weight for each nt possibility and position
alignment_lengths = []
for V_in in V_usage_mask:
try:
cutV_gen_seg = self.cutV_genomic_CDR3_segs[V_in]
except IndexError:
            print 'Check provided V usage mask. Contains indices out of allowed range.'
continue
current_alignment_length = self.max_nt_to_aa_alignment_left(CDR3_seq, cutV_gen_seg)
alignment_lengths += [current_alignment_length]
current_Pi_V = np.zeros((4, len(CDR3_seq)*3))
if current_alignment_length > 0:
#For first and last nt in a codon use PVdelV_nt_pos_vec
current_Pi_V[:, :current_alignment_length] = self.PVdelV_nt_pos_vec[V_in][:, :current_alignment_length]
for pos in range(1, current_alignment_length, 3): #for middle nt use PVdelV_2nd_nt_pos_per_aa_vec
current_Pi_V[:, pos] = self.PVdelV_2nd_nt_pos_per_aa_vec[V_in][CDR3_seq[pos/3]][:, pos]
for j, J_in in enumerate(J_usage_mask):
Pi_V_given_J[j][:, :current_alignment_length] += self.PVJ[V_in, J_in]*current_Pi_V[:, :current_alignment_length]
return Pi_V_given_J, max(alignment_lengths) |
def get_instance(self, payload):
"""
Build an instance of ThisMonthInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthInstance
:rtype: twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthInstance
"""
return ThisMonthInstance(self._version, payload, account_sid=self._solution['account_sid'], ) | Build an instance of ThisMonthInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthInstance
:rtype: twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthInstance | Below is the the instruction that describes the task:
### Input:
Build an instance of ThisMonthInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthInstance
:rtype: twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthInstance
### Response:
def get_instance(self, payload):
"""
Build an instance of ThisMonthInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthInstance
:rtype: twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthInstance
"""
return ThisMonthInstance(self._version, payload, account_sid=self._solution['account_sid'], ) |
def copy(self, *, shallow=False):
"""Return a copy of a table."""
table = type(self)()
for label in self.labels:
if shallow:
column = self[label]
else:
column = np.copy(self[label])
self._add_column_and_format(table, label, column)
return table | Return a copy of a table. | Below is the the instruction that describes the task:
### Input:
Return a copy of a table.
### Response:
def copy(self, *, shallow=False):
"""Return a copy of a table."""
table = type(self)()
for label in self.labels:
if shallow:
column = self[label]
else:
column = np.copy(self[label])
self._add_column_and_format(table, label, column)
return table |
def waveform_to_examples(data, sample_rate):
"""Converts audio waveform into an array of examples for VGGish.
Args:
data: np.array of either one dimension (mono) or two dimensions
(multi-channel, with the outer dimension representing channels).
Each sample is generally expected to lie in the range [-1.0, +1.0],
although this is not required.
sample_rate: Sample rate of data.
Returns:
3-D np.array of shape [num_examples, num_frames, num_bands] which represents
a sequence of examples, each of which contains a patch of log mel
spectrogram, covering num_frames frames of audio and num_bands mel frequency
bands, where the frame length is vggish_params.STFT_HOP_LENGTH_SECONDS.
"""
import resampy
# Convert to mono.
if len(data.shape) > 1:
data = np.mean(data, axis=1)
# Resample to the rate assumed by VGGish.
if sample_rate != vggish_params.SAMPLE_RATE:
data = resampy.resample(data, sample_rate, vggish_params.SAMPLE_RATE)
# Compute log mel spectrogram features.
log_mel = mel_features.log_mel_spectrogram(
data,
audio_sample_rate=vggish_params.SAMPLE_RATE,
log_offset=vggish_params.LOG_OFFSET,
window_length_secs=vggish_params.STFT_WINDOW_LENGTH_SECONDS,
hop_length_secs=vggish_params.STFT_HOP_LENGTH_SECONDS,
num_mel_bins=vggish_params.NUM_MEL_BINS,
lower_edge_hertz=vggish_params.MEL_MIN_HZ,
upper_edge_hertz=vggish_params.MEL_MAX_HZ)
# Frame features into examples.
features_sample_rate = 1.0 / vggish_params.STFT_HOP_LENGTH_SECONDS
example_window_length = int(round(
vggish_params.EXAMPLE_WINDOW_SECONDS * features_sample_rate))
example_hop_length = int(round(
vggish_params.EXAMPLE_HOP_SECONDS * features_sample_rate))
log_mel_examples = mel_features.frame(
log_mel,
window_length=example_window_length,
hop_length=example_hop_length)
return log_mel_examples | Converts audio waveform into an array of examples for VGGish.
Args:
data: np.array of either one dimension (mono) or two dimensions
(multi-channel, with the outer dimension representing channels).
Each sample is generally expected to lie in the range [-1.0, +1.0],
although this is not required.
sample_rate: Sample rate of data.
Returns:
3-D np.array of shape [num_examples, num_frames, num_bands] which represents
a sequence of examples, each of which contains a patch of log mel
spectrogram, covering num_frames frames of audio and num_bands mel frequency
bands, where the frame length is vggish_params.STFT_HOP_LENGTH_SECONDS. | Below is the the instruction that describes the task:
### Input:
Converts audio waveform into an array of examples for VGGish.
Args:
data: np.array of either one dimension (mono) or two dimensions
(multi-channel, with the outer dimension representing channels).
Each sample is generally expected to lie in the range [-1.0, +1.0],
although this is not required.
sample_rate: Sample rate of data.
Returns:
3-D np.array of shape [num_examples, num_frames, num_bands] which represents
a sequence of examples, each of which contains a patch of log mel
spectrogram, covering num_frames frames of audio and num_bands mel frequency
bands, where the frame length is vggish_params.STFT_HOP_LENGTH_SECONDS.
### Response:
def waveform_to_examples(data, sample_rate):
"""Converts audio waveform into an array of examples for VGGish.
Args:
data: np.array of either one dimension (mono) or two dimensions
(multi-channel, with the outer dimension representing channels).
Each sample is generally expected to lie in the range [-1.0, +1.0],
although this is not required.
sample_rate: Sample rate of data.
Returns:
3-D np.array of shape [num_examples, num_frames, num_bands] which represents
a sequence of examples, each of which contains a patch of log mel
spectrogram, covering num_frames frames of audio and num_bands mel frequency
bands, where the frame length is vggish_params.STFT_HOP_LENGTH_SECONDS.
"""
import resampy
# Convert to mono.
if len(data.shape) > 1:
data = np.mean(data, axis=1)
# Resample to the rate assumed by VGGish.
if sample_rate != vggish_params.SAMPLE_RATE:
data = resampy.resample(data, sample_rate, vggish_params.SAMPLE_RATE)
# Compute log mel spectrogram features.
log_mel = mel_features.log_mel_spectrogram(
data,
audio_sample_rate=vggish_params.SAMPLE_RATE,
log_offset=vggish_params.LOG_OFFSET,
window_length_secs=vggish_params.STFT_WINDOW_LENGTH_SECONDS,
hop_length_secs=vggish_params.STFT_HOP_LENGTH_SECONDS,
num_mel_bins=vggish_params.NUM_MEL_BINS,
lower_edge_hertz=vggish_params.MEL_MIN_HZ,
upper_edge_hertz=vggish_params.MEL_MAX_HZ)
# Frame features into examples.
features_sample_rate = 1.0 / vggish_params.STFT_HOP_LENGTH_SECONDS
example_window_length = int(round(
vggish_params.EXAMPLE_WINDOW_SECONDS * features_sample_rate))
example_hop_length = int(round(
vggish_params.EXAMPLE_HOP_SECONDS * features_sample_rate))
log_mel_examples = mel_features.frame(
log_mel,
window_length=example_window_length,
hop_length=example_hop_length)
return log_mel_examples |
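A hedged usage sketch for waveform_to_examples above (it assumes the VGGish helper modules vggish_params, mel_features and resampy are importable, as the function itself requires; the tone is synthetic, so no audio file is needed):

import numpy as np

sr = 16000  # matches the stock vggish_params.SAMPLE_RATE, so no resampling happens
t = np.arange(sr) / float(sr)
mono = 0.5 * np.sin(2 * np.pi * 440.0 * t)  # one second of a 440 Hz tone
examples = waveform_to_examples(mono, sr)
print(examples.shape)  # (num_examples, num_frames, num_bands); typically (1, 96, 64) with stock parameters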
def as_ul(self, current_linkable=False, class_current="active_link",
before_1="", after_1="", before_all="", after_all=""):
"""
It returns menu as ul
"""
return self.__do_menu("as_ul", current_linkable, class_current,
before_1=before_1, after_1=after_1, before_all=before_all, after_all=after_all) | It returns menu as ul | Below is the the instruction that describes the task:
### Input:
It returns menu as ul
### Response:
def as_ul(self, current_linkable=False, class_current="active_link",
before_1="", after_1="", before_all="", after_all=""):
"""
It returns menu as ul
"""
return self.__do_menu("as_ul", current_linkable, class_current,
before_1=before_1, after_1=after_1, before_all=before_all, after_all=after_all) |
def set_filling(self, populations):
"""Sets the orbital enenergies for on the reference of the free case.
By setting the desired local populations on every orbital.
Then generate the necesary operators to respect such configuraion"""
populations = np.asarray(populations)
#
# self.param['orbital_e'] -= bethe_findfill_zeroT( \
# self.param['avg_particles'],
# self.param['orbital_e'],
# self.param['hopping'])
efermi = - bethe_find_crystalfield(
populations, self.param['hopping'])
self.param['populations'] = populations
# fermion_avg(efermi, self.param['hopping'], 'ocupation')
self.param['ekin'] = fermion_avg(efermi, self.param['hopping'], 'ekin')
    spin_gen_op(self.oper, estimate_gauge(populations)) | Sets the orbital energies relative to the reference of the free case.
By setting the desired local populations on every orbital.
Then generates the necessary operators to respect such configuration. | Below is the the instruction that describes the task:
### Input:
Sets the orbital energies relative to the reference of the free case.
By setting the desired local populations on every orbital.
Then generates the necessary operators to respect such configuration.
### Response:
def set_filling(self, populations):
"""Sets the orbital enenergies for on the reference of the free case.
By setting the desired local populations on every orbital.
Then generate the necesary operators to respect such configuraion"""
populations = np.asarray(populations)
#
# self.param['orbital_e'] -= bethe_findfill_zeroT( \
# self.param['avg_particles'],
# self.param['orbital_e'],
# self.param['hopping'])
efermi = - bethe_find_crystalfield(
populations, self.param['hopping'])
self.param['populations'] = populations
# fermion_avg(efermi, self.param['hopping'], 'ocupation')
self.param['ekin'] = fermion_avg(efermi, self.param['hopping'], 'ekin')
spin_gen_op(self.oper, estimate_gauge(populations)) |
def start_date(self) -> Optional[datetime.date]:
"""
Returns the start date of the set of intervals, or ``None`` if empty.
"""
if not self.intervals:
return None
return self.start_datetime().date() | Returns the start date of the set of intervals, or ``None`` if empty. | Below is the the instruction that describes the task:
### Input:
Returns the start date of the set of intervals, or ``None`` if empty.
### Response:
def start_date(self) -> Optional[datetime.date]:
"""
Returns the start date of the set of intervals, or ``None`` if empty.
"""
if not self.intervals:
return None
return self.start_datetime().date() |
def __placeSellShortOrder(self, tick):
''' place short sell order'''
share=math.floor(self.__strategy.getAccountCopy().getCash() / float(tick.close))
sellShortOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL_SHORT,
is_market=True,
security=self.__security,
share=share)
if self.__strategy.placeOrder(sellShortOrder):
self.__buyOrder=sellShortOrder
# place stop order
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY_TO_COVER,
is_stop=True,
security=self.__security,
price=tick.close * 1.05,
share=0 - share)
self.__placeStopOrder(stopOrder) | place short sell order | Below is the the instruction that describes the task:
### Input:
place short sell order
### Response:
def __placeSellShortOrder(self, tick):
''' place short sell order'''
share=math.floor(self.__strategy.getAccountCopy().getCash() / float(tick.close))
sellShortOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL_SHORT,
is_market=True,
security=self.__security,
share=share)
if self.__strategy.placeOrder(sellShortOrder):
self.__buyOrder=sellShortOrder
# place stop order
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY_TO_COVER,
is_stop=True,
security=self.__security,
price=tick.close * 1.05,
share=0 - share)
self.__placeStopOrder(stopOrder) |
def _iter_grouped_shortcut(self):
"""Fast version of `_iter_grouped` that yields Variables without
metadata
"""
var = self._obj.variable
for indices in self._group_indices:
yield var[{self._group_dim: indices}] | Fast version of `_iter_grouped` that yields Variables without
metadata | Below is the the instruction that describes the task:
### Input:
Fast version of `_iter_grouped` that yields Variables without
metadata
### Response:
def _iter_grouped_shortcut(self):
"""Fast version of `_iter_grouped` that yields Variables without
metadata
"""
var = self._obj.variable
for indices in self._group_indices:
yield var[{self._group_dim: indices}] |
def content(self, value):
"""
Setter for **self.__content** attribute.
:param value: Attribute value.
:type value: list
"""
if value is not None:
assert type(value) is list, "'{0}' attribute: '{1}' type is not 'list'!".format("content", value)
self.__content = value | Setter for **self.__content** attribute.
:param value: Attribute value.
:type value: list | Below is the the instruction that describes the task:
### Input:
Setter for **self.__content** attribute.
:param value: Attribute value.
:type value: list
### Response:
def content(self, value):
"""
Setter for **self.__content** attribute.
:param value: Attribute value.
:type value: list
"""
if value is not None:
assert type(value) is list, "'{0}' attribute: '{1}' type is not 'list'!".format("content", value)
self.__content = value |
def route_has_dead_links(root, machine):
"""Quickly determine if a route uses any dead links.
Parameters
----------
root : :py:class:`~rig.place_and_route.routing_tree.RoutingTree`
The root of the RoutingTree which contains nothing but RoutingTrees
(i.e. no vertices and links).
machine : :py:class:`~rig.place_and_route.Machine`
The machine in which the routes exist.
Returns
-------
bool
True if the route uses any dead/missing links, False otherwise.
"""
for direction, (x, y), routes in root.traverse():
for route in routes:
if (x, y, route) not in machine:
return True
return False | Quickly determine if a route uses any dead links.
Parameters
----------
root : :py:class:`~rig.place_and_route.routing_tree.RoutingTree`
The root of the RoutingTree which contains nothing but RoutingTrees
(i.e. no vertices and links).
machine : :py:class:`~rig.place_and_route.Machine`
The machine in which the routes exist.
Returns
-------
bool
True if the route uses any dead/missing links, False otherwise. | Below is the the instruction that describes the task:
### Input:
Quickly determine if a route uses any dead links.
Parameters
----------
root : :py:class:`~rig.place_and_route.routing_tree.RoutingTree`
The root of the RoutingTree which contains nothing but RoutingTrees
(i.e. no vertices and links).
machine : :py:class:`~rig.place_and_route.Machine`
The machine in which the routes exist.
Returns
-------
bool
True if the route uses any dead/missing links, False otherwise.
### Response:
def route_has_dead_links(root, machine):
"""Quickly determine if a route uses any dead links.
Parameters
----------
root : :py:class:`~rig.place_and_route.routing_tree.RoutingTree`
The root of the RoutingTree which contains nothing but RoutingTrees
(i.e. no vertices and links).
machine : :py:class:`~rig.place_and_route.Machine`
The machine in which the routes exist.
Returns
-------
bool
True if the route uses any dead/missing links, False otherwise.
"""
for direction, (x, y), routes in root.traverse():
for route in routes:
if (x, y, route) not in machine:
return True
return False |
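A small sketch of the contract route_has_dead_links relies on, using duck-typed stand-ins instead of real rig RoutingTree/Machine objects (the FakeTree class and the link set below are hypothetical):

class FakeTree:
    # Mimics RoutingTree.traverse(): yields (direction, (x, y), routes) tuples
    def __init__(self, hops):
        self._hops = hops

    def traverse(self):
        return iter(self._hops)

alive_links = {(0, 0, "east"), (1, 0, "north")}  # plays the role of the `machine` membership test
tree = FakeTree([(None, (0, 0), ["east"]), ("east", (1, 0), ["north", "west"])])
print(route_has_dead_links(tree, alive_links))   # True: (1, 0, "west") is not in the set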
def _save(self, stateName, path):
"""save into 'stateName' to pyz-path"""
print('saving...')
state = {'session': dict(self.opts),
'dialogs': self.dialogs.saveState()}
self.sigSave.emit(state)
self.saveThread.prepare(stateName, path, self.tmp_dir_session, state)
self.saveThread.start()
self.current_session = stateName
r = self.opts['recent sessions']
try:
        # if this session already exists: remove it
r.pop(r.index(path))
except ValueError:
pass
# add this session at the beginning
r.insert(0, path) | save into 'stateName' to pyz-path | Below is the the instruction that describes the task:
### Input:
save into 'stateName' to pyz-path
### Response:
def _save(self, stateName, path):
"""save into 'stateName' to pyz-path"""
print('saving...')
state = {'session': dict(self.opts),
'dialogs': self.dialogs.saveState()}
self.sigSave.emit(state)
self.saveThread.prepare(stateName, path, self.tmp_dir_session, state)
self.saveThread.start()
self.current_session = stateName
r = self.opts['recent sessions']
try:
        # if this session already exists: remove it
r.pop(r.index(path))
except ValueError:
pass
# add this session at the beginning
r.insert(0, path) |
def _construct_production_name(glyph_name, data=None):
"""Return the production name for a glyph name from the GlyphData.xml
database according to the AGL specification.
This should be run only if there is no official entry with a production
name in it.
Handles single glyphs (e.g. "brevecomb") and ligatures (e.g.
"brevecomb_acutecomb"). Returns None when a valid and semantically
meaningful production name can't be constructed or when the AGL
specification would be violated, get_glyph() will use the bare glyph
name then.
Note:
- Glyph name is the full name, e.g. "brevecomb_acutecomb.case".
- Base name is the base part, e.g. "brevecomb_acutecomb"
- Suffix is e.g. "case".
"""
# At this point, we have already checked the data for the full glyph name, so
# directly go to the base name here (e.g. when looking at "fi.alt").
base_name, dot, suffix = glyph_name.partition(".")
glyphinfo = _lookup_attributes(base_name, data)
if glyphinfo and glyphinfo.get("production"):
# Found the base glyph.
return glyphinfo["production"] + dot + suffix
if glyph_name in fontTools.agl.AGL2UV or base_name in fontTools.agl.AGL2UV:
# Glyph name is actually an AGLFN name.
return glyph_name
if "_" not in base_name:
# Nothing found so far and the glyph name isn't a ligature ("_"
# somewhere in it). The name does not carry any discernable Unicode
# semantics, so just return something sanitized.
return _agl_compliant_name(glyph_name)
# So we have a ligature that is not mapped in the data. Split it up and
# look up the individual parts.
base_name_parts = base_name.split("_")
# If all parts are in the AGLFN list, the glyph name is our production
# name already.
if all(part in fontTools.agl.AGL2UV for part in base_name_parts):
return _agl_compliant_name(glyph_name)
# Turn all parts of the ligature into production names.
_character_outside_BMP = False
production_names = []
for part in base_name_parts:
if part in fontTools.agl.AGL2UV:
# A name present in the AGLFN is a production name already.
production_names.append(part)
else:
part_entry = data.names.get(part) or {}
part_production_name = part_entry.get("production")
if part_production_name:
production_names.append(part_production_name)
# Take note if there are any characters outside the Unicode
# BMP, e.g. "u10FFF" or "u10FFFF". Do not catch e.g. "u013B"
# though.
if len(part_production_name) > 5 and _is_unicode_u_value(
part_production_name
):
_character_outside_BMP = True
else:
# We hit a part that does not seem to be a valid glyph name known to us,
# so the entire glyph name can't carry Unicode meaning. Return it
# sanitized.
return _agl_compliant_name(glyph_name)
# Some names Glyphs uses resolve to other names that are not uniXXXX names and may
# contain dots (e.g. idotaccent -> i.loclTRK). If there is any name with a "." in
# it before the last element, punt. We'd have to introduce a "." into the ligature
# midway, which is invalid according to the AGL. Example: "a_i.loclTRK" is valid,
# but "a_i.loclTRK_a" isn't.
if any("." in part for part in production_names[:-1]):
return _agl_compliant_name(glyph_name)
# If any production name starts with a "uni" and there are none of the
# "uXXXXX" format, try to turn all parts into "uni" names and concatenate
# them.
if not _character_outside_BMP and any(
part.startswith("uni") for part in production_names
):
uni_names = []
for part in production_names:
if part.startswith("uni"):
uni_names.append(part[3:])
elif len(part) == 5 and _is_unicode_u_value(part):
uni_names.append(part[1:])
elif part in fontTools.agl.AGL2UV:
uni_names.append("{:04X}".format(fontTools.agl.AGL2UV[part]))
else:
return None
final_production_name = "uni" + "".join(uni_names) + dot + suffix
else:
final_production_name = "_".join(production_names) + dot + suffix
return _agl_compliant_name(final_production_name) | Return the production name for a glyph name from the GlyphData.xml
database according to the AGL specification.
This should be run only if there is no official entry with a production
name in it.
Handles single glyphs (e.g. "brevecomb") and ligatures (e.g.
"brevecomb_acutecomb"). Returns None when a valid and semantically
meaningful production name can't be constructed or when the AGL
specification would be violated, get_glyph() will use the bare glyph
name then.
Note:
- Glyph name is the full name, e.g. "brevecomb_acutecomb.case".
- Base name is the base part, e.g. "brevecomb_acutecomb"
- Suffix is e.g. "case". | Below is the the instruction that describes the task:
### Input:
Return the production name for a glyph name from the GlyphData.xml
database according to the AGL specification.
This should be run only if there is no official entry with a production
name in it.
Handles single glyphs (e.g. "brevecomb") and ligatures (e.g.
"brevecomb_acutecomb"). Returns None when a valid and semantically
meaningful production name can't be constructed or when the AGL
specification would be violated, get_glyph() will use the bare glyph
name then.
Note:
- Glyph name is the full name, e.g. "brevecomb_acutecomb.case".
- Base name is the base part, e.g. "brevecomb_acutecomb"
- Suffix is e.g. "case".
### Response:
def _construct_production_name(glyph_name, data=None):
"""Return the production name for a glyph name from the GlyphData.xml
database according to the AGL specification.
This should be run only if there is no official entry with a production
name in it.
Handles single glyphs (e.g. "brevecomb") and ligatures (e.g.
"brevecomb_acutecomb"). Returns None when a valid and semantically
meaningful production name can't be constructed or when the AGL
specification would be violated, get_glyph() will use the bare glyph
name then.
Note:
- Glyph name is the full name, e.g. "brevecomb_acutecomb.case".
- Base name is the base part, e.g. "brevecomb_acutecomb"
- Suffix is e.g. "case".
"""
# At this point, we have already checked the data for the full glyph name, so
# directly go to the base name here (e.g. when looking at "fi.alt").
base_name, dot, suffix = glyph_name.partition(".")
glyphinfo = _lookup_attributes(base_name, data)
if glyphinfo and glyphinfo.get("production"):
# Found the base glyph.
return glyphinfo["production"] + dot + suffix
if glyph_name in fontTools.agl.AGL2UV or base_name in fontTools.agl.AGL2UV:
# Glyph name is actually an AGLFN name.
return glyph_name
if "_" not in base_name:
# Nothing found so far and the glyph name isn't a ligature ("_"
# somewhere in it). The name does not carry any discernable Unicode
# semantics, so just return something sanitized.
return _agl_compliant_name(glyph_name)
# So we have a ligature that is not mapped in the data. Split it up and
# look up the individual parts.
base_name_parts = base_name.split("_")
# If all parts are in the AGLFN list, the glyph name is our production
# name already.
if all(part in fontTools.agl.AGL2UV for part in base_name_parts):
return _agl_compliant_name(glyph_name)
# Turn all parts of the ligature into production names.
_character_outside_BMP = False
production_names = []
for part in base_name_parts:
if part in fontTools.agl.AGL2UV:
# A name present in the AGLFN is a production name already.
production_names.append(part)
else:
part_entry = data.names.get(part) or {}
part_production_name = part_entry.get("production")
if part_production_name:
production_names.append(part_production_name)
# Take note if there are any characters outside the Unicode
# BMP, e.g. "u10FFF" or "u10FFFF". Do not catch e.g. "u013B"
# though.
if len(part_production_name) > 5 and _is_unicode_u_value(
part_production_name
):
_character_outside_BMP = True
else:
# We hit a part that does not seem to be a valid glyph name known to us,
# so the entire glyph name can't carry Unicode meaning. Return it
# sanitized.
return _agl_compliant_name(glyph_name)
# Some names Glyphs uses resolve to other names that are not uniXXXX names and may
# contain dots (e.g. idotaccent -> i.loclTRK). If there is any name with a "." in
# it before the last element, punt. We'd have to introduce a "." into the ligature
# midway, which is invalid according to the AGL. Example: "a_i.loclTRK" is valid,
# but "a_i.loclTRK_a" isn't.
if any("." in part for part in production_names[:-1]):
return _agl_compliant_name(glyph_name)
# If any production name starts with a "uni" and there are none of the
# "uXXXXX" format, try to turn all parts into "uni" names and concatenate
# them.
if not _character_outside_BMP and any(
part.startswith("uni") for part in production_names
):
uni_names = []
for part in production_names:
if part.startswith("uni"):
uni_names.append(part[3:])
elif len(part) == 5 and _is_unicode_u_value(part):
uni_names.append(part[1:])
elif part in fontTools.agl.AGL2UV:
uni_names.append("{:04X}".format(fontTools.agl.AGL2UV[part]))
else:
return None
final_production_name = "uni" + "".join(uni_names) + dot + suffix
else:
final_production_name = "_".join(production_names) + dot + suffix
return _agl_compliant_name(final_production_name) |
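A tiny illustration of the AGLFN shortcut discussed above (fontTools required; the ligature name "f_i" is illustrative): when every underscore-separated part of a ligature name is already an AGLFN name, the glyph name is kept as the production name.

import fontTools.agl

parts = "f_i".split("_")
print(all(part in fontTools.agl.AGL2UV for part in parts))  # True, so "f_i" needs no rewriting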
def _from_derivatives(xi, yi, x, order=None, der=0, extrapolate=False):
"""
Convenience function for interpolate.BPoly.from_derivatives.
Construct a piecewise polynomial in the Bernstein basis, compatible
with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array-likes
yi[i][j] is the j-th derivative known at xi[i]
order: None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
der : int or list
How many derivatives to extract; None for all potentially nonzero
derivatives (that is a number equal to the number of points), or a
        list of derivatives to extract. This number includes the function
value as 0th derivative.
extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first and last
intervals, or to return NaNs. Default: True.
See Also
--------
scipy.interpolate.BPoly.from_derivatives
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R.
"""
from scipy import interpolate
# return the method for compat with scipy version & backwards compat
method = interpolate.BPoly.from_derivatives
m = method(xi, yi.reshape(-1, 1),
orders=order, extrapolate=extrapolate)
return m(x) | Convenience function for interpolate.BPoly.from_derivatives.
Construct a piecewise polynomial in the Bernstein basis, compatible
with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array-likes
yi[i][j] is the j-th derivative known at xi[i]
order: None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
der : int or list
How many derivatives to extract; None for all potentially nonzero
derivatives (that is a number equal to the number of points), or a
list of derivatives to extract. This number includes the function
value as 0th derivative.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first and last
intervals, or to return NaNs. Default: True.
See Also
--------
scipy.interpolate.BPoly.from_derivatives
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R. | Below is the the instruction that describes the task:
### Input:
Convenience function for interpolate.BPoly.from_derivatives.
Construct a piecewise polynomial in the Bernstein basis, compatible
with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array-likes
yi[i][j] is the j-th derivative known at xi[i]
order: None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
der : int or list
How many derivatives to extract; None for all potentially nonzero
derivatives (that is a number equal to the number of points), or a
list of derivatives to extract. This number includes the function
value as 0th derivative.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first and last
intervals, or to return NaNs. Default: True.
See Also
--------
scipy.interpolate.BPoly.from_derivatives
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R.
### Response:
def _from_derivatives(xi, yi, x, order=None, der=0, extrapolate=False):
"""
Convenience function for interpolate.BPoly.from_derivatives.
Construct a piecewise polynomial in the Bernstein basis, compatible
with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array-likes
yi[i][j] is the j-th derivative known at xi[i]
order: None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
der : int or list
How many derivatives to extract; None for all potentially nonzero
derivatives (that is a number equal to the number of points), or a
        list of derivatives to extract. This number includes the function
value as 0th derivative.
extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first and last
intervals, or to return NaNs. Default: True.
See Also
--------
scipy.interpolate.BPoly.from_derivatives
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R.
"""
from scipy import interpolate
# return the method for compat with scipy version & backwards compat
method = interpolate.BPoly.from_derivatives
m = method(xi, yi.reshape(-1, 1),
orders=order, extrapolate=extrapolate)
return m(x) |
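A hedged sketch of what the wrapper above delegates to (scipy required; the breakpoints and values are illustrative). With only the function value supplied at each breakpoint, the resulting piecewise polynomial is linear between points:

import numpy as np
from scipy import interpolate

xi = np.array([0.0, 1.0, 2.0])
yi = np.array([0.0, 1.0, 4.0])  # one known value (the 0th derivative) per breakpoint
bp = interpolate.BPoly.from_derivatives(xi, yi.reshape(-1, 1))
print(bp(np.array([0.5, 1.5])))  # [0.5 2.5], i.e. linear interpolation between breakpoints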
def recoverString(self, strIndex, withIndex=False):
'''
This will return the string that starts at the given index
@param strIndex - the index of the string we want to recover
@return - string that we found starting at the specified '$' index
'''
retNums = []
indices = []
#figure out the first hop backwards
currIndex = strIndex
prevChar = self.getCharAtIndex(currIndex)
currIndex = self.getOccurrenceOfCharAtIndex(prevChar, currIndex)
#while we haven't looped back to the start
while currIndex != strIndex:
#update the string
retNums.append(prevChar)
if withIndex:
indices.append(currIndex)
#figure out where to go from here
prevChar = self.getCharAtIndex(currIndex)
currIndex = self.getOccurrenceOfCharAtIndex(prevChar, currIndex)
for i in xrange(0, self.vcLen):
if strIndex < self.endIndex[i]:
retNums.append(i)
break
if withIndex:
indices.append(strIndex)
#reverse the numbers, convert to characters, and join them in to a single sequence
ret = ''.join(self.numToChar[retNums[::-1]])
#return what we found
if withIndex:
return (ret, indices[::-1])
else:
return ret | This will return the string that starts at the given index
@param strIndex - the index of the string we want to recover
@return - string that we found starting at the specified '$' index | Below is the the instruction that describes the task:
### Input:
This will return the string that starts at the given index
@param strIndex - the index of the string we want to recover
@return - string that we found starting at the specified '$' index
### Response:
def recoverString(self, strIndex, withIndex=False):
'''
This will return the string that starts at the given index
@param strIndex - the index of the string we want to recover
@return - string that we found starting at the specified '$' index
'''
retNums = []
indices = []
#figure out the first hop backwards
currIndex = strIndex
prevChar = self.getCharAtIndex(currIndex)
currIndex = self.getOccurrenceOfCharAtIndex(prevChar, currIndex)
#while we haven't looped back to the start
while currIndex != strIndex:
#update the string
retNums.append(prevChar)
if withIndex:
indices.append(currIndex)
#figure out where to go from here
prevChar = self.getCharAtIndex(currIndex)
currIndex = self.getOccurrenceOfCharAtIndex(prevChar, currIndex)
for i in xrange(0, self.vcLen):
if strIndex < self.endIndex[i]:
retNums.append(i)
break
if withIndex:
indices.append(strIndex)
#reverse the numbers, convert to characters, and join them in to a single sequence
ret = ''.join(self.numToChar[retNums[::-1]])
#return what we found
if withIndex:
return (ret, indices[::-1])
else:
return ret |
def load_genotypes(self):
"""Prepares the files for genotype parsing.
:return: None
"""
if self.file_index < len(self.archives):
self.current_file = self.archives[self.file_index]
info_filename = self.current_file.replace(Parser.gen_ext, Parser.info_ext)
if len(self.info_files) > 0:
info_filename = self.info_files[self.file_index]
self.info_file = open(info_filename)
self.info_file.readline() # Dump the header
if DataParser.compressed_pedigree:
self.freq_file = gzip.open("%s" % (self.current_file), 'rb')
else:
self.freq_file = open(self.current_file)
self.current_chrom = self.chroms[self.file_index]
self.file_index += 1
else:
raise StopIteration | Prepares the files for genotype parsing.
:return: None | Below is the the instruction that describes the task:
### Input:
Prepares the files for genotype parsing.
:return: None
### Response:
def load_genotypes(self):
"""Prepares the files for genotype parsing.
:return: None
"""
if self.file_index < len(self.archives):
self.current_file = self.archives[self.file_index]
info_filename = self.current_file.replace(Parser.gen_ext, Parser.info_ext)
if len(self.info_files) > 0:
info_filename = self.info_files[self.file_index]
self.info_file = open(info_filename)
self.info_file.readline() # Dump the header
if DataParser.compressed_pedigree:
self.freq_file = gzip.open("%s" % (self.current_file), 'rb')
else:
self.freq_file = open(self.current_file)
self.current_chrom = self.chroms[self.file_index]
self.file_index += 1
else:
raise StopIteration |
def setup_editorstack(self, parent, layout):
"""Setup editorstack's layout"""
layout.setSpacing(1)
self.fname_label = QLabel()
self.fname_label.setStyleSheet(
"QLabel {margin: 0px; padding: 3px;}")
layout.addWidget(self.fname_label)
menu_btn = create_toolbutton(self, icon=ima.icon('tooloptions'),
tip=_('Options'))
# Don't show menu arrow and remove padding
if is_dark_interface():
menu_btn.setStyleSheet(
("QToolButton::menu-indicator{image: none;}\n"
"QToolButton{margin: 1px; padding: 3px;}"))
else:
menu_btn.setStyleSheet(
"QToolButton::menu-indicator{image: none;}")
self.menu = QMenu(self)
menu_btn.setMenu(self.menu)
menu_btn.setPopupMode(menu_btn.InstantPopup)
self.menu.aboutToShow.connect(self.__setup_menu)
corner_widgets = {Qt.TopRightCorner: [menu_btn]}
self.tabs = BaseTabs(self, menu=self.menu, menu_use_tooltips=True,
corner_widgets=corner_widgets)
self.tabs.tabBar().setObjectName('plugin-tab')
self.tabs.set_close_function(self.close_file)
self.tabs.tabBar().tabMoved.connect(self.move_editorstack_data)
self.tabs.setMovable(True)
self.stack_history.refresh()
if hasattr(self.tabs, 'setDocumentMode') \
and not sys.platform == 'darwin':
# Don't set document mode to true on OSX because it generates
# a crash when the editor is detached from the main window
# Fixes Issue 561
self.tabs.setDocumentMode(True)
self.tabs.currentChanged.connect(self.current_changed)
if sys.platform == 'darwin':
tab_container = QWidget()
tab_container.setObjectName('tab-container')
tab_layout = QHBoxLayout(tab_container)
tab_layout.setContentsMargins(0, 0, 0, 0)
tab_layout.addWidget(self.tabs)
layout.addWidget(tab_container)
else:
layout.addWidget(self.tabs) | Setup editorstack's layout | Below is the the instruction that describes the task:
### Input:
Setup editorstack's layout
### Response:
def setup_editorstack(self, parent, layout):
"""Setup editorstack's layout"""
layout.setSpacing(1)
self.fname_label = QLabel()
self.fname_label.setStyleSheet(
"QLabel {margin: 0px; padding: 3px;}")
layout.addWidget(self.fname_label)
menu_btn = create_toolbutton(self, icon=ima.icon('tooloptions'),
tip=_('Options'))
# Don't show menu arrow and remove padding
if is_dark_interface():
menu_btn.setStyleSheet(
("QToolButton::menu-indicator{image: none;}\n"
"QToolButton{margin: 1px; padding: 3px;}"))
else:
menu_btn.setStyleSheet(
"QToolButton::menu-indicator{image: none;}")
self.menu = QMenu(self)
menu_btn.setMenu(self.menu)
menu_btn.setPopupMode(menu_btn.InstantPopup)
self.menu.aboutToShow.connect(self.__setup_menu)
corner_widgets = {Qt.TopRightCorner: [menu_btn]}
self.tabs = BaseTabs(self, menu=self.menu, menu_use_tooltips=True,
corner_widgets=corner_widgets)
self.tabs.tabBar().setObjectName('plugin-tab')
self.tabs.set_close_function(self.close_file)
self.tabs.tabBar().tabMoved.connect(self.move_editorstack_data)
self.tabs.setMovable(True)
self.stack_history.refresh()
if hasattr(self.tabs, 'setDocumentMode') \
and not sys.platform == 'darwin':
# Don't set document mode to true on OSX because it generates
# a crash when the editor is detached from the main window
# Fixes Issue 561
self.tabs.setDocumentMode(True)
self.tabs.currentChanged.connect(self.current_changed)
if sys.platform == 'darwin':
tab_container = QWidget()
tab_container.setObjectName('tab-container')
tab_layout = QHBoxLayout(tab_container)
tab_layout.setContentsMargins(0, 0, 0, 0)
tab_layout.addWidget(self.tabs)
layout.addWidget(tab_container)
else:
layout.addWidget(self.tabs) |
def parse_options(arguments):
"""Parse command line arguments.
The parsing logic is fairly simple. It can only parse long-style
parameters of the form::
--key value
Several parameters can be defined in the environment and will be used
unless explicitly overridden with command-line arguments. The access key,
secret and endpoint values will be loaded from C{AWS_ACCESS_KEY_ID},
C{AWS_SECRET_ACCESS_KEY} and C{AWS_ENDPOINT} environment variables.
@param arguments: A list of command-line arguments. The first item is
expected to be the name of the program being run.
@raises OptionError: Raised if incorrectly formed command-line arguments
are specified, or if required command-line arguments are not present.
@raises UsageError: Raised if C{--help} is present in command-line
arguments.
@return: A C{dict} with key/value pairs extracted from the argument list.
"""
arguments = arguments[1:]
options = {}
while arguments:
key = arguments.pop(0)
if key in ("-h", "--help"):
raise UsageError("Help requested.")
if key.startswith("--"):
key = key[2:]
try:
value = arguments.pop(0)
except IndexError:
raise OptionError("'--%s' is missing a value." % key)
options[key] = value
else:
raise OptionError("Encountered unexpected value '%s'." % key)
default_key = os.environ.get("AWS_ACCESS_KEY_ID")
if "key" not in options and default_key:
options["key"] = default_key
default_secret = os.environ.get("AWS_SECRET_ACCESS_KEY")
if "secret" not in options and default_secret:
options["secret"] = default_secret
default_endpoint = os.environ.get("AWS_ENDPOINT")
if "endpoint" not in options and default_endpoint:
options["endpoint"] = default_endpoint
for name in ("key", "secret", "endpoint", "action"):
if name not in options:
raise OptionError(
"The '--%s' command-line argument is required." % name)
return options | Parse command line arguments.
The parsing logic is fairly simple. It can only parse long-style
parameters of the form::
--key value
Several parameters can be defined in the environment and will be used
unless explicitly overridden with command-line arguments. The access key,
secret and endpoint values will be loaded from C{AWS_ACCESS_KEY_ID},
C{AWS_SECRET_ACCESS_KEY} and C{AWS_ENDPOINT} environment variables.
@param arguments: A list of command-line arguments. The first item is
expected to be the name of the program being run.
@raises OptionError: Raised if incorrectly formed command-line arguments
are specified, or if required command-line arguments are not present.
@raises UsageError: Raised if C{--help} is present in command-line
arguments.
@return: A C{dict} with key/value pairs extracted from the argument list. | Below is the the instruction that describes the task:
### Input:
Parse command line arguments.
The parsing logic is fairly simple. It can only parse long-style
parameters of the form::
--key value
Several parameters can be defined in the environment and will be used
unless explicitly overridden with command-line arguments. The access key,
secret and endpoint values will be loaded from C{AWS_ACCESS_KEY_ID},
C{AWS_SECRET_ACCESS_KEY} and C{AWS_ENDPOINT} environment variables.
@param arguments: A list of command-line arguments. The first item is
expected to be the name of the program being run.
@raises OptionError: Raised if incorrectly formed command-line arguments
are specified, or if required command-line arguments are not present.
@raises UsageError: Raised if C{--help} is present in command-line
arguments.
@return: A C{dict} with key/value pairs extracted from the argument list.
### Response:
def parse_options(arguments):
"""Parse command line arguments.
The parsing logic is fairly simple. It can only parse long-style
parameters of the form::
--key value
Several parameters can be defined in the environment and will be used
unless explicitly overridden with command-line arguments. The access key,
secret and endpoint values will be loaded from C{AWS_ACCESS_KEY_ID},
C{AWS_SECRET_ACCESS_KEY} and C{AWS_ENDPOINT} environment variables.
@param arguments: A list of command-line arguments. The first item is
expected to be the name of the program being run.
@raises OptionError: Raised if incorrectly formed command-line arguments
are specified, or if required command-line arguments are not present.
@raises UsageError: Raised if C{--help} is present in command-line
arguments.
@return: A C{dict} with key/value pairs extracted from the argument list.
"""
arguments = arguments[1:]
options = {}
while arguments:
key = arguments.pop(0)
if key in ("-h", "--help"):
raise UsageError("Help requested.")
if key.startswith("--"):
key = key[2:]
try:
value = arguments.pop(0)
except IndexError:
raise OptionError("'--%s' is missing a value." % key)
options[key] = value
else:
raise OptionError("Encountered unexpected value '%s'." % key)
default_key = os.environ.get("AWS_ACCESS_KEY_ID")
if "key" not in options and default_key:
options["key"] = default_key
default_secret = os.environ.get("AWS_SECRET_ACCESS_KEY")
if "secret" not in options and default_secret:
options["secret"] = default_secret
default_endpoint = os.environ.get("AWS_ENDPOINT")
if "endpoint" not in options and default_endpoint:
options["endpoint"] = default_endpoint
for name in ("key", "secret", "endpoint", "action"):
if name not in options:
raise OptionError(
"The '--%s' command-line argument is required." % name)
return options |
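A minimal usage sketch for parse_options, assuming the function and its OptionError/UsageError exceptions live in an importable module; the argument values and endpoint URL below are placeholders, not real credentials.
import os
# The environment fallback is only consulted for options not passed explicitly.
os.environ.setdefault("AWS_ENDPOINT", "https://ec2.example.invalid")
argv = ["prog", "--key", "EXAMPLEKEY", "--secret", "EXAMPLESECRET", "--action", "describe-instances"]
options = parse_options(argv)
print(options["action"])    # describe-instances
print(options["endpoint"])  # falls back to AWS_ENDPOINT because --endpoint was omitted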
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
# for the --sql use case, run migrations for each URL into
# individual files.
engines = {
'': {
'url': context.config.get_main_option('sqlalchemy.url')
}
}
for name in bind_names:
engines[name] = rec = {}
rec['url'] = context.config.get_section_option(name, "sqlalchemy.url")
for name, rec in engines.items():
logger.info("Migrating database %s" % (name or '<default>'))
file_ = "%s.sql" % name
logger.info("Writing output to %s" % file_)
with open(file_, 'w') as buffer:
context.configure(
url=rec['url'],
output_buffer=buffer,
target_metadata=get_metadata(name),
literal_binds=True,
)
with context.begin_transaction():
context.run_migrations(engine_name=name) | Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output. | Below is the the instruction that describes the task:
### Input:
Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
### Response:
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
# for the --sql use case, run migrations for each URL into
# individual files.
engines = {
'': {
'url': context.config.get_main_option('sqlalchemy.url')
}
}
for name in bind_names:
engines[name] = rec = {}
rec['url'] = context.config.get_section_option(name, "sqlalchemy.url")
for name, rec in engines.items():
logger.info("Migrating database %s" % (name or '<default>'))
file_ = "%s.sql" % name
logger.info("Writing output to %s" % file_)
with open(file_, 'w') as buffer:
context.configure(
url=rec['url'],
output_buffer=buffer,
target_metadata=get_metadata(name),
literal_binds=True,
)
with context.begin_transaction():
context.run_migrations(engine_name=name) |
def decode_list(cls, obj, element_type):
# type: (List[Any], ConjureTypeType) -> List[Any]
"""Decodes json into a list, handling conversion of the elements.
Args:
obj: the json object to decode
element_type: a class object which is the conjure type of
the elements in this list.
Returns:
A python list where the elements are instances of type
element_type.
"""
if not isinstance(obj, list):
raise Exception("expected a python list")
return list(map(lambda x: cls.do_decode(x, element_type), obj)) | Decodes json into a list, handling conversion of the elements.
Args:
obj: the json object to decode
element_type: a class object which is the conjure type of
the elements in this list.
Returns:
A python list where the elements are instances of type
element_type. | Below is the the instruction that describes the task:
### Input:
Decodes json into a list, handling conversion of the elements.
Args:
obj: the json object to decode
element_type: a class object which is the conjure type of
the elements in this list.
Returns:
A python list where the elements are instances of type
element_type.
### Response:
def decode_list(cls, obj, element_type):
# type: (List[Any], ConjureTypeType) -> List[Any]
"""Decodes json into a list, handling conversion of the elements.
Args:
obj: the json object to decode
element_type: a class object which is the conjure type of
the elements in this list.
Returns:
A python list where the elements are instances of type
element_type.
"""
if not isinstance(obj, list):
raise Exception("expected a python list")
return list(map(lambda x: cls.do_decode(x, element_type), obj)) |
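A hedged sketch that exercises decode_list by passing a stand-in class for cls; the demo class and its do_decode behaviour are assumptions made only for illustration (the real decoder class is not shown in the source).
class _DemoDecoder(object):
    # Minimal stand-in exposing the do_decode hook that decode_list delegates to.
    @staticmethod
    def do_decode(obj, conjure_type):
        return conjure_type(obj)
print(decode_list(_DemoDecoder, ["1", "2", "3"], int))  # [1, 2, 3]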
def add_and_rename_file(self, filename: str, new_filename: str) -> None:
"""
Copies the specified file into the working directory of this
sandbox and renames it to new_filename.
"""
dest = os.path.join(
self.name + ':' + SANDBOX_WORKING_DIR_NAME,
new_filename)
subprocess.check_call(['docker', 'cp', filename, dest])
self._chown_files([new_filename]) | Copies the specified file into the working directory of this
sandbox and renames it to new_filename. | Below is the the instruction that describes the task:
### Input:
Copies the specified file into the working directory of this
sandbox and renames it to new_filename.
### Response:
def add_and_rename_file(self, filename: str, new_filename: str) -> None:
"""
Copies the specified file into the working directory of this
sandbox and renames it to new_filename.
"""
dest = os.path.join(
self.name + ':' + SANDBOX_WORKING_DIR_NAME,
new_filename)
subprocess.check_call(['docker', 'cp', filename, dest])
self._chown_files([new_filename]) |
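A hedged usage sketch: sandbox stands for an instance of the class above whose name matches a running Docker container; the file and target names are illustrative.
# Copy ./report.pdf into the sandbox working directory under a new name.
sandbox.add_and_rename_file('./report.pdf', 'results.pdf')
# Roughly equivalent to: docker cp ./report.pdf <container>:<working-dir>/results.pdf
# followed by an ownership fix-up inside the container.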
def get_suppliers_per_page(self, per_page=1000, page=1, params=None):
"""
Get suppliers per page
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param params: Search parameters. Default: {}
:return: list
"""
return self._get_resource_per_page(resource=SUPPLIERS, per_page=per_page, page=page, params=params) | Get suppliers per page
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param params: Search parameters. Default: {}
:return: list | Below is the the instruction that describes the task:
### Input:
Get suppliers per page
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param params: Search parameters. Default: {}
:return: list
### Response:
def get_suppliers_per_page(self, per_page=1000, page=1, params=None):
"""
Get suppliers per page
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param params: Search parameters. Default: {}
:return: list
"""
return self._get_resource_per_page(resource=SUPPLIERS, per_page=per_page, page=page, params=params) |
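A usage sketch, assuming client is an instance of the API wrapper that defines this method; the filter parameter is illustrative.
suppliers = client.get_suppliers_per_page(per_page=100, page=2, params={'name': 'ACME'})
for supplier in suppliers:
    print(supplier)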
def detect_Massimini2004(dat_orig, s_freq, time, opts):
"""Slow wave detection based on Massimini et al., 2004.
Parameters
----------
dat_orig : ndarray (dtype='float')
vector with the data for one channel
s_freq : float
sampling frequency
time : ndarray (dtype='float')
vector with the time points for each sample
opts : instance of 'DetectSlowWave'
'det_filt' : dict
parameters for 'butter',
'duration' : tuple of float
min and max duration of SW
'min_ptp' : float
min peak-to-peak amplitude
'trough_duration' : tuple of float
min and max duration of first half-wave (trough)
Returns
-------
list of dict
list of detected SWs
float
SW density, per 30-s epoch
References
----------
Massimini, M. et al. J Neurosci 24(31) 6862-70 (2004).
"""
if opts.invert:
dat_orig = -dat_orig
dat_det = transform_signal(dat_orig, s_freq, 'double_butter',
opts.det_filt)
above_zero = detect_events(dat_det, 'above_thresh', value=0.)
sw_in_chan = []
if above_zero is not None:
troughs = within_duration(above_zero, time, opts.trough_duration)
#lg.info('troughs within duration: ' + str(troughs.shape))
if troughs is not None:
troughs = select_peaks(dat_det, troughs, opts.max_trough_amp)
#lg.info('troughs deep enough: ' + str(troughs.shape))
if troughs is not None:
events = _add_halfwave(dat_det, troughs, s_freq, opts)
#lg.info('SWs high enough: ' + str(events.shape))
if len(events):
events = within_duration(events, time, opts.duration)
events = remove_straddlers(events, time, s_freq)
#lg.info('SWs within duration: ' + str(events.shape))
sw_in_chan = make_slow_waves(events, dat_det, time, s_freq)
if len(sw_in_chan) == 0:
lg.info('No slow wave found')
return sw_in_chan | Slow wave detection based on Massimini et al., 2004.
Parameters
----------
dat_orig : ndarray (dtype='float')
vector with the data for one channel
s_freq : float
sampling frequency
time : ndarray (dtype='float')
vector with the time points for each sample
opts : instance of 'DetectSlowWave'
'det_filt' : dict
parameters for 'butter',
'duration' : tuple of float
min and max duration of SW
'min_ptp' : float
min peak-to-peak amplitude
'trough_duration' : tuple of float
min and max duration of first half-wave (trough)
Returns
-------
list of dict
list of detected SWs
float
SW density, per 30-s epoch
References
----------
Massimini, M. et al. J Neurosci 24(31) 6862-70 (2004). | Below is the the instruction that describes the task:
### Input:
Slow wave detection based on Massimini et al., 2004.
Parameters
----------
dat_orig : ndarray (dtype='float')
vector with the data for one channel
s_freq : float
sampling frequency
time : ndarray (dtype='float')
vector with the time points for each sample
opts : instance of 'DetectSlowWave'
'det_filt' : dict
parameters for 'butter',
'duration' : tuple of float
min and max duration of SW
'min_ptp' : float
min peak-to-peak amplitude
'trough_duration' : tuple of float
min and max duration of first half-wave (trough)
Returns
-------
list of dict
list of detected SWs
float
SW density, per 30-s epoch
References
----------
Massimini, M. et al. J Neurosci 24(31) 6862-70 (2004).
### Response:
def detect_Massimini2004(dat_orig, s_freq, time, opts):
"""Slow wave detection based on Massimini et al., 2004.
Parameters
----------
dat_orig : ndarray (dtype='float')
vector with the data for one channel
s_freq : float
sampling frequency
time : ndarray (dtype='float')
vector with the time points for each sample
opts : instance of 'DetectSlowWave'
'det_filt' : dict
parameters for 'butter',
'duration' : tuple of float
min and max duration of SW
'min_ptp' : float
min peak-to-peak amplitude
'trough_duration' : tuple of float
min and max duration of first half-wave (trough)
Returns
-------
list of dict
list of detected SWs
float
SW density, per 30-s epoch
References
----------
Massimini, M. et al. J Neurosci 24(31) 6862-70 (2004).
"""
if opts.invert:
dat_orig = -dat_orig
dat_det = transform_signal(dat_orig, s_freq, 'double_butter',
opts.det_filt)
above_zero = detect_events(dat_det, 'above_thresh', value=0.)
sw_in_chan = []
if above_zero is not None:
troughs = within_duration(above_zero, time, opts.trough_duration)
#lg.info('troughs within duration: ' + str(troughs.shape))
if troughs is not None:
troughs = select_peaks(dat_det, troughs, opts.max_trough_amp)
#lg.info('troughs deep enough: ' + str(troughs.shape))
if troughs is not None:
events = _add_halfwave(dat_det, troughs, s_freq, opts)
#lg.info('SWs high enough: ' + str(events.shape))
if len(events):
events = within_duration(events, time, opts.duration)
events = remove_straddlers(events, time, s_freq)
#lg.info('SWs within duration: ' + str(events.shape))
sw_in_chan = make_slow_waves(events, dat_det, time, s_freq)
if len(sw_in_chan) == 0:
lg.info('No slow wave found')
return sw_in_chan |
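A hedged sketch of invoking the detector with synthetic data; it can only run inside the module that provides transform_signal, detect_events and the other helpers. The opts object is faked with SimpleNamespace carrying the attributes the function reads, and the filter/threshold values are illustrative guesses, not validated defaults.
import numpy as np
from types import SimpleNamespace
s_freq = 256
time_axis = np.arange(0, 60, 1 / s_freq)            # 60 s of data
signal = 80 * np.sin(2 * np.pi * 0.8 * time_axis)   # synthetic slow oscillation (uV)
opts = SimpleNamespace(
    invert=False,
    det_filt={'freq': (0.1, 4.0), 'order': 2},       # assumed butterworth settings
    trough_duration=(0.3, 1.0),
    max_trough_amp=-40,
    duration=(0.5, 3.0),
    min_ptp=75,
)
sw_events = detect_Massimini2004(signal, s_freq, time_axis, opts)
print(len(sw_events), 'slow waves detected')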
def _edges2conns(G, edge_data=False):
"""Create a mapping from graph edges to agent connections to be created.
:param G:
NetworkX's Graph or DiGraph which has :attr:`addr` attribute for each
node.
:param bool edge_data:
If ``True``, stores also edge data to the returned dictionary.
:returns:
A dictionary where keys are agent addresses and values are lists of
addresses to which key-agent should create connections in order to
recreate the graph structure in an agent society.
:rtype: dict
"""
cm = {}
for n in G.nodes(data=True):
if edge_data:
cm[n[1]['addr']] = [(G.node[nb]['addr'], G[n[0]][nb])
for nb in G[n[0]]]
else:
cm[n[1]['addr']] = [(G.node[nb]['addr'], {}) for nb in G[n[0]]]
return cm | Create a mapping from graph edges to agent connections to be created.
:param G:
NetworkX's Graph or DiGraph which has :attr:`addr` attribute for each
node.
:param bool edge_data:
If ``True``, stores also edge data to the returned dictionary.
:returns:
A dictionary where keys are agent addresses and values are lists of
addresses to which key-agent should create connections in order to
recreate the graph structure in an agent society.
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Create a mapping from graph edges to agent connections to be created.
:param G:
NetworkX's Graph or DiGraph which has :attr:`addr` attribute for each
node.
:param bool edge_data:
If ``True``, stores also edge data to the returned dictionary.
:returns:
A dictionary where keys are agent addresses and values are lists of
addresses to which key-agent should create connections in order to
recreate the graph structure in an agent society.
:rtype: dict
### Response:
def _edges2conns(G, edge_data=False):
"""Create a mapping from graph edges to agent connections to be created.
:param G:
NetworkX's Graph or DiGraph which has :attr:`addr` attribute for each
node.
:param bool edge_data:
If ``True``, stores also edge data to the returned dictionary.
:returns:
A dictionary where keys are agent addresses and values are lists of
addresses to which key-agent should create connections in order to
recreate the graph structure in an agent society.
:rtype: dict
"""
cm = {}
for n in G.nodes(data=True):
if edge_data:
cm[n[1]['addr']] = [(G.node[nb]['addr'], G[n[0]][nb])
for nb in G[n[0]]]
else:
cm[n[1]['addr']] = [(G.node[nb]['addr'], {}) for nb in G[n[0]]]
return cm |
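A small runnable illustration with NetworkX; note that the G.node attribute access used above requires a NetworkX release older than 2.4, and the addresses are placeholders.
import networkx as nx
G = nx.Graph()
G.add_node(0, addr='tcp://host-a:5555/0')
G.add_node(1, addr='tcp://host-b:5555/0')
G.add_node(2, addr='tcp://host-c:5555/0')
G.add_edges_from([(0, 1), (1, 2)])
print(_edges2conns(G))
# {'tcp://host-a:5555/0': [('tcp://host-b:5555/0', {})],
#  'tcp://host-b:5555/0': [('tcp://host-a:5555/0', {}), ('tcp://host-c:5555/0', {})],
#  'tcp://host-c:5555/0': [('tcp://host-b:5555/0', {})]}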
def operator_from_str(op):
"""
Return the operator associated to the given string `op`.
raises:
`KeyError` if invalid string.
>>> assert operator_from_str("==")(1, 1) and operator_from_str("+")(1,1) == 2
"""
d = {"==": operator.eq,
"!=": operator.ne,
">": operator.gt,
">=": operator.ge,
"<": operator.lt,
"<=": operator.le,
'+': operator.add,
'-': operator.sub,
'*': operator.mul,
'%': operator.mod,
'^': operator.xor,
}
try:
d['/'] = operator.truediv
except AttributeError:
pass
return d[op] | Return the operator associated to the given string `op`.
raises:
`KeyError` if invalid string.
>>> assert operator_from_str("==")(1, 1) and operator_from_str("+")(1,1) == 2 | Below is the the instruction that describes the task:
### Input:
Return the operator associated to the given string `op`.
raises:
`KeyError` if invalid string.
>>> assert operator_from_str("==")(1, 1) and operator_from_str("+")(1,1) == 2
### Response:
def operator_from_str(op):
"""
Return the operator associated to the given string `op`.
raises:
`KeyError` if invalid string.
>>> assert operator_from_str("==")(1, 1) and operator_from_str("+")(1,1) == 2
"""
d = {"==": operator.eq,
"!=": operator.ne,
">": operator.gt,
">=": operator.ge,
"<": operator.lt,
"<=": operator.le,
'+': operator.add,
'-': operator.sub,
'*': operator.mul,
'%': operator.mod,
'^': operator.xor,
}
try:
d['/'] = operator.truediv
except AttributeError:
pass
return d[op] |
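A few quick checks of the mapping (pure illustration):
assert operator_from_str('>=')(3, 2) is True
assert operator_from_str('%')(7, 4) == 3
assert operator_from_str('/')(1, 2) == 0.5   # '/' resolves to true division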
def update_roles_gce(use_cache=True, cache_expiration=86400, cache_path="~/.gcetools/instances", group_name=None, region=None, zone=None):
"""
    Dynamically update fabric's roles by assigning the tags associated with
each machine in Google Compute Engine.
use_cache - will store a local cache in ~/.gcetools/
cache_expiration - cache expiration in seconds (default: 1 day)
cache_path - the path to store instances data (default: ~/.gcetools/instances)
group_name - optional managed instance group to use instead of the global instance pool
region - gce region name (such as `us-central1`) for a regional managed instance group
zone - gce zone name (such as `us-central1-a`) for a zone managed instance group
How to use:
- Call 'update_roles_gce' at the end of your fabfile.py (it will run each
time you run fabric).
- On each function use the regular @roles decorator and set the role to the name
of one of the tags associated with the instances you wish to work with
"""
data = _get_data(use_cache, cache_expiration, group_name=group_name, region=region, zone=zone)
roles = _get_roles(data)
env.roledefs.update(roles)
_data_loaded = True
    return INSTANCES_CACHE | Dynamically update fabric's roles by assigning the tags associated with
each machine in Google Compute Engine.
use_cache - will store a local cache in ~/.gcetools/
cache_expiration - cache expiration in seconds (default: 1 day)
cache_path - the path to store instances data (default: ~/.gcetools/instances)
group_name - optional managed instance group to use instead of the global instance pool
region - gce region name (such as `us-central1`) for a regional managed instance group
zone - gce zone name (such as `us-central1-a`) for a zone managed instance group
How to use:
- Call 'update_roles_gce' at the end of your fabfile.py (it will run each
time you run fabric).
- On each function use the regular @roles decorator and set the role to the name
of one of the tags associated with the instances you wish to work with | Below is the the instruction that describes the task:
### Input:
Dynamically update fabric's roles by assigning the tags associated with
each machine in Google Compute Engine.
use_cache - will store a local cache in ~/.gcetools/
cache_expiration - cache expiration in seconds (default: 1 day)
cache_path - the path to store instances data (default: ~/.gcetools/instances)
group_name - optional managed instance group to use instead of the global instance pool
region - gce region name (such as `us-central1`) for a regional managed instance group
zone - gce zone name (such as `us-central1-a`) for a zone managed instance group
How to use:
- Call 'update_roles_gce' at the end of your fabfile.py (it will run each
time you run fabric).
- On each function use the regular @roles decorator and set the role to the name
of one of the tags associated with the instances you wish to work with
### Response:
def update_roles_gce(use_cache=True, cache_expiration=86400, cache_path="~/.gcetools/instances", group_name=None, region=None, zone=None):
"""
    Dynamically update fabric's roles by assigning the tags associated with
each machine in Google Compute Engine.
use_cache - will store a local cache in ~/.gcetools/
cache_expiration - cache expiration in seconds (default: 1 day)
cache_path - the path to store instances data (default: ~/.gcetools/instances)
group_name - optional managed instance group to use instead of the global instance pool
region - gce region name (such as `us-central1`) for a regional managed instance group
zone - gce zone name (such as `us-central1-a`) for a zone managed instance group
How to use:
- Call 'update_roles_gce' at the end of your fabfile.py (it will run each
time you run fabric).
- On each function use the regular @roles decorator and set the role to the name
of one of the tags associated with the instances you wish to work with
"""
data = _get_data(use_cache, cache_expiration, group_name=group_name, region=region, zone=zone)
roles = _get_roles(data)
env.roledefs.update(roles)
_data_loaded = True
return INSTANCES_CACHE |
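A hedged fabfile sketch of the wiring described in the docstring, using fabric 1.x style imports; the tag name and task body are invented for illustration.
# fabfile.py (sketch)
from fabric.api import roles, run
@roles('web-frontend')          # a GCE tag attached to the target instances
def restart_nginx():
    run('sudo service nginx restart')
# Populate env.roledefs from GCE instance tags each time fabric loads this file.
update_roles_gce(use_cache=True, cache_expiration=3600)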
def _load_file(self):
"""Load all entries from json backing file
"""
if not os.path.exists(self.file):
return {}
with open(self.file, "r") as infile:
data = json.load(infile)
return data | Load all entries from json backing file | Below is the the instruction that describes the task:
### Input:
Load all entries from json backing file
### Response:
def _load_file(self):
"""Load all entries from json backing file
"""
if not os.path.exists(self.file):
return {}
with open(self.file, "r") as infile:
data = json.load(infile)
return data |
def write_index_and_rst_files(self, overwrite: bool = False,
mock: bool = False) -> None:
"""
Writes both the individual RST files and the index.
Args:
overwrite: allow existing files to be overwritten?
mock: pretend to write, but don't
"""
for f in self.files_to_index:
if isinstance(f, FileToAutodocument):
f.write_rst(
prefix=self.rst_prefix,
suffix=self.rst_suffix,
heading_underline_char=self.source_rst_heading_underline_char, # noqa
overwrite=overwrite,
mock=mock,
)
elif isinstance(f, AutodocIndex):
f.write_index_and_rst_files(overwrite=overwrite, mock=mock)
else:
fail("Unknown thing in files_to_index: {!r}".format(f))
self.write_index(overwrite=overwrite, mock=mock) | Writes both the individual RST files and the index.
Args:
overwrite: allow existing files to be overwritten?
mock: pretend to write, but don't | Below is the the instruction that describes the task:
### Input:
Writes both the individual RST files and the index.
Args:
overwrite: allow existing files to be overwritten?
mock: pretend to write, but don't
### Response:
def write_index_and_rst_files(self, overwrite: bool = False,
mock: bool = False) -> None:
"""
Writes both the individual RST files and the index.
Args:
overwrite: allow existing files to be overwritten?
mock: pretend to write, but don't
"""
for f in self.files_to_index:
if isinstance(f, FileToAutodocument):
f.write_rst(
prefix=self.rst_prefix,
suffix=self.rst_suffix,
heading_underline_char=self.source_rst_heading_underline_char, # noqa
overwrite=overwrite,
mock=mock,
)
elif isinstance(f, AutodocIndex):
f.write_index_and_rst_files(overwrite=overwrite, mock=mock)
else:
fail("Unknown thing in files_to_index: {!r}".format(f))
self.write_index(overwrite=overwrite, mock=mock) |
def train(hparams, *args):
"""Train your awesome model.
:param hparams: The arguments to run the model with.
"""
# Initialize experiments and track all the hyperparameters
exp = Experiment(
name=hparams.test_tube_exp_name,
# Location to save the metrics.
save_dir=hparams.log_path,
autosave=False,
)
exp.argparse(hparams)
# Pretend to train.
x = torch.rand((1, hparams.x_val))
for train_step in range(0, 100):
y = torch.rand((hparams.x_val, 1))
out = x.mm(y)
exp.log({'fake_err': out.item()})
    # Save the experiment when training is done.
exp.save() | Train your awesome model.
:param hparams: The arguments to run the model with. | Below is the the instruction that describes the task:
### Input:
Train your awesome model.
:param hparams: The arguments to run the model with.
### Response:
def train(hparams, *args):
"""Train your awesome model.
:param hparams: The arguments to run the model with.
"""
# Initialize experiments and track all the hyperparameters
exp = Experiment(
name=hparams.test_tube_exp_name,
# Location to save the metrics.
save_dir=hparams.log_path,
autosave=False,
)
exp.argparse(hparams)
# Pretend to train.
x = torch.rand((1, hparams.x_val))
for train_step in range(0, 100):
y = torch.rand((hparams.x_val, 1))
out = x.mm(y)
exp.log({'fake_err': out.item()})
    # Save the experiment when training is done.
exp.save() |
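A hedged sketch of driving the training function; hparams only needs the three attributes the body reads (test_tube_exp_name, log_path, x_val), supplied here via argparse.Namespace. torch and test_tube must be installed, and the log path is an assumed scratch directory.
from argparse import Namespace
hparams = Namespace(
    test_tube_exp_name='fake_experiment',
    log_path='/tmp/test_tube_logs',
    x_val=16,
)
train(hparams)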
def learn(self, state_arr, limit=1000):
'''
Learning and searching the optimal solution.
Args:
state_arr: `np.ndarray` of initial state.
limit: The maximum number of iterative updates based on value iteration algorithms.
'''
while self.t <= limit:
# Draw samples of next possible actions from any distribution.
next_action_arr = self.extract_possible_actions(state_arr)
# Inference Q-Values.
predicted_q_arr = self.__function_approximator.inference_q(next_action_arr)
# Set `np.ndarray` of rewards and next Q-Values.
reward_value_arr = np.empty((next_action_arr.shape[0], 1))
next_max_q_arr = np.empty((next_action_arr.shape[0], 1))
for i in range(reward_value_arr.shape[0]):
# Observe reward values.
reward_value_arr[i] = self.observe_reward_value(state_arr, next_action_arr[i])
# Inference the Max-Q-Value in next action time.
next_next_action_arr = self.extract_possible_actions(next_action_arr[i])
next_max_q_arr[i] = self.__function_approximator.inference_q(next_next_action_arr).max()
# Select action.
action_arr, predicted_q = self.select_action(next_action_arr, predicted_q_arr)
# Update real Q-Values.
real_q_arr = self.update_q(
predicted_q_arr,
reward_value_arr,
next_max_q_arr
)
# Maximum of predicted and real Q-Values.
real_q = real_q_arr[np.where(predicted_q_arr == predicted_q)[0][0]]
if self.__q_logs_arr.shape[0] > 0:
self.__q_logs_arr = np.r_[
self.__q_logs_arr,
np.array([predicted_q, real_q]).reshape(1, 2)
]
else:
self.__q_logs_arr = np.array([predicted_q, real_q]).reshape(1, 2)
# Learn Q-Values.
self.learn_q(predicted_q_arr, real_q_arr)
# Update State.
state_arr = self.update_state(state_arr, action_arr)
            # Next episode.
self.t += 1
# Check.
end_flag = self.check_the_end_flag(state_arr)
if end_flag is True:
break | Learning and searching the optimal solution.
Args:
state_arr: `np.ndarray` of initial state.
limit: The maximum number of iterative updates based on value iteration algorithms. | Below is the the instruction that describes the task:
### Input:
Learning and searching the optimal solution.
Args:
state_arr: `np.ndarray` of initial state.
limit: The maximum number of iterative updates based on value iteration algorithms.
### Response:
def learn(self, state_arr, limit=1000):
'''
Learning and searching the optimal solution.
Args:
state_arr: `np.ndarray` of initial state.
limit: The maximum number of iterative updates based on value iteration algorithms.
'''
while self.t <= limit:
# Draw samples of next possible actions from any distribution.
next_action_arr = self.extract_possible_actions(state_arr)
# Inference Q-Values.
predicted_q_arr = self.__function_approximator.inference_q(next_action_arr)
# Set `np.ndarray` of rewards and next Q-Values.
reward_value_arr = np.empty((next_action_arr.shape[0], 1))
next_max_q_arr = np.empty((next_action_arr.shape[0], 1))
for i in range(reward_value_arr.shape[0]):
# Observe reward values.
reward_value_arr[i] = self.observe_reward_value(state_arr, next_action_arr[i])
# Inference the Max-Q-Value in next action time.
next_next_action_arr = self.extract_possible_actions(next_action_arr[i])
next_max_q_arr[i] = self.__function_approximator.inference_q(next_next_action_arr).max()
# Select action.
action_arr, predicted_q = self.select_action(next_action_arr, predicted_q_arr)
# Update real Q-Values.
real_q_arr = self.update_q(
predicted_q_arr,
reward_value_arr,
next_max_q_arr
)
# Maximum of predicted and real Q-Values.
real_q = real_q_arr[np.where(predicted_q_arr == predicted_q)[0][0]]
if self.__q_logs_arr.shape[0] > 0:
self.__q_logs_arr = np.r_[
self.__q_logs_arr,
np.array([predicted_q, real_q]).reshape(1, 2)
]
else:
self.__q_logs_arr = np.array([predicted_q, real_q]).reshape(1, 2)
# Learn Q-Values.
self.learn_q(predicted_q_arr, real_q_arr)
# Update State.
state_arr = self.update_state(state_arr, action_arr)
            # Next episode.
self.t += 1
# Check.
end_flag = self.check_the_end_flag(state_arr)
if end_flag is True:
break |
def get_moments(model,x):
'''
Moments (mean and sdev.) of a GP model at x
'''
input_dim = model.X.shape[1]
x = reshape(x,input_dim)
fmin = min(model.predict(model.X)[0])
m, v = model.predict(x)
s = np.sqrt(np.clip(v, 0, np.inf))
return (m,s, fmin) | Moments (mean and sdev.) of a GP model at x | Below is the the instruction that describes the task:
### Input:
Moments (mean and sdev.) of a GP model at x
### Response:
def get_moments(model,x):
'''
Moments (mean and sdev.) of a GP model at x
'''
input_dim = model.X.shape[1]
x = reshape(x,input_dim)
fmin = min(model.predict(model.X)[0])
m, v = model.predict(x)
s = np.sqrt(np.clip(v, 0, np.inf))
return (m,s, fmin) |
def compress_multiple_pdfs(source_directory, output_directory, ghostscript_binary):
"""Compress all PDF files in the current directory and place the output in the
given output directory. This is a generator function that first yields the amount
of files to be compressed, and then yields the output path of each file.
Args:
source_directory (str): Filepath to the source directory.
output_directory (str): Filepath to the output directory.
ghostscript_binary (str): Name of the Ghostscript binary.
Returns:
list(str): paths to outputs.
"""
source_paths = _get_pdf_filenames_at(source_directory)
yield len(source_paths)
for source_path in source_paths:
output = os.path.join(output_directory, os.path.basename(source_path))
compress_pdf(source_path, output, ghostscript_binary)
        yield output | Compress all PDF files in the source directory and place the output in the
    given output directory. This is a generator function that first yields the number
of files to be compressed, and then yields the output path of each file.
Args:
source_directory (str): Filepath to the source directory.
output_directory (str): Filepath to the output directory.
ghostscript_binary (str): Name of the Ghostscript binary.
Returns:
list(str): paths to outputs. | Below is the the instruction that describes the task:
### Input:
Compress all PDF files in the source directory and place the output in the
    given output directory. This is a generator function that first yields the number
of files to be compressed, and then yields the output path of each file.
Args:
source_directory (str): Filepath to the source directory.
output_directory (str): Filepath to the output directory.
ghostscript_binary (str): Name of the Ghostscript binary.
Returns:
list(str): paths to outputs.
### Response:
def compress_multiple_pdfs(source_directory, output_directory, ghostscript_binary):
"""Compress all PDF files in the current directory and place the output in the
given output directory. This is a generator function that first yields the amount
of files to be compressed, and then yields the output path of each file.
Args:
source_directory (str): Filepath to the source directory.
output_directory (str): Filepath to the output directory.
ghostscript_binary (str): Name of the Ghostscript binary.
Returns:
list(str): paths to outputs.
"""
source_paths = _get_pdf_filenames_at(source_directory)
yield len(source_paths)
for source_path in source_paths:
output = os.path.join(output_directory, os.path.basename(source_path))
compress_pdf(source_path, output, ghostscript_binary)
yield output |
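Because the function is a generator that yields the file count first, a typical consumer primes it once and then iterates; a hedged sketch (paths and the Ghostscript binary name are placeholders, and Ghostscript must be installed).
compressor = compress_multiple_pdfs('./scans', './compressed', 'gs')
total = next(compressor)                 # first yield: number of PDFs found
for i, output_path in enumerate(compressor, start=1):
    print('[%d/%d] wrote %s' % (i, total, output_path))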
def _Complete(self):
"""Marks the hunt as completed."""
self._RemoveForemanRule()
if "w" in self.hunt_obj.mode:
self.hunt_obj.Set(self.hunt_obj.Schema.STATE("COMPLETED"))
self.hunt_obj.Flush() | Marks the hunt as completed. | Below is the the instruction that describes the task:
### Input:
Marks the hunt as completed.
### Response:
def _Complete(self):
"""Marks the hunt as completed."""
self._RemoveForemanRule()
if "w" in self.hunt_obj.mode:
self.hunt_obj.Set(self.hunt_obj.Schema.STATE("COMPLETED"))
self.hunt_obj.Flush() |
def compose(self, to, subject, text):
"""Login required. Sends POST to send a message to a user. Returns True or raises :class:`exceptions.UnexpectedResponse` if non-"truthy" value in response.
URL: ``http://www.reddit.com/api/compose/``
        :param to: username or :class:`things.Account` of user to send to
:param subject: subject of message
:param text: message body text
"""
if isinstance(to, Account):
to = to.name
data = dict(to=to, subject=subject, text=text)
j = self.post('api', 'compose', data=data)
return assert_truthy(j) | Login required. Sends POST to send a message to a user. Returns True or raises :class:`exceptions.UnexpectedResponse` if non-"truthy" value in response.
URL: ``http://www.reddit.com/api/compose/``
        :param to: username or :class:`things.Account` of user to send to
:param subject: subject of message
:param text: message body text | Below is the the instruction that describes the task:
### Input:
Login required. Sends POST to send a message to a user. Returns True or raises :class:`exceptions.UnexpectedResponse` if non-"truthy" value in response.
URL: ``http://www.reddit.com/api/compose/``
        :param to: username or :class:`things.Account` of user to send to
:param subject: subject of message
:param text: message body text
### Response:
def compose(self, to, subject, text):
"""Login required. Sends POST to send a message to a user. Returns True or raises :class:`exceptions.UnexpectedResponse` if non-"truthy" value in response.
URL: ``http://www.reddit.com/api/compose/``
        :param to: username or :class:`things.Account` of user to send to
:param subject: subject of message
:param text: message body text
"""
if isinstance(to, Account):
to = to.name
data = dict(to=to, subject=subject, text=text)
j = self.post('api', 'compose', data=data)
return assert_truthy(j) |
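A usage sketch, assuming client is a logged-in instance of the reddit API wrapper that defines this method; the recipient and message text are placeholders.
sent = client.compose('some_user', 'Hello', 'Just testing the compose endpoint.')
print(sent)   # True on success; UnexpectedResponse is raised otherwise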
def mob_suite_targets(self, database_name='mob_suite'):
"""
Download MOB-suite databases
:param database_name: name of current database
"""
logging.info('Download MOB-suite databases')
# NOTE: This requires mob_suite >=1.4.9.1. Versions before that don't have the -d option.
cmd = 'mob_init -d {}'.format(os.path.join(self.databasepath, database_name))
out, err = run_subprocess(cmd) | Download MOB-suite databases
:param database_name: name of current database | Below is the the instruction that describes the task:
### Input:
Download MOB-suite databases
:param database_name: name of current database
### Response:
def mob_suite_targets(self, database_name='mob_suite'):
"""
Download MOB-suite databases
:param database_name: name of current database
"""
logging.info('Download MOB-suite databases')
# NOTE: This requires mob_suite >=1.4.9.1. Versions before that don't have the -d option.
cmd = 'mob_init -d {}'.format(os.path.join(self.databasepath, database_name))
out, err = run_subprocess(cmd) |
def genstis(outname):
""" Generate TestCases from cmdfile according to the pattern in patternfile"""
pattern="""class stisS%d(countrateCase):
def setUp(self):
self.obsmode="%s"
self.spectrum="%s"
self.setglobal(__file__)
self.runpy()\n"""
speclist=['/grp/hst/cdbs/calspec/gd71_mod_005.fits',
'/grp/hst/cdbs/calspec/gd153_mod_004.fits',
'/grp/hst/cdbs/calspec/g191b2b_mod_004.fits']
glist={'g140l':'fuvmama','g230l':'nuvmama','g430l':'ccd','g750l':'ccd',
'g230lb':'ccd'}
out=open(outname,'a')
out.write("""from pytools import testutil
import sys
from basecase import calcphotCase, calcspecCase, countrateCase,SpecSourcerateSpecCase\n
""")
count=0
for g in glist:
for sp in speclist:
obsmode='stis,%s,fuvmama,s52x2'%g
defn=pattern%(count,obsmode,sp)
out.write(defn)
count+=1
out.write("""\n\n
if __name__ == '__main__':
if 'debug' in sys.argv:
testutil.debug(__name__)
else:
testutil.testall(__name__,2)
""")
out.close() | Generate TestCases from cmdfile according to the pattern in patternfile | Below is the the instruction that describes the task:
### Input:
Generate TestCases from cmdfile according to the pattern in patternfile
### Response:
def genstis(outname):
""" Generate TestCases from cmdfile according to the pattern in patternfile"""
pattern="""class stisS%d(countrateCase):
def setUp(self):
self.obsmode="%s"
self.spectrum="%s"
self.setglobal(__file__)
self.runpy()\n"""
speclist=['/grp/hst/cdbs/calspec/gd71_mod_005.fits',
'/grp/hst/cdbs/calspec/gd153_mod_004.fits',
'/grp/hst/cdbs/calspec/g191b2b_mod_004.fits']
glist={'g140l':'fuvmama','g230l':'nuvmama','g430l':'ccd','g750l':'ccd',
'g230lb':'ccd'}
out=open(outname,'a')
out.write("""from pytools import testutil
import sys
from basecase import calcphotCase, calcspecCase, countrateCase,SpecSourcerateSpecCase\n
""")
count=0
for g in glist:
for sp in speclist:
obsmode='stis,%s,fuvmama,s52x2'%g
defn=pattern%(count,obsmode,sp)
out.write(defn)
count+=1
out.write("""\n\n
if __name__ == '__main__':
if 'debug' in sys.argv:
testutil.debug(__name__)
else:
testutil.testall(__name__,2)
""")
out.close() |
def register_service(cls, service):
"""Add a service to the thread's StackInABox instance.
:param service: StackInABoxService instance to add to the test
For return value and errors see StackInABox.register()
"""
logger.debug('Registering service {0}'.format(service.name))
return local_store.instance.register(service) | Add a service to the thread's StackInABox instance.
:param service: StackInABoxService instance to add to the test
For return value and errors see StackInABox.register() | Below is the the instruction that describes the task:
### Input:
Add a service to the thread's StackInABox instance.
:param service: StackInABoxService instance to add to the test
For return value and errors see StackInABox.register()
### Response:
def register_service(cls, service):
"""Add a service to the thread's StackInABox instance.
:param service: StackInABoxService instance to add to the test
For return value and errors see StackInABox.register()
"""
logger.debug('Registering service {0}'.format(service.name))
return local_store.instance.register(service) |
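A hedged sketch of registering a stub service inside a test; HelloService stands for a StackInABoxService subclass defined elsewhere, and ServiceFixture is an assumed alias for the class that carries register_service (neither name comes from the source).
def test_hello_service():
    # Register the fake service with the thread-local StackInABox instance.
    ServiceFixture.register_service(HelloService())
    # ... exercise code that talks to the stubbed endpoint ...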
def set_color(self, value, callb=None, duration=0, rapid=False):
"""Convenience method to set the colour status of the device
This method will send a LightSetColor message to the device, and request callb be executed
when an ACK is received. The default callback will simply cache the value.
        :param value: The new state, a dictionary of int with 4 keys Hue, Saturation, Brightness, Kelvin
:type value: dict
:param duration: The duration, in seconds, of the power state transition.
:type duration: int
:param callb: Callable to be used when the response is received. If not set,
self.resp_set_label will be used.
:type callb: callable
:param rapid: Whether to ask for ack (False) or not (True). Default False
:type rapid: bool
:returns: None
:rtype: None
"""
if len(value) == 4:
mypartial=partial(self.resp_set_light,color=value)
if callb:
mycallb=lambda x,y:(mypartial(y),callb(x,y))
else:
mycallb=lambda x,y:mypartial(y)
#try:
if rapid:
self.fire_and_forget(LightSetColor, {"color": value, "duration": duration}, num_repeats=1)
self.resp_set_light(None,color=value)
if callb:
callb(self,None)
else:
self.req_with_ack(LightSetColor, {"color": value, "duration": duration},callb=mycallb) | Convenience method to set the colour status of the device
This method will send a LightSetColor message to the device, and request callb be executed
when an ACK is received. The default callback will simply cache the value.
        :param value: The new state, a dictionary of int with 4 keys Hue, Saturation, Brightness, Kelvin
:type value: dict
:param duration: The duration, in seconds, of the power state transition.
:type duration: int
:param callb: Callable to be used when the response is received. If not set,
self.resp_set_label will be used.
:type callb: callable
:param rapid: Whether to ask for ack (False) or not (True). Default False
:type rapid: bool
:returns: None
:rtype: None | Below is the the instruction that describes the task:
### Input:
Convenience method to set the colour status of the device
This method will send a LightSetColor message to the device, and request callb be executed
when an ACK is received. The default callback will simply cache the value.
        :param value: The new state, a dictionary of int with 4 keys Hue, Saturation, Brightness, Kelvin
:type value: dict
:param duration: The duration, in seconds, of the power state transition.
:type duration: int
:param callb: Callable to be used when the response is received. If not set,
self.resp_set_label will be used.
:type callb: callable
:param rapid: Whether to ask for ack (False) or not (True). Default False
:type rapid: bool
:returns: None
:rtype: None
### Response:
def set_color(self, value, callb=None, duration=0, rapid=False):
"""Convenience method to set the colour status of the device
This method will send a LightSetColor message to the device, and request callb be executed
when an ACK is received. The default callback will simply cache the value.
        :param value: The new state, a dictionary of int with 4 keys Hue, Saturation, Brightness, Kelvin
:type value: dict
:param duration: The duration, in seconds, of the power state transition.
:type duration: int
:param callb: Callable to be used when the response is received. If not set,
self.resp_set_label will be used.
:type callb: callable
:param rapid: Whether to ask for ack (False) or not (True). Default False
:type rapid: bool
:returns: None
:rtype: None
"""
if len(value) == 4:
mypartial=partial(self.resp_set_light,color=value)
if callb:
mycallb=lambda x,y:(mypartial(y),callb(x,y))
else:
mycallb=lambda x,y:mypartial(y)
#try:
if rapid:
self.fire_and_forget(LightSetColor, {"color": value, "duration": duration}, num_repeats=1)
self.resp_set_light(None,color=value)
if callb:
callb(self,None)
else:
self.req_with_ack(LightSetColor, {"color": value, "duration": duration},callb=mycallb) |
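A hedged caller sketch following the docstring's description of value as a four-entry mapping; bulb stands for a previously discovered light object and the HSBK numbers are arbitrary 16-bit values.
warm_white = {'hue': 0, 'saturation': 0, 'brightness': 50000, 'kelvin': 3000}
# Fade to the new colour over 2 seconds and cache it once the ACK arrives.
bulb.set_color(warm_white, duration=2, rapid=False)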
def check_deleted(session, rev_id, title=None, timestamp=None,
radius=defaults.RADIUS, before=None, window=None,
rvprop=None):
"""
Checks the revert status of a deleted revision. With this method, you can
determine whether an edit is a 'reverting' edit, was 'reverted' by another
edit and/or was 'reverted_to' by another edit.
:Parameters:
session : :class:`mwapi.Session`
An API session to make use of
rev_id : int
the ID of the revision to check
title : str
the title of the page the revision occupies (slower if not
provided) Note that the MediaWiki API expects the title to
include the namespace prefix (e.g. "User_talk:EpochFail")
radius : int
a positive integer indicating the maximum number of revisions
that can be reverted
before : :class:`mwtypes.Timestamp`
if set, limits the search for *reverting* revisions to those which
were saved before this timestamp
window : int
if set, limits the search for *reverting* revisions to those which
were saved within `window` seconds after the reverted edit
rvprop : set( str )
a set of properties to include in revisions
:Returns:
A triple :class:`mwreverts.Revert` | `None`
* reverting -- If this edit reverted other edit(s)
* reverted -- If this edit was reverted by another edit
* reverted_to -- If this edit was reverted to by another edit
"""
rev_id = int(rev_id)
radius = int(radius)
if radius < 1:
raise TypeError("invalid radius. Expected a positive integer.")
title = str(title) if title is not None else None
before = Timestamp(before) if before is not None else None
rvprop = set(rvprop) if rvprop is not None else set()
# If we don't have the title, we're going to need to look it up
if title is None or timestamp is None:
title, timestamp = get_deleted_title_and_timestamp(session, rev_id)
# Load history and current rev
current_and_past_revs = list(n_deleted_edits_before(
session, rev_id, title, timestamp, n=radius + 1,
rvprop={'ids', 'timestamp', 'sha1'} | rvprop
))
if len(current_and_past_revs) < 1:
raise KeyError("Revision {0} not found in page {1}."
.format(rev_id, title))
current_rev, past_revs = (
current_and_past_revs[-1], # Current
current_and_past_revs[:-1] # Past revisions
)
if window is not None and before is None:
before = Timestamp(current_rev['timestamp']) + window
# Load future revisions
future_revs = list(n_deleted_edits_after(
session, rev_id + 1, title, timestamp, n=radius, before=before,
rvprop={'ids', 'timestamp', 'sha1'} | rvprop
))
return build_revert_tuple(
rev_id, past_revs, current_rev, future_revs, radius) | Checks the revert status of a deleted revision. With this method, you can
determine whether an edit is a 'reverting' edit, was 'reverted' by another
edit and/or was 'reverted_to' by another edit.
:Parameters:
session : :class:`mwapi.Session`
An API session to make use of
rev_id : int
the ID of the revision to check
title : str
the title of the page the revision occupies (slower if not
provided) Note that the MediaWiki API expects the title to
include the namespace prefix (e.g. "User_talk:EpochFail")
radius : int
a positive integer indicating the maximum number of revisions
that can be reverted
before : :class:`mwtypes.Timestamp`
if set, limits the search for *reverting* revisions to those which
were saved before this timestamp
window : int
if set, limits the search for *reverting* revisions to those which
were saved within `window` seconds after the reverted edit
rvprop : set( str )
a set of properties to include in revisions
:Returns:
A triple :class:`mwreverts.Revert` | `None`
* reverting -- If this edit reverted other edit(s)
* reverted -- If this edit was reverted by another edit
* reverted_to -- If this edit was reverted to by another edit | Below is the the instruction that describes the task:
### Input:
Checks the revert status of a deleted revision. With this method, you can
determine whether an edit is a 'reverting' edit, was 'reverted' by another
edit and/or was 'reverted_to' by another edit.
:Parameters:
session : :class:`mwapi.Session`
An API session to make use of
rev_id : int
the ID of the revision to check
title : str
the title of the page the revision occupies (slower if not
provided) Note that the MediaWiki API expects the title to
include the namespace prefix (e.g. "User_talk:EpochFail")
radius : int
a positive integer indicating the maximum number of revisions
that can be reverted
before : :class:`mwtypes.Timestamp`
if set, limits the search for *reverting* revisions to those which
were saved before this timestamp
window : int
if set, limits the search for *reverting* revisions to those which
were saved within `window` seconds after the reverted edit
rvprop : set( str )
a set of properties to include in revisions
:Returns:
A triple :class:`mwreverts.Revert` | `None`
* reverting -- If this edit reverted other edit(s)
* reverted -- If this edit was reverted by another edit
* reverted_to -- If this edit was reverted to by another edit
### Response:
def check_deleted(session, rev_id, title=None, timestamp=None,
radius=defaults.RADIUS, before=None, window=None,
rvprop=None):
"""
Checks the revert status of a deleted revision. With this method, you can
determine whether an edit is a 'reverting' edit, was 'reverted' by another
edit and/or was 'reverted_to' by another edit.
:Parameters:
session : :class:`mwapi.Session`
An API session to make use of
rev_id : int
the ID of the revision to check
title : str
the title of the page the revision occupies (slower if not
provided) Note that the MediaWiki API expects the title to
include the namespace prefix (e.g. "User_talk:EpochFail")
radius : int
a positive integer indicating the maximum number of revisions
that can be reverted
before : :class:`mwtypes.Timestamp`
if set, limits the search for *reverting* revisions to those which
were saved before this timestamp
window : int
if set, limits the search for *reverting* revisions to those which
were saved within `window` seconds after the reverted edit
rvprop : set( str )
a set of properties to include in revisions
:Returns:
A triple :class:`mwreverts.Revert` | `None`
* reverting -- If this edit reverted other edit(s)
* reverted -- If this edit was reverted by another edit
* reverted_to -- If this edit was reverted to by another edit
"""
rev_id = int(rev_id)
radius = int(radius)
if radius < 1:
raise TypeError("invalid radius. Expected a positive integer.")
title = str(title) if title is not None else None
before = Timestamp(before) if before is not None else None
rvprop = set(rvprop) if rvprop is not None else set()
# If we don't have the title, we're going to need to look it up
if title is None or timestamp is None:
title, timestamp = get_deleted_title_and_timestamp(session, rev_id)
# Load history and current rev
current_and_past_revs = list(n_deleted_edits_before(
session, rev_id, title, timestamp, n=radius + 1,
rvprop={'ids', 'timestamp', 'sha1'} | rvprop
))
if len(current_and_past_revs) < 1:
raise KeyError("Revision {0} not found in page {1}."
.format(rev_id, title))
current_rev, past_revs = (
current_and_past_revs[-1], # Current
current_and_past_revs[:-1] # Past revisions
)
if window is not None and before is None:
before = Timestamp(current_rev['timestamp']) + window
# Load future revisions
future_revs = list(n_deleted_edits_after(
session, rev_id + 1, title, timestamp, n=radius, before=before,
rvprop={'ids', 'timestamp', 'sha1'} | rvprop
))
return build_revert_tuple(
rev_id, past_revs, current_rev, future_revs, radius) |
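A hedged sketch of calling the checker against a live wiki; it assumes an mwapi session with permission to read deleted revisions, and the revision ID is a placeholder.
import mwapi
session = mwapi.Session('https://en.wikipedia.org', user_agent='revert-check demo')
reverting, reverted, reverted_to = check_deleted(session, 123456789, radius=10)
print('reverts others :', reverting is not None)
print('was reverted  :', reverted is not None)
print('was reverted to:', reverted_to is not None)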
def nowarnings(func):
"""Create a function wrapped in a context that ignores warnings.
"""
@functools.wraps(func)
def new_func(*args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
return func(*args, **kwargs)
return new_func | Create a function wrapped in a context that ignores warnings. | Below is the the instruction that describes the task:
### Input:
Create a function wrapped in a context that ignores warnings.
### Response:
def nowarnings(func):
"""Create a function wrapped in a context that ignores warnings.
"""
@functools.wraps(func)
def new_func(*args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
return func(*args, **kwargs)
return new_func |
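A quick self-contained demonstration of the decorator:
import warnings
@nowarnings
def noisy_mean(values):
    warnings.warn('this warning is silenced by the decorator', UserWarning)
    return sum(values) / len(values)
print(noisy_mean([1, 2, 3]))   # prints 2.0 with no warning emitted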
def GetKernelParams(time, flux, errors, kernel='Basic', mask=[],
giter=3, gmaxf=200, guess=None):
'''
Optimizes the GP by training it on the current de-trended light curve.
Returns the white noise amplitude, red noise amplitude,
and red noise timescale.
:param array_like time: The time array
:param array_like flux: The flux array
:param array_like errors: The flux errors array
:param array_like mask: The indices to be masked when training the GP. \
Default `[]`
:param int giter: The number of iterations. Default 3
:param int gmaxf: The maximum number of function evaluations. Default 200
:param tuple guess: The guess to initialize the minimization with. \
Default :py:obj:`None`
'''
log.info("Optimizing the GP...")
# Save a copy of time and errors for later
time_copy = np.array(time)
errors_copy = np.array(errors)
# Apply the mask
time = np.delete(time, mask)
flux = np.delete(flux, mask)
errors = np.delete(errors, mask)
# Remove 5-sigma outliers to be safe
f = flux - savgol_filter(flux, 49, 2) + np.nanmedian(flux)
med = np.nanmedian(f)
MAD = 1.4826 * np.nanmedian(np.abs(f - med))
mask = np.where((f > med + 5 * MAD) | (f < med - 5 * MAD))[0]
time = np.delete(time, mask)
flux = np.delete(flux, mask)
errors = np.delete(errors, mask)
# Initial guesses and bounds
white = np.nanmedian([np.nanstd(c) for c in Chunks(flux, 13)])
amp = np.nanstd(flux)
tau = 30.0
if kernel == 'Basic':
if guess is None:
guess = [white, amp, tau]
bounds = [[0.1 * white, 10. * white],
[1., 10000. * amp],
[0.5, 100.]]
elif kernel == 'QuasiPeriodic':
if guess is None:
guess = [white, amp, tau, 1., 20.]
bounds = [[0.1 * white, 10. * white],
[1., 10000. * amp],
[1e-5, 1e2],
[0.02, 100.]]
else:
raise ValueError('Invalid value for `kernel`.')
# Loop
llbest = -np.inf
xbest = np.array(guess)
for i in range(giter):
# Randomize an initial guess
iguess = [np.inf for g in guess]
for j, b in enumerate(bounds):
tries = 0
while (iguess[j] < b[0]) or (iguess[j] > b[1]):
iguess[j] = (1 + 0.5 * np.random.randn()) * guess[j]
tries += 1
if tries > 100:
iguess[j] = b[0] + np.random.random() * (b[1] - b[0])
break
# Optimize
x = fmin_l_bfgs_b(NegLnLike, iguess, approx_grad=False,
bounds=bounds, args=(time, flux, errors, kernel),
maxfun=gmaxf)
log.info('Iteration #%d/%d:' % (i + 1, giter))
log.info(' ' + x[2]['task'].decode('utf-8'))
log.info(' ' + 'Function calls: %d' % x[2]['funcalls'])
log.info(' ' + 'Log-likelihood: %.3e' % -x[1])
if kernel == 'Basic':
log.info(' ' + 'White noise : %.3e (%.1f x error bars)' %
(x[0][0], x[0][0] / np.nanmedian(errors)))
log.info(' ' + 'Red amplitude : %.3e (%.1f x stand dev)' %
(x[0][1], x[0][1] / np.nanstd(flux)))
log.info(' ' + 'Red timescale : %.2f days' % x[0][2])
elif kernel == 'QuasiPeriodic':
log.info(' ' + 'White noise : %.3e (%.1f x error bars)' %
(x[0][0], x[0][0] / np.nanmedian(errors)))
log.info(' ' + 'Red amplitude : %.3e (%.1f x stand dev)' %
(x[0][1], x[0][1] / np.nanstd(flux)))
log.info(' ' + 'Gamma : %.3e' % x[0][2])
log.info(' ' + 'Period : %.2f days' % x[0][3])
if -x[1] > llbest:
llbest = -x[1]
xbest = np.array(x[0])
return xbest | Optimizes the GP by training it on the current de-trended light curve.
Returns the white noise amplitude, red noise amplitude,
and red noise timescale.
:param array_like time: The time array
:param array_like flux: The flux array
:param array_like errors: The flux errors array
:param array_like mask: The indices to be masked when training the GP. \
Default `[]`
:param int giter: The number of iterations. Default 3
:param int gmaxf: The maximum number of function evaluations. Default 200
:param tuple guess: The guess to initialize the minimization with. \
Default :py:obj:`None` | Below is the the instruction that describes the task:
### Input:
Optimizes the GP by training it on the current de-trended light curve.
Returns the white noise amplitude, red noise amplitude,
and red noise timescale.
:param array_like time: The time array
:param array_like flux: The flux array
:param array_like errors: The flux errors array
:param array_like mask: The indices to be masked when training the GP. \
Default `[]`
:param int giter: The number of iterations. Default 3
:param int gmaxf: The maximum number of function evaluations. Default 200
:param tuple guess: The guess to initialize the minimization with. \
Default :py:obj:`None`
### Response:
def GetKernelParams(time, flux, errors, kernel='Basic', mask=[],
giter=3, gmaxf=200, guess=None):
'''
Optimizes the GP by training it on the current de-trended light curve.
Returns the white noise amplitude, red noise amplitude,
and red noise timescale.
:param array_like time: The time array
:param array_like flux: The flux array
:param array_like errors: The flux errors array
:param array_like mask: The indices to be masked when training the GP. \
Default `[]`
:param int giter: The number of iterations. Default 3
:param int gmaxf: The maximum number of function evaluations. Default 200
:param tuple guess: The guess to initialize the minimization with. \
Default :py:obj:`None`
'''
log.info("Optimizing the GP...")
# Save a copy of time and errors for later
time_copy = np.array(time)
errors_copy = np.array(errors)
# Apply the mask
time = np.delete(time, mask)
flux = np.delete(flux, mask)
errors = np.delete(errors, mask)
# Remove 5-sigma outliers to be safe
f = flux - savgol_filter(flux, 49, 2) + np.nanmedian(flux)
med = np.nanmedian(f)
MAD = 1.4826 * np.nanmedian(np.abs(f - med))
mask = np.where((f > med + 5 * MAD) | (f < med - 5 * MAD))[0]
time = np.delete(time, mask)
flux = np.delete(flux, mask)
errors = np.delete(errors, mask)
# Initial guesses and bounds
white = np.nanmedian([np.nanstd(c) for c in Chunks(flux, 13)])
amp = np.nanstd(flux)
tau = 30.0
if kernel == 'Basic':
if guess is None:
guess = [white, amp, tau]
bounds = [[0.1 * white, 10. * white],
[1., 10000. * amp],
[0.5, 100.]]
elif kernel == 'QuasiPeriodic':
if guess is None:
guess = [white, amp, tau, 1., 20.]
bounds = [[0.1 * white, 10. * white],
[1., 10000. * amp],
[1e-5, 1e2],
[0.02, 100.]]
else:
raise ValueError('Invalid value for `kernel`.')
# Loop
llbest = -np.inf
xbest = np.array(guess)
for i in range(giter):
# Randomize an initial guess
iguess = [np.inf for g in guess]
for j, b in enumerate(bounds):
tries = 0
while (iguess[j] < b[0]) or (iguess[j] > b[1]):
iguess[j] = (1 + 0.5 * np.random.randn()) * guess[j]
tries += 1
if tries > 100:
iguess[j] = b[0] + np.random.random() * (b[1] - b[0])
break
# Optimize
x = fmin_l_bfgs_b(NegLnLike, iguess, approx_grad=False,
bounds=bounds, args=(time, flux, errors, kernel),
maxfun=gmaxf)
log.info('Iteration #%d/%d:' % (i + 1, giter))
log.info(' ' + x[2]['task'].decode('utf-8'))
log.info(' ' + 'Function calls: %d' % x[2]['funcalls'])
log.info(' ' + 'Log-likelihood: %.3e' % -x[1])
if kernel == 'Basic':
log.info(' ' + 'White noise : %.3e (%.1f x error bars)' %
(x[0][0], x[0][0] / np.nanmedian(errors)))
log.info(' ' + 'Red amplitude : %.3e (%.1f x stand dev)' %
(x[0][1], x[0][1] / np.nanstd(flux)))
log.info(' ' + 'Red timescale : %.2f days' % x[0][2])
elif kernel == 'QuasiPeriodic':
log.info(' ' + 'White noise : %.3e (%.1f x error bars)' %
(x[0][0], x[0][0] / np.nanmedian(errors)))
log.info(' ' + 'Red amplitude : %.3e (%.1f x stand dev)' %
(x[0][1], x[0][1] / np.nanstd(flux)))
log.info(' ' + 'Gamma : %.3e' % x[0][2])
log.info(' ' + 'Period : %.2f days' % x[0][3])
if -x[1] > llbest:
llbest = -x[1]
xbest = np.array(x[0])
return xbest |
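A minimal usage sketch for the record above: it builds a synthetic light curve and asks for the Basic-kernel hyperparameters. The import path is an assumption (the module must also provide the NegLnLike and Chunks helpers the function calls), and the signal itself is made up.

import numpy as np
from everest.gp import GetKernelParams   # assumed import path

time = np.linspace(0.0, 90.0, 4000)                     # days
flux = 1.0 + 1e-3 * np.sin(2.0 * np.pi * time / 12.0)   # smooth "red" signal
flux += 2e-4 * np.random.randn(time.size)               # white noise
errors = np.full_like(time, 2e-4)

white, amp, tau = GetKernelParams(time, flux, errors, kernel='Basic',
                                  giter=1, gmaxf=50)
print(white, amp, tau)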
def _sync_extract(self, from_path, method, to_path):
"""Returns `to_path` once resource has been extracted there."""
to_path_tmp = '%s%s_%s' % (to_path, constants.INCOMPLETE_SUFFIX,
uuid.uuid4().hex)
try:
for path, handle in iter_archive(from_path, method):
_copy(handle, path and os.path.join(to_path_tmp, path) or to_path_tmp)
except BaseException as err:
msg = 'Error while extracting %s to %s : %s' % (from_path, to_path, err)
raise ExtractError(msg)
# `tf.io.gfile.Rename(overwrite=True)` doesn't work for non empty
# directories, so delete destination first, if it already exists.
if tf.io.gfile.exists(to_path):
tf.io.gfile.rmtree(to_path)
tf.io.gfile.rename(to_path_tmp, to_path)
self._pbar_path.update(1)
return to_path | Returns `to_path` once resource has been extracted there. | Below is the instruction that describes the task:
### Input:
Returns `to_path` once resource has been extracted there.
### Response:
def _sync_extract(self, from_path, method, to_path):
"""Returns `to_path` once resource has been extracted there."""
to_path_tmp = '%s%s_%s' % (to_path, constants.INCOMPLETE_SUFFIX,
uuid.uuid4().hex)
try:
for path, handle in iter_archive(from_path, method):
_copy(handle, path and os.path.join(to_path_tmp, path) or to_path_tmp)
except BaseException as err:
msg = 'Error while extracting %s to %s : %s' % (from_path, to_path, err)
raise ExtractError(msg)
# `tf.io.gfile.Rename(overwrite=True)` doesn't work for non empty
# directories, so delete destination first, if it already exists.
if tf.io.gfile.exists(to_path):
tf.io.gfile.rmtree(to_path)
tf.io.gfile.rename(to_path_tmp, to_path)
self._pbar_path.update(1)
return to_path |
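The record above extracts into a temporary sibling directory and only swaps it into place once extraction succeeds, so an interrupted run never leaves a half-written destination. A standard-library-only sketch of the same pattern (archive type and paths are illustrative assumptions):

import os
import shutil
import tarfile
import uuid

def extract_atomically(archive_path, dest_dir):
    # Extract next to the destination, then swap it in with a rename.
    tmp_dir = '%s.incomplete_%s' % (dest_dir, uuid.uuid4().hex)
    try:
        with tarfile.open(archive_path) as tar:
            tar.extractall(tmp_dir)
    except BaseException:
        shutil.rmtree(tmp_dir, ignore_errors=True)
        raise
    if os.path.exists(dest_dir):
        shutil.rmtree(dest_dir)   # rename cannot overwrite a non-empty directory
    os.rename(tmp_dir, dest_dir)
    return dest_dir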
def p_ConstValue_float(p):
"""ConstValue : FLOAT"""
p[0] = model.Value(type=model.Value.FLOAT, value=p[1]) | ConstValue : FLOAT | Below is the instruction that describes the task:
### Input:
ConstValue : FLOAT
### Response:
def p_ConstValue_float(p):
"""ConstValue : FLOAT"""
p[0] = model.Value(type=model.Value.FLOAT, value=p[1]) |
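The rule above follows PLY's yacc convention: the grammar production lives in the function docstring and p[0] receives the reduced value. A self-contained sketch with a stand-in for model.Value (the FLOAT token regex and the tuple stand-in are assumptions):

import ply.lex as lex
import ply.yacc as yacc

tokens = ('FLOAT',)
t_ignore = ' \t'

def t_FLOAT(t):
    r'\d+\.\d+'
    t.value = float(t.value)
    return t

def t_error(t):
    t.lexer.skip(1)

def p_ConstValue_float(p):
    """ConstValue : FLOAT"""
    p[0] = ('FLOAT', p[1])   # stand-in for model.Value(type=..., value=p[1])

def p_error(p):
    pass

lexer = lex.lex()
parser = yacc.yacc(write_tables=False, debug=False)
print(parser.parse('3.14'))   # ('FLOAT', 3.14)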
def transcripts(context, build, hgnc_id, json):
"""Show all transcripts in the database"""
LOG.info("Running scout view transcripts")
adapter = context.obj['adapter']
if not json:
click.echo("Chromosome\tstart\tend\ttranscript_id\thgnc_id\trefseq\tis_primary")
for tx_obj in adapter.transcripts(build=build, hgnc_id=hgnc_id):
if json:
pp(tx_obj)
continue
click.echo("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}".format(
tx_obj['chrom'],
tx_obj['start'],
tx_obj['end'],
tx_obj['ensembl_transcript_id'],
tx_obj['hgnc_id'],
tx_obj.get('refseq_id', ''),
tx_obj.get('is_primary') or '',
)) | Show all transcripts in the database | Below is the instruction that describes the task:
### Input:
Show all transcripts in the database
### Response:
def transcripts(context, build, hgnc_id, json):
"""Show all transcripts in the database"""
LOG.info("Running scout view transcripts")
adapter = context.obj['adapter']
if not json:
click.echo("Chromosome\tstart\tend\ttranscript_id\thgnc_id\trefseq\tis_primary")
for tx_obj in adapter.transcripts(build=build, hgnc_id=hgnc_id):
if json:
pp(tx_obj)
continue
click.echo("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}".format(
tx_obj['chrom'],
tx_obj['start'],
tx_obj['end'],
tx_obj['ensembl_transcript_id'],
tx_obj['hgnc_id'],
tx_obj.get('refseq_id', ''),
tx_obj.get('is_primary') or '',
)) |
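Because the record above is a click command, it can be exercised with click's test runner. The sketch below assumes the usual @click.command()/@click.pass_context decorators (not shown in the record), an importable command object, and a stub adapter; all three are assumptions.

from click.testing import CliRunner
from scout.commands.view.transcripts import transcripts   # assumed import path

class StubAdapter(object):
    # Hypothetical stand-in for the scout database adapter.
    def transcripts(self, build=None, hgnc_id=None):
        yield {'chrom': '1', 'start': 100, 'end': 500,
               'ensembl_transcript_id': 'ENST00000000001',
               'hgnc_id': 5, 'refseq_id': 'NM_000001', 'is_primary': True}

runner = CliRunner()
result = runner.invoke(transcripts, [], obj={'adapter': StubAdapter()})
print(result.output)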
def _show_shortcuts(shortcuts, name=None):
"""Display shortcuts."""
name = name or ''
print('')
if name:
name = ' for ' + name
print('Keyboard shortcuts' + name)
for name in sorted(shortcuts):
shortcut = _get_shortcut_string(shortcuts[name])
if not name.startswith('_'):
print('- {0:<40}: {1:s}'.format(name, shortcut)) | Display shortcuts. | Below is the instruction that describes the task:
### Input:
Display shortcuts.
### Response:
def _show_shortcuts(shortcuts, name=None):
"""Display shortcuts."""
name = name or ''
print('')
if name:
name = ' for ' + name
print('Keyboard shortcuts' + name)
for name in sorted(shortcuts):
shortcut = _get_shortcut_string(shortcuts[name])
if not name.startswith('_'):
print('- {0:<40}: {1:s}'.format(name, shortcut)) |
def db_set_assoc(self, server_url, handle, secret, issued, lifetime, assoc_type):
"""
Set an association. This is implemented as a method because
REPLACE INTO is not supported by PostgreSQL (and is not
standard SQL).
"""
result = self.db_get_assoc(server_url, handle)
rows = self.cur.fetchall()
if len(rows):
# Update the table since this association already exists.
return self.db_update_assoc(secret, issued, lifetime, assoc_type,
server_url, handle)
else:
# Insert a new record because this association wasn't
# found.
return self.db_new_assoc(server_url, handle, secret, issued,
lifetime, assoc_type) | Set an association. This is implemented as a method because
REPLACE INTO is not supported by PostgreSQL (and is not
standard SQL). | Below is the instruction that describes the task:
### Input:
Set an association. This is implemented as a method because
REPLACE INTO is not supported by PostgreSQL (and is not
standard SQL).
### Response:
def db_set_assoc(self, server_url, handle, secret, issued, lifetime, assoc_type):
"""
Set an association. This is implemented as a method because
REPLACE INTO is not supported by PostgreSQL (and is not
standard SQL).
"""
result = self.db_get_assoc(server_url, handle)
rows = self.cur.fetchall()
if len(rows):
# Update the table since this association already exists.
return self.db_update_assoc(secret, issued, lifetime, assoc_type,
server_url, handle)
else:
# Insert a new record because this association wasn't
# found.
return self.db_new_assoc(server_url, handle, secret, issued,
lifetime, assoc_type) |
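The comment in the record explains the select-then-insert-or-update dance as a workaround for PostgreSQL lacking REPLACE INTO. On PostgreSQL 9.5+ the same effect fits in one statement with INSERT ... ON CONFLICT; a sketch follows, where the table name, column names, and the unique constraint on (server_url, handle) are assumptions rather than the library's actual schema.

UPSERT_SQL = """
    INSERT INTO oid_associations
        (server_url, handle, secret, issued, lifetime, assoc_type)
    VALUES (%s, %s, %s, %s, %s, %s)
    ON CONFLICT (server_url, handle) DO UPDATE
        SET secret = EXCLUDED.secret,
            issued = EXCLUDED.issued,
            lifetime = EXCLUDED.lifetime,
            assoc_type = EXCLUDED.assoc_type
"""

def set_assoc(cur, server_url, handle, secret, issued, lifetime, assoc_type):
    # `cur` is assumed to be a psycopg2 cursor on a table with a unique
    # constraint covering (server_url, handle).
    cur.execute(UPSERT_SQL, (server_url, handle, secret, issued,
                             lifetime, assoc_type))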
def list_files(self, id=None, path="/"):
""" List files in an allocation directory.
https://www.nomadproject.io/docs/http/client-fs-ls.html
arguments:
- id
- path
returns: list
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
if id:
return self.request(id, params={"path": path}, method="get").json()
else:
return self.request(params={"path": path}, method="get").json() | List files in an allocation directory.
https://www.nomadproject.io/docs/http/client-fs-ls.html
arguments:
- id
- path
returns: list
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException | Below is the instruction that describes the task:
### Input:
List files in an allocation directory.
https://www.nomadproject.io/docs/http/client-fs-ls.html
arguments:
- id
- path
returns: list
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
### Response:
def list_files(self, id=None, path="/"):
""" List files in an allocation directory.
https://www.nomadproject.io/docs/http/client-fs-ls.html
arguments:
- id
- path
returns: list
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
if id:
return self.request(id, params={"path": path}, method="get").json()
else:
return self.request(params={"path": path}, method="get").json() |
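A usage sketch for the wrapper above via the python-nomad client. The attribute that exposes this class on the client (shown here as client.ls) and the response keys are assumptions taken from the Nomad HTTP API, and the allocation id is a placeholder.

import nomad

n = nomad.Nomad(host='127.0.0.1')
alloc_id = 'your-allocation-id'   # e.g. from `nomad job status`
for entry in n.client.ls.list_files(alloc_id, path='/alloc/logs'):
    print(entry['Name'], entry['IsDir'], entry['Size'])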
def _TSKFileTimeCopyToStatTimeTuple(self, tsk_file, time_value):
"""Copies a SleuthKit file object time value to a stat timestamp tuple.
Args:
tsk_file (pytsk3.File): TSK file.
time_value (str): name of the time value.
Returns:
tuple[int, int]: number of seconds since 1970-01-01 00:00:00 and fraction
of second in 100 nano seconds intervals. The number of seconds is None
on error, or if the file system does not include the requested
timestamp. The fraction of second is None on error, or if the file
system does not support sub-second precision.
Raises:
BackEndError: if the TSK File .info, .info.meta or info.fs_info
attribute is missing.
"""
if (not tsk_file or not tsk_file.info or not tsk_file.info.meta or
not tsk_file.info.fs_info):
raise errors.BackEndError(
'Missing TSK File .info, .info.meta. or .info.fs_info')
stat_time = getattr(tsk_file.info.meta, time_value, None)
stat_time_nano = None
if self._file_system_type in self._TSK_HAS_NANO_FS_TYPES:
time_value_nano = '{0:s}_nano'.format(time_value)
stat_time_nano = getattr(tsk_file.info.meta, time_value_nano, None)
# Sleuthkit 4.2.0 switched from 100 nano seconds precision to
# 1 nano seconds precision.
if stat_time_nano is not None and pytsk3.TSK_VERSION_NUM >= 0x040200ff:
stat_time_nano /= 100
return stat_time, stat_time_nano | Copies a SleuthKit file object time value to a stat timestamp tuple.
Args:
tsk_file (pytsk3.File): TSK file.
time_value (str): name of the time value.
Returns:
tuple[int, int]: number of seconds since 1970-01-01 00:00:00 and fraction
of second in 100 nano seconds intervals. The number of seconds is None
on error, or if the file system does not include the requested
timestamp. The fraction of second is None on error, or if the file
system does not support sub-second precision.
Raises:
BackEndError: if the TSK File .info, .info.meta or info.fs_info
attribute is missing. | Below is the instruction that describes the task:
### Input:
Copies a SleuthKit file object time value to a stat timestamp tuple.
Args:
tsk_file (pytsk3.File): TSK file.
time_value (str): name of the time value.
Returns:
tuple[int, int]: number of seconds since 1970-01-01 00:00:00 and fraction
of second in 100 nano seconds intervals. The number of seconds is None
on error, or if the file system does not include the requested
timestamp. The fraction of second is None on error, or if the file
system does not support sub-second precision.
Raises:
BackEndError: if the TSK File .info, .info.meta or info.fs_info
attribute is missing.
### Response:
def _TSKFileTimeCopyToStatTimeTuple(self, tsk_file, time_value):
"""Copies a SleuthKit file object time value to a stat timestamp tuple.
Args:
tsk_file (pytsk3.File): TSK file.
time_value (str): name of the time value.
Returns:
tuple[int, int]: number of seconds since 1970-01-01 00:00:00 and fraction
of second in 100 nano seconds intervals. The number of seconds is None
on error, or if the file system does not include the requested
timestamp. The fraction of second is None on error, or if the file
system does not support sub-second precision.
Raises:
BackEndError: if the TSK File .info, .info.meta or info.fs_info
attribute is missing.
"""
if (not tsk_file or not tsk_file.info or not tsk_file.info.meta or
not tsk_file.info.fs_info):
raise errors.BackEndError(
'Missing TSK File .info, .info.meta. or .info.fs_info')
stat_time = getattr(tsk_file.info.meta, time_value, None)
stat_time_nano = None
if self._file_system_type in self._TSK_HAS_NANO_FS_TYPES:
time_value_nano = '{0:s}_nano'.format(time_value)
stat_time_nano = getattr(tsk_file.info.meta, time_value_nano, None)
# Sleuthkit 4.2.0 switched from 100 nano seconds precision to
# 1 nano seconds precision.
if stat_time_nano is not None and pytsk3.TSK_VERSION_NUM >= 0x040200ff:
stat_time_nano /= 100
return stat_time, stat_time_nano |
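The tuple returned above is POSIX seconds plus a fraction expressed in 100-nanosecond intervals, so converting it to a datetime means dividing the fraction by ten to get microseconds. The sample values below are illustrative.

import datetime

stat_time, stat_time_nano = 1281643591, 5468750   # seconds, 100 ns intervals
dt = (datetime.datetime(1970, 1, 1) +
      datetime.timedelta(seconds=stat_time,
                         microseconds=stat_time_nano / 10.0))
print(dt.isoformat())   # 2010-08-12T20:06:31.546875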