code (string, 75 to 104k chars) | docstring (string, 1 to 46.9k chars) | text (string, 164 to 112k chars) |
---|---|---|
def removevalues(self, key, values):
"""
Removes all <values> from the values of <key>. If <key> has no
remaining values after removevalues(), the key is popped.
Example:
omd = omdict([(1, 1), (1, 11), (1, 1), (1, 111)])
omd.removevalues(1, [1, 111])
omd.allitems() == [(1, 11)]
Returns: <self>.
"""
self.setlist(key, [v for v in self.getlist(key) if v not in values])
return self | Removes all <values> from the values of <key>. If <key> has no
remaining values after removevalues(), the key is popped.
Example:
omd = omdict([(1, 1), (1, 11), (1, 1), (1, 111)])
omd.removevalues(1, [1, 111])
omd.allitems() == [(1, 11)]
Returns: <self>. | Below is the instruction that describes the task:
### Input:
Removes all <values> from the values of <key>. If <key> has no
remaining values after removevalues(), the key is popped.
Example:
omd = omdict([(1, 1), (1, 11), (1, 1), (1, 111)])
omd.removevalues(1, [1, 111])
omd.allitems() == [(1, 11)]
Returns: <self>.
### Response:
def removevalues(self, key, values):
"""
Removes all <values> from the values of <key>. If <key> has no
remaining values after removevalues(), the key is popped.
Example:
omd = omdict([(1, 1), (1, 11), (1, 1), (1, 111)])
omd.removevalues(1, [1, 111])
omd.allitems() == [(1, 11)]
Returns: <self>.
"""
self.setlist(key, [v for v in self.getlist(key) if v not in values])
return self |
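A quick usage sketch for removevalues, assuming the omdict class above comes from the orderedmultidict package (the import path is an assumption):
from orderedmultidict import omdict  # assumed home of the omdict class above

omd = omdict([(1, 1), (1, 11), (1, 1), (1, 111)])
omd.removevalues(1, [1, 111])        # strip every 1 and 111 stored under key 1
assert omd.allitems() == [(1, 11)]   # only the value 11 remains
omd.removevalues(1, [11])            # removing the last remaining value pops the key
assert 1 not in omd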
def MI_getInstance(self,
env,
instanceName,
propertyList):
# pylint: disable=invalid-name
"""Return a specific CIM instance
Implements the WBEM operation GetInstance in terms
of the get_instance method. A derived class will not normally
override this method.
"""
logger = env.get_logger()
logger.log_debug('CIMProvider2 MI_getInstance called...')
plist = None
if propertyList is not None:
plist = [s.lower() for s in propertyList]
plist += [s.lower() for s in instanceName.keybindings.keys()]
model = pywbem.CIMInstance(classname=instanceName.classname,
path=instanceName, property_list=plist)
model.update(model.path.keybindings)
rval = self.get_instance(env=env, model=model)
logger.log_debug('CIMProvider2 MI_getInstance returning')
if rval is None:
raise pywbem.CIMError(pywbem.CIM_ERR_NOT_FOUND, "")
return rval | Return a specific CIM instance
Implements the WBEM operation GetInstance in terms
of the get_instance method. A derived class will not normally
override this method. | Below is the instruction that describes the task:
### Input:
Return a specific CIM instance
Implements the WBEM operation GetInstance in terms
of the get_instance method. A derived class will not normally
override this method.
### Response:
def MI_getInstance(self,
env,
instanceName,
propertyList):
# pylint: disable=invalid-name
"""Return a specific CIM instance
Implements the WBEM operation GetInstance in terms
of the get_instance method. A derived class will not normally
override this method.
"""
logger = env.get_logger()
logger.log_debug('CIMProvider2 MI_getInstance called...')
plist = None
if propertyList is not None:
plist = [s.lower() for s in propertyList]
plist += [s.lower() for s in instanceName.keybindings.keys()]
model = pywbem.CIMInstance(classname=instanceName.classname,
path=instanceName, property_list=plist)
model.update(model.path.keybindings)
rval = self.get_instance(env=env, model=model)
logger.log_debug('CIMProvider2 MI_getInstance returning')
if rval is None:
raise pywbem.CIMError(pywbem.CIM_ERR_NOT_FOUND, "")
return rval |
def get_local_client(
c_path=os.path.join(syspaths.CONFIG_DIR, 'master'),
mopts=None,
skip_perm_errors=False,
io_loop=None,
auto_reconnect=False):
'''
.. versionadded:: 2014.7.0
Read in the config and return the correct LocalClient object based on
the configured transport
:param IOLoop io_loop: io_loop used for events.
Pass in an io_loop if you want asynchronous
operation for obtaining events. Eg use of
set_event_handler() API. Otherwise, operation
will be synchronous.
'''
if mopts:
opts = mopts
else:
# Late import to prevent circular import
import salt.config
opts = salt.config.client_config(c_path)
# TODO: AIO core is separate from transport
return LocalClient(
mopts=opts,
skip_perm_errors=skip_perm_errors,
io_loop=io_loop,
auto_reconnect=auto_reconnect) | .. versionadded:: 2014.7.0
Read in the config and return the correct LocalClient object based on
the configured transport
:param IOLoop io_loop: io_loop used for events.
Pass in an io_loop if you want asynchronous
operation for obtaining events. Eg use of
set_event_handler() API. Otherwise, operation
will be synchronous. | Below is the instruction that describes the task:
### Input:
.. versionadded:: 2014.7.0
Read in the config and return the correct LocalClient object based on
the configured transport
:param IOLoop io_loop: io_loop used for events.
Pass in an io_loop if you want asynchronous
operation for obtaining events. Eg use of
set_event_handler() API. Otherwise, operation
will be synchronous.
### Response:
def get_local_client(
c_path=os.path.join(syspaths.CONFIG_DIR, 'master'),
mopts=None,
skip_perm_errors=False,
io_loop=None,
auto_reconnect=False):
'''
.. versionadded:: 2014.7.0
Read in the config and return the correct LocalClient object based on
the configured transport
:param IOLoop io_loop: io_loop used for events.
Pass in an io_loop if you want asynchronous
operation for obtaining events. Eg use of
set_event_handler() API. Otherwise, operation
will be synchronous.
'''
if mopts:
opts = mopts
else:
# Late import to prevent circular import
import salt.config
opts = salt.config.client_config(c_path)
# TODO: AIO core is separate from transport
return LocalClient(
mopts=opts,
skip_perm_errors=skip_perm_errors,
io_loop=io_loop,
auto_reconnect=auto_reconnect) |
def makeSocket(self, timeout=1):
"""Override SocketHandler.makeSocket, to allow creating wrapped
TLS sockets"""
plain_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if hasattr(plain_socket, 'settimeout'):
plain_socket.settimeout(timeout)
wrapped_socket = ssl.wrap_socket(
plain_socket,
ca_certs=self.ca_certs,
cert_reqs=self.reqs,
keyfile=self.keyfile,
certfile=self.certfile
)
wrapped_socket.connect((self.host, self.port))
return wrapped_socket | Override SocketHandler.makeSocket, to allow creating wrapped
TLS sockets | Below is the instruction that describes the task:
### Input:
Override SocketHandler.makeSocket, to allow creating wrapped
TLS sockets
### Response:
def makeSocket(self, timeout=1):
"""Override SocketHandler.makeSocket, to allow creating wrapped
TLS sockets"""
plain_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if hasattr(plain_socket, 'settimeout'):
plain_socket.settimeout(timeout)
wrapped_socket = ssl.wrap_socket(
plain_socket,
ca_certs=self.ca_certs,
cert_reqs=self.reqs,
keyfile=self.keyfile,
certfile=self.certfile
)
wrapped_socket.connect((self.host, self.port))
return wrapped_socket |
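Note that ssl.wrap_socket, used above, is deprecated and has been removed in newer Python releases. A rough sketch of the same wrapping step with ssl.SSLContext follows; the parameter names mirror the handler attributes above, and it is an equivalent-in-spirit sketch rather than a drop-in replacement:
import socket
import ssl

def make_tls_socket(host, port, ca_certs, certfile=None, keyfile=None, timeout=1):
    """Sketch: wrap a plain TCP socket in TLS using ssl.SSLContext."""
    context = ssl.create_default_context(cafile=ca_certs)
    if certfile:
        context.load_cert_chain(certfile=certfile, keyfile=keyfile)
    plain_socket = socket.create_connection((host, port), timeout=timeout)
    # server_hostname enables SNI and hostname verification
    return context.wrap_socket(plain_socket, server_hostname=host)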
def formatted_command(self):
"""
If we have ``bash``, then the command is ``/bin/bash -c <bash>``, whereas
if the ``command`` is set, then we just return that.
"""
bash = self.bash
if bash not in (None, "", NotSpecified) and callable(bash):
bash = bash()
if bash not in (None, "", NotSpecified):
return "{0} -c {1}".format(self.resolved_shell, shlex_quote(bash))
command = self.command
if command not in (None, "", NotSpecified) and callable(command):
command = command()
if command not in (None, "", NotSpecified):
return command
return None | If we have ``bash``, then the command is ``/bin/bash -c <bash>``, whereas
if the ``command`` is set, then we just return that. | Below is the instruction that describes the task:
### Input:
If we have ``bash``, then the command is ``/bin/bash -c <bash>``, whereas
if the ``command`` is set, then we just return that.
### Response:
def formatted_command(self):
"""
If we have ``bash``, then the command is ``/bin/bash -c <bash>``, whereas
if the ``command`` is set, then we just return that.
"""
bash = self.bash
if bash not in (None, "", NotSpecified) and callable(bash):
bash = bash()
if bash not in (None, "", NotSpecified):
return "{0} -c {1}".format(self.resolved_shell, shlex_quote(bash))
command = self.command
if command not in (None, "", NotSpecified) and callable(command):
command = command()
if command not in (None, "", NotSpecified):
return command
return None |
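A small illustration of how the bash branch assembles the final string; shlex.quote is the standard-library spelling of the shlex_quote helper used above, and the shell path and command below are made up:
import shlex

resolved_shell = "/bin/bash"                      # stand-in for self.resolved_shell
bash = 'echo "hello world" && ls -l'
print("{0} -c {1}".format(resolved_shell, shlex.quote(bash)))
# -> /bin/bash -c 'echo "hello world" && ls -l'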
def parseEvent(self, result, i):
"""Parse the current event and extract data."""
fmt = '%Y-%m-%dT%H:%M:%SZ'
due = 0
delay = 0
real_time = 'n'
number = result['stopEvents'][i]['transportation']['number']
planned = datetime.strptime(result['stopEvents'][i]
['departureTimePlanned'], fmt)
destination = result['stopEvents'][i]['transportation']['destination']['name']
mode = self.get_mode(result['stopEvents'][i]['transportation']['product']['class'])
estimated = planned
if 'isRealtimeControlled' in result['stopEvents'][i]:
real_time = 'y'
estimated = datetime.strptime(result['stopEvents'][i]
['departureTimeEstimated'], fmt)
# Only deal with future leave times
if estimated > datetime.utcnow():
due = self.get_due(estimated)
delay = self.get_delay(planned, estimated)
return[
number,
due,
delay,
planned,
estimated,
real_time,
destination,
mode
]
else:
return None | Parse the current event and extract data. | Below is the instruction that describes the task:
### Input:
Parse the current event and extract data.
### Response:
def parseEvent(self, result, i):
"""Parse the current event and extract data."""
fmt = '%Y-%m-%dT%H:%M:%SZ'
due = 0
delay = 0
real_time = 'n'
number = result['stopEvents'][i]['transportation']['number']
planned = datetime.strptime(result['stopEvents'][i]
['departureTimePlanned'], fmt)
destination = result['stopEvents'][i]['transportation']['destination']['name']
mode = self.get_mode(result['stopEvents'][i]['transportation']['product']['class'])
estimated = planned
if 'isRealtimeControlled' in result['stopEvents'][i]:
real_time = 'y'
estimated = datetime.strptime(result['stopEvents'][i]
['departureTimeEstimated'], fmt)
# Only deal with future leave times
if estimated > datetime.utcnow():
due = self.get_due(estimated)
delay = self.get_delay(planned, estimated)
return[
number,
due,
delay,
planned,
estimated,
real_time,
destination,
mode
]
else:
return None |
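A self-contained illustration of the timestamp handling used above (the sample times are invented):
from datetime import datetime

fmt = '%Y-%m-%dT%H:%M:%SZ'
planned = datetime.strptime('2023-05-01T08:30:00Z', fmt)
estimated = datetime.strptime('2023-05-01T08:34:00Z', fmt)
delay_minutes = (estimated - planned).total_seconds() / 60
print(planned, estimated, delay_minutes)  # ... 4.0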
def summarise_pdfs(pdfs):
"""
Collate the first page from each of the PDFs provided into a single PDF.
:param pdfs:
The contents of several PDF files.
:type pdfs:
list of str
:returns:
The contents of a single PDF, which can be written directly to disk.
"""
# Ignore None.
print('Summarising {0} articles ({1} had errors)'.format(
len(pdfs), pdfs.count(None)))
pdfs = [_ for _ in pdfs if _ is not None]
summary = PdfFileWriter()
for pdf in pdfs:
summary.addPage(PdfFileReader(StringIO(pdf)).getPage(0))
return summary | Collate the first page from each of the PDFs provided into a single PDF.
:param pdfs:
The contents of several PDF files.
:type pdfs:
list of str
:returns:
The contents of a single PDF, which can be written directly to disk. | Below is the instruction that describes the task:
### Input:
Collate the first page from each of the PDFs provided into a single PDF.
:param pdfs:
The contents of several PDF files.
:type pdfs:
list of str
:returns:
The contents of a single PDF, which can be written directly to disk.
### Response:
def summarise_pdfs(pdfs):
"""
Collate the first page from each of the PDFs provided into a single PDF.
:param pdfs:
The contents of several PDF files.
:type pdfs:
list of str
:returns:
The contents of a single PDF, which can be written directly to disk.
"""
# Ignore None.
print('Summarising {0} articles ({1} had errors)'.format(
len(pdfs), pdfs.count(None)))
pdfs = [_ for _ in pdfs if _ is not None]
summary = PdfFileWriter()
for pdf in pdfs:
summary.addPage(PdfFileReader(StringIO(pdf)).getPage(0))
return summary |
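The function above targets the old PyPDF2/Python 2 API (PdfFileWriter, PdfFileReader, StringIO). A rough modern equivalent of the same collation step, written against the pypdf package with bytes input, might look like this sketch; treat the import path and method names as assumptions for your installed version:
from io import BytesIO
from pypdf import PdfReader, PdfWriter

def summarise_pdfs_bytes(pdfs):
    """Collate the first page of each PDF (given as bytes) into one writer."""
    summary = PdfWriter()
    for pdf in pdfs:
        if pdf is None:  # skip articles that failed to download
            continue
        summary.add_page(PdfReader(BytesIO(pdf)).pages[0])
    return summary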
def _get_num_similar_objects(self, obj):
"""Get any statement lines which would be considered a duplicate of obj"""
return StatementLine.objects.filter(
date=obj.date, amount=obj.amount, description=obj.description
).count() | Get any statement lines which would be considered a duplicate of obj | Below is the instruction that describes the task:
### Input:
Get any statement lines which would be considered a duplicate of obj
### Response:
def _get_num_similar_objects(self, obj):
"""Get any statement lines which would be considered a duplicate of obj"""
return StatementLine.objects.filter(
date=obj.date, amount=obj.amount, description=obj.description
).count() |
def walk(self, node, name='', list=list, len=len, type=type):
"""Walk the tree starting at a given node.
Maintain a stack of nodes.
"""
pre_handlers = self.pre_handlers.get
post_handlers = self.post_handlers.get
nodestack = self.nodestack
emptystack = len(nodestack)
append, pop = nodestack.append, nodestack.pop
append([node, name, list(iter_node(node, name + '_item')), -1])
while len(nodestack) > emptystack:
node, name, subnodes, index = nodestack[-1]
if index >= len(subnodes):
handler = (post_handlers(type(node).__name__) or
post_handlers(name + '_name'))
if handler is None:
pop()
continue
self.cur_node = node
self.cur_name = name
handler()
current = nodestack and nodestack[-1]
popstack = current and current[0] is node
if popstack and current[-1] >= len(current[-2]):
pop()
continue
nodestack[-1][-1] = index + 1
if index < 0:
handler = (pre_handlers(type(node).__name__) or
pre_handlers(name + '_name'))
if handler is not None:
self.cur_node = node
self.cur_name = name
if handler():
pop()
else:
node, name = subnodes[index]
append([node, name, list(iter_node(node, name + '_item')), -1]) | Walk the tree starting at a given node.
Maintain a stack of nodes. | Below is the instruction that describes the task:
### Input:
Walk the tree starting at a given node.
Maintain a stack of nodes.
### Response:
def walk(self, node, name='', list=list, len=len, type=type):
"""Walk the tree starting at a given node.
Maintain a stack of nodes.
"""
pre_handlers = self.pre_handlers.get
post_handlers = self.post_handlers.get
nodestack = self.nodestack
emptystack = len(nodestack)
append, pop = nodestack.append, nodestack.pop
append([node, name, list(iter_node(node, name + '_item')), -1])
while len(nodestack) > emptystack:
node, name, subnodes, index = nodestack[-1]
if index >= len(subnodes):
handler = (post_handlers(type(node).__name__) or
post_handlers(name + '_name'))
if handler is None:
pop()
continue
self.cur_node = node
self.cur_name = name
handler()
current = nodestack and nodestack[-1]
popstack = current and current[0] is node
if popstack and current[-1] >= len(current[-2]):
pop()
continue
nodestack[-1][-1] = index + 1
if index < 0:
handler = (pre_handlers(type(node).__name__) or
pre_handlers(name + '_name'))
if handler is not None:
self.cur_node = node
self.cur_name = name
if handler():
pop()
else:
node, name = subnodes[index]
append([node, name, list(iter_node(node, name + '_item')), -1]) |
def save(self):
"""This function is called by the parent dialog window when the user selects to save the settings."""
if self.path is None: # Delete requested, so remove the current path from sys.path, if present
if self.config_manager.userCodeDir is not None:
sys.path.remove(self.config_manager.userCodeDir)
self.config_manager.userCodeDir = None
logger.info("Removed custom module search path from configuration and sys.path.")
else:
if self.path != self.config_manager.userCodeDir:
if self.config_manager.userCodeDir is not None:
sys.path.remove(self.config_manager.userCodeDir)
sys.path.append(self.path)
self.config_manager.userCodeDir = self.path
logger.info("Saved custom module search path and added it to sys.path: {}".format(self.path)) | This function is called by the parent dialog window when the user selects to save the settings. | Below is the the instruction that describes the task:
### Input:
This function is called by the parent dialog window when the user selects to save the settings.
### Response:
def save(self):
"""This function is called by the parent dialog window when the user selects to save the settings."""
if self.path is None: # Delete requested, so remove the current path from sys.path, if present
if self.config_manager.userCodeDir is not None:
sys.path.remove(self.config_manager.userCodeDir)
self.config_manager.userCodeDir = None
logger.info("Removed custom module search path from configuration and sys.path.")
else:
if self.path != self.config_manager.userCodeDir:
if self.config_manager.userCodeDir is not None:
sys.path.remove(self.config_manager.userCodeDir)
sys.path.append(self.path)
self.config_manager.userCodeDir = self.path
logger.info("Saved custom module search path and added it to sys.path: {}".format(self.path)) |
def layer(self, img, x=0, y=0, name=""):
"""Creates a new layer from file, Layer, PIL Image.
If img is an image file or PIL Image object,
Creates a new layer with the given image file.
The image is positioned on the canvas at x, y.
If img is a Layer,
uses that layer's x and y position and name.
"""
from types import StringType
if isinstance(img, Image.Image):
img = img.convert("RGBA")
self.layers.append(Layer(self, img, x, y, name))
return len(self.layers)-1
if isinstance(img, Layer):
img.canvas = self
self.layers.append(img)
return len(self.layers)-1
if type(img) == StringType:
img = Image.open(img)
img = img.convert("RGBA")
self.layers.append(Layer(self, img, x, y, name))
return len(self.layers)-1 | Creates a new layer from file, Layer, PIL Image.
If img is an image file or PIL Image object,
Creates a new layer with the given image file.
The image is positioned on the canvas at x, y.
If img is a Layer,
uses that layer's x and y position and name. | Below is the instruction that describes the task:
### Input:
Creates a new layer from file, Layer, PIL Image.
If img is an image file or PIL Image object,
Creates a new layer with the given image file.
The image is positioned on the canvas at x, y.
If img is a Layer,
uses that layer's x and y position and name.
### Response:
def layer(self, img, x=0, y=0, name=""):
"""Creates a new layer from file, Layer, PIL Image.
If img is an image file or PIL Image object,
Creates a new layer with the given image file.
The image is positioned on the canvas at x, y.
If img is a Layer,
uses that layer's x and y position and name.
"""
from types import StringType
if isinstance(img, Image.Image):
img = img.convert("RGBA")
self.layers.append(Layer(self, img, x, y, name))
return len(self.layers)-1
if isinstance(img, Layer):
img.canvas = self
self.layers.append(img)
return len(self.layers)-1
if type(img) == StringType:
img = Image.open(img)
img = img.convert("RGBA")
self.layers.append(Layer(self, img, x, y, name))
return len(self.layers)-1 |
def try_ntimes(_howmany, func, *argv, **kwarg):
"""Try a function n times.
Try to execute func(*argv, **kwarg) ``_howmany`` times. If it successfully runs
once, return as normal. If it fails every time, raise the
exception from the last run.
**中文文档** (Chinese documentation)
Repeatedly try a function or method ``_howmany`` times.
func is run inside try/except several times; as soon as one attempt succeeds, the call returns normally.
If no attempt succeeds, the behaviour is the same as the last execution of func(*argv, **kwarg).
This implementation relies on Python's ability to pass a function as an argument to another function:
func's own arguments are forwarded unchanged through try_ntimes, and only the extra ``_howmany``
parameter controls the number of repetitions.
"""
if (not isinstance(_howmany, int)) or (_howmany < 1):
raise Exception("'_howmany' argument has to be int and greater than 0")
counter = 1
while counter <= _howmany:
try:
return func(*argv, **kwarg)
except Exception as e:
current_exception = e
counter += 1
raise current_exception | Try a function n times.
Try to execute func(*argv, **kwarg) ``_howmany`` times. If it successfully runs
once, return as normal. If it fails every time, raise the
exception from the last run.
**中文文档** (Chinese documentation)
Repeatedly try a function or method ``_howmany`` times.
func is run inside try/except several times; as soon as one attempt succeeds, the call returns normally.
If no attempt succeeds, the behaviour is the same as the last execution of func(*argv, **kwarg).
This implementation relies on Python's ability to pass a function as an argument to another function:
func's own arguments are forwarded unchanged through try_ntimes, and only the extra ``_howmany``
parameter controls the number of repetitions. | Below is the instruction that describes the task:
### Input:
Try a function n times.
Try to execute func(*argv, **kwarg) ``_howmany`` times. If it successfully runs
once, return as normal. If it fails every time, raise the
exception from the last run.
**中文文档** (Chinese documentation)
Repeatedly try a function or method ``_howmany`` times.
func is run inside try/except several times; as soon as one attempt succeeds, the call returns normally.
If no attempt succeeds, the behaviour is the same as the last execution of func(*argv, **kwarg).
This implementation relies on Python's ability to pass a function as an argument to another function:
func's own arguments are forwarded unchanged through try_ntimes, and only the extra ``_howmany``
parameter controls the number of repetitions.
### Response:
def try_ntimes(_howmany, func, *argv, **kwarg):
"""Try a function n times.
Try to execute func(*argv, **kwarg) ``_howmany`` times. If it successfully runs
once, return as normal. If it fails every time, raise the
exception from the last run.
**中文文档** (Chinese documentation)
Repeatedly try a function or method ``_howmany`` times.
func is run inside try/except several times; as soon as one attempt succeeds, the call returns normally.
If no attempt succeeds, the behaviour is the same as the last execution of func(*argv, **kwarg).
This implementation relies on Python's ability to pass a function as an argument to another function:
func's own arguments are forwarded unchanged through try_ntimes, and only the extra ``_howmany``
parameter controls the number of repetitions.
"""
if (not isinstance(_howmany, int)) or (_howmany < 1):
raise Exception("'_howmany' argument has to be int and greater than 0")
counter = 1
while counter <= _howmany:
try:
return func(*argv, **kwarg)
except Exception as e:
current_exception = e
counter += 1
raise current_exception |
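A usage sketch for try_ntimes; flaky_fetch is an invented stand-in for any callable that sometimes raises:
import random

def flaky_fetch(url):
    """Pretend network call that fails roughly half the time."""
    if random.random() < 0.5:
        raise ConnectionError("temporary failure")
    return "payload from " + url

# Retry up to 5 times; func's own arguments pass through unchanged.
result = try_ntimes(5, flaky_fetch, "https://example.com/data")
print(result)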
def filter_unique_peptides(peptides, score, ns):
""" Filters unique peptides from multiple Percolator output XML files.
Takes a dir with a set of XMLs, a score to filter on and a namespace.
Outputs an ElementTree.
"""
scores = {'q': 'q_value',
'pep': 'pep',
'p': 'p_value',
'svm': 'svm_score'}
highest = {}
for el in peptides:
featscore = float(el.xpath('xmlns:%s' % scores[score],
namespaces=ns)[0].text)
seq = reader.get_peptide_seq(el, ns)
if seq not in highest:
highest[seq] = {
'pep_el': formatting.stringify_strip_namespace_declaration(
el, ns), 'score': featscore}
if score == 'svm': # greater than score is accepted
if featscore > highest[seq]['score']:
highest[seq] = {
'pep_el':
formatting.stringify_strip_namespace_declaration(el, ns),
'score': featscore}
else: # lower than score is accepted
if featscore < highest[seq]['score']:
highest[seq] = {
'pep_el':
formatting.stringify_strip_namespace_declaration(el, ns),
'score': featscore}
formatting.clear_el(el)
for pep in list(highest.values()):
yield pep['pep_el'] | Filters unique peptides from multiple Percolator output XML files.
Takes a dir with a set of XMLs, a score to filter on and a namespace.
Outputs an ElementTree. | Below is the instruction that describes the task:
### Input:
Filters unique peptides from multiple Percolator output XML files.
Takes a dir with a set of XMLs, a score to filter on and a namespace.
Outputs an ElementTree.
### Response:
def filter_unique_peptides(peptides, score, ns):
""" Filters unique peptides from multiple Percolator output XML files.
Takes a dir with a set of XMLs, a score to filter on and a namespace.
Outputs an ElementTree.
"""
scores = {'q': 'q_value',
'pep': 'pep',
'p': 'p_value',
'svm': 'svm_score'}
highest = {}
for el in peptides:
featscore = float(el.xpath('xmlns:%s' % scores[score],
namespaces=ns)[0].text)
seq = reader.get_peptide_seq(el, ns)
if seq not in highest:
highest[seq] = {
'pep_el': formatting.stringify_strip_namespace_declaration(
el, ns), 'score': featscore}
if score == 'svm': # greater than score is accepted
if featscore > highest[seq]['score']:
highest[seq] = {
'pep_el':
formatting.stringify_strip_namespace_declaration(el, ns),
'score': featscore}
else: # lower than score is accepted
if featscore < highest[seq]['score']:
highest[seq] = {
'pep_el':
formatting.stringify_strip_namespace_declaration(el, ns),
'score': featscore}
formatting.clear_el(el)
for pep in list(highest.values()):
yield pep['pep_el'] |
def filter_on_attributes(ava, required=None, optional=None, acs=None,
fail_on_unfulfilled_requirements=True):
""" Filter
:param ava: An attribute value assertion as a dictionary
:param required: list of RequestedAttribute instances defined to be
required
:param optional: list of RequestedAttribute instances defined to be
optional
:param fail_on_unfulfilled_requirements: Whether to fail when required
attributes are missing.
:return: The modified attribute value assertion
"""
def _match_attr_name(attr, ava):
local_name = None
for a in ['name_format', 'friendly_name']:
_val = attr.get(a)
if _val:
if a == 'name_format':
local_name = get_local_name(acs, attr['name'], _val)
else:
local_name = _val
break
if local_name:
_fn = _match(local_name, ava)
else:
_fn = None
if not _fn: # In the unlikely case that someone has provided us with
# URIs as attribute names
_fn = _match(attr["name"], ava)
return _fn
def _apply_attr_value_restrictions(attr, res, must=False):
try:
values = [av["text"] for av in attr["attribute_value"]]
except KeyError:
values = []
try:
res[_fn].extend(_filter_values(ava[_fn], values))
except KeyError:
res[_fn] = _filter_values(ava[_fn], values)
return _filter_values(ava[_fn], values, must)
res = {}
if required is None:
required = []
for attr in required:
_fn = _match_attr_name(attr, ava)
if _fn:
_apply_attr_value_restrictions(attr, res, True)
elif fail_on_unfulfilled_requirements:
desc = "Required attribute missing: '%s'" % (attr["name"])
raise MissingValue(desc)
if optional is None:
optional = []
for attr in optional:
_fn = _match_attr_name(attr, ava)
if _fn:
_apply_attr_value_restrictions(attr, res, False)
return res | Filter
:param ava: An attribute value assertion as a dictionary
:param required: list of RequestedAttribute instances defined to be
required
:param optional: list of RequestedAttribute instances defined to be
optional
:param fail_on_unfulfilled_requirements: Whether to fail when required
attributes are missing.
:return: The modified attribute value assertion | Below is the instruction that describes the task:
### Input:
Filter
:param ava: An attribute value assertion as a dictionary
:param required: list of RequestedAttribute instances defined to be
required
:param optional: list of RequestedAttribute instances defined to be
optional
:param fail_on_unfulfilled_requirements: Whether to fail when required
attributes are missing.
:return: The modified attribute value assertion
### Response:
def filter_on_attributes(ava, required=None, optional=None, acs=None,
fail_on_unfulfilled_requirements=True):
""" Filter
:param ava: An attribute value assertion as a dictionary
:param required: list of RequestedAttribute instances defined to be
required
:param optional: list of RequestedAttribute instances defined to be
optional
:param fail_on_unfulfilled_requirements: Whether to fail when required
attributes are missing.
:return: The modified attribute value assertion
"""
def _match_attr_name(attr, ava):
local_name = None
for a in ['name_format', 'friendly_name']:
_val = attr.get(a)
if _val:
if a == 'name_format':
local_name = get_local_name(acs, attr['name'], _val)
else:
local_name = _val
break
if local_name:
_fn = _match(local_name, ava)
else:
_fn = None
if not _fn: # In the unlikely case that someone has provided us with
# URIs as attribute names
_fn = _match(attr["name"], ava)
return _fn
def _apply_attr_value_restrictions(attr, res, must=False):
try:
values = [av["text"] for av in attr["attribute_value"]]
except KeyError:
values = []
try:
res[_fn].extend(_filter_values(ava[_fn], values))
except KeyError:
res[_fn] = _filter_values(ava[_fn], values)
return _filter_values(ava[_fn], values, must)
res = {}
if required is None:
required = []
for attr in required:
_fn = _match_attr_name(attr, ava)
if _fn:
_apply_attr_value_restrictions(attr, res, True)
elif fail_on_unfulfilled_requirements:
desc = "Required attribute missing: '%s'" % (attr["name"])
raise MissingValue(desc)
if optional is None:
optional = []
for attr in optional:
_fn = _match_attr_name(attr, ava)
if _fn:
_apply_attr_value_restrictions(attr, res, False)
return res |
def discussion_is_still_open(self, discussion_type, auto_close_after):
"""
Checks if a type of discussion is still open
after a certain number of days.
"""
discussion_enabled = getattr(self, discussion_type)
if (discussion_enabled and isinstance(auto_close_after, int) and
auto_close_after >= 0):
return (timezone.now() - (
self.start_publication or self.publication_date)).days < \
auto_close_after
return discussion_enabled | Checks if a type of discussion is still open
after a certain number of days. | Below is the instruction that describes the task:
### Input:
Checks if a type of discussion is still open
after a certain number of days.
### Response:
def discussion_is_still_open(self, discussion_type, auto_close_after):
"""
Checks if a type of discussion is still open
after a certain number of days.
"""
discussion_enabled = getattr(self, discussion_type)
if (discussion_enabled and isinstance(auto_close_after, int) and
auto_close_after >= 0):
return (timezone.now() - (
self.start_publication or self.publication_date)).days < \
auto_close_after
return discussion_enabled |
def deepish_copy(org):
"""Improved speed deep copy for dictionaries of simple python types.
Thanks to Gregg Lind:
http://writeonly.wordpress.com/2009/05/07/deepcopy-is-a-pig-for-simple-data/
"""
out = dict().fromkeys(org)
for k, v in org.items():
if isinstance(v, dict):
out[k] = deepish_copy(v)
else:
try:
out[k] = v.copy() # dicts, sets
except AttributeError:
try:
out[k] = v[:] # lists, tuples, strings, unicode
except TypeError:
out[k] = v # ints
return out | Improved speed deep copy for dictionaries of simple python types.
Thanks to Gregg Lind:
http://writeonly.wordpress.com/2009/05/07/deepcopy-is-a-pig-for-simple-data/ | Below is the instruction that describes the task:
### Input:
Improved speed deep copy for dictionaries of simple python types.
Thanks to Gregg Lind:
http://writeonly.wordpress.com/2009/05/07/deepcopy-is-a-pig-for-simple-data/
### Response:
def deepish_copy(org):
"""Improved speed deep copy for dictionaries of simple python types.
Thanks to Gregg Lind:
http://writeonly.wordpress.com/2009/05/07/deepcopy-is-a-pig-for-simple-data/
"""
out = dict().fromkeys(org)
for k, v in org.items():
if isinstance(v, dict):
out[k] = deepish_copy(v)
else:
try:
out[k] = v.copy() # dicts, sets
except AttributeError:
try:
out[k] = v[:] # lists, tuples, strings, unicode
except TypeError:
out[k] = v # ints
return out |
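A short demonstration of deepish_copy and its limits: nested dicts are copied recursively, and lists, sets and dicts are copied one level deep, so objects stored inside those containers are still shared. The sample data below is made up:
original = {"scores": [1, 2, 3], "meta": {"tags": {"a", "b"}, "count": 7}}
clone = deepish_copy(original)

clone["scores"].append(4)          # the list itself was copied
clone["meta"]["count"] = 99        # the nested dict was copied too
print(original["scores"])          # [1, 2, 3]
print(original["meta"]["count"])   # 7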
def close(self):
"""Closes the record file."""
if not self.is_open:
return
if self.writable:
check_call(_LIB.MXRecordIOWriterFree(self.handle))
else:
check_call(_LIB.MXRecordIOReaderFree(self.handle))
self.is_open = False
self.pid = None | Closes the record file. | Below is the instruction that describes the task:
### Input:
Closes the record file.
### Response:
def close(self):
"""Closes the record file."""
if not self.is_open:
return
if self.writable:
check_call(_LIB.MXRecordIOWriterFree(self.handle))
else:
check_call(_LIB.MXRecordIOReaderFree(self.handle))
self.is_open = False
self.pid = None |
def is_valid_file(filename):
"""
Check if the specified file exists and is not empty
:param filename: full path to the file that needs to be checked
:return: True if the file exists and is not empty, False otherwise
"""
if os.path.exists(filename):
if not os.path.getsize(filename):
logger.warning('%s : file is empty.', filename)
return False
else:
logger.warning('%s : file does not exist.', filename)
return False
return True | Check if the specified file exists and is not empty
:param filename: full path to the file that needs to be checked
:return: True if the file exists and is not empty, False otherwise | Below is the instruction that describes the task:
### Input:
Check if the specified file exists and is not empty
:param filename: full path to the file that needs to be checked
:return: True if the file exists and is not empty, False otherwise
### Response:
def is_valid_file(filename):
"""
Check if the specified file exists and is not empty
:param filename: full path to the file that needs to be checked
:return: True if the file exists and is not empty, False otherwise
"""
if os.path.exists(filename):
if not os.path.getsize(filename):
logger.warning('%s : file is empty.', filename)
return False
else:
logger.warning('%s : file does not exist.', filename)
return False
return True |
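A quick usage sketch, assuming is_valid_file and the module-level logger it relies on are importable:
import os
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    empty_path = tmp.name                      # exists but has zero size

print(is_valid_file(empty_path))               # False, logs "file is empty"
print(is_valid_file("/no/such/file.txt"))      # False, logs "does not exist"
os.remove(empty_path)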
def has_metaclass(parent):
""" we have to check the cls_node without changing it.
There are two possibilities:
1) clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta')
2) clsdef => simple_stmt => expr_stmt => Leaf('__meta')
"""
for node in parent.children:
if node.type == syms.suite:
return has_metaclass(node)
elif node.type == syms.simple_stmt and node.children:
expr_node = node.children[0]
if expr_node.type == syms.expr_stmt and expr_node.children:
left_side = expr_node.children[0]
if isinstance(left_side, Leaf) and \
left_side.value == '__metaclass__':
return True
return False | we have to check the cls_node without changing it.
There are two possibilities:
1) clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta')
2) clsdef => simple_stmt => expr_stmt => Leaf('__meta') | Below is the instruction that describes the task:
### Input:
we have to check the cls_node without changing it.
There are two possibilities:
1) clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta')
2) clsdef => simple_stmt => expr_stmt => Leaf('__meta')
### Response:
def has_metaclass(parent):
""" we have to check the cls_node without changing it.
There are two possibilities:
1) clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta')
2) clsdef => simple_stmt => expr_stmt => Leaf('__meta')
"""
for node in parent.children:
if node.type == syms.suite:
return has_metaclass(node)
elif node.type == syms.simple_stmt and node.children:
expr_node = node.children[0]
if expr_node.type == syms.expr_stmt and expr_node.children:
left_side = expr_node.children[0]
if isinstance(left_side, Leaf) and \
left_side.value == '__metaclass__':
return True
return False |
def do_write(self, msg):
"""Handling writing an individual record; we do a fresh open every time.
This assumes emit() has already locked the file."""
self.stream = self.do_open()
stream = self.stream
stream.write(msg)
if self.terminator:
stream.write(self.terminator)
stream.flush()
self._close()
return | Handling writing an individual record; we do a fresh open every time.
This assumes emit() has already locked the file. | Below is the instruction that describes the task:
### Input:
Handling writing an individual record; we do a fresh open every time.
This assumes emit() has already locked the file.
### Response:
def do_write(self, msg):
"""Handling writing an individual record; we do a fresh open every time.
This assumes emit() has already locked the file."""
self.stream = self.do_open()
stream = self.stream
stream.write(msg)
if self.terminator:
stream.write(self.terminator)
stream.flush()
self._close()
return |
def plot_txn_time_hist(transactions, bin_minutes=5, tz='America/New_York',
ax=None, **kwargs):
"""
Plots a histogram of transaction times, binning the times into
buckets of a given duration.
Parameters
----------
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
bin_minutes : float, optional
Sizes of the bins in minutes, defaults to 5 minutes.
tz : str, optional
Time zone to plot against. Note that if the specified
zone does not apply daylight savings, the distribution
may be partially offset.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
txn_time = transactions.copy()
txn_time.index = txn_time.index.tz_convert(pytz.timezone(tz))
txn_time.index = txn_time.index.map(lambda x: x.hour * 60 + x.minute)
txn_time['trade_value'] = (txn_time.amount * txn_time.price).abs()
txn_time = txn_time.groupby(level=0).sum().reindex(index=range(570, 961))
txn_time.index = (txn_time.index / bin_minutes).astype(int) * bin_minutes
txn_time = txn_time.groupby(level=0).sum()
txn_time['time_str'] = txn_time.index.map(lambda x:
str(datetime.time(int(x / 60),
x % 60))[:-3])
trade_value_sum = txn_time.trade_value.sum()
txn_time.trade_value = txn_time.trade_value.fillna(0) / trade_value_sum
ax.bar(txn_time.index, txn_time.trade_value, width=bin_minutes, **kwargs)
ax.set_xlim(570, 960)
ax.set_xticks(txn_time.index[::int(30 / bin_minutes)])
ax.set_xticklabels(txn_time.time_str[::int(30 / bin_minutes)])
ax.set_title('Transaction time distribution')
ax.set_ylabel('Proportion')
ax.set_xlabel('')
return ax | Plots a histogram of transaction times, binning the times into
buckets of a given duration.
Parameters
----------
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
bin_minutes : float, optional
Sizes of the bins in minutes, defaults to 5 minutes.
tz : str, optional
Time zone to plot against. Note that if the specified
zone does not apply daylight savings, the distribution
may be partially offset.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on. | Below is the instruction that describes the task:
### Input:
Plots a histogram of transaction times, binning the times into
buckets of a given duration.
Parameters
----------
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
bin_minutes : float, optional
Sizes of the bins in minutes, defaults to 5 minutes.
tz : str, optional
Time zone to plot against. Note that if the specified
zone does not apply daylight savings, the distribution
may be partially offset.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
### Response:
def plot_txn_time_hist(transactions, bin_minutes=5, tz='America/New_York',
ax=None, **kwargs):
"""
Plots a histogram of transaction times, binning the times into
buckets of a given duration.
Parameters
----------
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
bin_minutes : float, optional
Sizes of the bins in minutes, defaults to 5 minutes.
tz : str, optional
Time zone to plot against. Note that if the specified
zone does not apply daylight savings, the distribution
may be partially offset.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
txn_time = transactions.copy()
txn_time.index = txn_time.index.tz_convert(pytz.timezone(tz))
txn_time.index = txn_time.index.map(lambda x: x.hour * 60 + x.minute)
txn_time['trade_value'] = (txn_time.amount * txn_time.price).abs()
txn_time = txn_time.groupby(level=0).sum().reindex(index=range(570, 961))
txn_time.index = (txn_time.index / bin_minutes).astype(int) * bin_minutes
txn_time = txn_time.groupby(level=0).sum()
txn_time['time_str'] = txn_time.index.map(lambda x:
str(datetime.time(int(x / 60),
x % 60))[:-3])
trade_value_sum = txn_time.trade_value.sum()
txn_time.trade_value = txn_time.trade_value.fillna(0) / trade_value_sum
ax.bar(txn_time.index, txn_time.trade_value, width=bin_minutes, **kwargs)
ax.set_xlim(570, 960)
ax.set_xticks(txn_time.index[::int(30 / bin_minutes)])
ax.set_xticklabels(txn_time.time_str[::int(30 / bin_minutes)])
ax.set_title('Transaction time distribution')
ax.set_ylabel('Proportion')
ax.set_xlabel('')
return ax |
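A minimal, made-up usage sketch for the plot above; the index must be timezone-aware and the frame needs amount and price columns, as the function body assumes, and plot_txn_time_hist itself is assumed to be importable:
import pandas as pd
import matplotlib.pyplot as plt

# Three fake trades during one US trading day (UTC timestamps).
transactions = pd.DataFrame(
    {"amount": [100, -50, 200], "price": [10.0, 10.5, 9.8]},
    index=pd.to_datetime(
        ["2019-05-01 14:35", "2019-05-01 15:05", "2019-05-01 18:50"], utc=True
    ),
)
ax = plot_txn_time_hist(transactions, bin_minutes=15)
plt.show()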
def to_xml(self, tag_name="buyer"):
'''
Returns an XMLi representation of the object.
@param tag_name:str Tag name
@return: Element
'''
for n, v in {"name": self.name, "address": self.address}.items():
if is_empty_or_none(v):
raise ValueError("'%s' attribute cannot be empty or None." % n)
if self.__require_id and is_empty_or_none(self.identifier):
raise ValueError("identifier attribute cannot be empty or None.")
doc = Document()
root = doc.createElement(tag_name)
self._create_text_node(root, "id", self.identifier)
self._create_text_node(root, "name", self.name, True)
if self.phone:
self._create_text_node(root, "phone", self.phone, True)
root.appendChild(self.address.to_xml())
return root | Returns an XMLi representation of the object.
@param tag_name:str Tag name
@return: Element | Below is the instruction that describes the task:
### Input:
Returns an XMLi representation of the object.
@param tag_name:str Tag name
@return: Element
### Response:
def to_xml(self, tag_name="buyer"):
'''
Returns an XMLi representation of the object.
@param tag_name:str Tag name
@return: Element
'''
for n, v in {"name": self.name, "address": self.address}.items():
if is_empty_or_none(v):
raise ValueError("'%s' attribute cannot be empty or None." % n)
if self.__require_id and is_empty_or_none(self.identifier):
raise ValueError("identifier attribute cannot be empty or None.")
doc = Document()
root = doc.createElement(tag_name)
self._create_text_node(root, "id", self.identifier)
self._create_text_node(root, "name", self.name, True)
if self.phone:
self._create_text_node(root, "phone", self.phone, True)
root.appendChild(self.address.to_xml())
return root |
def infer(self, sequence, reset=True, sequenceNumber=None, burnIn=2,
enableFeedback=True, apicalTiebreak=True,
apicalModulationBasalThreshold=True, inertia=True):
"""
Infer on a single given sequence. Sequence format:
sequence = [
set([16, 22, 32]), # Position 0
set([13, 15, 33]) # Position 1
]
Parameters:
----------------------------
@param sequence (list)
Sequence to infer, in the canonical format specified above
@param reset (bool)
If set to True (which is the default value), the network will
be reset after inference.
@param sequenceNumber (int)
Number of the sequence (must match the number given during
learning).
@param burnIn (int)
Number of patterns to wait within a sequence before computing
accuracy figures
"""
if enableFeedback is False:
self._disableL2()
self.network.regions["L4Column_0"].getSelf()._tm.disableApicalDependence = True
else:
self._enableL2()
self._setLearningMode(l4Learning=False, l2Learning=False)
if sequenceNumber is not None:
if sequenceNumber not in self.objectL2Representations:
raise ValueError("The provided sequence was not given during learning")
self.network.regions["L4Column_0"].getSelf()._tm.setUseApicalModulationBasalThreshold(apicalModulationBasalThreshold)
self.network.regions["L4Column_0"].getSelf()._tm.setUseApicalTiebreak(apicalTiebreak)
self.network.regions["L2Column_0"].getSelf()._pooler.setUseInertia(inertia)
L2Responses=[]
L4Responses=[]
L4Predicted=[]
activityTrace = numpy.zeros(len(sequence))
totalActiveCells = 0
totalPredictedActiveCells = 0
for i,s in enumerate(sequence):
self.sensorInputs[0].addDataToQueue(list(s), 0, 0)
self.network.run(1)
activityTrace[i] = len(self.getL4Representations()[0])
L4Responses.append(self.getL4Representations()[0])
L4Predicted.append(self.getL4PredictedCells()[0])
L2Responses.append(self.getL2Representations()[0])
if i >= burnIn:
totalActiveCells += len(self.getL4Representations()[0])
totalPredictedActiveCells += len(self.getL4PredictedActiveCells()[0])
if reset:
self.sendReset()
avgActiveCells = float(totalActiveCells) / len(sequence)
avgPredictedActiveCells = float(totalPredictedActiveCells) / len(sequence)
responses = {
"L2Responses": L2Responses,
"L4Responses": L4Responses,
"L4Predicted": L4Predicted
}
return avgActiveCells,avgPredictedActiveCells,activityTrace, responses | Infer on a single given sequence. Sequence format:
sequence = [
set([16, 22, 32]), # Position 0
set([13, 15, 33]) # Position 1
]
Parameters:
----------------------------
@param sequence (list)
Sequence to infer, in the canonical format specified above
@param reset (bool)
If set to True (which is the default value), the network will
be reset after inference.
@param sequenceNumber (int)
Number of the sequence (must match the number given during
learning).
@param burnIn (int)
Number of patterns to wait within a sequence before computing
accuracy figures | Below is the instruction that describes the task:
### Input:
Infer on a single given sequence. Sequence format:
sequence = [
set([16, 22, 32]), # Position 0
set([13, 15, 33]) # Position 1
]
Parameters:
----------------------------
@param sequence (list)
Sequence to infer, in the canonical format specified above
@param reset (bool)
If set to True (which is the default value), the network will
be reset after inference.
@param sequenceNumber (int)
Number of the sequence (must match the number given during
learning).
@param burnIn (int)
Number of patterns to wait within a sequence before computing
accuracy figures
### Response:
def infer(self, sequence, reset=True, sequenceNumber=None, burnIn=2,
enableFeedback=True, apicalTiebreak=True,
apicalModulationBasalThreshold=True, inertia=True):
"""
Infer on a single given sequence. Sequence format:
sequence = [
set([16, 22, 32]), # Position 0
set([13, 15, 33]) # Position 1
]
Parameters:
----------------------------
@param sequence (list)
Sequence to infer, in the canonical format specified above
@param reset (bool)
If set to True (which is the default value), the network will
be reset after inference.
@param sequenceNumber (int)
Number of the sequence (must match the number given during
learning).
@param burnIn (int)
Number of patterns to wait within a sequence before computing
accuracy figures
"""
if enableFeedback is False:
self._disableL2()
self.network.regions["L4Column_0"].getSelf()._tm.disableApicalDependence = True
else:
self._enableL2()
self._setLearningMode(l4Learning=False, l2Learning=False)
if sequenceNumber is not None:
if sequenceNumber not in self.objectL2Representations:
raise ValueError("The provided sequence was not given during learning")
self.network.regions["L4Column_0"].getSelf()._tm.setUseApicalModulationBasalThreshold(apicalModulationBasalThreshold)
self.network.regions["L4Column_0"].getSelf()._tm.setUseApicalTiebreak(apicalTiebreak)
self.network.regions["L2Column_0"].getSelf()._pooler.setUseInertia(inertia)
L2Responses=[]
L4Responses=[]
L4Predicted=[]
activityTrace = numpy.zeros(len(sequence))
totalActiveCells = 0
totalPredictedActiveCells = 0
for i,s in enumerate(sequence):
self.sensorInputs[0].addDataToQueue(list(s), 0, 0)
self.network.run(1)
activityTrace[i] = len(self.getL4Representations()[0])
L4Responses.append(self.getL4Representations()[0])
L4Predicted.append(self.getL4PredictedCells()[0])
L2Responses.append(self.getL2Representations()[0])
if i >= burnIn:
totalActiveCells += len(self.getL4Representations()[0])
totalPredictedActiveCells += len(self.getL4PredictedActiveCells()[0])
if reset:
self.sendReset()
avgActiveCells = float(totalActiveCells) / len(sequence)
avgPredictedActiveCells = float(totalPredictedActiveCells) / len(sequence)
responses = {
"L2Responses": L2Responses,
"L4Responses": L4Responses,
"L4Predicted": L4Predicted
}
return avgActiveCells,avgPredictedActiveCells,activityTrace, responses |
def fit_arrhenius(temps, diffusivities):
"""
Returns Ea, c, standard error of Ea from the Arrhenius fit:
D = c * exp(-Ea/kT)
Args:
temps ([float]): A sequence of temperatures. units: K
diffusivities ([float]): A sequence of diffusivities (e.g.,
from DiffusionAnalyzer.diffusivity). units: cm^2/s
"""
t_1 = 1 / np.array(temps)
logd = np.log(diffusivities)
# Do a least squares regression of log(D) vs 1/T
a = np.array([t_1, np.ones(len(temps))]).T
w, res, _, _ = np.linalg.lstsq(a, logd, rcond=None)
w = np.array(w)
n = len(temps)
if n > 2:
std_Ea = (res[0] / (n - 2) / (
n * np.var(t_1))) ** 0.5 * const.k / const.e
else:
std_Ea = None
return -w[0] * const.k / const.e, np.exp(w[1]), std_Ea | Returns Ea, c, standard error of Ea from the Arrhenius fit:
D = c * exp(-Ea/kT)
Args:
temps ([float]): A sequence of temperatures. units: K
diffusivities ([float]): A sequence of diffusivities (e.g.,
from DiffusionAnalyzer.diffusivity). units: cm^2/s | Below is the instruction that describes the task:
### Input:
Returns Ea, c, standard error of Ea from the Arrhenius fit:
D = c * exp(-Ea/kT)
Args:
temps ([float]): A sequence of temperatures. units: K
diffusivities ([float]): A sequence of diffusivities (e.g.,
from DiffusionAnalyzer.diffusivity). units: cm^2/s
### Response:
def fit_arrhenius(temps, diffusivities):
"""
Returns Ea, c, standard error of Ea from the Arrhenius fit:
D = c * exp(-Ea/kT)
Args:
temps ([float]): A sequence of temperatures. units: K
diffusivities ([float]): A sequence of diffusivities (e.g.,
from DiffusionAnalyzer.diffusivity). units: cm^2/s
"""
t_1 = 1 / np.array(temps)
logd = np.log(diffusivities)
# Do a least squares regression of log(D) vs 1/T
a = np.array([t_1, np.ones(len(temps))]).T
w, res, _, _ = np.linalg.lstsq(a, logd, rcond=None)
w = np.array(w)
n = len(temps)
if n > 2:
std_Ea = (res[0] / (n - 2) / (
n * np.var(t_1))) ** 0.5 * const.k / const.e
else:
std_Ea = None
return -w[0] * const.k / const.e, np.exp(w[1]), std_Ea |
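A synthetic sanity check of the fit: generate diffusivities from a known Ea and c, then recover them with the function above (the numeric values are invented, and fit_arrhenius as defined above is assumed to be in scope):
import numpy as np
from scipy import constants as const

Ea_true, c_true = 0.30, 1e-3                       # eV and cm^2/s
temps = np.array([600.0, 800.0, 1000.0, 1200.0])   # K
diffusivities = c_true * np.exp(-Ea_true * const.e / (const.k * temps))

Ea_fit, c_fit, std_Ea = fit_arrhenius(temps, diffusivities)
print(Ea_fit, c_fit)   # ~0.30 and ~1e-3, up to floating-point error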
def readInstances(self, makeGlyphs=True, makeKerning=True, makeInfo=True):
""" Read all instance elements.
::
<instance familyname="SuperFamily" filename="OutputNameInstance1.ufo" location="location-token-aaa" stylename="Regular">
"""
for instanceElement in self.root.findall('.instances/instance'):
self._readSingleInstanceElement(instanceElement, makeGlyphs=makeGlyphs, makeKerning=makeKerning, makeInfo=makeInfo) | Read all instance elements.
::
<instance familyname="SuperFamily" filename="OutputNameInstance1.ufo" location="location-token-aaa" stylename="Regular"> | Below is the instruction that describes the task:
### Input:
Read all instance elements.
::
<instance familyname="SuperFamily" filename="OutputNameInstance1.ufo" location="location-token-aaa" stylename="Regular">
### Response:
def readInstances(self, makeGlyphs=True, makeKerning=True, makeInfo=True):
""" Read all instance elements.
::
<instance familyname="SuperFamily" filename="OutputNameInstance1.ufo" location="location-token-aaa" stylename="Regular">
"""
for instanceElement in self.root.findall('.instances/instance'):
self._readSingleInstanceElement(instanceElement, makeGlyphs=makeGlyphs, makeKerning=makeKerning, makeInfo=makeInfo) |
def close_fast(self):
"""Turn the device off."""
close_command = StandardSend(self._address,
COMMAND_LIGHT_OFF_FAST_0X14_0X00)
self._send_method(close_command, self._closed_message_received) | Turn the device off. | Below is the the instruction that describes the task:
### Input:
Turn the device off.
### Response:
def close_fast(self):
"""Turn the device off."""
close_command = StandardSend(self._address,
COMMAND_LIGHT_OFF_FAST_0X14_0X00)
self._send_method(close_command, self._closed_message_received) |
def afni_copy(filename):
''' creates a ``+orig`` copy of the given dataset and returns the filename as a string '''
if nl.pkg_available('afni',True):
afni_filename = "%s+orig" % nl.prefix(filename)
if not os.path.exists(afni_filename + ".HEAD"):
nl.calc(filename,'a',prefix=nl.prefix(filename))
return afni_filename | creates a ``+orig`` copy of the given dataset and returns the filename as a string | Below is the the instruction that describes the task:
### Input:
creates a ``+orig`` copy of the given dataset and returns the filename as a string
### Response:
def afni_copy(filename):
''' creates a ``+orig`` copy of the given dataset and returns the filename as a string '''
if nl.pkg_available('afni',True):
afni_filename = "%s+orig" % nl.prefix(filename)
if not os.path.exists(afni_filename + ".HEAD"):
nl.calc(filename,'a',prefix=nl.prefix(filename))
return afni_filename |
def _multitaper_spectrum(self, clm, k, convention='power', unit='per_l',
lmax=None, taper_wt=None):
"""
Return the multitaper spectrum estimate and standard error for an
input SHCoeffs class instance.
"""
if lmax is None:
lmax = clm.lmax
sh = clm.to_array(normalization='4pi', csphase=1, lmax=lmax)
if taper_wt is None:
mtse, sd = _shtools.SHMultiTaperMaskSE(sh, self.tapers, lmax=lmax,
k=k)
else:
mtse, sd = _shtools.SHMultiTaperMaskSE(sh, self.tapers, lmax=lmax,
k=k, taper_wt=taper_wt)
if (unit == 'per_l'):
pass
elif (unit == 'per_lm'):
degree_l = _np.arange(len(mtse))
mtse /= (2.0 * degree_l + 1.0)
sd /= (2.0 * degree_l + 1.0)
else:
raise ValueError(
"unit must be 'per_l' or 'per_lm'." +
"Input value was {:s}".format(repr(unit)))
if (convention == 'power'):
return mtse, sd
elif (convention == 'energy'):
return mtse * 4.0 * _np.pi, sd * 4.0 * _np.pi
else:
raise ValueError(
"convention must be 'power' or 'energy'." +
"Input value was {:s}".format(repr(convention))) | Return the multitaper spectrum estimate and standard error for an
input SHCoeffs class instance. | Below is the instruction that describes the task:
### Input:
Return the multitaper spectrum estimate and standard error for an
input SHCoeffs class instance.
### Response:
def _multitaper_spectrum(self, clm, k, convention='power', unit='per_l',
lmax=None, taper_wt=None):
"""
Return the multitaper spectrum estimate and standard error for an
input SHCoeffs class instance.
"""
if lmax is None:
lmax = clm.lmax
sh = clm.to_array(normalization='4pi', csphase=1, lmax=lmax)
if taper_wt is None:
mtse, sd = _shtools.SHMultiTaperMaskSE(sh, self.tapers, lmax=lmax,
k=k)
else:
mtse, sd = _shtools.SHMultiTaperMaskSE(sh, self.tapers, lmax=lmax,
k=k, taper_wt=taper_wt)
if (unit == 'per_l'):
pass
elif (unit == 'per_lm'):
degree_l = _np.arange(len(mtse))
mtse /= (2.0 * degree_l + 1.0)
sd /= (2.0 * degree_l + 1.0)
else:
raise ValueError(
"unit must be 'per_l' or 'per_lm'." +
"Input value was {:s}".format(repr(unit)))
if (convention == 'power'):
return mtse, sd
elif (convention == 'energy'):
return mtse * 4.0 * _np.pi, sd * 4.0 * _np.pi
else:
raise ValueError(
"convention must be 'power' or 'energy'." +
"Input value was {:s}".format(repr(convention))) |
def diabetes(display=False):
""" Return the diabetes data in a nice package. """
d = sklearn.datasets.load_diabetes()
df = pd.DataFrame(data=d.data, columns=d.feature_names) # pylint: disable=E1101
return df, d.target | Return the diabetes data in a nice package. | Below is the the instruction that describes the task:
### Input:
Return the diabetes data in a nice package.
### Response:
def diabetes(display=False):
""" Return the diabetes data in a nice package. """
d = sklearn.datasets.load_diabetes()
df = pd.DataFrame(data=d.data, columns=d.feature_names) # pylint: disable=E1101
return df, d.target |
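Usage is a one-liner; the shapes below come from scikit-learn's load_diabetes (442 samples, 10 features):
X, y = diabetes()
print(X.shape)           # (442, 10)
print(list(X.columns))   # ['age', 'sex', 'bmi', 'bp', 's1', ..., 's6']
print(y[:5])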
def extract_input_lines(self, range_str, raw=False):
"""Return as a string a set of input history slices.
Parameters
----------
range_str : string
The set of slices is given as a string, like "~5/6-~4/2 4:8 9",
since this function is for use by magic functions which get their
arguments as strings. The number before the / is the session
number: ~n goes n back from the current session.
Optional Parameters:
- raw(False): by default, the processed input is used. If this is
true, the raw input history is used instead.
Note that slices can be called with two notations:
N:M -> standard python form, means including items N...(M-1).
N-M -> include items N..M (closed endpoint)."""
lines = self.history_manager.get_range_by_str(range_str, raw=raw)
return "\n".join(x for _, _, x in lines) | Return as a string a set of input history slices.
Parameters
----------
range_str : string
The set of slices is given as a string, like "~5/6-~4/2 4:8 9",
since this function is for use by magic functions which get their
arguments as strings. The number before the / is the session
number: ~n goes n back from the current session.
Optional Parameters:
- raw(False): by default, the processed input is used. If this is
true, the raw input history is used instead.
Note that slices can be called with two notations:
N:M -> standard python form, means including items N...(M-1).
N-M -> include items N..M (closed endpoint). | Below is the instruction that describes the task:
### Input:
Return as a string a set of input history slices.
Parameters
----------
range_str : string
The set of slices is given as a string, like "~5/6-~4/2 4:8 9",
since this function is for use by magic functions which get their
arguments as strings. The number before the / is the session
number: ~n goes n back from the current session.
Optional Parameters:
- raw(False): by default, the processed input is used. If this is
true, the raw input history is used instead.
Note that slices can be called with two notations:
N:M -> standard python form, means including items N...(M-1).
N-M -> include items N..M (closed endpoint).
### Response:
def extract_input_lines(self, range_str, raw=False):
"""Return as a string a set of input history slices.
Parameters
----------
range_str : string
The set of slices is given as a string, like "~5/6-~4/2 4:8 9",
since this function is for use by magic functions which get their
arguments as strings. The number before the / is the session
number: ~n goes n back from the current session.
Optional Parameters:
- raw(False): by default, the processed input is used. If this is
true, the raw input history is used instead.
Note that slices can be called with two notations:
N:M -> standard python form, means including items N...(M-1).
N-M -> include items N..M (closed endpoint)."""
lines = self.history_manager.get_range_by_str(range_str, raw=raw)
return "\n".join(x for _, _, x in lines) |
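The docstring above distinguishes the half-open N:M form from the closed N-M form. The sketch below illustrates just that slicing difference on a plain list; parse_slice is a hypothetical helper and ignores the ~session prefixes and 1-based numbering of real IPython history:

def parse_slice(spec):
    # "N:M" -> items N..M-1 (Python half-open); "N-M" -> items N..M (closed endpoint).
    if ':' in spec:
        start, stop = map(int, spec.split(':'))
    elif '-' in spec:
        start, stop = map(int, spec.split('-'))
        stop += 1  # include the closed endpoint
    else:
        start = int(spec)
        stop = start + 1
    return slice(start, stop)

history = list('abcdefghij')
print(history[parse_slice('4:8')])  # ['e', 'f', 'g', 'h']
print(history[parse_slice('4-8')])  # ['e', 'f', 'g', 'h', 'i']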
def _get_child_admin_site(self, rel):
"""
Returns the separate AdminSite instance that django-polymorphic
maintains for child models.
This admin site needs to be passed to the widget so that it passes the
check of whether the field is pointing to a model that's registered
in the admin.
The hackiness of this implementation reflects the hackiness of the way
django-polymorphic does things.
"""
if rel.to not in self.admin_site._registry:
# Go through the objects the model inherits from and find one
# that's registered in the main admin and has a reference to the
# child admin site in it attributes.
for parent in rel.to.mro():
if parent in self.admin_site._registry \
and hasattr(self.admin_site._registry[parent], '_child_admin_site'):
return self.admin_site._registry[parent]._child_admin_site
return self.admin_site | Returns the separate AdminSite instance that django-polymorphic
maintains for child models.
This admin site needs to be passed to the widget so that it passes the
check of whether the field is pointing to a model that's registered
in the admin.
The hackiness of this implementation reflects the hackiness of the way
django-polymorphic does things. | Below is the the instruction that describes the task:
### Input:
Returns the separate AdminSite instance that django-polymorphic
maintains for child models.
This admin site needs to be passed to the widget so that it passes the
check of whether the field is pointing to a model that's registered
in the admin.
The hackiness of this implementation reflects the hackiness of the way
django-polymorphic does things.
### Response:
def _get_child_admin_site(self, rel):
"""
Returns the separate AdminSite instance that django-polymorphic
maintains for child models.
This admin site needs to be passed to the widget so that it passes the
check of whether the field is pointing to a model that's registered
in the admin.
The hackiness of this implementation reflects the hackiness of the way
django-polymorphic does things.
"""
if rel.to not in self.admin_site._registry:
# Go through the objects the model inherits from and find one
# that's registered in the main admin and has a reference to the
# child admin site in it attributes.
for parent in rel.to.mro():
if parent in self.admin_site._registry \
and hasattr(self.admin_site._registry[parent], '_child_admin_site'):
return self.admin_site._registry[parent]._child_admin_site
return self.admin_site |
def index(
self,
symbol='000001',
market='sh',
category='9',
start='0',
offset='100'):
'''
    Fetch index K-line (candlestick) data
    K-line categories:
    - 0 5-minute K-line
    - 1 15-minute K-line
    - 2 30-minute K-line
    - 3 1-hour K-line
    - 4 daily K-line
    - 5 weekly K-line
    - 6 monthly K-line
    - 7 1-minute
    - 8 1-minute K-line
    - 9 daily K-line
    - 10 quarterly K-line
    - 11 yearly K-line
    :param symbol: stock code
    :param category: data category
    :param market: securities market
    :param start: start position
    :param offset: number of records per fetch
:return: pd.dataFrame or None
'''
market = 1 if market == 'sh' else 0
with self.client.connect(*self.bestip):
data = self.client.get_index_bars(
int(category), int(market), str(symbol), int(start), int(offset))
        return self.client.to_df(data) | Fetch index K-line (candlestick) data
    K-line categories:
    - 0 5-minute K-line
    - 1 15-minute K-line
    - 2 30-minute K-line
    - 3 1-hour K-line
    - 4 daily K-line
    - 5 weekly K-line
    - 6 monthly K-line
    - 7 1-minute
    - 8 1-minute K-line
    - 9 daily K-line
    - 10 quarterly K-line
    - 11 yearly K-line
    :param symbol: stock code
    :param category: data category
    :param market: securities market
    :param start: start position
    :param offset: number of records per fetch
:return: pd.dataFrame or None | Below is the the instruction that describes the task:
### Input:
Fetch index K-line (candlestick) data
    K-line categories:
    - 0 5-minute K-line
    - 1 15-minute K-line
    - 2 30-minute K-line
    - 3 1-hour K-line
    - 4 daily K-line
    - 5 weekly K-line
    - 6 monthly K-line
    - 7 1-minute
    - 8 1-minute K-line
    - 9 daily K-line
    - 10 quarterly K-line
    - 11 yearly K-line
    :param symbol: stock code
    :param category: data category
    :param market: securities market
    :param start: start position
    :param offset: number of records per fetch
:return: pd.dataFrame or None
### Response:
def index(
self,
symbol='000001',
market='sh',
category='9',
start='0',
offset='100'):
'''
    Fetch index K-line (candlestick) data
    K-line categories:
    - 0 5-minute K-line
    - 1 15-minute K-line
    - 2 30-minute K-line
    - 3 1-hour K-line
    - 4 daily K-line
    - 5 weekly K-line
    - 6 monthly K-line
    - 7 1-minute
    - 8 1-minute K-line
    - 9 daily K-line
    - 10 quarterly K-line
    - 11 yearly K-line
    :param symbol: stock code
    :param category: data category
    :param market: securities market
    :param start: start position
    :param offset: number of records per fetch
:return: pd.dataFrame or None
'''
market = 1 if market == 'sh' else 0
with self.client.connect(*self.bestip):
data = self.client.get_index_bars(
int(category), int(market), str(symbol), int(start), int(offset))
return self.client.to_df(data) |
def task_submission_options(f):
"""
Options shared by both transfer and delete task submission
"""
def notify_opt_callback(ctx, param, value):
"""
Parse --notify
- "" is the same as "off"
- parse by lowercase, comma-split, strip spaces
- "off,x" is invalid for any x
- "on,x" is valid for any valid x (other than "off")
- "failed", "succeeded", "inactive" are normal vals
In code, produces True, False, or a set
"""
# if no value was set, don't set any explicit options
# the API default is "everything on"
if value is None:
return {}
value = value.lower()
value = [x.strip() for x in value.split(",")]
# [""] is what you'll get if value is "" to start with
# special-case it into "off", which helps avoid surprising scripts
        # which take notification settings as inputs and build --notify
if value == [""]:
value = ["off"]
off = "off" in value
on = "on" in value
# set-ize it -- duplicates are fine
vals = set([x for x in value if x not in ("off", "on")])
if (vals or on) and off:
raise click.UsageError('--notify cannot accept "off" and another value')
allowed_vals = set(("on", "succeeded", "failed", "inactive"))
if not vals <= allowed_vals:
raise click.UsageError(
"--notify received at least one invalid value among {}".format(
list(vals)
)
)
# return the notification options to send!
# on means don't set anything (default)
if on:
return {}
# off means turn off everything
if off:
return {
"notify_on_succeeded": False,
"notify_on_failed": False,
"notify_on_inactive": False,
}
# otherwise, return the exact set of values seen
else:
return {
"notify_on_succeeded": "succeeded" in vals,
"notify_on_failed": "failed" in vals,
"notify_on_inactive": "inactive" in vals,
}
f = click.option(
"--dry-run",
is_flag=True,
help=("Don't actually submit the task, print submission " "data instead"),
)(f)
f = click.option(
"--notify",
callback=notify_opt_callback,
help=(
"Comma separated list of task events which notify by email. "
"'on' and 'off' may be used to enable or disable notifications "
"for all event types. Otherwise, use 'succeeded', 'failed', or "
"'inactive'"
),
)(f)
f = click.option(
"--submission-id",
help=(
"Task submission ID, as generated by `globus task "
"generate-submission-id`. Used for safe resubmission in the "
"presence of network failures."
),
)(f)
f = click.option("--label", default=None, help="Set a label for this task.")(f)
f = click.option(
"--deadline",
default=None,
type=ISOTimeType(),
help="Set a deadline for this to be canceled if not completed by.",
)(f)
f = click.option(
"--skip-activation-check",
is_flag=True,
help=("Submit the task even if the endpoint(s) " "aren't currently activated."),
)(f)
return f | Options shared by both transfer and delete task submission | Below is the the instruction that describes the task:
### Input:
Options shared by both transfer and delete task submission
### Response:
def task_submission_options(f):
"""
Options shared by both transfer and delete task submission
"""
def notify_opt_callback(ctx, param, value):
"""
Parse --notify
- "" is the same as "off"
- parse by lowercase, comma-split, strip spaces
- "off,x" is invalid for any x
- "on,x" is valid for any valid x (other than "off")
- "failed", "succeeded", "inactive" are normal vals
In code, produces True, False, or a set
"""
# if no value was set, don't set any explicit options
# the API default is "everything on"
if value is None:
return {}
value = value.lower()
value = [x.strip() for x in value.split(",")]
# [""] is what you'll get if value is "" to start with
# special-case it into "off", which helps avoid surprising scripts
        # which take notification settings as inputs and build --notify
if value == [""]:
value = ["off"]
off = "off" in value
on = "on" in value
# set-ize it -- duplicates are fine
vals = set([x for x in value if x not in ("off", "on")])
if (vals or on) and off:
raise click.UsageError('--notify cannot accept "off" and another value')
allowed_vals = set(("on", "succeeded", "failed", "inactive"))
if not vals <= allowed_vals:
raise click.UsageError(
"--notify received at least one invalid value among {}".format(
list(vals)
)
)
# return the notification options to send!
# on means don't set anything (default)
if on:
return {}
# off means turn off everything
if off:
return {
"notify_on_succeeded": False,
"notify_on_failed": False,
"notify_on_inactive": False,
}
# otherwise, return the exact set of values seen
else:
return {
"notify_on_succeeded": "succeeded" in vals,
"notify_on_failed": "failed" in vals,
"notify_on_inactive": "inactive" in vals,
}
f = click.option(
"--dry-run",
is_flag=True,
help=("Don't actually submit the task, print submission " "data instead"),
)(f)
f = click.option(
"--notify",
callback=notify_opt_callback,
help=(
"Comma separated list of task events which notify by email. "
"'on' and 'off' may be used to enable or disable notifications "
"for all event types. Otherwise, use 'succeeded', 'failed', or "
"'inactive'"
),
)(f)
f = click.option(
"--submission-id",
help=(
"Task submission ID, as generated by `globus task "
"generate-submission-id`. Used for safe resubmission in the "
"presence of network failures."
),
)(f)
f = click.option("--label", default=None, help="Set a label for this task.")(f)
f = click.option(
"--deadline",
default=None,
type=ISOTimeType(),
help="Set a deadline for this to be canceled if not completed by.",
)(f)
f = click.option(
"--skip-activation-check",
is_flag=True,
help=("Submit the task even if the endpoint(s) " "aren't currently activated."),
)(f)
return f |
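To make the --notify normalization described above concrete, here are a few representative inputs and the settings dictionaries the documented rules produce (an illustrative table only; it restates the callback's behaviour rather than calling it):

notify_examples = {
    None: {},                      # no flag given: API default, all notifications on
    'on': {},                      # explicit "on": also defer to the API default
    '': {'notify_on_succeeded': False,   # "" is treated like "off"
         'notify_on_failed': False,
         'notify_on_inactive': False},
    'failed,inactive': {'notify_on_succeeded': False,
                        'notify_on_failed': True,
                        'notify_on_inactive': True},
}
# 'off,failed' is rejected with a click.UsageError, because "off" cannot be
# combined with any other value.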
def local_reduction_attention(x, block_length, multihead_params):
"""Reduce the length dimension using self attention.
Args:
x (tf.Tensor): float32 of shape [batch, length, depth]
block_length (int): Block length for local attention (Compression factor)
multihead_params (dict): parameters for multihead attention
Returns:
tf.Tensor: Compressed tensor of shape [batch, length // factor, depth]
"""
@expert_utils.add_name_scope()
def dot_product_self_local_attention_flattened(q, k, v):
"""Strided block local self-attention.
No overlap between the blocks.
Args:
q (tf.Tensor): shape [batch, heads, length, depth_k]
k (tf.Tensor): shape [batch, heads, length, depth_k]
v (tf.Tensor): shape [batch, heads, length, depth_v]
Returns:
tf.Tensor: shape [batch, heads, length, depth_v]
"""
_, num_head, _, depth = q.get_shape().as_list()
# Extract the blocks
def pad_and_reshape(x):
"""Split the length dim into [num_block, block_length]."""
length_x = common_layers.shape_list(x)[2]
# Add some padding, but won't matter as the last block will never be
# attended by the query (after compression)
x = tf.pad(x, [[0, 0], [0, 0], [0, -length_x % block_length], [0, 0]])
x = tf.reshape(
x,
[
common_layers.shape_list(x)[0], # Batch
num_head, # Head
common_layers.shape_list(x)[2] // block_length, # Num blocks
block_length, # Block length
depth, # Depth
])
return x
q, k, v = [pad_and_reshape(t) for t in (q, k, v)]
# Perform attention on the flattened dot product
logits = tf.matmul(q, k, transpose_b=True)
logits = tf.reshape(
logits,
[
common_layers.shape_list(logits)[0], # Batch
num_head, # Head
common_layers.shape_list(logits)[2], # Num blocks
block_length**2, # Flatten last dimension
])
weights = tf.nn.softmax(logits)
weights = tf.reshape(
weights,
[
common_layers.shape_list(weights)[0], # Batch
num_head, # Head
common_layers.shape_list(weights)[2], # Num blocks
block_length,
block_length, # Restore the block length dimension
])
weights = tf.reduce_sum(weights, axis=3, keep_dims=True) # Compress block
v_out = tf.matmul(weights, v) # [1, block_length] @ [block_length, depth]
v_out = tf.squeeze(v_out, axis=3)
return v_out
return multihead_attention(
x,
None,
bias=None,
output_depth=x.get_shape().as_list()[-1],
attention_type=dot_product_self_local_attention_flattened,
**multihead_params) | Reduce the length dimension using self attention.
Args:
x (tf.Tensor): float32 of shape [batch, length, depth]
block_length (int): Block length for local attention (Compression factor)
multihead_params (dict): parameters for multihead attention
Returns:
tf.Tensor: Compressed tensor of shape [batch, length // factor, depth] | Below is the the instruction that describes the task:
### Input:
Reduce the length dimension using self attention.
Args:
x (tf.Tensor): float32 of shape [batch, length, depth]
block_length (int): Block length for local attention (Compression factor)
multihead_params (dict): parameters for multihead attention
Returns:
tf.Tensor: Compressed tensor of shape [batch, length // factor, depth]
### Response:
def local_reduction_attention(x, block_length, multihead_params):
"""Reduce the length dimension using self attention.
Args:
x (tf.Tensor): float32 of shape [batch, length, depth]
block_length (int): Block length for local attention (Compression factor)
multihead_params (dict): parameters for multihead attention
Returns:
tf.Tensor: Compressed tensor of shape [batch, length // factor, depth]
"""
@expert_utils.add_name_scope()
def dot_product_self_local_attention_flattened(q, k, v):
"""Strided block local self-attention.
No overlap between the blocks.
Args:
q (tf.Tensor): shape [batch, heads, length, depth_k]
k (tf.Tensor): shape [batch, heads, length, depth_k]
v (tf.Tensor): shape [batch, heads, length, depth_v]
Returns:
tf.Tensor: shape [batch, heads, length, depth_v]
"""
_, num_head, _, depth = q.get_shape().as_list()
# Extract the blocks
def pad_and_reshape(x):
"""Split the length dim into [num_block, block_length]."""
length_x = common_layers.shape_list(x)[2]
# Add some padding, but won't matter as the last block will never be
# attended by the query (after compression)
x = tf.pad(x, [[0, 0], [0, 0], [0, -length_x % block_length], [0, 0]])
x = tf.reshape(
x,
[
common_layers.shape_list(x)[0], # Batch
num_head, # Head
common_layers.shape_list(x)[2] // block_length, # Num blocks
block_length, # Block length
depth, # Depth
])
return x
q, k, v = [pad_and_reshape(t) for t in (q, k, v)]
# Perform attention on the flattened dot product
logits = tf.matmul(q, k, transpose_b=True)
logits = tf.reshape(
logits,
[
common_layers.shape_list(logits)[0], # Batch
num_head, # Head
common_layers.shape_list(logits)[2], # Num blocks
block_length**2, # Flatten last dimension
])
weights = tf.nn.softmax(logits)
weights = tf.reshape(
weights,
[
common_layers.shape_list(weights)[0], # Batch
num_head, # Head
common_layers.shape_list(weights)[2], # Num blocks
block_length,
block_length, # Restore the block length dimension
])
weights = tf.reduce_sum(weights, axis=3, keep_dims=True) # Compress block
v_out = tf.matmul(weights, v) # [1, block_length] @ [block_length, depth]
v_out = tf.squeeze(v_out, axis=3)
return v_out
return multihead_attention(
x,
None,
bias=None,
output_depth=x.get_shape().as_list()[-1],
attention_type=dot_product_self_local_attention_flattened,
**multihead_params) |
def show_status(self):
"""Show status of unregistered migrations"""
if not self.check_directory():
return
migrations = self.get_unregistered_migrations()
if migrations:
logger.info('Unregistered migrations:')
for migration in migrations:
logger.info(migration.filename)
else:
logger.info(self.NO_MIGRATIONS_MSG) | Show status of unregistered migrations | Below is the the instruction that describes the task:
### Input:
Show status of unregistered migrations
### Response:
def show_status(self):
"""Show status of unregistered migrations"""
if not self.check_directory():
return
migrations = self.get_unregistered_migrations()
if migrations:
logger.info('Unregistered migrations:')
for migration in migrations:
logger.info(migration.filename)
else:
logger.info(self.NO_MIGRATIONS_MSG) |
def set_led_brightness(self, brightness):
"""Set the LED brightness for the current group/button."""
set_cmd = self._create_set_property_msg("_led_brightness", 0x07,
brightness)
self._send_method(set_cmd, self._property_set) | Set the LED brightness for the current group/button. | Below is the the instruction that describes the task:
### Input:
Set the LED brightness for the current group/button.
### Response:
def set_led_brightness(self, brightness):
"""Set the LED brightness for the current group/button."""
set_cmd = self._create_set_property_msg("_led_brightness", 0x07,
brightness)
self._send_method(set_cmd, self._property_set) |
def _prep_params(params):
'''Remove empty (None) valued keywords and self from function parameters'''
return {k: v for (k, v) in params.items() if v is not None and k != 'self'} | Remove empty (None) valued keywords and self from function parameters | Below is the the instruction that describes the task:
### Input:
Remove empty (None) valued keywords and self from function parameters
### Response:
def _prep_params(params):
'''Remove empty (None) valued keywords and self from function parameters'''
return {k: v for (k, v) in params.items() if v is not None and k != 'self'} |
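_prep_params is the kind of helper that is usually handed locals() from inside an API wrapper so that only explicitly supplied arguments are forwarded. The call pattern below is an assumption for illustration (the Client class and search method are invented):

def _prep_params(params):
    '''Remove empty (None) valued keywords and self from function parameters'''
    return {k: v for (k, v) in params.items() if v is not None and k != 'self'}

class Client:
    def search(self, query, limit=None, offset=None):
        # locals() here is {'self': ..., 'query': ..., 'limit': ..., 'offset': ...}
        return _prep_params(dict(locals()))

print(Client().search('cats', limit=10))  # {'query': 'cats', 'limit': 10}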
def paintEvent(self, event):
"""
Runs the paint event for this item.
"""
painter = QtGui.QPainter()
painter.begin(self)
try:
x = 0
y = 2
w = self.width() - 1
h = self.height() - 3
palette = self.palette()
clr = palette.color(palette.WindowText)
clr.setAlpha(100)
painter.setPen(QtGui.QPen(clr))
if not self.isActive() and not self._hovered:
painter.setBrush(palette.color(palette.Button))
else:
painter.setBrush(palette.color(palette.Window))
painter.fillRect(x, y, w, h, painter.brush())
painter.drawLine(x, y, w, y)
painter.drawLine(w, y, w, h + 2)
if self.parent().indexOf(self) == 0:
painter.drawLine(x, y, x, h + 2)
# draw the drag buttons
if not self._locked:
center = self._dragLabel.geometry().center()
x = 6
y = center.y()
width = 3
painter.setBrush(palette.color(palette.Window).lighter(120))
painter.drawRect(x - width / 2, (y - width - 2) - width / 2, width, width)
painter.drawRect(x - width / 2, y - width / 2, width, width)
painter.drawRect(x - width / 2, (y + width + 2) - width / 2, width, width)
finally:
painter.end() | Runs the paint event for this item. | Below is the the instruction that describes the task:
### Input:
Runs the paint event for this item.
### Response:
def paintEvent(self, event):
"""
Runs the paint event for this item.
"""
painter = QtGui.QPainter()
painter.begin(self)
try:
x = 0
y = 2
w = self.width() - 1
h = self.height() - 3
palette = self.palette()
clr = palette.color(palette.WindowText)
clr.setAlpha(100)
painter.setPen(QtGui.QPen(clr))
if not self.isActive() and not self._hovered:
painter.setBrush(palette.color(palette.Button))
else:
painter.setBrush(palette.color(palette.Window))
painter.fillRect(x, y, w, h, painter.brush())
painter.drawLine(x, y, w, y)
painter.drawLine(w, y, w, h + 2)
if self.parent().indexOf(self) == 0:
painter.drawLine(x, y, x, h + 2)
# draw the drag buttons
if not self._locked:
center = self._dragLabel.geometry().center()
x = 6
y = center.y()
width = 3
painter.setBrush(palette.color(palette.Window).lighter(120))
painter.drawRect(x - width / 2, (y - width - 2) - width / 2, width, width)
painter.drawRect(x - width / 2, y - width / 2, width, width)
painter.drawRect(x - width / 2, (y + width + 2) - width / 2, width, width)
finally:
painter.end() |
async def clear(self):
"""Close all free connections in pool."""
with (await self._cond):
while self._free:
conn = self._free.popleft()
await conn.close()
self._cond.notify() | Close all free connections in pool. | Below is the the instruction that describes the task:
### Input:
Close all free connections in pool.
### Response:
async def clear(self):
"""Close all free connections in pool."""
with (await self._cond):
while self._free:
conn = self._free.popleft()
await conn.close()
self._cond.notify() |
def get_spider_stats(self, spider_name):
"""get-spider-stats <spider> - get stats of a running spider"""
if spider_name is None:
spider_name = self.spider_name
else:
self.spider_name = spider_name
if self.spider_name is None:
self.spider_name = self.list_running()[0].split(':')[-1]
return(self.jsonrpc_call('stats', 'get_stats', self.spider_name)) | get-spider-stats <spider> - get stats of a running spider | Below is the the instruction that describes the task:
### Input:
get-spider-stats <spider> - get stats of a running spider
### Response:
def get_spider_stats(self, spider_name):
"""get-spider-stats <spider> - get stats of a running spider"""
if spider_name is None:
spider_name = self.spider_name
else:
self.spider_name = spider_name
if self.spider_name is None:
self.spider_name = self.list_running()[0].split(':')[-1]
return(self.jsonrpc_call('stats', 'get_stats', self.spider_name)) |
def assert_no_text(self, *args, **kwargs):
"""
Asserts that the page or current node doesn't have the given text content, ignoring any
HTML tags.
Args:
*args: Variable length argument list for :class:`TextQuery`.
**kwargs: Arbitrary keyword arguments for :class:`TextQuery`.
Returns:
True
Raises:
ExpectationNotMet: If the assertion hasn't succeeded during the wait time.
"""
query = TextQuery(*args, **kwargs)
@self.synchronize(wait=query.wait)
def assert_no_text():
count = query.resolve_for(self)
if matches_count(count, query.options) and (
count > 0 or expects_none(query.options)):
raise ExpectationNotMet(query.negative_failure_message)
return True
return assert_no_text() | Asserts that the page or current node doesn't have the given text content, ignoring any
HTML tags.
Args:
*args: Variable length argument list for :class:`TextQuery`.
**kwargs: Arbitrary keyword arguments for :class:`TextQuery`.
Returns:
True
Raises:
ExpectationNotMet: If the assertion hasn't succeeded during the wait time. | Below is the the instruction that describes the task:
### Input:
Asserts that the page or current node doesn't have the given text content, ignoring any
HTML tags.
Args:
*args: Variable length argument list for :class:`TextQuery`.
**kwargs: Arbitrary keyword arguments for :class:`TextQuery`.
Returns:
True
Raises:
ExpectationNotMet: If the assertion hasn't succeeded during the wait time.
### Response:
def assert_no_text(self, *args, **kwargs):
"""
Asserts that the page or current node doesn't have the given text content, ignoring any
HTML tags.
Args:
*args: Variable length argument list for :class:`TextQuery`.
**kwargs: Arbitrary keyword arguments for :class:`TextQuery`.
Returns:
True
Raises:
ExpectationNotMet: If the assertion hasn't succeeded during the wait time.
"""
query = TextQuery(*args, **kwargs)
@self.synchronize(wait=query.wait)
def assert_no_text():
count = query.resolve_for(self)
if matches_count(count, query.options) and (
count > 0 or expects_none(query.options)):
raise ExpectationNotMet(query.negative_failure_message)
return True
return assert_no_text() |
def default(return_X_y=True):
"""credit default dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains the category of student or not, credit card balance,
and income.
y contains the outcome of default (0) or not (1).
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/ISLR/Default.html
"""
# y is binary
# recommend LogisticGAM
default = pd.read_csv(PATH + '/default.csv', index_col=0)
if return_X_y:
default = default.values
default[:,0] = np.unique(default[:,0], return_inverse=True)[1]
default[:,1] = np.unique(default[:,1], return_inverse=True)[1]
X = default[:,1:]
y = default[:,0]
return _clean_X_y(X, y)
return default | credit default dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains the category of student or not, credit card balance,
and income.
y contains the outcome of default (0) or not (1).
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/ISLR/Default.html | Below is the the instruction that describes the task:
### Input:
credit default dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains the category of student or not, credit card balance,
and income.
y contains the outcome of default (0) or not (1).
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/ISLR/Default.html
### Response:
def default(return_X_y=True):
"""credit default dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains the category of student or not, credit card balance,
and income.
y contains the outcome of default (0) or not (1).
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/ISLR/Default.html
"""
# y is binary
# recommend LogisticGAM
default = pd.read_csv(PATH + '/default.csv', index_col=0)
if return_X_y:
default = default.values
default[:,0] = np.unique(default[:,0], return_inverse=True)[1]
default[:,1] = np.unique(default[:,1], return_inverse=True)[1]
X = default[:,1:]
y = default[:,0]
return _clean_X_y(X, y)
return default |
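The two categorical columns above are integer-encoded with np.unique(..., return_inverse=True). A small standalone illustration of that trick (the example labels are made up):

import numpy as np

col = np.array(['Yes', 'No', 'No', 'Yes', 'No'])
uniques, codes = np.unique(col, return_inverse=True)
# Each entry is replaced by the index of its value in the sorted unique array.
print(uniques)  # ['No' 'Yes']
print(codes)    # [1 0 0 1 0]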
def is_same_key(key_1, key_2):
"""Extract the key from two host entries and compare them.
:param key_1: Host key
:type key_1: str
:param key_2: Host key
:type key_2: str
"""
    # The key format we get will be like '|1|2rUumCavEXWVaVyB5uMl6m85pZo=|Cp'
    # 'EL6l7VTY37T/fg/ihhNb/GPgs= ssh-rsa AAAAB', so we only need to compare
    # the part that starts with 'ssh-rsa' after the '= ', because the hash
    # value at the beginning will change each time.
k_1 = key_1.split('= ')[1]
k_2 = key_2.split('= ')[1]
return k_1 == k_2 | Extract the key from two host entries and compare them.
:param key_1: Host key
:type key_1: str
:param key_2: Host key
:type key_2: str | Below is the the instruction that describes the task:
### Input:
Extract the key from two host entries and compare them.
:param key_1: Host key
:type key_1: str
:param key_2: Host key
:type key_2: str
### Response:
def is_same_key(key_1, key_2):
"""Extract the key from two host entries and compare them.
:param key_1: Host key
:type key_1: str
:param key_2: Host key
:type key_2: str
"""
    # The key format we get will be like '|1|2rUumCavEXWVaVyB5uMl6m85pZo=|Cp'
    # 'EL6l7VTY37T/fg/ihhNb/GPgs= ssh-rsa AAAAB', so we only need to compare
    # the part that starts with 'ssh-rsa' after the '= ', because the hash
    # value at the beginning will change each time.
k_1 = key_1.split('= ')[1]
k_2 = key_2.split('= ')[1]
return k_1 == k_2 |
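With hashed known_hosts entries, the |1|salt|hash= prefix differs on every write even for the same host key, which is why only the text after the first '= ' is compared. A small illustration with invented entries:

# Both entries carry the same key material after the first '= ', so they match.
key_a = '|1|2rUumCavEXWVaVyB5uMl6m85pZo=|CpEL6l7VTY37T/fg/ihhNb/GPgs= ssh-rsa AAAAB3Nza'
key_b = '|1|Zz9yW8pQ4rT1uV2wX3yZ4aB5cDE=|9QfPz0QyDJ1oXvM0XO6rXAbcdEfg= ssh-rsa AAAAB3Nza'
print(key_a.split('= ')[1] == key_b.split('= ')[1])  # True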
def resolve_alias(s: sym.Symbol, ns: Optional[Namespace] = None) -> sym.Symbol:
"""Resolve the aliased symbol in the current namespace."""
if s in _SPECIAL_FORMS:
return s
ns = Maybe(ns).or_else(get_current_ns)
if s.ns is not None:
aliased_ns = ns.get_alias(sym.symbol(s.ns))
if aliased_ns is not None:
return sym.symbol(s.name, aliased_ns.name)
else:
return s
else:
which_var = ns.find(sym.symbol(s.name))
if which_var is not None:
return sym.symbol(which_var.name.name, which_var.ns.name)
else:
return sym.symbol(s.name, ns=ns.name) | Resolve the aliased symbol in the current namespace. | Below is the the instruction that describes the task:
### Input:
Resolve the aliased symbol in the current namespace.
### Response:
def resolve_alias(s: sym.Symbol, ns: Optional[Namespace] = None) -> sym.Symbol:
"""Resolve the aliased symbol in the current namespace."""
if s in _SPECIAL_FORMS:
return s
ns = Maybe(ns).or_else(get_current_ns)
if s.ns is not None:
aliased_ns = ns.get_alias(sym.symbol(s.ns))
if aliased_ns is not None:
return sym.symbol(s.name, aliased_ns.name)
else:
return s
else:
which_var = ns.find(sym.symbol(s.name))
if which_var is not None:
return sym.symbol(which_var.name.name, which_var.ns.name)
else:
return sym.symbol(s.name, ns=ns.name) |
def _build_query_dict(self, formdata=None):
"""
Take submitted data from form and create a query dict to be
used in a Q object (or filter)
"""
if self.is_valid() and formdata is None:
formdata = self.cleaned_data
key = "{field}__{operator}".format(**formdata)
if formdata['operator'] == "isnull":
return {key: None}
elif formdata['operator'] == "istrue":
return {formdata['field']: True}
elif formdata['operator'] == "isfalse":
return {formdata['field']: False}
return {key: formdata['value']} | Take submitted data from form and create a query dict to be
used in a Q object (or filter) | Below is the the instruction that describes the task:
### Input:
Take submitted data from form and create a query dict to be
used in a Q object (or filter)
### Response:
def _build_query_dict(self, formdata=None):
"""
Take submitted data from form and create a query dict to be
used in a Q object (or filter)
"""
if self.is_valid() and formdata is None:
formdata = self.cleaned_data
key = "{field}__{operator}".format(**formdata)
if formdata['operator'] == "isnull":
return {key: None}
elif formdata['operator'] == "istrue":
return {formdata['field']: True}
elif formdata['operator'] == "isfalse":
return {formdata['field']: False}
return {key: formdata['value']} |
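Given cleaned form data, the method above simply assembles a Django field-lookup keyword. A worked example of the mapping (the field name is hypothetical; the special-cased operators follow the branches shown above):

formdata = {'field': 'email', 'operator': 'icontains', 'value': '@example.com'}

key = '{field}__{operator}'.format(**formdata)
print({key: formdata['value']})   # {'email__icontains': '@example.com'}

# The special-cased operators from the method above would instead yield:
#   'isnull'  -> {'email__isnull': None}
#   'istrue'  -> {'email': True}
#   'isfalse' -> {'email': False}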
def lookup_default(self, name):
"""Looks up the default for a parameter name. This by default
looks into the :attr:`default_map` if available.
"""
if self.default_map is not None:
rv = self.default_map.get(name)
if callable(rv):
rv = rv()
return rv | Looks up the default for a parameter name. This by default
looks into the :attr:`default_map` if available. | Below is the the instruction that describes the task:
### Input:
Looks up the default for a parameter name. This by default
looks into the :attr:`default_map` if available.
### Response:
def lookup_default(self, name):
"""Looks up the default for a parameter name. This by default
looks into the :attr:`default_map` if available.
"""
if self.default_map is not None:
rv = self.default_map.get(name)
if callable(rv):
rv = rv()
return rv |
def _GetFileAndLine():
"""Returns (filename, linenumber) for the stack frame."""
# Use sys._getframe(). This avoids creating a traceback object.
# pylint: disable=protected-access
f = _sys._getframe()
# pylint: enable=protected-access
our_file = f.f_code.co_filename
f = f.f_back
while f:
code = f.f_code
if code.co_filename != our_file:
return (code.co_filename, f.f_lineno)
f = f.f_back
return ('<unknown>', 0) | Returns (filename, linenumber) for the stack frame. | Below is the the instruction that describes the task:
### Input:
Returns (filename, linenumber) for the stack frame.
### Response:
def _GetFileAndLine():
"""Returns (filename, linenumber) for the stack frame."""
# Use sys._getframe(). This avoids creating a traceback object.
# pylint: disable=protected-access
f = _sys._getframe()
# pylint: enable=protected-access
our_file = f.f_code.co_filename
f = f.f_back
while f:
code = f.f_code
if code.co_filename != our_file:
return (code.co_filename, f.f_lineno)
f = f.f_back
return ('<unknown>', 0) |
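The loop above keeps climbing the stack until it leaves the logging module's own file, so the reported location is the call site of the logging call rather than the logging code itself. For contrast, a plain sys._getframe(1) returns the immediate caller (a minimal sketch, not the original module's implementation):

import sys

def immediate_caller():
    # Frame 1 is whoever called this function; no file-based filtering.
    caller = sys._getframe(1)
    return caller.f_code.co_filename, caller.f_lineno

print(immediate_caller())  # (this script's filename, the line number of this call)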
def checked_run(cmd):
"""Prepare and run a subprocess cmd, checking for successful completion."""
completed_process = run(cmd)
if completed_process.returncode > 0:
print("Command failed! Hanging around in case someone needs a "
"docker connection. (Ctrl-C to quit now)")
time.sleep(300)
raise RuntimeError
return completed_process | Prepare and run a subprocess cmd, checking for successful completion. | Below is the the instruction that describes the task:
### Input:
Prepare and run a subprocess cmd, checking for successful completion.
### Response:
def checked_run(cmd):
"""Prepare and run a subprocess cmd, checking for successful completion."""
completed_process = run(cmd)
if completed_process.returncode > 0:
print("Command failed! Hanging around in case someone needs a "
"docker connection. (Ctrl-C to quit now)")
time.sleep(300)
raise RuntimeError
return completed_process |
def from_dict(input_dict):
"""
Instantiate an object of a derived class using the information
in input_dict (built by the to_dict method of the derived class).
More specifically, after reading the derived class from input_dict,
it calls the method _build_from_input_dict of the derived class.
    Note: This method should not be overridden in the derived class. In case
    it is needed, please override _build_from_input_dict instead.
:param dict input_dict: Dictionary with all the information needed to
instantiate the object.
"""
import copy
input_dict = copy.deepcopy(input_dict)
mapping_class = input_dict.pop('class')
input_dict["name"] = str(input_dict["name"])
import GPy
mapping_class = eval(mapping_class)
return mapping_class._build_from_input_dict(mapping_class, input_dict) | Instantiate an object of a derived class using the information
in input_dict (built by the to_dict method of the derived class).
More specifically, after reading the derived class from input_dict,
it calls the method _build_from_input_dict of the derived class.
    Note: This method should not be overridden in the derived class. In case
    it is needed, please override _build_from_input_dict instead.
:param dict input_dict: Dictionary with all the information needed to
instantiate the object. | Below is the the instruction that describes the task:
### Input:
Instantiate an object of a derived class using the information
in input_dict (built by the to_dict method of the derived class).
More specifically, after reading the derived class from input_dict,
it calls the method _build_from_input_dict of the derived class.
    Note: This method should not be overridden in the derived class. In case
    it is needed, please override _build_from_input_dict instead.
:param dict input_dict: Dictionary with all the information needed to
instantiate the object.
### Response:
def from_dict(input_dict):
"""
Instantiate an object of a derived class using the information
in input_dict (built by the to_dict method of the derived class).
More specifically, after reading the derived class from input_dict,
it calls the method _build_from_input_dict of the derived class.
    Note: This method should not be overridden in the derived class. In case
    it is needed, please override _build_from_input_dict instead.
:param dict input_dict: Dictionary with all the information needed to
instantiate the object.
"""
import copy
input_dict = copy.deepcopy(input_dict)
mapping_class = input_dict.pop('class')
input_dict["name"] = str(input_dict["name"])
import GPy
mapping_class = eval(mapping_class)
return mapping_class._build_from_input_dict(mapping_class, input_dict) |
def cli(ctx, obj):
"""Show Alerta server and client versions."""
client = obj['client']
click.echo('alerta {}'.format(client.mgmt_status()['version']))
click.echo('alerta client {}'.format(client_version))
click.echo('requests {}'.format(requests_version))
click.echo('click {}'.format(click.__version__))
ctx.exit() | Show Alerta server and client versions. | Below is the the instruction that describes the task:
### Input:
Show Alerta server and client versions.
### Response:
def cli(ctx, obj):
"""Show Alerta server and client versions."""
client = obj['client']
click.echo('alerta {}'.format(client.mgmt_status()['version']))
click.echo('alerta client {}'.format(client_version))
click.echo('requests {}'.format(requests_version))
click.echo('click {}'.format(click.__version__))
ctx.exit() |
def status(directory: str) -> Tuple[RepositoryLocation, Branch, Commit]:
"""
Gets the status of the subrepo that has been cloned into the given directory.
:param directory: the directory containing the subrepo
:return: a tuple consisting of the URL the subrepo is tracking, the branch that has been checked out and the commit
reference
"""
if not os.path.exists(directory):
raise ValueError(f"No subrepo found in \"{directory}\"")
try:
result = run([GIT_COMMAND, _GIT_SUBREPO_COMMAND, _GIT_SUBREPO_STATUS_COMMAND, _GIT_SUBREPO_VERBOSE_FLAG,
get_directory_relative_to_git_root(directory)],
execution_directory=get_git_root_directory(directory))
except RunException as e:
if "Command failed: 'git rev-parse --verify HEAD'" in e.stderr:
raise NotAGitSubrepoException(directory) from e
raise e
if re.search("is not a subrepo$", result):
raise NotAGitSubrepoException(directory)
url = re.search("Remote URL:\s*(.*)", result).group(1)
branch = re.search("Tracking Branch:\s*(.*)", result).group(1)
commit = re.search("Pulled Commit:\s*(.*)", result).group(1)
return url, branch, commit | Gets the status of the subrepo that has been cloned into the given directory.
:param directory: the directory containing the subrepo
:return: a tuple consisting of the URL the subrepo is tracking, the branch that has been checked out and the commit
reference | Below is the the instruction that describes the task:
### Input:
Gets the status of the subrepo that has been cloned into the given directory.
:param directory: the directory containing the subrepo
:return: a tuple consisting of the URL the subrepo is tracking, the branch that has been checked out and the commit
reference
### Response:
def status(directory: str) -> Tuple[RepositoryLocation, Branch, Commit]:
"""
Gets the status of the subrepo that has been cloned into the given directory.
:param directory: the directory containing the subrepo
:return: a tuple consisting of the URL the subrepo is tracking, the branch that has been checked out and the commit
reference
"""
if not os.path.exists(directory):
raise ValueError(f"No subrepo found in \"{directory}\"")
try:
result = run([GIT_COMMAND, _GIT_SUBREPO_COMMAND, _GIT_SUBREPO_STATUS_COMMAND, _GIT_SUBREPO_VERBOSE_FLAG,
get_directory_relative_to_git_root(directory)],
execution_directory=get_git_root_directory(directory))
except RunException as e:
if "Command failed: 'git rev-parse --verify HEAD'" in e.stderr:
raise NotAGitSubrepoException(directory) from e
raise e
if re.search("is not a subrepo$", result):
raise NotAGitSubrepoException(directory)
url = re.search("Remote URL:\s*(.*)", result).group(1)
branch = re.search("Tracking Branch:\s*(.*)", result).group(1)
commit = re.search("Pulled Commit:\s*(.*)", result).group(1)
return url, branch, commit |
def setDefaults(self, instance):
"""Only call during object initialization, this function sets fields
to schema defaults. It's adapted from the original to support
IAcquireFieldDefaults adapters. If IAcquireFieldDefaults adapter
does not find a suitable field, or that field's value is Falseish,
this function will not continue with the normal default machinery.
"""
for field in self.values():
# ## bika addition: we fire adapters for IAcquireFieldDefaults.
# If IAcquireFieldDefaults returns None, this signifies "ignore" return.
# First adapter found with non-None result, wins.
value = None
if shasattr(field, 'acquire'):
adapters = {}
for adapter in getAdapters((instance,), IAcquireFieldDefaults):
sort_val = getattr(adapter[1], 'sort', 1000)
if sort_val not in adapters:
adapters[sort_val] = []
adapters[sort_val].append(adapter)
if adapters:
keys = sorted(adapters.keys())
keys.reverse()
adapter = adapters[keys[0]]
_value = adapter[0][1](field)
if _value is not None:
value = _value
if field.getName().lower() == 'id':
continue
# If our adapter reflects a value for a reference field, it will
# be permitted.
if field.type == "reference" and not value:
continue
default = value if value else field.getDefault(instance)
# always set defaults on writable fields
mutator = field.getMutator(instance)
if mutator is None:
continue
args = (default,)
kw = {'field': field.__name__,
'_initializing_': True}
if shasattr(field, 'default_content_type'):
            # specify a mimetype if the mutator takes a mimetype argument; if
# the schema supplies a default, we honour that, otherwise we use
# the site property
default_content_type = field.default_content_type
if default_content_type is None:
default_content_type = getDefaultContentType(instance)
kw['mimetype'] = default_content_type
mapply(mutator, *args, **kw) | Only call during object initialization, this function sets fields
to schema defaults. It's adapted from the original to support
IAcquireFieldDefaults adapters. If IAcquireFieldDefaults adapter
does not find a suitable field, or that field's value is Falseish,
this function will not continue with the normal default machinery. | Below is the the instruction that describes the task:
### Input:
Only call during object initialization, this function sets fields
to schema defaults. It's adapted from the original to support
IAcquireFieldDefaults adapters. If IAcquireFieldDefaults adapter
does not find a suitable field, or that field's value is Falseish,
this function will not continue with the normal default machinery.
### Response:
def setDefaults(self, instance):
"""Only call during object initialization, this function sets fields
to schema defaults. It's adapted from the original to support
IAcquireFieldDefaults adapters. If IAcquireFieldDefaults adapter
does not find a suitable field, or that field's value is Falseish,
this function will not continue with the normal default machinery.
"""
for field in self.values():
# ## bika addition: we fire adapters for IAcquireFieldDefaults.
# If IAcquireFieldDefaults returns None, this signifies "ignore" return.
# First adapter found with non-None result, wins.
value = None
if shasattr(field, 'acquire'):
adapters = {}
for adapter in getAdapters((instance,), IAcquireFieldDefaults):
sort_val = getattr(adapter[1], 'sort', 1000)
if sort_val not in adapters:
adapters[sort_val] = []
adapters[sort_val].append(adapter)
if adapters:
keys = sorted(adapters.keys())
keys.reverse()
adapter = adapters[keys[0]]
_value = adapter[0][1](field)
if _value is not None:
value = _value
if field.getName().lower() == 'id':
continue
# If our adapter reflects a value for a reference field, it will
# be permitted.
if field.type == "reference" and not value:
continue
default = value if value else field.getDefault(instance)
# always set defaults on writable fields
mutator = field.getMutator(instance)
if mutator is None:
continue
args = (default,)
kw = {'field': field.__name__,
'_initializing_': True}
if shasattr(field, 'default_content_type'):
            # specify a mimetype if the mutator takes a mimetype argument; if
# the schema supplies a default, we honour that, otherwise we use
# the site property
default_content_type = field.default_content_type
if default_content_type is None:
default_content_type = getDefaultContentType(instance)
kw['mimetype'] = default_content_type
mapply(mutator, *args, **kw) |
def pipe2(flags=0):
"""
Wrapper around ``pipe2(2)``
:param flags:
Optional flags to set. This should almost always include O_CLOEXEC so
that the resulting code is not racy (see the discussion about O_CLOEXEC
to understand why this flag is essential). It can also include
O_NONBLOCK or O_DIRECT, depending on the desired behavior.
:returns:
A pair of descriptors (read_end, write_end)
"""
pair = (c_int * 2)()
_pipe2(byref(pair), flags)
return pair[0], pair[1] | Wrapper around ``pipe2(2)``
:param flags:
Optional flags to set. This should almost always include O_CLOEXEC so
that the resulting code is not racy (see the discussion about O_CLOEXEC
to understand why this flag is essential). It can also include
O_NONBLOCK or O_DIRECT, depending on the desired behavior.
:returns:
A pair of descriptors (read_end, write_end) | Below is the the instruction that describes the task:
### Input:
Wrapper around ``pipe2(2)``
:param flags:
Optional flags to set. This should almost always include O_CLOEXEC so
that the resulting code is not racy (see the discussion about O_CLOEXEC
to understand why this flag is essential). It can also include
O_NONBLOCK or O_DIRECT, depending on the desired behavior.
:returns:
A pair of descriptors (read_end, write_end)
### Response:
def pipe2(flags=0):
"""
Wrapper around ``pipe2(2)``
:param flags:
Optional flags to set. This should almost always include O_CLOEXEC so
that the resulting code is not racy (see the discussion about O_CLOEXEC
to understand why this flag is essential). It can also include
O_NONBLOCK or O_DIRECT, depending on the desired behavior.
:returns:
A pair of descriptors (read_end, write_end)
"""
pair = (c_int * 2)()
_pipe2(byref(pair), flags)
return pair[0], pair[1] |
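On Python 3.3+ under Linux, os.pipe2 exposes the same system call as the ctypes wrapper above, with flags combined by bitwise OR. A short usage sketch:

import os

read_end, write_end = os.pipe2(os.O_CLOEXEC | os.O_NONBLOCK)

os.write(write_end, b'ping')
print(os.read(read_end, 4))   # b'ping'

os.close(read_end)
os.close(write_end)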
def range(self):
"""A tuple containing the numeric range for this Slot.
The Python equivalent of the CLIPS deftemplate-slot-range function.
"""
data = clips.data.DataObject(self._env)
lib.EnvDeftemplateSlotRange(
self._env, self._tpl, self._name, data.byref)
return tuple(data.value) if isinstance(data.value, list) else () | A tuple containing the numeric range for this Slot.
The Python equivalent of the CLIPS deftemplate-slot-range function. | Below is the the instruction that describes the task:
### Input:
A tuple containing the numeric range for this Slot.
The Python equivalent of the CLIPS deftemplate-slot-range function.
### Response:
def range(self):
"""A tuple containing the numeric range for this Slot.
The Python equivalent of the CLIPS deftemplate-slot-range function.
"""
data = clips.data.DataObject(self._env)
lib.EnvDeftemplateSlotRange(
self._env, self._tpl, self._name, data.byref)
return tuple(data.value) if isinstance(data.value, list) else () |
def edge_by_id(self, edge):
"""
Returns the edge that connects the head_id and tail_id nodes
"""
try:
head, tail, data = self.edges[edge]
except KeyError:
head, tail = None, None
raise GraphError('Invalid edge %s' % edge)
return (head, tail) | Returns the edge that connects the head_id and tail_id nodes | Below is the the instruction that describes the task:
### Input:
Returns the edge that connects the head_id and tail_id nodes
### Response:
def edge_by_id(self, edge):
"""
Returns the edge that connects the head_id and tail_id nodes
"""
try:
head, tail, data = self.edges[edge]
except KeyError:
head, tail = None, None
raise GraphError('Invalid edge %s' % edge)
return (head, tail) |
def title(self, category):
""" Return the total printed length of this category item.
"""
return sum(
[self.getWidth(category, x) for x in self.fields]) | Return the total printed length of this category item. | Below is the the instruction that describes the task:
### Input:
Return the total printed length of this category item.
### Response:
def title(self, category):
""" Return the total printed length of this category item.
"""
return sum(
[self.getWidth(category, x) for x in self.fields]) |
def logProbability(self, distn):
"""Form of distribution must be an array of counts in order of self.keys."""
x = numpy.asarray(distn)
n = x.sum()
return (logFactorial(n) - numpy.sum([logFactorial(k) for k in x]) +
numpy.sum(x * numpy.log(self.dist.pmf))) | Form of distribution must be an array of counts in order of self.keys. | Below is the the instruction that describes the task:
### Input:
Form of distribution must be an array of counts in order of self.keys.
### Response:
def logProbability(self, distn):
"""Form of distribution must be an array of counts in order of self.keys."""
x = numpy.asarray(distn)
n = x.sum()
return (logFactorial(n) - numpy.sum([logFactorial(k) for k in x]) +
numpy.sum(x * numpy.log(self.dist.pmf))) |
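The expression above is the multinomial log-pmf, log P = log n! - sum_i log k_i! + sum_i k_i log p_i, with logFactorial presumably meaning log(n!). A standalone check against scipy (counts and probabilities are made up for the example):

import math
import numpy as np
from scipy import stats

counts = np.array([3, 1, 2])
probs = np.array([0.5, 0.2, 0.3])
n = int(counts.sum())

# log n! - sum(log k_i!) + sum(k_i * log p_i); lgamma(k + 1) == log(k!)
log_p = (math.lgamma(n + 1)
         - sum(math.lgamma(int(k) + 1) for k in counts)
         + float(np.sum(counts * np.log(probs))))

print(round(log_p, 6))
print(round(float(stats.multinomial.logpmf(counts, n=n, p=probs)), 6))  # matches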
def cmd(self, *args, **kwargs):
'''adb command, add -s serial by default. return the subprocess.Popen object.'''
serial = self.device_serial()
if serial:
if " " in serial: # TODO how to include special chars on command line
serial = "'%s'" % serial
return self.raw_cmd(*["-s", serial] + list(args))
else:
return self.raw_cmd(*args) | adb command, add -s serial by default. return the subprocess.Popen object. | Below is the the instruction that describes the task:
### Input:
adb command, add -s serial by default. return the subprocess.Popen object.
### Response:
def cmd(self, *args, **kwargs):
'''adb command, add -s serial by default. return the subprocess.Popen object.'''
serial = self.device_serial()
if serial:
if " " in serial: # TODO how to include special chars on command line
serial = "'%s'" % serial
return self.raw_cmd(*["-s", serial] + list(args))
else:
return self.raw_cmd(*args) |
def search_series(self, name=None, imdb_id=None, zap2it_id=None):
"""Search series"""
# perform the request
params = {'name': name, 'imdbId': imdb_id, 'zap2itId': zap2it_id}
r = self.session.get(self.base_url + '/search/series', params=params)
if r.status_code == 404:
return None
r.raise_for_status()
return r.json()['data'] | Search series | Below is the the instruction that describes the task:
### Input:
Search series
### Response:
def search_series(self, name=None, imdb_id=None, zap2it_id=None):
"""Search series"""
# perform the request
params = {'name': name, 'imdbId': imdb_id, 'zap2itId': zap2it_id}
r = self.session.get(self.base_url + '/search/series', params=params)
if r.status_code == 404:
return None
r.raise_for_status()
return r.json()['data'] |
def _GetSubFileEntries(self):
"""Retrieves sub file entries.
Yields:
TARFileEntry: a sub file entry.
"""
tar_file = self._file_system.GetTARFile()
if self._directory is None:
self._directory = self._GetDirectory()
if self._directory and tar_file:
for path_spec in self._directory.entries:
location = getattr(path_spec, 'location', None)
if location is None:
continue
kwargs = {}
try:
kwargs['tar_info'] = tar_file.getmember(location[1:])
except KeyError:
kwargs['is_virtual'] = True
yield TARFileEntry(
self._resolver_context, self._file_system, path_spec, **kwargs) | Retrieves sub file entries.
Yields:
TARFileEntry: a sub file entry. | Below is the the instruction that describes the task:
### Input:
Retrieves sub file entries.
Yields:
TARFileEntry: a sub file entry.
### Response:
def _GetSubFileEntries(self):
"""Retrieves sub file entries.
Yields:
TARFileEntry: a sub file entry.
"""
tar_file = self._file_system.GetTARFile()
if self._directory is None:
self._directory = self._GetDirectory()
if self._directory and tar_file:
for path_spec in self._directory.entries:
location = getattr(path_spec, 'location', None)
if location is None:
continue
kwargs = {}
try:
kwargs['tar_info'] = tar_file.getmember(location[1:])
except KeyError:
kwargs['is_virtual'] = True
yield TARFileEntry(
self._resolver_context, self._file_system, path_spec, **kwargs) |
def description(self):
"""
A list of the metrics this query will ask for.
"""
if 'metrics' in self.raw:
metrics = self.raw['metrics']
head = metrics[0:-1] or metrics[0:1]
text = ", ".join(head)
if len(metrics) > 1:
tail = metrics[-1]
text = text + " and " + tail
else:
text = 'n/a'
return text | A list of the metrics this query will ask for. | Below is the the instruction that describes the task:
### Input:
A list of the metrics this query will ask for.
### Response:
def description(self):
"""
A list of the metrics this query will ask for.
"""
if 'metrics' in self.raw:
metrics = self.raw['metrics']
head = metrics[0:-1] or metrics[0:1]
text = ", ".join(head)
if len(metrics) > 1:
tail = metrics[-1]
text = text + " and " + tail
else:
text = 'n/a'
return text |
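The head/tail split above is just the usual way of rendering a list as "a, b and c". A tiny demonstration with made-up metric names:

metrics = ['pageviews', 'sessions', 'bounce rate']

head = metrics[0:-1] or metrics[0:1]
text = ", ".join(head)
if len(metrics) > 1:
    text = text + " and " + metrics[-1]
print(text)   # pageviews, sessions and bounce rate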
def paths(self):
"""The list of search paths. It is built from registered finders, which
has ``paths`` property. Can be useful for compilers to resolve internal
dependencies.
"""
if not hasattr(self, '_paths'):
paths = []
for finder in self.finders:
if hasattr(finder, 'paths'):
paths.extend(finder.paths)
self._paths = paths
return self._paths | The list of search paths. It is built from registered finders, which
has ``paths`` property. Can be useful for compilers to resolve internal
dependencies. | Below is the the instruction that describes the task:
### Input:
The list of search paths. It is built from registered finders, which
has ``paths`` property. Can be useful for compilers to resolve internal
dependencies.
### Response:
def paths(self):
"""The list of search paths. It is built from registered finders, which
has ``paths`` property. Can be useful for compilers to resolve internal
dependencies.
"""
if not hasattr(self, '_paths'):
paths = []
for finder in self.finders:
if hasattr(finder, 'paths'):
paths.extend(finder.paths)
self._paths = paths
return self._paths |
def _get_go2nthdridx(self, gos_all):
"""Get GO IDs header index for each user GO ID and corresponding parent GO IDs."""
go2nthdridx = {}
# NtHdrIdx Namedtuple fields:
# * format_txt: Used to determine the format when writing Excel cells
# * hdr_idx: Value printed in an Excel cell
# shortcuts
obj = GrouperInit.NtMaker(self)
# Create go2nthdridx
for goid in gos_all:
go2nthdridx[goid] = obj.get_nt(goid)
return go2nthdridx | Get GO IDs header index for each user GO ID and corresponding parent GO IDs. | Below is the the instruction that describes the task:
### Input:
Get GO IDs header index for each user GO ID and corresponding parent GO IDs.
### Response:
def _get_go2nthdridx(self, gos_all):
"""Get GO IDs header index for each user GO ID and corresponding parent GO IDs."""
go2nthdridx = {}
# NtHdrIdx Namedtuple fields:
# * format_txt: Used to determine the format when writing Excel cells
# * hdr_idx: Value printed in an Excel cell
# shortcuts
obj = GrouperInit.NtMaker(self)
# Create go2nthdridx
for goid in gos_all:
go2nthdridx[goid] = obj.get_nt(goid)
return go2nthdridx |
def poll(self, timeout_ms=0, max_records=None):
"""Fetch data from assigned topics / partitions.
Records are fetched and returned in batches by topic-partition.
On each poll, consumer will try to use the last consumed offset as the
starting offset and fetch sequentially. The last consumed offset can be
manually set through :meth:`~kafka.KafkaConsumer.seek` or automatically
set as the last committed offset for the subscribed list of partitions.
Incompatible with iterator interface -- use one or the other, not both.
Arguments:
timeout_ms (int, optional): Milliseconds spent waiting in poll if
data is not available in the buffer. If 0, returns immediately
with any records that are available currently in the buffer,
else returns empty. Must not be negative. Default: 0
max_records (int, optional): The maximum number of records returned
in a single call to :meth:`~kafka.KafkaConsumer.poll`.
Default: Inherit value from max_poll_records.
Returns:
dict: Topic to list of records since the last fetch for the
subscribed list of topics and partitions.
"""
assert timeout_ms >= 0, 'Timeout must not be negative'
if max_records is None:
max_records = self.config['max_poll_records']
assert isinstance(max_records, int), 'max_records must be an integer'
assert max_records > 0, 'max_records must be positive'
# Poll for new data until the timeout expires
start = time.time()
remaining = timeout_ms
while True:
records = self._poll_once(remaining, max_records)
if records:
return records
elapsed_ms = (time.time() - start) * 1000
remaining = timeout_ms - elapsed_ms
if remaining <= 0:
return {} | Fetch data from assigned topics / partitions.
Records are fetched and returned in batches by topic-partition.
On each poll, consumer will try to use the last consumed offset as the
starting offset and fetch sequentially. The last consumed offset can be
manually set through :meth:`~kafka.KafkaConsumer.seek` or automatically
set as the last committed offset for the subscribed list of partitions.
Incompatible with iterator interface -- use one or the other, not both.
Arguments:
timeout_ms (int, optional): Milliseconds spent waiting in poll if
data is not available in the buffer. If 0, returns immediately
with any records that are available currently in the buffer,
else returns empty. Must not be negative. Default: 0
max_records (int, optional): The maximum number of records returned
in a single call to :meth:`~kafka.KafkaConsumer.poll`.
Default: Inherit value from max_poll_records.
Returns:
dict: Topic to list of records since the last fetch for the
subscribed list of topics and partitions. | Below is the the instruction that describes the task:
### Input:
Fetch data from assigned topics / partitions.
Records are fetched and returned in batches by topic-partition.
On each poll, consumer will try to use the last consumed offset as the
starting offset and fetch sequentially. The last consumed offset can be
manually set through :meth:`~kafka.KafkaConsumer.seek` or automatically
set as the last committed offset for the subscribed list of partitions.
Incompatible with iterator interface -- use one or the other, not both.
Arguments:
timeout_ms (int, optional): Milliseconds spent waiting in poll if
data is not available in the buffer. If 0, returns immediately
with any records that are available currently in the buffer,
else returns empty. Must not be negative. Default: 0
max_records (int, optional): The maximum number of records returned
in a single call to :meth:`~kafka.KafkaConsumer.poll`.
Default: Inherit value from max_poll_records.
Returns:
dict: Topic to list of records since the last fetch for the
subscribed list of topics and partitions.
### Response:
def poll(self, timeout_ms=0, max_records=None):
"""Fetch data from assigned topics / partitions.
Records are fetched and returned in batches by topic-partition.
On each poll, consumer will try to use the last consumed offset as the
starting offset and fetch sequentially. The last consumed offset can be
manually set through :meth:`~kafka.KafkaConsumer.seek` or automatically
set as the last committed offset for the subscribed list of partitions.
Incompatible with iterator interface -- use one or the other, not both.
Arguments:
timeout_ms (int, optional): Milliseconds spent waiting in poll if
data is not available in the buffer. If 0, returns immediately
with any records that are available currently in the buffer,
else returns empty. Must not be negative. Default: 0
max_records (int, optional): The maximum number of records returned
in a single call to :meth:`~kafka.KafkaConsumer.poll`.
Default: Inherit value from max_poll_records.
Returns:
dict: Topic to list of records since the last fetch for the
subscribed list of topics and partitions.
"""
assert timeout_ms >= 0, 'Timeout must not be negative'
if max_records is None:
max_records = self.config['max_poll_records']
assert isinstance(max_records, int), 'max_records must be an integer'
assert max_records > 0, 'max_records must be positive'
# Poll for new data until the timeout expires
start = time.time()
remaining = timeout_ms
while True:
records = self._poll_once(remaining, max_records)
if records:
return records
elapsed_ms = (time.time() - start) * 1000
remaining = timeout_ms - elapsed_ms
if remaining <= 0:
return {} |
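A minimal consumption loop built on poll() could look like the sketch below; kafka-python is assumed, and the broker address and topic name are placeholders rather than anything from the source.
from kafka import KafkaConsumer

# Hypothetical broker and topic, for illustration only.
consumer = KafkaConsumer('example-topic',
                         bootstrap_servers='localhost:9092',
                         auto_offset_reset='earliest')
try:
    while True:
        # poll() returns a dict mapping TopicPartition -> list of ConsumerRecord
        batches = consumer.poll(timeout_ms=1000, max_records=100)
        for tp, records in batches.items():
            for record in records:
                print(tp.topic, tp.partition, record.offset, record.value)
finally:
    consumer.close()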
def get_root_path(self, path):
"""See :py:meth:`~stash.repository.Repository.get_root_path`."""
# Look at the directories present in the current working directory. In
# case a .svn directory is present, we know we are in the root directory
# of a Subversion repository (for Subversion 1.7.x). In case no
# repository specific folder is found, and the current directory has a
# parent directory, look if a repository specific directory can be found
# in the parent directory.
while path != '/':
if '.svn' in os.listdir(path):
return path
path = os.path.abspath(os.path.join(path, os.pardir))
# No Subversion repository found.
return None | See :py:meth:`~stash.repository.Repository.get_root_path`. | Below is the the instruction that describes the task:
### Input:
See :py:meth:`~stash.repository.Repository.get_root_path`.
### Response:
def get_root_path(self, path):
"""See :py:meth:`~stash.repository.Repository.get_root_path`."""
# Look at the directories present in the current working directory. In
# case a .svn directory is present, we know we are in the root directory
# of a Subversion repository (for Subversion 1.7.x). In case no
# repository specific folder is found, and the current directory has a
# parent directory, look if a repository specific directory can be found
# in the parent directory.
while path != '/':
if '.svn' in os.listdir(path):
return path
path = os.path.abspath(os.path.join(path, os.pardir))
# No Subversion repository found.
return None |
def get_schema(frame, name, keys=None, con=None, dtype=None):
"""
Get the SQL db table schema for the given frame.
Parameters
----------
frame : DataFrame
name : string
name of SQL table
keys : string or sequence, default: None
columns to use as primary key
con: an open SQL database connection object or a SQLAlchemy connectable
Using SQLAlchemy makes it possible to use any DB supported by that
library, default: None
If a DBAPI2 object, only sqlite3 is supported.
dtype : dict of column name to SQL type, default None
Optional specifying the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
"""
pandas_sql = pandasSQL_builder(con=con)
return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype) | Get the SQL db table schema for the given frame.
Parameters
----------
frame : DataFrame
name : string
name of SQL table
keys : string or sequence, default: None
columns to use as primary key
con: an open SQL database connection object or a SQLAlchemy connectable
Using SQLAlchemy makes it possible to use any DB supported by that
library, default: None
If a DBAPI2 object, only sqlite3 is supported.
dtype : dict of column name to SQL type, default None
Optional specifying the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection. | Below is the the instruction that describes the task:
### Input:
Get the SQL db table schema for the given frame.
Parameters
----------
frame : DataFrame
name : string
name of SQL table
keys : string or sequence, default: None
columns to use as primary key
con: an open SQL database connection object or a SQLAlchemy connectable
Using SQLAlchemy makes it possible to use any DB supported by that
library, default: None
If a DBAPI2 object, only sqlite3 is supported.
dtype : dict of column name to SQL type, default None
Optional specifying the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
### Response:
def get_schema(frame, name, keys=None, con=None, dtype=None):
"""
Get the SQL db table schema for the given frame.
Parameters
----------
frame : DataFrame
name : string
name of SQL table
keys : string or sequence, default: None
columns to use as primary key
con: an open SQL database connection object or a SQLAlchemy connectable
Using SQLAlchemy makes it possible to use any DB supported by that
library, default: None
If a DBAPI2 object, only sqlite3 is supported.
dtype : dict of column name to SQL type, default None
Optional specifying the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
"""
pandas_sql = pandasSQL_builder(con=con)
return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype) |
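A hedged usage sketch: the helper is assumed importable from pandas.io.sql, and the table and column names are made up. With a DBAPI2 sqlite3 connection the generated DDL uses the sqlite3 fallback dialect.
import sqlite3
import pandas as pd
from pandas.io.sql import get_schema  # assumed import path for the helper above

df = pd.DataFrame({'id': [1, 2], 'name': ['a', 'b']})
con = sqlite3.connect(':memory:')          # DBAPI2 connection -> sqlite3 fallback
print(get_schema(df, 'example_table', keys='id', con=con))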
def term(name):
'''
Send a TERM to service via daemontools
CLI Example:
.. code-block:: bash
salt '*' daemontools.term <service name>
'''
cmd = 'svc -t {0}'.format(_service_path(name))
return not __salt__['cmd.retcode'](cmd, python_shell=False) | Send a TERM to service via daemontools
CLI Example:
.. code-block:: bash
salt '*' daemontools.term <service name> | Below is the the instruction that describes the task:
### Input:
Send a TERM to service via daemontools
CLI Example:
.. code-block:: bash
salt '*' daemontools.term <service name>
### Response:
def term(name):
'''
Send a TERM to service via daemontools
CLI Example:
.. code-block:: bash
salt '*' daemontools.term <service name>
'''
cmd = 'svc -t {0}'.format(_service_path(name))
return not __salt__['cmd.retcode'](cmd, python_shell=False) |
def check_exists(self):
'''
Check if resource exists and update self.exists.
Returns:
None: sets self.exists
'''
response = self.repo.api.http_request('HEAD', self.uri)
self.status_code = response.status_code
# resource exists
if self.status_code == 200:
self.exists = True
# resource no longer here
elif self.status_code == 410:
self.exists = False
# resource not found
elif self.status_code == 404:
self.exists = False
return self.exists | Check if resource exists and update self.exists.
Returns:
None: sets self.exists | Below is the the instruction that describes the task:
### Input:
Check if resource exists and update self.exists.
Returns:
None: sets self.exists
### Response:
def check_exists(self):
'''
Check if resource exists and update self.exists.
Returns:
None: sets self.exists
'''
response = self.repo.api.http_request('HEAD', self.uri)
self.status_code = response.status_code
# resource exists
if self.status_code == 200:
self.exists = True
# resource no longer here
elif self.status_code == 410:
self.exists = False
# resource not found
elif self.status_code == 404:
self.exists = False
return self.exists |
def __get_keywords(self):
"""
Get all the keywords related to this page
Returns:
An array of strings
"""
txt = self.text
for line in txt:
for word in split_words(line):
yield(word) | Get all the keywords related to this page
Returns:
An array of strings | Below is the the instruction that describes the task:
### Input:
Get all the keywords related to this page
Returns:
An array of strings
### Response:
def __get_keywords(self):
"""
Get all the keywords related to this page
Returns:
An array of strings
"""
txt = self.text
for line in txt:
for word in split_words(line):
yield(word) |
def paired_environment_phenotype_grid_circles(environment,
phenotypes, **kwargs):
"""
Plots the given environment (EnvironmentFile object) and phenotypes
(2d array of binary strings) onto the same image and saves
the image based on the name of the environment file. The environment file
will be represented by coloring square cells, while the phenotypes are
represented as concentric circles indicating the set of tasks the organism
at that location can perform.
By default, color is determined using the palettes in the EnvironmentFile
object passed as the first parameter. The easiest way to change color
palettes is to assign new palettes to environment.task_palette and
environment.resource_palette before calling this function. If either the
environment or phenotypes grids contain integers greater than 1, you should
pass a `denom` keyword argument indicating how to normalize them. Using
different denoms for the environment and phenotypes is not currently
supported (if you need to, you should probably just divide everything by
the appropriate denoms before passing them to this function).
Inputs:
environment - an EnvironmentFile object indicating the distribution
of resources and the appropriate palettes to use.
phenotypes - a 2d array of binary strings representing
the placement of phenotypes across the environment
kwargs:
denom - an integer indicating how to normalize numbers in the
environment and phenotype grids if necessary.
"""
denom, palette = get_kwargs(environment, kwargs)
plot_world(environment, palette=environment.resource_palette, denom=denom)
plot_phens_circles(phenotypes, palette=environment.task_palette)
plt.savefig("phenotype_niches_circles"+environment.name, dpi=1000)
return plt.gcf() | Plots the given environment (EnvironmentFile object) and phenotypes
(2d array of binary strings) onto the same image and saves
the image based on the name of the environment file. The environment file
will be represented by coloring square cells, while the phenotypes are
represented as concentric circles indicating the set of tasks the organism
at that location can perform.
By default, color is determined using the palettes in the EnvironmentFile
object passed as the first parameter. The easiest way to change color
palettes is to assign new palettes to environment.task_palette and
environment.resource_palette before calling this function. If either the
environment or phenotypes grids contain integers greater than 1, you should
pass a `denom` keyword argument indicating how to normalize them. Using
different denoms for the environment and phenotypes is not currently
supported (if you need to, you should probably just divide everything by
the appropriate denoms before passing them to this function).
Inputs:
environment - an EnvironmentFile object indicating the distribution
of resources and the appropriate palettes to use.
phenotypes - a 2d array of binary strings representing
the placement of phenotypes across the environment
kwargs:
denom - an integer indicating how to normalize numbers in the
environment and phenotype grids if necessary. | Below is the the instruction that describes the task:
### Input:
Plots the given environment (EnvironmentFile object) and phenotypes
(2d array of binary strings) onto the same image and saves
the image based on the name of the environment file. The environment file
will be represented by coloring square cells, while the phenotypes are
represented as concentric circles indicating the set of tasks the organism
at that location can perform.
By default, color is determined using the palettes in the EnvironmentFile
object passed as the first parameter. The easiest way to change color
palettes is to assign new palettes to environment.task_palette and
environment.resource_palette before calling this function. If either the
environment or phenotypes grids contain integers greater than 1, you should
pass a `denom` keyword argument indicating how to normalize them. Using
different denoms for the environment and phenotypes is not currently
supported (if you need to, you should probably just divide everything by
the appropriate denoms before passing them to this function).
Inputs:
environment - an EnvironmentFile object indicating the distribution
of resources and the appropriate palettes to use.
phenotypes - a 2d array of binary strings representing
the placement of phenotypes across the environment
kwargs:
denom - an integer indicating how to normalize numbers in the
environment and phenotype grids if necessary.
### Response:
def paired_environment_phenotype_grid_circles(environment,
phenotypes, **kwargs):
"""
Plots the given environment (EnvironmentFile object) and phenotypes
(2d array of binary strings) onto the same image and saves
the image based on the name of the environment file. The environment file
will be represented by coloring square cells, while the phenotypes are
represented as concentric circles indicating the set of tasks the organism
at that location can perform.
By default, color is determined using the palettes in the EnvironmentFile
object passed as the first parameter. The easiest way to change color
palettes is to assign new palettes to environment.task_palette and
environment.resource_palette before calling this function. If either the
environment or phenotypes grids contain integers greater than 1, you should
pass a `denom` keyword argument indicating how to normalize them. Using
different denoms for the environment and phenotypes is not currently
supported (if you need to, you should probably just divide everything by
the appropriate denoms before passing them to this function).
Inputs:
environment - an EnvironmentFile object indicating the distribution
of resources and the appropriate palettes to use.
phenotypes - a 2d array of binary strings representing
the placement of phenotypes across the environment
kwargs:
denom - an integer indicating how to normalize numbers in the
environment and phenotype grids if necessary.
"""
denom, palette = get_kwargs(environment, kwargs)
plot_world(environment, palette=environment.resource_palette, denom=denom)
plot_phens_circles(phenotypes, palette=environment.task_palette)
plt.savefig("phenotype_niches_circles"+environment.name, dpi=1000)
return plt.gcf() |
def save(self, destination, **kwargs):
"""Serialize and save a model.
Example:
end_model = EndModel(...)
end_model.train_model(...)
end_model.save("my_end_model.pkl")
"""
with open(destination, "wb") as f:
torch.save(self, f, **kwargs) | Serialize and save a model.
Example:
end_model = EndModel(...)
end_model.train_model(...)
end_model.save("my_end_model.pkl") | Below is the the instruction that describes the task:
### Input:
Serialize and save a model.
Example:
end_model = EndModel(...)
end_model.train_model(...)
end_model.save("my_end_model.pkl")
### Response:
def save(self, destination, **kwargs):
"""Serialize and save a model.
Example:
end_model = EndModel(...)
end_model.train_model(...)
end_model.save("my_end_model.pkl")
"""
with open(destination, "wb") as f:
torch.save(self, f, **kwargs) |
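The call pickles the entire model object, so loading it back is just torch.load on the same file. A stand-in nn.Module is used below, since the EndModel class itself is not shown here.
import torch
import torch.nn as nn

model = nn.Linear(4, 2)                     # stand-in for the trained model
with open("model.pkl", "wb") as f:
    torch.save(model, f)                    # same whole-object pickle that save() performs
with open("model.pkl", "rb") as f:
    restored = torch.load(f)
print(type(restored).__name__)              # Linear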
def apply(self, func, axis=0, broadcast=None, reduce=None,
result_type=None):
"""
Analogous to DataFrame.apply, for SparseDataFrame
Parameters
----------
func : function
Function to apply to each column
axis : {0, 1, 'index', 'columns'}
broadcast : bool, default False
For aggregation functions, return object of same size with values
propagated
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by result_type='broadcast'.
reduce : boolean or None, default None
Try to apply reduction procedures. If the DataFrame is empty,
apply will use reduce to determine whether the result should be a
Series or a DataFrame. If reduce is None (the default), apply's
return value will be guessed by calling func an empty Series (note:
while guessing, exceptions raised by func will be ignored). If
reduce is True a Series will always be returned, and if False a
DataFrame will always be returned.
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by result_type='reduce'.
result_type : {'expand', 'reduce', 'broadcast', None}
These only act when axis=1 {columns}:
* 'expand' : list-like results will be turned into columns.
* 'reduce' : return a Series if possible rather than expanding
list-like results. This is the opposite to 'expand'.
* 'broadcast' : results will be broadcast to the original shape
of the frame, the original index & columns will be retained.
The default behaviour (None) depends on the return value of the
applied function: list-like results will be returned as a Series
of those. However if the apply function returns a Series these
are expanded to columns.
.. versionadded:: 0.23.0
Returns
-------
applied : Series or SparseDataFrame
"""
if not len(self.columns):
return self
axis = self._get_axis_number(axis)
if isinstance(func, np.ufunc):
new_series = {}
for k, v in self.items():
applied = func(v)
applied.fill_value = func(v.fill_value)
new_series[k] = applied
return self._constructor(
new_series, index=self.index, columns=self.columns,
default_fill_value=self._default_fill_value,
default_kind=self._default_kind).__finalize__(self)
from pandas.core.apply import frame_apply
op = frame_apply(self,
func=func,
axis=axis,
reduce=reduce,
broadcast=broadcast,
result_type=result_type)
return op.get_result() | Analogous to DataFrame.apply, for SparseDataFrame
Parameters
----------
func : function
Function to apply to each column
axis : {0, 1, 'index', 'columns'}
broadcast : bool, default False
For aggregation functions, return object of same size with values
propagated
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by result_type='broadcast'.
reduce : boolean or None, default None
Try to apply reduction procedures. If the DataFrame is empty,
apply will use reduce to determine whether the result should be a
Series or a DataFrame. If reduce is None (the default), apply's
return value will be guessed by calling func an empty Series (note:
while guessing, exceptions raised by func will be ignored). If
reduce is True a Series will always be returned, and if False a
DataFrame will always be returned.
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by result_type='reduce'.
result_type : {'expand', 'reduce', 'broadcast', None}
These only act when axis=1 {columns}:
* 'expand' : list-like results will be turned into columns.
* 'reduce' : return a Series if possible rather than expanding
list-like results. This is the opposite to 'expand'.
* 'broadcast' : results will be broadcast to the original shape
of the frame, the original index & columns will be retained.
The default behaviour (None) depends on the return value of the
applied function: list-like results will be returned as a Series
of those. However if the apply function returns a Series these
are expanded to columns.
.. versionadded:: 0.23.0
Returns
-------
applied : Series or SparseDataFrame | Below is the the instruction that describes the task:
### Input:
Analogous to DataFrame.apply, for SparseDataFrame
Parameters
----------
func : function
Function to apply to each column
axis : {0, 1, 'index', 'columns'}
broadcast : bool, default False
For aggregation functions, return object of same size with values
propagated
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by result_type='broadcast'.
reduce : boolean or None, default None
Try to apply reduction procedures. If the DataFrame is empty,
apply will use reduce to determine whether the result should be a
Series or a DataFrame. If reduce is None (the default), apply's
return value will be guessed by calling func an empty Series (note:
while guessing, exceptions raised by func will be ignored). If
reduce is True a Series will always be returned, and if False a
DataFrame will always be returned.
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by result_type='reduce'.
result_type : {'expand', 'reduce', 'broadcast', None}
These only act when axis=1 {columns}:
* 'expand' : list-like results will be turned into columns.
* 'reduce' : return a Series if possible rather than expanding
list-like results. This is the opposite to 'expand'.
* 'broadcast' : results will be broadcast to the original shape
of the frame, the original index & columns will be retained.
The default behaviour (None) depends on the return value of the
applied function: list-like results will be returned as a Series
of those. However if the apply function returns a Series these
are expanded to columns.
.. versionadded:: 0.23.0
Returns
-------
applied : Series or SparseDataFrame
### Response:
def apply(self, func, axis=0, broadcast=None, reduce=None,
result_type=None):
"""
Analogous to DataFrame.apply, for SparseDataFrame
Parameters
----------
func : function
Function to apply to each column
axis : {0, 1, 'index', 'columns'}
broadcast : bool, default False
For aggregation functions, return object of same size with values
propagated
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by result_type='broadcast'.
reduce : boolean or None, default None
Try to apply reduction procedures. If the DataFrame is empty,
apply will use reduce to determine whether the result should be a
Series or a DataFrame. If reduce is None (the default), apply's
return value will be guessed by calling func an empty Series (note:
while guessing, exceptions raised by func will be ignored). If
reduce is True a Series will always be returned, and if False a
DataFrame will always be returned.
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by result_type='reduce'.
result_type : {'expand', 'reduce', 'broadcast', None}
These only act when axis=1 {columns}:
* 'expand' : list-like results will be turned into columns.
* 'reduce' : return a Series if possible rather than expanding
list-like results. This is the opposite to 'expand'.
* 'broadcast' : results will be broadcast to the original shape
of the frame, the original index & columns will be retained.
The default behaviour (None) depends on the return value of the
applied function: list-like results will be returned as a Series
of those. However if the apply function returns a Series these
are expanded to columns.
.. versionadded:: 0.23.0
Returns
-------
applied : Series or SparseDataFrame
"""
if not len(self.columns):
return self
axis = self._get_axis_number(axis)
if isinstance(func, np.ufunc):
new_series = {}
for k, v in self.items():
applied = func(v)
applied.fill_value = func(v.fill_value)
new_series[k] = applied
return self._constructor(
new_series, index=self.index, columns=self.columns,
default_fill_value=self._default_fill_value,
default_kind=self._default_kind).__finalize__(self)
from pandas.core.apply import frame_apply
op = frame_apply(self,
func=func,
axis=axis,
reduce=reduce,
broadcast=broadcast,
result_type=result_type)
return op.get_result() |
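A brief usage sketch, assuming a pandas version old enough (pre-1.0) to still ship SparseDataFrame: a ufunc goes through the sparse-preserving branch, while an ordinary reduction goes through frame_apply.
import numpy as np
import pandas as pd

# Requires pandas < 1.0, where SparseDataFrame still exists.
sdf = pd.SparseDataFrame({'a': [0.0, 1.0, 0.0], 'b': [2.0, 0.0, 0.0]},
                         default_fill_value=0.0)
print(sdf.apply(np.abs))      # ufunc branch: applied column by column, stays sparse
print(sdf.apply(np.sum))      # reduction branch: returns a Series of column sums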
def stream(self, callback=None):
"""
Runtime copy of job messages. This requires the 'stream' flag to be set to True, otherwise it will
not be able to copy any output, while it will block until the process exits.
:note: This function will block until it reaches end of stream or the process is no longer running.
:param callback: callback method that will get called for each received message
callback accepts 3 arguments
- level int: the log message levels, refer to the docs for available levels
and their meanings
- message str: the actual output message
- flags int: flags associated with this message
- 0x2 means EOF with success exit status
- 0x4 means EOF with error
for example (eof = flag & 0x6) eof will be true for the last message you will ever
receive on this callback.
Note: if callback is None, a default callback will be used that prints output on stdout/stderr
based on level.
:return: None
"""
if callback is None:
callback = Response.__default
if not callable(callback):
raise Exception('callback must be callable')
queue = 'stream:%s' % self.id
r = self._client._redis
# we can terminate quickly by checking if the process is not running and it has no queued output.
# if not self.running and r.llen(queue) == 0:
# return
while True:
data = r.blpop(queue, 10)
if data is None:
if not self.running:
break
continue
_, body = data
payload = json.loads(body.decode())
message = payload['message']
line = message['message']
meta = message['meta']
callback(meta >> 16, line, meta & 0xff)
if meta & 0x6 != 0:
break | Runtime copy of job messages. This requires the 'stream' flag to be set to True, otherwise it will
not be able to copy any output, while it will block until the process exits.
:note: This function will block until it reaches end of stream or the process is no longer running.
:param callback: callback method that will get called for each received message
callback accepts 3 arguments
- level int: the log message levels, refer to the docs for available levels
and their meanings
- message str: the actual output message
- flags int: flags associated with this message
- 0x2 means EOF with success exit status
- 0x4 means EOF with error
for example (eof = flag & 0x6) eof will be true for the last message you will ever
receive on this callback.
Note: if callback is None, a default callback will be used that prints output on stdout/stderr
based on level.
:return: None | Below is the the instruction that describes the task:
### Input:
Runtime copy of job messages. This requires the 'stream' flag to be set to True, otherwise it will
not be able to copy any output, while it will block until the process exits.
:note: This function will block until it reaches end of stream or the process is no longer running.
:param callback: callback method that will get called for each received message
callback accepts 3 arguments
- level int: the log message levels, refer to the docs for available levels
and their meanings
- message str: the actual output message
- flags int: flags associated with this message
- 0x2 means EOF with success exit status
- 0x4 means EOF with error
for example (eof = flag & 0x6) eof will be true for the last message you will ever
receive on this callback.
Note: if callback is None, a default callback will be used that prints output on stdout/stderr
based on level.
:return: None
### Response:
def stream(self, callback=None):
"""
Runtime copy of job messages. This requires the 'stream' flag to be set to True, otherwise it will
not be able to copy any output, while it will block until the process exits.
:note: This function will block until it reaches end of stream or the process is no longer running.
:param callback: callback method that will get called for each received message
callback accepts 3 arguments
- level int: the log message levels, refer to the docs for available levels
and their meanings
- message str: the actual output message
- flags int: flags associated with this message
- 0x2 means EOF with success exit status
- 0x4 means EOF with error
for example (eof = flag & 0x6) eof will be true for the last message you will ever
receive on this callback.
Note: if callback is None, a default callback will be used that prints output on stdout/stderr
based on level.
:return: None
"""
if callback is None:
callback = Response.__default
if not callable(callback):
raise Exception('callback must be callable')
queue = 'stream:%s' % self.id
r = self._client._redis
# we can terminate quickly by checking if the process is not running and it has no queued output.
# if not self.running and r.llen(queue) == 0:
# return
while True:
data = r.blpop(queue, 10)
if data is None:
if not self.running:
break
continue
_, body = data
payload = json.loads(body.decode())
message = payload['message']
line = message['message']
meta = message['meta']
callback(meta >> 16, line, meta & 0xff)
if meta & 0x6 != 0:
break |
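A hedged sketch of a caller-side callback; `response` stands for whatever Response object the client returned for a job started with the stream flag, so the final call is left commented.
import sys

def print_output(level, message, flags):
    # Level 1 is assumed to map to stdout and anything else to stderr; adjust to the real level map.
    out = sys.stdout if level == 1 else sys.stderr
    out.write(message + "\n")
    if flags & 0x6:                      # EOF bits, as documented above
        out.write("-- end of stream --\n")

# response.stream(print_output)         # blocks until the job's output queue reaches EOF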
def _log_likelihood_transit_plus_line(theta, params, model, t, data_flux,
err_flux, priorbounds):
'''
Given a batman TransitModel and its proposed parameters (theta), update the
batman params object with the proposed parameters and evaluate the gaussian
likelihood.
Note: the priorbounds are only needed to parse theta.
'''
u = []
for ix, key in enumerate(sorted(priorbounds.keys())):
if key == 'rp':
params.rp = theta[ix]
elif key == 't0':
params.t0 = theta[ix]
elif key == 'sma':
params.a = theta[ix]
elif key == 'incl':
params.inc = theta[ix]
elif key == 'period':
params.per = theta[ix]
elif key == 'ecc':
params.ecc = theta[ix]
elif key == 'omega':
params.w = theta[ix]
elif key == 'u_linear':
u.append(theta[ix])
elif key == 'u_quadratic':
u.append(theta[ix])
params.u = u
elif key == 'poly_order0':
poly_order0 = theta[ix]
elif key == 'poly_order1':
poly_order1 = theta[ix]
try:
poly_order0
except Exception as e:
poly_order0 = 0
else:
pass
transit = model.light_curve(params)
line = poly_order0 + t*poly_order1
model = transit + line
residuals = data_flux - model
log_likelihood = -0.5*(
np.sum((residuals/err_flux)**2 + np.log(2*np.pi*(err_flux)**2))
)
return log_likelihood | Given a batman TransitModel and its proposed parameters (theta), update the
batman params object with the proposed parameters and evaluate the gaussian
likelihood.
Note: the priorbounds are only needed to parse theta. | Below is the the instruction that describes the task:
### Input:
Given a batman TransitModel and its proposed parameters (theta), update the
batman params object with the proposed parameters and evaluate the gaussian
likelihood.
Note: the priorbounds are only needed to parse theta.
### Response:
def _log_likelihood_transit_plus_line(theta, params, model, t, data_flux,
err_flux, priorbounds):
'''
Given a batman TransitModel and its proposed parameters (theta), update the
batman params object with the proposed parameters and evaluate the gaussian
likelihood.
Note: the priorbounds are only needed to parse theta.
'''
u = []
for ix, key in enumerate(sorted(priorbounds.keys())):
if key == 'rp':
params.rp = theta[ix]
elif key == 't0':
params.t0 = theta[ix]
elif key == 'sma':
params.a = theta[ix]
elif key == 'incl':
params.inc = theta[ix]
elif key == 'period':
params.per = theta[ix]
elif key == 'ecc':
params.ecc = theta[ix]
elif key == 'omega':
params.w = theta[ix]
elif key == 'u_linear':
u.append(theta[ix])
elif key == 'u_quadratic':
u.append(theta[ix])
params.u = u
elif key == 'poly_order0':
poly_order0 = theta[ix]
elif key == 'poly_order1':
poly_order1 = theta[ix]
try:
poly_order0
except Exception as e:
poly_order0 = 0
else:
pass
transit = model.light_curve(params)
line = poly_order0 + t*poly_order1
model = transit + line
residuals = data_flux - model
log_likelihood = -0.5*(
np.sum((residuals/err_flux)**2 + np.log(2*np.pi*(err_flux)**2))
)
return log_likelihood |
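The final expression is the usual Gaussian log-likelihood, ln L = -1/2 * sum((r_i/sigma_i)^2 + ln(2*pi*sigma_i^2)). A standalone numeric check with synthetic residuals, with no batman model involved:
import numpy as np

residuals = np.array([0.001, -0.002, 0.0015])      # made-up (data - model) values
err_flux = np.array([0.002, 0.002, 0.002])
log_like = -0.5 * np.sum((residuals / err_flux) ** 2
                         + np.log(2 * np.pi * err_flux ** 2))
print(log_like)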
def new_sent(self, text, ID=None, **kwargs):
''' Create a new sentence and add it to this Document '''
if ID is None:
ID = next(self.__idgen)
return self.add_sent(Sentence(text, ID=ID, **kwargs)) | Create a new sentence and add it to this Document | Below is the the instruction that describes the task:
### Input:
Create a new sentence and add it to this Document
### Response:
def new_sent(self, text, ID=None, **kwargs):
''' Create a new sentence and add it to this Document '''
if ID is None:
ID = next(self.__idgen)
return self.add_sent(Sentence(text, ID=ID, **kwargs)) |
def authenticate(self, request):
"""
Returns two-tuple of (user, token) if authentication succeeds,
or None otherwise.
"""
try:
oauth_request = oauth_provider.utils.get_oauth_request(request)
except oauth.Error as err:
raise exceptions.AuthenticationFailed(err.message)
if not oauth_request:
return None
oauth_params = oauth_provider.consts.OAUTH_PARAMETERS_NAMES
found = any(param for param in oauth_params if param in oauth_request)
missing = list(param for param in oauth_params if param not in oauth_request)
if not found:
# OAuth authentication was not attempted.
return None
if missing:
# OAuth was attempted but missing parameters.
msg = 'Missing parameters: %s' % (', '.join(missing))
raise exceptions.AuthenticationFailed(msg)
if not self.check_nonce(request, oauth_request):
msg = 'Nonce check failed'
raise exceptions.AuthenticationFailed(msg)
try:
consumer_key = oauth_request.get_parameter('oauth_consumer_key')
consumer = oauth_provider_store.get_consumer(request, oauth_request, consumer_key)
except oauth_provider.store.InvalidConsumerError:
msg = 'Invalid consumer token: %s' % oauth_request.get_parameter('oauth_consumer_key')
raise exceptions.AuthenticationFailed(msg)
if consumer.status != oauth_provider.consts.ACCEPTED:
msg = 'Invalid consumer key status: %s' % consumer.get_status_display()
raise exceptions.AuthenticationFailed(msg)
try:
token_param = oauth_request.get_parameter('oauth_token')
token = oauth_provider_store.get_access_token(request, oauth_request, consumer, token_param)
except oauth_provider.store.InvalidTokenError:
msg = 'Invalid access token: %s' % oauth_request.get_parameter('oauth_token')
raise exceptions.AuthenticationFailed(msg)
try:
self.validate_token(request, consumer, token)
except oauth.Error as err:
raise exceptions.AuthenticationFailed(err.message)
user = token.user
if not user.is_active:
msg = 'User inactive or deleted: %s' % user.username
raise exceptions.AuthenticationFailed(msg)
return (token.user, token) | Returns two-tuple of (user, token) if authentication succeeds,
or None otherwise. | Below is the the instruction that describes the task:
### Input:
Returns two-tuple of (user, token) if authentication succeeds,
or None otherwise.
### Response:
def authenticate(self, request):
"""
Returns two-tuple of (user, token) if authentication succeeds,
or None otherwise.
"""
try:
oauth_request = oauth_provider.utils.get_oauth_request(request)
except oauth.Error as err:
raise exceptions.AuthenticationFailed(err.message)
if not oauth_request:
return None
oauth_params = oauth_provider.consts.OAUTH_PARAMETERS_NAMES
found = any(param for param in oauth_params if param in oauth_request)
missing = list(param for param in oauth_params if param not in oauth_request)
if not found:
# OAuth authentication was not attempted.
return None
if missing:
# OAuth was attempted but missing parameters.
msg = 'Missing parameters: %s' % (', '.join(missing))
raise exceptions.AuthenticationFailed(msg)
if not self.check_nonce(request, oauth_request):
msg = 'Nonce check failed'
raise exceptions.AuthenticationFailed(msg)
try:
consumer_key = oauth_request.get_parameter('oauth_consumer_key')
consumer = oauth_provider_store.get_consumer(request, oauth_request, consumer_key)
except oauth_provider.store.InvalidConsumerError:
msg = 'Invalid consumer token: %s' % oauth_request.get_parameter('oauth_consumer_key')
raise exceptions.AuthenticationFailed(msg)
if consumer.status != oauth_provider.consts.ACCEPTED:
msg = 'Invalid consumer key status: %s' % consumer.get_status_display()
raise exceptions.AuthenticationFailed(msg)
try:
token_param = oauth_request.get_parameter('oauth_token')
token = oauth_provider_store.get_access_token(request, oauth_request, consumer, token_param)
except oauth_provider.store.InvalidTokenError:
msg = 'Invalid access token: %s' % oauth_request.get_parameter('oauth_token')
raise exceptions.AuthenticationFailed(msg)
try:
self.validate_token(request, consumer, token)
except oauth.Error as err:
raise exceptions.AuthenticationFailed(err.message)
user = token.user
if not user.is_active:
msg = 'User inactive or deleted: %s' % user.username
raise exceptions.AuthenticationFailed(msg)
return (token.user, token) |
def update_priority(self, tree_idx_list, priority_list):
""" Update priorities of the elements in the tree """
for tree_idx, priority, segment_tree in zip(tree_idx_list, priority_list, self.segment_trees):
segment_tree.update(tree_idx, priority) | Update priorities of the elements in the tree | Below is the the instruction that describes the task:
### Input:
Update priorities of the elements in the tree
### Response:
def update_priority(self, tree_idx_list, priority_list):
""" Update priorities of the elements in the tree """
for tree_idx, priority, segment_tree in zip(tree_idx_list, priority_list, self.segment_trees):
segment_tree.update(tree_idx, priority) |
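The segment trees themselves are not shown; the sketch below is one plausible sum-tree with the update(idx, priority) interface the loop relies on. The data-index-to-leaf convention here is an assumption, not the project's actual layout.
class SumSegmentTree:
    """Minimal array-backed sum tree supporting the update(idx, value) call used above."""
    def __init__(self, capacity):
        self.capacity = capacity
        self.tree = [0.0] * (2 * capacity)

    def update(self, idx, priority):
        i = idx + self.capacity            # leaf position for data index idx
        self.tree[i] = priority
        i //= 2
        while i >= 1:                      # propagate the new sum up to the root
            self.tree[i] = self.tree[2 * i] + self.tree[2 * i + 1]
            i //= 2

tree = SumSegmentTree(8)
tree.update(3, 0.5)
tree.update(5, 1.5)
print(tree.tree[1])   # root holds the total priority mass: 2.0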
def indices_removed(lst, idxs):
'''Returns a copy of lst with each index in idxs removed.'''
ret = [item for k,item in enumerate(lst) if k not in idxs]
return type(lst)(ret) | Returns a copy of lst with each index in idxs removed. | Below is the the instruction that describes the task:
### Input:
Returns a copy of lst with each index in idxs removed.
### Response:
def indices_removed(lst, idxs):
'''Returns a copy of lst with each index in idxs removed.'''
ret = [item for k,item in enumerate(lst) if k not in idxs]
return type(lst)(ret) |
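With the definition above in scope, it works for any sequence type that can be rebuilt from a list, for example:
print(indices_removed([10, 20, 30, 40], {1, 3}))   # [10, 40]
print(indices_removed(('a', 'b', 'c'), [0]))       # ('b', 'c')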
def _aggregate_one_result(
self, sock_info, slave_ok, cmd, collation=None, session=None):
"""Internal helper to run an aggregate that returns a single result."""
result = self._command(
sock_info,
cmd,
slave_ok,
codec_options=self.__write_response_codec_options,
read_concern=self.read_concern,
collation=collation,
session=session)
batch = result['cursor']['firstBatch']
return batch[0] if batch else None | Internal helper to run an aggregate that returns a single result. | Below is the the instruction that describes the task:
### Input:
Internal helper to run an aggregate that returns a single result.
### Response:
def _aggregate_one_result(
self, sock_info, slave_ok, cmd, collation=None, session=None):
"""Internal helper to run an aggregate that returns a single result."""
result = self._command(
sock_info,
cmd,
slave_ok,
codec_options=self.__write_response_codec_options,
read_concern=self.read_concern,
collation=collation,
session=session)
batch = result['cursor']['firstBatch']
return batch[0] if batch else None |
def download_rdf(self, force=False):
"""Ensures a fresh-enough RDF file is downloaded and extracted.
Returns True on error."""
if self.downloading:
return True
if not force and (os.path.exists(RDF_PATH) and
(time.time() - os.path.getmtime(RDF_PATH)) < RDF_MAX_AGE):
return False
self.downloading = True
logging.info('Re-downloading RDF library from %s' % RDF_URL)
try:
shutil.rmtree(os.path.join(self.rdf_library_dir, 'cache'))
except OSError as e:
# Ignore not finding the directory to remove.
if e.errno != errno.ENOENT:
raise
try:
with open(RDF_PATH, 'w') as f:
with requests.get(RDF_URL, stream=True) as r:
shutil.copyfileobj(r.raw, f)
except requests.exceptions.RequestException as e:
logging.error(e)
return True
try:
with tarfile.open(RDF_PATH, 'r') as f:
f.extractall(self.rdf_library_dir)
except tarfile.TarError as e:
logging.error(e)
try:
os.unlink(RDF_PATH)
except:
pass
return True
self.downloading = False
return False | Ensures a fresh-enough RDF file is downloaded and extracted.
Returns True on error. | Below is the the instruction that describes the task:
### Input:
Ensures a fresh-enough RDF file is downloaded and extracted.
Returns True on error.
### Response:
def download_rdf(self, force=False):
"""Ensures a fresh-enough RDF file is downloaded and extracted.
Returns True on error."""
if self.downloading:
return True
if not force and (os.path.exists(RDF_PATH) and
(time.time() - os.path.getmtime(RDF_PATH)) < RDF_MAX_AGE):
return False
self.downloading = True
logging.info('Re-downloading RDF library from %s' % RDF_URL)
try:
shutil.rmtree(os.path.join(self.rdf_library_dir, 'cache'))
except OSError as e:
# Ignore not finding the directory to remove.
if e.errno != errno.ENOENT:
raise
try:
with open(RDF_PATH, 'w') as f:
with requests.get(RDF_URL, stream=True) as r:
shutil.copyfileobj(r.raw, f)
except requests.exceptions.RequestException as e:
logging.error(e)
return True
try:
with tarfile.open(RDF_PATH, 'r') as f:
f.extractall(self.rdf_library_dir)
except tarfile.TarError as e:
logging.error(e)
try:
os.unlink(RDF_PATH)
except:
pass
return True
self.downloading = False
return False |
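The freshness test at the top is just an mtime comparison; an isolated version of that check is sketched below, with an illustrative one-day threshold standing in for the module's RDF_MAX_AGE constant.
import os
import time

RDF_MAX_AGE = 24 * 60 * 60        # illustrative value; the real constant lives alongside RDF_PATH

def is_fresh(path, max_age=RDF_MAX_AGE):
    # Same condition download_rdf() uses to decide whether the cached archive is recent enough.
    return os.path.exists(path) and (time.time() - os.path.getmtime(path)) < max_age

print(is_fresh("rdf-files.tar.bz2"))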
def location_purge(location_id, delete=False, verbosity=0):
"""Print and conditionally delete files not referenced by meta data.
:param location_id: Id of the
:class:`~resolwe.flow.models.DataLocation` model that data
objects reference to.
:param delete: If ``True``, then delete unreferenced files.
"""
try:
location = DataLocation.objects.get(id=location_id)
except DataLocation.DoesNotExist:
logger.warning("Data location does not exist", extra={'location_id': location_id})
return
unreferenced_files = set()
purged_data = Data.objects.none()
referenced_by_data = location.data.exists()
if referenced_by_data:
if location.data.exclude(status__in=[Data.STATUS_DONE, Data.STATUS_ERROR]).exists():
return
# Perform cleanup.
purge_files_sets = list()
purged_data = location.data.all()
for data in purged_data:
purge_files_sets.append(get_purge_files(
location.get_path(),
data.output,
data.process.output_schema,
data.descriptor,
getattr(data.descriptor_schema, 'schema', [])
))
intersected_files = set.intersection(*purge_files_sets) if purge_files_sets else set()
unreferenced_files.update(intersected_files)
else:
# Remove data directory.
unreferenced_files.add(location.get_path())
unreferenced_files.add(location.get_runtime_path())
if verbosity >= 1:
# Print unreferenced files
if unreferenced_files:
logger.info(__("Unreferenced files for location id {} ({}):", location_id, len(unreferenced_files)))
for name in unreferenced_files:
logger.info(__(" {}", name))
else:
logger.info(__("No unreferenced files for location id {}", location_id))
# Go through unreferenced files and delete them.
if delete:
for name in unreferenced_files:
if os.path.isfile(name) or os.path.islink(name):
os.remove(name)
elif os.path.isdir(name):
shutil.rmtree(name)
location.purged = True
location.save()
if not referenced_by_data:
location.delete() | Print and conditionally delete files not referenced by meta data.
:param location_id: Id of the
:class:`~resolwe.flow.models.DataLocation` model that data
objects reference to.
:param delete: If ``True``, then delete unreferenced files. | Below is the the instruction that describes the task:
### Input:
Print and conditionally delete files not referenced by meta data.
:param location_id: Id of the
:class:`~resolwe.flow.models.DataLocation` model that data
objects reference to.
:param delete: If ``True``, then delete unreferenced files.
### Response:
def location_purge(location_id, delete=False, verbosity=0):
"""Print and conditionally delete files not referenced by meta data.
:param location_id: Id of the
:class:`~resolwe.flow.models.DataLocation` model that data
objects reference to.
:param delete: If ``True``, then delete unreferenced files.
"""
try:
location = DataLocation.objects.get(id=location_id)
except DataLocation.DoesNotExist:
logger.warning("Data location does not exist", extra={'location_id': location_id})
return
unreferenced_files = set()
purged_data = Data.objects.none()
referenced_by_data = location.data.exists()
if referenced_by_data:
if location.data.exclude(status__in=[Data.STATUS_DONE, Data.STATUS_ERROR]).exists():
return
# Perform cleanup.
purge_files_sets = list()
purged_data = location.data.all()
for data in purged_data:
purge_files_sets.append(get_purge_files(
location.get_path(),
data.output,
data.process.output_schema,
data.descriptor,
getattr(data.descriptor_schema, 'schema', [])
))
intersected_files = set.intersection(*purge_files_sets) if purge_files_sets else set()
unreferenced_files.update(intersected_files)
else:
# Remove data directory.
unreferenced_files.add(location.get_path())
unreferenced_files.add(location.get_runtime_path())
if verbosity >= 1:
# Print unreferenced files
if unreferenced_files:
logger.info(__("Unreferenced files for location id {} ({}):", location_id, len(unreferenced_files)))
for name in unreferenced_files:
logger.info(__(" {}", name))
else:
logger.info(__("No unreferenced files for location id {}", location_id))
# Go through unreferenced files and delete them.
if delete:
for name in unreferenced_files:
if os.path.isfile(name) or os.path.islink(name):
os.remove(name)
elif os.path.isdir(name):
shutil.rmtree(name)
location.purged = True
location.save()
if not referenced_by_data:
location.delete() |
def sum(self, field):
"""
Returns the sum of the field in the result set of the query
by wrapping the query and performing a SUM aggregate of the specified field
:param field: the field to pass to the SUM aggregate
:type field: str
:return: The sum of the specified field
:rtype: int
"""
q = Query(self.connection).from_table(self, fields=[
SumField(field)
])
rows = q.select(bypass_safe_limit=True)
return list(rows[0].values())[0] | Returns the sum of the field in the result set of the query
by wrapping the query and performing a SUM aggregate of the specified field
:param field: the field to pass to the SUM aggregate
:type field: str
:return: The sum of the specified field
:rtype: int | Below is the the instruction that describes the task:
### Input:
Returns the sum of the field in the result set of the query
by wrapping the query and performing a SUM aggregate of the specified field
:param field: the field to pass to the SUM aggregate
:type field: str
:return: The sum of the specified field
:rtype: int
### Response:
def sum(self, field):
"""
Returns the sum of the field in the result set of the query
by wrapping the query and performing a SUM aggregate of the specified field
:param field: the field to pass to the SUM aggregate
:type field: str
:return: The sum of the specified field
:rtype: int
"""
q = Query(self.connection).from_table(self, fields=[
SumField(field)
])
rows = q.select(bypass_safe_limit=True)
return list(rows[0].values())[0] |
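The idea is to treat the existing query as a subquery and select a single SUM over it. The same shape expressed with plain sqlite3, using made-up table and column names:
import sqlite3

con = sqlite3.connect(':memory:')
con.execute("CREATE TABLE orders (amount REAL)")
con.executemany("INSERT INTO orders VALUES (?)", [(3.0,), (4.5,)])
inner = "SELECT amount FROM orders"                      # stands in for the wrapped query
total = con.execute(
    "SELECT SUM(amount) FROM ({}) AS sub".format(inner)).fetchone()[0]
print(total)                                             # 7.5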
def init_app(self, app):
"""Initialize a :class:`~flask.Flask` application for use with
this extension.
"""
self._jobs = []
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['restpoints'] = self
app.restpoints_instance = self
app.add_url_rule('/ping', 'ping', ping)
app.add_url_rule('/time', 'time', time)
app.add_url_rule('/status', 'status', status(self._jobs)) | Initialize a :class:`~flask.Flask` application for use with
this extension. | Below is the the instruction that describes the task:
### Input:
Initialize a :class:`~flask.Flask` application for use with
this extension.
### Response:
def init_app(self, app):
"""Initialize a :class:`~flask.Flask` application for use with
this extension.
"""
self._jobs = []
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['restpoints'] = self
app.restpoints_instance = self
app.add_url_rule('/ping', 'ping', ping)
app.add_url_rule('/time', 'time', time)
app.add_url_rule('/status', 'status', status(self._jobs)) |
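A self-contained stand-in showing the same init_app pattern: the extension stores itself on app.extensions and registers its routes. The class and endpoint below are illustrative, not the real extension.
from flask import Flask

class PingExtension:
    def init_app(self, app):
        if not hasattr(app, 'extensions'):
            app.extensions = {}
        app.extensions['ping'] = self
        app.add_url_rule('/ping', 'ping', lambda: 'pong')

app = Flask(__name__)
PingExtension().init_app(app)
print(app.test_client().get('/ping').data)    # b'pong'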
def show_listener(self, lbaas_listener, **_params):
"""Fetches information for a lbaas_listener."""
return self.get(self.lbaas_listener_path % (lbaas_listener),
params=_params) | Fetches information for a lbaas_listener. | Below is the the instruction that describes the task:
### Input:
Fetches information for a lbaas_listener.
### Response:
def show_listener(self, lbaas_listener, **_params):
"""Fetches information for a lbaas_listener."""
return self.get(self.lbaas_listener_path % (lbaas_listener),
params=_params) |
def main(xmpp_server, xmpp_port, peer_name, node_name, app_id,
xmpp_jid=None, xmpp_password=None):
"""
Runs the framework
:param xmpp_server: Address of the XMPP server
:param xmpp_port: Port of the XMPP server
:param peer_name: Name of the peer
:param node_name: Name (also, UID) of the node hosting the peer
:param app_id: Application ID
:param xmpp_jid: XMPP JID, None for Anonymous login
:param xmpp_password: XMPP account password
"""
# Create the framework
framework = pelix.framework.create_framework(
('pelix.ipopo.core',
'pelix.ipopo.waiting',
'pelix.shell.core',
'pelix.shell.ipopo',
'pelix.shell.console',
# Herald core
'herald.core',
'herald.directory',
'herald.shell',
# Herald XMPP
'herald.transports.xmpp.directory',
'herald.transports.xmpp.transport',
# RPC
'pelix.remote.dispatcher',
'pelix.remote.registry',
'herald.remote.discovery',
'herald.remote.herald_xmlrpc',),
{herald.FWPROP_NODE_UID: node_name,
herald.FWPROP_NODE_NAME: node_name,
herald.FWPROP_PEER_NAME: peer_name,
herald.FWPROP_APPLICATION_ID: app_id})
context = framework.get_bundle_context()
# Start everything
framework.start()
# Instantiate components
with use_waiting_list(context) as ipopo:
# ... XMPP Transport
ipopo.add(herald.transports.xmpp.FACTORY_TRANSPORT,
"herald-xmpp-transport",
{herald.transports.xmpp.PROP_XMPP_SERVER: xmpp_server,
herald.transports.xmpp.PROP_XMPP_PORT: xmpp_port,
herald.transports.xmpp.PROP_XMPP_JID: xmpp_jid,
herald.transports.xmpp.PROP_XMPP_PASSWORD: xmpp_password})
# Start the framework and wait for it to stop
framework.wait_for_stop() | Runs the framework
:param xmpp_server: Address of the XMPP server
:param xmpp_port: Port of the XMPP server
:param peer_name: Name of the peer
:param node_name: Name (also, UID) of the node hosting the peer
:param app_id: Application ID
:param xmpp_jid: XMPP JID, None for Anonymous login
:param xmpp_password: XMPP account password | Below is the the instruction that describes the task:
### Input:
Runs the framework
:param xmpp_server: Address of the XMPP server
:param xmpp_port: Port of the XMPP server
:param peer_name: Name of the peer
:param node_name: Name (also, UID) of the node hosting the peer
:param app_id: Application ID
:param xmpp_jid: XMPP JID, None for Anonymous login
:param xmpp_password: XMPP account password
### Response:
def main(xmpp_server, xmpp_port, peer_name, node_name, app_id,
xmpp_jid=None, xmpp_password=None):
"""
Runs the framework
:param xmpp_server: Address of the XMPP server
:param xmpp_port: Port of the XMPP server
:param peer_name: Name of the peer
:param node_name: Name (also, UID) of the node hosting the peer
:param app_id: Application ID
:param xmpp_jid: XMPP JID, None for Anonymous login
:param xmpp_password: XMPP account password
"""
# Create the framework
framework = pelix.framework.create_framework(
('pelix.ipopo.core',
'pelix.ipopo.waiting',
'pelix.shell.core',
'pelix.shell.ipopo',
'pelix.shell.console',
# Herald core
'herald.core',
'herald.directory',
'herald.shell',
# Herald XMPP
'herald.transports.xmpp.directory',
'herald.transports.xmpp.transport',
# RPC
'pelix.remote.dispatcher',
'pelix.remote.registry',
'herald.remote.discovery',
'herald.remote.herald_xmlrpc',),
{herald.FWPROP_NODE_UID: node_name,
herald.FWPROP_NODE_NAME: node_name,
herald.FWPROP_PEER_NAME: peer_name,
herald.FWPROP_APPLICATION_ID: app_id})
context = framework.get_bundle_context()
# Start everything
framework.start()
# Instantiate components
with use_waiting_list(context) as ipopo:
# ... XMPP Transport
ipopo.add(herald.transports.xmpp.FACTORY_TRANSPORT,
"herald-xmpp-transport",
{herald.transports.xmpp.PROP_XMPP_SERVER: xmpp_server,
herald.transports.xmpp.PROP_XMPP_PORT: xmpp_port,
herald.transports.xmpp.PROP_XMPP_JID: xmpp_jid,
herald.transports.xmpp.PROP_XMPP_PASSWORD: xmpp_password})
# Start the framework and wait for it to stop
framework.wait_for_stop() |
def filtered_list(cls, name=None, obj=None):
"""List datacenters matching name and compatible
with obj"""
options = {}
if name:
options['id'] = cls.usable_id(name)
def obj_ok(dc, obj):
if not obj or obj['datacenter_id'] == dc['id']:
return True
return False
return [x for x in cls.list(options) if obj_ok(x, obj)] | List datacenters matching name and compatible
with obj | Below is the the instruction that describes the task:
### Input:
List datacenters matching name and compatible
with obj
### Response:
def filtered_list(cls, name=None, obj=None):
"""List datacenters matching name and compatible
with obj"""
options = {}
if name:
options['id'] = cls.usable_id(name)
def obj_ok(dc, obj):
if not obj or obj['datacenter_id'] == dc['id']:
return True
return False
return [x for x in cls.list(options) if obj_ok(x, obj)] |
def wait_for_element_visible(self, selector, by=By.CSS_SELECTOR,
timeout=settings.LARGE_TIMEOUT):
""" Waits for an element to appear in the HTML of a page.
The element must be visible (it cannot be hidden). """
if page_utils.is_xpath_selector(selector):
by = By.XPATH
if page_utils.is_link_text_selector(selector):
selector = page_utils.get_link_text_from_selector(selector)
by = By.LINK_TEXT
return page_actions.wait_for_element_visible(
self.driver, selector, by, timeout) | Waits for an element to appear in the HTML of a page.
The element must be visible (it cannot be hidden). | Below is the the instruction that describes the task:
### Input:
Waits for an element to appear in the HTML of a page.
The element must be visible (it cannot be hidden).
### Response:
def wait_for_element_visible(self, selector, by=By.CSS_SELECTOR,
timeout=settings.LARGE_TIMEOUT):
""" Waits for an element to appear in the HTML of a page.
The element must be visible (it cannot be hidden). """
if page_utils.is_xpath_selector(selector):
by = By.XPATH
if page_utils.is_link_text_selector(selector):
selector = page_utils.get_link_text_from_selector(selector)
by = By.LINK_TEXT
return page_actions.wait_for_element_visible(
self.driver, selector, by, timeout) |
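In a SeleniumBase test class this is typically called through self; a hedged sketch follows, where the URL and selector are placeholders and a locally installed browser driver is assumed.
from seleniumbase import BaseCase

class VisibleElementTest(BaseCase):
    def test_heading_becomes_visible(self):
        self.open("https://example.com")
        element = self.wait_for_element_visible("h1", timeout=10)
        self.assert_true(element.is_displayed())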
def skill_update(self, skill_id, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/chat/skills#update-skill-by-id"
api_path = "/api/v2/skills/{skill_id}"
api_path = api_path.format(skill_id=skill_id)
return self.call(api_path, method="PUT", data=data, **kwargs) | https://developer.zendesk.com/rest_api/docs/chat/skills#update-skill-by-id | Below is the the instruction that describes the task:
### Input:
https://developer.zendesk.com/rest_api/docs/chat/skills#update-skill-by-id
### Response:
def skill_update(self, skill_id, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/chat/skills#update-skill-by-id"
api_path = "/api/v2/skills/{skill_id}"
api_path = api_path.format(skill_id=skill_id)
return self.call(api_path, method="PUT", data=data, **kwargs) |
def transition(self, duration, brightness=None, temperature=None):
""" Transition wrapper.
Short-circuit transition if necessary.
:param duration: Duration of transition.
:param brightness: Transition to this brightness.
:param temperature: Transition to this temperature.
"""
# Transition immediately if duration is zero.
if duration == 0:
if brightness is not None:
self.brightness = brightness
if temperature is not None:
self.temperature = temperature
return
if brightness != self.brightness or temperature != self.temperature:
self._transition(duration, brightness, temperature) | Transition wrapper.
Short-circuit transition if necessary.
:param duration: Duration of transition.
:param brightness: Transition to this brightness.
:param temperature: Transition to this temperature. | Below is the the instruction that describes the task:
### Input:
Transition wrapper.
Short-circuit transition if necessary.
:param duration: Duration of transition.
:param brightness: Transition to this brightness.
:param temperature: Transition to this temperature.
### Response:
def transition(self, duration, brightness=None, temperature=None):
""" Transition wrapper.
Short-circuit transition if necessary.
:param duration: Duration of transition.
:param brightness: Transition to this brightness.
:param temperature: Transition to this temperature.
"""
# Transition immediately if duration is zero.
if duration == 0:
if brightness is not None:
self.brightness = brightness
if temperature is not None:
self.temperature = temperature
return
if brightness != self.brightness or temperature != self.temperature:
self._transition(duration, brightness, temperature) |
def scatter_master_notifications(self):
"""Generate children notifications from a master notification
Also update notification number
Master notifications are raised when a notification must be sent out. They are not
launched by reactionners (only children are) but they are used to build the
children notifications.
From one master notification, several children notifications may be built,
indeed one per each contact...
:return: None
"""
now = time.time()
# We only want the master scheduled notifications that are immediately launchable
notifications = [a for a in self.actions.values()
if a.is_a == u'notification' and a.status == ACT_STATUS_SCHEDULED
and not a.contact and a.is_launchable(now)]
if notifications:
logger.debug("Scatter master notification: %d notifications",
len(notifications))
for notification in notifications:
logger.debug("Scheduler got a master notification: %s", notification)
# This is a "master" notification created by an host/service.
# We use it to create children notifications (for the contacts and
# notification_commands) which are executed in the reactionner.
item = self.find_item_by_id(notification.ref)
children = []
notification_period = None
if getattr(item, 'notification_period', None) is not None:
notification_period = self.timeperiods[item.notification_period]
if not item.is_blocking_notifications(notification_period,
self.hosts, self.services,
notification.type, now):
# If it is possible to send notifications
# of this type at the current time, then create
# a single notification for each contact of this item.
children = item.scatter_notification(
notification, self.contacts, self.notificationways, self.timeperiods,
self.macromodulations, self.escalations,
self.find_item_by_id(getattr(item, "host", None))
)
for notif in children:
logger.debug(" - child notification: %s", notif)
notif.status = ACT_STATUS_SCHEDULED
# Add the notification to the scheduler objects
self.add(notif)
# If we have notification_interval then schedule
# the next notification (problems only)
if notification.type == u'PROBLEM':
# Update the ref notif number after raise the one of the notification
if children:
# notif_nb of the master notification
# was already current_notification_number+1.
# If notifications were sent,
# then host/service-counter will also be incremented
item.current_notification_number = notification.notif_nb
if item.notification_interval and notification.t_to_go is not None:
# We must continue to send notifications.
# Just leave it in the actions list and set it to "scheduled"
# and it will be found again later
# Ask the service/host to compute the next notif time. It can be just
# a.t_to_go + item.notification_interval*item.__class__.interval_length
# or maybe before because we have an
# escalation that need to raise up before
notification.t_to_go = item.get_next_notification_time(notification,
self.escalations,
self.timeperiods)
notification.notif_nb = item.current_notification_number + 1
logger.debug("Repeat master notification: %s", notification)
else:
# Wipe out this master notification. It is a master one
item.remove_in_progress_notification(notification)
logger.debug("Remove master notification (no repeat): %s", notification)
else:
# Wipe out this master notification.
logger.debug("Remove master notification (no more a problem): %s", notification)
# We don't repeat recover/downtime/flap/etc...
item.remove_in_progress_notification(notification) | Generate children notifications from a master notification
Also update notification number
Master notification are raised when a notification must be sent out. They are not
launched by reactionners (only children are) but they are used to build the
children notifications.
From one master notification, several children notifications may be built,
indeed one per each contact...
:return: None | Below is the instruction that describes the task:
### Input:
Generate children notifications from a master notification
Also update notification number
Master notification are raised when a notification must be sent out. They are not
launched by reactionners (only children are) but they are used to build the
children notifications.
From one master notification, several children notifications may be built,
indeed one per each contact...
:return: None
### Response:
def scatter_master_notifications(self):
"""Generate children notifications from a master notification
Also update notification number
Master notification are raised when a notification must be sent out. They are not
launched by reactionners (only children are) but they are used to build the
children notifications.
From one master notification, several children notifications may be built,
indeed one per each contact...
:return: None
"""
now = time.time()
# We only want the master scheduled notifications that are immediately launchable
notifications = [a for a in self.actions.values()
if a.is_a == u'notification' and a.status == ACT_STATUS_SCHEDULED
and not a.contact and a.is_launchable(now)]
if notifications:
logger.debug("Scatter master notification: %d notifications",
len(notifications))
for notification in notifications:
logger.debug("Scheduler got a master notification: %s", notification)
# This is a "master" notification created by an host/service.
# We use it to create children notifications (for the contacts and
# notification_commands) which are executed in the reactionner.
item = self.find_item_by_id(notification.ref)
children = []
notification_period = None
if getattr(item, 'notification_period', None) is not None:
notification_period = self.timeperiods[item.notification_period]
if not item.is_blocking_notifications(notification_period,
self.hosts, self.services,
notification.type, now):
# If it is possible to send notifications
# of this type at the current time, then create
# a single notification for each contact of this item.
children = item.scatter_notification(
notification, self.contacts, self.notificationways, self.timeperiods,
self.macromodulations, self.escalations,
self.find_item_by_id(getattr(item, "host", None))
)
for notif in children:
logger.debug(" - child notification: %s", notif)
notif.status = ACT_STATUS_SCHEDULED
# Add the notification to the scheduler objects
self.add(notif)
# If we have notification_interval then schedule
# the next notification (problems only)
if notification.type == u'PROBLEM':
# Update the ref notif number after raise the one of the notification
if children:
# notif_nb of the master notification
# was already current_notification_number+1.
# If notifications were sent,
# then host/service-counter will also be incremented
item.current_notification_number = notification.notif_nb
if item.notification_interval and notification.t_to_go is not None:
# We must continue to send notifications.
# Just leave it in the actions list and set it to "scheduled"
# and it will be found again later
# Ask the service/host to compute the next notif time. It can be just
# a.t_to_go + item.notification_interval*item.__class__.interval_length
# or maybe before because we have an
# escalation that need to raise up before
notification.t_to_go = item.get_next_notification_time(notification,
self.escalations,
self.timeperiods)
notification.notif_nb = item.current_notification_number + 1
logger.debug("Repeat master notification: %s", notification)
else:
# Wipe out this master notification. It is a master one
item.remove_in_progress_notification(notification)
logger.debug("Remove master notification (no repeat): %s", notification)
else:
# Wipe out this master notification.
logger.debug("Remove master notification (no more a problem): %s", notification)
# We don't repeat recover/downtime/flap/etc...
item.remove_in_progress_notification(notification) |
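The fan-out logic above can be summarized with a toy, framework-free sketch: one master notification produces one child per contact, and a PROBLEM master with a notification interval is pushed forward instead of being removed. All names below are illustrative, not Alignak APIs:

import time
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class ToyNotification:
    type: str
    contact: Optional[str] = None
    t_to_go: float = field(default_factory=time.time)
    notif_nb: int = 1


def scatter(master, contacts, interval):
    # Children are what the workers actually execute, one per contact.
    children = [ToyNotification(master.type, contact=c, notif_nb=master.notif_nb)
                for c in contacts]
    if master.type == "PROBLEM" and interval:
        # Keep the master and schedule the next round.
        master.t_to_go += interval
        master.notif_nb += 1
        return children, master
    # Recoveries, downtimes, etc. are not repeated: drop the master.
    return children, None


children, master = scatter(ToyNotification("PROBLEM"), ["alice", "bob"], interval=300)
print(len(children), master.notif_nb)   # 2 2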
def disconnect(self):
"""
Disconnect from a TWS or IB gateway application.
This will clear all session state.
"""
if not self.client.isConnected():
return
stats = self.client.connectionStats()
self._logger.info(
f'Disconnecting from {self.client.host}:{self.client.port}, '
f'{util.formatSI(stats.numBytesSent)}B sent '
f'in {stats.numMsgSent} messages, '
f'{util.formatSI(stats.numBytesRecv)}B received '
f'in {stats.numMsgRecv} messages, '
f'session time {util.formatSI(stats.duration)}s.')
self.client.disconnect() | Disconnect from a TWS or IB gateway application.
This will clear all session state. | Below is the instruction that describes the task:
### Input:
Disconnect from a TWS or IB gateway application.
This will clear all session state.
### Response:
def disconnect(self):
"""
Disconnect from a TWS or IB gateway application.
This will clear all session state.
"""
if not self.client.isConnected():
return
stats = self.client.connectionStats()
self._logger.info(
f'Disconnecting from {self.client.host}:{self.client.port}, '
f'{util.formatSI(stats.numBytesSent)}B sent '
f'in {stats.numMsgSent} messages, '
f'{util.formatSI(stats.numBytesRecv)}B received '
f'in {stats.numMsgRecv} messages, '
f'session time {util.formatSI(stats.duration)}s.')
self.client.disconnect() |
def _create_one(self, ctx):
"""
Creates an instance to be saved when a model is created.
"""
assert isinstance(ctx, ResourceQueryContext)
fields = dict_pick(ctx.data, self._model_columns)
model = self.model_cls(**fields)
return model | Creates an instance to be saved when a model is created. | Below is the instruction that describes the task:
### Input:
Creates an instance to be saved when a model is created.
### Response:
def _create_one(self, ctx):
"""
Creates an instance to be saved when a model is created.
"""
assert isinstance(ctx, ResourceQueryContext)
fields = dict_pick(ctx.data, self._model_columns)
model = self.model_cls(**fields)
return model |
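The create path above whitelists the incoming payload against the model's columns before instantiating it. A hedged sketch of that idea; dict_pick is assumed to behave like a simple key filter, and ToyModel is illustrative:

def dict_pick(data, allowed_keys):
    # Keep only the keys that correspond to actual model columns.
    return {k: v for k, v in data.items() if k in allowed_keys}


class ToyModel:
    columns = ("name", "email")

    def __init__(self, **fields):
        self.__dict__.update(fields)


payload = {"name": "Ada", "email": "ada@example.com", "is_admin": True}
fields = dict_pick(payload, ToyModel.columns)   # silently drops "is_admin"
model = ToyModel(**fields)
print(vars(model))   # {'name': 'Ada', 'email': 'ada@example.com'}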
def get_vaults(self):
"""Gets the vault list resulting from the search.
return: (osid.authorization.VaultList) - the vault list
raise: IllegalState - list has already been retrieved
*compliance: mandatory -- This method must be implemented.*
"""
if self.retrieved:
raise errors.IllegalState('List has already been retrieved.')
self.retrieved = True
return objects.VaultList(self._results, runtime=self._runtime) | Gets the vault list resulting from the search.
return: (osid.authorization.VaultList) - the vault list
raise: IllegalState - list has already been retrieved
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Gets the vault list resulting from the search.
return: (osid.authorization.VaultList) - the vault list
raise: IllegalState - list has already been retrieved
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_vaults(self):
"""Gets the vault list resulting from the search.
return: (osid.authorization.VaultList) - the vault list
raise: IllegalState - list has already been retrieved
*compliance: mandatory -- This method must be implemented.*
"""
if self.retrieved:
raise errors.IllegalState('List has already been retrieved.')
self.retrieved = True
return objects.VaultList(self._results, runtime=self._runtime) |
def construct_task_instance(self, session=None, lock_for_update=False):
"""
Construct a TaskInstance from the database based on the primary key
:param session: DB session.
:param lock_for_update: if True, indicates that the database should
lock the TaskInstance (issuing a FOR UPDATE clause) until the
session is committed.
"""
TI = airflow.models.TaskInstance
qry = session.query(TI).filter(
TI.dag_id == self._dag_id,
TI.task_id == self._task_id,
TI.execution_date == self._execution_date)
if lock_for_update:
ti = qry.with_for_update().first()
else:
ti = qry.first()
return ti | Construct a TaskInstance from the database based on the primary key
:param session: DB session.
:param lock_for_update: if True, indicates that the database should
lock the TaskInstance (issuing a FOR UPDATE clause) until the
session is committed. | Below is the instruction that describes the task:
### Input:
Construct a TaskInstance from the database based on the primary key
:param session: DB session.
:param lock_for_update: if True, indicates that the database should
lock the TaskInstance (issuing a FOR UPDATE clause) until the
session is committed.
### Response:
def construct_task_instance(self, session=None, lock_for_update=False):
"""
Construct a TaskInstance from the database based on the primary key
:param session: DB session.
:param lock_for_update: if True, indicates that the database should
lock the TaskInstance (issuing a FOR UPDATE clause) until the
session is committed.
"""
TI = airflow.models.TaskInstance
qry = session.query(TI).filter(
TI.dag_id == self._dag_id,
TI.task_id == self._task_id,
TI.execution_date == self._execution_date)
if lock_for_update:
ti = qry.with_for_update().first()
else:
ti = qry.first()
return ti |
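The lookup above filters on the task's composite primary key and optionally asks the database for a row lock. A runnable SQLAlchemy sketch of the same pattern, under the assumption of a toy model and an in-memory SQLite engine (on SQLite the lock request is effectively a no-op, which keeps the sketch runnable):

from datetime import datetime

from sqlalchemy import Column, DateTime, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class ToyTaskInstance(Base):
    __tablename__ = "task_instance"
    dag_id = Column(String, primary_key=True)
    task_id = Column(String, primary_key=True)
    execution_date = Column(DateTime, primary_key=True)


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)


def fetch(session, dag_id, task_id, execution_date, lock_for_update=False):
    qry = session.query(ToyTaskInstance).filter(
        ToyTaskInstance.dag_id == dag_id,
        ToyTaskInstance.task_id == task_id,
        ToyTaskInstance.execution_date == execution_date,
    )
    # FOR UPDATE keeps the selected row locked until the session commits.
    return (qry.with_for_update() if lock_for_update else qry).first()


with Session(engine) as session:
    print(fetch(session, "demo_dag", "demo_task", datetime(2024, 1, 1), lock_for_update=True))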
def list_renderers(*args):
'''
List the renderers loaded on the minion
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt '*' sys.list_renderers
Render names can be specified as globs.
.. code-block:: bash
salt '*' sys.list_renderers 'yaml*'
'''
renderers_ = salt.loader.render(__opts__, [])
renderers = set()
if not args:
for rend in six.iterkeys(renderers_):
renderers.add(rend)
return sorted(renderers)
for module in args:
for rend in fnmatch.filter(renderers_, module):
renderers.add(rend)
return sorted(renderers) | List the renderers loaded on the minion
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt '*' sys.list_renderers
Render names can be specified as globs.
.. code-block:: bash
salt '*' sys.list_renderers 'yaml*' | Below is the instruction that describes the task:
### Input:
List the renderers loaded on the minion
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt '*' sys.list_renderers
Render names can be specified as globs.
.. code-block:: bash
salt '*' sys.list_renderers 'yaml*'
### Response:
def list_renderers(*args):
'''
List the renderers loaded on the minion
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt '*' sys.list_renderers
Render names can be specified as globs.
.. code-block:: bash
salt '*' sys.list_renderers 'yaml*'
'''
renderers_ = salt.loader.render(__opts__, [])
renderers = set()
if not args:
for rend in six.iterkeys(renderers_):
renderers.add(rend)
return sorted(renderers)
for module in args:
for rend in fnmatch.filter(renderers_, module):
renderers.add(rend)
return sorted(renderers) |
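The glob handling above boils down to fnmatch.filter over the loaded renderer names. A small self-contained sketch with made-up renderer names:

import fnmatch

loaded_renderers = {"yaml": object(), "yamlex": object(), "jinja": object(), "py": object()}


def list_renderers(*patterns):
    if not patterns:
        return sorted(loaded_renderers)
    names = set()
    for pattern in patterns:
        # fnmatch.filter keeps only the names matching the shell-style glob.
        names.update(fnmatch.filter(loaded_renderers, pattern))
    return sorted(names)


print(list_renderers())            # ['jinja', 'py', 'yaml', 'yamlex']
print(list_renderers("yaml*"))     # ['yaml', 'yamlex']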
def _submit(self, pathfile, filedata, filename):
'''
Submit either a file from disk, or a in-memory file to the solver service, and
return the request ID associated with the new captcha task.
'''
if pathfile and os.path.exists(pathfile):
files = {'file': open(pathfile, 'rb')}
elif filedata:
assert filename
files = {'file' : (filename, io.BytesIO(filedata))}
else:
raise ValueError("You must pass either a valid file path, or a bytes array containing the captcha image!")
payload = {
'key' : self.api_key,
'method' : 'post',
'json' : True,
}
self.log.info("Uploading to 2Captcha.com.")
url = self.getUrlFor('input', {})
request = requests.post(url, files=files, data=payload)
if not request.ok:
raise exc.CaptchaSolverFailure("Posting captcha to solve failed!")
resp_json = json.loads(request.text)
return self._process_response(resp_json) | Submit either a file from disk, or a in-memory file to the solver service, and
return the request ID associated with the new captcha task. | Below is the instruction that describes the task:
### Input:
Submit either a file from disk, or a in-memory file to the solver service, and
return the request ID associated with the new captcha task.
### Response:
def _submit(self, pathfile, filedata, filename):
'''
Submit either a file from disk, or a in-memory file to the solver service, and
return the request ID associated with the new captcha task.
'''
if pathfile and os.path.exists(pathfile):
files = {'file': open(pathfile, 'rb')}
elif filedata:
assert filename
files = {'file' : (filename, io.BytesIO(filedata))}
else:
raise ValueError("You must pass either a valid file path, or a bytes array containing the captcha image!")
payload = {
'key' : self.api_key,
'method' : 'post',
'json' : True,
}
self.log.info("Uploading to 2Captcha.com.")
url = self.getUrlFor('input', {})
request = requests.post(url, files=files, data=payload)
if not request.ok:
raise exc.CaptchaSolverFailure("Posting captcha to solve failed!")
resp_json = json.loads(request.text)
return self._process_response(resp_json) |
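The upload above builds a multipart body either from a file on disk or from bytes already in memory. A hedged sketch of assembling both variants for requests; the endpoint URL is a placeholder, not the real solver API:

import io

import requests


def build_files(pathfile=None, filedata=None, filename=None):
    if pathfile:
        return {"file": open(pathfile, "rb")}
    if filedata:
        # Wrap raw bytes so requests can send them as a named file part.
        return {"file": (filename, io.BytesIO(filedata))}
    raise ValueError("need either a file path or raw bytes plus a filename")


files = build_files(filedata=b"\x89PNG...", filename="captcha.png")
payload = {"key": "YOUR_API_KEY", "method": "post", "json": True}
# response = requests.post("https://captcha-solver.example/in.php", files=files, data=payload)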
def check_path_consistency(self, resource):
'''Path arguments must be consistent for all methods.'''
msg = ('Method "{}" path variables {}) do not conform with the '
'resource subpath declaration ({}).')
errors = []
# If subpath is not set, it will be detected by another checker
if resource.subpath is None:
return errors
declared = sorted(self.path_params_regex.findall(resource.subpath))
for callback in resource.callbacks:
actual = sorted(utils.filter_annotations_by_ptype(
callback, Ptypes.path))
if declared == actual:
continue
errors.append(msg.format(
'{}.{}'.format(resource.__name__, callback.__name__),
actual, resource.subpath))
return errors | Path arguments must be consistent for all methods. | Below is the instruction that describes the task:
### Input:
Path arguments must be consistent for all methods.
### Response:
def check_path_consistency(self, resource):
'''Path arguments must be consistent for all methods.'''
msg = ('Method "{}" path variables {}) do not conform with the '
'resource subpath declaration ({}).')
errors = []
# If subpath is not set, it will be detected by another checker
if resource.subpath is None:
return errors
declared = sorted(self.path_params_regex.findall(resource.subpath))
for callback in resource.callbacks:
actual = sorted(utils.filter_annotations_by_ptype(
callback, Ptypes.path))
if declared == actual:
continue
errors.append(msg.format(
'{}.{}'.format(resource.__name__, callback.__name__),
actual, resource.subpath))
return errors |
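The consistency check above reduces to comparing the placeholders found in the subpath template with the path parameters declared on each handler. A toy sketch, assuming the usual {name} placeholder syntax for path variables:

import re

PATH_PARAMS = re.compile(r"{(\w+)}")


def check_consistency(subpath, handler_path_params):
    declared = sorted(PATH_PARAMS.findall(subpath))
    actual = sorted(handler_path_params)
    if declared != actual:
        return [f"path variables {actual} do not match subpath {subpath!r} ({declared})"]
    return []


print(check_consistency("/users/{user_id}/posts/{post_id}", ["post_id", "user_id"]))  # []
print(check_consistency("/users/{user_id}", ["user_id", "post_id"]))                  # one error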
async def starttls(
self,
server_hostname: str = None,
validate_certs: bool = None,
client_cert: DefaultStrType = _default,
client_key: DefaultStrType = _default,
cert_bundle: DefaultStrType = _default,
tls_context: DefaultSSLContextType = _default,
timeout: DefaultNumType = _default,
) -> SMTPResponse:
"""
Puts the connection to the SMTP server into TLS mode.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
If the server supports TLS, this will encrypt the rest of the SMTP
session. If you provide the keyfile and certfile parameters,
the identity of the SMTP server and client can be checked (if
validate_certs is True). You can also provide a custom SSLContext
object. If no certs or SSLContext is given, and TLS config was
provided when initializing the class, STARTTLS will use to that,
otherwise it will use the Python defaults.
:raises SMTPException: server does not support STARTTLS
:raises SMTPServerDisconnected: connection lost
:raises ValueError: invalid options provided
"""
self._raise_error_if_disconnected()
await self._ehlo_or_helo_if_needed()
if validate_certs is not None:
self.validate_certs = validate_certs
if timeout is _default:
timeout = self.timeout # type: ignore
if client_cert is not _default:
self.client_cert = client_cert # type: ignore
if client_key is not _default:
self.client_key = client_key # type: ignore
if cert_bundle is not _default:
self.cert_bundle = cert_bundle # type: ignore
if tls_context is not _default:
self.tls_context = tls_context # type: ignore
if self.tls_context is not None and self.client_cert is not None:
raise ValueError(
"Either a TLS context or a certificate/key must be provided"
)
if server_hostname is None:
server_hostname = self.hostname
tls_context = self._get_tls_context()
if not self.supports_extension("starttls"):
raise SMTPException("SMTP STARTTLS extension not supported by server.")
async with self._command_lock:
try:
response, protocol = await self.protocol.starttls( # type: ignore
tls_context, server_hostname=server_hostname, timeout=timeout
)
except SMTPServerDisconnected:
self.close()
raise
self.transport = protocol._app_transport
# RFC 3207 part 4.2:
# The client MUST discard any knowledge obtained from the server, such
# as the list of SMTP service extensions, which was not obtained from
# the TLS negotiation itself.
self._reset_server_state()
return response | Puts the connection to the SMTP server into TLS mode.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
If the server supports TLS, this will encrypt the rest of the SMTP
session. If you provide the keyfile and certfile parameters,
the identity of the SMTP server and client can be checked (if
validate_certs is True). You can also provide a custom SSLContext
object. If no certs or SSLContext is given, and TLS config was
provided when initializing the class, STARTTLS will use to that,
otherwise it will use the Python defaults.
:raises SMTPException: server does not support STARTTLS
:raises SMTPServerDisconnected: connection lost
:raises ValueError: invalid options provided | Below is the instruction that describes the task:
### Input:
Puts the connection to the SMTP server into TLS mode.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
If the server supports TLS, this will encrypt the rest of the SMTP
session. If you provide the keyfile and certfile parameters,
the identity of the SMTP server and client can be checked (if
validate_certs is True). You can also provide a custom SSLContext
object. If no certs or SSLContext is given, and TLS config was
provided when initializing the class, STARTTLS will use to that,
otherwise it will use the Python defaults.
:raises SMTPException: server does not support STARTTLS
:raises SMTPServerDisconnected: connection lost
:raises ValueError: invalid options provided
### Response:
async def starttls(
self,
server_hostname: str = None,
validate_certs: bool = None,
client_cert: DefaultStrType = _default,
client_key: DefaultStrType = _default,
cert_bundle: DefaultStrType = _default,
tls_context: DefaultSSLContextType = _default,
timeout: DefaultNumType = _default,
) -> SMTPResponse:
"""
Puts the connection to the SMTP server into TLS mode.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
If the server supports TLS, this will encrypt the rest of the SMTP
session. If you provide the keyfile and certfile parameters,
the identity of the SMTP server and client can be checked (if
validate_certs is True). You can also provide a custom SSLContext
object. If no certs or SSLContext is given, and TLS config was
provided when initializing the class, STARTTLS will use to that,
otherwise it will use the Python defaults.
:raises SMTPException: server does not support STARTTLS
:raises SMTPServerDisconnected: connection lost
:raises ValueError: invalid options provided
"""
self._raise_error_if_disconnected()
await self._ehlo_or_helo_if_needed()
if validate_certs is not None:
self.validate_certs = validate_certs
if timeout is _default:
timeout = self.timeout # type: ignore
if client_cert is not _default:
self.client_cert = client_cert # type: ignore
if client_key is not _default:
self.client_key = client_key # type: ignore
if cert_bundle is not _default:
self.cert_bundle = cert_bundle # type: ignore
if tls_context is not _default:
self.tls_context = tls_context # type: ignore
if self.tls_context is not None and self.client_cert is not None:
raise ValueError(
"Either a TLS context or a certificate/key must be provided"
)
if server_hostname is None:
server_hostname = self.hostname
tls_context = self._get_tls_context()
if not self.supports_extension("starttls"):
raise SMTPException("SMTP STARTTLS extension not supported by server.")
async with self._command_lock:
try:
response, protocol = await self.protocol.starttls( # type: ignore
tls_context, server_hostname=server_hostname, timeout=timeout
)
except SMTPServerDisconnected:
self.close()
raise
self.transport = protocol._app_transport
# RFC 3207 part 4.2:
# The client MUST discard any knowledge obtained from the server, such
# as the list of SMTP service extensions, which was not obtained from
# the TLS negotiation itself.
self._reset_server_state()
return response |
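The same STARTTLS handshake (EHLO, upgrade the socket, then discard the pre-TLS extension list as RFC 3207 requires) can be shown with the standard library's synchronous client; the host below is a placeholder:

import smtplib
import ssl

context = ssl.create_default_context()
with smtplib.SMTP("mail.example.com", 587, timeout=30) as smtp:
    smtp.ehlo()
    smtp.starttls(context=context)
    # RFC 3207: knowledge gained before TLS must be discarded, so EHLO again.
    smtp.ehlo()
    # smtp.login("user", "app-password")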
def start_at(self, start_at):
"""
Sets the start_at of this Shift.
RFC 3339; shifted to location timezone + offset. Precision up to the minute is respected; seconds are truncated.
:param start_at: The start_at of this Shift.
:type: str
"""
if start_at is None:
raise ValueError("Invalid value for `start_at`, must not be `None`")
if len(start_at) < 1:
raise ValueError("Invalid value for `start_at`, length must be greater than or equal to `1`")
self._start_at = start_at | Sets the start_at of this Shift.
RFC 3339; shifted to location timezone + offset. Precision up to the minute is respected; seconds are truncated.
:param start_at: The start_at of this Shift.
:type: str | Below is the instruction that describes the task:
### Input:
Sets the start_at of this Shift.
RFC 3339; shifted to location timezone + offset. Precision up to the minute is respected; seconds are truncated.
:param start_at: The start_at of this Shift.
:type: str
### Response:
def start_at(self, start_at):
"""
Sets the start_at of this Shift.
RFC 3339; shifted to location timezone + offset. Precision up to the minute is respected; seconds are truncated.
:param start_at: The start_at of this Shift.
:type: str
"""
if start_at is None:
raise ValueError("Invalid value for `start_at`, must not be `None`")
if len(start_at) < 1:
raise ValueError("Invalid value for `start_at`, length must be greater than or equal to `1`")
self._start_at = start_at |
def _param_grad_helper(self,X,X2,target):
"""Return shape is NxMx(Ntheta)"""
if X2 is None: X2 = X
FX = np.column_stack([f(X) for f in self.F])
FX2 = np.column_stack([f(X2) for f in self.F])
DER = np.zeros((self.n,self.n,self.n))
for i in range(self.n):
DER[i,i,i] = np.sqrt(self.weights[i])
dw = self.variance * mdot(FX,DER,self.G_1,np.diag(np.sqrt(self.weights)),FX2.T)
dv = mdot(FX,np.diag(np.sqrt(self.weights)),self.G_1,np.diag(np.sqrt(self.weights)),FX2.T)
np.add(target[:,:,0],np.transpose(dv,(0,2,1)), target[:,:,0])
np.add(target[:,:,1:],np.transpose(dw,(0,2,1)), target[:,:,1:]) | Return shape is NxMx(Ntheta) | Below is the instruction that describes the task:
### Input:
Return shape is NxMx(Ntheta)
### Response:
def _param_grad_helper(self,X,X2,target):
"""Return shape is NxMx(Ntheta)"""
if X2 is None: X2 = X
FX = np.column_stack([f(X) for f in self.F])
FX2 = np.column_stack([f(X2) for f in self.F])
DER = np.zeros((self.n,self.n,self.n))
for i in range(self.n):
DER[i,i,i] = np.sqrt(self.weights[i])
dw = self.variance * mdot(FX,DER,self.G_1,np.diag(np.sqrt(self.weights)),FX2.T)
dv = mdot(FX,np.diag(np.sqrt(self.weights)),self.G_1,np.diag(np.sqrt(self.weights)),FX2.T)
np.add(target[:,:,0],np.transpose(dv,(0,2,1)), target[:,:,0])
np.add(target[:,:,1:],np.transpose(dw,(0,2,1)), target[:,:,1:]) |
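The design matrices FX and FX2 above come from evaluating a list of basis functions column by column. A small numpy sketch of that construction with arbitrary example basis functions:

import numpy as np

F = [lambda X: np.ones(X.shape[0]), lambda X: X[:, 0], lambda X: X[:, 0] ** 2]

X = np.linspace(0.0, 1.0, 5).reshape(-1, 1)
# Each basis function contributes one column of the (N, len(F)) design matrix.
FX = np.column_stack([f(X) for f in F])
print(FX.shape)   # (5, 3)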