code (string, 75 - 104k chars) | docstring (string, 1 - 46.9k chars) | text (string, 164 - 112k chars)
---|---|---|
def id_request(self):
"""The Force.com Identity Service (return type dict of text_type)"""
# https://developer.salesforce.com/page/Digging_Deeper_into_OAuth_2.0_at_Salesforce.com?language=en&language=en#The_Force.com_Identity_Service
if 'id' in self.oauth:
url = self.oauth['id']
else:
# dynamic auth without 'id' parameter
url = self.urls_request()['identity']
ret = self.handle_api_exceptions('GET', url) # TODO
return ret.json() | The Force.com Identity Service (return type dict of text_type) | Below is the instruction that describes the task:
### Input:
The Force.com Identity Service (return type dict of text_type)
### Response:
def id_request(self):
"""The Force.com Identity Service (return type dict of text_type)"""
# https://developer.salesforce.com/page/Digging_Deeper_into_OAuth_2.0_at_Salesforce.com?language=en&language=en#The_Force.com_Identity_Service
if 'id' in self.oauth:
url = self.oauth['id']
else:
# dynamic auth without 'id' parameter
url = self.urls_request()['identity']
ret = self.handle_api_exceptions('GET', url) # TODO
return ret.json() |
def endpoint(self, endpoint):
"""Like :meth:`Flask.endpoint` but for a blueprint. This does not
prefix the endpoint with the blueprint name, this has to be done
explicitly by the user of this method. If the endpoint is prefixed
with a `.` it will be registered to the current blueprint, otherwise
it's an application independent endpoint.
"""
def decorator(f):
def register_endpoint(state):
state.app.view_functions[endpoint] = f
self.record_once(register_endpoint)
return f
return decorator | Like :meth:`Flask.endpoint` but for a blueprint. This does not
prefix the endpoint with the blueprint name, this has to be done
explicitly by the user of this method. If the endpoint is prefixed
with a `.` it will be registered to the current blueprint, otherwise
it's an application independent endpoint. | Below is the instruction that describes the task:
### Input:
Like :meth:`Flask.endpoint` but for a blueprint. This does not
prefix the endpoint with the blueprint name, this has to be done
explicitly by the user of this method. If the endpoint is prefixed
with a `.` it will be registered to the current blueprint, otherwise
it's an application independent endpoint.
### Response:
def endpoint(self, endpoint):
"""Like :meth:`Flask.endpoint` but for a blueprint. This does not
prefix the endpoint with the blueprint name, this has to be done
explicitly by the user of this method. If the endpoint is prefixed
with a `.` it will be registered to the current blueprint, otherwise
it's an application independent endpoint.
"""
def decorator(f):
def register_endpoint(state):
state.app.view_functions[endpoint] = f
self.record_once(register_endpoint)
return f
return decorator |
def delete_user(self, recipient_email):
"""
Remove user from encryption
"""
emailid_list = self.list_user_emails()
if recipient_email not in emailid_list:
raise Exception("User {0} not present!".format(recipient_email))
else:
emailid_list.remove(recipient_email)
self.y = self.decrypt()
self.encrypt(emailid_list=emailid_list) | Remove user from encryption | Below is the instruction that describes the task:
### Input:
Remove user from encryption
### Response:
def delete_user(self, recipient_email):
"""
Remove user from encryption
"""
emailid_list = self.list_user_emails()
if recipient_email not in emailid_list:
raise Exception("User {0} not present!".format(recipient_email))
else:
emailid_list.remove(recipient_email)
self.y = self.decrypt()
self.encrypt(emailid_list=emailid_list) |
def some(args):
"""
%prog some bedfile idsfile > newbedfile
Retrieve a subset of bed features given a list of ids.
"""
from jcvi.formats.base import SetFile
from jcvi.utils.cbook import gene_name
p = OptionParser(some.__doc__)
p.add_option("-v", dest="inverse", default=False, action="store_true",
help="Get the inverse, like grep -v [default: %default]")
p.set_outfile()
p.set_stripnames()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, idsfile = args
inverse = opts.inverse
ostrip = opts.strip_names
fw = must_open(opts.outfile, "w")
ids = SetFile(idsfile)
if ostrip:
ids = set(gene_name(x) for x in ids)
bed = Bed(bedfile)
ntotal = nkeep = 0
for b in bed:
ntotal += 1
keep = b.accn in ids
if inverse:
keep = not keep
if keep:
nkeep += 1
print(b, file=fw)
fw.close()
logging.debug("Stats: {0} features kept.".\
format(percentage(nkeep, ntotal))) | %prog some bedfile idsfile > newbedfile
Retrieve a subset of bed features given a list of ids. | Below is the instruction that describes the task:
### Input:
%prog some bedfile idsfile > newbedfile
Retrieve a subset of bed features given a list of ids.
### Response:
def some(args):
"""
%prog some bedfile idsfile > newbedfile
Retrieve a subset of bed features given a list of ids.
"""
from jcvi.formats.base import SetFile
from jcvi.utils.cbook import gene_name
p = OptionParser(some.__doc__)
p.add_option("-v", dest="inverse", default=False, action="store_true",
help="Get the inverse, like grep -v [default: %default]")
p.set_outfile()
p.set_stripnames()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, idsfile = args
inverse = opts.inverse
ostrip = opts.strip_names
fw = must_open(opts.outfile, "w")
ids = SetFile(idsfile)
if ostrip:
ids = set(gene_name(x) for x in ids)
bed = Bed(bedfile)
ntotal = nkeep = 0
for b in bed:
ntotal += 1
keep = b.accn in ids
if inverse:
keep = not keep
if keep:
nkeep += 1
print(b, file=fw)
fw.close()
logging.debug("Stats: {0} features kept.".\
format(percentage(nkeep, ntotal))) |
def running(name, **kwargs):
r'''
Defines and starts a new VM with specified arguments, or restart a
VM (or group of VMs). (Runs ``vagrant up``.)
:param name: the Salt_id node name you wish your VM to have.
If ``name`` contains a "?" or "*" then it will re-start a group of VMs
which have been paused or stopped.
Each machine must be initially started individually using this function
or the vagrant.init execution module call.
\[NOTE:\] Keyword arguments are silently ignored when re-starting an existing VM.
Possible keyword arguments:
- cwd: The directory (path) containing the Vagrantfile
- machine: ('') the name of the machine (in the Vagrantfile) if not default
- vagrant_runas: ('root') the username who owns the vagrantbox file
- vagrant_provider: the provider to run the VM (usually 'virtualbox')
- vm: ({}) a dictionary containing these or other keyword arguments
.. code-block:: yaml
node_name:
vagrant.running
.. code-block:: yaml
node_name:
vagrant.running:
- cwd: /projects/my_project
- vagrant_runas: my_username
- machine: machine1
'''
if '*' in name or '?' in name:
return _vagrant_call(name, 'start', 'restarted',
"Machine has been restarted", "running")
else:
ret = {'name': name,
'changes': {},
'result': True,
'comment': '{0} is already running'.format(name)
}
try:
info = __salt__['vagrant.vm_state'](name)
if info[0]['state'] != 'running':
__salt__['vagrant.start'](name)
ret['changes'][name] = 'Machine started'
ret['comment'] = 'Node {0} started'.format(name)
except (SaltInvocationError, CommandExecutionError):
# there was no viable existing machine to start
ret, kwargs = _find_init_change(name, ret, **kwargs)
kwargs['start'] = True
__salt__['vagrant.init'](name, **kwargs)
ret['changes'][name] = 'Node defined and started'
ret['comment'] = 'Node {0} defined and started'.format(name)
return ret | r'''
Defines and starts a new VM with specified arguments, or restart a
VM (or group of VMs). (Runs ``vagrant up``.)
:param name: the Salt_id node name you wish your VM to have.
If ``name`` contains a "?" or "*" then it will re-start a group of VMs
which have been paused or stopped.
Each machine must be initially started individually using this function
or the vagrant.init execution module call.
\[NOTE:\] Keyword arguments are silently ignored when re-starting an existing VM.
Possible keyword arguments:
- cwd: The directory (path) containing the Vagrantfile
- machine: ('') the name of the machine (in the Vagrantfile) if not default
- vagrant_runas: ('root') the username who owns the vagrantbox file
- vagrant_provider: the provider to run the VM (usually 'virtualbox')
- vm: ({}) a dictionary containing these or other keyword arguments
.. code-block:: yaml
node_name:
vagrant.running
.. code-block:: yaml
node_name:
vagrant.running:
- cwd: /projects/my_project
- vagrant_runas: my_username
- machine: machine1 | Below is the instruction that describes the task:
### Input:
r'''
Defines and starts a new VM with specified arguments, or restart a
VM (or group of VMs). (Runs ``vagrant up``.)
:param name: the Salt_id node name you wish your VM to have.
If ``name`` contains a "?" or "*" then it will re-start a group of VMs
which have been paused or stopped.
Each machine must be initially started individually using this function
or the vagrant.init execution module call.
\[NOTE:\] Keyword arguments are silently ignored when re-starting an existing VM.
Possible keyword arguments:
- cwd: The directory (path) containing the Vagrantfile
- machine: ('') the name of the machine (in the Vagrantfile) if not default
- vagrant_runas: ('root') the username who owns the vagrantbox file
- vagrant_provider: the provider to run the VM (usually 'virtualbox')
- vm: ({}) a dictionary containing these or other keyword arguments
.. code-block:: yaml
node_name:
vagrant.running
.. code-block:: yaml
node_name:
vagrant.running:
- cwd: /projects/my_project
- vagrant_runas: my_username
- machine: machine1
### Response:
def running(name, **kwargs):
r'''
Defines and starts a new VM with specified arguments, or restart a
VM (or group of VMs). (Runs ``vagrant up``.)
:param name: the Salt_id node name you wish your VM to have.
If ``name`` contains a "?" or "*" then it will re-start a group of VMs
which have been paused or stopped.
Each machine must be initially started individually using this function
or the vagrant.init execution module call.
\[NOTE:\] Keyword arguments are silently ignored when re-starting an existing VM.
Possible keyword arguments:
- cwd: The directory (path) containing the Vagrantfile
- machine: ('') the name of the machine (in the Vagrantfile) if not default
- vagrant_runas: ('root') the username who owns the vagrantbox file
- vagrant_provider: the provider to run the VM (usually 'virtualbox')
- vm: ({}) a dictionary containing these or other keyword arguments
.. code-block:: yaml
node_name:
vagrant.running
.. code-block:: yaml
node_name:
vagrant.running:
- cwd: /projects/my_project
- vagrant_runas: my_username
- machine: machine1
'''
if '*' in name or '?' in name:
return _vagrant_call(name, 'start', 'restarted',
"Machine has been restarted", "running")
else:
ret = {'name': name,
'changes': {},
'result': True,
'comment': '{0} is already running'.format(name)
}
try:
info = __salt__['vagrant.vm_state'](name)
if info[0]['state'] != 'running':
__salt__['vagrant.start'](name)
ret['changes'][name] = 'Machine started'
ret['comment'] = 'Node {0} started'.format(name)
except (SaltInvocationError, CommandExecutionError):
# there was no viable existing machine to start
ret, kwargs = _find_init_change(name, ret, **kwargs)
kwargs['start'] = True
__salt__['vagrant.init'](name, **kwargs)
ret['changes'][name] = 'Node defined and started'
ret['comment'] = 'Node {0} defined and started'.format(name)
return ret |
def solve_venn2_circles(venn_areas):
'''
Given the list of "venn areas" (as output from compute_venn2_areas, i.e. [A, B, AB]),
finds the positions and radii of the two circles.
The return value is a tuple (coords, radii), where coords is a 2x2 array of coordinates and
radii is a 2x1 array of circle radii.
Assumes the input values to be nonnegative and not all zero.
In particular, the first two values must be positive.
>>> c, r = solve_venn2_circles((1, 1, 0))
>>> np.round(r, 3)
array([ 0.564, 0.564])
>>> c, r = solve_venn2_circles(compute_venn2_areas((1, 2, 3)))
>>> np.round(r, 3)
array([ 0.461, 0.515])
'''
(A_a, A_b, A_ab) = list(map(float, venn_areas))
r_a, r_b = np.sqrt(A_a / np.pi), np.sqrt(A_b / np.pi)
radii = np.array([r_a, r_b])
if A_ab > tol:
# Nonzero intersection
coords = np.zeros((2, 2))
coords[1][0] = find_distance_by_area(radii[0], radii[1], A_ab)
else:
# Zero intersection
coords = np.zeros((2, 2))
coords[1][0] = radii[0] + radii[1] + max(np.mean(radii) * 1.1, 0.2) # The max here is needed for the case r_a = r_b = 0
coords = normalize_by_center_of_mass(coords, radii)
return (coords, radii) | Given the list of "venn areas" (as output from compute_venn2_areas, i.e. [A, B, AB]),
finds the positions and radii of the two circles.
The return value is a tuple (coords, radii), where coords is a 2x2 array of coordinates and
radii is a 2x1 array of circle radii.
Assumes the input values to be nonnegative and not all zero.
In particular, the first two values must be positive.
>>> c, r = solve_venn2_circles((1, 1, 0))
>>> np.round(r, 3)
array([ 0.564, 0.564])
>>> c, r = solve_venn2_circles(compute_venn2_areas((1, 2, 3)))
>>> np.round(r, 3)
array([ 0.461, 0.515]) | Below is the instruction that describes the task:
### Input:
Given the list of "venn areas" (as output from compute_venn2_areas, i.e. [A, B, AB]),
finds the positions and radii of the two circles.
The return value is a tuple (coords, radii), where coords is a 2x2 array of coordinates and
radii is a 2x1 array of circle radii.
Assumes the input values to be nonnegative and not all zero.
In particular, the first two values must be positive.
>>> c, r = solve_venn2_circles((1, 1, 0))
>>> np.round(r, 3)
array([ 0.564, 0.564])
>>> c, r = solve_venn2_circles(compute_venn2_areas((1, 2, 3)))
>>> np.round(r, 3)
array([ 0.461, 0.515])
### Response:
def solve_venn2_circles(venn_areas):
'''
Given the list of "venn areas" (as output from compute_venn2_areas, i.e. [A, B, AB]),
finds the positions and radii of the two circles.
The return value is a tuple (coords, radii), where coords is a 2x2 array of coordinates and
radii is a 2x1 array of circle radii.
Assumes the input values to be nonnegative and not all zero.
In particular, the first two values must be positive.
>>> c, r = solve_venn2_circles((1, 1, 0))
>>> np.round(r, 3)
array([ 0.564, 0.564])
>>> c, r = solve_venn2_circles(compute_venn2_areas((1, 2, 3)))
>>> np.round(r, 3)
array([ 0.461, 0.515])
'''
(A_a, A_b, A_ab) = list(map(float, venn_areas))
r_a, r_b = np.sqrt(A_a / np.pi), np.sqrt(A_b / np.pi)
radii = np.array([r_a, r_b])
if A_ab > tol:
# Nonzero intersection
coords = np.zeros((2, 2))
coords[1][0] = find_distance_by_area(radii[0], radii[1], A_ab)
else:
# Zero intersection
coords = np.zeros((2, 2))
coords[1][0] = radii[0] + radii[1] + max(np.mean(radii) * 1.1, 0.2) # The max here is needed for the case r_a = r_b = 0
coords = normalize_by_center_of_mass(coords, radii)
return (coords, radii) |
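The helper find_distance_by_area used above is assumed rather than shown; a minimal sketch of what it has to do (not the library's actual implementation) is a bracketed root search over the standard circular-lens area formula:

import numpy as np
from scipy.optimize import brentq

def lens_area(d, r1, r2):
    # Intersection area of circles with radii r1, r2 whose centers are d apart.
    if d >= r1 + r2:
        return 0.0                       # disjoint
    if d <= abs(r1 - r2):
        return np.pi * min(r1, r2) ** 2  # one circle nested inside the other
    a1 = r1 ** 2 * np.arccos((d ** 2 + r1 ** 2 - r2 ** 2) / (2 * d * r1))
    a2 = r2 ** 2 * np.arccos((d ** 2 + r2 ** 2 - r1 ** 2) / (2 * d * r2))
    corr = 0.5 * np.sqrt((-d + r1 + r2) * (d + r1 - r2) * (d - r1 + r2) * (d + r1 + r2))
    return a1 + a2 - corr

def find_distance_by_area_sketch(r1, r2, target):
    # The overlap shrinks monotonically with distance, so bracket the root
    # between "almost nested" and "barely touching".
    return brentq(lambda d: lens_area(d, r1, r2) - target,
                  abs(r1 - r2) + 1e-12, r1 + r2 - 1e-12)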
def send(self, data, room=None, skip_sid=None, namespace=None,
callback=None):
"""Send a message to one or more connected clients.
The only difference with the :func:`socketio.Server.send` method is
that when the ``namespace`` argument is not given the namespace
associated with the class is used.
"""
return self.server.send(data, room=room, skip_sid=skip_sid,
namespace=namespace or self.namespace,
callback=callback) | Send a message to one or more connected clients.
The only difference with the :func:`socketio.Server.send` method is
that when the ``namespace`` argument is not given the namespace
associated with the class is used. | Below is the instruction that describes the task:
### Input:
Send a message to one or more connected clients.
The only difference with the :func:`socketio.Server.send` method is
that when the ``namespace`` argument is not given the namespace
associated with the class is used.
### Response:
def send(self, data, room=None, skip_sid=None, namespace=None,
callback=None):
"""Send a message to one or more connected clients.
The only difference with the :func:`socketio.Server.send` method is
that when the ``namespace`` argument is not given the namespace
associated with the class is used.
"""
return self.server.send(data, room=room, skip_sid=skip_sid,
namespace=namespace or self.namespace,
callback=callback) |
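A hedged usage sketch of the class-based namespace this method belongs to, using python-socketio's public API; the '/chat' namespace path is hypothetical:

import socketio

sio = socketio.Server()

class ChatNamespace(socketio.Namespace):
    def on_message(self, sid, data):
        # No namespace argument, so the class's own namespace ('/chat') is used.
        self.send(data, room=sid)

sio.register_namespace(ChatNamespace('/chat'))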
def build_rects(tmxmap, layer, tileset=None, real_gid=None):
"""generate a set of non-overlapping rects that represents the distribution
of the specified gid.
useful for generating rects for use in collision detection
Use at your own risk: this is experimental...will change in future
GID Note: You will need to add 1 to the GID reported by Tiled.
:param tmxmap: TiledMap object
:param layer: int or string name of layer
:param tileset: int or string name of tileset
:param real_gid: Tiled GID of the tile + 1 (see note)
:return: List of pygame Rect objects
"""
if isinstance(tileset, int):
try:
tileset = tmxmap.tilesets[tileset]
except IndexError:
msg = "Tileset #{0} not found in map {1}."
logger.debug(msg.format(tileset, tmxmap))
raise IndexError
elif isinstance(tileset, str):
try:
tileset = [t for t in tmxmap.tilesets if t.name == tileset].pop()
except IndexError:
msg = "Tileset \"{0}\" not found in map {1}."
logger.debug(msg.format(tileset, tmxmap))
raise ValueError
elif tileset:
msg = "Tileset must be either a int or string. got: {0}"
logger.debug(msg.format(type(tileset)))
raise TypeError
gid = None
if real_gid:
try:
gid, flags = tmxmap.map_gid(real_gid)[0]
except IndexError:
msg = "GID #{0} not found"
logger.debug(msg.format(real_gid))
raise ValueError
if isinstance(layer, int):
layer_data = tmxmap.get_layer_data(layer)
elif isinstance(layer, str):
try:
layer = [l for l in tmxmap.layers if l.name == layer].pop()
layer_data = layer.data
except IndexError:
msg = "Layer \"{0}\" not found in map {1}."
logger.debug(msg.format(layer, tmxmap))
raise ValueError
p = itertools.product(range(tmxmap.width), range(tmxmap.height))
if gid:
points = [(x, y) for (x, y) in p if layer_data[y][x] == gid]
else:
points = [(x, y) for (x, y) in p if layer_data[y][x]]
rects = simplify(points, tmxmap.tilewidth, tmxmap.tileheight)
return rects | generate a set of non-overlapping rects that represents the distribution
of the specified gid.
useful for generating rects for use in collision detection
Use at your own risk: this is experimental...will change in future
GID Note: You will need to add 1 to the GID reported by Tiled.
:param tmxmap: TiledMap object
:param layer: int or string name of layer
:param tileset: int or string name of tileset
:param real_gid: Tiled GID of the tile + 1 (see note)
:return: List of pygame Rect objects | Below is the instruction that describes the task:
### Input:
generate a set of non-overlapping rects that represents the distribution
of the specified gid.
useful for generating rects for use in collision detection
Use at your own risk: this is experimental...will change in future
GID Note: You will need to add 1 to the GID reported by Tiled.
:param tmxmap: TiledMap object
:param layer: int or string name of layer
:param tileset: int or string name of tileset
:param real_gid: Tiled GID of the tile + 1 (see note)
:return: List of pygame Rect objects
### Response:
def build_rects(tmxmap, layer, tileset=None, real_gid=None):
"""generate a set of non-overlapping rects that represents the distribution
of the specified gid.
useful for generating rects for use in collision detection
Use at your own risk: this is experimental...will change in future
GID Note: You will need to add 1 to the GID reported by Tiled.
:param tmxmap: TiledMap object
:param layer: int or string name of layer
:param tileset: int or string name of tileset
:param real_gid: Tiled GID of the tile + 1 (see note)
:return: List of pygame Rect objects
"""
if isinstance(tileset, int):
try:
tileset = tmxmap.tilesets[tileset]
except IndexError:
msg = "Tileset #{0} not found in map {1}."
logger.debug(msg.format(tileset, tmxmap))
raise IndexError
elif isinstance(tileset, str):
try:
tileset = [t for t in tmxmap.tilesets if t.name == tileset].pop()
except IndexError:
msg = "Tileset \"{0}\" not found in map {1}."
logger.debug(msg.format(tileset, tmxmap))
raise ValueError
elif tileset:
msg = "Tileset must be either a int or string. got: {0}"
logger.debug(msg.format(type(tileset)))
raise TypeError
gid = None
if real_gid:
try:
gid, flags = tmxmap.map_gid(real_gid)[0]
except IndexError:
msg = "GID #{0} not found"
logger.debug(msg.format(real_gid))
raise ValueError
if isinstance(layer, int):
layer_data = tmxmap.get_layer_data(layer)
elif isinstance(layer, str):
try:
layer = [l for l in tmxmap.layers if l.name == layer].pop()
layer_data = layer.data
except IndexError:
msg = "Layer \"{0}\" not found in map {1}."
logger.debug(msg.format(layer, tmxmap))
raise ValueError
p = itertools.product(range(tmxmap.width), range(tmxmap.height))
if gid:
points = [(x, y) for (x, y) in p if layer_data[y][x] == gid]
else:
points = [(x, y) for (x, y) in p if layer_data[y][x]]
rects = simplify(points, tmxmap.tilewidth, tmxmap.tileheight)
return rects |
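A hedged usage sketch: it assumes build_rects (as defined above) is in scope, that pytmx and pygame are installed, and that the map file and layer name exist; all concrete names here are hypothetical:

from pytmx.util_pygame import load_pygame

tmx_map = load_pygame("level1.tmx")          # hypothetical map file
walls = build_rects(tmx_map, layer="Walls")  # hypothetical layer; tileset/real_gid are optional
print(len(walls), "collision rects")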
def add_item(self, query_params=None):
'''
Add an item to this checklist. Returns a dictionary of values of new
item.
'''
return self.fetch_json(
uri_path=self.base_uri + '/checkItems',
http_method='POST',
query_params=query_params or {}
) | Add an item to this checklist. Returns a dictionary of values of new
item. | Below is the instruction that describes the task:
### Input:
Add an item to this checklist. Returns a dictionary of values of new
item.
### Response:
def add_item(self, query_params=None):
'''
Add an item to this checklist. Returns a dictionary of values of new
item.
'''
return self.fetch_json(
uri_path=self.base_uri + '/checkItems',
http_method='POST',
query_params=query_params or {}
) |
def _parse_config_file_impl(filename):
"""
Format for the file is:
credentials:
project_id: ...
access_token: ...
api_domain: ...
:param filename: The filename to parse
:return: A tuple with:
- project_id
- access_token
- api_domain
"""
try:
doc = yaml.load(file(filename).read())
project_id = doc["credentials"]["project_id"]
access_token = doc["credentials"]["access_token"]
api_domain = doc["credentials"]["api_domain"]
return project_id, access_token, api_domain
except:
return None, None, None | Format for the file is:
credentials:
project_id: ...
access_token: ...
api_domain: ...
:param filename: The filename to parse
:return: A tuple with:
- project_id
- access_token
- api_domain | Below is the instruction that describes the task:
### Input:
Format for the file is:
credentials:
project_id: ...
access_token: ...
api_domain: ...
:param filename: The filename to parse
:return: A tuple with:
- project_id
- access_token
- api_domain
### Response:
def _parse_config_file_impl(filename):
"""
Format for the file is:
credentials:
project_id: ...
access_token: ...
api_domain: ...
:param filename: The filename to parse
:return: A tuple with:
- project_id
- access_token
- api_domain
"""
try:
doc = yaml.load(file(filename).read())
project_id = doc["credentials"]["project_id"]
access_token = doc["credentials"]["access_token"]
api_domain = doc["credentials"]["api_domain"]
return project_id, access_token, api_domain
except:
return None, None, None |
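A self-contained sketch of the same parse using yaml.safe_load (the function above relies on the Python 2 `file` builtin, so it only runs under Python 2); the credential values are hypothetical:

import yaml

config_text = """
credentials:
  project_id: my-project
  access_token: abc123
  api_domain: api.example.com
"""
doc = yaml.safe_load(config_text)
creds = doc["credentials"]
print(creds["project_id"], creds["access_token"], creds["api_domain"])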
def AddEventSource(self, event_source):
"""Adds an event source.
Args:
event_source (EventSource): event source.
Raises:
IOError: when the storage file is closed or read-only.
OSError: when the storage file is closed or read-only.
"""
self._RaiseIfNotWritable()
self._AddAttributeContainer(
self._CONTAINER_TYPE_EVENT_SOURCE, event_source) | Adds an event source.
Args:
event_source (EventSource): event source.
Raises:
IOError: when the storage file is closed or read-only.
OSError: when the storage file is closed or read-only. | Below is the instruction that describes the task:
### Input:
Adds an event source.
Args:
event_source (EventSource): event source.
Raises:
IOError: when the storage file is closed or read-only.
OSError: when the storage file is closed or read-only.
### Response:
def AddEventSource(self, event_source):
"""Adds an event source.
Args:
event_source (EventSource): event source.
Raises:
IOError: when the storage file is closed or read-only.
OSError: when the storage file is closed or read-only.
"""
self._RaiseIfNotWritable()
self._AddAttributeContainer(
self._CONTAINER_TYPE_EVENT_SOURCE, event_source) |
def shift_by_n_processors(self, x, mesh_axis, offset, wrap):
"""Receive the slice from processor pcoord - offset.
Args:
x: a LaidOutTensor
mesh_axis: an integer
offset: an integer
wrap: a boolean. If True, then wrap around. Otherwise, pad with zeros.
"""
n = self.shape[mesh_axis].size
source_pcoord = []
for i in xrange(n):
c = i - offset
if c != c % n:
if wrap:
c = c % n
else:
c = None
source_pcoord.append(c)
return self.receive(x, mesh_axis, source_pcoord) | Receive the slice from processor pcoord - offset.
Args:
x: a LaidOutTensor
mesh_axis: an integer
offset: an integer
wrap: a boolean. If True, then wrap around. Otherwise, pad with zeros. | Below is the instruction that describes the task:
### Input:
Receive the slice from processor pcoord - offset.
Args:
x: a LaidOutTensor
mesh_axis: an integer
offset: an integer
wrap: a boolean. If True, then wrap around. Otherwise, pad with zeros.
### Response:
def shift_by_n_processors(self, x, mesh_axis, offset, wrap):
"""Receive the slice from processor pcoord - offset.
Args:
x: a LaidOutTensor
mesh_axis: an integer
offset: an integer
wrap: a boolean. If True, then wrap around. Otherwise, pad with zeros.
"""
n = self.shape[mesh_axis].size
source_pcoord = []
for i in xrange(n):
c = i - offset
if c != c % n:
if wrap:
c = c % n
else:
c = None
source_pcoord.append(c)
return self.receive(x, mesh_axis, source_pcoord) |
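A standalone illustration of the source_pcoord computation above: each of the n positions receives from position i - offset, wrapping around when wrap is True and receiving zeros (marked None here) otherwise:

def shift_sources(n, offset, wrap):
    sources = []
    for i in range(n):
        c = i - offset
        if c != c % n:
            c = c % n if wrap else None
        sources.append(c)
    return sources

print(shift_sources(4, 1, wrap=True))    # [3, 0, 1, 2]
print(shift_sources(4, 1, wrap=False))   # [None, 0, 1, 2]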
def _create_search_filter(filter_by):
"""
:param filter_by:
:return: str
"""
return ",".join(
[
"{0}:{1}".format(key, value)
for key, value in filter_by.items()
if value is not None
]
) | :param filter_by:
:return: str | Below is the instruction that describes the task:
### Input:
:param filter_by:
:return: str
### Response:
def _create_search_filter(filter_by):
"""
:param filter_by:
:return: str
"""
return ",".join(
[
"{0}:{1}".format(key, value)
for key, value in filter_by.items()
if value is not None
]
) |
def _get_area(self):
"""
Subclasses may override this method.
"""
from fontTools.pens.areaPen import AreaPen
pen = AreaPen(self.layer)
self.draw(pen)
return abs(pen.value) | Subclasses may override this method. | Below is the instruction that describes the task:
### Input:
Subclasses may override this method.
### Response:
def _get_area(self):
"""
Subclasses may override this method.
"""
from fontTools.pens.areaPen import AreaPen
pen = AreaPen(self.layer)
self.draw(pen)
return abs(pen.value) |
def get_pfam(pdb_id):
"""Return PFAM annotations of given PDB_ID
Parameters
----------
pdb_id : string
A 4 character string giving a pdb entry of interest
Returns
-------
out : dict
A dictionary containing the PFAM annotations for the specified PDB ID
Examples
--------
>>> pfam_info = get_pfam('2LME')
>>> print(pfam_info)
{'pfamHit': {'@pfamAcc': 'PF03895.10', '@pfamName': 'YadA_anchor',
'@structureId': '2LME', '@pdbResNumEnd': '105', '@pdbResNumStart': '28',
'@pfamDesc': 'YadA-like C-terminal region', '@eValue': '5.0E-22', '@chainId': 'A'}}
"""
out = get_info(pdb_id, url_root = 'http://www.rcsb.org/pdb/rest/hmmer?structureId=')
out = to_dict(out)
if not out['hmmer3']:
return dict()
return remove_at_sign(out['hmmer3']) | Return PFAM annotations of given PDB_ID
Parameters
----------
pdb_id : string
A 4 character string giving a pdb entry of interest
Returns
-------
out : dict
A dictionary containing the PFAM annotations for the specified PDB ID
Examples
--------
>>> pfam_info = get_pfam('2LME')
>>> print(pfam_info)
{'pfamHit': {'@pfamAcc': 'PF03895.10', '@pfamName': 'YadA_anchor',
'@structureId': '2LME', '@pdbResNumEnd': '105', '@pdbResNumStart': '28',
'@pfamDesc': 'YadA-like C-terminal region', '@eValue': '5.0E-22', '@chainId': 'A'}} | Below is the instruction that describes the task:
### Input:
Return PFAM annotations of given PDB_ID
Parameters
----------
pdb_id : string
A 4 character string giving a pdb entry of interest
Returns
-------
out : dict
A dictionary containing the PFAM annotations for the specified PDB ID
Examples
--------
>>> pfam_info = get_pfam('2LME')
>>> print(pfam_info)
{'pfamHit': {'@pfamAcc': 'PF03895.10', '@pfamName': 'YadA_anchor',
'@structureId': '2LME', '@pdbResNumEnd': '105', '@pdbResNumStart': '28',
'@pfamDesc': 'YadA-like C-terminal region', '@eValue': '5.0E-22', '@chainId': 'A'}}
### Response:
def get_pfam(pdb_id):
"""Return PFAM annotations of given PDB_ID
Parameters
----------
pdb_id : string
A 4 character string giving a pdb entry of interest
Returns
-------
out : dict
A dictionary containing the PFAM annotations for the specified PDB ID
Examples
--------
>>> pfam_info = get_pfam('2LME')
>>> print(pfam_info)
{'pfamHit': {'@pfamAcc': 'PF03895.10', '@pfamName': 'YadA_anchor',
'@structureId': '2LME', '@pdbResNumEnd': '105', '@pdbResNumStart': '28',
'@pfamDesc': 'YadA-like C-terminal region', '@eValue': '5.0E-22', '@chainId': 'A'}}
"""
out = get_info(pdb_id, url_root = 'http://www.rcsb.org/pdb/rest/hmmer?structureId=')
out = to_dict(out)
if not out['hmmer3']:
return dict()
return remove_at_sign(out['hmmer3']) |
def db_group(self):
'''str: database system group (defaults to
:attr:`db_user <tmdeploy.config.AnsibleHostVariableSection.db_user>`)
'''
if self._db_group is None:
self._db_group = self.db_user
return self._db_group | str: database system group (defaults to
:attr:`db_user <tmdeploy.config.AnsibleHostVariableSection.db_user>`) | Below is the instruction that describes the task:
### Input:
str: database system group (defaults to
:attr:`db_user <tmdeploy.config.AnsibleHostVariableSection.db_user>`)
### Response:
def db_group(self):
'''str: database system group (defaults to
:attr:`db_user <tmdeploy.config.AnsibleHostVariableSection.db_user>`)
'''
if self._db_group is None:
self._db_group = self.db_user
return self._db_group |
def openlines(image, linelength=10, dAngle=10, mask=None):
"""
Do a morphological opening along lines of different angles.
Return difference between max and min response to different angles for each pixel.
This effectively removes dots and only keeps lines.
image - pixel image to operate on
length - length of the structural element
angular_resolution - angle step for the rotating lines
mask - if present, only use unmasked pixels for operations
"""
nAngles = 180//dAngle
openingstack = np.zeros((nAngles,image.shape[0],image.shape[1]),image.dtype)
for iAngle in range(nAngles):
angle = dAngle * iAngle
se = strel_line(linelength,angle)
openingstack[iAngle,:,:] = opening(image, mask=mask, footprint=se)
imLines = np.max(openingstack,axis=0) - np.min(openingstack,axis=0)
return imLines | Do a morphological opening along lines of different angles.
Return difference between max and min response to different angles for each pixel.
This effectively removes dots and only keeps lines.
image - pixel image to operate on
length - length of the structural element
angular_resolution - angle step for the rotating lines
mask - if present, only use unmasked pixels for operations | Below is the instruction that describes the task:
### Input:
Do a morphological opening along lines of different angles.
Return difference between max and min response to different angles for each pixel.
This effectively removes dots and only keeps lines.
image - pixel image to operate on
length - length of the structural element
angular_resolution - angle step for the rotating lines
mask - if present, only use unmasked pixels for operations
### Response:
def openlines(image, linelength=10, dAngle=10, mask=None):
"""
Do a morphological opening along lines of different angles.
Return difference between max and min response to different angles for each pixel.
This effectively removes dots and only keeps lines.
image - pixel image to operate on
length - length of the structural element
angular_resolution - angle step for the rotating lines
mask - if present, only use unmasked pixels for operations
"""
nAngles = 180//dAngle
openingstack = np.zeros((nAngles,image.shape[0],image.shape[1]),image.dtype)
for iAngle in range(nAngles):
angle = dAngle * iAngle
se = strel_line(linelength,angle)
openingstack[iAngle,:,:] = opening(image, mask=mask, footprint=se)
imLines = np.max(openingstack,axis=0) - np.min(openingstack,axis=0)
return imLines |
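A compact illustration of the underlying idea with scipy.ndimage (a stand-in for the strel_line/opening helpers assumed above): opening with a long horizontal footprint keeps horizontal strokes but erases isolated dots:

import numpy as np
from scipy.ndimage import grey_opening

img = np.zeros((9, 20))
img[4, 2:18] = 1.0        # a horizontal line
img[1, 10] = 1.0          # an isolated dot
opened = grey_opening(img, footprint=np.ones((1, 7)))
print(opened[4, 2:18].max(), opened[1, 10])   # 1.0 0.0 -> line survives, dot is removed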
def _get_event_source_obj(awsclient, evt_source):
"""
Given awsclient, event_source dictionary item
create an event_source object of the appropriate event type
to schedule this event, and return the object.
"""
event_source_map = {
'dynamodb': event_source.dynamodb_stream.DynamoDBStreamEventSource,
'kinesis': event_source.kinesis.KinesisEventSource,
's3': event_source.s3.S3EventSource,
'sns': event_source.sns.SNSEventSource,
'events': event_source.cloudwatch.CloudWatchEventSource,
'cloudfront': event_source.cloudfront.CloudFrontEventSource,
'cloudwatch_logs': event_source.cloudwatch_logs.CloudWatchLogsEventSource,
}
evt_type = _get_event_type(evt_source)
event_source_func = event_source_map.get(evt_type, None)
if not event_source_func:
raise ValueError('Unknown event source: {0}'.format(
evt_source['arn']))
return event_source_func(awsclient, evt_source) | Given awsclient, event_source dictionary item
create an event_source object of the appropriate event type
to schedule this event, and return the object. | Below is the instruction that describes the task:
### Input:
Given awsclient, event_source dictionary item
create an event_source object of the appropriate event type
to schedule this event, and return the object.
### Response:
def _get_event_source_obj(awsclient, evt_source):
"""
Given awsclient, event_source dictionary item
create an event_source object of the appropriate event type
to schedule this event, and return the object.
"""
event_source_map = {
'dynamodb': event_source.dynamodb_stream.DynamoDBStreamEventSource,
'kinesis': event_source.kinesis.KinesisEventSource,
's3': event_source.s3.S3EventSource,
'sns': event_source.sns.SNSEventSource,
'events': event_source.cloudwatch.CloudWatchEventSource,
'cloudfront': event_source.cloudfront.CloudFrontEventSource,
'cloudwatch_logs': event_source.cloudwatch_logs.CloudWatchLogsEventSource,
}
evt_type = _get_event_type(evt_source)
event_source_func = event_source_map.get(evt_type, None)
if not event_source_func:
raise ValueError('Unknown event source: {0}'.format(
evt_source['arn']))
return event_source_func(awsclient, evt_source) |
def _altair_line_num_(self, xfield, yfield, opts, style, encode):
"""
Get a line + text number chart
"""
try:
c = self._altair_chart_num_("line", xfield,
yfield, opts, style, encode)
except Exception as e:
self.err(e, "Can not draw a line num chart")
return
return c | Get a line + text number chart | Below is the instruction that describes the task:
### Input:
Get a line + text number chart
### Response:
def _altair_line_num_(self, xfield, yfield, opts, style, encode):
"""
Get a line + text number chart
"""
try:
c = self._altair_chart_num_("line", xfield,
yfield, opts, style, encode)
except Exception as e:
self.err(e, "Can not draw a line num chart")
return
return c |
def epochs(steps=None, epoch_steps=1):
"""Iterator over epochs until steps is reached. 1-indexed.
Args:
steps: int, total number of steps. Infinite if None.
epoch_steps: int, number of steps per epoch. Can also be an iterable<int> to
enable variable length epochs.
Yields:
(epoch: int, epoch id, epoch_steps: int, number of steps in this epoch)
"""
try:
iter(epoch_steps)
except TypeError:
epoch_steps = itertools.repeat(epoch_steps)
step = 0
for epoch, epoch_steps in enumerate(epoch_steps):
if steps is not None: epoch_steps = min(epoch_steps, steps - step)  # steps=None means no overall cap
yield (epoch + 1, epoch_steps)
step += epoch_steps
if steps and step >= steps:
break | Iterator over epochs until steps is reached. 1-indexed.
Args:
steps: int, total number of steps. Infinite if None.
epoch_steps: int, number of steps per epoch. Can also be an iterable<int> to
enable variable length epochs.
Yields:
(epoch: int, epoch id, epoch_steps: int, number of steps in this epoch) | Below is the instruction that describes the task:
### Input:
Iterator over epochs until steps is reached. 1-indexed.
Args:
steps: int, total number of steps. Infinite if None.
epoch_steps: int, number of steps per epoch. Can also be an iterable<int> to
enable variable length epochs.
Yields:
(epoch: int, epoch id, epoch_steps: int, number of steps in this epoch)
### Response:
def epochs(steps=None, epoch_steps=1):
"""Iterator over epochs until steps is reached. 1-indexed.
Args:
steps: int, total number of steps. Infinite if None.
epoch_steps: int, number of steps per epoch. Can also be an iterable<int> to
enable variable length epochs.
Yields:
(epoch: int, epoch id, epoch_steps: int, number of steps in this epoch)
"""
try:
iter(epoch_steps)
except TypeError:
epoch_steps = itertools.repeat(epoch_steps)
step = 0
for epoch, epoch_steps in enumerate(epoch_steps):
if steps is not None: epoch_steps = min(epoch_steps, steps - step)  # steps=None means no overall cap
yield (epoch + 1, epoch_steps)
step += epoch_steps
if steps and step >= steps:
break |
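Assuming the generator above is in scope, a quick walk-through with 10 total steps split into epochs of at most 4 steps:

for epoch, n_steps in epochs(steps=10, epoch_steps=4):
    print(epoch, n_steps)   # -> 1 4, then 2 4, then 3 2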
def config_prov(config):
"""Read providers from configfile and de-duplicate it."""
try:
providers = [e.strip() for e in (config['info']
['providers']).split(',')]
except KeyError as e:
print("Error reading config item: {}".format(e))
sys.exit()
providers = list(OrderedDict.fromkeys(providers))
return providers | Read providers from configfile and de-duplicate it. | Below is the instruction that describes the task:
### Input:
Read providers from configfile and de-duplicate it.
### Response:
def config_prov(config):
"""Read providers from configfile and de-duplicate it."""
try:
providers = [e.strip() for e in (config['info']
['providers']).split(',')]
except KeyError as e:
print("Error reading config item: {}".format(e))
sys.exit()
providers = list(OrderedDict.fromkeys(providers))
return providers |
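A runnable sketch with configparser (the section layout and provider names are hypothetical), assuming config_prov above and its imports are in scope; duplicates collapse while the original order is preserved:

import configparser

config = configparser.ConfigParser()
config.read_string("[info]\nproviders = aws, gcp, aws, azure\n")
print(config_prov(config))   # -> ['aws', 'gcp', 'azure']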
def update(self, friendly_name=values.unset, unique_name=values.unset):
"""
Update the FieldTypeInstance
:param unicode friendly_name: A string to describe the resource
:param unicode unique_name: An application-defined string that uniquely identifies the resource
:returns: Updated FieldTypeInstance
:rtype: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeInstance
"""
return self._proxy.update(friendly_name=friendly_name, unique_name=unique_name, ) | Update the FieldTypeInstance
:param unicode friendly_name: A string to describe the resource
:param unicode unique_name: An application-defined string that uniquely identifies the resource
:returns: Updated FieldTypeInstance
:rtype: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeInstance | Below is the instruction that describes the task:
### Input:
Update the FieldTypeInstance
:param unicode friendly_name: A string to describe the resource
:param unicode unique_name: An application-defined string that uniquely identifies the resource
:returns: Updated FieldTypeInstance
:rtype: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeInstance
### Response:
def update(self, friendly_name=values.unset, unique_name=values.unset):
"""
Update the FieldTypeInstance
:param unicode friendly_name: A string to describe the resource
:param unicode unique_name: An application-defined string that uniquely identifies the resource
:returns: Updated FieldTypeInstance
:rtype: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeInstance
"""
return self._proxy.update(friendly_name=friendly_name, unique_name=unique_name, ) |
def czdivide(a, b, null=0):
'''
czdivide(a, b) returns the quotient a / b as a numpy array object. Like numpy's divide function
or a/b syntax, czdivide will thread over the latest dimension possible. Unlike numpy's divide,
czdivide works with sparse matrices. Additionally, czdivide multiplies a by the zinv of b, so
divide-by-zero entries are replaced with 0 in the result.
The optional argument null (default: 0) may be given to specify that zeros in the arary b should
instead be replaced with the given value in the result. Note that if this value is not equal to
0, then any sparse array passed as argument b must be reified.
The czdivide function never raises an error due to divide-by-zero; if you desire this behavior,
use the cdivide function instead.
'''
if null == 0: return a.multiply(zinv(b)) if sps.issparse(a) else a * zinv(b)
elif sps.issparse(b): b = b.toarray()
else: b = np.asarray(b)
z = np.isclose(b, 0)
q = np.logical_not(z)
zi = q / (b + z)
if sps.issparse(a):
r = a.multiply(zi).tocsr()
else:
r = np.asarray(a) * zi
r[np.ones(a.shape, dtype=np.bool)*z] = null
return r | czdivide(a, b) returns the quotient a / b as a numpy array object. Like numpy's divide function
or a/b syntax, czdivide will thread over the latest dimension possible. Unlike numpy's divide,
czdivide works with sparse matrices. Additionally, czdivide multiplies a by the zinv of b, so
divide-by-zero entries are replaced with 0 in the result.
The optional argument null (default: 0) may be given to specify that zeros in the array b should
instead be replaced with the given value in the result. Note that if this value is not equal to
0, then any sparse array passed as argument b must be reified.
The czdivide function never raises an error due to divide-by-zero; if you desire this behavior,
use the cdivide function instead. | Below is the instruction that describes the task:
### Input:
czdivide(a, b) returns the quotient a / b as a numpy array object. Like numpy's divide function
or a/b syntax, czdivide will thread over the latest dimension possible. Unlike numpy's divide,
czdivide works with sparse matrices. Additionally, czdivide multiplies a by the zinv of b, so
divide-by-zero entries are replaced with 0 in the result.
The optional argument null (default: 0) may be given to specify that zeros in the array b should
instead be replaced with the given value in the result. Note that if this value is not equal to
0, then any sparse array passed as argument b must be reified.
The czdivide function never raises an error due to divide-by-zero; if you desire this behavior,
use the cdivide function instead.
### Response:
def czdivide(a, b, null=0):
'''
czdivide(a, b) returns the quotient a / b as a numpy array object. Like numpy's divide function
or a/b syntax, czdivide will thread over the latest dimension possible. Unlike numpy's divide,
czdivide works with sparse matrices. Additionally, czdivide multiplies a by the zinv of b, so
divide-by-zero entries are replaced with 0 in the result.
The optional argument null (default: 0) may be given to specify that zeros in the array b should
instead be replaced with the given value in the result. Note that if this value is not equal to
0, then any sparse array passed as argument b must be reified.
The czdivide function never raises an error due to divide-by-zero; if you desire this behavior,
use the cdivide function instead.
'''
if null == 0: return a.multiply(zinv(b)) if sps.issparse(a) else a * zinv(b)
elif sps.issparse(b): b = b.toarray()
else: b = np.asarray(b)
z = np.isclose(b, 0)
q = np.logical_not(z)
zi = q / (b + z)
if sps.issparse(a):
r = a.multiply(zi).tocsr()
else:
r = np.asarray(a) * zi
r[np.ones(a.shape, dtype=np.bool)*z] = null
return r |
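A dense-array illustration of the zero-safe division described above (standalone, so it skips the sparse path and the zinv helper):

import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = np.array([2.0, 0.0, 4.0])
z = np.isclose(b, 0)
out = a * (~z) / (b + z)   # divide-by-zero slots come out as 0
out[z] = -1                # or substitute a custom `null` value instead
print(out)                 # [ 0.5  -1.    0.75]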
def _post_run_hook(self, runtime):
''' generates a report showing slices from each axis of an arbitrary
volume of in_file, with the resulting binary brain mask overlaid '''
self._anat_file = self.inputs.in_file
self._mask_file = self.aggregate_outputs(runtime=runtime).mask_file
self._seg_files = [self._mask_file]
self._masked = True
NIWORKFLOWS_LOG.info(
'Generating report for nilearn.compute_epi_mask. file "%s", and mask file "%s"',
self._anat_file, self._mask_file)
return super(ComputeEPIMask, self)._post_run_hook(runtime) | generates a report showing slices from each axis of an arbitrary
volume of in_file, with the resulting binary brain mask overlaid | Below is the instruction that describes the task:
### Input:
generates a report showing slices from each axis of an arbitrary
volume of in_file, with the resulting binary brain mask overlaid
### Response:
def _post_run_hook(self, runtime):
''' generates a report showing slices from each axis of an arbitrary
volume of in_file, with the resulting binary brain mask overlaid '''
self._anat_file = self.inputs.in_file
self._mask_file = self.aggregate_outputs(runtime=runtime).mask_file
self._seg_files = [self._mask_file]
self._masked = True
NIWORKFLOWS_LOG.info(
'Generating report for nilearn.compute_epi_mask. file "%s", and mask file "%s"',
self._anat_file, self._mask_file)
return super(ComputeEPIMask, self)._post_run_hook(runtime) |
def _determine_ctxt(self):
"""Determines the Volume API endpoint information.
Determines the appropriate version of the API that should be used
as well as the catalog_info string that would be supplied. Returns
a dict containing the volume_api_version and the volume_catalog_info.
"""
rel = os_release(self.pkg, base='icehouse')
version = '2'
if CompareOpenStackReleases(rel) >= 'pike':
version = '3'
service_type = 'volumev{version}'.format(version=version)
service_name = 'cinderv{version}'.format(version=version)
endpoint_type = 'publicURL'
if config('use-internal-endpoints'):
endpoint_type = 'internalURL'
catalog_info = '{type}:{name}:{endpoint}'.format(
type=service_type, name=service_name, endpoint=endpoint_type)
return {
'volume_api_version': version,
'volume_catalog_info': catalog_info,
} | Determines the Volume API endpoint information.
Determines the appropriate version of the API that should be used
as well as the catalog_info string that would be supplied. Returns
a dict containing the volume_api_version and the volume_catalog_info. | Below is the instruction that describes the task:
### Input:
Determines the Volume API endpoint information.
Determines the appropriate version of the API that should be used
as well as the catalog_info string that would be supplied. Returns
a dict containing the volume_api_version and the volume_catalog_info.
### Response:
def _determine_ctxt(self):
"""Determines the Volume API endpoint information.
Determines the appropriate version of the API that should be used
as well as the catalog_info string that would be supplied. Returns
a dict containing the volume_api_version and the volume_catalog_info.
"""
rel = os_release(self.pkg, base='icehouse')
version = '2'
if CompareOpenStackReleases(rel) >= 'pike':
version = '3'
service_type = 'volumev{version}'.format(version=version)
service_name = 'cinderv{version}'.format(version=version)
endpoint_type = 'publicURL'
if config('use-internal-endpoints'):
endpoint_type = 'internalURL'
catalog_info = '{type}:{name}:{endpoint}'.format(
type=service_type, name=service_name, endpoint=endpoint_type)
return {
'volume_api_version': version,
'volume_catalog_info': catalog_info,
} |
def sample_less_than_condition(choices_in, condition):
"""Creates a random sample from choices without replacement, subject to the
condition that each element of the output is less than the corresponding
element of the condition array.
condition should be in ascending order.
"""
output = np.zeros(min(condition.shape[0], choices_in.shape[0]))
choices = copy.deepcopy(choices_in)
for i, _ in enumerate(output):
# randomly select one of the choices which meets condition
avail_inds = np.where(choices < condition[i])[0]
selected_ind = np.random.choice(avail_inds)
output[i] = choices[selected_ind]
# remove the chosen value
choices = np.delete(choices, selected_ind)
return output | Creates a random sample from choices without replacement, subject to the
condition that each element of the output is less than the corresponding
element of the condition array.
condition should be in ascending order. | Below is the instruction that describes the task:
### Input:
Creates a random sample from choices without replacement, subject to the
condition that each element of the output is less than the corresponding
element of the condition array.
condition should be in ascending order.
### Response:
def sample_less_than_condition(choices_in, condition):
"""Creates a random sample from choices without replacement, subject to the
condition that each element of the output is less than the corresponding
element of the condition array.
condition should be in ascending order.
"""
output = np.zeros(min(condition.shape[0], choices_in.shape[0]))
choices = copy.deepcopy(choices_in)
for i, _ in enumerate(output):
# randomly select one of the choices which meets condition
avail_inds = np.where(choices < condition[i])[0]
selected_ind = np.random.choice(avail_inds)
output[i] = choices[selected_ind]
# remove the chosen value
choices = np.delete(choices, selected_ind)
return output |
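With the inputs below every step has exactly one admissible choice, so the result is deterministic despite the random selection inside the function (which is assumed to be in scope along with its numpy and copy imports):

import numpy as np

condition = np.array([2.0, 4.0, 6.0])
choices = np.array([1.0, 3.0, 5.0, 7.0])
print(sample_less_than_condition(choices, condition))   # -> [1. 3. 5.]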
def transform_folder(args):
"""
Transform all the files in the source dataset for the given command and save
the results as a single pickle file in the destination dataset
:param args: tuple with the following arguments:
- the command name: 'zero', 'one', 'two', ...
- transforms to apply to wav file
- full path of the source dataset
- full path of the destination dataset
"""
command, (transform, src, dest) = args
try:
print(progress.value, "remaining")
# Apply transformations to all files
data = []
data_dir = os.path.join(src, command)
for filename in os.listdir(data_dir):
path = os.path.join(data_dir, filename)
data.append(transform({'path': path}))
# Save results
pickleFile = os.path.join(dest, "{}.pkl".format(command))
gc.disable()
with open(pickleFile, "wb") as f:
pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
gc.enable()
# Update progress
with progress.get_lock():
progress.value -= 1
except Exception as e:
print(command, e, file=sys.stderr)
traceback.print_exc() | Transform all the files in the source dataset for the given command and save
the results as a single pickle file in the destination dataset
:param args: tuple with the following arguments:
- the command name: 'zero', 'one', 'two', ...
- transforms to apply to wav file
- full path of the source dataset
- full path of the destination dataset | Below is the instruction that describes the task:
### Input:
Transform all the files in the source dataset for the given command and save
the results as a single pickle file in the destination dataset
:param args: tuple with the following arguments:
- the command name: 'zero', 'one', 'two', ...
- transforms to apply to wav file
- full path of the source dataset
- full path of the destination dataset
### Response:
def transform_folder(args):
"""
Transform all the files in the source dataset for the given command and save
the results as a single pickle file in the destination dataset
:param args: tuple with the following arguments:
- the command name: 'zero', 'one', 'two', ...
- transforms to apply to wav file
- full path of the source dataset
- full path of the destination dataset
"""
command, (transform, src, dest) = args
try:
print(progress.value, "remaining")
# Apply transformations to all files
data = []
data_dir = os.path.join(src, command)
for filename in os.listdir(data_dir):
path = os.path.join(data_dir, filename)
data.append(transform({'path': path}))
# Save results
pickleFile = os.path.join(dest, "{}.pkl".format(command))
gc.disable()
with open(pickleFile, "wb") as f:
pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
gc.enable()
# Update progress
with progress.get_lock():
progress.value -= 1
except Exception as e:
print(command, e, file=sys.stderr)
traceback.print_exc() |
def _set_statistics_oam(self, v, load=False):
"""
Setter method for statistics_oam, mapped from YANG variable /mpls_state/statistics_oam (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_statistics_oam is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_statistics_oam() directly.
YANG Description: OAM packet statistics
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=statistics_oam.statistics_oam, is_container='container', presence=False, yang_name="statistics-oam", rest_name="statistics-oam", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-statistics-oam', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """statistics_oam must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=statistics_oam.statistics_oam, is_container='container', presence=False, yang_name="statistics-oam", rest_name="statistics-oam", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-statistics-oam', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
})
self.__statistics_oam = t
if hasattr(self, '_set'):
self._set() | Setter method for statistics_oam, mapped from YANG variable /mpls_state/statistics_oam (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_statistics_oam is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_statistics_oam() directly.
YANG Description: OAM packet statistics | Below is the instruction that describes the task:
### Input:
Setter method for statistics_oam, mapped from YANG variable /mpls_state/statistics_oam (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_statistics_oam is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_statistics_oam() directly.
YANG Description: OAM packet statistics
### Response:
def _set_statistics_oam(self, v, load=False):
"""
Setter method for statistics_oam, mapped from YANG variable /mpls_state/statistics_oam (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_statistics_oam is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_statistics_oam() directly.
YANG Description: OAM packet statistics
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=statistics_oam.statistics_oam, is_container='container', presence=False, yang_name="statistics-oam", rest_name="statistics-oam", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-statistics-oam', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """statistics_oam must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=statistics_oam.statistics_oam, is_container='container', presence=False, yang_name="statistics-oam", rest_name="statistics-oam", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-statistics-oam', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
})
self.__statistics_oam = t
if hasattr(self, '_set'):
self._set() |
def delete(self, **kwargs):
"""Delete a notification."""
url = self.base_url + '/%s' % kwargs['notification_id']
resp = self.client.delete(url=url)
return resp | Delete a notification. | Below is the the instruction that describes the task:
### Input:
Delete a notification.
### Response:
def delete(self, **kwargs):
"""Delete a notification."""
url = self.base_url + '/%s' % kwargs['notification_id']
resp = self.client.delete(url=url)
return resp |
def close(self):
"""
Close and upload local log file to remote storage Wasb.
"""
# When application exit, system shuts down all handlers by
# calling close method. Here we check if logger is already
# closed to prevent uploading the log to remote storage multiple
# times when `logging.shutdown` is called.
if self.closed:
return
super().close()
if not self.upload_on_close:
return
local_loc = os.path.join(self.local_base, self.log_relative_path)
remote_loc = os.path.join(self.remote_base, self.log_relative_path)
if os.path.exists(local_loc):
# read log and remove old logs to get just the latest additions
with open(local_loc, 'r') as logfile:
log = logfile.read()
self.wasb_write(log, remote_loc, append=True)
if self.delete_local_copy:
shutil.rmtree(os.path.dirname(local_loc))
# Mark closed so we don't double write if close is called twice
self.closed = True | Close and upload local log file to remote storage Wasb. | Below is the the instruction that describes the task:
### Input:
Close and upload local log file to remote storage Wasb.
### Response:
def close(self):
"""
Close and upload local log file to remote storage Wasb.
"""
        # When the application exits, the system shuts down all handlers by
        # calling the close method. Here we check if the logger is already
# closed to prevent uploading the log to remote storage multiple
# times when `logging.shutdown` is called.
if self.closed:
return
super().close()
if not self.upload_on_close:
return
local_loc = os.path.join(self.local_base, self.log_relative_path)
remote_loc = os.path.join(self.remote_base, self.log_relative_path)
if os.path.exists(local_loc):
# read log and remove old logs to get just the latest additions
with open(local_loc, 'r') as logfile:
log = logfile.read()
self.wasb_write(log, remote_loc, append=True)
if self.delete_local_copy:
shutil.rmtree(os.path.dirname(local_loc))
# Mark closed so we don't double write if close is called twice
self.closed = True |
def upsert_pending_licensors(cursor, document_id):
"""Update or insert records for pending license acceptors."""
cursor.execute("""\
SELECT "uuid", "metadata"
FROM pending_documents
WHERE id = %s""", (document_id,))
uuid_, metadata = cursor.fetchone()
acceptors = set([uid for uid, type_ in _dissect_roles(metadata)])
# Acquire a list of existing acceptors.
cursor.execute("""\
SELECT "user_id", "accepted"
FROM license_acceptances
WHERE uuid = %s""", (uuid_,))
existing_acceptors_mapping = dict(cursor.fetchall())
# Who's not in the existing list?
existing_acceptors = set(existing_acceptors_mapping.keys())
new_acceptors = acceptors.difference(existing_acceptors)
# Insert the new licensor acceptors.
for acceptor in new_acceptors:
cursor.execute("""\
INSERT INTO license_acceptances
("uuid", "user_id", "accepted")
VALUES (%s, %s, NULL)""", (uuid_, acceptor,))
# Has everyone already accepted?
cursor.execute("""\
SELECT user_id
FROM license_acceptances
WHERE
uuid = %s
AND
(accepted is UNKNOWN OR accepted is FALSE)""", (uuid_,))
defectors = set(cursor.fetchall())
if not defectors:
# Update the pending document license acceptance state.
cursor.execute("""\
update pending_documents set license_accepted = 't'
where id = %s""", (document_id,)) | Update or insert records for pending license acceptors. | Below is the the instruction that describes the task:
### Input:
Update or insert records for pending license acceptors.
### Response:
def upsert_pending_licensors(cursor, document_id):
"""Update or insert records for pending license acceptors."""
cursor.execute("""\
SELECT "uuid", "metadata"
FROM pending_documents
WHERE id = %s""", (document_id,))
uuid_, metadata = cursor.fetchone()
acceptors = set([uid for uid, type_ in _dissect_roles(metadata)])
# Acquire a list of existing acceptors.
cursor.execute("""\
SELECT "user_id", "accepted"
FROM license_acceptances
WHERE uuid = %s""", (uuid_,))
existing_acceptors_mapping = dict(cursor.fetchall())
# Who's not in the existing list?
existing_acceptors = set(existing_acceptors_mapping.keys())
new_acceptors = acceptors.difference(existing_acceptors)
# Insert the new licensor acceptors.
for acceptor in new_acceptors:
cursor.execute("""\
INSERT INTO license_acceptances
("uuid", "user_id", "accepted")
VALUES (%s, %s, NULL)""", (uuid_, acceptor,))
# Has everyone already accepted?
cursor.execute("""\
SELECT user_id
FROM license_acceptances
WHERE
uuid = %s
AND
(accepted is UNKNOWN OR accepted is FALSE)""", (uuid_,))
defectors = set(cursor.fetchall())
if not defectors:
# Update the pending document license acceptance state.
cursor.execute("""\
update pending_documents set license_accepted = 't'
where id = %s""", (document_id,)) |
def get_ipv4(hostname):
"""Get list of ipv4 addresses for hostname
"""
addrinfo = socket.getaddrinfo(hostname, None, socket.AF_INET,
socket.SOCK_STREAM)
return [addrinfo[x][4][0] for x in range(len(addrinfo))] | Get list of ipv4 addresses for hostname | Below is the the instruction that describes the task:
### Input:
Get list of ipv4 addresses for hostname
### Response:
def get_ipv4(hostname):
"""Get list of ipv4 addresses for hostname
"""
addrinfo = socket.getaddrinfo(hostname, None, socket.AF_INET,
socket.SOCK_STREAM)
return [addrinfo[x][4][0] for x in range(len(addrinfo))] |
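A minimal usage sketch for the resolver above; the hostname is illustrative and the call needs a working name lookup.
# Calls the function defined above; any resolvable name works here.
print(get_ipv4("localhost"))   # e.g. ['127.0.0.1']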
def set_api_version(self, major, minor):
"""Set the API version this module was designed for.
Each module must declare the mib12 API version it was compiled with as a
2 byte major.minor number. This information is used by the pic12_executive
to decide whether the application is compatible.
"""
if not self._is_byte(major) or not self._is_byte(minor):
raise ArgumentError("Invalid API version number with component that does not fit in 1 byte",
major=major, minor=minor)
self.api_version = (major, minor) | Set the API version this module was designed for.
Each module must declare the mib12 API version it was compiled with as a
2 byte major.minor number. This information is used by the pic12_executive
to decide whether the application is compatible. | Below is the the instruction that describes the task:
### Input:
Set the API version this module was designed for.
Each module must declare the mib12 API version it was compiled with as a
2 byte major.minor number. This information is used by the pic12_executive
to decide whether the application is compatible.
### Response:
def set_api_version(self, major, minor):
"""Set the API version this module was designed for.
Each module must declare the mib12 API version it was compiled with as a
2 byte major.minor number. This information is used by the pic12_executive
to decide whether the application is compatible.
"""
if not self._is_byte(major) or not self._is_byte(minor):
raise ArgumentError("Invalid API version number with component that does not fit in 1 byte",
major=major, minor=minor)
self.api_version = (major, minor) |
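For illustration only, a standalone sketch of the same one-byte check outside the class; the helper names below are invented and are not part of the original module.
def _fits_in_byte(value):
    # True when value is an integer in the 0..255 range.
    return isinstance(value, int) and 0 <= value <= 255

def pack_api_version(major, minor):
    # Mirrors the validation above: each component must fit in one byte.
    if not (_fits_in_byte(major) and _fits_in_byte(minor)):
        raise ValueError("API version component does not fit in 1 byte: %r.%r" % (major, minor))
    return bytes([major, minor])

print(pack_api_version(1, 2))   # b'\x01\x02'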
def show_more(context, label=None, loading=settings.LOADING):
"""Show the link to get the next page in a Twitter-like pagination.
Usage::
{% show_more %}
Alternatively you can override the label passed to the default template::
{% show_more "even more" %}
You can override the loading text too::
{% show_more "even more" "working" %}
Must be called after ``{% paginate objects %}``.
"""
# This template tag could raise a PaginationError: you have to call
# *paginate* or *lazy_paginate* before including the showmore template.
data = utils.get_data_from_context(context)
page = data['page']
# show the template only if there is a next page
if page.has_next():
request = context['request']
page_number = page.next_page_number()
# Generate the querystring.
querystring_key = data['querystring_key']
querystring = utils.get_querystring_for_page(
request, page_number, querystring_key,
default_number=data['default_number'])
return {
'label': label,
'loading': loading,
'path': iri_to_uri(data['override_path'] or request.path),
'querystring': querystring,
'querystring_key': querystring_key,
'request': request,
}
# No next page, nothing to see.
return {} | Show the link to get the next page in a Twitter-like pagination.
Usage::
{% show_more %}
Alternatively you can override the label passed to the default template::
{% show_more "even more" %}
You can override the loading text too::
{% show_more "even more" "working" %}
Must be called after ``{% paginate objects %}``. | Below is the the instruction that describes the task:
### Input:
Show the link to get the next page in a Twitter-like pagination.
Usage::
{% show_more %}
Alternatively you can override the label passed to the default template::
{% show_more "even more" %}
You can override the loading text too::
{% show_more "even more" "working" %}
Must be called after ``{% paginate objects %}``.
### Response:
def show_more(context, label=None, loading=settings.LOADING):
"""Show the link to get the next page in a Twitter-like pagination.
Usage::
{% show_more %}
Alternatively you can override the label passed to the default template::
{% show_more "even more" %}
You can override the loading text too::
{% show_more "even more" "working" %}
Must be called after ``{% paginate objects %}``.
"""
# This template tag could raise a PaginationError: you have to call
# *paginate* or *lazy_paginate* before including the showmore template.
data = utils.get_data_from_context(context)
page = data['page']
# show the template only if there is a next page
if page.has_next():
request = context['request']
page_number = page.next_page_number()
# Generate the querystring.
querystring_key = data['querystring_key']
querystring = utils.get_querystring_for_page(
request, page_number, querystring_key,
default_number=data['default_number'])
return {
'label': label,
'loading': loading,
'path': iri_to_uri(data['override_path'] or request.path),
'querystring': querystring,
'querystring_key': querystring_key,
'request': request,
}
# No next page, nothing to see.
return {} |
def remove_group(self, name, swap_group=None):
"""
Delete a group by given group parameter
        If you delete a group and content is restricted to that group, the content will be hidden from all users.
To prevent this, use this parameter to specify a different group to transfer the restrictions
(comments and worklogs only) to
:param name: str
:param swap_group: str
:return:
"""
log.warning('Removing group...')
url = 'rest/api/2/group'
if swap_group is not None:
params = {'groupname': name, 'swapGroup': swap_group}
else:
params = {'groupname': name}
return self.delete(url, params=params) | Delete a group by given group parameter
        If you delete a group and content is restricted to that group, the content will be hidden from all users.
To prevent this, use this parameter to specify a different group to transfer the restrictions
(comments and worklogs only) to
:param name: str
:param swap_group: str
:return: | Below is the the instruction that describes the task:
### Input:
Delete a group by given group parameter
        If you delete a group and content is restricted to that group, the content will be hidden from all users.
To prevent this, use this parameter to specify a different group to transfer the restrictions
(comments and worklogs only) to
:param name: str
:param swap_group: str
:return:
### Response:
def remove_group(self, name, swap_group=None):
"""
Delete a group by given group parameter
        If you delete a group and content is restricted to that group, the content will be hidden from all users.
To prevent this, use this parameter to specify a different group to transfer the restrictions
(comments and worklogs only) to
:param name: str
:param swap_group: str
:return:
"""
log.warning('Removing group...')
url = 'rest/api/2/group'
if swap_group is not None:
params = {'groupname': name, 'swapGroup': swap_group}
else:
params = {'groupname': name}
return self.delete(url, params=params) |
def enable_process_breakpoints(self, dwProcessId):
"""
Enables all disabled breakpoints for the given process.
@type dwProcessId: int
@param dwProcessId: Process global ID.
"""
# enable code breakpoints
for bp in self.get_process_code_breakpoints(dwProcessId):
if bp.is_disabled():
self.enable_code_breakpoint(dwProcessId, bp.get_address())
# enable page breakpoints
for bp in self.get_process_page_breakpoints(dwProcessId):
if bp.is_disabled():
self.enable_page_breakpoint(dwProcessId, bp.get_address())
# enable hardware breakpoints
if self.system.has_process(dwProcessId):
aProcess = self.system.get_process(dwProcessId)
else:
aProcess = Process(dwProcessId)
aProcess.scan_threads()
for aThread in aProcess.iter_threads():
dwThreadId = aThread.get_tid()
for bp in self.get_thread_hardware_breakpoints(dwThreadId):
if bp.is_disabled():
self.enable_hardware_breakpoint(dwThreadId, bp.get_address()) | Enables all disabled breakpoints for the given process.
@type dwProcessId: int
@param dwProcessId: Process global ID. | Below is the the instruction that describes the task:
### Input:
Enables all disabled breakpoints for the given process.
@type dwProcessId: int
@param dwProcessId: Process global ID.
### Response:
def enable_process_breakpoints(self, dwProcessId):
"""
Enables all disabled breakpoints for the given process.
@type dwProcessId: int
@param dwProcessId: Process global ID.
"""
# enable code breakpoints
for bp in self.get_process_code_breakpoints(dwProcessId):
if bp.is_disabled():
self.enable_code_breakpoint(dwProcessId, bp.get_address())
# enable page breakpoints
for bp in self.get_process_page_breakpoints(dwProcessId):
if bp.is_disabled():
self.enable_page_breakpoint(dwProcessId, bp.get_address())
# enable hardware breakpoints
if self.system.has_process(dwProcessId):
aProcess = self.system.get_process(dwProcessId)
else:
aProcess = Process(dwProcessId)
aProcess.scan_threads()
for aThread in aProcess.iter_threads():
dwThreadId = aThread.get_tid()
for bp in self.get_thread_hardware_breakpoints(dwThreadId):
if bp.is_disabled():
self.enable_hardware_breakpoint(dwThreadId, bp.get_address()) |
def get_base(self, option):
"""
Parse the base command option. Can be supplied as a 3 character code or a settings variable name
If base is not supplied, looks for settings CURRENCIES_BASE and SHOP_DEFAULT_CURRENCY
"""
if option:
if option.isupper():
if len(option) > 3:
return getattr(settings, option), True
elif len(option) == 3:
return option, True
raise ImproperlyConfigured("Invalid currency code found: %s" % option)
for attr in ('CURRENCIES_BASE', 'SHOP_DEFAULT_CURRENCY'):
try:
return getattr(settings, attr), True
except AttributeError:
continue
return 'USD', False | Parse the base command option. Can be supplied as a 3 character code or a settings variable name
If base is not supplied, looks for settings CURRENCIES_BASE and SHOP_DEFAULT_CURRENCY | Below is the the instruction that describes the task:
### Input:
Parse the base command option. Can be supplied as a 3 character code or a settings variable name
If base is not supplied, looks for settings CURRENCIES_BASE and SHOP_DEFAULT_CURRENCY
### Response:
def get_base(self, option):
"""
Parse the base command option. Can be supplied as a 3 character code or a settings variable name
If base is not supplied, looks for settings CURRENCIES_BASE and SHOP_DEFAULT_CURRENCY
"""
if option:
if option.isupper():
if len(option) > 3:
return getattr(settings, option), True
elif len(option) == 3:
return option, True
raise ImproperlyConfigured("Invalid currency code found: %s" % option)
for attr in ('CURRENCIES_BASE', 'SHOP_DEFAULT_CURRENCY'):
try:
return getattr(settings, attr), True
except AttributeError:
continue
return 'USD', False |
def gen_columns(obj) -> Generator[Tuple[str, Column], None, None]:
"""
Asks a SQLAlchemy ORM object: "what are your SQLAlchemy columns?"
Yields tuples of ``(attr_name, Column)`` from an SQLAlchemy ORM object
instance. Also works with the corresponding SQLAlchemy ORM class. Examples:
.. code-block:: python
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql.schema import Column
from sqlalchemy.sql.sqltypes import Integer
Base = declarative_base()
class MyClass(Base):
__tablename__ = "mytable"
pk = Column("pk", Integer, primary_key=True, autoincrement=True)
a = Column("a", Integer)
x = MyClass()
list(gen_columns(x))
list(gen_columns(MyClass))
"""
mapper = obj.__mapper__ # type: Mapper
assert mapper, "gen_columns called on {!r} which is not an " \
"SQLAlchemy ORM object".format(obj)
colmap = mapper.columns # type: OrderedProperties
if not colmap:
return
for attrname, column in colmap.items():
# NB: column.name is the SQL column name, not the attribute name
yield attrname, column | Asks a SQLAlchemy ORM object: "what are your SQLAlchemy columns?"
Yields tuples of ``(attr_name, Column)`` from an SQLAlchemy ORM object
instance. Also works with the corresponding SQLAlchemy ORM class. Examples:
.. code-block:: python
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql.schema import Column
from sqlalchemy.sql.sqltypes import Integer
Base = declarative_base()
class MyClass(Base):
__tablename__ = "mytable"
pk = Column("pk", Integer, primary_key=True, autoincrement=True)
a = Column("a", Integer)
x = MyClass()
list(gen_columns(x))
list(gen_columns(MyClass)) | Below is the the instruction that describes the task:
### Input:
Asks a SQLAlchemy ORM object: "what are your SQLAlchemy columns?"
Yields tuples of ``(attr_name, Column)`` from an SQLAlchemy ORM object
instance. Also works with the corresponding SQLAlchemy ORM class. Examples:
.. code-block:: python
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql.schema import Column
from sqlalchemy.sql.sqltypes import Integer
Base = declarative_base()
class MyClass(Base):
__tablename__ = "mytable"
pk = Column("pk", Integer, primary_key=True, autoincrement=True)
a = Column("a", Integer)
x = MyClass()
list(gen_columns(x))
list(gen_columns(MyClass))
### Response:
def gen_columns(obj) -> Generator[Tuple[str, Column], None, None]:
"""
Asks a SQLAlchemy ORM object: "what are your SQLAlchemy columns?"
Yields tuples of ``(attr_name, Column)`` from an SQLAlchemy ORM object
instance. Also works with the corresponding SQLAlchemy ORM class. Examples:
.. code-block:: python
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql.schema import Column
from sqlalchemy.sql.sqltypes import Integer
Base = declarative_base()
class MyClass(Base):
__tablename__ = "mytable"
pk = Column("pk", Integer, primary_key=True, autoincrement=True)
a = Column("a", Integer)
x = MyClass()
list(gen_columns(x))
list(gen_columns(MyClass))
"""
mapper = obj.__mapper__ # type: Mapper
assert mapper, "gen_columns called on {!r} which is not an " \
"SQLAlchemy ORM object".format(obj)
colmap = mapper.columns # type: OrderedProperties
if not colmap:
return
for attrname, column in colmap.items():
# NB: column.name is the SQL column name, not the attribute name
yield attrname, column |
def generate_output_csv(evaluation_results, filename='results.csv'):
"""Generate the evaluation results in the format
Parameters
----------
evaluation_results : list of dictionaries
Each dictionary contains the keys 'filename' and 'results', where
'results' itself is a list of dictionaries. Each of the results has
the keys 'latex' and 'probability'
Examples
--------
MfrDB3907_85801, a, b, c, d, e, f, g, h, i, j
scores, 1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1
MfrDB3907_85802, 1, |, l, COMMA, junk, x, X, \times
scores, 10, 8.001, 2, 0.5, 0.1, 0,-0.5, -1, -100
"""
with open(filename, 'w') as f:
for result in evaluation_results:
            for i, entry in enumerate(result['results']):
                if entry['semantics'] == ',':
                    result['results'][i]['semantics'] = 'COMMA'
f.write("%s, " % result['filename'])
f.write(", ".join([entry['semantics'] for entry in result['results']]))
f.write("\n")
f.write("%s, " % "scores")
f.write(", ".join([str(entry['probability']) for entry in result['results']]))
f.write("\n") | Generate the evaluation results in the format
Parameters
----------
evaluation_results : list of dictionaries
Each dictionary contains the keys 'filename' and 'results', where
'results' itself is a list of dictionaries. Each of the results has
the keys 'latex' and 'probability'
Examples
--------
MfrDB3907_85801, a, b, c, d, e, f, g, h, i, j
scores, 1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1
MfrDB3907_85802, 1, |, l, COMMA, junk, x, X, \times
scores, 10, 8.001, 2, 0.5, 0.1, 0,-0.5, -1, -100 | Below is the the instruction that describes the task:
### Input:
Generate the evaluation results in the format
Parameters
----------
evaluation_results : list of dictionaries
Each dictionary contains the keys 'filename' and 'results', where
'results' itself is a list of dictionaries. Each of the results has
the keys 'latex' and 'probability'
Examples
--------
MfrDB3907_85801, a, b, c, d, e, f, g, h, i, j
scores, 1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1
MfrDB3907_85802, 1, |, l, COMMA, junk, x, X, \times
scores, 10, 8.001, 2, 0.5, 0.1, 0,-0.5, -1, -100
### Response:
def generate_output_csv(evaluation_results, filename='results.csv'):
"""Generate the evaluation results in the format
Parameters
----------
evaluation_results : list of dictionaries
Each dictionary contains the keys 'filename' and 'results', where
'results' itself is a list of dictionaries. Each of the results has
the keys 'latex' and 'probability'
Examples
--------
MfrDB3907_85801, a, b, c, d, e, f, g, h, i, j
scores, 1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1
MfrDB3907_85802, 1, |, l, COMMA, junk, x, X, \times
scores, 10, 8.001, 2, 0.5, 0.1, 0,-0.5, -1, -100
"""
with open(filename, 'w') as f:
for result in evaluation_results:
            for i, entry in enumerate(result['results']):
                if entry['semantics'] == ',':
                    result['results'][i]['semantics'] = 'COMMA'
f.write("%s, " % result['filename'])
f.write(", ".join([entry['semantics'] for entry in result['results']]))
f.write("\n")
f.write("%s, " % "scores")
f.write(", ".join([str(entry['probability']) for entry in result['results']]))
f.write("\n") |
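A hedged usage sketch with the input shape described in the docstring; the filename and symbols are made up for illustration.
sample_results = [
    {
        "filename": "MfrDB3907_85801",
        "results": [
            {"semantics": "a", "probability": 1.0},
            {"semantics": ",", "probability": 0.5},
        ],
    }
]
generate_output_csv(sample_results, filename="results_demo.csv")
# results_demo.csv then holds one symbol line and one "scores" line per entry,
# with the comma symbol rewritten as COMMA.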
def T(self, T):
"""
Set the temperature of the stream to the specified value, and
        recalculate its enthalpy.
:param T: Temperature. [°C]
"""
self._T = T
self._Hfr = self._calculate_Hfr(T) | Set the temperature of the stream to the specified value, and
        recalculate its enthalpy.
:param T: Temperature. [°C] | Below is the the instruction that describes the task:
### Input:
Set the temperature of the stream to the specified value, and
        recalculate its enthalpy.
:param T: Temperature. [°C]
### Response:
def T(self, T):
"""
Set the temperature of the stream to the specified value, and
        recalculate its enthalpy.
:param T: Temperature. [°C]
"""
self._T = T
self._Hfr = self._calculate_Hfr(T) |
def update_payload(self, fields=None):
"""Wrap submitted data within an extra dict."""
payload = super(DiscoveryRule, self).update_payload(fields)
if 'search_' in payload:
payload['search'] = payload.pop('search_')
return {u'discovery_rule': payload} | Wrap submitted data within an extra dict. | Below is the the instruction that describes the task:
### Input:
Wrap submitted data within an extra dict.
### Response:
def update_payload(self, fields=None):
"""Wrap submitted data within an extra dict."""
payload = super(DiscoveryRule, self).update_payload(fields)
if 'search_' in payload:
payload['search'] = payload.pop('search_')
return {u'discovery_rule': payload} |
def line_rate(self, filename=None):
"""
Return the global line rate of the coverage report. If the
`filename` file is given, return the line rate of the file.
"""
if filename is None:
el = self.xml
else:
el = self._get_class_element_by_filename(filename)
return float(el.attrib['line-rate']) | Return the global line rate of the coverage report. If the
`filename` file is given, return the line rate of the file. | Below is the the instruction that describes the task:
### Input:
Return the global line rate of the coverage report. If the
`filename` file is given, return the line rate of the file.
### Response:
def line_rate(self, filename=None):
"""
Return the global line rate of the coverage report. If the
`filename` file is given, return the line rate of the file.
"""
if filename is None:
el = self.xml
else:
el = self._get_class_element_by_filename(filename)
return float(el.attrib['line-rate']) |
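The method above reads attributes of an already-parsed Cobertura report; a minimal standalone sketch of the same lookup with the standard library (the XML snippet is a made-up example) could look like this.
import xml.etree.ElementTree as ET

report = """<coverage line-rate="0.83">
  <packages><package><classes>
    <class filename="pkg/mod.py" line-rate="0.75"/>
  </classes></package></packages>
</coverage>"""

xml = ET.fromstring(report)
print(float(xml.attrib['line-rate']))            # 0.83, global line rate
for el in xml.iter('class'):
    if el.attrib.get('filename') == 'pkg/mod.py':
        print(float(el.attrib['line-rate']))     # 0.75, per-file line rate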
def uifile(self):
"""
Returns the uifile for this scaffold.
:return <str>
"""
output = ''
# build from a zip file
if zipfile.is_zipfile(self.source()):
zfile = zipfile.ZipFile(self.source(), 'r')
if 'properties.ui' in zfile.namelist():
tempdir = tempfile.gettempdir()
output = os.path.join(tempdir,
'{0}_properties.ui'.format(self.name()))
f = open(output, 'w')
f.write(zfile.read('properties.ui'))
f.close()
zfile.close()
else:
uifile = os.path.join(os.path.dirname(self.source()),
'properties.ui')
if os.path.exists(uifile):
output = uifile
return output | Returns the uifile for this scaffold.
:return <str> | Below is the the instruction that describes the task:
### Input:
Returns the uifile for this scaffold.
:return <str>
### Response:
def uifile(self):
"""
Returns the uifile for this scaffold.
:return <str>
"""
output = ''
# build from a zip file
if zipfile.is_zipfile(self.source()):
zfile = zipfile.ZipFile(self.source(), 'r')
if 'properties.ui' in zfile.namelist():
tempdir = tempfile.gettempdir()
output = os.path.join(tempdir,
'{0}_properties.ui'.format(self.name()))
f = open(output, 'w')
f.write(zfile.read('properties.ui'))
f.close()
zfile.close()
else:
uifile = os.path.join(os.path.dirname(self.source()),
'properties.ui')
if os.path.exists(uifile):
output = uifile
return output |
def uninstall_handler(self, event_type, handler, user_handle=None):
"""Uninstalls handlers for events in this resource.
:param event_type: Logical event identifier.
:param handler: Interpreted as a valid reference to a handler to be uninstalled by a client application.
:param user_handle: The user handle (ctypes object or None) returned by install_handler.
"""
self.visalib.uninstall_visa_handler(self.session, event_type, handler, user_handle) | Uninstalls handlers for events in this resource.
:param event_type: Logical event identifier.
:param handler: Interpreted as a valid reference to a handler to be uninstalled by a client application.
:param user_handle: The user handle (ctypes object or None) returned by install_handler. | Below is the the instruction that describes the task:
### Input:
Uninstalls handlers for events in this resource.
:param event_type: Logical event identifier.
:param handler: Interpreted as a valid reference to a handler to be uninstalled by a client application.
:param user_handle: The user handle (ctypes object or None) returned by install_handler.
### Response:
def uninstall_handler(self, event_type, handler, user_handle=None):
"""Uninstalls handlers for events in this resource.
:param event_type: Logical event identifier.
:param handler: Interpreted as a valid reference to a handler to be uninstalled by a client application.
:param user_handle: The user handle (ctypes object or None) returned by install_handler.
"""
self.visalib.uninstall_visa_handler(self.session, event_type, handler, user_handle) |
def view(tilesets):
'''
Create a higlass viewer that displays the specified tilesets
Parameters:
-----------
Returns
-------
Nothing
'''
from .server import Server
from .client import View
curr_view = View()
server = Server()
server.start(tilesets)
for ts in tilesets:
if (ts.track_type is not None
and ts.track_position is not None):
curr_view.add_track(ts.track_type,
ts.track_position,
api_url=server.api_address,
tileset_uuid=ts.uuid,
)
curr_view.server = server
return curr_view | Create a higlass viewer that displays the specified tilesets
Parameters:
-----------
Returns
-------
Nothing | Below is the the instruction that describes the task:
### Input:
Create a higlass viewer that displays the specified tilesets
Parameters:
-----------
Returns
-------
Nothing
### Response:
def view(tilesets):
'''
Create a higlass viewer that displays the specified tilesets
Parameters:
-----------
Returns
-------
Nothing
'''
from .server import Server
from .client import View
curr_view = View()
server = Server()
server.start(tilesets)
for ts in tilesets:
if (ts.track_type is not None
and ts.track_position is not None):
curr_view.add_track(ts.track_type,
ts.track_position,
api_url=server.api_address,
tileset_uuid=ts.uuid,
)
curr_view.server = server
return curr_view |
def get_info(
self,
userSpecifier,
**kwargs
):
"""
Fetch the user information for the specified user. This endpoint is
intended to be used by the user themself to obtain their own
information.
Args:
userSpecifier:
The User Specifier
Returns:
v20.response.Response containing the results from submitting the
request
"""
request = Request(
'GET',
'/v3/users/{userSpecifier}'
)
request.set_path_param(
'userSpecifier',
userSpecifier
)
response = self.ctx.request(request)
if response.content_type is None:
return response
if not response.content_type.startswith("application/json"):
return response
jbody = json.loads(response.raw_body)
parsed_body = {}
#
# Parse responses as defined by the API specification
#
if str(response.status) == "200":
if jbody.get('userInfo') is not None:
parsed_body['userInfo'] = \
self.ctx.user.UserInfo.from_dict(
jbody['userInfo'],
self.ctx
)
elif str(response.status) == "401":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "403":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "405":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
#
# Unexpected response status
#
else:
parsed_body = jbody
response.body = parsed_body
return response | Fetch the user information for the specified user. This endpoint is
intended to be used by the user themself to obtain their own
information.
Args:
userSpecifier:
The User Specifier
Returns:
v20.response.Response containing the results from submitting the
request | Below is the the instruction that describes the task:
### Input:
Fetch the user information for the specified user. This endpoint is
intended to be used by the user themself to obtain their own
information.
Args:
userSpecifier:
The User Specifier
Returns:
v20.response.Response containing the results from submitting the
request
### Response:
def get_info(
self,
userSpecifier,
**kwargs
):
"""
Fetch the user information for the specified user. This endpoint is
intended to be used by the user themself to obtain their own
information.
Args:
userSpecifier:
The User Specifier
Returns:
v20.response.Response containing the results from submitting the
request
"""
request = Request(
'GET',
'/v3/users/{userSpecifier}'
)
request.set_path_param(
'userSpecifier',
userSpecifier
)
response = self.ctx.request(request)
if response.content_type is None:
return response
if not response.content_type.startswith("application/json"):
return response
jbody = json.loads(response.raw_body)
parsed_body = {}
#
# Parse responses as defined by the API specification
#
if str(response.status) == "200":
if jbody.get('userInfo') is not None:
parsed_body['userInfo'] = \
self.ctx.user.UserInfo.from_dict(
jbody['userInfo'],
self.ctx
)
elif str(response.status) == "401":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "403":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
elif str(response.status) == "405":
if jbody.get('errorCode') is not None:
parsed_body['errorCode'] = \
jbody.get('errorCode')
if jbody.get('errorMessage') is not None:
parsed_body['errorMessage'] = \
jbody.get('errorMessage')
#
# Unexpected response status
#
else:
parsed_body = jbody
response.body = parsed_body
return response |
def tree_diff(a, b, n=5, sort=False):
"""Dump any data-structure or object, traverse
it depth-first in-order and apply a unified diff.
Depth-first in-order is just like structure would be printed.
:param a: data_structure a
:param b: data_structure b
:param n: lines of context
:type n: int
:param sort: sort the data-structure
ATTENTION: Sorting means changing the data-structure. The test-result may
differ. But in case of dictionaries the results become comparable because
the sorting negates the hash-algorithms "de-sorting".
>>> a = recursive_sort(freeze([
... 'a',
... [3, 4],
... {'a': [3, {'w' : set([4, '3', frozenset([3,5,2])])}]},
... []
... ]))
>>> b = recursive_sort(freeze([
... 'a',
... [7, 3],
... {'a': [3, {'w' : set([4, '3', frozenset([2,5,3])])}]},
... []
... ]))
>>> transparent_repr("\\n".join(tree_diff(a, b).split("\\n")[2:]))
@@ -7,6 +7,6 @@
'w'),),
3),
'a'),),
'a',
(3,
- 4))
+ 7))
>>> a = [
... 'a',
... [3, 4],
... {'a': [3, {'w' : set([4, '3', frozenset([3,5,2])])}]},
... []
... ]
>>> b = [
... 'a',
... [7, 3],
... {'a': [3, {'w' : set([4, '3', frozenset([2,5,3])])}]},
... []
... ]
>>> transparent_repr("\\n".join(
... tree_diff(a, b, sort=True
... ).split("\\n")[2:]))
@@ -11,6 +11,6 @@
'3',
4)]),)],
3)),)],
'a',
(3,
- 4))
+ 7))
"""
a = dump(a)
b = dump(b)
if not sort:
a = vformat(a).split("\n")
b = vformat(b).split("\n")
else:
a = vformat(recursive_sort(a)).split("\n")
b = vformat(recursive_sort(b)).split("\n")
return "\n".join(difflib.unified_diff(a, b, n=n, lineterm="")) | Dump any data-structure or object, traverse
it depth-first in-order and apply a unified diff.
Depth-first in-order is just like structure would be printed.
:param a: data_structure a
:param b: data_structure b
:param n: lines of context
:type n: int
:param sort: sort the data-structure
ATTENTION: Sorting means changing the data-structure. The test-result may
differ. But in case of dictionaries the results become comparable because
the sorting negates the hash-algorithms "de-sorting".
>>> a = recursive_sort(freeze([
... 'a',
... [3, 4],
... {'a': [3, {'w' : set([4, '3', frozenset([3,5,2])])}]},
... []
... ]))
>>> b = recursive_sort(freeze([
... 'a',
... [7, 3],
... {'a': [3, {'w' : set([4, '3', frozenset([2,5,3])])}]},
... []
... ]))
>>> transparent_repr("\\n".join(tree_diff(a, b).split("\\n")[2:]))
@@ -7,6 +7,6 @@
'w'),),
3),
'a'),),
'a',
(3,
- 4))
+ 7))
>>> a = [
... 'a',
... [3, 4],
... {'a': [3, {'w' : set([4, '3', frozenset([3,5,2])])}]},
... []
... ]
>>> b = [
... 'a',
... [7, 3],
... {'a': [3, {'w' : set([4, '3', frozenset([2,5,3])])}]},
... []
... ]
>>> transparent_repr("\\n".join(
... tree_diff(a, b, sort=True
... ).split("\\n")[2:]))
@@ -11,6 +11,6 @@
'3',
4)]),)],
3)),)],
'a',
(3,
- 4))
+ 7)) | Below is the the instruction that describes the task:
### Input:
Dump any data-structure or object, traverse
it depth-first in-order and apply a unified diff.
Depth-first in-order is just like structure would be printed.
:param a: data_structure a
:param b: data_structure b
:param n: lines of context
:type n: int
:param sort: sort the data-structure
ATTENTION: Sorting means changing the data-structure. The test-result may
differ. But in case of dictionaries the results become comparable because
the sorting negates the hash-algorithms "de-sorting".
>>> a = recursive_sort(freeze([
... 'a',
... [3, 4],
... {'a': [3, {'w' : set([4, '3', frozenset([3,5,2])])}]},
... []
... ]))
>>> b = recursive_sort(freeze([
... 'a',
... [7, 3],
... {'a': [3, {'w' : set([4, '3', frozenset([2,5,3])])}]},
... []
... ]))
>>> transparent_repr("\\n".join(tree_diff(a, b).split("\\n")[2:]))
@@ -7,6 +7,6 @@
'w'),),
3),
'a'),),
'a',
(3,
- 4))
+ 7))
>>> a = [
... 'a',
... [3, 4],
... {'a': [3, {'w' : set([4, '3', frozenset([3,5,2])])}]},
... []
... ]
>>> b = [
... 'a',
... [7, 3],
... {'a': [3, {'w' : set([4, '3', frozenset([2,5,3])])}]},
... []
... ]
>>> transparent_repr("\\n".join(
... tree_diff(a, b, sort=True
... ).split("\\n")[2:]))
@@ -11,6 +11,6 @@
'3',
4)]),)],
3)),)],
'a',
(3,
- 4))
+ 7))
### Response:
def tree_diff(a, b, n=5, sort=False):
"""Dump any data-structure or object, traverse
it depth-first in-order and apply a unified diff.
Depth-first in-order is just like structure would be printed.
:param a: data_structure a
:param b: data_structure b
:param n: lines of context
:type n: int
:param sort: sort the data-structure
ATTENTION: Sorting means changing the data-structure. The test-result may
differ. But in case of dictionaries the results become comparable because
the sorting negates the hash-algorithms "de-sorting".
>>> a = recursive_sort(freeze([
... 'a',
... [3, 4],
... {'a': [3, {'w' : set([4, '3', frozenset([3,5,2])])}]},
... []
... ]))
>>> b = recursive_sort(freeze([
... 'a',
... [7, 3],
... {'a': [3, {'w' : set([4, '3', frozenset([2,5,3])])}]},
... []
... ]))
>>> transparent_repr("\\n".join(tree_diff(a, b).split("\\n")[2:]))
@@ -7,6 +7,6 @@
'w'),),
3),
'a'),),
'a',
(3,
- 4))
+ 7))
>>> a = [
... 'a',
... [3, 4],
... {'a': [3, {'w' : set([4, '3', frozenset([3,5,2])])}]},
... []
... ]
>>> b = [
... 'a',
... [7, 3],
... {'a': [3, {'w' : set([4, '3', frozenset([2,5,3])])}]},
... []
... ]
>>> transparent_repr("\\n".join(
... tree_diff(a, b, sort=True
... ).split("\\n")[2:]))
@@ -11,6 +11,6 @@
'3',
4)]),)],
3)),)],
'a',
(3,
- 4))
+ 7))
"""
a = dump(a)
b = dump(b)
if not sort:
a = vformat(a).split("\n")
b = vformat(b).split("\n")
else:
a = vformat(recursive_sort(a)).split("\n")
b = vformat(recursive_sort(b)).split("\n")
return "\n".join(difflib.unified_diff(a, b, n=n, lineterm="")) |
def _temporary_filenames(total):
"""Context manager to create temporary files and remove them after use."""
temp_files = [_get_temporary_filename('optimage-') for i in range(total)]
yield temp_files
for temp_file in temp_files:
try:
os.remove(temp_file)
except OSError:
# Continue in case we could not remove the file. One reason is that
            # the file was never created.
pass | Context manager to create temporary files and remove them after use. | Below is the the instruction that describes the task:
### Input:
Context manager to create temporary files and remove them after use.
### Response:
def _temporary_filenames(total):
"""Context manager to create temporary files and remove them after use."""
temp_files = [_get_temporary_filename('optimage-') for i in range(total)]
yield temp_files
for temp_file in temp_files:
try:
os.remove(temp_file)
except OSError:
# Continue in case we could not remove the file. One reason is that
            # the file was never created.
pass |
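A usage sketch, assuming the generator above is wrapped with contextlib.contextmanager (the decorator is not shown in this record); the stand-in below creates real temporary files so it runs on its own.
import contextlib
import os
import tempfile

@contextlib.contextmanager
def _temporary_filenames_demo(total):
    # Simplified stand-in for the helper above: make real temp files, then clean up.
    paths = []
    for _ in range(total):
        fd, path = tempfile.mkstemp(prefix='optimage-')
        os.close(fd)
        paths.append(path)
    try:
        yield paths
    finally:
        for path in paths:
            try:
                os.remove(path)
            except OSError:
                pass

with _temporary_filenames_demo(2) as (first, second):
    print(os.path.exists(first), os.path.exists(second))   # True True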
def get_compound_afrs(self):
"""
Determine the amount flow rates of all the compounds.
:returns: List of amount flow rates. [kmol/h]
"""
result = self._compound_mfrs * 1.0
for compound in self.material.compounds:
index = self.material.get_compound_index(compound)
result[index] = stoich.amount(compound, result[index])
return result | Determine the amount flow rates of all the compounds.
:returns: List of amount flow rates. [kmol/h] | Below is the the instruction that describes the task:
### Input:
Determine the amount flow rates of all the compounds.
:returns: List of amount flow rates. [kmol/h]
### Response:
def get_compound_afrs(self):
"""
Determine the amount flow rates of all the compounds.
:returns: List of amount flow rates. [kmol/h]
"""
result = self._compound_mfrs * 1.0
for compound in self.material.compounds:
index = self.material.get_compound_index(compound)
result[index] = stoich.amount(compound, result[index])
return result |
def show(self):
"""
Display (with a pretty print) this object
"""
off = 0
for n, i in enumerate(self.get_instructions()):
print("{:8d} (0x{:08x}) {:04x} {:30} {}".format(n, off, i.get_op_value(), i.get_name(), i.get_output(self.idx)))
off += i.get_length() | Display (with a pretty print) this object | Below is the the instruction that describes the task:
### Input:
Display (with a pretty print) this object
### Response:
def show(self):
"""
Display (with a pretty print) this object
"""
off = 0
for n, i in enumerate(self.get_instructions()):
print("{:8d} (0x{:08x}) {:04x} {:30} {}".format(n, off, i.get_op_value(), i.get_name(), i.get_output(self.idx)))
off += i.get_length() |
def histogram_cumulative(data,**kwargs):
r'''
Compute cumulative histogram.
    See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_
:extra options:
**return_edges** ([``True``] | [``False``])
Return the bin edges if set to ``True``, return their midpoints otherwise.
**normalize** ([``False``] | ``True``)
Normalize such that the final probability is one. In this case the function returns the (binned)
cumulative probability density.
'''
return_edges = kwargs.pop('return_edges', True)
norm = kwargs.pop('normalize', False)
P, edges = np.histogram(data, **kwargs)
P = np.cumsum(P)
if norm: P = P/P[-1]
if not return_edges: edges = np.diff(edges) / 2. + edges[:-1]
return P, edges | r'''
Compute cumulative histogram.
    See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_
:extra options:
**return_edges** ([``True``] | [``False``])
Return the bin edges if set to ``True``, return their midpoints otherwise.
**normalize** ([``False``] | ``True``)
Normalize such that the final probability is one. In this case the function returns the (binned)
cumulative probability density. | Below is the the instruction that describes the task:
### Input:
r'''
Compute cumulative histogram.
    See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_
:extra options:
**return_edges** ([``True``] | [``False``])
Return the bin edges if set to ``True``, return their midpoints otherwise.
**normalize** ([``False``] | ``True``)
Normalize such that the final probability is one. In this case the function returns the (binned)
cumulative probability density.
### Response:
def histogram_cumulative(data,**kwargs):
r'''
Compute cumulative histogram.
    See `numpy.histogram <https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html>`_
:extra options:
**return_edges** ([``True``] | [``False``])
Return the bin edges if set to ``True``, return their midpoints otherwise.
**normalize** ([``False``] | ``True``)
Normalize such that the final probability is one. In this case the function returns the (binned)
cumulative probability density.
'''
return_edges = kwargs.pop('return_edges', True)
norm = kwargs.pop('normalize', False)
P, edges = np.histogram(data, **kwargs)
P = np.cumsum(P)
if norm: P = P/P[-1]
if not return_edges: edges = np.diff(edges) / 2. + edges[:-1]
return P, edges |
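A short usage sketch with synthetic data; the sample size and bin count are arbitrary.
import numpy as np

data = np.random.normal(size=1000)
P, edges = histogram_cumulative(data, bins=20, normalize=True)
print(P[-1])        # 1.0: the cumulative probability ends at one when normalized
print(len(edges))   # 21 bin edges for 20 bins (return_edges defaults to True)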
def unindent(self):
"""
Un-indents text at cursor position.
"""
_logger().debug('unindent')
cursor = self.editor.textCursor()
_logger().debug('cursor has selection %r', cursor.hasSelection())
if cursor.hasSelection():
cursor.beginEditBlock()
self.unindent_selection(cursor)
cursor.endEditBlock()
self.editor.setTextCursor(cursor)
else:
tab_len = self.editor.tab_length
indentation = cursor.positionInBlock()
indentation -= self.min_column
if indentation == 0:
return
max_spaces = indentation % tab_len
if max_spaces == 0:
max_spaces = tab_len
spaces = self.count_deletable_spaces(cursor, max_spaces)
_logger().info('deleting %d space before cursor' % spaces)
cursor.beginEditBlock()
for _ in range(spaces):
cursor.deletePreviousChar()
cursor.endEditBlock()
self.editor.setTextCursor(cursor)
_logger().debug(cursor.block().text()) | Un-indents text at cursor position. | Below is the the instruction that describes the task:
### Input:
Un-indents text at cursor position.
### Response:
def unindent(self):
"""
Un-indents text at cursor position.
"""
_logger().debug('unindent')
cursor = self.editor.textCursor()
_logger().debug('cursor has selection %r', cursor.hasSelection())
if cursor.hasSelection():
cursor.beginEditBlock()
self.unindent_selection(cursor)
cursor.endEditBlock()
self.editor.setTextCursor(cursor)
else:
tab_len = self.editor.tab_length
indentation = cursor.positionInBlock()
indentation -= self.min_column
if indentation == 0:
return
max_spaces = indentation % tab_len
if max_spaces == 0:
max_spaces = tab_len
spaces = self.count_deletable_spaces(cursor, max_spaces)
_logger().info('deleting %d space before cursor' % spaces)
cursor.beginEditBlock()
for _ in range(spaces):
cursor.deletePreviousChar()
cursor.endEditBlock()
self.editor.setTextCursor(cursor)
_logger().debug(cursor.block().text()) |
def process_json_response(self, response):
"""For a json response, check if there was any error and throw exception.
Otherwise, create a housecanary.response.Response."""
response_json = response.json()
# handle errors
code_key = "code"
if code_key in response_json and response_json[code_key] != constants.HTTP_CODE_OK:
code = response_json[code_key]
message = response_json
if "message" in response_json:
message = response_json["message"]
elif "code_description" in response_json:
message = response_json["code_description"]
if code == constants.HTTP_FORBIDDEN:
raise housecanary.exceptions.UnauthorizedException(code, message)
if code == constants.HTTP_TOO_MANY_REQUESTS:
raise housecanary.exceptions.RateLimitException(code, message, response)
else:
raise housecanary.exceptions.RequestException(code, message)
request_url = response.request.url
endpoint_name = self._parse_endpoint_name_from_url(request_url)
return Response.create(endpoint_name, response_json, response) | For a json response, check if there was any error and throw exception.
Otherwise, create a housecanary.response.Response. | Below is the the instruction that describes the task:
### Input:
For a json response, check if there was any error and throw exception.
Otherwise, create a housecanary.response.Response.
### Response:
def process_json_response(self, response):
"""For a json response, check if there was any error and throw exception.
Otherwise, create a housecanary.response.Response."""
response_json = response.json()
# handle errors
code_key = "code"
if code_key in response_json and response_json[code_key] != constants.HTTP_CODE_OK:
code = response_json[code_key]
message = response_json
if "message" in response_json:
message = response_json["message"]
elif "code_description" in response_json:
message = response_json["code_description"]
if code == constants.HTTP_FORBIDDEN:
raise housecanary.exceptions.UnauthorizedException(code, message)
if code == constants.HTTP_TOO_MANY_REQUESTS:
raise housecanary.exceptions.RateLimitException(code, message, response)
else:
raise housecanary.exceptions.RequestException(code, message)
request_url = response.request.url
endpoint_name = self._parse_endpoint_name_from_url(request_url)
return Response.create(endpoint_name, response_json, response) |
def destroy(self):
""" A reimplemented destructor that cancels
the dialog before destroying.
"""
dialog = self.dialog
if dialog:
#: Clear the dismiss listener
#: (or we get an error during the callback)
dialog.setOnDismissListener(None)
dialog.dismiss()
del self.dialog
super(AndroidDialog, self).destroy() | A reimplemented destructor that cancels
the dialog before destroying. | Below is the the instruction that describes the task:
### Input:
A reimplemented destructor that cancels
the dialog before destroying.
### Response:
def destroy(self):
""" A reimplemented destructor that cancels
the dialog before destroying.
"""
dialog = self.dialog
if dialog:
#: Clear the dismiss listener
#: (or we get an error during the callback)
dialog.setOnDismissListener(None)
dialog.dismiss()
del self.dialog
super(AndroidDialog, self).destroy() |
def _get_fully_qualified_name(self):
"return full parents name + self name (useful as key)"
parent_name = self._get_parent_name()
if not parent_name:
return self._name
else:
return "%s.%s" % (parent_name, self._name) | return full parents name + self name (useful as key) | Below is the the instruction that describes the task:
### Input:
return full parents name + self name (useful as key)
### Response:
def _get_fully_qualified_name(self):
"return full parents name + self name (useful as key)"
parent_name = self._get_parent_name()
if not parent_name:
return self._name
else:
return "%s.%s" % (parent_name, self._name) |
def _resolve_hostname(name):
"""Returns resolved hostname using the ssh config"""
if env.ssh_config is None:
return name
elif not os.path.exists(os.path.join("nodes", name + ".json")):
resolved_name = env.ssh_config.lookup(name)['hostname']
if os.path.exists(os.path.join("nodes", resolved_name + ".json")):
name = resolved_name
return name | Returns resolved hostname using the ssh config | Below is the the instruction that describes the task:
### Input:
Returns resolved hostname using the ssh config
### Response:
def _resolve_hostname(name):
"""Returns resolved hostname using the ssh config"""
if env.ssh_config is None:
return name
elif not os.path.exists(os.path.join("nodes", name + ".json")):
resolved_name = env.ssh_config.lookup(name)['hostname']
if os.path.exists(os.path.join("nodes", resolved_name + ".json")):
name = resolved_name
return name |
def set_bn_eval(m:nn.Module)->None:
"Set bn layers in eval mode for all recursive children of `m`."
for l in m.children():
if isinstance(l, bn_types) and not next(l.parameters()).requires_grad:
l.eval()
set_bn_eval(l) | Set bn layers in eval mode for all recursive children of `m`. | Below is the the instruction that describes the task:
### Input:
Set bn layers in eval mode for all recursive children of `m`.
### Response:
def set_bn_eval(m:nn.Module)->None:
"Set bn layers in eval mode for all recursive children of `m`."
for l in m.children():
if isinstance(l, bn_types) and not next(l.parameters()).requires_grad:
l.eval()
set_bn_eval(l) |
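A small PyTorch sketch of the intended effect; bn_types is not defined in this record, so it is assumed here to cover the standard BatchNorm classes, and the function is restated so the snippet runs on its own.
import torch.nn as nn

bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)   # assumed definition

def set_bn_eval(m):
    # Same logic as above, restated for a self-contained example.
    for l in m.children():
        if isinstance(l, bn_types) and not next(l.parameters()).requires_grad:
            l.eval()
        set_bn_eval(l)

model = nn.Sequential(nn.Linear(4, 8), nn.BatchNorm1d(8), nn.ReLU())
for p in model[1].parameters():
    p.requires_grad_(False)          # freeze the BatchNorm affine parameters
model.train()
set_bn_eval(model)
print(model[1].training, model[0].training)   # False True: only the frozen BN layer drops to eval mode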
def __get_award_emoji(self, item_type, item_id):
"""Get award emojis for issue/merge request"""
emojis = []
group_emojis = self.client.emojis(item_type, item_id)
for raw_emojis in group_emojis:
for emoji in json.loads(raw_emojis):
emojis.append(emoji)
return emojis | Get award emojis for issue/merge request | Below is the the instruction that describes the task:
### Input:
Get award emojis for issue/merge request
### Response:
def __get_award_emoji(self, item_type, item_id):
"""Get award emojis for issue/merge request"""
emojis = []
group_emojis = self.client.emojis(item_type, item_id)
for raw_emojis in group_emojis:
for emoji in json.loads(raw_emojis):
emojis.append(emoji)
return emojis |
def process_response(self, request, response):
"""
Disconnects the signal receiver to prevent it from staying active.
"""
if hasattr(threadlocal, 'auditlog'):
pre_save.disconnect(sender=LogEntry, dispatch_uid=threadlocal.auditlog['signal_duid'])
return response | Disconnects the signal receiver to prevent it from staying active. | Below is the the instruction that describes the task:
### Input:
Disconnects the signal receiver to prevent it from staying active.
### Response:
def process_response(self, request, response):
"""
Disconnects the signal receiver to prevent it from staying active.
"""
if hasattr(threadlocal, 'auditlog'):
pre_save.disconnect(sender=LogEntry, dispatch_uid=threadlocal.auditlog['signal_duid'])
return response |
def get_reference_end_from_cigar(reference_start, cigar):
'''
This returns the coordinate just past the last aligned base.
This matches the behavior of pysam's reference_end method
'''
reference_end = reference_start
# iterate through cigartuple
for i in xrange(len(cigar)):
k, n = cigar[i]
if k in (0,2,3,7,8): # M, D, N, =, X
reference_end += n
return reference_end | This returns the coordinate just past the last aligned base.
This matches the behavior of pysam's reference_end method | Below is the the instruction that describes the task:
### Input:
This returns the coordinate just past the last aligned base.
This matches the behavior of pysam's reference_end method
### Response:
def get_reference_end_from_cigar(reference_start, cigar):
'''
This returns the coordinate just past the last aligned base.
This matches the behavior of pysam's reference_end method
'''
reference_end = reference_start
# iterate through cigartuple
for i in xrange(len(cigar)):
k, n = cigar[i]
if k in (0,2,3,7,8): # M, D, N, =, X
reference_end += n
return reference_end |
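A worked example with pysam-style operation codes (4=S soft clip, 0=M, 2=D); note the function above uses xrange, so it is Python 2 code, and on Python 3 xrange would be replaced by range.
cigar = [(4, 5), (0, 10), (2, 2), (0, 5)]   # 5S 10M 2D 5M
# Soft clips do not consume the reference, so only 10 + 2 + 5 = 17 bases count:
print(get_reference_end_from_cigar(100, cigar))   # 117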
def resolve_redirection(self, request, context):
"""Check for redirections."""
current_page = context['current_page']
lang = context['lang']
if current_page.redirect_to_url:
return HttpResponsePermanentRedirect(current_page.redirect_to_url)
if current_page.redirect_to:
return HttpResponsePermanentRedirect(
current_page.redirect_to.get_url_path(lang)) | Check for redirections. | Below is the the instruction that describes the task:
### Input:
Check for redirections.
### Response:
def resolve_redirection(self, request, context):
"""Check for redirections."""
current_page = context['current_page']
lang = context['lang']
if current_page.redirect_to_url:
return HttpResponsePermanentRedirect(current_page.redirect_to_url)
if current_page.redirect_to:
return HttpResponsePermanentRedirect(
current_page.redirect_to.get_url_path(lang)) |
def get_projected_player_game_stats_by_team(self, season, week, team_id):
"""
Projected Player Game Stats by Team
"""
result = self._method_call("PlayerGameProjectionStatsByTeam/{season}/{week}/{team_id}", "projections", season=season, week=week, team_id=team_id)
return result | Projected Player Game Stats by Team | Below is the the instruction that describes the task:
### Input:
Projected Player Game Stats by Team
### Response:
def get_projected_player_game_stats_by_team(self, season, week, team_id):
"""
Projected Player Game Stats by Team
"""
result = self._method_call("PlayerGameProjectionStatsByTeam/{season}/{week}/{team_id}", "projections", season=season, week=week, team_id=team_id)
return result |
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind] | Return a shuffled copy of y eventually shuffle among same labels. | Below is the the instruction that describes the task:
### Input:
Return a shuffled copy of y eventually shuffle among same labels.
### Response:
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind] |
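A small numpy sketch of the label-restricted permutation; the arrays are toy data.
import numpy as np

y = np.array([10, 11, 12, 20, 21, 22])
labels = np.array([0, 0, 0, 1, 1, 1])
rng = np.random.RandomState(0)
print(_shuffle(y, labels, rng))
# The 10s stay in the first three positions and the 20s in the last three,
# because indices are only permuted within each label group.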
def radiance2tb(rad, wavelength):
"""
Get the Tb from the radiance using the Planck function
rad:
Radiance in SI units
wavelength:
Wavelength in SI units (meter)
"""
from pyspectral.blackbody import blackbody_rad2temp as rad2temp
return rad2temp(wavelength, rad) | Get the Tb from the radiance using the Planck function
rad:
Radiance in SI units
wavelength:
Wavelength in SI units (meter) | Below is the the instruction that describes the task:
### Input:
Get the Tb from the radiance using the Planck function
rad:
Radiance in SI units
wavelength:
Wavelength in SI units (meter)
### Response:
def radiance2tb(rad, wavelength):
"""
Get the Tb from the radiance using the Planck function
rad:
Radiance in SI units
wavelength:
Wavelength in SI units (meter)
"""
from pyspectral.blackbody import blackbody_rad2temp as rad2temp
return rad2temp(wavelength, rad) |
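If pyspectral is not available, the same inversion can be sketched directly from the Planck law; this is the generic textbook formula with SI constants, not the pyspectral implementation, and the radiance value is illustrative.
import numpy as np

def radiance_to_tb_sketch(rad, wavelength):
    # Invert B(lambda, T) = 2hc^2 / (lambda^5 * (exp(hc / (lambda k T)) - 1))
    h = 6.62607015e-34   # Planck constant [J s]
    c = 2.99792458e8     # speed of light [m/s]
    k = 1.380649e-23     # Boltzmann constant [J/K]
    return (h * c / (wavelength * k)) / np.log(1.0 + 2.0 * h * c ** 2 / (wavelength ** 5 * rad))

# 10.8 micrometre channel, spectral radiance in W m-2 sr-1 m-1
print(radiance_to_tb_sketch(6.9e6, 10.8e-6))   # roughly 279 K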
def led_changed(self, addr, group, val):
"""Capture a change to the LED for this button."""
_LOGGER.debug("Button %d LED changed from %d to %d",
self._group, self._value, val)
led_on = bool(val)
if led_on != bool(self._value):
self._update_subscribers(int(led_on)) | Capture a change to the LED for this button. | Below is the the instruction that describes the task:
### Input:
Capture a change to the LED for this button.
### Response:
def led_changed(self, addr, group, val):
"""Capture a change to the LED for this button."""
_LOGGER.debug("Button %d LED changed from %d to %d",
self._group, self._value, val)
led_on = bool(val)
if led_on != bool(self._value):
self._update_subscribers(int(led_on)) |
def server_poweroff(host=None,
admin_username=None,
admin_password=None,
module=None):
'''
Powers down the managed server.
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
module
The element to power off on the chassis such as a blade.
If not provided, the chassis will be powered off.
CLI Example:
.. code-block:: bash
salt dell dracr.server_poweroff
salt dell dracr.server_poweroff module=server-1
'''
return __execute_cmd('serveraction powerdown',
host=host, admin_username=admin_username,
admin_password=admin_password, module=module) | Powers down the managed server.
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
module
The element to power off on the chassis such as a blade.
If not provided, the chassis will be powered off.
CLI Example:
.. code-block:: bash
salt dell dracr.server_poweroff
salt dell dracr.server_poweroff module=server-1 | Below is the the instruction that describes the task:
### Input:
Powers down the managed server.
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
module
The element to power off on the chassis such as a blade.
If not provided, the chassis will be powered off.
CLI Example:
.. code-block:: bash
salt dell dracr.server_poweroff
salt dell dracr.server_poweroff module=server-1
### Response:
def server_poweroff(host=None,
admin_username=None,
admin_password=None,
module=None):
'''
Powers down the managed server.
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
module
The element to power off on the chassis such as a blade.
If not provided, the chassis will be powered off.
CLI Example:
.. code-block:: bash
salt dell dracr.server_poweroff
salt dell dracr.server_poweroff module=server-1
'''
return __execute_cmd('serveraction powerdown',
host=host, admin_username=admin_username,
admin_password=admin_password, module=module) |
def _apply_replace_backrefs(m, repl=None, flags=0):
"""Expand with either the `ReplaceTemplate` or compile on the fly, or return None."""
if m is None:
raise ValueError("Match is None!")
else:
if isinstance(repl, ReplaceTemplate):
return repl.expand(m)
elif isinstance(repl, (str, bytes)):
return _bregex_parse._ReplaceParser().parse(m.re, repl, bool(flags & FORMAT)).expand(m) | Expand with either the `ReplaceTemplate` or compile on the fly, or return None. | Below is the the instruction that describes the task:
### Input:
Expand with either the `ReplaceTemplate` or compile on the fly, or return None.
### Response:
def _apply_replace_backrefs(m, repl=None, flags=0):
"""Expand with either the `ReplaceTemplate` or compile on the fly, or return None."""
if m is None:
raise ValueError("Match is None!")
else:
if isinstance(repl, ReplaceTemplate):
return repl.expand(m)
elif isinstance(repl, (str, bytes)):
return _bregex_parse._ReplaceParser().parse(m.re, repl, bool(flags & FORMAT)).expand(m) |
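For comparison, the standard-library re module offers the same "expand a replacement template against a match" idea that the helper above applies to backrefs' ReplaceTemplate objects; this sketch uses only re and illustrative strings.

import re

m = re.match(r"(\w+)-(\w+)", "foo-bar")
# Expanding a replacement template against an existing match object.
print(m.expand(r"\2_\1"))  # bar_foo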
def buildMaskImage(rootname, bitvalue, output, extname='DQ', extver=1):
""" Builds mask image from rootname's DQ array
If there is no valid 'DQ' array in image, then return
an empty string.
"""
# If no bitvalue is set or rootname given, assume no mask is desired
# However, this name would be useful as the output mask from
# other processing, such as MultiDrizzle, so return it anyway.
#if bitvalue == None or rootname == None:
# return None
# build output name
maskname = output
# If an old version of the maskfile was present, remove it and rebuild it.
if fileutil.findFile(maskname):
fileutil.removeFile(maskname)
# Open input file with DQ array
fdq = fileutil.openImage(rootname, mode='readonly', memmap=False)
try:
_extn = fileutil.findExtname(fdq, extname, extver=extver)
if _extn is not None:
# Read in DQ array
dqarr = fdq[_extn].data
else:
dqarr = None
# For the case where there is no DQ array,
# create a mask image of all ones.
if dqarr is None:
# We need to get the dimensions of the output DQ array
# Since the DQ array is non-existent, look for the SCI extension
_sci_extn = fileutil.findExtname(fdq,'SCI',extver=extver)
if _sci_extn is not None:
_shape = fdq[_sci_extn].data.shape
dqarr = np.zeros(_shape,dtype=np.uint16)
else:
raise Exception
# Build mask array from DQ array
maskarr = buildMask(dqarr,bitvalue)
#Write out the mask file as simple FITS file
fmask = fits.open(maskname, mode='append', memmap=False)
maskhdu = fits.PrimaryHDU(data = maskarr)
fmask.append(maskhdu)
#Close files
fmask.close()
del fmask
fdq.close()
del fdq
except:
fdq.close()
del fdq
# Safeguard against leaving behind an incomplete file
if fileutil.findFile(maskname):
os.remove(maskname)
_errstr = "\nWarning: Problem creating MASK file for "+rootname+".\n"
#raise IOError, _errstr
print(_errstr)
return None
# Return the name of the mask image written out
return maskname | Builds mask image from rootname's DQ array
If there is no valid 'DQ' array in image, then return
an empty string. | Below is the the instruction that describes the task:
### Input:
Builds mask image from rootname's DQ array
If there is no valid 'DQ' array in image, then return
an empty string.
### Response:
def buildMaskImage(rootname, bitvalue, output, extname='DQ', extver=1):
""" Builds mask image from rootname's DQ array
If there is no valid 'DQ' array in image, then return
an empty string.
"""
# If no bitvalue is set or rootname given, assume no mask is desired
# However, this name would be useful as the output mask from
# other processing, such as MultiDrizzle, so return it anyway.
#if bitvalue == None or rootname == None:
# return None
# build output name
maskname = output
# If an old version of the maskfile was present, remove it and rebuild it.
if fileutil.findFile(maskname):
fileutil.removeFile(maskname)
# Open input file with DQ array
fdq = fileutil.openImage(rootname, mode='readonly', memmap=False)
try:
_extn = fileutil.findExtname(fdq, extname, extver=extver)
if _extn is not None:
# Read in DQ array
dqarr = fdq[_extn].data
else:
dqarr = None
# For the case where there is no DQ array,
# create a mask image of all ones.
if dqarr is None:
# We need to get the dimensions of the output DQ array
# Since the DQ array is non-existent, look for the SCI extension
_sci_extn = fileutil.findExtname(fdq,'SCI',extver=extver)
if _sci_extn is not None:
_shape = fdq[_sci_extn].data.shape
dqarr = np.zeros(_shape,dtype=np.uint16)
else:
raise Exception
# Build mask array from DQ array
maskarr = buildMask(dqarr,bitvalue)
#Write out the mask file as simple FITS file
fmask = fits.open(maskname, mode='append', memmap=False)
maskhdu = fits.PrimaryHDU(data = maskarr)
fmask.append(maskhdu)
#Close files
fmask.close()
del fmask
fdq.close()
del fdq
except:
fdq.close()
del fdq
# Safeguard against leaving behind an incomplete file
if fileutil.findFile(maskname):
os.remove(maskname)
_errstr = "\nWarning: Problem creating MASK file for "+rootname+".\n"
#raise IOError, _errstr
print(_errstr)
return None
# Return the name of the mask image written out
return maskname |
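A rough numpy sketch of the DQ-to-mask idea behind the buildMask call above (the helper itself is not shown here). It assumes one common convention: a pixel is kept only when it carries no flag bits outside the accepted bitvalue; the arrays and bitvalue are made up.

import numpy as np

dq = np.array([[0, 2], [4, 6]], dtype=np.int16)  # hypothetical DQ flags
bitvalue = 2                                     # flag bits treated as acceptable
# Good pixels (1) have no flag bits set outside the accepted set.
mask = (np.bitwise_and(dq, ~bitvalue) == 0).astype(np.uint8)
print(mask)  # [[1 1]
             #  [0 0]]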
def visitEncapsulatedShape(self, ctx: ShExDocParser.EncapsulatedShapeContext):
""" encapsulatedShape: '(' innerShape ')' cardinality? annotation* semanticActions """
enc_shape = ShexOneOfShapeParser(self.context)
enc_shape.visit(ctx.innerShape())
self.expression = enc_shape.expression
self._card_annotations_and_semacts(ctx) | encapsulatedShape: '(' innerShape ')' cardinality? annotation* semanticActions | Below is the the instruction that describes the task:
### Input:
encapsulatedShape: '(' innerShape ')' cardinality? annotation* semanticActions
### Response:
def visitEncapsulatedShape(self, ctx: ShExDocParser.EncapsulatedShapeContext):
""" encapsulatedShape: '(' innerShape ')' cardinality? annotation* semanticActions """
enc_shape = ShexOneOfShapeParser(self.context)
enc_shape.visit(ctx.innerShape())
self.expression = enc_shape.expression
self._card_annotations_and_semacts(ctx) |
def iter_blobs(self, predicate=lambda t: True):
"""
:return: Iterator yielding tuples of Blob objects and stages, tuple(stage, Blob)
:param predicate:
Function(t) returning True if tuple(stage, Blob) should be yielded by the
iterator. A default filter, the BlobFilter, allows you to yield blobs
only if they match a given list of paths. """
for entry in mviter(self.entries):
blob = entry.to_blob(self.repo)
blob.size = entry.size
output = (entry.stage, blob)
if predicate(output):
yield output | :return: Iterator yielding tuples of Blob objects and stages, tuple(stage, Blob)
:param predicate:
Function(t) returning True if tuple(stage, Blob) should be yielded by the
iterator. A default filter, the BlobFilter, allows you to yield blobs
only if they match a given list of paths. | Below is the the instruction that describes the task:
### Input:
:return: Iterator yielding tuples of Blob objects and stages, tuple(stage, Blob)
:param predicate:
Function(t) returning True if tuple(stage, Blob) should be yielded by the
iterator. A default filter, the BlobFilter, allows you to yield blobs
only if they match a given list of paths.
### Response:
def iter_blobs(self, predicate=lambda t: True):
"""
:return: Iterator yielding tuples of Blob objects and stages, tuple(stage, Blob)
:param predicate:
Function(t) returning True if tuple(stage, Blob) should be yielded by the
iterator. A default filter, the BlobFilter, allows you to yield blobs
only if they match a given list of paths. """
for entry in mviter(self.entries):
blob = entry.to_blob(self.repo)
blob.size = entry.size
output = (entry.stage, blob)
if predicate(output):
yield output |
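A hedged usage sketch of the predicate hook, assuming a GitPython-style index whose blobs expose a path attribute; only the filter function itself runs here, and the loop is left commented because it needs a real repository.

def only_python(stage_blob):
    """Keep only staged blobs whose path ends in .py."""
    stage, blob = stage_blob
    return blob.path.endswith(".py")

# for stage, blob in index.iter_blobs(predicate=only_python):
#     print(stage, blob.path)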
def main(arguments=None): # suppress(unused-function)
"""Entry point for the linter."""
result = _parse_arguments(arguments)
linter_funcs = _ordered(linter_functions_from_filters,
result.whitelist,
result.blacklist)
global_options = vars(result)
tool_options = tool_options_from_global(global_options, len(result.files))
any_would_run = _any_would_run(_run_lint_on_file_exceptions,
result.files,
result.stamp_file_path,
result.log_technical_terms_to,
linter_funcs,
tool_options,
result.fix_what_you_can)
if any_would_run:
for linter_function in linter_funcs.values():
if linter_function.before_all:
linter_function.before_all(global_options, tool_options)
use_multiprocessing = _should_use_multiprocessing(len(result.files))
else:
use_multiprocessing = False
if use_multiprocessing:
mapper = parmap.map
else:
# suppress(E731)
mapper = lambda f, i, *a: [f(*((x, ) + a)) for x in i]
errors = list(itertools.chain(*mapper(_run_lint_on_file_stamped,
result.files,
result.stamp_file_path,
result.log_technical_terms_to,
linter_funcs,
tool_options,
result.fix_what_you_can)))
for error in sorted(errors):
_report_lint_error(error.failure, os.path.relpath(error.absolute_path))
if any_would_run:
for linter_funcs in linter_funcs.values():
if linter_funcs.after_all:
linter_funcs.after_all(global_options, tool_options)
return len(errors) | Entry point for the linter. | Below is the the instruction that describes the task:
### Input:
Entry point for the linter.
### Response:
def main(arguments=None): # suppress(unused-function)
"""Entry point for the linter."""
result = _parse_arguments(arguments)
linter_funcs = _ordered(linter_functions_from_filters,
result.whitelist,
result.blacklist)
global_options = vars(result)
tool_options = tool_options_from_global(global_options, len(result.files))
any_would_run = _any_would_run(_run_lint_on_file_exceptions,
result.files,
result.stamp_file_path,
result.log_technical_terms_to,
linter_funcs,
tool_options,
result.fix_what_you_can)
if any_would_run:
for linter_function in linter_funcs.values():
if linter_function.before_all:
linter_function.before_all(global_options, tool_options)
use_multiprocessing = _should_use_multiprocessing(len(result.files))
else:
use_multiprocessing = False
if use_multiprocessing:
mapper = parmap.map
else:
# suppress(E731)
mapper = lambda f, i, *a: [f(*((x, ) + a)) for x in i]
errors = list(itertools.chain(*mapper(_run_lint_on_file_stamped,
result.files,
result.stamp_file_path,
result.log_technical_terms_to,
linter_funcs,
tool_options,
result.fix_what_you_can)))
for error in sorted(errors):
_report_lint_error(error.failure, os.path.relpath(error.absolute_path))
if any_would_run:
for linter_funcs in linter_funcs.values():
if linter_funcs.after_all:
linter_funcs.after_all(global_options, tool_options)
return len(errors) |
def schedule_telegram_message(message, to, sender=None, priority=None):
"""Schedules Telegram message for delivery.
:param str message: text to send.
:param list|str|unicode to: recipients addresses or Django User model heir instances with `telegram` attributes.
:param User sender: User model heir instance
:param int priority: number describing message priority. If set overrides priority provided with message type.
"""
schedule_messages(message, recipients('telegram', to), sender=sender, priority=priority) | Schedules Telegram message for delivery.
:param str message: text to send.
:param list|str|unicode to: recipients addresses or Django User model heir instances with `telegram` attributes.
:param User sender: User model heir instance
:param int priority: number describing message priority. If set overrides priority provided with message type. | Below is the the instruction that describes the task:
### Input:
Schedules Telegram message for delivery.
:param str message: text to send.
:param list|str|unicode to: recipients addresses or Django User model heir instances with `telegram` attributes.
:param User sender: User model heir instance
:param int priority: number describing message priority. If set overrides priority provided with message type.
### Response:
def schedule_telegram_message(message, to, sender=None, priority=None):
"""Schedules Telegram message for delivery.
:param str message: text to send.
:param list|str|unicode to: recipients addresses or Django User model heir instances with `telegram` attributes.
:param User sender: User model heir instance
:param int priority: number describing message priority. If set overrides priority provided with message type.
"""
schedule_messages(message, recipients('telegram', to), sender=sender, priority=priority) |
def classify(self, dataset, missing_value_action='auto'):
"""
Return a classification, for each example in the ``dataset``, using the
trained random forest model. The output SFrame contains predictions
as class labels (0 or 1) and probabilities associated with the the example.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : SFrame
An SFrame with model predictions i.e class labels and probabilities
associated with each of the class labels.
See Also
----------
create, evaluate, predict
Examples
----------
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> data['is_expensive'] = data['price'] > 30000
>>> model = turicreate.random_forest_classifier.create(data,
>>> target='is_expensive',
>>> features=['bath', 'bedroom', 'size'])
>>> classes = model.classify(data)
"""
return super(RandomForestClassifier, self).classify(dataset,
missing_value_action=missing_value_action) | Return a classification, for each example in the ``dataset``, using the
trained random forest model. The output SFrame contains predictions
as class labels (0 or 1) and probabilities associated with the the example.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : SFrame
An SFrame with model predictions i.e class labels and probabilities
associated with each of the class labels.
See Also
----------
create, evaluate, predict
Examples
----------
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> data['is_expensive'] = data['price'] > 30000
>>> model = turicreate.random_forest_classifier.create(data,
>>> target='is_expensive',
>>> features=['bath', 'bedroom', 'size'])
>>> classes = model.classify(data) | Below is the the instruction that describes the task:
### Input:
Return a classification, for each example in the ``dataset``, using the
trained random forest model. The output SFrame contains predictions
as class labels (0 or 1) and probabilities associated with the the example.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : SFrame
An SFrame with model predictions i.e class labels and probabilities
associated with each of the class labels.
See Also
----------
create, evaluate, predict
Examples
----------
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> data['is_expensive'] = data['price'] > 30000
>>> model = turicreate.random_forest_classifier.create(data,
>>> target='is_expensive',
>>> features=['bath', 'bedroom', 'size'])
>>> classes = model.classify(data)
### Response:
def classify(self, dataset, missing_value_action='auto'):
"""
Return a classification, for each example in the ``dataset``, using the
trained random forest model. The output SFrame contains predictions
as class labels (0 or 1) and probabilities associated with the the example.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : SFrame
An SFrame with model predictions i.e class labels and probabilities
associated with each of the class labels.
See Also
----------
create, evaluate, predict
Examples
----------
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> data['is_expensive'] = data['price'] > 30000
>>> model = turicreate.random_forest_classifier.create(data,
>>> target='is_expensive',
>>> features=['bath', 'bedroom', 'size'])
>>> classes = model.classify(data)
"""
return super(RandomForestClassifier, self).classify(dataset,
missing_value_action=missing_value_action) |
def __check_command_completion(self, testsemicolon=True):
"""Check for command(s) completion
This function should be called each time a new argument is
seen by the parser in order to check a command is complete. As
not only one command can be ended when receiving a new
argument (nested commands case), we apply the same work to
parent commands.
:param testsemicolon: if True, indicates that the next
expected token must be a semicolon (for commands that need one)
:return: True if command is
considered as complete, False otherwise.
"""
if not self.__curcommand.iscomplete():
return True
ctype = self.__curcommand.get_type()
if ctype == "action" or \
(ctype == "control" and
not self.__curcommand.accept_children):
if testsemicolon:
self.__set_expected("semicolon")
return True
while self.__curcommand.parent:
cmd = self.__curcommand
self.__curcommand = self.__curcommand.parent
if self.__curcommand.get_type() in ["control", "test"]:
if self.__curcommand.iscomplete():
if self.__curcommand.get_type() == "control":
break
continue
if not self.__curcommand.check_next_arg("test", cmd, add=False):
return False
if not self.__curcommand.iscomplete():
if self.__curcommand.variable_args_nb:
self.__set_expected("comma", "right_parenthesis")
break
return True | Check for command(s) completion
This function should be called each time a new argument is
seen by the parser in order to check a command is complete. As
not only one command can be ended when receiving a new
argument (nested commands case), we apply the same work to
parent commands.
:param testsemicolon: if True, indicates that the next
expected token must be a semicolon (for commands that need one)
:return: True if command is
considered as complete, False otherwise. | Below is the the instruction that describes the task:
### Input:
Check for command(s) completion
This function should be called each time a new argument is
seen by the parser in order to check a command is complete. As
not only one command can be ended when receiving a new
argument (nested commands case), we apply the same work to
parent commands.
:param testsemicolon: if True, indicates that the next
expected token must be a semicolon (for commands that need one)
:return: True if command is
considered as complete, False otherwise.
### Response:
def __check_command_completion(self, testsemicolon=True):
"""Check for command(s) completion
This function should be called each time a new argument is
seen by the parser in order to check a command is complete. As
not only one command can be ended when receiving a new
argument (nested commands case), we apply the same work to
parent commands.
:param testsemicolon: if True, indicates that the next
expected token must be a semicolon (for commands that need one)
:return: True if command is
considered as complete, False otherwise.
"""
if not self.__curcommand.iscomplete():
return True
ctype = self.__curcommand.get_type()
if ctype == "action" or \
(ctype == "control" and
not self.__curcommand.accept_children):
if testsemicolon:
self.__set_expected("semicolon")
return True
while self.__curcommand.parent:
cmd = self.__curcommand
self.__curcommand = self.__curcommand.parent
if self.__curcommand.get_type() in ["control", "test"]:
if self.__curcommand.iscomplete():
if self.__curcommand.get_type() == "control":
break
continue
if not self.__curcommand.check_next_arg("test", cmd, add=False):
return False
if not self.__curcommand.iscomplete():
if self.__curcommand.variable_args_nb:
self.__set_expected("comma", "right_parenthesis")
break
return True |
def unirange(a, b):
"""Returns a regular expression string to match the given non-BMP range."""
if b < a:
raise ValueError("Bad character range")
if a < 0x10000 or b < 0x10000:
raise ValueError("unirange is only defined for non-BMP ranges")
if sys.maxunicode > 0xffff:
# wide build
return u'[%s-%s]' % (unichr(a), unichr(b))
else:
# narrow build stores surrogates, and the 're' module handles them
# (incorrectly) as characters. Since there is still ordering among
# these characters, expand the range to one that it understands. Some
# background in http://bugs.python.org/issue3665 and
# http://bugs.python.org/issue12749
#
# Additionally, the lower constants are using unichr rather than
# literals because jython [which uses the wide path] can't load this
# file if they are literals.
ah, al = _surrogatepair(a)
bh, bl = _surrogatepair(b)
if ah == bh:
return u'(?:%s[%s-%s])' % (unichr(ah), unichr(al), unichr(bl))
else:
buf = []
buf.append(u'%s[%s-%s]' %
(unichr(ah), unichr(al),
ah == bh and unichr(bl) or unichr(0xdfff)))
            if bh - ah > 1:
                buf.append(u'[%s-%s][%s-%s]' %
                           (unichr(ah+1), unichr(bh-1), unichr(0xdc00), unichr(0xdfff)))
if ah != bh:
buf.append(u'%s[%s-%s]' %
(unichr(bh), unichr(0xdc00), unichr(bl)))
return u'(?:' + u'|'.join(buf) + u')' | Returns a regular expression string to match the given non-BMP range. | Below is the the instruction that describes the task:
### Input:
Returns a regular expression string to match the given non-BMP range.
### Response:
def unirange(a, b):
"""Returns a regular expression string to match the given non-BMP range."""
if b < a:
raise ValueError("Bad character range")
if a < 0x10000 or b < 0x10000:
raise ValueError("unirange is only defined for non-BMP ranges")
if sys.maxunicode > 0xffff:
# wide build
return u'[%s-%s]' % (unichr(a), unichr(b))
else:
# narrow build stores surrogates, and the 're' module handles them
# (incorrectly) as characters. Since there is still ordering among
# these characters, expand the range to one that it understands. Some
# background in http://bugs.python.org/issue3665 and
# http://bugs.python.org/issue12749
#
# Additionally, the lower constants are using unichr rather than
# literals because jython [which uses the wide path] can't load this
# file if they are literals.
ah, al = _surrogatepair(a)
bh, bl = _surrogatepair(b)
if ah == bh:
return u'(?:%s[%s-%s])' % (unichr(ah), unichr(al), unichr(bl))
else:
buf = []
buf.append(u'%s[%s-%s]' %
(unichr(ah), unichr(al),
ah == bh and unichr(bl) or unichr(0xdfff)))
            if bh - ah > 1:
                buf.append(u'[%s-%s][%s-%s]' %
                           (unichr(ah+1), unichr(bh-1), unichr(0xdc00), unichr(0xdfff)))
if ah != bh:
buf.append(u'%s[%s-%s]' %
(unichr(bh), unichr(0xdc00), unichr(bl)))
return u'(?:' + u'|'.join(buf) + u')' |
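On a wide Unicode build (any Python 3 interpreter) the function reduces to its first branch: a single character class spanning the two code points. A small sketch of that case, with chr standing in for unichr:

import re

pattern = u'[%s-%s]' % (chr(0x10000), chr(0x10FFF))
assert re.match(pattern, '\U00010010')   # inside the non-BMP range
assert not re.match(pattern, 'a')        # BMP character, outside the range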
def calculate_activity_rate(self, strain_data, cumulative=False,
in_seconds=False):
'''
Main function to calculate the activity rate (for each of the
magnitudes in target_magnitudes) for all of the cells specified in
the input strain model file
:param strain_data:
Strain model as an instance of :class:
openquake.hmtk.strain.geodetic_strain.GeodeticStrain
:param bool cumulative:
Set to true if the cumulative rate is required, False for
incremental
:param bool in_seconds:
Returns the activity rate in seconds (True) or else as an annual
activity rate
'''
self.strain = strain_data
self.strain.target_magnitudes = self.target_magnitudes
# Adjust strain rates from annual to seconds (SI)
for key in STRAIN_VARIABLES:
self.strain.data[key] = self.strain.data[key] / SECS_PER_YEAR
if 'region' not in self.strain.data:
raise ValueError('Cannot implment SHIFT methodology without '
'definition of regionalisation')
else:
self._reclassify_Bird_regions_with_data()
# Initially all seismicity rates assigned to background rate
self.strain.seismicity_rate = np.tile(
self.base_rate,
[self.strain.get_number_observations(), 1])
regionalisation_zones = (
np.unique(self.strain.data['region'])).tolist()
for region in regionalisation_zones:
id0 = self.strain.data['region'] == region
if b'IPL' in region:
# For intra-plate seismicity everything is refered to
# the background rate
continue
elif b'OSR_special_1' in region:
# Special case 1 - normal and transform faulting
calculated_rate = self.get_rate_osr_normal_transform(
self.threshold_moment, id0)
elif b'OSR_special_2' in region:
# Special case 2 - convergent and transform faulting
calculated_rate = self.get_rate_osr_convergent_transform(
self.threshold_moment, id0)
else:
region = region.decode('utf-8')
calculated_rate = \
self.regionalisation[region]['adjustment_factor'] * \
self.continuum_seismicity(self.threshold_moment,
self.strain.data['e1h'][id0],
self.strain.data['e2h'][id0],
self.strain.data['err'][id0],
self.regionalisation[region])
for jloc, iloc in enumerate(np.where(id0)[0]):
# Where the calculated rate exceeds the base rate then becomes
# calculated rate. In this version the magnitudes are treated
# independently (i.e. if Rate(M < 7) > Base Rate (M < 7) but
# Rate (M > 7) < Base Rate (M > 7) then returned Rate (M < 7)
# = Rate (M < 7) and returned Rate (M > 7) = Base Rate (M > 7)
id1 = calculated_rate[jloc] > self.base_rate
self.strain.seismicity_rate[iloc, id1] = calculated_rate[jloc,
id1]
if not cumulative and self.number_magnitudes > 1:
# Seismicity rates are currently cumulative - need to turn them
# into discrete
for iloc in range(0, self.number_magnitudes - 1):
self.strain.seismicity_rate[:, iloc] = \
self.strain.seismicity_rate[:, iloc] -\
self.strain.seismicity_rate[:, iloc + 1]
if not in_seconds:
self.strain.seismicity_rate = self.strain.seismicity_rate * \
SECS_PER_YEAR
for key in STRAIN_VARIABLES:
self.strain.data[key] = self.strain.data[key] * SECS_PER_YEAR | Main function to calculate the activity rate (for each of the
magnitudes in target_magnitudes) for all of the cells specified in
the input strain model file
:param strain_data:
Strain model as an instance of :class:
openquake.hmtk.strain.geodetic_strain.GeodeticStrain
:param bool cumulative:
Set to true if the cumulative rate is required, False for
incremental
:param bool in_seconds:
Returns the activity rate in seconds (True) or else as an annual
activity rate | Below is the the instruction that describes the task:
### Input:
Main function to calculate the activity rate (for each of the
magnitudes in target_magnitudes) for all of the cells specified in
the input strain model file
:param strain_data:
Strain model as an instance of :class:
openquake.hmtk.strain.geodetic_strain.GeodeticStrain
:param bool cumulative:
Set to true if the cumulative rate is required, False for
incremental
:param bool in_seconds:
Returns the activity rate in seconds (True) or else as an annual
activity rate
### Response:
def calculate_activity_rate(self, strain_data, cumulative=False,
in_seconds=False):
'''
Main function to calculate the activity rate (for each of the
magnitudes in target_magnitudes) for all of the cells specified in
the input strain model file
:param strain_data:
Strain model as an instance of :class:
openquake.hmtk.strain.geodetic_strain.GeodeticStrain
:param bool cumulative:
Set to true if the cumulative rate is required, False for
incremental
:param bool in_seconds:
Returns the activity rate in seconds (True) or else as an annual
activity rate
'''
self.strain = strain_data
self.strain.target_magnitudes = self.target_magnitudes
# Adjust strain rates from annual to seconds (SI)
for key in STRAIN_VARIABLES:
self.strain.data[key] = self.strain.data[key] / SECS_PER_YEAR
if 'region' not in self.strain.data:
raise ValueError('Cannot implment SHIFT methodology without '
'definition of regionalisation')
else:
self._reclassify_Bird_regions_with_data()
# Initially all seismicity rates assigned to background rate
self.strain.seismicity_rate = np.tile(
self.base_rate,
[self.strain.get_number_observations(), 1])
regionalisation_zones = (
np.unique(self.strain.data['region'])).tolist()
for region in regionalisation_zones:
id0 = self.strain.data['region'] == region
if b'IPL' in region:
# For intra-plate seismicity everything is refered to
# the background rate
continue
elif b'OSR_special_1' in region:
# Special case 1 - normal and transform faulting
calculated_rate = self.get_rate_osr_normal_transform(
self.threshold_moment, id0)
elif b'OSR_special_2' in region:
# Special case 2 - convergent and transform faulting
calculated_rate = self.get_rate_osr_convergent_transform(
self.threshold_moment, id0)
else:
region = region.decode('utf-8')
calculated_rate = \
self.regionalisation[region]['adjustment_factor'] * \
self.continuum_seismicity(self.threshold_moment,
self.strain.data['e1h'][id0],
self.strain.data['e2h'][id0],
self.strain.data['err'][id0],
self.regionalisation[region])
for jloc, iloc in enumerate(np.where(id0)[0]):
# Where the calculated rate exceeds the base rate then becomes
# calculated rate. In this version the magnitudes are treated
# independently (i.e. if Rate(M < 7) > Base Rate (M < 7) but
# Rate (M > 7) < Base Rate (M > 7) then returned Rate (M < 7)
# = Rate (M < 7) and returned Rate (M > 7) = Base Rate (M > 7)
id1 = calculated_rate[jloc] > self.base_rate
self.strain.seismicity_rate[iloc, id1] = calculated_rate[jloc,
id1]
if not cumulative and self.number_magnitudes > 1:
# Seismicity rates are currently cumulative - need to turn them
# into discrete
for iloc in range(0, self.number_magnitudes - 1):
self.strain.seismicity_rate[:, iloc] = \
self.strain.seismicity_rate[:, iloc] -\
self.strain.seismicity_rate[:, iloc + 1]
if not in_seconds:
self.strain.seismicity_rate = self.strain.seismicity_rate * \
SECS_PER_YEAR
for key in STRAIN_VARIABLES:
self.strain.data[key] = self.strain.data[key] * SECS_PER_YEAR |
def is_twss(self, phrase):
"""
The magic function- this accepts a phrase and tells you if it
classifies as an entendre
"""
featureset = self.extract_features(phrase)
return self.classifier.classify(featureset) | The magic function- this accepts a phrase and tells you if it
classifies as an entendre | Below is the the instruction that describes the task:
### Input:
The magic function- this accepts a phrase and tells you if it
classifies as an entendre
### Response:
def is_twss(self, phrase):
"""
The magic function- this accepts a phrase and tells you if it
classifies as an entendre
"""
featureset = self.extract_features(phrase)
return self.classifier.classify(featureset) |
def stub_main():
"""setuptools blah: it still can't run a module as a script entry_point"""
from google.apputils import run_script_module
import butcher.main
run_script_module.RunScriptModule(butcher.main) | setuptools blah: it still can't run a module as a script entry_point | Below is the the instruction that describes the task:
### Input:
setuptools blah: it still can't run a module as a script entry_point
### Response:
def stub_main():
"""setuptools blah: it still can't run a module as a script entry_point"""
from google.apputils import run_script_module
import butcher.main
run_script_module.RunScriptModule(butcher.main) |
def update(self, client=None, unique_writer_identity=False):
"""API call: update sink configuration via a PUT request
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update
:type client: :class:`~google.cloud.logging.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current sink.
:type unique_writer_identity: bool
:param unique_writer_identity: (Optional) determines the kind of
IAM identity returned as
writer_identity in the new sink.
"""
client = self._require_client(client)
resource = client.sinks_api.sink_update(
self.project,
self.name,
self.filter_,
self.destination,
unique_writer_identity=unique_writer_identity,
)
self._update_from_api_repr(resource) | API call: update sink configuration via a PUT request
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update
:type client: :class:`~google.cloud.logging.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current sink.
:type unique_writer_identity: bool
:param unique_writer_identity: (Optional) determines the kind of
IAM identity returned as
writer_identity in the new sink. | Below is the the instruction that describes the task:
### Input:
API call: update sink configuration via a PUT request
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update
:type client: :class:`~google.cloud.logging.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current sink.
:type unique_writer_identity: bool
:param unique_writer_identity: (Optional) determines the kind of
IAM identity returned as
writer_identity in the new sink.
### Response:
def update(self, client=None, unique_writer_identity=False):
"""API call: update sink configuration via a PUT request
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update
:type client: :class:`~google.cloud.logging.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current sink.
:type unique_writer_identity: bool
:param unique_writer_identity: (Optional) determines the kind of
IAM identity returned as
writer_identity in the new sink.
"""
client = self._require_client(client)
resource = client.sinks_api.sink_update(
self.project,
self.name,
self.filter_,
self.destination,
unique_writer_identity=unique_writer_identity,
)
self._update_from_api_repr(resource) |
def _template(node_id, value=None):
"Check if a template is assigned to it and render that with the value"
result = []
select_template_from_node = fetch_query_string('select_template_from_node.sql')
try:
result = db.execute(text(select_template_from_node), node_id=node_id)
template_result = result.fetchone()
result.close()
if template_result and template_result['name']:
template = template_result['name']
if isinstance(value, dict):
return render_template(template, **value)
else:
return render_template(template, value=value)
except DatabaseError as err:
current_app.logger.error("DatabaseError: %s", err)
# No template assigned to this node so just return the value
return value | Check if a template is assigned to it and render that with the value | Below is the the instruction that describes the task:
### Input:
Check if a template is assigned to it and render that with the value
### Response:
def _template(node_id, value=None):
"Check if a template is assigned to it and render that with the value"
result = []
select_template_from_node = fetch_query_string('select_template_from_node.sql')
try:
result = db.execute(text(select_template_from_node), node_id=node_id)
template_result = result.fetchone()
result.close()
if template_result and template_result['name']:
template = template_result['name']
if isinstance(value, dict):
return render_template(template, **value)
else:
return render_template(template, value=value)
except DatabaseError as err:
current_app.logger.error("DatabaseError: %s", err)
# No template assigned to this node so just return the value
return value |
def _set_bcpIn(self, value):
"""
Subclasses may override this method.
"""
x, y = absoluteBCPIn(self.anchor, value)
segment = self._segment
if segment.type == "move" and value != (0, 0):
raise FontPartsError(("Cannot set the bcpIn for the first "
"point in an open contour.")
)
else:
offCurves = segment.offCurve
if offCurves:
# if the two off curves are located at the anchor
# coordinates we can switch to a line segment type.
if value == (0, 0) and self.bcpOut == (0, 0):
segment.type = "line"
segment.smooth = False
else:
offCurves[-1].x = x
offCurves[-1].y = y
elif value != (0, 0):
segment.type = "curve"
offCurves = segment.offCurve
offCurves[-1].x = x
offCurves[-1].y = y | Subclasses may override this method. | Below is the the instruction that describes the task:
### Input:
Subclasses may override this method.
### Response:
def _set_bcpIn(self, value):
"""
Subclasses may override this method.
"""
x, y = absoluteBCPIn(self.anchor, value)
segment = self._segment
if segment.type == "move" and value != (0, 0):
raise FontPartsError(("Cannot set the bcpIn for the first "
"point in an open contour.")
)
else:
offCurves = segment.offCurve
if offCurves:
# if the two off curves are located at the anchor
# coordinates we can switch to a line segment type.
if value == (0, 0) and self.bcpOut == (0, 0):
segment.type = "line"
segment.smooth = False
else:
offCurves[-1].x = x
offCurves[-1].y = y
elif value != (0, 0):
segment.type = "curve"
offCurves = segment.offCurve
offCurves[-1].x = x
offCurves[-1].y = y |
def set_(key, value, profile=None):
'''
Set a key/value pair in memcached
'''
conn = salt.utils.memcached.get_conn(profile)
time = profile.get('expire', DEFAULT_EXPIRATION)
return salt.utils.memcached.set_(conn, key, value, time=time) | Set a key/value pair in memcached | Below is the the instruction that describes the task:
### Input:
Set a key/value pair in memcached
### Response:
def set_(key, value, profile=None):
'''
Set a key/value pair in memcached
'''
conn = salt.utils.memcached.get_conn(profile)
time = profile.get('expire', DEFAULT_EXPIRATION)
return salt.utils.memcached.set_(conn, key, value, time=time) |
async def connect(self, conn_id, connection_string):
"""Connect to a device.
See :meth:`AbstractDeviceAdapter.connect`.
"""
self._ensure_connection(conn_id, False)
msg = dict(connection_string=connection_string)
await self._send_command(OPERATIONS.CONNECT, msg, COMMANDS.ConnectResponse)
self._setup_connection(conn_id, connection_string) | Connect to a device.
See :meth:`AbstractDeviceAdapter.connect`. | Below is the the instruction that describes the task:
### Input:
Connect to a device.
See :meth:`AbstractDeviceAdapter.connect`.
### Response:
async def connect(self, conn_id, connection_string):
"""Connect to a device.
See :meth:`AbstractDeviceAdapter.connect`.
"""
self._ensure_connection(conn_id, False)
msg = dict(connection_string=connection_string)
await self._send_command(OPERATIONS.CONNECT, msg, COMMANDS.ConnectResponse)
self._setup_connection(conn_id, connection_string) |
def process_results(self, results=None, **value):
"""take results list of all events and put them in a dict"""
channels = []
for res in results:
channels.extend(res.pop('channels', '').split())
value.update(res)
value['channels'] = channels
value['success'] = value.get('retcode') == '318'
return value | take results list of all events and put them in a dict | Below is the the instruction that describes the task:
### Input:
take results list of all events and put them in a dict
### Response:
def process_results(self, results=None, **value):
"""take results list of all events and put them in a dict"""
channels = []
for res in results:
channels.extend(res.pop('channels', '').split())
value.update(res)
value['channels'] = channels
value['success'] = value.get('retcode') == '318'
return value |
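A self-contained sketch of the aggregation performed above, with made-up result dictionaries shaped like the events this method expects:

results = [
    {'channels': '#dev #ops', 'retcode': '318'},
    {'channels': '#random', 'nick': 'alice'},
]

channels = []
value = {}
for res in results:
    channels.extend(res.pop('channels', '').split())
    value.update(res)
value['channels'] = channels
value['success'] = value.get('retcode') == '318'
# {'retcode': '318', 'nick': 'alice',
#  'channels': ['#dev', '#ops', '#random'], 'success': True}
print(value)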
def new_output(output_type=None, output_text=None, output_png=None,
output_html=None, output_svg=None, output_latex=None, output_json=None,
output_javascript=None, output_jpeg=None, prompt_number=None,
etype=None, evalue=None, traceback=None):
"""Create a new code cell with input and output"""
output = NotebookNode()
if output_type is not None:
output.output_type = unicode(output_type)
if output_type != 'pyerr':
if output_text is not None:
output.text = unicode(output_text)
if output_png is not None:
output.png = bytes(output_png)
if output_jpeg is not None:
output.jpeg = bytes(output_jpeg)
if output_html is not None:
output.html = unicode(output_html)
if output_svg is not None:
output.svg = unicode(output_svg)
if output_latex is not None:
output.latex = unicode(output_latex)
if output_json is not None:
output.json = unicode(output_json)
if output_javascript is not None:
output.javascript = unicode(output_javascript)
if output_type == u'pyout':
if prompt_number is not None:
output.prompt_number = int(prompt_number)
if output_type == u'pyerr':
if etype is not None:
output.etype = unicode(etype)
if evalue is not None:
output.evalue = unicode(evalue)
if traceback is not None:
output.traceback = [unicode(frame) for frame in list(traceback)]
return output | Create a new code cell with input and output | Below is the the instruction that describes the task:
### Input:
Create a new code cell with input and output
### Response:
def new_output(output_type=None, output_text=None, output_png=None,
output_html=None, output_svg=None, output_latex=None, output_json=None,
output_javascript=None, output_jpeg=None, prompt_number=None,
etype=None, evalue=None, traceback=None):
"""Create a new code cell with input and output"""
output = NotebookNode()
if output_type is not None:
output.output_type = unicode(output_type)
if output_type != 'pyerr':
if output_text is not None:
output.text = unicode(output_text)
if output_png is not None:
output.png = bytes(output_png)
if output_jpeg is not None:
output.jpeg = bytes(output_jpeg)
if output_html is not None:
output.html = unicode(output_html)
if output_svg is not None:
output.svg = unicode(output_svg)
if output_latex is not None:
output.latex = unicode(output_latex)
if output_json is not None:
output.json = unicode(output_json)
if output_javascript is not None:
output.javascript = unicode(output_javascript)
if output_type == u'pyout':
if prompt_number is not None:
output.prompt_number = int(prompt_number)
if output_type == u'pyerr':
if etype is not None:
output.etype = unicode(etype)
if evalue is not None:
output.evalue = unicode(evalue)
if traceback is not None:
output.traceback = [unicode(frame) for frame in list(traceback)]
return output |
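A minimal sketch of the structure being assembled above, using a plain dict in place of NotebookNode (a hypothetical stand-in with the same keys) and covering only the text and prompt_number fields:

def sketch_output(output_type, output_text=None, prompt_number=None):
    out = {'output_type': output_type}
    if output_text is not None:
        out['text'] = output_text
    if output_type == 'pyout' and prompt_number is not None:
        out['prompt_number'] = int(prompt_number)
    return out

# {'output_type': 'pyout', 'text': '42', 'prompt_number': 1}
print(sketch_output('pyout', output_text='42', prompt_number=1))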
def parse_qualifier(parser, event, node): #pylint: disable=unused-argument
"""Parse CIM/XML QUALIFIER element and return CIMQualifier"""
name = _get_required_attribute(node, 'NAME')
cim_type = _get_required_attribute(node, 'TYPE')
# TODO 2/16 KS: Why is propagated not used?
propagated = _get_attribute(node, 'PROPAGATED')
(next_event, next_node) = six.next(parser)
if _is_end(next_event, next_node, 'QUALIFIER'):
return CIMQualifier(name, None, type=cim_type)
if _is_start(next_event, next_node, 'VALUE'):
value = parse_value(parser, next_event, next_node)
elif _is_start(next_event, next_node, 'VALUE.ARRAY'):
#pylint: disable=redefined-variable-type
# redefined from str to list.
value = parse_value_array(parser, next_event, next_node)
else:
raise ParseError('Expecting (VALUE | VALUE.ARRAY)')
result = CIMQualifier(name, tocimobj(cim_type, value))
_get_end_event(parser, 'QUALIFIER')
return result | Parse CIM/XML QUALIFIER element and return CIMQualifier | Below is the the instruction that describes the task:
### Input:
Parse CIM/XML QUALIFIER element and return CIMQualifier
### Response:
def parse_qualifier(parser, event, node): #pylint: disable=unused-argument
"""Parse CIM/XML QUALIFIER element and return CIMQualifier"""
name = _get_required_attribute(node, 'NAME')
cim_type = _get_required_attribute(node, 'TYPE')
# TODO 2/16 KS: Why is propagated not used?
propagated = _get_attribute(node, 'PROPAGATED')
(next_event, next_node) = six.next(parser)
if _is_end(next_event, next_node, 'QUALIFIER'):
return CIMQualifier(name, None, type=cim_type)
if _is_start(next_event, next_node, 'VALUE'):
value = parse_value(parser, next_event, next_node)
elif _is_start(next_event, next_node, 'VALUE.ARRAY'):
#pylint: disable=redefined-variable-type
# redefined from str to list.
value = parse_value_array(parser, next_event, next_node)
else:
raise ParseError('Expecting (VALUE | VALUE.ARRAY)')
result = CIMQualifier(name, tocimobj(cim_type, value))
_get_end_event(parser, 'QUALIFIER')
return result |
def digital_write(pin_num, value, hardware_addr=0):
"""Writes the value to the input pin specified.
.. note:: This function is for familiarality with users of other types of
IO board. Consider accessing the ``output_pins`` attribute of a
PiFaceDigital object:
>>> pfd = PiFaceDigital(hardware_addr)
>>> pfd.output_pins[pin_num].value = 1
:param pin_num: The pin number to write to.
:type pin_num: int
:param value: The value to write.
:type value: int
:param hardware_addr: The board to read from (default: 0)
:type hardware_addr: int
"""
_get_pifacedigital(hardware_addr).output_pins[pin_num].value = value | Writes the value to the input pin specified.
.. note:: This function is for familiarality with users of other types of
IO board. Consider accessing the ``output_pins`` attribute of a
PiFaceDigital object:
>>> pfd = PiFaceDigital(hardware_addr)
>>> pfd.output_pins[pin_num].value = 1
:param pin_num: The pin number to write to.
:type pin_num: int
:param value: The value to write.
:type value: int
:param hardware_addr: The board to read from (default: 0)
:type hardware_addr: int | Below is the the instruction that describes the task:
### Input:
Writes the value to the input pin specified.
.. note:: This function is for familiarality with users of other types of
IO board. Consider accessing the ``output_pins`` attribute of a
PiFaceDigital object:
>>> pfd = PiFaceDigital(hardware_addr)
>>> pfd.output_pins[pin_num].value = 1
:param pin_num: The pin number to write to.
:type pin_num: int
:param value: The value to write.
:type value: int
:param hardware_addr: The board to read from (default: 0)
:type hardware_addr: int
### Response:
def digital_write(pin_num, value, hardware_addr=0):
"""Writes the value to the input pin specified.
.. note:: This function is for familiarality with users of other types of
IO board. Consider accessing the ``output_pins`` attribute of a
PiFaceDigital object:
>>> pfd = PiFaceDigital(hardware_addr)
>>> pfd.output_pins[pin_num].value = 1
:param pin_num: The pin number to write to.
:type pin_num: int
:param value: The value to write.
:type value: int
:param hardware_addr: The board to read from (default: 0)
:type hardware_addr: int
"""
_get_pifacedigital(hardware_addr).output_pins[pin_num].value = value |
def color(self, key=None):
"""
Returns the color for this data set.
:return <QColor>
"""
if key is not None:
return self._colorMap.get(nativestring(key), self._color)
return self._color | Returns the color for this data set.
:return <QColor> | Below is the the instruction that describes the task:
### Input:
Returns the color for this data set.
:return <QColor>
### Response:
def color(self, key=None):
"""
Returns the color for this data set.
:return <QColor>
"""
if key is not None:
return self._colorMap.get(nativestring(key), self._color)
return self._color |
def get_downloader(session, class_name, args):
"""
Decides which downloader to use.
"""
external = {
'wget': WgetDownloader,
'curl': CurlDownloader,
'aria2': Aria2Downloader,
'axel': AxelDownloader,
}
for bin, class_ in iteritems(external):
if getattr(args, bin):
return class_(session, bin=getattr(args, bin),
downloader_arguments=args.downloader_arguments)
return NativeDownloader(session) | Decides which downloader to use. | Below is the the instruction that describes the task:
### Input:
Decides which downloader to use.
### Response:
def get_downloader(session, class_name, args):
"""
Decides which downloader to use.
"""
external = {
'wget': WgetDownloader,
'curl': CurlDownloader,
'aria2': Aria2Downloader,
'axel': AxelDownloader,
}
for bin, class_ in iteritems(external):
if getattr(args, bin):
return class_(session, bin=getattr(args, bin),
downloader_arguments=args.downloader_arguments)
return NativeDownloader(session) |
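A self-contained sketch of the same dispatch idea, using hypothetical downloader classes and an argparse-style namespace; the real external classes above are constructed with the same session, bin and downloader_arguments parameters.

from argparse import Namespace

class FakeWget(object):
    def __init__(self, session, bin=None, downloader_arguments=None):
        self.bin = bin

class FakeNative(object):
    def __init__(self, session):
        self.session = session

def pick_downloader(session, args):
    external = {'wget': FakeWget}
    for name, cls in external.items():
        if getattr(args, name, None):
            return cls(session, bin=getattr(args, name),
                       downloader_arguments=getattr(args, 'downloader_arguments', None))
    return FakeNative(session)

print(type(pick_downloader(None, Namespace(wget='/usr/bin/wget'))).__name__)  # FakeWget
print(type(pick_downloader(None, Namespace(wget=None))).__name__)             # FakeNative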
def _verify_sector_identifier(self, request):
"""
Verify `sector_identifier_uri` is reachable and that it contains
`redirect_uri`s.
:param request: Provider registration request
:return: si_redirects, sector_id
:raises: InvalidSectorIdentifier
"""
si_url = request["sector_identifier_uri"]
try:
res = self.endpoint_context.httpc.get(si_url)
except Exception as err:
logger.error(err)
res = None
if not res:
raise InvalidSectorIdentifier("Couldn't read from sector_identifier_uri")
logger.debug("sector_identifier_uri => %s", sanitize(res.text))
try:
si_redirects = json.loads(res.text)
except ValueError:
raise InvalidSectorIdentifier(
"Error deserializing sector_identifier_uri content")
if "redirect_uris" in request:
logger.debug("redirect_uris: %s", request["redirect_uris"])
for uri in request["redirect_uris"]:
if uri not in si_redirects:
raise InvalidSectorIdentifier(
"redirect_uri missing from sector_identifiers")
return si_redirects, si_url | Verify `sector_identifier_uri` is reachable and that it contains
`redirect_uri`s.
:param request: Provider registration request
:return: si_redirects, sector_id
:raises: InvalidSectorIdentifier | Below is the the instruction that describes the task:
### Input:
Verify `sector_identifier_uri` is reachable and that it contains
`redirect_uri`s.
:param request: Provider registration request
:return: si_redirects, sector_id
:raises: InvalidSectorIdentifier
### Response:
def _verify_sector_identifier(self, request):
"""
Verify `sector_identifier_uri` is reachable and that it contains
`redirect_uri`s.
:param request: Provider registration request
:return: si_redirects, sector_id
:raises: InvalidSectorIdentifier
"""
si_url = request["sector_identifier_uri"]
try:
res = self.endpoint_context.httpc.get(si_url)
except Exception as err:
logger.error(err)
res = None
if not res:
raise InvalidSectorIdentifier("Couldn't read from sector_identifier_uri")
logger.debug("sector_identifier_uri => %s", sanitize(res.text))
try:
si_redirects = json.loads(res.text)
except ValueError:
raise InvalidSectorIdentifier(
"Error deserializing sector_identifier_uri content")
if "redirect_uris" in request:
logger.debug("redirect_uris: %s", request["redirect_uris"])
for uri in request["redirect_uris"]:
if uri not in si_redirects:
raise InvalidSectorIdentifier(
"redirect_uri missing from sector_identifiers")
return si_redirects, si_url |
def traverse_nodes(self, qids, up=True, down=False, **args):
"""
Traverse (optionally) up and (optionally) down from an input set of nodes
Arguments
---------
qids : list[str]
list of seed node IDs to start from
up : bool
if True, include ancestors
down : bool
if True, include descendants
relations : list[str]
list of relations used to filter
Return
------
list[str]
nodes reachable from qids
"""
g = self.get_filtered_graph(**args)
nodes = set()
for id in qids:
# reflexive - always add self
nodes.add(id)
if down:
nodes.update(nx.descendants(g, id))
if up:
nodes.update(nx.ancestors(g, id))
return nodes | Traverse (optionally) up and (optionally) down from an input set of nodes
Arguments
---------
qids : list[str]
list of seed node IDs to start from
up : bool
if True, include ancestors
down : bool
if True, include descendants
relations : list[str]
list of relations used to filter
Return
------
list[str]
nodes reachable from qids | Below is the the instruction that describes the task:
### Input:
Traverse (optionally) up and (optionally) down from an input set of nodes
Arguments
---------
qids : list[str]
list of seed node IDs to start from
up : bool
if True, include ancestors
down : bool
if True, include descendants
relations : list[str]
list of relations used to filter
Return
------
list[str]
nodes reachable from qids
### Response:
def traverse_nodes(self, qids, up=True, down=False, **args):
"""
Traverse (optionally) up and (optionally) down from an input set of nodes
Arguments
---------
qids : list[str]
list of seed node IDs to start from
up : bool
if True, include ancestors
down : bool
if True, include descendants
relations : list[str]
list of relations used to filter
Return
------
list[str]
nodes reachable from qids
"""
g = self.get_filtered_graph(**args)
nodes = set()
for id in qids:
# reflexive - always add self
nodes.add(id)
if down:
nodes.update(nx.descendants(g, id))
if up:
nodes.update(nx.ancestors(g, id))
return nodes |
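A hedged sketch of the reachability step with a plain networkx DiGraph, mirroring the ancestors/descendants union performed above; the edge list is made up.

import networkx as nx

g = nx.DiGraph()
g.add_edges_from([('leaf', 'mid'), ('mid', 'root')])

seeds = ['mid']
nodes = set()
for n in seeds:
    nodes.add(n)                        # reflexive: always keep the seed
    nodes.update(nx.ancestors(g, n))    # nodes that can reach n ('leaf')
    nodes.update(nx.descendants(g, n))  # nodes reachable from n ('root')
print(sorted(nodes))  # ['leaf', 'mid', 'root']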
def __draw_canvas_cluster(self, ax, dimension, cluster_descr):
"""!
@brief Draw canvas cluster descriptor.
@param[in] ax (Axis): Axis of the canvas where canvas cluster descriptor should be displayed.
@param[in] dimension (uint): Canvas dimension.
@param[in] cluster_descr (canvas_cluster_descr): Canvas cluster descriptor that should be displayed.
@return (fig) Figure where clusters are shown.
"""
cluster = cluster_descr.cluster
data = cluster_descr.data
marker = cluster_descr.marker
markersize = cluster_descr.markersize
color = cluster_descr.color
for item in cluster:
if dimension == 1:
if data is None:
ax.plot(item[0], 0.0, color = color, marker = marker, markersize = markersize)
else:
ax.plot(data[item][0], 0.0, color = color, marker = marker, markersize = markersize)
elif dimension == 2:
if data is None:
ax.plot(item[0], item[1], color = color, marker = marker, markersize = markersize)
else:
ax.plot(data[item][0], data[item][1], color = color, marker = marker, markersize = markersize)
elif dimension == 3:
if data is None:
ax.scatter(item[0], item[1], item[2], c = color, marker = marker, s = markersize)
else:
ax.scatter(data[item][0], data[item][1], data[item][2], c = color, marker = marker, s = markersize) | !
@brief Draw canvas cluster descriptor.
@param[in] ax (Axis): Axis of the canvas where canvas cluster descriptor should be displayed.
@param[in] dimension (uint): Canvas dimension.
@param[in] cluster_descr (canvas_cluster_descr): Canvas cluster descriptor that should be displayed.
@return (fig) Figure where clusters are shown. | Below is the the instruction that describes the task:
### Input:
!
@brief Draw canvas cluster descriptor.
@param[in] ax (Axis): Axis of the canvas where canvas cluster descriptor should be displayed.
@param[in] dimension (uint): Canvas dimension.
@param[in] cluster_descr (canvas_cluster_descr): Canvas cluster descriptor that should be displayed.
@return (fig) Figure where clusters are shown.
### Response:
def __draw_canvas_cluster(self, ax, dimension, cluster_descr):
"""!
@brief Draw canvas cluster descriptor.
@param[in] ax (Axis): Axis of the canvas where canvas cluster descriptor should be displayed.
@param[in] dimension (uint): Canvas dimension.
@param[in] cluster_descr (canvas_cluster_descr): Canvas cluster descriptor that should be displayed.
@return (fig) Figure where clusters are shown.
"""
cluster = cluster_descr.cluster
data = cluster_descr.data
marker = cluster_descr.marker
markersize = cluster_descr.markersize
color = cluster_descr.color
for item in cluster:
if dimension == 1:
if data is None:
ax.plot(item[0], 0.0, color = color, marker = marker, markersize = markersize)
else:
ax.plot(data[item][0], 0.0, color = color, marker = marker, markersize = markersize)
elif dimension == 2:
if data is None:
ax.plot(item[0], item[1], color = color, marker = marker, markersize = markersize)
else:
ax.plot(data[item][0], data[item][1], color = color, marker = marker, markersize = markersize)
elif dimension == 3:
if data is None:
ax.scatter(item[0], item[1], item[2], c = color, marker = marker, s = markersize)
else:
ax.scatter(data[item][0], data[item][1], data[item][2], c = color, marker = marker, s = markersize) |
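As a rough standalone illustration of the same per-cluster plotting loop (2D case only), with made-up sample points and styles standing in for the real canvas cluster descriptors:

import matplotlib.pyplot as plt

data = [[0.1, 0.2], [0.3, 0.1], [2.0, 2.1], [2.2, 1.9]]
clusters = [[0, 1], [2, 3]]                  # each cluster stores indices into data
styles = [('o', 'red'), ('s', 'blue')]       # (marker, color) per cluster

fig, ax = plt.subplots()
for cluster, (marker, color) in zip(clusters, styles):
    for item in cluster:
        ax.plot(data[item][0], data[item][1], color=color, marker=marker, markersize=5)
plt.show()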
def from_dict(input_dict, data=None):
"""
Instantiate an SparseGPClassification object using the information
in input_dict (built by the to_dict method).
:param data: It is used to provide X and Y for the case when the model
was saved using save_data=False in to_dict method.
:type data: tuple(:class:`np.ndarray`, :class:`np.ndarray`)
"""
import GPy
m = GPy.core.model.Model.from_dict(input_dict, data)
from copy import deepcopy
sparse_gp = deepcopy(m)
return SparseGPClassification(sparse_gp.X, sparse_gp.Y, sparse_gp.Z, sparse_gp.kern, sparse_gp.likelihood, sparse_gp.inference_method, sparse_gp.mean_function, name='sparse_gp_classification') | Instantiate an SparseGPClassification object using the information
in input_dict (built by the to_dict method).
:param data: It is used to provide X and Y for the case when the model
was saved using save_data=False in to_dict method.
:type data: tuple(:class:`np.ndarray`, :class:`np.ndarray`) | Below is the the instruction that describes the task:
### Input:
Instantiate an SparseGPClassification object using the information
in input_dict (built by the to_dict method).
:param data: It is used to provide X and Y for the case when the model
was saved using save_data=False in to_dict method.
:type data: tuple(:class:`np.ndarray`, :class:`np.ndarray`)
### Response:
def from_dict(input_dict, data=None):
"""
Instantiate an SparseGPClassification object using the information
in input_dict (built by the to_dict method).
:param data: It is used to provide X and Y for the case when the model
was saved using save_data=False in to_dict method.
:type data: tuple(:class:`np.ndarray`, :class:`np.ndarray`)
"""
import GPy
m = GPy.core.model.Model.from_dict(input_dict, data)
from copy import deepcopy
sparse_gp = deepcopy(m)
return SparseGPClassification(sparse_gp.X, sparse_gp.Y, sparse_gp.Z, sparse_gp.kern, sparse_gp.likelihood, sparse_gp.inference_method, sparse_gp.mean_function, name='sparse_gp_classification') |
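This is not the GPy API itself, only a hedged sketch of the save_data=False round trip the docstring describes: when the serialized dict omits X and Y, the data tuple supplies them again at load time. All class and field names below are invented.

import numpy as np

class TinyModel:
    def __init__(self, X, Y, scale=1.0):
        self.X, self.Y, self.scale = X, Y, scale
    def to_dict(self, save_data=True):
        d = {'scale': self.scale}
        if save_data:
            d['X'], d['Y'] = self.X.tolist(), self.Y.tolist()
        return d
    @staticmethod
    def from_dict(input_dict, data=None):
        if 'X' in input_dict:
            X, Y = np.array(input_dict['X']), np.array(input_dict['Y'])
        else:
            X, Y = data                      # data replaces what save_data=False left out
        return TinyModel(X, Y, input_dict['scale'])

X, Y = np.zeros((3, 1)), np.ones((3, 1))
m = TinyModel(X, Y, scale=2.0)
m2 = TinyModel.from_dict(m.to_dict(save_data=False), data=(X, Y))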
def update_repository(self, repository_form=None):
"""Updates an existing repository.
:param repository_form: the form containing the elements to be updated
:type repository_form: ``osid.repository.RepositoryForm``
:raise: ``IllegalState`` -- ``repository_form`` already used in an update transaction
:raise: ``InvalidArgument`` -- the form contains an invalid value
:raise: ``NullArgument`` -- ``repository_form`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
:raise: ``Unsupported`` -- ``repository_form`` did not originate from ``get_repository_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
"""
if repository_form is None:
raise NullArgument()
if not isinstance(repository_form, abc_repository_objects.RepositoryForm):
raise InvalidArgument('argument type is not a RepositoryForm')
if not repository_form.is_for_update():
raise InvalidArgument('form is for create only, not update')
# Check for "sandbox" genus type. Hardcoded for now:
if repository_form._my_map['genusTypeId'] != 'mc3-objectivebank%3Amc3.learning.objectivebank.sandbox%40MIT-OEIT':
raise PermissionDenied('Handcar only supports updating \'sandbox\' type Repositories')
try:
if self._forms[repository_form.get_id().get_identifier()] == UPDATED:
raise IllegalState('form already used in an update transaction')
except KeyError:
raise Unsupported('form did not originate from this session')
if not repository_form.is_valid():
raise InvalidArgument('one or more of the form elements is invalid')
url_path = construct_url('objective_banks')
try:
result = self._put_request(url_path, repository_form._my_map)
except Exception:
raise # OperationFailed
self._forms[repository_form.get_id().get_identifier()] = UPDATED
return objects.Repository(result) | Updates an existing repository.
:param repository_form: the form containing the elements to be updated
:type repository_form: ``osid.repository.RepositoryForm``
:raise: ``IllegalState`` -- ``repository_form`` already used in an update transaction
:raise: ``InvalidArgument`` -- the form contains an invalid value
:raise: ``NullArgument`` -- ``repository_form`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
:raise: ``Unsupported`` -- ``repository_form`` did not originate from ``get_repository_form_for_update()``
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Updates an existing repository.
:param repository_form: the form containing the elements to be updated
:type repository_form: ``osid.repository.RepositoryForm``
:raise: ``IllegalState`` -- ``repository_form`` already used in an update transaction
:raise: ``InvalidArgument`` -- the form contains an invalid value
:raise: ``NullArgument`` -- ``repository_form`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
:raise: ``Unsupported`` -- ``repository_form`` did not originate from ``get_repository_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
### Response:
def update_repository(self, repository_form=None):
"""Updates an existing repository.
:param repository_form: the form containing the elements to be updated
:type repository_form: ``osid.repository.RepositoryForm``
:raise: ``IllegalState`` -- ``repository_form`` already used in an update transaction
:raise: ``InvalidArgument`` -- the form contains an invalid value
:raise: ``NullArgument`` -- ``repository_form`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
:raise: ``Unsupported`` -- ``repository_form`` did not originate from ``get_repository_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
"""
if repository_form is None:
raise NullArgument()
if not isinstance(repository_form, abc_repository_objects.RepositoryForm):
raise InvalidArgument('argument type is not a RepositoryForm')
if not repository_form.is_for_update():
raise InvalidArgument('form is for create only, not update')
# Check for "sandbox" genus type. Hardcoded for now:
if repository_form._my_map['genusTypeId'] != 'mc3-objectivebank%3Amc3.learning.objectivebank.sandbox%40MIT-OEIT':
raise PermissionDenied('Handcar only supports updating \'sandbox\' type Repositories')
try:
if self._forms[repository_form.get_id().get_identifier()] == UPDATED:
raise IllegalState('form already used in an update transaction')
except KeyError:
raise Unsupported('form did not originate from this session')
if not repository_form.is_valid():
raise InvalidArgument('one or more of the form elements is invalid')
url_path = construct_url('objective_banks')
try:
result = self._put_request(url_path, repository_form._my_map)
except Exception:
raise # OperationFailed
self._forms[repository_form.get_id().get_identifier()] = UPDATED
return objects.Repository(result) |
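A stripped-down, hypothetical sketch of the form bookkeeping this method relies on: forms handed out by the session are tracked in a dict, and reusing one after a successful update is rejected. The names and error types here are invented, not the OSID ones.

UPDATED, NOT_UPDATED = 'updated', 'not_updated'

class TinySession:
    def __init__(self):
        self._forms = {}
    def get_form_for_update(self, form_id):
        self._forms[form_id] = NOT_UPDATED
        return form_id
    def update(self, form_id):
        if form_id not in self._forms:
            raise RuntimeError('form did not originate from this session')
        if self._forms[form_id] == UPDATED:
            raise RuntimeError('form already used in an update transaction')
        # ... validate the form and send the PUT request here ...
        self._forms[form_id] = UPDATED

session = TinySession()
form = session.get_form_for_update('repo-1')
session.update(form)      # succeeds and marks the form as used
# session.update(form)    # would raise: already used in an update transaction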
def list_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
List information about available models.
Uses the 'model_persister' from the configuration to display a list of
models and their metadata.
Usage:
pld-list [options]
Options:
-h --help Show this screen.
"""
docopt(list_cmd.__doc__, argv=argv)
initialize_config(__mode__='fit')
list() | \
List information about available models.
Uses the 'model_persister' from the configuration to display a list of
models and their metadata.
Usage:
pld-list [options]
Options:
-h --help Show this screen. | Below is the the instruction that describes the task:
### Input:
\
List information about available models.
Uses the 'model_persister' from the configuration to display a list of
models and their metadata.
Usage:
pld-list [options]
Options:
-h --help Show this screen.
### Response:
def list_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
List information about available models.
Uses the 'model_persister' from the configuration to display a list of
models and their metadata.
Usage:
pld-list [options]
Options:
-h --help Show this screen.
"""
docopt(list_cmd.__doc__, argv=argv)
initialize_config(__mode__='fit')
list() |
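For reference, a hedged sketch of the same docopt pattern as a standalone script; the tool name and its single option are placeholders, not part of the original project:

"""Usage:
  my-tool [options]

Options:
  -h --help  Show this screen.
"""
import sys
from docopt import docopt

def main(argv=sys.argv[1:]):
    arguments = docopt(__doc__, argv=argv)   # parses argv against the usage text above
    print(arguments)

if __name__ == '__main__':
    main()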
def extend_selection_to_next(self, what='word', direction='left'):
"""
Extend selection to next *what* ('word' or 'character')
toward *direction* ('left' or 'right')
"""
self.__move_cursor_anchor(what, direction, QTextCursor.KeepAnchor) | Extend selection to next *what* ('word' or 'character')
toward *direction* ('left' or 'right') | Below is the the instruction that describes the task:
### Input:
Extend selection to next *what* ('word' or 'character')
toward *direction* ('left' or 'right')
### Response:
def extend_selection_to_next(self, what='word', direction='left'):
"""
Extend selection to next *what* ('word' or 'character')
toward *direction* ('left' or 'right')
"""
self.__move_cursor_anchor(what, direction, QTextCursor.KeepAnchor) |
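A small, hedged illustration of what KeepAnchor does with a bare QTextCursor (PyQt5 assumed; this is not the editor's own helper):

import sys
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QTextDocument, QTextCursor

app = QApplication(sys.argv)
doc = QTextDocument("hello world")
cursor = QTextCursor(doc)
cursor.movePosition(QTextCursor.NextWord, QTextCursor.KeepAnchor)  # anchor stays put, so text gets selected
print(cursor.selectedText())   # "hello "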
def draw_rivers_on_image(world, target, factor=1):
"""Draw only the rivers, it expect the background to be in place
"""
for y in range(world.height):
for x in range(world.width):
if world.is_land((x, y)) and (world.layers['river_map'].data[y, x] > 0.0):
for dx in range(factor):
for dy in range(factor):
target.set_pixel(x * factor + dx, y * factor + dy, (0, 0, 128, 255))
if world.is_land((x, y)) and (world.layers['lake_map'].data[y, x] != 0):
for dx in range(factor):
for dy in range(factor):
target.set_pixel(x * factor + dx, y * factor + dy, (0, 100, 128, 255)) | Draw only the rivers, it expect the background to be in place | Below is the the instruction that describes the task:
### Input:
Draw only the rivers, it expect the background to be in place
### Response:
def draw_rivers_on_image(world, target, factor=1):
"""Draw only the rivers, it expect the background to be in place
"""
for y in range(world.height):
for x in range(world.width):
if world.is_land((x, y)) and (world.layers['river_map'].data[y, x] > 0.0):
for dx in range(factor):
for dy in range(factor):
target.set_pixel(x * factor + dx, y * factor + dy, (0, 0, 128, 255))
if world.is_land((x, y)) and (world.layers['lake_map'].data[y, x] != 0):
for dx in range(factor):
for dy in range(factor):
target.set_pixel(x * factor + dx, y * factor + dy, (0, 100, 128, 255)) |
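The factor loops above simply paint each source cell as a factor x factor block in the target; a self-contained sketch of that idea with a plain 2D list standing in for the image object (all names invented):

def upscale_mask(mask, factor, value):
    height, width = len(mask), len(mask[0])
    target = [[0] * (width * factor) for _ in range(height * factor)]
    for y in range(height):
        for x in range(width):
            if mask[y][x]:
                for dx in range(factor):
                    for dy in range(factor):
                        target[y * factor + dy][x * factor + dx] = value
    return target

print(upscale_mask([[0, 1], [1, 0]], 2, 9))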
def _shru16(ins):
''' Logical right shift 16bit unsigned integer.
The result is pushed onto the stack.
Optimizations:
* If 2nd op is 0 then
do nothing
* If 2nd op is 1
Shift Right Arithmetic
'''
op1, op2 = tuple(ins.quad[2:])
if is_int(op2):
op = int16(op2)
if op == 0:
return []
output = _16bit_oper(op1)
if op == 1:
output.append('srl h')
output.append('rr l')
output.append('push hl')
return output
output.append('ld b, %i' % op)
else:
output = _8bit_oper(op2)
output.append('ld b, a')
output.extend(_16bit_oper(op1))
label = tmp_label()
output.append('%s:' % label)
output.append('srl h')
output.append('rr l')
output.append('djnz %s' % label)
output.append('push hl')
return output | Logical right shift 16bit unsigned integer.
The result is pushed onto the stack.
Optimizations:
* If 2nd op is 0 then
do nothing
* If 2nd op is 1
Shift Right Arithmetic | Below is the the instruction that describes the task:
### Input:
Logical right shift 16bit unsigned integer.
The result is pushed onto the stack.
Optimizations:
* If 2nd op is 0 then
do nothing
* If 2nd op is 1
Shift Right Arithmetic
### Response:
def _shru16(ins):
''' Logical right shift 16bit unsigned integer.
The result is pushed onto the stack.
Optimizations:
* If 2nd op is 0 then
do nothing
* If 2nd op is 1
Shift Right Arithmetic
'''
op1, op2 = tuple(ins.quad[2:])
if is_int(op2):
op = int16(op2)
if op == 0:
return []
output = _16bit_oper(op1)
if op == 1:
output.append('srl h')
output.append('rr l')
output.append('push hl')
return output
output.append('ld b, %i' % op)
else:
output = _8bit_oper(op2)
output.append('ld b, a')
output.extend(_16bit_oper(op1))
label = tmp_label()
output.append('%s:' % label)
output.append('srl h')
output.append('rr l')
output.append('djnz %s' % label)
output.append('push hl')
return output |
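A standalone, hedged sketch of the emit strategy the docstring lists: shift counts of 0 and 1 are special-cased, anything else falls back to a djnz loop. The label name and the pure-Python framing are invented for illustration.

def shift_right_16(count):
    if count == 0:
        return []                            # nothing to emit at all
    if count == 1:
        return ['srl h', 'rr l']             # one inline shift, no loop overhead
    return ['ld b, %i' % count,              # b holds the remaining shift count
            '__LOOP0:',
            'srl h',
            'rr l',
            'djnz __LOOP0']

print(shift_right_16(0))   # []
print(shift_right_16(1))   # ['srl h', 'rr l']
print(shift_right_16(3))   # the looped form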
def append_known_secrets(self): # type: () -> None
"""
Read key-value pair files with secrets. For example, .conf and .ini files.
:return:
"""
for file_name in self.files:
if "~" in file_name:
file_name = os.path.expanduser(file_name)
if not os.path.isfile(file_name):
print(
"Don't have "
+ Back.BLACK
+ Fore.YELLOW
+ file_name
+ ", won't use."
)
continue
with open(os.path.expanduser(file_name), "r") as file:
for line in file:
if line and "=" in line:
possible = line.split("=")[1].strip(" \"'\n")
if len(possible) > 4 and possible not in self.false_positives:
self.secrets.append(possible) | Read key-value pair files with secrets. For example, .conf and .ini files.
:return: | Below is the the instruction that describes the task:
### Input:
Read key-value pair files with secrets. For example, .conf and .ini files.
:return:
### Response:
def append_known_secrets(self): # type: () -> None
"""
Read key-value pair files with secrets. For example, .conf and .ini files.
:return:
"""
for file_name in self.files:
if "~" in file_name:
file_name = os.path.expanduser(file_name)
if not os.path.isfile(file_name):
print(
"Don't have "
+ Back.BLACK
+ Fore.YELLOW
+ file_name
+ ", won't use."
)
continue
with open(os.path.expanduser(file_name), "r") as file:
for line in file:
if line and "=" in line:
possible = line.split("=")[1].strip(" \"'\n")
if len(possible) > 4 and possible not in self.false_positives:
self.secrets.append(possible) |
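A minimal sketch of the same key=value scan, using an in-memory list of lines in place of the .conf/.ini files (the sample values are made up):

lines = ['password = "hunter42"\n', '# a comment\n', 'timeout=30\n']
false_positives = {'changeme'}
secrets = []
for line in lines:
    if line and '=' in line:
        possible = line.split('=')[1].strip(' "\'\n')
        if len(possible) > 4 and possible not in false_positives:
            secrets.append(possible)
print(secrets)   # ['hunter42']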
def p_do_loop_while(p):
""" statement : do_start program_co label_loop WHILE expr
| do_start label_loop WHILE expr
| DO label_loop WHILE expr
"""
if len(p) == 6:
q = make_block(p[2], p[3])
r = p[5]
else:
q = p[2]
r = p[4]
if p[1] == 'DO':
gl.LOOPS.append(('DO',))
p[0] = make_sentence('DO_WHILE', r, q)
gl.LOOPS.pop()
if is_number(r):
api.errmsg.warning_condition_is_always(p.lineno(3), bool(r.value))
if q is None:
api.errmsg.warning_empty_loop(p.lineno(3)) | statement : do_start program_co label_loop WHILE expr
| do_start label_loop WHILE expr
| DO label_loop WHILE expr | Below is the the instruction that describes the task:
### Input:
statement : do_start program_co label_loop WHILE expr
| do_start label_loop WHILE expr
| DO label_loop WHILE expr
### Response:
def p_do_loop_while(p):
""" statement : do_start program_co label_loop WHILE expr
| do_start label_loop WHILE expr
| DO label_loop WHILE expr
"""
if len(p) == 6:
q = make_block(p[2], p[3])
r = p[5]
else:
q = p[2]
r = p[4]
if p[1] == 'DO':
gl.LOOPS.append(('DO',))
p[0] = make_sentence('DO_WHILE', r, q)
gl.LOOPS.pop()
if is_number(r):
api.errmsg.warning_condition_is_always(p.lineno(3), bool(r.value))
if q is None:
api.errmsg.warning_empty_loop(p.lineno(3)) |
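Outside of PLY, the len(p) dispatch reads like the hedged sketch below, where a plain list plays the role of the production object and the p[0]-style result is simply returned (the node shapes are invented):

def do_loop_while(p):
    # p[1:] are the matched symbols; len(p) tells which alternative of the rule fired
    if len(p) == 6:                  # do_start program_co label_loop WHILE expr
        body, cond = ('block', p[2], p[3]), p[5]
    else:                            # the two five-symbol alternatives
        body, cond = p[2], p[4]
    p[0] = ('DO_WHILE', cond, body)
    return p[0]

print(do_loop_while([None, 'DO', 'label_loop', 'WHILE', ('expr', 'a > 0')]))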