code (string, lengths 75-104k) | docstring (string, lengths 1-46.9k) | text (string, lengths 164-112k) |
---|---|---|
def _gotitem(self,
key: Union[str, List[str]],
ndim: int,
subset: Optional[Union[Series, ABCDataFrame]] = None,
) -> Union[Series, ABCDataFrame]:
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
if subset is None:
subset = self
elif subset.ndim == 1: # is Series
return subset
# TODO: _shallow_copy(subset)?
return subset[key] | Sub-classes to define. Return a sliced object.
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on | Below is the instruction that describes the task:
### Input:
Sub-classes to define. Return a sliced object.
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
### Response:
def _gotitem(self,
key: Union[str, List[str]],
ndim: int,
subset: Optional[Union[Series, ABCDataFrame]] = None,
) -> Union[Series, ABCDataFrame]:
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
if subset is None:
subset = self
elif subset.ndim == 1: # is Series
return subset
# TODO: _shallow_copy(subset)?
return subset[key] |
def _model_unpickle(cls, data):
"""Unpickle a model by retrieving it from the database."""
auto_field_value = data['pk']
try:
obj = cls.objects.get(pk=auto_field_value)
except Exception as e:
if isinstance(e, OperationalError):
# Attempt reconnect, we've probably hit;
# OperationalError(2006, 'MySQL server has gone away')
logger.debug("Caught OperationalError, closing database connection.", exc_info=e)
from django.db import connection
connection.close()
obj = cls.objects.get(pk=auto_field_value)
else:
raise
return obj | Unpickle a model by retrieving it from the database. | Below is the instruction that describes the task:
### Input:
Unpickle a model by retrieving it from the database.
### Response:
def _model_unpickle(cls, data):
"""Unpickle a model by retrieving it from the database."""
auto_field_value = data['pk']
try:
obj = cls.objects.get(pk=auto_field_value)
except Exception as e:
if isinstance(e, OperationalError):
# Attempt reconnect, we've probably hit;
# OperationalError(2006, 'MySQL server has gone away')
logger.debug("Caught OperationalError, closing database connection.", exc_info=e)
from django.db import connection
connection.close()
obj = cls.objects.get(pk=auto_field_value)
else:
raise
return obj |
def all(cls, domain=None):
"""
Return all sites
@param domain: The domain to filter by
@type domain: Domain
@rtype: list of Site
"""
Site = cls
site = Session.query(Site)
if domain:
site = site.filter(Site.domain == domain)  # filter() returns a new query; keep the result
return site.all() | Return all sites
@param domain: The domain to filter by
@type domain: Domain
@rtype: list of Site | Below is the instruction that describes the task:
### Input:
Return all sites
@param domain: The domain to filter by
@type domain: Domain
@rtype: list of Site
### Response:
def all(cls, domain=None):
"""
Return all sites
@param domain: The domain to filter by
@type domain: Domain
@rtype: list of Site
"""
Site = cls
site = Session.query(Site)
if domain:
site = site.filter(Site.domain == domain)  # filter() returns a new query; keep the result
return site.all() |
def list(self):
'''Returns the `list` representation of this Key.
Note that this method assumes the key is immutable.
'''
if not self._list:
self._list = list(map(Namespace, self._string.split('/')))  # materialise so repeated calls reuse the list
return self._list | Returns the `list` representation of this Key.
Note that this method assumes the key is immutable. | Below is the instruction that describes the task:
### Input:
Returns the `list` representation of this Key.
Note that this method assumes the key is immutable.
### Response:
def list(self):
'''Returns the `list` representation of this Key.
Note that this method assumes the key is immutable.
'''
if not self._list:
self._list = list(map(Namespace, self._string.split('/')))  # materialise so repeated calls reuse the list
return self._list |
def get(self, endpoint):
'''Return the result of a GET request to `endpoint` on boatd'''
json_body = urlopen(self.url(endpoint)).read().decode('utf-8')
return json.loads(json_body) | Return the result of a GET request to `endpoint` on boatd | Below is the instruction that describes the task:
### Input:
Return the result of a GET request to `endpoint` on boatd
### Response:
def get(self, endpoint):
'''Return the result of a GET request to `endpoint` on boatd'''
json_body = urlopen(self.url(endpoint)).read().decode('utf-8')
return json.loads(json_body) |
def conversions(self):
"""
Returns a string showing the available conversions.
Useful tool in interactive mode.
"""
return "\n".join(str(self.to(unit)) for unit in self.supported_units) | Returns a string showing the available conversions.
Useful tool in interactive mode. | Below is the instruction that describes the task:
### Input:
Returns a string showing the available conversions.
Useful tool in interactive mode.
### Response:
def conversions(self):
"""
Returns a string showing the available conversions.
Useful tool in interactive mode.
"""
return "\n".join(str(self.to(unit)) for unit in self.supported_units) |
def _bbox(nodes):
"""Get the bounding box for set of points.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Args:
nodes (numpy.ndarray): A set of points.
Returns:
Tuple[float, float, float, float]: The left, right,
bottom and top bounds for the box.
"""
left, bottom = np.min(nodes, axis=1)
right, top = np.max(nodes, axis=1)
return left, right, bottom, top | Get the bounding box for set of points.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Args:
nodes (numpy.ndarray): A set of points.
Returns:
Tuple[float, float, float, float]: The left, right,
bottom and top bounds for the box. | Below is the instruction that describes the task:
### Input:
Get the bounding box for set of points.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Args:
nodes (numpy.ndarray): A set of points.
Returns:
Tuple[float, float, float, float]: The left, right,
bottom and top bounds for the box.
### Response:
def _bbox(nodes):
"""Get the bounding box for set of points.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Args:
nodes (numpy.ndarray): A set of points.
Returns:
Tuple[float, float, float, float]: The left, right,
bottom and top bounds for the box.
"""
left, bottom = np.min(nodes, axis=1)
right, top = np.max(nodes, axis=1)
return left, right, bottom, top |
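Since the `axis=1` reductions take minima and maxima across columns, `nodes` is expected to hold 2-D points column-wise (row 0 = x-coordinates, row 1 = y-coordinates). A minimal worked example with made-up coordinates:

```python
import numpy as np

def _bbox(nodes):
    # Same body as the row above; nodes holds 2-D points as columns.
    left, bottom = np.min(nodes, axis=1)
    right, top = np.max(nodes, axis=1)
    return left, right, bottom, top

# Three points (0, 3), (2, 5) and (1, 4), stored column-wise.
nodes = np.array([
    [0.0, 2.0, 1.0],   # x-coordinates
    [3.0, 5.0, 4.0],   # y-coordinates
])
left, right, bottom, top = _bbox(nodes)
print(left, right, bottom, top)   # 0.0 2.0 3.0 5.0
```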
def on_to_state_edited(self, renderer, path, new_state_identifier):
"""Connects the outcome with a transition to the newly set state
:param Gtk.CellRendererText renderer: The cell renderer that was edited
:param str path: The path string of the renderer
:param str new_state_identifier: An identifier for the new state that was selected
"""
def do_self_transition_check(t_id, new_state_identifier):
# add self transition meta data
if 'self' in new_state_identifier.split('.'):
insert_self_transition_meta_data(self.model, t_id, 'outcomes_widget', combined_action=True)
outcome_id = self.list_store[path][self.ID_STORAGE_ID]
if outcome_id in self.dict_to_other_state or outcome_id in self.dict_to_other_outcome:
transition_parent_state = self.model.parent.state
if outcome_id in self.dict_to_other_state:
t_id = self.dict_to_other_state[outcome_id][2]
else:
t_id = self.dict_to_other_outcome[outcome_id][2]
if new_state_identifier is not None:
to_state_id = new_state_identifier.split('.')[1]
if not transition_parent_state.transitions[t_id].to_state == to_state_id:
try:
transition_parent_state.transitions[t_id].modify_target(to_state=to_state_id)
do_self_transition_check(t_id, new_state_identifier)
except ValueError as e:
logger.warning("The target of transition couldn't be modified: {0}".format(e))
else:
try:
transition_parent_state.remove_transition(t_id)
except AttributeError as e:
logger.warning("The transition couldn't be removed: {0}".format(e))
else: # there is no transition till now
if new_state_identifier is not None and not self.model.state.is_root_state:
transition_parent_state = self.model.parent.state
to_state_id = new_state_identifier.split('.')[1]
try:
t_id = transition_parent_state.add_transition(from_state_id=self.model.state.state_id,
from_outcome=outcome_id,
to_state_id=to_state_id,
to_outcome=None, transition_id=None)
do_self_transition_check(t_id, new_state_identifier)
except (ValueError, TypeError) as e:
logger.warning("The transition couldn't be added: {0}".format(e))
return
else:
logger.debug("outcome-editor got None in to_state-combo-change no transition is added") | Connects the outcome with a transition to the newly set state
:param Gtk.CellRendererText renderer: The cell renderer that was edited
:param str path: The path string of the renderer
:param str new_state_identifier: An identifier for the new state that was selected | Below is the instruction that describes the task:
### Input:
Connects the outcome with a transition to the newly set state
:param Gtk.CellRendererText renderer: The cell renderer that was edited
:param str path: The path string of the renderer
:param str new_state_identifier: An identifier for the new state that was selected
### Response:
def on_to_state_edited(self, renderer, path, new_state_identifier):
"""Connects the outcome with a transition to the newly set state
:param Gtk.CellRendererText renderer: The cell renderer that was edited
:param str path: The path string of the renderer
:param str new_state_identifier: An identifier for the new state that was selected
"""
def do_self_transition_check(t_id, new_state_identifier):
# add self transition meta data
if 'self' in new_state_identifier.split('.'):
insert_self_transition_meta_data(self.model, t_id, 'outcomes_widget', combined_action=True)
outcome_id = self.list_store[path][self.ID_STORAGE_ID]
if outcome_id in self.dict_to_other_state or outcome_id in self.dict_to_other_outcome:
transition_parent_state = self.model.parent.state
if outcome_id in self.dict_to_other_state:
t_id = self.dict_to_other_state[outcome_id][2]
else:
t_id = self.dict_to_other_outcome[outcome_id][2]
if new_state_identifier is not None:
to_state_id = new_state_identifier.split('.')[1]
if not transition_parent_state.transitions[t_id].to_state == to_state_id:
try:
transition_parent_state.transitions[t_id].modify_target(to_state=to_state_id)
do_self_transition_check(t_id, new_state_identifier)
except ValueError as e:
logger.warning("The target of transition couldn't be modified: {0}".format(e))
else:
try:
transition_parent_state.remove_transition(t_id)
except AttributeError as e:
logger.warning("The transition couldn't be removed: {0}".format(e))
else: # there is no transition till now
if new_state_identifier is not None and not self.model.state.is_root_state:
transition_parent_state = self.model.parent.state
to_state_id = new_state_identifier.split('.')[1]
try:
t_id = transition_parent_state.add_transition(from_state_id=self.model.state.state_id,
from_outcome=outcome_id,
to_state_id=to_state_id,
to_outcome=None, transition_id=None)
do_self_transition_check(t_id, new_state_identifier)
except (ValueError, TypeError) as e:
logger.warning("The transition couldn't be added: {0}".format(e))
return
else:
logger.debug("outcome-editor got None in to_state-combo-change no transition is added") |
def run(self, subcmd_name, arg):
"""Run subcmd_name with args using obj for the environment"""
entry=self.lookup(subcmd_name)
if entry:
entry['callback'](arg)
else:
self.cmdproc.undefined_cmd(entry.__class__.name, subcmd_name)
pass
return | Run subcmd_name with args using obj for the environment | Below is the instruction that describes the task:
### Input:
Run subcmd_name with args using obj for the environment
### Response:
def run(self, subcmd_name, arg):
"""Run subcmd_name with args using obj for the environment"""
entry=self.lookup(subcmd_name)
if entry:
entry['callback'](arg)
else:
self.cmdproc.undefined_cmd(entry.__class__.name, subcmd_name)
pass
return |
def get_page_number_from_request(
request, querystring_key=PAGE_LABEL, default=1):
"""Retrieve the current page number from *GET* or *POST* data.
If the page does not exist in *request*, or is not a number,
then *default* number is returned.
"""
try:
return int(request.GET[querystring_key])
except (KeyError, TypeError, ValueError):
return default | Retrieve the current page number from *GET* or *POST* data.
If the page does not exist in *request*, or is not a number,
then *default* number is returned. | Below is the instruction that describes the task:
### Input:
Retrieve the current page number from *GET* or *POST* data.
If the page does not exist in *request*, or is not a number,
then *default* number is returned.
### Response:
def get_page_number_from_request(
request, querystring_key=PAGE_LABEL, default=1):
"""Retrieve the current page number from *GET* or *POST* data.
If the page does not exist in *request*, or is not a number,
then *default* number is returned.
"""
try:
return int(request.GET[querystring_key])
except (KeyError, TypeError, ValueError):
return default |
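A small self-contained check of the fallback behaviour. `FakeRequest` is a stand-in for a Django request that only exposes the `GET` mapping, and `PAGE_LABEL` is assumed to default to `'page'` (the real constant comes from the package settings):

```python
PAGE_LABEL = 'page'  # assumed default; the real value lives in the package settings

def get_page_number_from_request(request, querystring_key=PAGE_LABEL, default=1):
    # Same body as the row above.
    try:
        return int(request.GET[querystring_key])
    except (KeyError, TypeError, ValueError):
        return default

class FakeRequest:
    """Minimal stand-in exposing only the GET mapping."""
    def __init__(self, **params):
        self.GET = params

print(get_page_number_from_request(FakeRequest(page='3')))    # 3
print(get_page_number_from_request(FakeRequest(page='abc')))  # 1 (ValueError -> default)
print(get_page_number_from_request(FakeRequest()))            # 1 (KeyError -> default)
```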
def _mirror_idx_cov(self, f_values, idx1): # will most likely be removed
"""obsolete and subject to removal (TODO),
return indices for negative ("active") update of the covariance matrix
assuming that ``f_values[idx1[i]]`` and ``f_values[-1-i]`` are
the corresponding mirrored values
computes the index of the worse solution sorted by the f-value of the
better solution.
TODO: when the actual mirror was rejected, it is better
to return idx1 instead of idx2.
Remark: this function might not be necessary at all: if the worst solution
is the best mirrored, the covariance matrix updates cancel (cave: weights
and learning rates), which seems what is desirable. If the mirror is bad,
as strong negative update is made, again what is desirable.
And the fitness--step-length correlation is in part addressed by
using flat weights.
"""
idx2 = np.arange(len(f_values) - 1, len(f_values) - 1 - len(idx1), -1)
f = []
for i in rglen((idx1)):
f.append(min((f_values[idx1[i]], f_values[idx2[i]])))
# idx.append(idx1[i] if f_values[idx1[i]] > f_values[idx2[i]] else idx2[i])
return idx2[np.argsort(f)][-1::-1] | obsolete and subject to removal (TODO),
return indices for negative ("active") update of the covariance matrix
assuming that ``f_values[idx1[i]]`` and ``f_values[-1-i]`` are
the corresponding mirrored values
computes the index of the worse solution sorted by the f-value of the
better solution.
TODO: when the actual mirror was rejected, it is better
to return idx1 instead of idx2.
Remark: this function might not be necessary at all: if the worst solution
is the best mirrored, the covariance matrix updates cancel (cave: weights
and learning rates), which seems what is desirable. If the mirror is bad,
as strong negative update is made, again what is desirable.
And the fitness--step-length correlation is in part addressed by
using flat weights. | Below is the instruction that describes the task:
### Input:
obsolete and subject to removal (TODO),
return indices for negative ("active") update of the covariance matrix
assuming that ``f_values[idx1[i]]`` and ``f_values[-1-i]`` are
the corresponding mirrored values
computes the index of the worse solution sorted by the f-value of the
better solution.
TODO: when the actual mirror was rejected, it is better
to return idx1 instead of idx2.
Remark: this function might not be necessary at all: if the worst solution
is the best mirrored, the covariance matrix updates cancel (cave: weights
and learning rates), which seems what is desirable. If the mirror is bad,
as strong negative update is made, again what is desirable.
And the fitness--step-length correlation is in part addressed by
using flat weights.
### Response:
def _mirror_idx_cov(self, f_values, idx1): # will most likely be removed
"""obsolete and subject to removal (TODO),
return indices for negative ("active") update of the covariance matrix
assuming that ``f_values[idx1[i]]`` and ``f_values[-1-i]`` are
the corresponding mirrored values
computes the index of the worse solution sorted by the f-value of the
better solution.
TODO: when the actual mirror was rejected, it is better
to return idx1 instead of idx2.
Remark: this function might not be necessary at all: if the worst solution
is the best mirrored, the covariance matrix updates cancel (cave: weights
and learning rates), which seems what is desirable. If the mirror is bad,
as strong negative update is made, again what is desirable.
And the fitness--step-length correlation is in part addressed by
using flat weights.
"""
idx2 = np.arange(len(f_values) - 1, len(f_values) - 1 - len(idx1), -1)
f = []
for i in rglen((idx1)):
f.append(min((f_values[idx1[i]], f_values[idx2[i]])))
# idx.append(idx1[i] if f_values[idx1[i]] > f_values[idx2[i]] else idx2[i])
return idx2[np.argsort(f)][-1::-1] |
async def release_cursor(self, cursor, in_transaction=False):
"""Release cursor coroutine. Unless in transaction,
the connection is also released back to the pool.
"""
conn = cursor.connection
await cursor.close()
if not in_transaction:
self.release(conn) | Release cursor coroutine. Unless in transaction,
the connection is also released back to the pool. | Below is the instruction that describes the task:
### Input:
Release cursor coroutine. Unless in transaction,
the connection is also released back to the pool.
### Response:
async def release_cursor(self, cursor, in_transaction=False):
"""Release cursor coroutine. Unless in transaction,
the connection is also released back to the pool.
"""
conn = cursor.connection
await cursor.close()
if not in_transaction:
self.release(conn) |
def union(df, other, index=False, keep='first'):
"""
Returns rows that appear in either DataFrame.
Args:
df (pandas.DataFrame): data passed in through the pipe.
other (pandas.DataFrame): other DataFrame to use for set operation with
the first.
Kwargs:
index (bool): Boolean indicating whether to consider the pandas index
as part of the set operation (default `False`).
keep (str): Indicates which duplicate should be kept. Options are `'first'`
and `'last'`.
"""
validate_set_ops(df, other)
stacked = df.append(other)
if index:
stacked_reset_indexes = stacked.reset_index()
index_cols = [col for col in stacked_reset_indexes.columns if col not in df.columns]
index_name = df.index.names
return_df = stacked_reset_indexes.drop_duplicates(keep=keep).set_index(index_cols)
return_df.index.names = index_name
return return_df
else:
return stacked.drop_duplicates(keep=keep) | Returns rows that appear in either DataFrame.
Args:
df (pandas.DataFrame): data passed in through the pipe.
other (pandas.DataFrame): other DataFrame to use for set operation with
the first.
Kwargs:
index (bool): Boolean indicating whether to consider the pandas index
as part of the set operation (default `False`).
keep (str): Indicates which duplicate should be kept. Options are `'first'`
and `'last'`. | Below is the instruction that describes the task:
### Input:
Returns rows that appear in either DataFrame.
Args:
df (pandas.DataFrame): data passed in through the pipe.
other (pandas.DataFrame): other DataFrame to use for set operation with
the first.
Kwargs:
index (bool): Boolean indicating whether to consider the pandas index
as part of the set operation (default `False`).
keep (str): Indicates which duplicate should be kept. Options are `'first'`
and `'last'`.
### Response:
def union(df, other, index=False, keep='first'):
"""
Returns rows that appear in either DataFrame.
Args:
df (pandas.DataFrame): data passed in through the pipe.
other (pandas.DataFrame): other DataFrame to use for set operation with
the first.
Kwargs:
index (bool): Boolean indicating whether to consider the pandas index
as part of the set operation (default `False`).
keep (str): Indicates which duplicate should be kept. Options are `'first'`
and `'last'`.
"""
validate_set_ops(df, other)
stacked = df.append(other)
if index:
stacked_reset_indexes = stacked.reset_index()
index_cols = [col for col in stacked_reset_indexes.columns if col not in df.columns]
index_name = df.index.names
return_df = stacked_reset_indexes.drop_duplicates(keep=keep).set_index(index_cols)
return_df.index.names = index_name
return return_df
else:
return stacked.drop_duplicates(keep=keep) |
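The `index=False` branch boils down to "stack both frames, then drop duplicate rows". Because `validate_set_ops` is not shown here and `DataFrame.append` has been removed in recent pandas releases, the sketch below reproduces only that core behaviour with `pd.concat` on made-up data rather than calling `union` itself:

```python
import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})
other = pd.DataFrame({'a': [3, 4], 'b': ['z', 'w']})

# Stack both frames and keep the first occurrence of every duplicated row,
# mirroring union(df, other) with index=False, keep='first'.
result = pd.concat([df, other]).drop_duplicates(keep='first')
print(result)
#    a  b
# 0  1  x
# 1  2  y
# 2  3  z
# 1  4  w
```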
def spawn_process(self, port):
"""Create an Application and HTTPServer for the given port.
:param int port: The port to listen on
:rtype: multiprocessing.Process
"""
return process.Process(name="ServerProcess.%i" % port,
kwargs={'namespace': self.namespace,
'port': port}) | Create an Application and HTTPServer for the given port.
:param int port: The port to listen on
:rtype: multiprocessing.Process | Below is the instruction that describes the task:
### Input:
Create an Application and HTTPServer for the given port.
:param int port: The port to listen on
:rtype: multiprocessing.Process
### Response:
def spawn_process(self, port):
"""Create an Application and HTTPServer for the given port.
:param int port: The port to listen on
:rtype: multiprocessing.Process
"""
return process.Process(name="ServerProcess.%i" % port,
kwargs={'namespace': self.namespace,
'port': port}) |
def buildingname(ddtt):
"""return building name"""
idf = ddtt.theidf
building = idf.idfobjects['building'.upper()][0]
return building.Name | return building name | Below is the instruction that describes the task:
### Input:
return building name
### Response:
def buildingname(ddtt):
"""return building name"""
idf = ddtt.theidf
building = idf.idfobjects['building'.upper()][0]
return building.Name |
def probability_of_n_purchases_up_to_time(self, t, n):
r"""
Compute the probability of n purchases.
.. math:: P( N(t) = n | \text{model} )
where N(t) is the number of repeat purchases a customer makes in t
units of time.
Parameters
----------
t: float
number units of time
n: int
number of purchases
Returns
-------
float:
Probability to have n purchases up to t units of time
"""
r, alpha, a, b = self._unload_params("r", "alpha", "a", "b")
first_term = (
beta(a, b + n)
/ beta(a, b)
* gamma(r + n)
/ gamma(r)
/ gamma(n + 1)
* (alpha / (alpha + t)) ** r
* (t / (alpha + t)) ** n
)
if n > 0:
j = np.arange(0, n)
finite_sum = (gamma(r + j) / gamma(r) / gamma(j + 1) * (t / (alpha + t)) ** j).sum()
second_term = beta(a + 1, b + n - 1) / beta(a, b) * (1 - (alpha / (alpha + t)) ** r * finite_sum)
else:
second_term = 0
return first_term + second_term | r"""
Compute the probability of n purchases.
.. math:: P( N(t) = n | \text{model} )
where N(t) is the number of repeat purchases a customer makes in t
units of time.
Parameters
----------
t: float
number units of time
n: int
number of purchases
Returns
-------
float:
Probability to have n purchases up to t units of time | Below is the instruction that describes the task:
### Input:
r"""
Compute the probability of n purchases.
.. math:: P( N(t) = n | \text{model} )
where N(t) is the number of repeat purchases a customer makes in t
units of time.
Parameters
----------
t: float
number units of time
n: int
number of purchases
Returns
-------
float:
Probability to have n purchases up to t units of time
### Response:
def probability_of_n_purchases_up_to_time(self, t, n):
r"""
Compute the probability of n purchases.
.. math:: P( N(t) = n | \text{model} )
where N(t) is the number of repeat purchases a customer makes in t
units of time.
Parameters
----------
t: float
number units of time
n: int
number of purchases
Returns
-------
float:
Probability to have n purchases up to t units of time
"""
r, alpha, a, b = self._unload_params("r", "alpha", "a", "b")
first_term = (
beta(a, b + n)
/ beta(a, b)
* gamma(r + n)
/ gamma(r)
/ gamma(n + 1)
* (alpha / (alpha + t)) ** r
* (t / (alpha + t)) ** n
)
if n > 0:
j = np.arange(0, n)
finite_sum = (gamma(r + j) / gamma(r) / gamma(j + 1) * (t / (alpha + t)) ** j).sum()
second_term = beta(a + 1, b + n - 1) / beta(a, b) * (1 - (alpha / (alpha + t)) ** r * finite_sum)
else:
second_term = 0
return first_term + second_term |
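Written out, the expression the code evaluates is the following (read directly off the arithmetic above; \(B\) is the Beta function, \(\Gamma(n+1)=n!\), and the second term is only present for \(n>0\)):

$$
P\bigl(N(t)=n\bigr)
= \frac{B(a,\,b+n)}{B(a,\,b)}\,\frac{\Gamma(r+n)}{\Gamma(r)\,n!}
\left(\frac{\alpha}{\alpha+t}\right)^{r}\left(\frac{t}{\alpha+t}\right)^{n}
+ \mathbf{1}_{\{n>0\}}\,\frac{B(a+1,\,b+n-1)}{B(a,\,b)}
\left[1-\left(\frac{\alpha}{\alpha+t}\right)^{r}
\sum_{j=0}^{n-1}\frac{\Gamma(r+j)}{\Gamma(r)\,j!}\left(\frac{t}{\alpha+t}\right)^{j}\right]
$$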
def unmount(self, cid):
"""
Unmounts and cleans-up after a previous mount().
"""
driver = self.client.info()['Driver']
driver_unmount_fn = getattr(self, "_unmount_" + driver,
self._unsupported_backend)
driver_unmount_fn(cid) | Unmounts and cleans-up after a previous mount(). | Below is the instruction that describes the task:
### Input:
Unmounts and cleans-up after a previous mount().
### Response:
def unmount(self, cid):
"""
Unmounts and cleans-up after a previous mount().
"""
driver = self.client.info()['Driver']
driver_unmount_fn = getattr(self, "_unmount_" + driver,
self._unsupported_backend)
driver_unmount_fn(cid) |
def user_method(user_event):
"""Decorator of the Pdb user_* methods that controls the RemoteSocket."""
def wrapper(self, *args):
stdin = self.stdin
is_sock = isinstance(stdin, RemoteSocket)
try:
try:
if is_sock and not stdin.connect():
return
return user_event(self, *args)
except Exception:
self.close()
raise
finally:
if is_sock and stdin.closed():
self.do_detach(None)
return wrapper | Decorator of the Pdb user_* methods that controls the RemoteSocket. | Below is the instruction that describes the task:
### Input:
Decorator of the Pdb user_* methods that controls the RemoteSocket.
### Response:
def user_method(user_event):
"""Decorator of the Pdb user_* methods that controls the RemoteSocket."""
def wrapper(self, *args):
stdin = self.stdin
is_sock = isinstance(stdin, RemoteSocket)
try:
try:
if is_sock and not stdin.connect():
return
return user_event(self, *args)
except Exception:
self.close()
raise
finally:
if is_sock and stdin.closed():
self.do_detach(None)
return wrapper |
def _get_contig_id(contig_str):
"""Tries to retrieve contig id. Returns the original string if it
is unable to retrieve the id.
Parameters
----------
contig_str : str
Full contig string (fasta header)
Returns
-------
str
Contig id
"""
contig_id = contig_str
try:
contig_id = re.search(".*NODE_([0-9]*)_.*", contig_str).group(1)
except AttributeError:
pass
try:
contig_id = re.search(".*Contig_([0-9]*)_.*", contig_str).group(1)
except AttributeError:
pass
return contig_id | Tries to retrieve contig id. Returns the original string if it
is unable to retrieve the id.
Parameters
----------
contig_str : str
Full contig string (fasta header)
Returns
-------
str
Contig id | Below is the instruction that describes the task:
### Input:
Tries to retrieve contig id. Returns the original string if it
is unable to retrieve the id.
Parameters
----------
contig_str : str
Full contig string (fasta header)
Returns
-------
str
Contig id
### Response:
def _get_contig_id(contig_str):
"""Tries to retrieve contig id. Returns the original string if it
is unable to retrieve the id.
Parameters
----------
contig_str : str
Full contig string (fasta header)
Returns
-------
str
Contig id
"""
contig_id = contig_str
try:
contig_id = re.search(".*NODE_([0-9]*)_.*", contig_str).group(1)
except AttributeError:
pass
try:
contig_id = re.search(".*Contig_([0-9]*)_.*", contig_str).group(1)
except AttributeError:
pass
return contig_id |
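A quick check of the two regex branches with hypothetical FASTA headers (a SPAdes-style `NODE_...` name and a generic `Contig_...` name); headers matching neither pattern come back unchanged:

```python
import re

def _get_contig_id(contig_str):
    # Same body as the row above.
    contig_id = contig_str
    try:
        contig_id = re.search(".*NODE_([0-9]*)_.*", contig_str).group(1)
    except AttributeError:
        pass
    try:
        contig_id = re.search(".*Contig_([0-9]*)_.*", contig_str).group(1)
    except AttributeError:
        pass
    return contig_id

print(_get_contig_id("NODE_12_length_5320_cov_8.5"))  # prints 12
print(_get_contig_id("Contig_7_length_1234"))         # prints 7
print(_get_contig_id("custom_header_without_id"))     # prints custom_header_without_id
```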
def upgrade(self, only):
"""Remove all package lists with changelog and checksums files
and create lists again"""
repositories = self.meta.repositories
if only:
repositories = only
for repo in repositories:
changelogs = "{0}{1}{2}".format(self.log_path, repo,
"/ChangeLog.txt")
if os.path.isfile(changelogs):
os.remove(changelogs)
if os.path.isdir(self.lib_path + "{0}_repo/".format(repo)):
for f in (os.listdir(self.lib_path + "{0}_repo/".format(
repo))):
files = "{0}{1}_repo/{2}".format(self.lib_path, repo, f)
if os.path.isfile(files):
os.remove(files)
elif os.path.isdir(files):
shutil.rmtree(files)
Update().repository(only) | Remove all package lists with changelog and checksums files
and create lists again | Below is the instruction that describes the task:
### Input:
Remove all package lists with changelog and checksums files
and create lists again
### Response:
def upgrade(self, only):
"""Remove all package lists with changelog and checksums files
and create lists again"""
repositories = self.meta.repositories
if only:
repositories = only
for repo in repositories:
changelogs = "{0}{1}{2}".format(self.log_path, repo,
"/ChangeLog.txt")
if os.path.isfile(changelogs):
os.remove(changelogs)
if os.path.isdir(self.lib_path + "{0}_repo/".format(repo)):
for f in (os.listdir(self.lib_path + "{0}_repo/".format(
repo))):
files = "{0}{1}_repo/{2}".format(self.lib_path, repo, f)
if os.path.isfile(files):
os.remove(files)
elif os.path.isdir(files):
shutil.rmtree(files)
Update().repository(only) |
def delete(self, request, bot_id, id, format=None):
"""
Delete existing Messenger Bot
---
responseMessages:
- code: 401
message: Not authenticated
"""
return super(MessengerBotDetail, self).delete(request, bot_id, id, format) | Delete existing Messenger Bot
---
responseMessages:
- code: 401
message: Not authenticated | Below is the instruction that describes the task:
### Input:
Delete existing Messenger Bot
---
responseMessages:
- code: 401
message: Not authenticated
### Response:
def delete(self, request, bot_id, id, format=None):
"""
Delete existing Messenger Bot
---
responseMessages:
- code: 401
message: Not authenticated
"""
return super(MessengerBotDetail, self).delete(request, bot_id, id, format) |
def RdatabasesBM(host=rbiomart_host):
"""
Lists BioMart databases through a RPY2 connection.
:param host: address of the host server, default='www.ensembl.org'
:returns: nothing
"""
biomaRt = importr("biomaRt")
print(biomaRt.listMarts(host=host)) | Lists BioMart databases through a RPY2 connection.
:param host: address of the host server, default='www.ensembl.org'
:returns: nothing | Below is the instruction that describes the task:
### Input:
Lists BioMart databases through a RPY2 connection.
:param host: address of the host server, default='www.ensembl.org'
:returns: nothing
### Response:
def RdatabasesBM(host=rbiomart_host):
"""
Lists BioMart databases through a RPY2 connection.
:param host: address of the host server, default='www.ensembl.org'
:returns: nothing
"""
biomaRt = importr("biomaRt")
print(biomaRt.listMarts(host=host)) |
def locate(connection, agent_id):
'''
Return the hostname of the agency where given agent runs or None.
'''
connection = IDatabaseClient(connection)
log.log('locate', 'Locate called for agent_id: %r', agent_id)
try:
desc = yield connection.get_document(agent_id)
log.log('locate', 'Got document %r', desc)
if isinstance(desc, host.Descriptor):
defer.returnValue(desc.hostname)
elif isinstance(desc, descriptor.Descriptor):
host_part = first(x for x in desc.partners if x.role == 'host')
if host_part is None:
log.log('locate',
'No host partner found in descriptor.')
defer.returnValue(None)
res = yield locate(connection, host_part.recipient.key)
defer.returnValue(res)
except NotFoundError:
log.log('locate',
'Host with id %r not found, returning None', agent_id)
defer.returnValue(None) | Return the hostname of the agency where given agent runs or None. | Below is the instruction that describes the task:
### Input:
Return the hostname of the agency where given agent runs or None.
### Response:
def locate(connection, agent_id):
'''
Return the hostname of the agency where given agent runs or None.
'''
connection = IDatabaseClient(connection)
log.log('locate', 'Locate called for agent_id: %r', agent_id)
try:
desc = yield connection.get_document(agent_id)
log.log('locate', 'Got document %r', desc)
if isinstance(desc, host.Descriptor):
defer.returnValue(desc.hostname)
elif isinstance(desc, descriptor.Descriptor):
host_part = first(x for x in desc.partners if x.role == 'host')
if host_part is None:
log.log('locate',
'No host partner found in descriptor.')
defer.returnValue(None)
res = yield locate(connection, host_part.recipient.key)
defer.returnValue(res)
except NotFoundError:
log.log('locate',
'Host with id %r not found, returning None', agent_id)
defer.returnValue(None) |
def config(key, default=None):
"""
Shortcut to access the application's config in your class
:param key: The key to access
:param default: The default value when None
:returns mixed:
"""
return Mocha._app.config.get(key, default) if Mocha._app else default | Shortcut to access the application's config in your class
:param key: The key to access
:param default: The default value when None
:returns mixed: | Below is the instruction that describes the task:
### Input:
Shortcut to access the application's config in your class
:param key: The key to access
:param default: The default value when None
:returns mixed:
### Response:
def config(key, default=None):
"""
Shortcut to access the application's config in your class
:param key: The key to access
:param default: The default value when None
:returns mixed:
"""
return Mocha._app.config.get(key, default) if Mocha._app else default |
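The guard means the helper degrades gracefully before an application is registered. A toy illustration with a stand-in app object (`_App` here is hypothetical; only its dict-like `config` attribute matters):

```python
class _App:
    """Hypothetical stand-in; only the dict-like config attribute matters."""
    config = {'DEBUG': True, 'SECRET_KEY': 'abc123'}

class Mocha:
    _app = None  # nothing registered yet

def config(key, default=None):
    # Same body as the row above.
    return Mocha._app.config.get(key, default) if Mocha._app else default

print(config('DEBUG', False))  # False - no app registered, the default wins
Mocha._app = _App()
print(config('DEBUG', False))  # True  - read from the app config
print(config('MISSING', 42))   # 42    - unknown key falls back to the default
```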
def firmware_download_input_protocol_type_ftp_protocol_ftp_password(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
firmware_download = ET.Element("firmware_download")
config = firmware_download
input = ET.SubElement(firmware_download, "input")
protocol_type = ET.SubElement(input, "protocol-type")
ftp_protocol = ET.SubElement(protocol_type, "ftp-protocol")
ftp = ET.SubElement(ftp_protocol, "ftp")
password = ET.SubElement(ftp, "password")
password.text = kwargs.pop('password')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def firmware_download_input_protocol_type_ftp_protocol_ftp_password(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
firmware_download = ET.Element("firmware_download")
config = firmware_download
input = ET.SubElement(firmware_download, "input")
protocol_type = ET.SubElement(input, "protocol-type")
ftp_protocol = ET.SubElement(protocol_type, "ftp-protocol")
ftp = ET.SubElement(ftp_protocol, "ftp")
password = ET.SubElement(ftp, "password")
password.text = kwargs.pop('password')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def get_resource_method(name, template):
"""
Creates a function that is suitable as a method for ResourceCollection.
"""
def rsr_meth(self, **kwargs):
http_method = template['http_method']
extra_path = template.get('extra_path')
if extra_path:
fills = {'res_id': kwargs.pop('res_id', '')}
path = self.path + (extra_path % fills)
else:
path = self.path
response = self.client.request(http_method, path, **kwargs)
loc = response.headers.get('location', None)
if loc:
# If the returned code is a 201, then there should be a location
# header in the response that we can use to re-get the newly created
# resource.
loc = response.headers.get('location')
response = self.client.get(loc, **kwargs)
# At this point, we better have a valid JSON response object
try:
obj = response.json()
except:
# The response had no JSON ... not a resource object
return
if COLLECTION_TYPE in response.content_type:
ret = HookList(
[Resource(r, path, response, self.client) for r in obj],
response=response
)
else:
ret = Resource(obj, path, response, self.client)
return ret
rsr_meth.__name__ = name
return rsr_meth | Creates a function that is suitable as a method for ResourceCollection. | Below is the instruction that describes the task:
### Input:
Creates a function that is suitable as a method for ResourceCollection.
### Response:
def get_resource_method(name, template):
"""
Creates a function that is suitable as a method for ResourceCollection.
"""
def rsr_meth(self, **kwargs):
http_method = template['http_method']
extra_path = template.get('extra_path')
if extra_path:
fills = {'res_id': kwargs.pop('res_id', '')}
path = self.path + (extra_path % fills)
else:
path = self.path
response = self.client.request(http_method, path, **kwargs)
loc = response.headers.get('location', None)
if loc:
# If the returned code is a 201, then there should be a location
# header in the response that we can use to re-get the newly created
# resource.
loc = response.headers.get('location')
response = self.client.get(loc, **kwargs)
# At this point, we better have a valid JSON response object
try:
obj = response.json()
except:
# The response had no JSON ... not a resource object
return
if COLLECTION_TYPE in response.content_type:
ret = HookList(
[Resource(r, path, response, self.client) for r in obj],
response=response
)
else:
ret = Resource(obj, path, response, self.client)
return ret
rsr_meth.__name__ = name
return rsr_meth |
def _doAction(self, action):
"""This function will perform a FileMaker action."""
if self._db == '':
raise FMError, 'No database was selected'
result = ''
try:
request = [
uu({'-db': self._db })
]
if self._layout != '':
request.append(uu({'-lay': self._layout }))
if action == '-find' and self._lop != 'and':
request.append(uu({'-lop': self._lop }))
if action in ['-find', '-findall']:
if self._skipRecords != 0:
request.append(uu({ '-skip': self._skipRecords }))
if self._maxRecords != 0:
request.append(uu({ '-max': self._maxRecords }))
for i in range(0, len(self._sortParams)):
sort = self._sortParams[i]
request.append(uu({ '-sortfield.'+str(i+1): sort[0] }))
if sort[1] != '':
request.append(uu({ '-sortorder.'+str(i+1): sort[1] }))
for dbParam in self._dbParams:
if dbParam[0] == 'RECORDID':
request.append(uu({ '-recid': dbParam[1] }))
elif dbParam[0] == 'MODID':
request.append(uu({ '-modid': dbParam[1] }))
elif hasattr(dbParam[1], 'strftime'):
d = dbParam[1]
if (not hasattr(d, 'second')):
request.append(uu({ dbParam[0]: d.strftime('%m-%d-%Y') }))
else:
request.append(uu({ dbParam[0]: d.strftime('%m-%d-%Y %H:%M:%S') }))
del(d)
else:
request.append(uu({ dbParam[0]: dbParam[1] }))
request.append(action)
if self._extra_script:
request += self._extra_script
self._extra_script = None
result = self._doRequest(request)
try:
result = FMResultset.FMResultset(result)
except FMFieldError, value:
realfields = FMServer(self._buildUrl(), self._db, self._layout).doView()
l = []
for k, v in self._dbParams:
if k[-3:] != '.op' and k[0] != '-':
l.append(("'%s'" % k.replace('::','.')).encode('utf-8'))
raise FMError, "Field(s) %s not found on layout '%s'" % (', '.join(l), self._layout)
if action == '-view':
result = result.fieldNames
finally:
self._dbParams = []
self._sortParams = []
self._skipRecords = 0
self._maxRecords = 0
self._lop = 'and'
return result | This function will perform a FileMaker action. | Below is the instruction that describes the task:
### Input:
This function will perform a FileMaker action.
### Response:
def _doAction(self, action):
"""This function will perform a FileMaker action."""
if self._db == '':
raise FMError, 'No database was selected'
result = ''
try:
request = [
uu({'-db': self._db })
]
if self._layout != '':
request.append(uu({'-lay': self._layout }))
if action == '-find' and self._lop != 'and':
request.append(uu({'-lop': self._lop }))
if action in ['-find', '-findall']:
if self._skipRecords != 0:
request.append(uu({ '-skip': self._skipRecords }))
if self._maxRecords != 0:
request.append(uu({ '-max': self._maxRecords }))
for i in range(0, len(self._sortParams)):
sort = self._sortParams[i]
request.append(uu({ '-sortfield.'+str(i+1): sort[0] }))
if sort[1] != '':
request.append(uu({ '-sortorder.'+str(i+1): sort[1] }))
for dbParam in self._dbParams:
if dbParam[0] == 'RECORDID':
request.append(uu({ '-recid': dbParam[1] }))
elif dbParam[0] == 'MODID':
request.append(uu({ '-modid': dbParam[1] }))
elif hasattr(dbParam[1], 'strftime'):
d = dbParam[1]
if (not hasattr(d, 'second')):
request.append(uu({ dbParam[0]: d.strftime('%m-%d-%Y') }))
else:
request.append(uu({ dbParam[0]: d.strftime('%m-%d-%Y %H:%M:%S') }))
del(d)
else:
request.append(uu({ dbParam[0]: dbParam[1] }))
request.append(action)
if self._extra_script:
request += self._extra_script
self._extra_script = None
result = self._doRequest(request)
try:
result = FMResultset.FMResultset(result)
except FMFieldError, value:
realfields = FMServer(self._buildUrl(), self._db, self._layout).doView()
l = []
for k, v in self._dbParams:
if k[-3:] != '.op' and k[0] != '-':
l.append(("'%s'" % k.replace('::','.')).encode('utf-8'))
raise FMError, "Field(s) %s not found on layout '%s'" % (', '.join(l), self._layout)
if action == '-view':
result = result.fieldNames
finally:
self._dbParams = []
self._sortParams = []
self._skipRecords = 0
self._maxRecords = 0
self._lop = 'and'
return result |
def __load(self):
"""
Loads dynamically the class that acts like a namespace for constants.
"""
parts = self.__class_name.split('.')
module_name = ".".join(parts[:-1])
module = __import__(module_name)
modules = []
for comp in parts[1:]:
module = getattr(module, comp)
modules.append(module)
self.__module = modules[-2] | Loads dynamically the class that acts like a namespace for constants. | Below is the instruction that describes the task:
### Input:
Loads dynamically the class that acts like a namespace for constants.
### Response:
def __load(self):
"""
Loads dynamically the class that acts like a namespace for constants.
"""
parts = self.__class_name.split('.')
module_name = ".".join(parts[:-1])
module = __import__(module_name)
modules = []
for comp in parts[1:]:
module = getattr(module, comp)
modules.append(module)
self.__module = modules[-2] |
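The subtle part is that `__import__('pkg.sub')` returns the top-level package, so the loop has to walk the remaining dotted components (including the class name) with `getattr`; `modules[-2]` is then the module that actually contains the class. The same traversal on a standard-library name (`collections.abc.Mapping` is only a stand-in for a constants class):

```python
class_name = 'collections.abc.Mapping'      # stand-in for a constants "namespace" class

parts = class_name.split('.')               # ['collections', 'abc', 'Mapping']
module = __import__('.'.join(parts[:-1]))   # imports collections.abc, returns 'collections'

found = []
for comp in parts[1:]:                      # walk 'abc', then 'Mapping'
    module = getattr(module, comp)
    found.append(module)

print(found[-1])   # <class 'collections.abc.Mapping'> - the terminal attribute
print(found[-2])   # <module 'collections.abc' ...>    - what __load() keeps as self.__module
```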
def _get_calibration_for_mchits(hits, lookup):
"""Append the position, direction and t0 columns and add t0 to time"""
n_hits = len(hits)
cal = np.empty((n_hits, 9))
for i in range(n_hits):
cal[i] = lookup[hits['pmt_id'][i]]
dir_x = cal[:, 3]
dir_y = cal[:, 4]
dir_z = cal[:, 5]
du = cal[:, 7]
floor = cal[:, 8]
pos_x = cal[:, 0]
pos_y = cal[:, 1]
pos_z = cal[:, 2]
t0 = cal[:, 6]
return [dir_x, dir_y, dir_z, du, floor, pos_x, pos_y, pos_z, t0] | Append the position, direction and t0 columns and add t0 to time | Below is the instruction that describes the task:
### Input:
Append the position, direction and t0 columns and add t0 to time
### Response:
def _get_calibration_for_mchits(hits, lookup):
"""Append the position, direction and t0 columns and add t0 to time"""
n_hits = len(hits)
cal = np.empty((n_hits, 9))
for i in range(n_hits):
cal[i] = lookup[hits['pmt_id'][i]]
dir_x = cal[:, 3]
dir_y = cal[:, 4]
dir_z = cal[:, 5]
du = cal[:, 7]
floor = cal[:, 8]
pos_x = cal[:, 0]
pos_y = cal[:, 1]
pos_z = cal[:, 2]
t0 = cal[:, 6]
return [dir_x, dir_y, dir_z, du, floor, pos_x, pos_y, pos_z, t0] |
def _set_lsp_frr(self, v, load=False):
"""
Setter method for lsp_frr, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/lsp/lsp_frr (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_frr is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_frr() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=lsp_frr.lsp_frr, is_container='container', presence=True, yang_name="lsp-frr", rest_name="frr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set Fast Reroute options', u'callpoint': u'MplsLspFrr', u'cli-full-command': None, u'cli-add-mode': None, u'alt-name': u'frr', u'cli-mode-name': u'config-router-mpls-lsp-$(lsp-name)-frr'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_frr must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=lsp_frr.lsp_frr, is_container='container', presence=True, yang_name="lsp-frr", rest_name="frr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set Fast Reroute options', u'callpoint': u'MplsLspFrr', u'cli-full-command': None, u'cli-add-mode': None, u'alt-name': u'frr', u'cli-mode-name': u'config-router-mpls-lsp-$(lsp-name)-frr'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""",
})
self.__lsp_frr = t
if hasattr(self, '_set'):
self._set() | Setter method for lsp_frr, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/lsp/lsp_frr (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_frr is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_frr() directly. | Below is the instruction that describes the task:
### Input:
Setter method for lsp_frr, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/lsp/lsp_frr (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_frr is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_frr() directly.
### Response:
def _set_lsp_frr(self, v, load=False):
"""
Setter method for lsp_frr, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/lsp/lsp_frr (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_frr is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_frr() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=lsp_frr.lsp_frr, is_container='container', presence=True, yang_name="lsp-frr", rest_name="frr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set Fast Reroute options', u'callpoint': u'MplsLspFrr', u'cli-full-command': None, u'cli-add-mode': None, u'alt-name': u'frr', u'cli-mode-name': u'config-router-mpls-lsp-$(lsp-name)-frr'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_frr must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=lsp_frr.lsp_frr, is_container='container', presence=True, yang_name="lsp-frr", rest_name="frr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set Fast Reroute options', u'callpoint': u'MplsLspFrr', u'cli-full-command': None, u'cli-add-mode': None, u'alt-name': u'frr', u'cli-mode-name': u'config-router-mpls-lsp-$(lsp-name)-frr'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""",
})
self.__lsp_frr = t
if hasattr(self, '_set'):
self._set() |
def send(self, relative_path, http_method, **requests_args):
"""
Makes an API-key-authorized request
:param relative_path: URL path relative to self.base_url
:param http_method: HTTP method
:param requests_args: kwargs to be sent to requests
:type relative_path: str
:type http_method: str
:type requests_args: kwargs
:return:
A request response object
:raise:
CartoException
"""
try:
http_method, requests_args = self.prepare_send(http_method, **requests_args)
response = super(APIKeyAuthClient, self).send(relative_path, http_method, **requests_args)
except Exception as e:
raise CartoException(e)
if CartoRateLimitException.is_rate_limited(response):
raise CartoRateLimitException(response)
return response | Makes an API-key-authorized request
:param relative_path: URL path relative to self.base_url
:param http_method: HTTP method
:param requests_args: kwargs to be sent to requests
:type relative_path: str
:type http_method: str
:type requests_args: kwargs
:return:
A request response object
:raise:
CartoException | Below is the instruction that describes the task:
### Input:
Makes an API-key-authorized request
:param relative_path: URL path relative to self.base_url
:param http_method: HTTP method
:param requests_args: kwargs to be sent to requests
:type relative_path: str
:type http_method: str
:type requests_args: kwargs
:return:
A request response object
:raise:
CartoException
### Response:
def send(self, relative_path, http_method, **requests_args):
"""
Makes an API-key-authorized request
:param relative_path: URL path relative to self.base_url
:param http_method: HTTP method
:param requests_args: kwargs to be sent to requests
:type relative_path: str
:type http_method: str
:type requests_args: kwargs
:return:
A request response object
:raise:
CartoException
"""
try:
http_method, requests_args = self.prepare_send(http_method, **requests_args)
response = super(APIKeyAuthClient, self).send(relative_path, http_method, **requests_args)
except Exception as e:
raise CartoException(e)
if CartoRateLimitException.is_rate_limited(response):
raise CartoRateLimitException(response)
return response |
def open(self, tile, process, **kwargs):
"""
Open process output as input for other process.
Parameters
----------
tile : ``Tile``
process : ``MapcheteProcess``
kwargs : keyword arguments
"""
return InputTile(tile, process, kwargs.get("resampling", None)) | Open process output as input for other process.
Parameters
----------
tile : ``Tile``
process : ``MapcheteProcess``
kwargs : keyword arguments | Below is the instruction that describes the task:
### Input:
Open process output as input for other process.
Parameters
----------
tile : ``Tile``
process : ``MapcheteProcess``
kwargs : keyword arguments
### Response:
def open(self, tile, process, **kwargs):
"""
Open process output as input for other process.
Parameters
----------
tile : ``Tile``
process : ``MapcheteProcess``
kwargs : keyword arguments
"""
return InputTile(tile, process, kwargs.get("resampling", None)) |
def firstUrn(resource):
""" Parse a resource to get the first URN
:param resource: XML Resource
:type resource: etree._Element
:return: Tuple representing previous and next urn
:rtype: str
"""
resource = xmlparser(resource)
urn = resource.xpath("//ti:reply/ti:urn/text()", namespaces=XPATH_NAMESPACES, magic_string=True)
if len(urn) > 0:
urn = str(urn[0])
return urn.split(":")[-1] | Parse a resource to get the first URN
:param resource: XML Resource
:type resource: etree._Element
:return: Tuple representing previous and next urn
:rtype: str | Below is the instruction that describes the task:
### Input:
Parse a resource to get the first URN
:param resource: XML Resource
:type resource: etree._Element
:return: Tuple representing previous and next urn
:rtype: str
### Response:
def firstUrn(resource):
""" Parse a resource to get the first URN
:param resource: XML Resource
:type resource: etree._Element
:return: Tuple representing previous and next urn
:rtype: str
"""
resource = xmlparser(resource)
urn = resource.xpath("//ti:reply/ti:urn/text()", namespaces=XPATH_NAMESPACES, magic_string=True)
if len(urn) > 0:
urn = str(urn[0])
return urn.split(":")[-1] |
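The trailing `split(':')[-1]` keeps only the passage reference of a CTS URN, i.e. everything after the last colon. With a hypothetical URN:

```python
urn = "urn:cts:latinLit:phi1294.phi002.perseus-lat2:1.pr.1"  # hypothetical CTS URN
print(urn.split(":")[-1])  # prints 1.pr.1 - the passage reference only
```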
def _delete_extraneous_files(self):
# type: (SyncCopy) -> None
"""Delete extraneous files on the remote
:param SyncCopy self: this
"""
if not self._spec.options.delete_extraneous_destination:
return
# list blobs for all destinations
checked = set()
deleted = 0
for sa, container, _, _ in self._get_destination_paths():
key = ';'.join((sa.name, sa.endpoint, container))
if key in checked:
continue
logger.debug(
'attempting to delete extraneous blobs/files from: {}'.format(
key))
if self._global_dest_mode_is_file():
files = blobxfer.operations.azure.file.list_all_files(
sa.file_client, container)
for file in files:
id = blobxfer.operations.synccopy.SyncCopy.\
create_deletion_id(sa.file_client, container, file)
if id not in self._delete_exclude:
if self._general_options.dry_run:
logger.info('[DRY RUN] deleting file: {}'.format(
file))
else:
if self._general_options.verbose:
logger.debug('deleting file: {}'.format(file))
blobxfer.operations.azure.file.delete_file(
sa.file_client, container, file)
deleted += 1
else:
blobs = blobxfer.operations.azure.blob.list_all_blobs(
sa.block_blob_client, container)
for blob in blobs:
id = blobxfer.operations.synccopy.SyncCopy.\
create_deletion_id(
sa.block_blob_client, container, blob.name)
if id not in self._delete_exclude:
if self._general_options.dry_run:
logger.info('[DRY RUN] deleting blob: {}'.format(
blob.name))
else:
if self._general_options.verbose:
logger.debug('deleting blob: {}'.format(
blob.name))
blobxfer.operations.azure.blob.delete_blob(
sa.block_blob_client, container, blob.name)
deleted += 1
checked.add(key)
logger.info('deleted {} extraneous blobs/files'.format(deleted)) | Delete extraneous files on the remote
:param SyncCopy self: this | Below is the instruction that describes the task:
### Input:
Delete extraneous files on the remote
:param SyncCopy self: this
### Response:
def _delete_extraneous_files(self):
# type: (SyncCopy) -> None
"""Delete extraneous files on the remote
:param SyncCopy self: this
"""
if not self._spec.options.delete_extraneous_destination:
return
# list blobs for all destinations
checked = set()
deleted = 0
for sa, container, _, _ in self._get_destination_paths():
key = ';'.join((sa.name, sa.endpoint, container))
if key in checked:
continue
logger.debug(
'attempting to delete extraneous blobs/files from: {}'.format(
key))
if self._global_dest_mode_is_file():
files = blobxfer.operations.azure.file.list_all_files(
sa.file_client, container)
for file in files:
id = blobxfer.operations.synccopy.SyncCopy.\
create_deletion_id(sa.file_client, container, file)
if id not in self._delete_exclude:
if self._general_options.dry_run:
logger.info('[DRY RUN] deleting file: {}'.format(
file))
else:
if self._general_options.verbose:
logger.debug('deleting file: {}'.format(file))
blobxfer.operations.azure.file.delete_file(
sa.file_client, container, file)
deleted += 1
else:
blobs = blobxfer.operations.azure.blob.list_all_blobs(
sa.block_blob_client, container)
for blob in blobs:
id = blobxfer.operations.synccopy.SyncCopy.\
create_deletion_id(
sa.block_blob_client, container, blob.name)
if id not in self._delete_exclude:
if self._general_options.dry_run:
logger.info('[DRY RUN] deleting blob: {}'.format(
blob.name))
else:
if self._general_options.verbose:
logger.debug('deleting blob: {}'.format(
blob.name))
blobxfer.operations.azure.blob.delete_blob(
sa.block_blob_client, container, blob.name)
deleted += 1
checked.add(key)
logger.info('deleted {} extraneous blobs/files'.format(deleted)) |
def process_event(self, event):
"""
Process a new input event.
This method will pass the event on to any Effects in reverse Z order so that the
top-most Effect has priority.
:param event: The Event that has been triggered.
:returns: None if the Scene processed the event, else the original event.
"""
for effect in reversed(self._effects):
event = effect.process_event(event)
if event is None:
break
return event | Process a new input event.
This method will pass the event on to any Effects in reverse Z order so that the
top-most Effect has priority.
:param event: The Event that has been triggered.
:returns: None if the Scene processed the event, else the original event. | Below is the instruction that describes the task:
### Input:
Process a new input event.
This method will pass the event on to any Effects in reverse Z order so that the
top-most Effect has priority.
:param event: The Event that has been triggered.
:returns: None if the Scene processed the event, else the original event.
### Response:
def process_event(self, event):
"""
Process a new input event.
This method will pass the event on to any Effects in reverse Z order so that the
top-most Effect has priority.
:param event: The Event that has been triggered.
:returns: None if the Scene processed the event, else the original event.
"""
for effect in reversed(self._effects):
event = effect.process_event(event)
if event is None:
break
return event |
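Reverse Z order means effects added later (drawn on top) see the event first, and returning None stops propagation. A standalone sketch with toy effect objects, not the real asciimatics classes:

```python
class Background:
    """Toy effect that ignores events and passes them on."""
    def process_event(self, event):
        print("background saw:", event)
        return event

class TopWidget:
    """Toy effect that consumes every event it receives."""
    def process_event(self, event):
        print("top widget handled:", event)
        return None  # consumed: stop propagation

effects = [Background(), TopWidget()]  # later entries sit on top

def process_event(event):
    # Same loop as the method above, over a plain list of effects.
    for effect in reversed(effects):
        event = effect.process_event(event)
        if event is None:
            break
    return event

print(process_event("KeyboardEvent('q')"))  # the top widget handles it; returns None
```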
def _open_file(cls, writer_spec, filename_suffix, use_tmp_bucket=False):
"""Opens a new gcs file for writing."""
if use_tmp_bucket:
bucket = cls._get_tmp_gcs_bucket(writer_spec)
account_id = cls._get_tmp_account_id(writer_spec)
else:
bucket = cls._get_gcs_bucket(writer_spec)
account_id = cls._get_account_id(writer_spec)
# GoogleCloudStorage format for filenames, Initial slash is required
filename = "/%s/%s" % (bucket, filename_suffix)
content_type = writer_spec.get(cls.CONTENT_TYPE_PARAM, None)
options = {}
if cls.ACL_PARAM in writer_spec:
options["x-goog-acl"] = writer_spec.get(cls.ACL_PARAM)
return cloudstorage.open(filename, mode="w", content_type=content_type,
options=options, _account_id=account_id) | Opens a new gcs file for writing. | Below is the the instruction that describes the task:
### Input:
Opens a new gcs file for writing.
### Response:
def _open_file(cls, writer_spec, filename_suffix, use_tmp_bucket=False):
"""Opens a new gcs file for writing."""
if use_tmp_bucket:
bucket = cls._get_tmp_gcs_bucket(writer_spec)
account_id = cls._get_tmp_account_id(writer_spec)
else:
bucket = cls._get_gcs_bucket(writer_spec)
account_id = cls._get_account_id(writer_spec)
# GoogleCloudStorage format for filenames, Initial slash is required
filename = "/%s/%s" % (bucket, filename_suffix)
content_type = writer_spec.get(cls.CONTENT_TYPE_PARAM, None)
options = {}
if cls.ACL_PARAM in writer_spec:
options["x-goog-acl"] = writer_spec.get(cls.ACL_PARAM)
return cloudstorage.open(filename, mode="w", content_type=content_type,
options=options, _account_id=account_id) |
def sync_tools(
self, all_=False, destination=None, dry_run=False, public=False,
source=None, stream=None, version=None):
"""Copy Juju tools into this model.
:param bool all_: Copy all versions, not just the latest
:param str destination: Path to local destination directory
:param bool dry_run: Don't do the actual copy
:param bool public: Tools are for a public cloud, so generate mirrors
information
:param str source: Path to local source directory
:param str stream: Simplestreams stream for which to sync metadata
:param str version: Copy a specific major.minor version
"""
raise NotImplementedError() | Copy Juju tools into this model.
:param bool all_: Copy all versions, not just the latest
:param str destination: Path to local destination directory
:param bool dry_run: Don't do the actual copy
:param bool public: Tools are for a public cloud, so generate mirrors
information
:param str source: Path to local source directory
:param str stream: Simplestreams stream for which to sync metadata
:param str version: Copy a specific major.minor version | Below is the the instruction that describes the task:
### Input:
Copy Juju tools into this model.
:param bool all_: Copy all versions, not just the latest
:param str destination: Path to local destination directory
:param bool dry_run: Don't do the actual copy
:param bool public: Tools are for a public cloud, so generate mirrors
information
:param str source: Path to local source directory
:param str stream: Simplestreams stream for which to sync metadata
:param str version: Copy a specific major.minor version
### Response:
def sync_tools(
self, all_=False, destination=None, dry_run=False, public=False,
source=None, stream=None, version=None):
"""Copy Juju tools into this model.
:param bool all_: Copy all versions, not just the latest
:param str destination: Path to local destination directory
:param bool dry_run: Don't do the actual copy
:param bool public: Tools are for a public cloud, so generate mirrors
information
:param str source: Path to local source directory
:param str stream: Simplestreams stream for which to sync metadata
:param str version: Copy a specific major.minor version
"""
raise NotImplementedError() |
def remote_call(request, cls, method, args, kw):
'''Command for executing remote calls on a remote object
'''
actor = request.actor
name = 'remote_%s' % cls.__name__
if not hasattr(actor, name):
object = cls(actor)
setattr(actor, name, object)
else:
object = getattr(actor, name)
method_name = '%s%s' % (PREFIX, method)
return getattr(object, method_name)(request, *args, **kw) | Command for executing remote calls on a remote object | Below is the the instruction that describes the task:
### Input:
Command for executing remote calls on a remote object
### Response:
def remote_call(request, cls, method, args, kw):
'''Command for executing remote calls on a remote object
'''
actor = request.actor
name = 'remote_%s' % cls.__name__
if not hasattr(actor, name):
object = cls(actor)
setattr(actor, name, object)
else:
object = getattr(actor, name)
method_name = '%s%s' % (PREFIX, method)
return getattr(object, method_name)(request, *args, **kw) |
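The remote_call command above amounts to caching one helper object per actor and resolving a prefixed method name with getattr. A minimal standalone sketch of that dispatch pattern follows; PREFIX's value, the Echo class and the 'request' placeholder are illustrative stand-ins, not part of pulsar's API.

PREFIX = 'actorfn_'  # illustrative prefix playing the role of PREFIX above

class Echo:
    def __init__(self, actor):
        self.actor = actor

    def actorfn_ping(self, request, value):
        # resolved dynamically; receives the request plus the forwarded args
        return 'pong: {}'.format(value)

def dispatch(obj, method, *args, **kw):
    # build the prefixed name and look it up on the cached helper object
    return getattr(obj, '{}{}'.format(PREFIX, method))('request', *args, **kw)

print(dispatch(Echo(actor=None), 'ping', 42))  # pong: 42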
def domain_list(gandi):
"""List domains manageable by REST API."""
domains = gandi.dns.list()
for domain in domains:
gandi.echo(domain['fqdn'])
return domains | List domains manageable by REST API. | Below is the the instruction that describes the task:
### Input:
List domains manageable by REST API.
### Response:
def domain_list(gandi):
"""List domains manageable by REST API."""
domains = gandi.dns.list()
for domain in domains:
gandi.echo(domain['fqdn'])
return domains |
def parse(cls, args):
"""
Parse command line arguments to construct a dictionary of command
parameters that can be used to create a command
Args:
`args`: sequence of arguments
Returns:
Dictionary that can be used in create method
Raises:
ParseError: when the arguments are not correct
"""
parsed = {}
try:
(options, args) = cls.optparser.parse_args(args)
except OptionParsingError as e:
raise ParseError(e.msg, cls.optparser.format_help())
except OptionParsingExit as e:
return None
parsed['label'] = options.label
parsed['can_notify'] = options.can_notify
parsed['name'] = options.name
parsed['tags'] = options.tags
parsed["command_type"] = "HadoopCommand"
parsed['print_logs'] = options.print_logs
parsed['print_logs_live'] = options.print_logs_live
parsed['pool'] = options.pool
if len(args) < 2:
raise ParseError("Need at least two arguments", cls.usage)
subcmd = args.pop(0)
if subcmd not in cls.subcmdlist:
raise ParseError("First argument must be one of <%s>" %
"|".join(cls.subcmdlist))
parsed["sub_command"] = subcmd
parsed["sub_command_args"] = " ".join("'" + str(a) + "'" for a in args)
return parsed | Parse command line arguments to construct a dictionary of command
parameters that can be used to create a command
Args:
`args`: sequence of arguments
Returns:
Dictionary that can be used in create method
Raises:
ParseError: when the arguments are not correct | Below is the the instruction that describes the task:
### Input:
Parse command line arguments to construct a dictionary of command
parameters that can be used to create a command
Args:
`args`: sequence of arguments
Returns:
Dictionary that can be used in create method
Raises:
ParseError: when the arguments are not correct
### Response:
def parse(cls, args):
"""
Parse command line arguments to construct a dictionary of command
parameters that can be used to create a command
Args:
`args`: sequence of arguments
Returns:
Dictionary that can be used in create method
Raises:
ParseError: when the arguments are not correct
"""
parsed = {}
try:
(options, args) = cls.optparser.parse_args(args)
except OptionParsingError as e:
raise ParseError(e.msg, cls.optparser.format_help())
except OptionParsingExit as e:
return None
parsed['label'] = options.label
parsed['can_notify'] = options.can_notify
parsed['name'] = options.name
parsed['tags'] = options.tags
parsed["command_type"] = "HadoopCommand"
parsed['print_logs'] = options.print_logs
parsed['print_logs_live'] = options.print_logs_live
parsed['pool'] = options.pool
if len(args) < 2:
raise ParseError("Need at least two arguments", cls.usage)
subcmd = args.pop(0)
if subcmd not in cls.subcmdlist:
raise ParseError("First argument must be one of <%s>" %
"|".join(cls.subcmdlist))
parsed["sub_command"] = subcmd
parsed["sub_command_args"] = " ".join("'" + str(a) + "'" for a in args)
return parsed |
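The heavy lifting in parse is optparse splitting recognised flags from positional arguments, after which the first positional token becomes the subcommand and the rest are re-quoted. A small sketch of that split; the flag names and values here are made up, not Qubole's real option set.

from optparse import OptionParser

parser = OptionParser()
parser.add_option('--name', dest='name')
parser.add_option('--pool', dest='pool')

options, positional = parser.parse_args(
    ['--name', 'nightly', 'jar', 's3://bucket/job.jar', 'arg1'])
print(options.name)   # nightly
print(positional)     # ['jar', 's3://bucket/job.jar', 'arg1']

subcmd = positional.pop(0)
sub_command_args = " ".join("'" + str(a) + "'" for a in positional)
print(subcmd, sub_command_args)   # jar 's3://bucket/job.jar' 'arg1'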
def foreignkey(element, exceptions):
'''
function to determine if each select field needs a create button or not
'''
label = element.field.__dict__['label']
try:
label = unicode(label)
except NameError:
pass
if (not label) or (label in exceptions):
return False
else:
return "_queryset" in element.field.__dict__ | function to determine if each select field needs a create button or not | Below is the the instruction that describes the task:
### Input:
function to determine if each select field needs a create button or not
### Response:
def foreignkey(element, exceptions):
'''
function to determine if each select field needs a create button or not
'''
label = element.field.__dict__['label']
try:
label = unicode(label)
except NameError:
pass
if (not label) or (label in exceptions):
return False
else:
return "_queryset" in element.field.__dict__ |
def next(self, fetch: bool = False, next_symbol: _NextSymbol = DEFAULT_NEXT_SYMBOL) -> _Next:
"""Attempts to find the next page, if there is one. If ``fetch``
is ``True`` (default), returns :class:`HTML <HTML>` object of
next page. If ``fetch`` is ``False``, simply returns the next URL.
"""
def get_next():
candidates = self.find('a', containing=next_symbol)
for candidate in candidates:
if candidate.attrs.get('href'):
# Support 'next' rel (e.g. reddit).
if 'next' in candidate.attrs.get('rel', []):
return candidate.attrs['href']
# Support 'next' in classnames.
for _class in candidate.attrs.get('class', []):
if 'next' in _class:
return candidate.attrs['href']
if 'page' in candidate.attrs['href']:
return candidate.attrs['href']
try:
# Resort to the last candidate.
return candidates[-1].attrs['href']
except IndexError:
return None
__next = get_next()
if __next:
url = self._make_absolute(__next)
else:
return None
if fetch:
return self.session.get(url)
else:
return url | Attempts to find the next page, if there is one. If ``fetch``
is ``True`` (default), returns :class:`HTML <HTML>` object of
next page. If ``fetch`` is ``False``, simply returns the next URL. | Below is the the instruction that describes the task:
### Input:
Attempts to find the next page, if there is one. If ``fetch``
is ``True`` (default), returns :class:`HTML <HTML>` object of
next page. If ``fetch`` is ``False``, simply returns the next URL.
### Response:
def next(self, fetch: bool = False, next_symbol: _NextSymbol = DEFAULT_NEXT_SYMBOL) -> _Next:
"""Attempts to find the next page, if there is one. If ``fetch``
is ``True`` (default), returns :class:`HTML <HTML>` object of
next page. If ``fetch`` is ``False``, simply returns the next URL.
"""
def get_next():
candidates = self.find('a', containing=next_symbol)
for candidate in candidates:
if candidate.attrs.get('href'):
# Support 'next' rel (e.g. reddit).
if 'next' in candidate.attrs.get('rel', []):
return candidate.attrs['href']
# Support 'next' in classnames.
for _class in candidate.attrs.get('class', []):
if 'next' in _class:
return candidate.attrs['href']
if 'page' in candidate.attrs['href']:
return candidate.attrs['href']
try:
# Resort to the last candidate.
return candidates[-1].attrs['href']
except IndexError:
return None
__next = get_next()
if __next:
url = self._make_absolute(__next)
else:
return None
if fetch:
return self.session.get(url)
else:
return url |
def install_defaults(self):
"""Installs the default skills, updates all others"""
def install_or_update_skill(skill):
if skill.is_local:
self.update(skill)
else:
self.install(skill, origin='default')
return self.apply(install_or_update_skill, self.list_defaults()) | Installs the default skills, updates all others | Below is the the instruction that describes the task:
### Input:
Installs the default skills, updates all others
### Response:
def install_defaults(self):
"""Installs the default skills, updates all others"""
def install_or_update_skill(skill):
if skill.is_local:
self.update(skill)
else:
self.install(skill, origin='default')
return self.apply(install_or_update_skill, self.list_defaults()) |
def chdir(new_dir):
"""
stolen from bcbio.
Context manager to temporarily change to a new directory.
http://lucentbeing.com/blog/context-managers-and-the-with-statement-in-python/
"""
cur_dir = os.getcwd()
_mkdir(new_dir)
os.chdir(new_dir)
try:
yield
finally:
os.chdir(cur_dir) | stolen from bcbio.
Context manager to temporarily change to a new directory.
http://lucentbeing.com/blog/context-managers-and-the-with-statement-in-python/ | Below is the the instruction that describes the task:
### Input:
stolen from bcbio.
Context manager to temporarily change to a new directory.
http://lucentbeing.com/blog/context-managers-and-the-with-statement-in-python/
### Response:
def chdir(new_dir):
"""
stolen from bcbio.
Context manager to temporarily change to a new directory.
http://lucentbeing.com/blog/context-managers-and-the-with-statement-in-python/
"""
cur_dir = os.getcwd()
_mkdir(new_dir)
os.chdir(new_dir)
try:
yield
finally:
os.chdir(cur_dir) |
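chdir is a generator meant to be used as a context manager; in the source repo it is presumably wrapped with contextlib.contextmanager, and _mkdir is a create-if-missing helper. A self-contained sketch of the same pattern, with os.makedirs standing in for _mkdir:

import os
import tempfile
from contextlib import contextmanager

@contextmanager
def chdir_sketch(new_dir):
    cur_dir = os.getcwd()
    os.makedirs(new_dir, exist_ok=True)  # stand-in for _mkdir
    os.chdir(new_dir)
    try:
        yield
    finally:
        # always restore the original directory, even if the body raises
        os.chdir(cur_dir)

with tempfile.TemporaryDirectory() as tmp:
    before = os.getcwd()
    with chdir_sketch(os.path.join(tmp, 'work')):
        print(os.getcwd())            # .../work
    print(os.getcwd() == before)      # True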
def clear_zone_conditions(self):
"""stub"""
if (self.get_zone_conditions_metadata().is_read_only() or
self.get_zone_conditions_metadata().is_required()):
raise NoAccess()
self.my_osid_object_form._my_map['zoneConditions'] = \
self._zone_conditions_metadata['default_object_values'][0] | stub | Below is the the instruction that describes the task:
### Input:
stub
### Response:
def clear_zone_conditions(self):
"""stub"""
if (self.get_zone_conditions_metadata().is_read_only() or
self.get_zone_conditions_metadata().is_required()):
raise NoAccess()
self.my_osid_object_form._my_map['zoneConditions'] = \
self._zone_conditions_metadata['default_object_values'][0] |
def md5(self):
"""
MD5 of scene which will change when meshes or
transforms are changed
Returns
--------
hashed: str, MD5 hash of scene
"""
# start with transforms hash
hashes = [self.graph.md5()]
for g in self.geometry.values():
if hasattr(g, 'md5'):
hashes.append(g.md5())
elif hasattr(g, 'tostring'):
hashes.append(str(hash(g.tostring())))
else:
# try to just straight up hash
# this may raise errors
hashes.append(str(hash(g)))
md5 = util.md5_object(''.join(hashes))
return md5 | MD5 of scene which will change when meshes or
transforms are changed
Returns
--------
hashed: str, MD5 hash of scene | Below is the the instruction that describes the task:
### Input:
MD5 of scene which will change when meshes or
transforms are changed
Returns
--------
hashed: str, MD5 hash of scene
### Response:
def md5(self):
"""
MD5 of scene which will change when meshes or
transforms are changed
Returns
--------
hashed: str, MD5 hash of scene
"""
# start with transforms hash
hashes = [self.graph.md5()]
for g in self.geometry.values():
if hasattr(g, 'md5'):
hashes.append(g.md5())
elif hasattr(g, 'tostring'):
hashes.append(str(hash(g.tostring())))
else:
# try to just straight up hash
# this may raise errors
hashes.append(str(hash(g)))
md5 = util.md5_object(''.join(hashes))
return md5 |
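The md5 method above reduces a scene to one digest by concatenating per-geometry hash strings (in order, after the transform-graph hash) and hashing the result; util.md5_object is trimesh's helper, approximated here with hashlib.md5. Note the str(hash(...)) fallback is not reproducible across interpreter runs for strings because of hash randomisation. A compact sketch of the combining step:

import hashlib

def combined_md5(graph_hash, geometries):
    hashes = [graph_hash]
    for g in geometries:
        if hasattr(g, 'md5'):
            hashes.append(g.md5())
        else:
            # per-session fallback only; not stable across runs
            hashes.append(str(hash(g)))
    # hash of the concatenation: changes if any part (or the order) changes
    return hashlib.md5(''.join(hashes).encode('utf-8')).hexdigest()

class Geometry:
    def md5(self):
        return 'deadbeef'

print(combined_md5('graph-v1', [Geometry(), Geometry()]))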
def drop_edges(self) -> None:
"""Drop all edges in the database."""
t = time.time()
self.session.query(Edge).delete()
self.session.commit()
log.info('dropped all edges in %.2f seconds', time.time() - t) | Drop all edges in the database. | Below is the the instruction that describes the task:
### Input:
Drop all edges in the database.
### Response:
def drop_edges(self) -> None:
"""Drop all edges in the database."""
t = time.time()
self.session.query(Edge).delete()
self.session.commit()
log.info('dropped all edges in %.2f seconds', time.time() - t) |
def call_moses_detokenizer(workspace_dir: str, input_fname: str, output_fname: str, lang_code: Optional[str] = None):
"""
Call Moses detokenizer.
:param workspace_dir: Workspace third-party directory where Moses
tokenizer is checked out.
:param input_fname: Path of tokenized input file, plain text or gzipped.
:param output_fname: Path of tokenized output file, plain text.
:param lang_code: Language code for rules and non-breaking prefixes. Can be
None if unknown (using pre-tokenized data), which will
cause the tokenizer to default to English.
"""
detokenizer_fname = os.path.join(workspace_dir,
DIR_THIRD_PARTY,
MOSES_DEST,
"scripts",
"tokenizer",
"detokenizer.perl")
with bin_open(input_fname) as inp, open(output_fname, "wb") as out, open(os.devnull, "wb") as devnull:
command = ["perl", detokenizer_fname]
if lang_code:
command.append("-l")
command.append(lang_code)
detokenizer = subprocess.Popen(command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=devnull)
detokenizer_thread = threading.Thread(target=copy_out, args=(detokenizer.stdout, out))
detokenizer_thread.start()
for line in inp:
detokenizer.stdin.write(line)
detokenizer.stdin.close()
detokenizer_thread.join()
detokenizer.wait() | Call Moses detokenizer.
:param workspace_dir: Workspace third-party directory where Moses
tokenizer is checked out.
:param input_fname: Path of tokenized input file, plain text or gzipped.
:param output_fname: Path of tokenized output file, plain text.
:param lang_code: Language code for rules and non-breaking prefixes. Can be
None if unknown (using pre-tokenized data), which will
cause the tokenizer to default to English. | Below is the the instruction that describes the task:
### Input:
Call Moses detokenizer.
:param workspace_dir: Workspace third-party directory where Moses
tokenizer is checked out.
:param input_fname: Path of tokenized input file, plain text or gzipped.
:param output_fname: Path of tokenized output file, plain text.
:param lang_code: Language code for rules and non-breaking prefixes. Can be
None if unknown (using pre-tokenized data), which will
cause the tokenizer to default to English.
### Response:
def call_moses_detokenizer(workspace_dir: str, input_fname: str, output_fname: str, lang_code: Optional[str] = None):
"""
Call Moses detokenizer.
:param workspace_dir: Workspace third-party directory where Moses
tokenizer is checked out.
:param input_fname: Path of tokenized input file, plain text or gzipped.
:param output_fname: Path of tokenized output file, plain text.
:param lang_code: Language code for rules and non-breaking prefixes. Can be
None if unknown (using pre-tokenized data), which will
cause the tokenizer to default to English.
"""
detokenizer_fname = os.path.join(workspace_dir,
DIR_THIRD_PARTY,
MOSES_DEST,
"scripts",
"tokenizer",
"detokenizer.perl")
with bin_open(input_fname) as inp, open(output_fname, "wb") as out, open(os.devnull, "wb") as devnull:
command = ["perl", detokenizer_fname]
if lang_code:
command.append("-l")
command.append(lang_code)
detokenizer = subprocess.Popen(command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=devnull)
detokenizer_thread = threading.Thread(target=copy_out, args=(detokenizer.stdout, out))
detokenizer_thread.start()
for line in inp:
detokenizer.stdin.write(line)
detokenizer.stdin.close()
detokenizer_thread.join()
detokenizer.wait() |
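The function above writes lines into the child's stdin while a background thread drains its stdout into the output file (via the copy_out helper, assumed to copy a stream to a file); doing both from a single thread can deadlock once the pipe buffers fill. A self-contained sketch of the same Popen-plus-thread pattern, with a tiny Python upper-casing filter standing in for the Moses perl script:

import subprocess
import sys
import threading

def copy_out_sketch(src, dst):
    for chunk in src:
        dst.write(chunk)

child = subprocess.Popen(
    [sys.executable, '-c',
     'import sys\nfor line in sys.stdin: sys.stdout.write(line.upper())'],
    stdin=subprocess.PIPE, stdout=subprocess.PIPE)

with open('detok.out', 'wb') as out:
    drain = threading.Thread(target=copy_out_sketch, args=(child.stdout, out))
    drain.start()
    for line in (b'hello world\n', b'guten tag\n'):
        child.stdin.write(line)
    child.stdin.close()   # signal EOF so the child (and the drain thread) can finish
    drain.join()
child.wait()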
def Open(self):
"""Opens the process for reading."""
self.h_process = kernel32.OpenProcess(
PROCESS_VM_READ | PROCESS_QUERY_INFORMATION, 0, self.pid)
if not self.h_process:
raise process_error.ProcessError(
"Failed to open process (pid %d)." % self.pid)
if self.Is64bit():
si = self.GetNativeSystemInfo()
self.max_addr = si.lpMaximumApplicationAddress
else:
si = self.GetSystemInfo()
self.max_addr = 2147418111
self.min_addr = si.lpMinimumApplicationAddress | Opens the process for reading. | Below is the the instruction that describes the task:
### Input:
Opens the process for reading.
### Response:
def Open(self):
"""Opens the process for reading."""
self.h_process = kernel32.OpenProcess(
PROCESS_VM_READ | PROCESS_QUERY_INFORMATION, 0, self.pid)
if not self.h_process:
raise process_error.ProcessError(
"Failed to open process (pid %d)." % self.pid)
if self.Is64bit():
si = self.GetNativeSystemInfo()
self.max_addr = si.lpMaximumApplicationAddress
else:
si = self.GetSystemInfo()
self.max_addr = 2147418111
self.min_addr = si.lpMinimumApplicationAddress |
def _env_filenames(filenames, env):
"""
Extend filenames with env indication of environments.
:param list filenames: list of strings indicating filenames
:param str env: environment indicator
:returns: list of filenames extended with environment version
:rtype: list
"""
env_filenames = []
for filename in filenames:
filename_parts = filename.split('.')
filename_parts.insert(1, env)
env_filenames.extend([filename, '.'.join(filename_parts)])
return env_filenames | Extend filenames with env indication of environments.
:param list filenames: list of strings indicating filenames
:param str env: environment indicator
:returns: list of filenames extended with environment version
:rtype: list | Below is the the instruction that describes the task:
### Input:
Extend filenames with env indication of environments.
:param list filenames: list of strings indicating filenames
:param str env: environment indicator
:returns: list of filenames extended with environment version
:rtype: list
### Response:
def _env_filenames(filenames, env):
"""
Extend filenames with env indication of environments.
:param list filenames: list of strings indicating filenames
:param str env: environment indicator
:returns: list of filenames extended with environment version
:rtype: list
"""
env_filenames = []
for filename in filenames:
filename_parts = filename.split('.')
filename_parts.insert(1, env)
env_filenames.extend([filename, '.'.join(filename_parts)])
return env_filenames |
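With _env_filenames above in scope, each original name is kept and an env-specific variant is spliced in right after it (the env label goes after the first dot); the filenames below are purely illustrative.

print(_env_filenames(['app.yaml', 'logging.conf'], 'staging'))
# ['app.yaml', 'app.staging.yaml', 'logging.conf', 'logging.staging.conf']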
def get_table_metadata(engine, table):
""" Extract all useful infos from the given table
Args:
engine: SQLAlchemy connection engine
table: table name
Returns:
Dictionary of infos
"""
metadata = MetaData()
metadata.reflect(bind=engine, only=[table])
table_metadata = Table(table, metadata, autoload=True)
return table_metadata | Extract all useful infos from the given table
Args:
engine: SQLAlchemy connection engine
table: table name
Returns:
Dictionary of infos | Below is the the instruction that describes the task:
### Input:
Extract all useful infos from the given table
Args:
engine: SQLAlchemy connection engine
table: table name
Returns:
Dictionary of infos
### Response:
def get_table_metadata(engine, table):
""" Extract all useful infos from the given table
Args:
engine: SQLAlchemy connection engine
table: table name
Returns:
Dictionary of infos
"""
metadata = MetaData()
metadata.reflect(bind=engine, only=[table])
table_metadata = Table(table, metadata, autoload=True)
return table_metadata |
def create(self, parties):
"""Create the barrier for the given number of parties.
Parameters:
parties(int): The number of parties to wait for.
Returns:
bool: Whether or not the new barrier was successfully created.
"""
assert parties > 0, "parties must be a positive integer."
return self.backend.add(self.key, parties, self.ttl) | Create the barrier for the given number of parties.
Parameters:
parties(int): The number of parties to wait for.
Returns:
bool: Whether or not the new barrier was successfully created. | Below is the the instruction that describes the task:
### Input:
Create the barrier for the given number of parties.
Parameters:
parties(int): The number of parties to wait for.
Returns:
bool: Whether or not the new barrier was successfully created.
### Response:
def create(self, parties):
"""Create the barrier for the given number of parties.
Parameters:
parties(int): The number of parties to wait for.
Returns:
bool: Whether or not the new barrier was successfully created.
"""
assert parties > 0, "parties must be a positive integer."
return self.backend.add(self.key, parties, self.ttl) |
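create leans entirely on the backend's add(key, value, ttl) contract: the call must succeed only when the key does not already exist, which is what makes barrier creation atomic. A toy in-memory backend illustrating that assumed contract (not a real dramatiq backend; the TTL is ignored here):

class DictBackend:
    def __init__(self):
        self.store = {}

    def add(self, key, value, ttl):
        # succeed only on first creation; TTL handling omitted in this toy
        if key in self.store:
            return False
        self.store[key] = value
        return True

backend = DictBackend()
print(backend.add('barrier:epoch-3', 4, ttl=60))  # True  - barrier created
print(backend.add('barrier:epoch-3', 4, ttl=60))  # False - already exists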
def monkeycache(apis):
"""
Feed this a dictionary of api bananas, it spits out processed cache
"""
if isinstance(type(apis), type(None)) or apis is None:
return {}
verbs = set()
cache = {}
cache['count'] = apis['count']
cache['asyncapis'] = []
apilist = apis['api']
if apilist is None:
print("[monkeycache] Server response issue, no apis found")
for api in apilist:
name = getvalue(api, 'name')
verb, subject = splitverbsubject(name)
apidict = {}
apidict['name'] = name
apidict['description'] = getvalue(api, 'description')
apidict['isasync'] = getvalue(api, 'isasync')
if apidict['isasync']:
cache['asyncapis'].append(name)
apidict['related'] = splitcsvstring(getvalue(api, 'related'))
required = []
apiparams = []
for param in getvalue(api, 'params'):
apiparam = {}
apiparam['name'] = getvalue(param, 'name')
apiparam['description'] = getvalue(param, 'description')
apiparam['required'] = (getvalue(param, 'required') is True)
apiparam['length'] = int(getvalue(param, 'length'))
apiparam['type'] = getvalue(param, 'type')
apiparam['related'] = splitcsvstring(getvalue(param, 'related'))
if apiparam['required']:
required.append(apiparam['name'])
apiparams.append(apiparam)
apidict['requiredparams'] = required
apidict['params'] = apiparams
if verb not in cache:
cache[verb] = {}
cache[verb][subject] = apidict
verbs.add(verb)
cache['verbs'] = list(verbs)
return cache | Feed this a dictionary of api bananas, it spits out processed cache | Below is the the instruction that describes the task:
### Input:
Feed this a dictionary of api bananas, it spits out processed cache
### Response:
def monkeycache(apis):
"""
Feed this a dictionary of api bananas, it spits out processed cache
"""
if isinstance(type(apis), type(None)) or apis is None:
return {}
verbs = set()
cache = {}
cache['count'] = apis['count']
cache['asyncapis'] = []
apilist = apis['api']
if apilist is None:
print("[monkeycache] Server response issue, no apis found")
for api in apilist:
name = getvalue(api, 'name')
verb, subject = splitverbsubject(name)
apidict = {}
apidict['name'] = name
apidict['description'] = getvalue(api, 'description')
apidict['isasync'] = getvalue(api, 'isasync')
if apidict['isasync']:
cache['asyncapis'].append(name)
apidict['related'] = splitcsvstring(getvalue(api, 'related'))
required = []
apiparams = []
for param in getvalue(api, 'params'):
apiparam = {}
apiparam['name'] = getvalue(param, 'name')
apiparam['description'] = getvalue(param, 'description')
apiparam['required'] = (getvalue(param, 'required') is True)
apiparam['length'] = int(getvalue(param, 'length'))
apiparam['type'] = getvalue(param, 'type')
apiparam['related'] = splitcsvstring(getvalue(param, 'related'))
if apiparam['required']:
required.append(apiparam['name'])
apiparams.append(apiparam)
apidict['requiredparams'] = required
apidict['params'] = apiparams
if verb not in cache:
cache[verb] = {}
cache[verb][subject] = apidict
verbs.add(verb)
cache['verbs'] = list(verbs)
return cache |
def keyPressEvent(self, event):
"""Reimplement Qt methods"""
if event.key() == Qt.Key_Delete:
self.remove_item()
elif event.key() == Qt.Key_F2:
self.rename_item()
elif event == QKeySequence.Copy:
self.copy()
elif event == QKeySequence.Paste:
self.paste()
else:
QTableView.keyPressEvent(self, event) | Reimplement Qt methods | Below is the the instruction that describes the task:
### Input:
Reimplement Qt methods
### Response:
def keyPressEvent(self, event):
"""Reimplement Qt methods"""
if event.key() == Qt.Key_Delete:
self.remove_item()
elif event.key() == Qt.Key_F2:
self.rename_item()
elif event == QKeySequence.Copy:
self.copy()
elif event == QKeySequence.Paste:
self.paste()
else:
QTableView.keyPressEvent(self, event) |
def channel_view(x:Tensor)->Tensor:
"Make channel the first axis of `x` and flatten remaining axes"
return x.transpose(0,1).contiguous().view(x.shape[1],-1) | Make channel the first axis of `x` and flatten remaining axes | Below is the the instruction that describes the task:
### Input:
Make channel the first axis of `x` and flatten remaining axes
### Response:
def channel_view(x:Tensor)->Tensor:
"Make channel the first axis of `x` and flatten remaining axes"
return x.transpose(0,1).contiguous().view(x.shape[1],-1) |
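An equivalent sketch of channel_view without fastai's Tensor alias, showing the shape change on a typical image batch:

import torch

def channel_view_sketch(x):
    # move channels to axis 0, then flatten everything else per channel
    return x.transpose(0, 1).contiguous().view(x.shape[1], -1)

x = torch.randn(8, 3, 32, 32)            # (N, C, H, W)
print(channel_view_sketch(x).shape)      # torch.Size([3, 8192])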
def get_formatted_content(self, pyobj):
'''typecode data --> text
'''
u = urlencode(pyobj, self.reserved)
return String.get_formatted_content(self,
u) | typecode data --> text | Below is the the instruction that describes the task:
### Input:
typecode data --> text
### Response:
def get_formatted_content(self, pyobj):
'''typecode data --> text
'''
u = urlencode(pyobj, self.reserved)
return String.get_formatted_content(self,
u) |
def draw(self, dx=0, dy=0, weighted=False, directed=False, highlight=[], traffic=None):
""" Layout the graph incrementally.
The graph is drawn at the center of the canvas.
The weighted and directed parameters visualize edge weight and direction.
The highlight specifies list of connected nodes.
The path will be colored according to the "highlight" style.
Clicking and dragging events are monitored.
"""
self.update()
# Draw the graph background.
s = self.styles.default
s.graph_background(s)
# Center the graph on the canvas.
_ctx.push()
_ctx.translate(self.x+dx, self.y+dy)
# Indicate betweenness centrality.
if traffic:
if isinstance(traffic, bool):
traffic = 5
for n in self.nodes_by_betweenness()[:traffic]:
try: s = self.styles[n.style]
except: s = self.styles.default
if s.graph_traffic:
s.graph_traffic(s, n, self.alpha)
# Draw the edges and their labels.
s = self.styles.default
if s.edges:
s.edges(s, self.edges, self.alpha, weighted, directed)
# Draw each node in the graph.
# Apply individual style to each node (or default).
for n in self.nodes:
try: s = self.styles[n.style]
except: s = self.styles.default
if s.node:
s.node(s, n, self.alpha)
# Highlight the given shortest path.
try: s = self.styles.highlight
except: s = self.styles.default
if s.path:
s.path(s, self, highlight)
# Draw node id's as labels on each node.
for n in self.nodes:
try: s = self.styles[n.style]
except: s = self.styles.default
if s.node_label:
s.node_label(s, n, self.alpha)
# Events for clicked and dragged nodes.
# Nodes will resist being dragged by attraction and repulsion,
# put the event listener on top to get more direct feedback.
#self.events.update()
_ctx.pop() | Layout the graph incrementally.
The graph is drawn at the center of the canvas.
The weighted and directed parameters visualize edge weight and direction.
The highlight specifies list of connected nodes.
The path will be colored according to the "highlight" style.
Clicking and dragging events are monitored. | Below is the the instruction that describes the task:
### Input:
Layout the graph incrementally.
The graph is drawn at the center of the canvas.
The weighted and directed parameters visualize edge weight and direction.
The highlight specifies list of connected nodes.
The path will be colored according to the "highlight" style.
Clicking and dragging events are monitored.
### Response:
def draw(self, dx=0, dy=0, weighted=False, directed=False, highlight=[], traffic=None):
""" Layout the graph incrementally.
The graph is drawn at the center of the canvas.
The weighted and directed parameters visualize edge weight and direction.
The highlight specifies list of connected nodes.
The path will be colored according to the "highlight" style.
Clicking and dragging events are monitored.
"""
self.update()
# Draw the graph background.
s = self.styles.default
s.graph_background(s)
# Center the graph on the canvas.
_ctx.push()
_ctx.translate(self.x+dx, self.y+dy)
# Indicate betweenness centrality.
if traffic:
if isinstance(traffic, bool):
traffic = 5
for n in self.nodes_by_betweenness()[:traffic]:
try: s = self.styles[n.style]
except: s = self.styles.default
if s.graph_traffic:
s.graph_traffic(s, n, self.alpha)
# Draw the edges and their labels.
s = self.styles.default
if s.edges:
s.edges(s, self.edges, self.alpha, weighted, directed)
# Draw each node in the graph.
# Apply individual style to each node (or default).
for n in self.nodes:
try: s = self.styles[n.style]
except: s = self.styles.default
if s.node:
s.node(s, n, self.alpha)
# Highlight the given shortest path.
try: s = self.styles.highlight
except: s = self.styles.default
if s.path:
s.path(s, self, highlight)
# Draw node id's as labels on each node.
for n in self.nodes:
try: s = self.styles[n.style]
except: s = self.styles.default
if s.node_label:
s.node_label(s, n, self.alpha)
# Events for clicked and dragged nodes.
# Nodes will resist being dragged by attraction and repulsion,
# put the event listener on top to get more direct feedback.
#self.events.update()
_ctx.pop() |
def _read_bands(self):
""" Reads a band with rasterio """
bands = []
try:
for i, band in enumerate(self.bands):
bands.append(rasterio.open(self.bands_path[i]).read_band(1))
except IOError as e:
exit(e.message, 1)
return bands | Reads a band with rasterio | Below is the the instruction that describes the task:
### Input:
Reads a band with rasterio
### Response:
def _read_bands(self):
""" Reads a band with rasterio """
bands = []
try:
for i, band in enumerate(self.bands):
bands.append(rasterio.open(self.bands_path[i]).read_band(1))
except IOError as e:
exit(e.message, 1)
return bands |
def add_all_from_dict(self, dictionary, **kwargs):
"""
Batch-add function implementations to the library.
:param dictionary: A mapping from name to procedure class, i.e. the first two arguments to add()
:param kwargs: Any additional kwargs will be passed to the constructors of _each_ procedure class
"""
for name, procedure in dictionary.items():
self.add(name, procedure, **kwargs) | Batch-add function implementations to the library.
:param dictionary: A mapping from name to procedure class, i.e. the first two arguments to add()
:param kwargs: Any additional kwargs will be passed to the constructors of _each_ procedure class | Below is the the instruction that describes the task:
### Input:
Batch-add function implementations to the library.
:param dictionary: A mapping from name to procedure class, i.e. the first two arguments to add()
:param kwargs: Any additional kwargs will be passed to the constructors of _each_ procedure class
### Response:
def add_all_from_dict(self, dictionary, **kwargs):
"""
Batch-add function implementations to the library.
:param dictionary: A mapping from name to procedure class, i.e. the first two arguments to add()
:param kwargs: Any additional kwargs will be passed to the constructors of _each_ procedure class
"""
for name, procedure in dictionary.items():
self.add(name, procedure, **kwargs) |
def leaveEvent(self, event):
"""
Mark the hovered state as being false.
:param event | <QtCore.QLeaveEvent>
"""
super(XViewPanelItem, self).leaveEvent(event)
# store the hover state and mark for a repaint
self._hovered = False
self.update() | Mark the hovered state as being false.
:param event | <QtCore.QLeaveEvent> | Below is the the instruction that describes the task:
### Input:
Mark the hovered state as being false.
:param event | <QtCore.QLeaveEvent>
### Response:
def leaveEvent(self, event):
"""
Mark the hovered state as being false.
:param event | <QtCore.QLeaveEvent>
"""
super(XViewPanelItem, self).leaveEvent(event)
# store the hover state and mark for a repaint
self._hovered = False
self.update() |
def parse(cls, fptr, offset, length):
"""Parse data entry URL box.
Parameters
----------
fptr : file
Open file object.
offset : int
Start position of box in bytes.
length : int
Length of the box in bytes.
Returns
-------
DataEntryURLbox
Instance of the current data entry URL box.
"""
num_bytes = offset + length - fptr.tell()
read_buffer = fptr.read(num_bytes)
data = struct.unpack_from('>BBBB', read_buffer)
version = data[0]
flag = data[1:4]
url = read_buffer[4:].decode('utf-8').rstrip(chr(0))
return cls(version, flag, url, length=length, offset=offset) | Parse data entry URL box.
Parameters
----------
fptr : file
Open file object.
offset : int
Start position of box in bytes.
length : int
Length of the box in bytes.
Returns
-------
DataEntryURLbox
Instance of the current data entry URL box. | Below is the the instruction that describes the task:
### Input:
Parse data entry URL box.
Parameters
----------
fptr : file
Open file object.
offset : int
Start position of box in bytes.
length : int
Length of the box in bytes.
Returns
-------
DataEntryURLbox
Instance of the current data entry URL box.
### Response:
def parse(cls, fptr, offset, length):
"""Parse data entry URL box.
Parameters
----------
fptr : file
Open file object.
offset : int
Start position of box in bytes.
length : int
Length of the box in bytes.
Returns
-------
DataEntryURLbox
Instance of the current data entry URL box.
"""
num_bytes = offset + length - fptr.tell()
read_buffer = fptr.read(num_bytes)
data = struct.unpack_from('>BBBB', read_buffer)
version = data[0]
flag = data[1:4]
url = read_buffer[4:].decode('utf-8').rstrip(chr(0))
return cls(version, flag, url, length=length, offset=offset) |
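The parser above reads a version byte, three flag bytes, then a NUL-terminated UTF-8 URL from the remaining box payload. A round-trip sketch of just that byte layout, with io.BytesIO standing in for the open JP2 file (box length/offset bookkeeping omitted):

import io
import struct

payload = struct.pack('>BBBB', 0, 0, 0, 0) + b'http://example.com/image.jp2\x00'
fptr = io.BytesIO(payload)

read_buffer = fptr.read(len(payload))
data = struct.unpack_from('>BBBB', read_buffer)
version, flag = data[0], data[1:4]
url = read_buffer[4:].decode('utf-8').rstrip(chr(0))
print(version, flag, url)   # 0 (0, 0, 0) http://example.com/image.jp2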
def incoming_references(self, client=None, query={}):
"""Fetches all entries referencing the entry
API Reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/search-parameters/links-to-asset
:param client Client instance
:param query: (optional) Dict with API options.
:return: List of :class:`Entry <contentful.entry.Entry>` objects.
:rtype: List of contentful.entry.Entry
Usage:
>>> entries = entry.incoming_references(client)
[<Entry[cat] id='happycat'>]
"""
if client is None:
return False
query.update({'links_to_entry': self.id})
return client.entries(query) | Fetches all entries referencing the entry
API Reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/search-parameters/links-to-asset
:param client Client instance
:param query: (optional) Dict with API options.
:return: List of :class:`Entry <contentful.entry.Entry>` objects.
:rtype: List of contentful.entry.Entry
Usage:
>>> entries = entry.incoming_references(client)
[<Entry[cat] id='happycat'>] | Below is the the instruction that describes the task:
### Input:
Fetches all entries referencing the entry
API Reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/search-parameters/links-to-asset
:param client Client instance
:param query: (optional) Dict with API options.
:return: List of :class:`Entry <contentful.entry.Entry>` objects.
:rtype: List of contentful.entry.Entry
Usage:
>>> entries = entry.incoming_references(client)
[<Entry[cat] id='happycat'>]
### Response:
def incoming_references(self, client=None, query={}):
"""Fetches all entries referencing the entry
API Reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/search-parameters/links-to-asset
:param client Client instance
:param query: (optional) Dict with API options.
:return: List of :class:`Entry <contentful.entry.Entry>` objects.
:rtype: List of contentful.entry.Entry
Usage:
>>> entries = entry.incoming_references(client)
[<Entry[cat] id='happycat'>]
"""
if client is None:
return False
query.update({'links_to_entry': self.id})
return client.entries(query) |
def query(name, use_kerberos=None, debug=False):
"""Query the Channel Information System for details on the given
channel name
Parameters
----------
name : `~gwpy.detector.Channel`, or `str`
Name of the channel of interest
Returns
-------
channel : `~gwpy.detector.Channel`
Channel with all details as acquired from the CIS
"""
url = '%s/?q=%s' % (CIS_API_URL, name)
more = True
out = ChannelList()
while more:
reply = _get(url, use_kerberos=use_kerberos, debug=debug)
try:
out.extend(map(parse_json, reply[u'results']))
except KeyError:
pass
except TypeError: # reply is a list
out.extend(map(parse_json, reply))
break
more = 'next' in reply and reply['next'] is not None
if more:
url = reply['next']
else:
break
out.sort(key=lambda c: c.name)
return out | Query the Channel Information System for details on the given
channel name
Parameters
----------
name : `~gwpy.detector.Channel`, or `str`
Name of the channel of interest
Returns
-------
channel : `~gwpy.detector.Channel`
Channel with all details as acquired from the CIS | Below is the the instruction that describes the task:
### Input:
Query the Channel Information System for details on the given
channel name
Parameters
----------
name : `~gwpy.detector.Channel`, or `str`
Name of the channel of interest
Returns
-------
channel : `~gwpy.detector.Channel`
Channel with all details as acquired from the CIS
### Response:
def query(name, use_kerberos=None, debug=False):
"""Query the Channel Information System for details on the given
channel name
Parameters
----------
name : `~gwpy.detector.Channel`, or `str`
Name of the channel of interest
Returns
-------
channel : `~gwpy.detector.Channel`
Channel with all details as acquired from the CIS
"""
url = '%s/?q=%s' % (CIS_API_URL, name)
more = True
out = ChannelList()
while more:
reply = _get(url, use_kerberos=use_kerberos, debug=debug)
try:
out.extend(map(parse_json, reply[u'results']))
except KeyError:
pass
except TypeError: # reply is a list
out.extend(map(parse_json, reply))
break
more = 'next' in reply and reply['next'] is not None
if more:
url = reply['next']
else:
break
out.sort(key=lambda c: c.name)
return out |
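The loop above is standard cursor pagination: accumulate results and keep following the next URL until it is absent or null. A network-free sketch of that loop over canned pages (fetch is a stand-in for the authenticated _get helper; the channel names are made up):

pages = {
    'page1': {'results': ['X1:CHAN-A', 'X1:CHAN-B'], 'next': 'page2'},
    'page2': {'results': ['X1:CHAN-C'], 'next': None},
}

def fetch(url):
    return pages[url]

out, url = [], 'page1'
while url:
    reply = fetch(url)
    out.extend(reply.get('results', []))
    url = reply.get('next')
print(sorted(out))   # ['X1:CHAN-A', 'X1:CHAN-B', 'X1:CHAN-C']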
def _get_files(file_patterns, top=HERE):
"""Expand file patterns to a list of paths.
Parameters
-----------
file_patterns: list or str
A list of glob patterns for the data file locations.
The globs can be recursive if they include a `**`.
They should be relative paths from the top directory or
absolute paths.
top: str
the directory to consider for data files
Note:
Files in `node_modules` are ignored.
"""
if not isinstance(file_patterns, (list, tuple)):
file_patterns = [file_patterns]
for i, p in enumerate(file_patterns):
if os.path.isabs(p):
file_patterns[i] = os.path.relpath(p, top)
matchers = [_compile_pattern(p) for p in file_patterns]
files = set()
for root, dirnames, filenames in os.walk(top):
# Don't recurse into node_modules
if 'node_modules' in dirnames:
dirnames.remove('node_modules')
for m in matchers:
for filename in filenames:
fn = os.path.relpath(_glob_pjoin(root, filename), top)
fn = fn.replace(os.sep, '/')
if m(fn):
files.add(fn.replace(os.sep, '/'))
return list(files) | Expand file patterns to a list of paths.
Parameters
-----------
file_patterns: list or str
A list of glob patterns for the data file locations.
The globs can be recursive if they include a `**`.
They should be relative paths from the top directory or
absolute paths.
top: str
the directory to consider for data files
Note:
Files in `node_modules` are ignored. | Below is the the instruction that describes the task:
### Input:
Expand file patterns to a list of paths.
Parameters
-----------
file_patterns: list or str
A list of glob patterns for the data file locations.
The globs can be recursive if they include a `**`.
They should be relative paths from the top directory or
absolute paths.
top: str
the directory to consider for data files
Note:
Files in `node_modules` are ignored.
### Response:
def _get_files(file_patterns, top=HERE):
"""Expand file patterns to a list of paths.
Parameters
-----------
file_patterns: list or str
A list of glob patterns for the data file locations.
The globs can be recursive if they include a `**`.
They should be relative paths from the top directory or
absolute paths.
top: str
the directory to consider for data files
Note:
Files in `node_modules` are ignored.
"""
if not isinstance(file_patterns, (list, tuple)):
file_patterns = [file_patterns]
for i, p in enumerate(file_patterns):
if os.path.isabs(p):
file_patterns[i] = os.path.relpath(p, top)
matchers = [_compile_pattern(p) for p in file_patterns]
files = set()
for root, dirnames, filenames in os.walk(top):
# Don't recurse into node_modules
if 'node_modules' in dirnames:
dirnames.remove('node_modules')
for m in matchers:
for filename in filenames:
fn = os.path.relpath(_glob_pjoin(root, filename), top)
fn = fn.replace(os.sep, '/')
if m(fn):
files.add(fn.replace(os.sep, '/'))
return list(files) |
def empty_tree(input_list):
"""Recursively iterate through values in nested lists."""
for item in input_list:
if not isinstance(item, list) or not empty_tree(item):
return False
return True | Recursively iterate through values in nested lists. | Below is the the instruction that describes the task:
### Input:
Recursively iterate through values in nested lists.
### Response:
def empty_tree(input_list):
"""Recursively iterate through values in nested lists."""
for item in input_list:
if not isinstance(item, list) or not empty_tree(item):
return False
return True |
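A few illustrative calls, assuming empty_tree above is in scope; only structures that bottom out in empty lists count as empty:

print(empty_tree([]))             # True
print(empty_tree([[], [[]]]))     # True
print(empty_tree([[], [0]]))      # False - 0 is not a list
print(empty_tree(['']))           # False - a string is not a list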
def postprocess_input_todo(self, p_todo):
"""
Post-processes a parsed todo when adding it to the list.
* It converts relative dates to absolute ones.
* Automatically inserts a creation date if not present.
* Handles more user-friendly dependencies with before:, partof: and
after: tags
"""
def convert_date(p_tag):
value = p_todo.tag_value(p_tag)
if value:
dateobj = relative_date_to_date(value)
if dateobj:
p_todo.set_tag(p_tag, dateobj.isoformat())
def add_dependencies(p_tag):
for value in p_todo.tag_values(p_tag):
try:
dep = self.todolist.todo(value)
if p_tag == 'after':
self.todolist.add_dependency(p_todo, dep)
elif p_tag == 'before' or p_tag == 'partof':
self.todolist.add_dependency(dep, p_todo)
elif p_tag.startswith('parent'):
for parent in self.todolist.parents(dep):
self.todolist.add_dependency(parent, p_todo)
elif p_tag.startswith('child'):
for child in self.todolist.children(dep):
self.todolist.add_dependency(p_todo, child)
except InvalidTodoException:
pass
p_todo.remove_tag(p_tag, value)
convert_date(config().tag_start())
convert_date(config().tag_due())
keywords = [
'after',
'before',
'child-of',
'childof',
'children-of',
'childrenof',
'parent-of',
'parentof',
'parents-of',
'parentsof',
'partof',
]
for keyword in keywords:
add_dependencies(keyword) | Post-processes a parsed todo when adding it to the list.
* It converts relative dates to absolute ones.
* Automatically inserts a creation date if not present.
* Handles more user-friendly dependencies with before:, partof: and
after: tags | Below is the the instruction that describes the task:
### Input:
Post-processes a parsed todo when adding it to the list.
* It converts relative dates to absolute ones.
* Automatically inserts a creation date if not present.
* Handles more user-friendly dependencies with before:, partof: and
after: tags
### Response:
def postprocess_input_todo(self, p_todo):
"""
Post-processes a parsed todo when adding it to the list.
* It converts relative dates to absolute ones.
* Automatically inserts a creation date if not present.
* Handles more user-friendly dependencies with before:, partof: and
after: tags
"""
def convert_date(p_tag):
value = p_todo.tag_value(p_tag)
if value:
dateobj = relative_date_to_date(value)
if dateobj:
p_todo.set_tag(p_tag, dateobj.isoformat())
def add_dependencies(p_tag):
for value in p_todo.tag_values(p_tag):
try:
dep = self.todolist.todo(value)
if p_tag == 'after':
self.todolist.add_dependency(p_todo, dep)
elif p_tag == 'before' or p_tag == 'partof':
self.todolist.add_dependency(dep, p_todo)
elif p_tag.startswith('parent'):
for parent in self.todolist.parents(dep):
self.todolist.add_dependency(parent, p_todo)
elif p_tag.startswith('child'):
for child in self.todolist.children(dep):
self.todolist.add_dependency(p_todo, child)
except InvalidTodoException:
pass
p_todo.remove_tag(p_tag, value)
convert_date(config().tag_start())
convert_date(config().tag_due())
keywords = [
'after',
'before',
'child-of',
'childof',
'children-of',
'childrenof',
'parent-of',
'parentof',
'parents-of',
'parentsof',
'partof',
]
for keyword in keywords:
add_dependencies(keyword) |
def text(self, tag, textdata, step=None):
"""Saves a text summary.
Args:
tag: str: label for this data
textdata: string, or 1D/2D list/numpy array of strings
step: int: training step
Note: markdown formatting is rendered by tensorboard.
"""
if step is None:
step = self._step
else:
self._step = step
smd = SummaryMetadata(
plugin_data=SummaryMetadata.PluginData(plugin_name='text'))
if isinstance(textdata, (str, bytes)):
tensor = tf.make_tensor_proto(
values=[textdata.encode(encoding='utf_8')], shape=(1,))
else:
textdata = onp.array(textdata) # convert lists, jax arrays, etc.
datashape = onp.shape(textdata)
if len(datashape) == 1:
tensor = tf.make_tensor_proto(
values=[td.encode(encoding='utf_8') for td in textdata],
shape=(datashape[0],))
elif len(datashape) == 2:
tensor = tf.make_tensor_proto(
values=[
td.encode(encoding='utf_8') for td in onp.reshape(textdata, -1)
],
shape=(datashape[0], datashape[1]))
summary = Summary(
value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)])
self.add_summary(summary, step) | Saves a text summary.
Args:
tag: str: label for this data
textdata: string, or 1D/2D list/numpy array of strings
step: int: training step
Note: markdown formatting is rendered by tensorboard. | Below is the the instruction that describes the task:
### Input:
Saves a text summary.
Args:
tag: str: label for this data
textdata: string, or 1D/2D list/numpy array of strings
step: int: training step
Note: markdown formatting is rendered by tensorboard.
### Response:
def text(self, tag, textdata, step=None):
"""Saves a text summary.
Args:
tag: str: label for this data
textdata: string, or 1D/2D list/numpy array of strings
step: int: training step
Note: markdown formatting is rendered by tensorboard.
"""
if step is None:
step = self._step
else:
self._step = step
smd = SummaryMetadata(
plugin_data=SummaryMetadata.PluginData(plugin_name='text'))
if isinstance(textdata, (str, bytes)):
tensor = tf.make_tensor_proto(
values=[textdata.encode(encoding='utf_8')], shape=(1,))
else:
textdata = onp.array(textdata) # convert lists, jax arrays, etc.
datashape = onp.shape(textdata)
if len(datashape) == 1:
tensor = tf.make_tensor_proto(
values=[td.encode(encoding='utf_8') for td in textdata],
shape=(datashape[0],))
elif len(datashape) == 2:
tensor = tf.make_tensor_proto(
values=[
td.encode(encoding='utf_8') for td in onp.reshape(textdata, -1)
],
shape=(datashape[0], datashape[1]))
summary = Summary(
value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)])
self.add_summary(summary, step) |
def get(self, *keys, fallback=None):
"""Retrieve a value in the config, if the value is not available
give the fallback value specified.
"""
section, *keys = keys
out = super().get(section, fallback)
while isinstance(out, dict):
key = keys.pop(0)
out = out.get(key, fallback)
return out | Retrieve a value in the config, if the value is not available
give the fallback value specified. | Below is the the instruction that describes the task:
### Input:
Retrieve a value in the config, if the value is not available
give the fallback value specified.
### Response:
def get(self, *keys, fallback=None):
"""Retrieve a value in the config, if the value is not available
give the fallback value specified.
"""
section, *keys = keys
out = super().get(section, fallback)
while isinstance(out, dict):
key = keys.pop(0)
out = out.get(key, fallback)
return out |
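A standalone sketch of the same nested lookup over a plain dict, with an extra guard so the walk stops cleanly when the keys run out; nested_get and the config layout are illustrative, not part of the original class.

def nested_get(mapping, *keys, fallback=None):
    section, *rest = keys
    out = mapping.get(section, fallback)
    while isinstance(out, dict) and rest:
        out = out.get(rest.pop(0), fallback)
    return out

cfg = {'db': {'primary': {'host': 'localhost', 'port': 5432}}}
print(nested_get(cfg, 'db', 'primary', 'port'))                   # 5432
print(nested_get(cfg, 'db', 'replica', 'host', fallback='n/a'))   # n/a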
def waypoint_count_send(self, seq):
'''wrapper for waypoint_count_send'''
if self.mavlink10():
self.mav.mission_count_send(self.target_system, self.target_component, seq)
else:
self.mav.waypoint_count_send(self.target_system, self.target_component, seq) | wrapper for waypoint_count_send | Below is the the instruction that describes the task:
### Input:
wrapper for waypoint_count_send
### Response:
def waypoint_count_send(self, seq):
'''wrapper for waypoint_count_send'''
if self.mavlink10():
self.mav.mission_count_send(self.target_system, self.target_component, seq)
else:
self.mav.waypoint_count_send(self.target_system, self.target_component, seq) |
def weeks_per_year(year):
'''Number of ISO weeks in a year'''
# 53 weeks: any year starting on Thursday and any leap year starting on Wednesday
jan1 = jwday(gregorian.to_jd(year, 1, 1))
if jan1 == THU or (jan1 == WED and isleap(year)):
return 53
else:
return 52 | Number of ISO weeks in a year | Below is the the instruction that describes the task:
### Input:
Number of ISO weeks in a year
### Response:
def weeks_per_year(year):
'''Number of ISO weeks in a year'''
# 53 weeks: any year starting on Thursday and any leap year starting on Wednesday
jan1 = jwday(gregorian.to_jd(year, 1, 1))
if jan1 == THU or (jan1 == WED and isleap(year)):
return 53
else:
return 52 |
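The same answer can be cross-checked with the standard library: December 28 always falls in a year's last ISO week, so its isocalendar() week number equals the week count for that year.

import datetime

def weeks_per_year_stdlib(year):
    return datetime.date(year, 12, 28).isocalendar()[1]

for y in (2015, 2016, 2020, 2021):
    print(y, weeks_per_year_stdlib(y))   # 53, 52, 53, 52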
def default_unit_label(axis, unit):
"""Set default label for an axis from a `~astropy.units.Unit`
If the axis already has a label, this function does nothing.
Parameters
----------
axis : `~matplotlib.axis.Axis`
the axis to manipulate
unit : `~astropy.units.Unit`
the unit to use for the label
Returns
-------
text : `str`, `None`
the text for the new label, if set, otherwise `None`
"""
if not axis.isDefault_label:
return
label = axis.set_label_text(unit.to_string('latex_inline_dimensional'))
axis.isDefault_label = True
return label.get_text() | Set default label for an axis from a `~astropy.units.Unit`
If the axis already has a label, this function does nothing.
Parameters
----------
axis : `~matplotlib.axis.Axis`
the axis to manipulate
unit : `~astropy.units.Unit`
the unit to use for the label
Returns
-------
text : `str`, `None`
the text for the new label, if set, otherwise `None` | Below is the the instruction that describes the task:
### Input:
Set default label for an axis from a `~astropy.units.Unit`
If the axis already has a label, this function does nothing.
Parameters
----------
axis : `~matplotlib.axis.Axis`
the axis to manipulate
unit : `~astropy.units.Unit`
the unit to use for the label
Returns
-------
text : `str`, `None`
the text for the new label, if set, otherwise `None`
### Response:
def default_unit_label(axis, unit):
"""Set default label for an axis from a `~astropy.units.Unit`
If the axis already has a label, this function does nothing.
Parameters
----------
axis : `~matplotlib.axis.Axis`
the axis to manipulate
unit : `~astropy.units.Unit`
the unit to use for the label
Returns
-------
text : `str`, `None`
the text for the new label, if set, otherwise `None`
"""
if not axis.isDefault_label:
return
label = axis.set_label_text(unit.to_string('latex_inline_dimensional'))
axis.isDefault_label = True
return label.get_text() |
def distributions_for_instances(self, data):
"""
Performs predictions, returning the class distributions.
:param data: the Instances to get the class distributions for
:type data: Instances
:return: the class distribution matrix, None if not a batch predictor
:rtype: ndarray
"""
if self.is_batchpredictor:
return arrays.double_matrix_to_ndarray(self.__distributions(data.jobject))
else:
return None | Performs predictions, returning the class distributions.
:param data: the Instances to get the class distributions for
:type data: Instances
:return: the class distribution matrix, None if not a batch predictor
:rtype: ndarray | Below is the the instruction that describes the task:
### Input:
Performs predictions, returning the class distributions.
:param data: the Instances to get the class distributions for
:type data: Instances
:return: the class distribution matrix, None if not a batch predictor
:rtype: ndarray
### Response:
def distributions_for_instances(self, data):
"""
Performs predictions, returning the class distributions.
:param data: the Instances to get the class distributions for
:type data: Instances
:return: the class distribution matrix, None if not a batch predictor
:rtype: ndarray
"""
if self.is_batchpredictor:
return arrays.double_matrix_to_ndarray(self.__distributions(data.jobject))
else:
return None |
def get_motor_offsets(SERVO_OUTPUT_RAW, ofs, motor_ofs):
'''calculate magnetic field strength from raw magnetometer'''
import mavutil
self = mavutil.mavfile_global
m = SERVO_OUTPUT_RAW
motor_pwm = m.servo1_raw + m.servo2_raw + m.servo3_raw + m.servo4_raw
motor_pwm *= 0.25
rc3_min = self.param('RC3_MIN', 1100)
rc3_max = self.param('RC3_MAX', 1900)
motor = (motor_pwm - rc3_min) / (rc3_max - rc3_min)
if motor > 1.0:
motor = 1.0
if motor < 0.0:
motor = 0.0
motor_offsets0 = motor_ofs[0] * motor
motor_offsets1 = motor_ofs[1] * motor
motor_offsets2 = motor_ofs[2] * motor
ofs = (ofs[0] + motor_offsets0, ofs[1] + motor_offsets1, ofs[2] + motor_offsets2)
return ofs | calculate magnetic field strength from raw magnetometer | Below is the the instruction that describes the task:
### Input:
calculate magnetic field strength from raw magnetometer
### Response:
def get_motor_offsets(SERVO_OUTPUT_RAW, ofs, motor_ofs):
'''calculate magnetic field strength from raw magnetometer'''
import mavutil
self = mavutil.mavfile_global
m = SERVO_OUTPUT_RAW
motor_pwm = m.servo1_raw + m.servo2_raw + m.servo3_raw + m.servo4_raw
motor_pwm *= 0.25
rc3_min = self.param('RC3_MIN', 1100)
rc3_max = self.param('RC3_MAX', 1900)
motor = (motor_pwm - rc3_min) / (rc3_max - rc3_min)
if motor > 1.0:
motor = 1.0
if motor < 0.0:
motor = 0.0
motor_offsets0 = motor_ofs[0] * motor
motor_offsets1 = motor_ofs[1] * motor
motor_offsets2 = motor_ofs[2] * motor
ofs = (ofs[0] + motor_offsets0, ofs[1] + motor_offsets1, ofs[2] + motor_offsets2)
return ofs |
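The compensation above reduces to one scalar: average the four servo PWM values, normalise against the RC3 range, clamp to [0, 1], and scale the per-axis motor offsets by that throttle fraction. A sketch of just that normalisation, using the fallback RC3 limits from the code above and made-up servo values:

def throttle_fraction(servo_raw, rc3_min=1100, rc3_max=1900):
    motor_pwm = sum(servo_raw) * 0.25
    motor = (motor_pwm - rc3_min) / (rc3_max - rc3_min)
    return min(max(motor, 0.0), 1.0)

frac = throttle_fraction([1500, 1520, 1480, 1500])
print(frac)                                     # 0.5
print([o * frac for o in (10.0, -4.0, 2.0)])    # scaled per-axis motor offsets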
def convert_jams(jams_file, output_prefix, csv=False, comment_char='#', namespaces=None):
'''Convert jams to labs.
Parameters
----------
jams_file : str
The path on disk to the jams file in question
output_prefix : str
The file path prefix of the outputs
csv : bool
Whether to output in csv (True) or lab (False) format
comment_char : str
The character used to denote comments
namespaces : list-like
The set of namespace patterns to match for output
'''
if namespaces is None:
raise ValueError('No namespaces provided. Try ".*" for all namespaces.')
jam = jams.load(jams_file)
# Get all the annotations
# Filter down to the unique ones
# For each annotation
# generate the comment string
# generate the output filename
# dump to csv
# Make a counter object for each namespace type
counter = collections.Counter()
annotations = []
for query in namespaces:
annotations.extend(jam.search(namespace=query))
if csv:
suffix = 'csv'
sep = ','
else:
suffix = 'lab'
sep = '\t'
for ann in annotations:
index = counter[ann.namespace]
counter[ann.namespace] += 1
filename = os.path.extsep.join([get_output_name(output_prefix,
ann.namespace,
index),
suffix])
comment = get_comments(jam, ann)
# Dump to disk
lab_dump(ann, comment, filename, sep, comment_char) | Convert jams to labs.
Parameters
----------
jams_file : str
The path on disk to the jams file in question
output_prefix : str
The file path prefix of the outputs
csv : bool
Whether to output in csv (True) or lab (False) format
comment_char : str
The character used to denote comments
namespaces : list-like
The set of namespace patterns to match for output | Below is the the instruction that describes the task:
### Input:
Convert jams to labs.
Parameters
----------
jams_file : str
The path on disk to the jams file in question
output_prefix : str
The file path prefix of the outputs
csv : bool
Whether to output in csv (True) or lab (False) format
comment_char : str
The character used to denote comments
namespaces : list-like
The set of namespace patterns to match for output
### Response:
def convert_jams(jams_file, output_prefix, csv=False, comment_char='#', namespaces=None):
'''Convert jams to labs.
Parameters
----------
jams_file : str
The path on disk to the jams file in question
output_prefix : str
The file path prefix of the outputs
csv : bool
Whether to output in csv (True) or lab (False) format
comment_char : str
The character used to denote comments
namespaces : list-like
The set of namespace patterns to match for output
'''
if namespaces is None:
raise ValueError('No namespaces provided. Try ".*" for all namespaces.')
jam = jams.load(jams_file)
# Get all the annotations
# Filter down to the unique ones
# For each annotation
# generate the comment string
# generate the output filename
# dump to csv
# Make a counter object for each namespace type
counter = collections.Counter()
annotations = []
for query in namespaces:
annotations.extend(jam.search(namespace=query))
if csv:
suffix = 'csv'
sep = ','
else:
suffix = 'lab'
sep = '\t'
for ann in annotations:
index = counter[ann.namespace]
counter[ann.namespace] += 1
filename = os.path.extsep.join([get_output_name(output_prefix,
ann.namespace,
index),
suffix])
comment = get_comments(jam, ann)
# Dump to disk
lab_dump(ann, comment, filename, sep, comment_char) |
def fn_ceil(self, value):
"""
Return the ceiling of a number.
:param value: The number.
:return: The ceiling of the number.
"""
if is_ndarray(value) or isinstance(value, (list, tuple)):
return numpy.ceil(self._to_ndarray(value))
else:
return math.ceil(value) | Return the ceiling of a number.
:param value: The number.
:return: The ceiling of the number. | Below is the the instruction that describes the task:
### Input:
Return the ceiling of a number.
:param value: The number.
:return: The ceiling of the number.
### Response:
def fn_ceil(self, value):
"""
Return the ceiling of a number.
:param value: The number.
:return: The ceiling of the number.
"""
if is_ndarray(value) or isinstance(value, (list, tuple)):
return numpy.ceil(self._to_ndarray(value))
else:
return math.ceil(value) |
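A standalone sketch of the same scalar/array dispatch; the original method's is_ndarray and self._to_ndarray helpers are replaced here with a plain isinstance check and numpy.asarray:

import math
import numpy

def ceil_any(value):
    # Element-wise ceil for array-like input, math.ceil for plain scalars.
    if isinstance(value, (list, tuple, numpy.ndarray)):
        return numpy.ceil(numpy.asarray(value, dtype=float))
    return math.ceil(value)

print(ceil_any(2.1))         # 3
print(ceil_any([0.2, 1.5]))  # [1. 2.]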
def RelayDirectly(self, inventory):
"""
Relay the inventory to the remote client.
Args:
inventory (neo.Network.Inventory):
Returns:
bool: True if relayed successfully. False otherwise.
"""
relayed = False
self.RelayCache[inventory.Hash.ToBytes()] = inventory
for peer in self.Peers:
relayed |= peer.Relay(inventory)
if len(self.Peers) == 0:
if type(BC.Default()) is TestLevelDBBlockchain:
# mock a true result for tests
return True
logger.info("no connected peers")
return relayed | Relay the inventory to the remote client.
Args:
inventory (neo.Network.Inventory):
Returns:
bool: True if relayed successfully. False otherwise. | Below is the the instruction that describes the task:
### Input:
Relay the inventory to the remote client.
Args:
inventory (neo.Network.Inventory):
Returns:
bool: True if relayed successfully. False otherwise.
### Response:
def RelayDirectly(self, inventory):
"""
Relay the inventory to the remote client.
Args:
inventory (neo.Network.Inventory):
Returns:
bool: True if relayed successfully. False otherwise.
"""
relayed = False
self.RelayCache[inventory.Hash.ToBytes()] = inventory
for peer in self.Peers:
relayed |= peer.Relay(inventory)
if len(self.Peers) == 0:
if type(BC.Default()) is TestLevelDBBlockchain:
# mock a true result for tests
return True
logger.info("no connected peers")
return relayed |
def _add_colorbar(ax: Axes, cmap: colors.Colormap, cmap_data: np.ndarray, norm: colors.Normalize):
"""Show a colorbar right of the plot."""
fig = ax.get_figure()
mappable = cm.ScalarMappable(cmap=cmap, norm=norm)
mappable.set_array(cmap_data) # TODO: Or what???
fig.colorbar(mappable, ax=ax) | Show a colorbar right of the plot. | Below is the the instruction that describes the task:
### Input:
Show a colorbar right of the plot.
### Response:
def _add_colorbar(ax: Axes, cmap: colors.Colormap, cmap_data: np.ndarray, norm: colors.Normalize):
"""Show a colorbar right of the plot."""
fig = ax.get_figure()
mappable = cm.ScalarMappable(cmap=cmap, norm=norm)
mappable.set_array(cmap_data) # TODO: Or what???
fig.colorbar(mappable, ax=ax) |
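A hedged usage sketch showing how such a helper fits into an ordinary Matplotlib figure; the data and colormap below are arbitrary placeholders:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm, colors

data = np.random.rand(20)
norm = colors.Normalize(vmin=data.min(), vmax=data.max())
cmap = cm.viridis

fig, ax = plt.subplots()
ax.scatter(range(len(data)), data, c=cmap(norm(data)))
# Same pattern as _add_colorbar: a ScalarMappable carries the cmap/norm pair.
mappable = cm.ScalarMappable(cmap=cmap, norm=norm)
mappable.set_array(data)
fig.colorbar(mappable, ax=ax)
plt.show()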
def _skew_symmetric_translation(pos_A_in_B):
"""
Helper function to get a skew symmetric translation matrix for converting quantities
between frames.
"""
return np.array(
[
0.,
-pos_A_in_B[2],
pos_A_in_B[1],
pos_A_in_B[2],
0.,
-pos_A_in_B[0],
-pos_A_in_B[1],
pos_A_in_B[0],
0.,
]
).reshape((3, 3)) | Helper function to get a skew symmetric translation matrix for converting quantities
between frames. | Below is the the instruction that describes the task:
### Input:
Helper function to get a skew symmetric translation matrix for converting quantities
between frames.
### Response:
def _skew_symmetric_translation(pos_A_in_B):
"""
Helper function to get a skew symmetric translation matrix for converting quantities
between frames.
"""
return np.array(
[
0.,
-pos_A_in_B[2],
pos_A_in_B[1],
pos_A_in_B[2],
0.,
-pos_A_in_B[0],
-pos_A_in_B[1],
pos_A_in_B[0],
0.,
]
).reshape((3, 3)) |
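A quick numerical check of the property this helper relies on: the skew-symmetric matrix S built from a vector p satisfies S @ v == np.cross(p, v):

import numpy as np

def skew(p):
    return np.array([
        [0.0, -p[2], p[1]],
        [p[2], 0.0, -p[0]],
        [-p[1], p[0], 0.0],
    ])

p = np.array([1.0, 2.0, 3.0])
v = np.array([0.5, -1.0, 2.0])
print(np.allclose(skew(p) @ v, np.cross(p, v)))  # True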
def close(self):
"""Close the notification."""
with self.selenium.context(self.selenium.CONTEXT_CHROME):
if self.window.firefox_version > 63:
self.find_primary_button().click()
self.window.wait_for_notification(None)
else:
BaseNotification.close(self) | Close the notification. | Below is the the instruction that describes the task:
### Input:
Close the notification.
### Response:
def close(self):
"""Close the notification."""
with self.selenium.context(self.selenium.CONTEXT_CHROME):
if self.window.firefox_version > 63:
self.find_primary_button().click()
self.window.wait_for_notification(None)
else:
BaseNotification.close(self) |
def equals(self, other):
"""
Determines if two MultiIndex objects have the same labeling information
(the levels themselves do not necessarily have to be the same)
See Also
--------
equal_levels
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if not isinstance(other, MultiIndex):
other_vals = com.values_from_object(ensure_index(other))
return array_equivalent(self._ndarray_values, other_vals)
if self.nlevels != other.nlevels:
return False
if len(self) != len(other):
return False
for i in range(self.nlevels):
self_codes = self.codes[i]
self_codes = self_codes[self_codes != -1]
self_values = algos.take_nd(np.asarray(self.levels[i]._values),
self_codes, allow_fill=False)
other_codes = other.codes[i]
other_codes = other_codes[other_codes != -1]
other_values = algos.take_nd(
np.asarray(other.levels[i]._values),
other_codes, allow_fill=False)
# since we use NaT both datetime64 and timedelta64
# we can have a situation where a level is typed say
# timedelta64 in self (IOW it has other values than NaT)
# but types datetime64 in other (where its all NaT)
# but these are equivalent
if len(self_values) == 0 and len(other_values) == 0:
continue
if not array_equivalent(self_values, other_values):
return False
return True | Determines if two MultiIndex objects have the same labeling information
(the levels themselves do not necessarily have to be the same)
See Also
--------
equal_levels | Below is the the instruction that describes the task:
### Input:
Determines if two MultiIndex objects have the same labeling information
(the levels themselves do not necessarily have to be the same)
See Also
--------
equal_levels
### Response:
def equals(self, other):
"""
Determines if two MultiIndex objects have the same labeling information
(the levels themselves do not necessarily have to be the same)
See Also
--------
equal_levels
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if not isinstance(other, MultiIndex):
other_vals = com.values_from_object(ensure_index(other))
return array_equivalent(self._ndarray_values, other_vals)
if self.nlevels != other.nlevels:
return False
if len(self) != len(other):
return False
for i in range(self.nlevels):
self_codes = self.codes[i]
self_codes = self_codes[self_codes != -1]
self_values = algos.take_nd(np.asarray(self.levels[i]._values),
self_codes, allow_fill=False)
other_codes = other.codes[i]
other_codes = other_codes[other_codes != -1]
other_values = algos.take_nd(
np.asarray(other.levels[i]._values),
other_codes, allow_fill=False)
# since we use NaT both datetime64 and timedelta64
# we can have a situation where a level is typed say
# timedelta64 in self (IOW it has other values than NaT)
# but types datetime64 in other (where its all NaT)
# but these are equivalent
if len(self_values) == 0 and len(other_values) == 0:
continue
if not array_equivalent(self_values, other_values):
return False
return True |
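A short pandas usage sketch (assuming a reasonably recent version): two MultiIndex objects built from the same tuples compare equal, while a flat Index with different values does not:

import pandas as pd

left = pd.MultiIndex.from_tuples([("a", 1), ("b", 2)])
right = pd.MultiIndex.from_tuples([("a", 1), ("b", 2)])
print(left.equals(right))                 # True
print(left.equals(pd.Index(["a", "b"])))  # False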
def puts(self, addr, s):
"""Put string of bytes at given address. Will overwrite any previous
entries.
"""
a = array('B', asbytes(s))
for i in range_g(len(a)):
self._buf[addr+i] = a[i] | Put string of bytes at given address. Will overwrite any previous
entries. | Below is the the instruction that describes the task:
### Input:
Put string of bytes at given address. Will overwrite any previous
entries.
### Response:
def puts(self, addr, s):
"""Put string of bytes at given address. Will overwrite any previous
entries.
"""
a = array('B', asbytes(s))
for i in range_g(len(a)):
self._buf[addr+i] = a[i] |
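A standalone sketch of the same idea, with a plain dict standing in for the object's internal buffer; the start address and payload are illustrative:

from array import array

buf = {}
addr, payload = 0x10, b"\x01\x02\xff"
data = array('B', payload)
for i in range(len(data)):
    # Spread the byte string into a sparse {address: byte} mapping.
    buf[addr + i] = data[i]

print(buf)  # {16: 1, 17: 2, 18: 255}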
def show_workspace(self, name):
"""Show specific workspace."""
if not self.workspace.exists(name):
raise ValueError("Workspace `%s` doesn't exists." % name)
color = Color()
workspaces = self.workspace.list()
self.logger.info("<== %s workspace ==>" % color.colored(name, "green"))
self.logger.info("\tPath: %s" % workspaces[name]["path"])
self.logger.info("\tNumber of repositories: %s"
% color.colored(
len(workspaces[name]["repositories"]),
"yellow"))
repo_colored = color.colored("Repositories", "blue")
path_colored = color.colored("Path", "blue")
trepositories = PrettyTable(
[repo_colored, path_colored, color.colored("+", "blue")])
trepositories.align[repo_colored] = "l"
trepositories.align[path_colored] = "l"
for repo_name in workspaces[name]["repositories"]:
fullname = "%s/%s" % (name, repo_name)
fullpath = find_path(fullname, self.config)[fullname]
try:
repo = Repository(fullpath)
repo_scm = repo.get_scm()
except RepositoryAdapterNotFound:
repo_scm = None
trepositories.add_row(
[color.colored(repo_name, "cyan"), fullpath, repo_scm])
self.logger.info(trepositories) | Show specific workspace. | Below is the the instruction that describes the task:
### Input:
Show specific workspace.
### Response:
def show_workspace(self, name):
"""Show specific workspace."""
if not self.workspace.exists(name):
raise ValueError("Workspace `%s` doesn't exists." % name)
color = Color()
workspaces = self.workspace.list()
self.logger.info("<== %s workspace ==>" % color.colored(name, "green"))
self.logger.info("\tPath: %s" % workspaces[name]["path"])
self.logger.info("\tNumber of repositories: %s"
% color.colored(
len(workspaces[name]["repositories"]),
"yellow"))
repo_colored = color.colored("Repositories", "blue")
path_colored = color.colored("Path", "blue")
trepositories = PrettyTable(
[repo_colored, path_colored, color.colored("+", "blue")])
trepositories.align[repo_colored] = "l"
trepositories.align[path_colored] = "l"
for repo_name in workspaces[name]["repositories"]:
fullname = "%s/%s" % (name, repo_name)
fullpath = find_path(fullname, self.config)[fullname]
try:
repo = Repository(fullpath)
repo_scm = repo.get_scm()
except RepositoryAdapterNotFound:
repo_scm = None
trepositories.add_row(
[color.colored(repo_name, "cyan"), fullpath, repo_scm])
self.logger.info(trepositories) |
def generate_image_beacon(self, event_collection, event_body, timestamp=None):
""" Generates an image beacon URL.
:param event_collection: the name of the collection to insert the
event to
:param event_body: dict, the body of the event to insert the event to
:param timestamp: datetime, optional, the timestamp of the event
"""
event = Event(self.project_id, event_collection, event_body,
timestamp=timestamp)
event_json = event.to_json()
return "{0}/{1}/projects/{2}/events/{3}?api_key={4}&data={5}".format(
self.api.base_url, self.api.api_version, self.project_id, self._url_escape(event_collection),
self.api.write_key.decode(sys.getdefaultencoding()), self._base64_encode(event_json)
) | Generates an image beacon URL.
:param event_collection: the name of the collection to insert the
event to
:param event_body: dict, the body of the event to insert the event to
:param timestamp: datetime, optional, the timestamp of the event | Below is the the instruction that describes the task:
### Input:
Generates an image beacon URL.
:param event_collection: the name of the collection to insert the
event to
:param event_body: dict, the body of the event to insert the event to
:param timestamp: datetime, optional, the timestamp of the event
### Response:
def generate_image_beacon(self, event_collection, event_body, timestamp=None):
""" Generates an image beacon URL.
:param event_collection: the name of the collection to insert the
event to
:param event_body: dict, the body of the event to insert the event to
:param timestamp: datetime, optional, the timestamp of the event
"""
event = Event(self.project_id, event_collection, event_body,
timestamp=timestamp)
event_json = event.to_json()
return "{0}/{1}/projects/{2}/events/{3}?api_key={4}&data={5}".format(
self.api.base_url, self.api.api_version, self.project_id, self._url_escape(event_collection),
self.api.write_key.decode(sys.getdefaultencoding()), self._base64_encode(event_json)
) |
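A standalone sketch of how such a beacon URL can be assembled with only the standard library; the base URL, project id and write key below are placeholders, not real credentials or the client's actual attribute layout:

import base64
import json
from urllib.parse import quote

base_url = "https://api.example.com/3.0"
project_id, write_key = "PROJECT_ID", "WRITE_KEY"
collection = "purchases"

event_json = json.dumps({"item": "golden gull", "price": 5})
data = base64.b64encode(event_json.encode("utf-8")).decode("ascii")

url = "{0}/projects/{1}/events/{2}?api_key={3}&data={4}".format(
    base_url, project_id, quote(collection), write_key, quote(data))
print(url)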
def main(command_line=True, **kwargs):
"""
NAME
utrecht_magic.py
DESCRIPTION
converts Utrecht magnetometer data files to magic_measurements files
SYNTAX
utrecht_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify input file, or
-F FILE: specify output file, default is magic_measurements.txt
-Fsa: specify er_samples format file for appending, default is new er_samples.txt (Not working yet)
-WD: output directory for MagIC files
-ncn: Site Naming Convention
Site to Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
            [2: default] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
            [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
-spc: number of characters to remove to generate sample names from specimen names
-dmy: European date format
-loc LOCNAME : specify location/study name
-lat latitude of samples
-lon longitude of samples
-A: don't average replicate measurements
-mcd: [SO-MAG,SO-SUN,SO-SIGHT...] supply how these samples were oriented
    -dc: B PHI THETA: dc lab field (in microTesla); phi and theta must be input as a tuple "(DC,PHI,THETA)". If not input, the user will be asked for values; this is advantageous if there are differing dc fields between steps or specimens. Note: this currently only works with the decimal IZZI naming convention (XXX.0,1,2,3 where XXX is the treatment temperature and 0 is a zero field step, 1 is in field, 2 is a pTRM check, and 3 is a tail check). All other steps are hardcoded dc_field = 0.
INPUT
Utrecht magnetometer data file
"""
# initialize some stuff
sample_lat = 0.0
sample_lon = 0.0
noave = 0
er_location_name = "unknown"
args = sys.argv
meth_code = "LP-NO"
version_num = pmag.get_version()
site_num = 1
mag_file = ""
dir_path = '.'
MagRecs = []
SpecOuts = []
SampOuts = []
SiteOuts = []
meas_file='magic_measurements.txt'
spec_file='er_specimens.txt'
samp_file='er_samples.txt'
site_file='er_sites.txt'
meth_code = ""
#
# get command line arguments
#
if command_line:
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path = sys.argv[ind+1]
if '-ID' in sys.argv:
ind = sys.argv.index('-ID')
input_dir_path = sys.argv[ind+1]
else:
input_dir_path = dir_path
output_dir_path = dir_path
if "-h" in args:
print(main.__doc__)
return False
if '-F' in args:
ind = args.index("-F")
meas_file = args[ind+1]
if '-Fsp' in args:
ind=args.index("-Fsp")
spec_file=args[ind+1]
if '-Fsa' in args:
ind = args.index("-Fsa")
samp_file = args[ind+1]
if '-Fsi' in args: # LORI addition
ind=args.index("-Fsi")
site_file=args[ind+1]
#try:
# open(samp_file,'r')
# ErSamps,file_type=pmag.magic_read(samp_file)
# print 'sample information will be appended to ', samp_file
#except:
# print samp_file,' not found: sample information will be stored in new er_samples.txt file'
# samp_file = output_dir_path+'/er_samples.txt'
if '-f' in args:
ind = args.index("-f")
mag_file = args[ind+1]
if "-loc" in args:
ind = args.index("-loc")
er_location_name = args[ind+1]
if "-lat" in args:
ind = args.index("-lat")
site_lat = args[ind+1]
if "-lon" in args:
ind = args.index("-lon")
site_lon = args[ind+1]
if "-A" in args:
noave = 1
if "-mcd" in args:
ind = args.index("-mcd")
meth_code = args[ind+1]
#samp_con='5'
if "-ncn" in args:
ind=args.index("-ncn")
samp_con=sys.argv[ind+1]
if "4" in samp_con:
if "-" not in samp_con:
print("option [4] must be in form 4-Z where Z is an integer")
return False, "naming convention option [4] must be in form 4-Z where Z is an integer"
else:
site_num=samp_con.split("-")[1]
samp_con="4"
elif "7" in samp_con:
if "-" not in samp_con:
print("option [7] must be in form 7-Z where Z is an integer")
return False, "naming convention option [7] must be in form 7-Z where Z is an integer"
else:
site_num=samp_con.split("-")[1]
samp_con="7"
else: samp_con="1"
if '-dc' in args:
ind=args.index('-dc')
DC_FIELD,DC_PHI,DC_THETA=list(map(float,args[ind+1].strip('( ) [ ]').split(',')))
DC_FIELD *= 1e-6
yn=''
GET_DC_PARAMS=False
else: DC_FIELD,DC_PHI,DC_THETA=0,0,-90
if '-spc' in args:
ind=args.index("-spc")
specnum=-int(args[ind+1])
else: specnum = 0
if '-dmy' in args:
ind=args.index("-dmy")
dmy_flag=True
else: dmy_flag=False
if not command_line:
dir_path = kwargs.get('dir_path', '.')
input_dir_path = kwargs.get('input_dir_path', dir_path)
output_dir_path = dir_path
meas_file = kwargs.get('meas_file', 'magic_measurements.txt')
mag_file = kwargs.get('mag_file')
spec_file = kwargs.get('spec_file', 'er_specimens.txt') # specimen outfile
samp_file = kwargs.get('samp_file', 'er_samples.txt')
site_file = kwargs.get('site_file', 'er_sites.txt') # site outfile
er_location_name = kwargs.get('location_name', '')
site_lat = kwargs.get('site_lat', '')
site_lon = kwargs.get('site_lon', '')
#oave = kwargs.get('noave', 0) # default (0) means DO average
meth_code = kwargs.get('meth_code', "LP-NO")
specnum = -int(kwargs.get('specnum', 0))
samp_con = kwargs.get('samp_con', '2')
if "4" in samp_con:
if "-" not in samp_con:
print("option [4] must be in form 4-Z where Z is an integer")
return False, "naming convention option [4] must be in form 4-Z where Z is an integer"
else:
site_num=samp_con.split("-")[1]
samp_con="4"
elif "7" in samp_con:
if "-" not in samp_con:
print("option [7] must be in form 7-Z where Z is an integer")
return False, "naming convention option [7] must be in form 7-Z where Z is an integer"
else:
site_num=samp_con.split("-")[1]
samp_con="7"
DC_FIELD,DC_PHI,DC_THETA = list(map(float, kwargs.get('dc_params', (0,0,-90))))
DC_FIELD *= 1e-6
noave = kwargs.get('avg', True)
dmy_flag = kwargs.get('dmy_flag', False)
# format variables
if not mag_file:
return False, 'You must provide a Utrecht formated file'
mag_file = os.path.join(input_dir_path, mag_file)
meas_file = os.path.join(output_dir_path, meas_file)
spec_file = os.path.join(output_dir_path, spec_file)
samp_file = os.path.join(output_dir_path, samp_file)
site_file = os.path.join(output_dir_path, site_file)
# parse data
# Open up the Utrecht file and read the header information
print('mag_file in utrecht_file', mag_file)
AF_or_T = mag_file.split('.')[-1]
data = open(mag_file, 'r')
line = data.readline()
line_items = line.split(',')
operator=line_items[0]
operator=operator.replace("\"","")
machine=line_items[1]
machine=machine.replace("\"","")
machine=machine.rstrip('\n')
print("operator=", operator)
print("machine=", machine)
#read in measurement data
line = data.readline()
while line != "END" and line != '"END"':
ErSpecRec,ErSampRec,ErSiteRec = {},{},{}
line_items = line.split(',')
spec_name=line_items[0]
spec_name=spec_name.replace("\"","")
print("spec_name=", spec_name)
free_string=line_items[1]
free_string=free_string.replace("\"","")
print("free_string=", free_string)
dec=line_items[2]
print("dec=", dec)
inc=line_items[3]
print("inc=", inc)
volume=float(line_items[4])
volume=volume * 1e-6 # enter volume in cm^3, convert to m^3
print("volume=", volume)
bed_plane=line_items[5]
print("bed_plane=", bed_plane)
bed_tilt=line_items[6]
print("bed_tilt=", bed_tilt)
        # Configure the er_ tables
ErSpecRec['er_specimen_name'] = spec_name
if specnum==0: sample_name = spec_name
else: sample_name = spec_name[:specnum]
ErSampRec['er_sample_name'] = sample_name
ErSpecRec['er_sample_name'] = sample_name
er_site_name = pmag.parse_site(sample_name,samp_con,site_num)
ErSpecRec['er_site_name']=er_site_name
ErSpecRec['er_location_name']=er_location_name
ErSampRec['sample_azimuth'] = dec
ErSampRec['sample_dip'] = str(float(inc)-90)
ErSampRec['sample_bed_dip_direction'] = bed_plane
ErSampRec['sample_bed_tilt'] = bed_tilt
ErSiteRec['site_lat'] = site_lat
ErSiteRec['site_lon'] = site_lon
ErSpecRec['magic_method_codes'] = meth_code
ErSampRec['er_location_name'] = er_location_name
ErSiteRec['er_location_name'] = er_location_name
ErSiteRec['er_site_name'] = er_site_name
ErSampRec['er_site_name'] = er_site_name
ErSampRec['er_citation_names'] = 'This study'
SpecOuts.append(ErSpecRec)
SampOuts.append(ErSampRec)
SiteOuts.append(ErSiteRec)
#measurement data
line = data.readline()
line = line.rstrip("\n")
items = line.split(",")
while line != '9999':
print(line)
step=items[0]
step=step.split('.')
step_value=step[0]
step_type = ""
if len(step) == 2:
step_type=step[1]
if step_type=='5':
step_value = items[0]
A=float(items[1])
B=float(items[2])
C=float(items[3])
# convert to MagIC coordinates
Z=-A
X=-B
Y=C
cart = np.array([X, Y, Z]).transpose()
direction = pmag.cart2dir(cart).transpose()
measurement_dec = direction[0]
measurement_inc = direction[1]
measurement_magn_moment = direction[2] * 1.0e-12 # the data are in pico-Am^2 - this converts to Am^2
measurement_magn_volume = direction[2] * 1.0e-12 / volume # data volume normalized - converted to A/m
print("measurement_magn_moment=", measurement_magn_moment)
print("measurement_magn_volume=", measurement_magn_volume)
error = items[4]
date=items[5]
date=date.strip('"')
if date.count("-") > 0:
date=date.split("-")
elif date.count("/") > 0:
date=date.split("/")
else: print("date format seperator cannot be identified")
print(date)
time=items[6]
time=time.strip('"')
time=time.split(":")
print(time)
if dmy_flag:
date_time = date[1] + ":" + date[0] + ":" + date[2] + ":" + time[0] + ":" + time[1] + ":" + "0.0"
else:
date_time = date[0] + ":" + date[1] + ":" + date[2] + ":" + time[0] + ":" + time[1] + ":" + "0.0"
print(date_time)
MagRec = {}
MagRec["er_analyst_mail_names"] = operator
MagRec["magic_instrument_codes"] = "Utrecht_" + machine
MagRec["measurement_description"] = "free string = " + free_string
MagRec["measurement_date"] = date_time
MagRec["er_citation_names"] = "This study"
MagRec['er_location_name'] = er_location_name
MagRec['er_site_name'] = er_site_name
MagRec['er_sample_name'] = sample_name
MagRec['magic_software_packages'] = version_num
MagRec["measurement_temp"] = '%8.3e' % (273) # room temp in kelvin
MagRec["measurement_flag"] = 'g'
MagRec["measurement_standard"] = 'u'
MagRec["magic_experiment_name"] = er_location_name + er_site_name + spec_name
MagRec["measurement_number"] = er_location_name + er_site_name + spec_name + items[0]
MagRec["er_specimen_name"] = spec_name
# MagRec["treatment_ac_field"] = '0'
if AF_or_T.lower() == "th":
MagRec["treatment_temp"] = '%8.3e' % (float(step_value)+273.) # temp in kelvin
MagRec['treatment_ac_field']='0'
meas_type = "LP-DIR-T:LT-T-Z"
else:
MagRec['treatment_temp']='273'
MagRec['treatment_ac_field']='%10.3e'%(float(step_value)*1e-3)
meas_type = "LP-DIR-AF:LT-AF-Z"
MagRec['treatment_dc_field']='0'
if step_value == '0':
meas_type = "LT-NO"
print("step_type=", step_type)
if step_type == '0' and AF_or_T.lower() == 'th':
if meas_type == "":
meas_type = "LT-T-Z"
else:
meas_type = meas_type + ":" + "LT-T-Z"
elif step_type == '1':
if meas_type == "":
meas_type = "LT-T-I"
else:
meas_type = meas_type + ":" + "LT-T-I"
MagRec['treatment_dc_field']='%1.2e'%DC_FIELD
elif step_type == '2':
if meas_type == "":
meas_type = "LT-PTRM-I"
else:
meas_type = meas_type + ":" + "LT-PTRM-I"
MagRec['treatment_dc_field']='%1.2e'%DC_FIELD
elif step_type == '3':
if meas_type == "" :
meas_type = "LT-PTRM-Z"
else:
meas_type = meas_type + ":" + "LT-PTRM-Z"
print("meas_type=", meas_type)
MagRec['treatment_dc_field_phi'] = '%1.2f'%DC_PHI
MagRec['treatment_dc_field_theta'] = '%1.2f'%DC_THETA
MagRec['magic_method_codes'] = meas_type
MagRec["measurement_magn_moment"] = measurement_magn_moment
MagRec["measurement_magn_volume"] = measurement_magn_volume
MagRec["measurement_dec"] = measurement_dec
MagRec["measurement_inc"] = measurement_inc
MagRec['measurement_csd'] = error
# MagRec['measurement_positions'] = '1'
MagRecs.append(MagRec)
line = data.readline()
line = line.rstrip("\n")
items = line.split(",")
line = data.readline()
line = line.rstrip("\n")
items = line.split(",")
# write out the data to MagIC data files
pmag.magic_write(spec_file, SpecOuts, 'er_specimens')
pmag.magic_write(samp_file, SampOuts, 'er_samples')
pmag.magic_write(site_file, SiteOuts, 'er_sites')
# MagOuts = pmag.measurements_methods(MagRecs, noave)
# pmag.magic_write(meas_file, MagOuts, 'magic_measurements')
pmag.magic_write(meas_file, MagRecs, 'magic_measurements')
print("results put in ", meas_file)
print("exit!")
return True, meas_file | NAME
utrecht_magic.py
DESCRIPTION
converts Utrecht magnetometer data files to magic_measurements files
SYNTAX
utrecht_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify input file, or
-F FILE: specify output file, default is magic_measurements.txt
-Fsa: specify er_samples format file for appending, default is new er_samples.txt (Not working yet)
-WD: output directory for MagIC files
-ncn: Site Naming Convention
Site to Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
            [2: default] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
            [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
-spc: number of characters to remove to generate sample names from specimen names
-dmy: European date format
-loc LOCNAME : specify location/study name
-lat latitude of samples
-lon longitude of samples
-A: don't average replicate measurements
-mcd: [SO-MAG,SO-SUN,SO-SIGHT...] supply how these samples were oriented
    -dc: B PHI THETA: dc lab field (in microTesla); phi and theta must be input as a tuple "(DC,PHI,THETA)". If not input, the user will be asked for values; this is advantageous if there are differing dc fields between steps or specimens. Note: this currently only works with the decimal IZZI naming convention (XXX.0,1,2,3 where XXX is the treatment temperature and 0 is a zero field step, 1 is in field, 2 is a pTRM check, and 3 is a tail check). All other steps are hardcoded dc_field = 0.
INPUT
Utrecht magnetometer data file | Below is the the instruction that describes the task:
### Input:
NAME
utrecht_magic.py
DESCRIPTION
converts Utrecht magnetometer data files to magic_measurements files
SYNTAX
utrecht_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify input file, or
-F FILE: specify output file, default is magic_measurements.txt
-Fsa: specify er_samples format file for appending, default is new er_samples.txt (Not working yet)
-WD: output directory for MagIC files
-ncn: Site Naming Convention
Site to Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
            [2: default] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
            [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
-spc: number of characters to remove to generate sample names from specimen names
-dmy: European date format
-loc LOCNAME : specify location/study name
-lat latitude of samples
-lon longitude of samples
-A: don't average replicate measurements
-mcd: [SO-MAG,SO-SUN,SO-SIGHT...] supply how these samples were oriented
    -dc: B PHI THETA: dc lab field (in microTesla); phi and theta must be input as a tuple "(DC,PHI,THETA)". If not input, the user will be asked for values; this is advantageous if there are differing dc fields between steps or specimens. Note: this currently only works with the decimal IZZI naming convention (XXX.0,1,2,3 where XXX is the treatment temperature and 0 is a zero field step, 1 is in field, 2 is a pTRM check, and 3 is a tail check). All other steps are hardcoded dc_field = 0.
INPUT
Utrecht magnetometer data file
### Response:
def main(command_line=True, **kwargs):
"""
NAME
utrecht_magic.py
DESCRIPTION
converts Utrecht magnetometer data files to magic_measurements files
SYNTAX
utrecht_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify input file, or
-F FILE: specify output file, default is magic_measurements.txt
-Fsa: specify er_samples format file for appending, default is new er_samples.txt (Not working yet)
-WD: output directory for MagIC files
-ncn: Site Naming Convention
Site to Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
            [2: default] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
            [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name = sample name
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
-spc: number of characters to remove to generate sample names from specimen names
-dmy: European date format
-loc LOCNAME : specify location/study name
-lat latitude of samples
-lon longitude of samples
-A: don't average replicate measurements
-mcd: [SO-MAG,SO-SUN,SO-SIGHT...] supply how these samples were oriented
    -dc: B PHI THETA: dc lab field (in microTesla); phi and theta must be input as a tuple "(DC,PHI,THETA)". If not input, the user will be asked for values; this is advantageous if there are differing dc fields between steps or specimens. Note: this currently only works with the decimal IZZI naming convention (XXX.0,1,2,3 where XXX is the treatment temperature and 0 is a zero field step, 1 is in field, 2 is a pTRM check, and 3 is a tail check). All other steps are hardcoded dc_field = 0.
INPUT
Utrecht magnetometer data file
"""
# initialize some stuff
sample_lat = 0.0
sample_lon = 0.0
noave = 0
er_location_name = "unknown"
args = sys.argv
meth_code = "LP-NO"
version_num = pmag.get_version()
site_num = 1
mag_file = ""
dir_path = '.'
MagRecs = []
SpecOuts = []
SampOuts = []
SiteOuts = []
meas_file='magic_measurements.txt'
spec_file='er_specimens.txt'
samp_file='er_samples.txt'
site_file='er_sites.txt'
meth_code = ""
#
# get command line arguments
#
if command_line:
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path = sys.argv[ind+1]
if '-ID' in sys.argv:
ind = sys.argv.index('-ID')
input_dir_path = sys.argv[ind+1]
else:
input_dir_path = dir_path
output_dir_path = dir_path
if "-h" in args:
print(main.__doc__)
return False
if '-F' in args:
ind = args.index("-F")
meas_file = args[ind+1]
if '-Fsp' in args:
ind=args.index("-Fsp")
spec_file=args[ind+1]
if '-Fsa' in args:
ind = args.index("-Fsa")
samp_file = args[ind+1]
if '-Fsi' in args: # LORI addition
ind=args.index("-Fsi")
site_file=args[ind+1]
#try:
# open(samp_file,'r')
# ErSamps,file_type=pmag.magic_read(samp_file)
# print 'sample information will be appended to ', samp_file
#except:
# print samp_file,' not found: sample information will be stored in new er_samples.txt file'
# samp_file = output_dir_path+'/er_samples.txt'
if '-f' in args:
ind = args.index("-f")
mag_file = args[ind+1]
if "-loc" in args:
ind = args.index("-loc")
er_location_name = args[ind+1]
if "-lat" in args:
ind = args.index("-lat")
site_lat = args[ind+1]
if "-lon" in args:
ind = args.index("-lon")
site_lon = args[ind+1]
if "-A" in args:
noave = 1
if "-mcd" in args:
ind = args.index("-mcd")
meth_code = args[ind+1]
#samp_con='5'
if "-ncn" in args:
ind=args.index("-ncn")
samp_con=sys.argv[ind+1]
if "4" in samp_con:
if "-" not in samp_con:
print("option [4] must be in form 4-Z where Z is an integer")
return False, "naming convention option [4] must be in form 4-Z where Z is an integer"
else:
site_num=samp_con.split("-")[1]
samp_con="4"
elif "7" in samp_con:
if "-" not in samp_con:
print("option [7] must be in form 7-Z where Z is an integer")
return False, "naming convention option [7] must be in form 7-Z where Z is an integer"
else:
site_num=samp_con.split("-")[1]
samp_con="7"
else: samp_con="1"
if '-dc' in args:
ind=args.index('-dc')
DC_FIELD,DC_PHI,DC_THETA=list(map(float,args[ind+1].strip('( ) [ ]').split(',')))
DC_FIELD *= 1e-6
yn=''
GET_DC_PARAMS=False
else: DC_FIELD,DC_PHI,DC_THETA=0,0,-90
if '-spc' in args:
ind=args.index("-spc")
specnum=-int(args[ind+1])
else: specnum = 0
if '-dmy' in args:
ind=args.index("-dmy")
dmy_flag=True
else: dmy_flag=False
if not command_line:
dir_path = kwargs.get('dir_path', '.')
input_dir_path = kwargs.get('input_dir_path', dir_path)
output_dir_path = dir_path
meas_file = kwargs.get('meas_file', 'magic_measurements.txt')
mag_file = kwargs.get('mag_file')
spec_file = kwargs.get('spec_file', 'er_specimens.txt') # specimen outfile
samp_file = kwargs.get('samp_file', 'er_samples.txt')
site_file = kwargs.get('site_file', 'er_sites.txt') # site outfile
er_location_name = kwargs.get('location_name', '')
site_lat = kwargs.get('site_lat', '')
site_lon = kwargs.get('site_lon', '')
#oave = kwargs.get('noave', 0) # default (0) means DO average
meth_code = kwargs.get('meth_code', "LP-NO")
specnum = -int(kwargs.get('specnum', 0))
samp_con = kwargs.get('samp_con', '2')
if "4" in samp_con:
if "-" not in samp_con:
print("option [4] must be in form 4-Z where Z is an integer")
return False, "naming convention option [4] must be in form 4-Z where Z is an integer"
else:
site_num=samp_con.split("-")[1]
samp_con="4"
elif "7" in samp_con:
if "-" not in samp_con:
print("option [7] must be in form 7-Z where Z is an integer")
return False, "naming convention option [7] must be in form 7-Z where Z is an integer"
else:
site_num=samp_con.split("-")[1]
samp_con="7"
DC_FIELD,DC_PHI,DC_THETA = list(map(float, kwargs.get('dc_params', (0,0,-90))))
DC_FIELD *= 1e-6
noave = kwargs.get('avg', True)
dmy_flag = kwargs.get('dmy_flag', False)
# format variables
if not mag_file:
return False, 'You must provide a Utrecht formated file'
mag_file = os.path.join(input_dir_path, mag_file)
meas_file = os.path.join(output_dir_path, meas_file)
spec_file = os.path.join(output_dir_path, spec_file)
samp_file = os.path.join(output_dir_path, samp_file)
site_file = os.path.join(output_dir_path, site_file)
# parse data
# Open up the Utrecht file and read the header information
print('mag_file in utrecht_file', mag_file)
AF_or_T = mag_file.split('.')[-1]
data = open(mag_file, 'r')
line = data.readline()
line_items = line.split(',')
operator=line_items[0]
operator=operator.replace("\"","")
machine=line_items[1]
machine=machine.replace("\"","")
machine=machine.rstrip('\n')
print("operator=", operator)
print("machine=", machine)
#read in measurement data
line = data.readline()
while line != "END" and line != '"END"':
ErSpecRec,ErSampRec,ErSiteRec = {},{},{}
line_items = line.split(',')
spec_name=line_items[0]
spec_name=spec_name.replace("\"","")
print("spec_name=", spec_name)
free_string=line_items[1]
free_string=free_string.replace("\"","")
print("free_string=", free_string)
dec=line_items[2]
print("dec=", dec)
inc=line_items[3]
print("inc=", inc)
volume=float(line_items[4])
volume=volume * 1e-6 # enter volume in cm^3, convert to m^3
print("volume=", volume)
bed_plane=line_items[5]
print("bed_plane=", bed_plane)
bed_tilt=line_items[6]
print("bed_tilt=", bed_tilt)
        # Configure the er_ tables
ErSpecRec['er_specimen_name'] = spec_name
if specnum==0: sample_name = spec_name
else: sample_name = spec_name[:specnum]
ErSampRec['er_sample_name'] = sample_name
ErSpecRec['er_sample_name'] = sample_name
er_site_name = pmag.parse_site(sample_name,samp_con,site_num)
ErSpecRec['er_site_name']=er_site_name
ErSpecRec['er_location_name']=er_location_name
ErSampRec['sample_azimuth'] = dec
ErSampRec['sample_dip'] = str(float(inc)-90)
ErSampRec['sample_bed_dip_direction'] = bed_plane
ErSampRec['sample_bed_tilt'] = bed_tilt
ErSiteRec['site_lat'] = site_lat
ErSiteRec['site_lon'] = site_lon
ErSpecRec['magic_method_codes'] = meth_code
ErSampRec['er_location_name'] = er_location_name
ErSiteRec['er_location_name'] = er_location_name
ErSiteRec['er_site_name'] = er_site_name
ErSampRec['er_site_name'] = er_site_name
ErSampRec['er_citation_names'] = 'This study'
SpecOuts.append(ErSpecRec)
SampOuts.append(ErSampRec)
SiteOuts.append(ErSiteRec)
#measurement data
line = data.readline()
line = line.rstrip("\n")
items = line.split(",")
while line != '9999':
print(line)
step=items[0]
step=step.split('.')
step_value=step[0]
step_type = ""
if len(step) == 2:
step_type=step[1]
if step_type=='5':
step_value = items[0]
A=float(items[1])
B=float(items[2])
C=float(items[3])
# convert to MagIC coordinates
Z=-A
X=-B
Y=C
cart = np.array([X, Y, Z]).transpose()
direction = pmag.cart2dir(cart).transpose()
measurement_dec = direction[0]
measurement_inc = direction[1]
measurement_magn_moment = direction[2] * 1.0e-12 # the data are in pico-Am^2 - this converts to Am^2
measurement_magn_volume = direction[2] * 1.0e-12 / volume # data volume normalized - converted to A/m
print("measurement_magn_moment=", measurement_magn_moment)
print("measurement_magn_volume=", measurement_magn_volume)
error = items[4]
date=items[5]
date=date.strip('"')
if date.count("-") > 0:
date=date.split("-")
elif date.count("/") > 0:
date=date.split("/")
else: print("date format seperator cannot be identified")
print(date)
time=items[6]
time=time.strip('"')
time=time.split(":")
print(time)
if dmy_flag:
date_time = date[1] + ":" + date[0] + ":" + date[2] + ":" + time[0] + ":" + time[1] + ":" + "0.0"
else:
date_time = date[0] + ":" + date[1] + ":" + date[2] + ":" + time[0] + ":" + time[1] + ":" + "0.0"
print(date_time)
MagRec = {}
MagRec["er_analyst_mail_names"] = operator
MagRec["magic_instrument_codes"] = "Utrecht_" + machine
MagRec["measurement_description"] = "free string = " + free_string
MagRec["measurement_date"] = date_time
MagRec["er_citation_names"] = "This study"
MagRec['er_location_name'] = er_location_name
MagRec['er_site_name'] = er_site_name
MagRec['er_sample_name'] = sample_name
MagRec['magic_software_packages'] = version_num
MagRec["measurement_temp"] = '%8.3e' % (273) # room temp in kelvin
MagRec["measurement_flag"] = 'g'
MagRec["measurement_standard"] = 'u'
MagRec["magic_experiment_name"] = er_location_name + er_site_name + spec_name
MagRec["measurement_number"] = er_location_name + er_site_name + spec_name + items[0]
MagRec["er_specimen_name"] = spec_name
# MagRec["treatment_ac_field"] = '0'
if AF_or_T.lower() == "th":
MagRec["treatment_temp"] = '%8.3e' % (float(step_value)+273.) # temp in kelvin
MagRec['treatment_ac_field']='0'
meas_type = "LP-DIR-T:LT-T-Z"
else:
MagRec['treatment_temp']='273'
MagRec['treatment_ac_field']='%10.3e'%(float(step_value)*1e-3)
meas_type = "LP-DIR-AF:LT-AF-Z"
MagRec['treatment_dc_field']='0'
if step_value == '0':
meas_type = "LT-NO"
print("step_type=", step_type)
if step_type == '0' and AF_or_T.lower() == 'th':
if meas_type == "":
meas_type = "LT-T-Z"
else:
meas_type = meas_type + ":" + "LT-T-Z"
elif step_type == '1':
if meas_type == "":
meas_type = "LT-T-I"
else:
meas_type = meas_type + ":" + "LT-T-I"
MagRec['treatment_dc_field']='%1.2e'%DC_FIELD
elif step_type == '2':
if meas_type == "":
meas_type = "LT-PTRM-I"
else:
meas_type = meas_type + ":" + "LT-PTRM-I"
MagRec['treatment_dc_field']='%1.2e'%DC_FIELD
elif step_type == '3':
if meas_type == "" :
meas_type = "LT-PTRM-Z"
else:
meas_type = meas_type + ":" + "LT-PTRM-Z"
print("meas_type=", meas_type)
MagRec['treatment_dc_field_phi'] = '%1.2f'%DC_PHI
MagRec['treatment_dc_field_theta'] = '%1.2f'%DC_THETA
MagRec['magic_method_codes'] = meas_type
MagRec["measurement_magn_moment"] = measurement_magn_moment
MagRec["measurement_magn_volume"] = measurement_magn_volume
MagRec["measurement_dec"] = measurement_dec
MagRec["measurement_inc"] = measurement_inc
MagRec['measurement_csd'] = error
# MagRec['measurement_positions'] = '1'
MagRecs.append(MagRec)
line = data.readline()
line = line.rstrip("\n")
items = line.split(",")
line = data.readline()
line = line.rstrip("\n")
items = line.split(",")
# write out the data to MagIC data files
pmag.magic_write(spec_file, SpecOuts, 'er_specimens')
pmag.magic_write(samp_file, SampOuts, 'er_samples')
pmag.magic_write(site_file, SiteOuts, 'er_sites')
# MagOuts = pmag.measurements_methods(MagRecs, noave)
# pmag.magic_write(meas_file, MagOuts, 'magic_measurements')
pmag.magic_write(meas_file, MagRecs, 'magic_measurements')
print("results put in ", meas_file)
print("exit!")
return True, meas_file |
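A standalone sketch of the Cartesian-to-direction conversion that pmag.cart2dir is assumed to perform on each (X, Y, Z) triple above, returning declination, inclination and total moment; treat it as an illustration rather than the library's exact implementation:

import numpy as np

def cart_to_dir(x, y, z):
    moment = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    dec = np.degrees(np.arctan2(y, x)) % 360.0   # declination in [0, 360)
    inc = np.degrees(np.arcsin(z / moment))      # inclination in [-90, 90]
    return dec, inc, moment

print(cart_to_dir(-2.0e-12, 1.0e-12, -0.5e-12))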
def slice_by_component( self, component_index, start, end ):
"""
    Return a slice of the alignment, corresponding to a coordinate interval in a specific component.
component_index is one of
an integer offset into the components list
a string indicating the src of the desired component
a component
start and end are relative to the + strand, regardless of the component's strand.
"""
if type( component_index ) == type( 0 ):
ref = self.components[ component_index ]
elif type( component_index ) == type( "" ):
ref = self.get_component_by_src( component_index )
elif type( component_index ) == Component:
ref = component_index
else:
raise ValueError( "can't figure out what to do" )
start_col = ref.coord_to_col( start )
end_col = ref.coord_to_col( end )
if (ref.strand == '-'):
(start_col,end_col) = (end_col,start_col)
    return self.slice( start_col, end_col ) | Return a slice of the alignment, corresponding to a coordinate interval in a specific component.
component_index is one of
an integer offset into the components list
a string indicating the src of the desired component
a component
start and end are relative to the + strand, regardless of the component's strand. | Below is the the instruction that describes the task:
### Input:
    Return a slice of the alignment, corresponding to a coordinate interval in a specific component.
component_index is one of
an integer offset into the components list
a string indicating the src of the desired component
a component
start and end are relative to the + strand, regardless of the component's strand.
### Response:
def slice_by_component( self, component_index, start, end ):
"""
    Return a slice of the alignment, corresponding to a coordinate interval in a specific component.
component_index is one of
an integer offset into the components list
a string indicating the src of the desired component
a component
start and end are relative to the + strand, regardless of the component's strand.
"""
if type( component_index ) == type( 0 ):
ref = self.components[ component_index ]
elif type( component_index ) == type( "" ):
ref = self.get_component_by_src( component_index )
elif type( component_index ) == Component:
ref = component_index
else:
raise ValueError( "can't figure out what to do" )
start_col = ref.coord_to_col( start )
end_col = ref.coord_to_col( end )
if (ref.strand == '-'):
(start_col,end_col) = (end_col,start_col)
return self.slice( start_col, end_col ) |
def summarize_dataframe(self):
"""Summarize default dataframe for this cohort using a hash function.
Useful for confirming the version of data used in various reports, e.g. ipynbs
"""
if self.dataframe_hash:
return(self.dataframe_hash)
else:
df = self._as_dataframe_unmodified()
return(self.dataframe_hash) | Summarize default dataframe for this cohort using a hash function.
Useful for confirming the version of data used in various reports, e.g. ipynbs | Below is the the instruction that describes the task:
### Input:
Summarize default dataframe for this cohort using a hash function.
Useful for confirming the version of data used in various reports, e.g. ipynbs
### Response:
def summarize_dataframe(self):
"""Summarize default dataframe for this cohort using a hash function.
Useful for confirming the version of data used in various reports, e.g. ipynbs
"""
if self.dataframe_hash:
return(self.dataframe_hash)
else:
df = self._as_dataframe_unmodified()
return(self.dataframe_hash) |
def Query(self, query, parameters=None):
"""Queries the database file.
Args:
query (str): SQL query.
parameters (Optional[dict|tuple]): query parameters.
Returns:
list[sqlite3.Row]: rows resulting from the query.
"""
# TODO: catch Warning and return None.
# Note that we cannot pass parameters as a keyword argument here.
# A parameters value of None is not supported.
if parameters:
self._cursor.execute(query, parameters)
else:
self._cursor.execute(query)
return self._cursor.fetchall() | Queries the database file.
Args:
query (str): SQL query.
parameters (Optional[dict|tuple]): query parameters.
Returns:
list[sqlite3.Row]: rows resulting from the query. | Below is the the instruction that describes the task:
### Input:
Queries the database file.
Args:
query (str): SQL query.
parameters (Optional[dict|tuple]): query parameters.
Returns:
list[sqlite3.Row]: rows resulting from the query.
### Response:
def Query(self, query, parameters=None):
"""Queries the database file.
Args:
query (str): SQL query.
parameters (Optional[dict|tuple]): query parameters.
Returns:
list[sqlite3.Row]: rows resulting from the query.
"""
# TODO: catch Warning and return None.
# Note that we cannot pass parameters as a keyword argument here.
# A parameters value of None is not supported.
if parameters:
self._cursor.execute(query, parameters)
else:
self._cursor.execute(query)
return self._cursor.fetchall() |
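A usage sketch against an in-memory SQLite database; the wrapper above does the same thing through its stored cursor, passing parameters only when they are given:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
cursor.execute("CREATE TABLE events (name TEXT, value INTEGER)")
cursor.execute("INSERT INTO events VALUES (?, ?)", ("boot", 1))

cursor.execute("SELECT * FROM events WHERE name = ?", ("boot",))
rows = cursor.fetchall()
print([dict(row) for row in rows])  # [{'name': 'boot', 'value': 1}]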
def file_uptodate(fname, cmp_fname):
"""Check if a file exists, is non-empty and is more recent than cmp_fname.
"""
try:
return (file_exists(fname) and file_exists(cmp_fname) and
getmtime(fname) >= getmtime(cmp_fname))
except OSError:
return False | Check if a file exists, is non-empty and is more recent than cmp_fname. | Below is the the instruction that describes the task:
### Input:
Check if a file exists, is non-empty and is more recent than cmp_fname.
### Response:
def file_uptodate(fname, cmp_fname):
"""Check if a file exists, is non-empty and is more recent than cmp_fname.
"""
try:
return (file_exists(fname) and file_exists(cmp_fname) and
getmtime(fname) >= getmtime(cmp_fname))
except OSError:
return False |
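A standalone sketch of the same staleness test using os.path directly (ignoring the non-empty check that file_exists presumably adds); the paths are illustrative:

import os.path

def needs_rebuild(out_path, in_path):
    # Rebuild when the output is missing or older than its input.
    return not (os.path.exists(out_path) and os.path.exists(in_path)
                and os.path.getmtime(out_path) >= os.path.getmtime(in_path))

print(needs_rebuild("counts.tsv", "reads.bam"))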
def current_version(self, object, relations_as_of=None, check_db=False):
"""
Return the current version of the given object.
The current version is the one having its version_end_date set to NULL.
        If there is no such version then it means the object has been
'deleted' and so there is no current version available. In this case
the function returns None.
Note that if check_db is False and object's version_end_date is None,
this does not check the database to see if there is a newer version
(perhaps created by some other code), it simply returns the passed
object.
``relations_as_of`` is used to fix the point in time for the version;
this affects which related objects are returned when querying for
object relations. See ``VersionManager.version_as_of`` for details on
valid ``relations_as_of`` values.
:param Versionable object: object whose current version will be
returned.
:param mixed relations_as_of: determines point in time used to access
relations. 'start'|'end'|datetime|None
:param bool check_db: Whether or not to look in the database for a
more recent version
:return: Versionable
"""
if object.version_end_date is None and not check_db:
current = object
else:
current = self.current.filter(identity=object.identity).first()
return self.adjust_version_as_of(current, relations_as_of) | Return the current version of the given object.
The current version is the one having its version_end_date set to NULL.
        If there is no such version then it means the object has been
'deleted' and so there is no current version available. In this case
the function returns None.
Note that if check_db is False and object's version_end_date is None,
this does not check the database to see if there is a newer version
(perhaps created by some other code), it simply returns the passed
object.
``relations_as_of`` is used to fix the point in time for the version;
this affects which related objects are returned when querying for
object relations. See ``VersionManager.version_as_of`` for details on
valid ``relations_as_of`` values.
:param Versionable object: object whose current version will be
returned.
:param mixed relations_as_of: determines point in time used to access
relations. 'start'|'end'|datetime|None
:param bool check_db: Whether or not to look in the database for a
more recent version
:return: Versionable | Below is the the instruction that describes the task:
### Input:
Return the current version of the given object.
The current version is the one having its version_end_date set to NULL.
        If there is no such version then it means the object has been
'deleted' and so there is no current version available. In this case
the function returns None.
Note that if check_db is False and object's version_end_date is None,
this does not check the database to see if there is a newer version
(perhaps created by some other code), it simply returns the passed
object.
``relations_as_of`` is used to fix the point in time for the version;
this affects which related objects are returned when querying for
object relations. See ``VersionManager.version_as_of`` for details on
valid ``relations_as_of`` values.
:param Versionable object: object whose current version will be
returned.
:param mixed relations_as_of: determines point in time used to access
relations. 'start'|'end'|datetime|None
:param bool check_db: Whether or not to look in the database for a
more recent version
:return: Versionable
### Response:
def current_version(self, object, relations_as_of=None, check_db=False):
"""
Return the current version of the given object.
The current version is the one having its version_end_date set to NULL.
        If there is no such version then it means the object has been
'deleted' and so there is no current version available. In this case
the function returns None.
Note that if check_db is False and object's version_end_date is None,
this does not check the database to see if there is a newer version
(perhaps created by some other code), it simply returns the passed
object.
``relations_as_of`` is used to fix the point in time for the version;
this affects which related objects are returned when querying for
object relations. See ``VersionManager.version_as_of`` for details on
valid ``relations_as_of`` values.
:param Versionable object: object whose current version will be
returned.
:param mixed relations_as_of: determines point in time used to access
relations. 'start'|'end'|datetime|None
:param bool check_db: Whether or not to look in the database for a
more recent version
:return: Versionable
"""
if object.version_end_date is None and not check_db:
current = object
else:
current = self.current.filter(identity=object.identity).first()
return self.adjust_version_as_of(current, relations_as_of) |
def axes_off(ax):
"""Get rid of all axis ticks, lines, etc.
"""
ax.set_frame_on(False)
ax.axes.get_yaxis().set_visible(False)
ax.axes.get_xaxis().set_visible(False) | Get rid of all axis ticks, lines, etc. | Below is the the instruction that describes the task:
### Input:
Get rid of all axis ticks, lines, etc.
### Response:
def axes_off(ax):
"""Get rid of all axis ticks, lines, etc.
"""
ax.set_frame_on(False)
ax.axes.get_yaxis().set_visible(False)
ax.axes.get_xaxis().set_visible(False) |
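A usage sketch, assuming the axes_off helper above is in scope; the image data is a random placeholder:

import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.imshow(np.random.rand(8, 8), interpolation="nearest")
axes_off(ax)  # no frame, no ticks, no axis labels
plt.show()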
def rewrite_to_secure_url(url, secure_base=None):
"""
Rewrite URL to a Secure URL
@param url URL to be rewritten to a secure URL.
@param secure_base: Base URL of secure site (defaults to CFG_SITE_SECURE_URL).
"""
if secure_base is None:
secure_base = cfg.get('CFG_SITE_SECURE_URL')
url_parts = list(urlparse(url))
url_secure_parts = urlparse(secure_base)
url_parts[0] = url_secure_parts[0]
url_parts[1] = url_secure_parts[1]
return urlunparse(url_parts) | Rewrite URL to a Secure URL
@param url URL to be rewritten to a secure URL.
@param secure_base: Base URL of secure site (defaults to CFG_SITE_SECURE_URL). | Below is the the instruction that describes the task:
### Input:
Rewrite URL to a Secure URL
@param url URL to be rewritten to a secure URL.
@param secure_base: Base URL of secure site (defaults to CFG_SITE_SECURE_URL).
### Response:
def rewrite_to_secure_url(url, secure_base=None):
"""
Rewrite URL to a Secure URL
@param url URL to be rewritten to a secure URL.
@param secure_base: Base URL of secure site (defaults to CFG_SITE_SECURE_URL).
"""
if secure_base is None:
secure_base = cfg.get('CFG_SITE_SECURE_URL')
url_parts = list(urlparse(url))
url_secure_parts = urlparse(secure_base)
url_parts[0] = url_secure_parts[0]
url_parts[1] = url_secure_parts[1]
return urlunparse(url_parts) |
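A standalone sketch of the same scheme/netloc swap with Python 3 imports; the secure base below is an illustrative value rather than a configured CFG_SITE_SECURE_URL:

from urllib.parse import urlparse, urlunparse

def to_secure(url, secure_base="https://example.org"):
    parts = list(urlparse(url))
    secure = urlparse(secure_base)
    parts[0], parts[1] = secure.scheme, secure.netloc  # swap scheme and host
    return urlunparse(parts)

print(to_secure("http://example.org/search?p=test"))
# https://example.org/search?p=test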
def finalize_filename(filename, file_format=None):
""" Replaces invalid characters in filename string, adds image extension and reduces filename length
:param filename: Incomplete filename string
:type filename: str
:param file_format: Format which will be used for filename extension
:type file_format: MimeType
:return: Final filename string
:rtype: str
"""
for char in [' ', '/', '\\', '|', ';', ':', '\n', '\t']:
filename = filename.replace(char, '')
if file_format:
suffix = str(file_format.value)
if file_format.is_tiff_format() and file_format is not MimeType.TIFF:
suffix = str(MimeType.TIFF.value)
filename = '_'.join([filename, str(file_format.value).replace(';', '_')])
filename = '.'.join([filename[:254 - len(suffix)], suffix])
LOGGER.debug("filename=%s", filename)
return filename | Replaces invalid characters in filename string, adds image extension and reduces filename length
:param filename: Incomplete filename string
:type filename: str
:param file_format: Format which will be used for filename extension
:type file_format: MimeType
:return: Final filename string
:rtype: str | Below is the instruction that describes the task:
### Input:
Replaces invalid characters in filename string, adds image extension and reduces filename length
:param filename: Incomplete filename string
:type filename: str
:param file_format: Format which will be used for filename extension
:type file_format: MimeType
:return: Final filename string
:rtype: str
### Response:
def finalize_filename(filename, file_format=None):
""" Replaces invalid characters in filename string, adds image extension and reduces filename length
:param filename: Incomplete filename string
:type filename: str
:param file_format: Format which will be used for filename extension
:type file_format: MimeType
:return: Final filename string
:rtype: str
"""
for char in [' ', '/', '\\', '|', ';', ':', '\n', '\t']:
filename = filename.replace(char, '')
if file_format:
suffix = str(file_format.value)
if file_format.is_tiff_format() and file_format is not MimeType.TIFF:
suffix = str(MimeType.TIFF.value)
filename = '_'.join([filename, str(file_format.value).replace(';', '_')])
filename = '.'.join([filename[:254 - len(suffix)], suffix])
LOGGER.debug("filename=%s", filename)
return filename |
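A hedged usage sketch; the import path and the assumption that MimeType.PNG.value == 'png' follow the sentinelhub-style enum referenced above, but are not confirmed by this excerpt:

from sentinelhub import MimeType  # assumed import path for the enum used above

print(finalize_filename('tile 1;bands=B04|B08', MimeType.PNG))
# expected (assuming MimeType.PNG.value == 'png'): tile1bands=B04B08_png.png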
def _check_cpd_inputs(X, rank):
"""Checks that inputs to optimization function are appropriate.
Parameters
----------
X : ndarray
Tensor used for fitting CP decomposition.
rank : int
Rank of low rank decomposition.
Raises
------
ValueError: If inputs are not suited for CP decomposition.
"""
if X.ndim < 3:
raise ValueError("Array with X.ndim > 2 expected.")
if rank <= 0 or not isinstance(rank, int):
raise ValueError("Rank is invalid.") | Checks that inputs to optimization function are appropriate.
Parameters
----------
X : ndarray
Tensor used for fitting CP decomposition.
rank : int
Rank of low rank decomposition.
Raises
------
ValueError: If inputs are not suited for CP decomposition. | Below is the instruction that describes the task:
### Input:
Checks that inputs to optimization function are appropriate.
Parameters
----------
X : ndarray
Tensor used for fitting CP decomposition.
rank : int
Rank of low rank decomposition.
Raises
------
ValueError: If inputs are not suited for CP decomposition.
### Response:
def _check_cpd_inputs(X, rank):
"""Checks that inputs to optimization function are appropriate.
Parameters
----------
X : ndarray
Tensor used for fitting CP decomposition.
rank : int
Rank of low rank decomposition.
Raises
------
ValueError: If inputs are not suited for CP decomposition.
"""
if X.ndim < 3:
raise ValueError("Array with X.ndim > 2 expected.")
if rank <= 0 or not isinstance(rank, int):
raise ValueError("Rank is invalid.") |
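A quick illustration of the validator; the random tensor and the rank values are arbitrary:

import numpy as np

X = np.random.rand(10, 10, 10)      # a third-order tensor
_check_cpd_inputs(X, rank=3)        # passes silently
# _check_cpd_inputs(X, rank=0)      # would raise ValueError("Rank is invalid.")
# _check_cpd_inputs(X[:, :, 0], 3)  # would raise ValueError (2-way array, not a tensor)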
def get_dip(self):
"""
Compute dip of each surface element and return area-weighted average
value (in range ``(0, 90]``).
Given that dip values are constrained in the range (0, 90], the simple
formula for weighted mean is used.
"""
areas = self._get_areas()
dips = numpy.array([surf.get_dip() for surf in self.surfaces])
return numpy.sum(areas * dips) / numpy.sum(areas) | Compute dip of each surface element and return area-weighted average
value (in range ``(0, 90]``).
Given that dip values are constrained in the range (0, 90], the simple
formula for weighted mean is used. | Below is the instruction that describes the task:
### Input:
Compute dip of each surface element and return area-weighted average
value (in range ``(0, 90]``).
Given that dip values are constrained in the range (0, 90], the simple
formula for weighted mean is used.
### Response:
def get_dip(self):
"""
Compute dip of each surface element and return area-weighted average
value (in range ``(0, 90]``).
Given that dip values are constrained in the range (0, 90], the simple
formula for weighted mean is used.
"""
areas = self._get_areas()
dips = numpy.array([surf.get_dip() for surf in self.surfaces])
return numpy.sum(areas * dips) / numpy.sum(areas) |
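A toy arithmetic check of the area-weighted mean used above; the two areas and dips are made up:

import numpy as np

areas = np.array([2.0, 1.0])   # relative areas of two sub-surfaces
dips = np.array([30.0, 60.0])  # their dips in degrees
print(np.sum(areas * dips) / np.sum(areas))
# -> 40.0, i.e. (2*30 + 1*60) / 3; the larger surface dominates the average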
def listFiles(self, dataset = "", block_name = "", logical_file_name = "",
release_version="", pset_hash="", app_name="", output_module_label="",
run_num=-1, origin_site_name="", lumi_list="", detail=False, validFileOnly=0, sumOverLumi=0):
"""
API to list files in DBS. Either non-wildcarded logical_file_name, non-wildcarded dataset or non-wildcarded block_name is required.
The combination of a non-wildcarded dataset or block_name with a wildcarded logical_file_name is supported.
* For lumi_list the following two json formats are supported:
- [a1, a2, a3,]
- [[a,b], [c, d],]
* lumi_list can be either a list of lumi section numbers as [a1, a2, a3,] or a list of lumi section ranges as [[a,b], [c, d],]. They cannot be mixed.
* If lumi_list is provided, only a single run_num is allowed.
* When an lfn list is present, no run or lumi list is allowed.
* There are five dataset access types: VALID, INVALID, PRODUCTION, DEPRECATED and DELETED.
* One file status: IS_FILE_VALID: 1 or 0.
* When a dataset is INVALID/DEPRECATED/DELETED, DBS considers all the files under it invalid, no matter what value is_file_valid has.
In general, when the dataset is in one of INVALID/DEPRECATED/DELETED, is_file_valid should all be marked as 0, but some old DBS2 data was not.
* When a dataset is VALID/PRODUCTION, is_file_valid is 1 by default. But if an individual file is invalid, then that file's is_file_valid is set to 0.
* DBS uses this logic in its APIs that have the validFileOnly variable.
:param logical_file_name: logical_file_name of the file
:type logical_file_name: str
:param dataset: dataset
:type dataset: str
:param block_name: block name
:type block_name: str
:param release_version: release version
:type release_version: str
:param pset_hash: parameter set hash
:type pset_hash: str
:param app_name: Name of the application
:type app_name: str
:param output_module_label: name of the used output module
:type output_module_label: str
:param run_num: run, run ranges, and run list. Possible formats are: run_num, 'run_min-run_max' or ['run_min-run_max', run1, run2, ...].
:type run_num: int, list, string
:param origin_site_name: site where the file was created
:type origin_site_name: str
:param lumi_list: List containing luminosity sections
:type lumi_list: list
:param detail: Get detailed information about a file
:type detail: bool
:param validFileOnly: default=0 return all the files. when =1, only return files with is_file_valid=1 or dataset_access_type=PRODUCTION or VALID
:type validFileOnly: int
:param sumOverLumi: default=0 event_count is the event_count/file. When sumOverLumi=1 and run_num is specified, the event_count is sum of the event_count/lumi for that run; When sumOverLumi = 1, no other input can be a list, for example no run_num list, lumi list or lfn list.
:type sumOverLumi: int
:returns: List of dictionaries containing the following keys (logical_file_name). If detail parameter is true, the dictionaries contain the following keys (check_sum, branch_hash_id, adler32, block_id, event_count, file_type, create_by, logical_file_name, creation_date, last_modified_by, dataset, block_name, file_id, file_size, last_modification_date, dataset_id, file_type_id, auto_cross_section, md5, is_file_valid)
:rtype: list of dicts
"""
logical_file_name = logical_file_name.replace("*", "%")
release_version = release_version.replace("*", "%")
pset_hash = pset_hash.replace("*", "%")
app_name = app_name.replace("*", "%")
block_name = block_name.replace("*", "%")
origin_site_name = origin_site_name.replace("*", "%")
dataset = dataset.replace("*", "%")
#
# run_num=1 caused full table scan and CERN DBS reported some of the queries ran more than 50 hours
# We will disable all the run_num=1 calls in DBS. Run_num=1 will be OK when logical_file_name is given.
# YG Jan. 15 2019
#
if (run_num != -1 and logical_file_name ==''):
for r in parseRunRange(run_num):
if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long):
if r == 1 or r == '1':
dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input.",
self.logger.exception)
elif isinstance(r, run_tuple):
if r[0] == r[1]:
dbsExceptionHandler("dbsException-invalid-input", "DBS run range must be apart at least by 1.",
self.logger.exception)
elif r[0] <= 1 <= r[1]:
dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input.",
self.logger.exception)
if lumi_list:
if run_num ==-1 or not run_num :
dbsExceptionHandler("dbsException-invalid-input", "When lumi_list is given, require a single run_num.", self.logger.exception)
elif sumOverLumi == 1:
dbsExceptionHandler("dbsException-invalid-input", "lumi_list and sumOverLumi=1 cannot be set at the same time becaue nesting of WITH clause within WITH clause not supported yet by Oracle. ", self.logger.exception)
else:
try:
lumi_list = self.dbsUtils2.decodeLumiIntervals(lumi_list)
except Exception as de:
dbsExceptionHandler("dbsException-invalid-input", "Invalid lumi_list input: "+ str(de), self.logger.exception)
else:
if not isinstance(run_num, list):
if run_num ==1 or run_num == '1':
dbsExceptionHandler("dbsException-invalid-input", "files API does not supprt run_num=1 when no lumi.", self.logger.exception)
else:
if 1 in run_num or '1' in run_num :
dbsExceptionHandler("dbsException-invalid-input", "files API does not supprt run_num=1 when no lumi.", self.logger.exception)
if int(sumOverLumi) == 1 and (isinstance(run_num, list) or isinstance(logical_file_name, list)):
dbsExceptionHandler("dbsException-invalid-input", "When sumOverLumi=1, no lfn list or run_num list allowed becaue nesting of WITH clause within WITH clause not supported yet by Oracle. ", self.logger.exception)
detail = detail in (True, 1, "True", "1", 'true')
output_module_label = output_module_label.replace("*", "%")
try:
result = self.dbsFile.listFiles(dataset, block_name, logical_file_name, release_version, pset_hash, app_name,
output_module_label, run_num, origin_site_name, lumi_list, detail,
validFileOnly, sumOverLumi)
for item in result:
yield item
except HTTPError as he:
raise he
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listFiles. %s \n Exception trace: \n %s" % (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', ex.message,
self.logger.exception, sError) | API to list files in DBS. Either non-wildcarded logical_file_name, non-wildcarded dataset or non-wildcarded block_name is required.
The combination of a non-wildcarded dataset or block_name with a wildcarded logical_file_name is supported.
* For lumi_list the following two json formats are supported:
- [a1, a2, a3,]
- [[a,b], [c, d],]
* lumi_list can be either a list of lumi section numbers as [a1, a2, a3,] or a list of lumi section ranges as [[a,b], [c, d],]. They cannot be mixed.
* If lumi_list is provided, only a single run_num is allowed.
* When an lfn list is present, no run or lumi list is allowed.
* There are five dataset access types: VALID, INVALID, PRODUCTION, DEPRECATED and DELETED.
* One file status: IS_FILE_VALID: 1 or 0.
* When a dataset is INVALID/DEPRECATED/DELETED, DBS considers all the files under it invalid, no matter what value is_file_valid has.
In general, when the dataset is in one of INVALID/DEPRECATED/DELETED, is_file_valid should all be marked as 0, but some old DBS2 data was not.
* When a dataset is VALID/PRODUCTION, is_file_valid is 1 by default. But if an individual file is invalid, then that file's is_file_valid is set to 0.
* DBS uses this logic in its APIs that have the validFileOnly variable.
:param logical_file_name: logical_file_name of the file
:type logical_file_name: str
:param dataset: dataset
:type dataset: str
:param block_name: block name
:type block_name: str
:param release_version: release version
:type release_version: str
:param pset_hash: parameter set hash
:type pset_hash: str
:param app_name: Name of the application
:type app_name: str
:param output_module_label: name of the used output module
:type output_module_label: str
:param run_num: run, run ranges, and run list. Possible formats are: run_num, 'run_min-run_max' or ['run_min-run_max', run1, run2, ...].
:type run_num: int, list, string
:param origin_site_name: site where the file was created
:type origin_site_name: str
:param lumi_list: List containing luminosity sections
:type lumi_list: list
:param detail: Get detailed information about a file
:type detail: bool
:param validFileOnly: default=0 return all the files. when =1, only return files with is_file_valid=1 or dataset_access_type=PRODUCTION or VALID
:type validFileOnly: int
:param sumOverLumi: default=0 event_count is the event_count/file. When sumOverLumi=1 and run_num is specified, the event_count is sum of the event_count/lumi for that run; When sumOverLumi = 1, no other input can be a list, for example no run_num list, lumi list or lfn list.
:type sumOverLumi: int
:returns: List of dictionaries containing the following keys (logical_file_name). If detail parameter is true, the dictionaries contain the following keys (check_sum, branch_hash_id, adler32, block_id, event_count, file_type, create_by, logical_file_name, creation_date, last_modified_by, dataset, block_name, file_id, file_size, last_modification_date, dataset_id, file_type_id, auto_cross_section, md5, is_file_valid)
:rtype: list of dicts | Below is the instruction that describes the task:
### Input:
API to list files in DBS. Either non-wildcarded logical_file_name, non-wildcarded dataset or non-wildcarded block_name is required.
The combination of a non-wildcarded dataset or block_name with a wildcarded logical_file_name is supported.
* For lumi_list the following two json formats are supported:
- [a1, a2, a3,]
- [[a,b], [c, d],]
* lumi_list can be either a list of lumi section numbers as [a1, a2, a3,] or a list of lumi section ranges as [[a,b], [c, d],]. They cannot be mixed.
* If lumi_list is provided, only a single run_num is allowed.
* When an lfn list is present, no run or lumi list is allowed.
* There are five dataset access types: VALID, INVALID, PRODUCTION, DEPRECATED and DELETED.
* One file status: IS_FILE_VALID: 1 or 0.
* When a dataset is INVALID/DEPRECATED/DELETED, DBS considers all the files under it invalid, no matter what value is_file_valid has.
In general, when the dataset is in one of INVALID/DEPRECATED/DELETED, is_file_valid should all be marked as 0, but some old DBS2 data was not.
* When a dataset is VALID/PRODUCTION, is_file_valid is 1 by default. But if an individual file is invalid, then that file's is_file_valid is set to 0.
* DBS uses this logic in its APIs that have the validFileOnly variable.
:param logical_file_name: logical_file_name of the file
:type logical_file_name: str
:param dataset: dataset
:type dataset: str
:param block_name: block name
:type block_name: str
:param release_version: release version
:type release_version: str
:param pset_hash: parameter set hash
:type pset_hash: str
:param app_name: Name of the application
:type app_name: str
:param output_module_label: name of the used output module
:type output_module_label: str
:param run_num: run, run ranges, and run list. Possible formats are: run_num, 'run_min-run_max' or ['run_min-run_max', run1, run2, ...].
:type run_num: int, list, string
:param origin_site_name: site where the file was created
:type origin_site_name: str
:param lumi_list: List containing luminosity sections
:type lumi_list: list
:param detail: Get detailed information about a file
:type detail: bool
:param validFileOnly: default=0 return all the files. when =1, only return files with is_file_valid=1 or dataset_access_type=PRODUCTION or VALID
:type validFileOnly: int
:param sumOverLumi: default=0 event_count is the event_count/file. When sumOverLumi=1 and run_num is specified, the event_count is sum of the event_count/lumi for that run; When sumOverLumi = 1, no other input can be a list, for example no run_num list, lumi list or lfn list.
:type sumOverLumi: int
:returns: List of dictionaries containing the following keys (logical_file_name). If detail parameter is true, the dictionaries contain the following keys (check_sum, branch_hash_id, adler32, block_id, event_count, file_type, create_by, logical_file_name, creation_date, last_modified_by, dataset, block_name, file_id, file_size, last_modification_date, dataset_id, file_type_id, auto_cross_section, md5, is_file_valid)
:rtype: list of dicts
### Response:
def listFiles(self, dataset = "", block_name = "", logical_file_name = "",
release_version="", pset_hash="", app_name="", output_module_label="",
run_num=-1, origin_site_name="", lumi_list="", detail=False, validFileOnly=0, sumOverLumi=0):
"""
API to list files in DBS. Either non-wildcarded logical_file_name, non-wildcarded dataset or non-wildcarded block_name is required.
The combination of a non-wildcarded dataset or block_name with a wildcarded logical_file_name is supported.
* For lumi_list the following two json formats are supported:
- [a1, a2, a3,]
- [[a,b], [c, d],]
* lumi_list can be either a list of lumi section numbers as [a1, a2, a3,] or a list of lumi section ranges as [[a,b], [c, d],]. They cannot be mixed.
* If lumi_list is provided, only a single run_num is allowed.
* When an lfn list is present, no run or lumi list is allowed.
* There are five dataset access types: VALID, INVALID, PRODUCTION, DEPRECATED and DELETED.
* One file status: IS_FILE_VALID: 1 or 0.
* When a dataset is INVALID/DEPRECATED/DELETED, DBS considers all the files under it invalid, no matter what value is_file_valid has.
In general, when the dataset is in one of INVALID/DEPRECATED/DELETED, is_file_valid should all be marked as 0, but some old DBS2 data was not.
* When a dataset is VALID/PRODUCTION, is_file_valid is 1 by default. But if an individual file is invalid, then that file's is_file_valid is set to 0.
* DBS uses this logic in its APIs that have the validFileOnly variable.
:param logical_file_name: logical_file_name of the file
:type logical_file_name: str
:param dataset: dataset
:type dataset: str
:param block_name: block name
:type block_name: str
:param release_version: release version
:type release_version: str
:param pset_hash: parameter set hash
:type pset_hash: str
:param app_name: Name of the application
:type app_name: str
:param output_module_label: name of the used output module
:type output_module_label: str
:param run_num: run, run ranges, and run list. Possible formats are: run_num, 'run_min-run_max' or ['run_min-run_max', run1, run2, ...].
:type run_num: int, list, string
:param origin_site_name: site where the file was created
:type origin_site_name: str
:param lumi_list: List containing luminosity sections
:type lumi_list: list
:param detail: Get detailed information about a file
:type detail: bool
:param validFileOnly: default=0 return all the files. when =1, only return files with is_file_valid=1 or dataset_access_type=PRODUCTION or VALID
:type validFileOnly: int
:param sumOverLumi: default=0 event_count is the event_count/file. When sumOverLumi=1 and run_num is specified, the event_count is sum of the event_count/lumi for that run; When sumOverLumi = 1, no other input can be a list, for example no run_num list, lumi list or lfn list.
:type sumOverLumi: int
:returns: List of dictionaries containing the following keys (logical_file_name). If detail parameter is true, the dictionaries contain the following keys (check_sum, branch_hash_id, adler32, block_id, event_count, file_type, create_by, logical_file_name, creation_date, last_modified_by, dataset, block_name, file_id, file_size, last_modification_date, dataset_id, file_type_id, auto_cross_section, md5, is_file_valid)
:rtype: list of dicts
"""
logical_file_name = logical_file_name.replace("*", "%")
release_version = release_version.replace("*", "%")
pset_hash = pset_hash.replace("*", "%")
app_name = app_name.replace("*", "%")
block_name = block_name.replace("*", "%")
origin_site_name = origin_site_name.replace("*", "%")
dataset = dataset.replace("*", "%")
#
# run_num=1 caused full table scan and CERN DBS reported some of the queries ran more than 50 hours
# We will disable all the run_num=1 calls in DBS. Run_num=1 will be OK when logical_file_name is given.
# YG Jan. 15 2019
#
if (run_num != -1 and logical_file_name ==''):
for r in parseRunRange(run_num):
if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long):
if r == 1 or r == '1':
dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input.",
self.logger.exception)
elif isinstance(r, run_tuple):
if r[0] == r[1]:
dbsExceptionHandler("dbsException-invalid-input", "DBS run range must be apart at least by 1.",
self.logger.exception)
elif r[0] <= 1 <= r[1]:
dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input.",
self.logger.exception)
if lumi_list:
if run_num ==-1 or not run_num :
dbsExceptionHandler("dbsException-invalid-input", "When lumi_list is given, require a single run_num.", self.logger.exception)
elif sumOverLumi == 1:
dbsExceptionHandler("dbsException-invalid-input", "lumi_list and sumOverLumi=1 cannot be set at the same time becaue nesting of WITH clause within WITH clause not supported yet by Oracle. ", self.logger.exception)
else:
try:
lumi_list = self.dbsUtils2.decodeLumiIntervals(lumi_list)
except Exception as de:
dbsExceptionHandler("dbsException-invalid-input", "Invalid lumi_list input: "+ str(de), self.logger.exception)
else:
if not isinstance(run_num, list):
if run_num ==1 or run_num == '1':
dbsExceptionHandler("dbsException-invalid-input", "files API does not supprt run_num=1 when no lumi.", self.logger.exception)
else:
if 1 in run_num or '1' in run_num :
dbsExceptionHandler("dbsException-invalid-input", "files API does not supprt run_num=1 when no lumi.", self.logger.exception)
if int(sumOverLumi) == 1 and (isinstance(run_num, list) or isinstance(logical_file_name, list)):
dbsExceptionHandler("dbsException-invalid-input", "When sumOverLumi=1, no lfn list or run_num list allowed becaue nesting of WITH clause within WITH clause not supported yet by Oracle. ", self.logger.exception)
detail = detail in (True, 1, "True", "1", 'true')
output_module_label = output_module_label.replace("*", "%")
try:
result = self.dbsFile.listFiles(dataset, block_name, logical_file_name, release_version, pset_hash, app_name,
output_module_label, run_num, origin_site_name, lumi_list, detail,
validFileOnly, sumOverLumi)
for item in result:
yield item
except HTTPError as he:
raise he
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listFiles. %s \n Exception trace: \n %s" % (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', ex.message,
self.logger.exception, sError) |
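A client-side sketch of how this server API is typically reached, assuming the standard DBS3 Python client (dbs.apis.dbsClient.DbsApi); the reader URL and dataset name are placeholders, not taken from this excerpt:

from dbs.apis.dbsClient import DbsApi  # assumed DBS3 client package

api = DbsApi(url='https://cmsweb.cern.ch/dbs/prod/global/DBSReader')
files = api.listFiles(dataset='/ZMM/Summer11-DESIGN42_V11_428_SLHC1-v1/GEN-SIM', detail=True, validFileOnly=1)
for f in files[:3]:
    print(f['logical_file_name'], f['event_count'])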
def _rebuild_entries(self):
"""
Recreates the entries master list based on the groups hierarchy (order matters here,
since the parser uses order to determine lineage).
"""
self.entries = []
def collapse_entries(group):
for entry in group.entries:
self.entries.append(entry)
for subgroup in group.children:
collapse_entries(subgroup)
collapse_entries(self.root) | Recreates the entries master list based on the groups hierarchy (order matters here,
since the parser uses order to determine lineage). | Below is the instruction that describes the task:
### Input:
Recreates the entries master list based on the groups hierarchy (order matters here,
since the parser uses order to determine lineage).
### Response:
def _rebuild_entries(self):
"""
Recreates the entries master list based on the groups hierarchy (order matters here,
since the parser uses order to determine lineage).
"""
self.entries = []
def collapse_entries(group):
for entry in group.entries:
self.entries.append(entry)
for subgroup in group.children:
collapse_entries(subgroup)
collapse_entries(self.root) |
def getDescriptor(self, desc_type, desc_index, length, endpoint = -1):
r"""Retrieves a descriptor from the device identified by the type
and index of the descriptor.
Arguments:
desc_type: descriptor type.
desc_index: index of the descriptor.
length: descriptor length.
endpoint: ignored.
"""
return control.get_descriptor(self.dev, length, desc_type, desc_index) | r"""Retrieves a descriptor from the device identified by the type
and index of the descriptor.
Arguments:
desc_type: descriptor type.
desc_index: index of the descriptor.
length: descriptor length.
endpoint: ignored. | Below is the instruction that describes the task:
### Input:
r"""Retrieves a descriptor from the device identified by the type
and index of the descriptor.
Arguments:
desc_type: descriptor type.
desc_index: index of the descriptor.
length: descriptor length.
endpoint: ignored.
### Response:
def getDescriptor(self, desc_type, desc_index, length, endpoint = -1):
r"""Retrieves a descriptor from the device identified by the type
and index of the descriptor.
Arguments:
desc_type: descriptor type.
desc_index: index of the descriptor.
length: descriptor length.
endpoint: ignored.
"""
return control.get_descriptor(self.dev, length, desc_type, desc_index) |
def geo_shape(self, sides=5, center=None, distance=None):
"""
Return a WKT string for a POLYGON with a given number of sides.
The polygon is defined by its center (random point if not provided) and
the distance (random distance if not provided; in km) of the points to
its center.
"""
assert isinstance(sides, int)
if distance is None:
distance = self.random_int(100, 1000)
else:
# 6371 => earth radius in km
# assert that shape radius is maximum half of earth's circumference
assert isinstance(distance, int)
assert distance <= EARTH_RADIUS * math.pi, \
'distance must not be greater than half of earth\'s circumference'
if center is None:
# required minimal spherical distance from north/southpole
dp = distance * 180.0 / EARTH_RADIUS / math.pi
center = self.geo_point(lat_min=-90.0 + dp, lat_max=90.0 - dp)
else:
assert -180.0 <= center[0] <= 180.0, 'Longitude out of bounds'
assert -90.0 <= center[1] <= 90.0, 'Latitude out of bounds'
angles = list(self.random_sample(range(360), sides))
angles.sort()
points = [_dest_point(center, distance, bearing, EARTH_RADIUS) for bearing in angles]
# close polygon
points.append(points[0])
path = ', '.join([' '.join(p) for p in ([str(lon), str(lat)] for lon, lat in points)])
return f'POLYGON (( {path} ))' | Return a WKT string for a POLYGON with a given number of sides.
The polygon is defined by its center (random point if not provided) and
the distance (random distance if not provided; in km) of the points to
its center. | Below is the instruction that describes the task:
### Input:
Return a WKT string for a POLYGON with a given number of sides.
The polygon is defined by its center (random point if not provided) and
the distance (random distance if not provided; in km) of the points to
its center.
### Response:
def geo_shape(self, sides=5, center=None, distance=None):
"""
Return a WKT string for a POLYGON with a given number of sides.
The polygon is defined by its center (random point if not provided) and
the distance (random distance if not provided; in km) of the points to
its center.
"""
assert isinstance(sides, int)
if distance is None:
distance = self.random_int(100, 1000)
else:
# 6371 => earth radius in km
# assert that shape radius is maximum half of earth's circumference
assert isinstance(distance, int)
assert distance <= EARTH_RADIUS * math.pi, \
'distance must not be greater than half of earth\'s circumference'
if center is None:
# required minimal spherical distance from north/southpole
dp = distance * 180.0 / EARTH_RADIUS / math.pi
center = self.geo_point(lat_min=-90.0 + dp, lat_max=90.0 - dp)
else:
assert -180.0 <= center[0] <= 180.0, 'Longitude out of bounds'
assert -90.0 <= center[1] <= 90.0, 'Latitude out of bounds'
angles = list(self.random_sample(range(360), sides))
angles.sort()
points = [_dest_point(center, distance, bearing, EARTH_RADIUS) for bearing in angles]
# close polygon
points.append(points[0])
path = ', '.join([' '.join(p) for p in ([str(lon), str(lat)] for lon, lat in points)])
return f'POLYGON (( {path} ))' |
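The _dest_point helper called above is not shown in this excerpt; a plausible stand-in based on the standard spherical destination-point formula could look like the sketch below (the name, signature and units are assumptions):

import math

def _dest_point(center, distance, bearing, radius):
    # Assumed helper: great-circle destination from (lon, lat) in degrees,
    # travelling `distance` km on `bearing` degrees over a sphere of `radius` km.
    lon1, lat1 = math.radians(center[0]), math.radians(center[1])
    b = math.radians(bearing)
    d = distance / radius  # angular distance
    lat2 = math.asin(math.sin(lat1) * math.cos(d)
                     + math.cos(lat1) * math.sin(d) * math.cos(b))
    lon2 = lon1 + math.atan2(math.sin(b) * math.sin(d) * math.cos(lat1),
                             math.cos(d) - math.sin(lat1) * math.sin(lat2))
    return math.degrees(lon2), math.degrees(lat2)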
def share(self, accounts):
"""
Create a share
"""
if not isinstance(accounts, (list, tuple)):
msg = "Video.share expects an iterable argument"
raise exceptions.PyBrightcoveError(msg)
raise exceptions.PyBrightcoveError("Not yet implemented") | Create a share | Below is the the instruction that describes the task:
### Input:
Create a share
### Response:
def share(self, accounts):
"""
Create a share
"""
if not isinstance(accounts, (list, tuple)):
msg = "Video.share expects an iterable argument"
raise exceptions.PyBrightcoveError(msg)
raise exceptions.PyBrightcoveError("Not yet implemented") |
def poly2poly(line):
"""
Parse a string of text containing a DS9 description of a polygon.
This function works but is not very robust due to the constraints of healpy.
Parameters
----------
line : str
A string containing a DS9 region command for a polygon.
Returns
-------
poly : [ra, dec, ...]
The coordinates of the polygon.
"""
words = re.split('[(\s,)]', line)
ras = np.array(words[1::2])
decs = np.array(words[2::2])
coords = []
for ra, dec in zip(ras, decs):
if ra.strip() == '' or dec.strip() == '':
continue
if ":" in ra:
pos = SkyCoord(Angle(ra, unit=u.hour), Angle(dec, unit=u.degree))
else:
pos = SkyCoord(Angle(ra, unit=u.degree), Angle(dec, unit=u.degree))
# only add this point if it is some distance from the previous one
coords.extend([pos.ra.degree, pos.dec.degree])
return coords | Parse a string of text containing a DS9 description of a polygon.
This function works but is not very robust due to the constraints of healpy.
Parameters
----------
line : str
A string containing a DS9 region command for a polygon.
Returns
-------
poly : [ra, dec, ...]
The coordinates of the polygon. | Below is the instruction that describes the task:
### Input:
Parse a string of text containing a DS9 description of a polygon.
This function works but is not very robust due to the constraints of healpy.
Parameters
----------
line : str
A string containing a DS9 region command for a polygon.
Returns
-------
poly : [ra, dec, ...]
The coordinates of the polygon.
### Response:
def poly2poly(line):
"""
Parse a string of text containing a DS9 description of a polygon.
This function works but is not very robust due to the constraints of healpy.
Parameters
----------
line : str
A string containing a DS9 region command for a polygon.
Returns
-------
poly : [ra, dec, ...]
The coordinates of the polygon.
"""
words = re.split('[(\s,)]', line)
ras = np.array(words[1::2])
decs = np.array(words[2::2])
coords = []
for ra, dec in zip(ras, decs):
if ra.strip() == '' or dec.strip() == '':
continue
if ":" in ra:
pos = SkyCoord(Angle(ra, unit=u.hour), Angle(dec, unit=u.degree))
else:
pos = SkyCoord(Angle(ra, unit=u.degree), Angle(dec, unit=u.degree))
# only add this point if it is some distance from the previous one
coords.extend([pos.ra.degree, pos.dec.degree])
return coords |
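An illustrative call; the coordinates are placeholders, the module-level astropy/numpy imports are assumed, and note that the simple regex split above expects comma-separated values without embedded blanks:

line = 'polygon(12:34:56.7,-45.0,12:35:10.0,-45.1,12:34:40.0,-45.2)'
print(poly2poly(line))
# -> [188.73625, -45.0, 188.7916..., -45.1, 188.6666..., -45.2]
#    (RA converted from hours to degrees, Dec kept in degrees)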