code (string, 75 to 104k chars) | docstring (string, 1 to 46.9k chars) | text (string, 164 to 112k chars)
---|---|---|
def wait(self, **kwargs):
"""
Block until the container stops, then return its exit code. Similar to
the ``docker wait`` command.
Args:
timeout (int): Request timeout
condition (str): Wait until a container state reaches the given
condition, either ``not-running`` (default), ``next-exit``,
or ``removed``
Returns:
(dict): The API's response as a Python dictionary, including
the container's exit code under the ``StatusCode`` attribute.
Raises:
:py:class:`requests.exceptions.ReadTimeout`
If the timeout is exceeded.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.wait(self.id, **kwargs) | Block until the container stops, then return its exit code. Similar to
the ``docker wait`` command.
Args:
timeout (int): Request timeout
condition (str): Wait until a container state reaches the given
condition, either ``not-running`` (default), ``next-exit``,
or ``removed``
Returns:
(dict): The API's response as a Python dictionary, including
the container's exit code under the ``StatusCode`` attribute.
Raises:
:py:class:`requests.exceptions.ReadTimeout`
If the timeout is exceeded.
:py:class:`docker.errors.APIError`
If the server returns an error. | Below is the instruction that describes the task:
### Input:
Block until the container stops, then return its exit code. Similar to
the ``docker wait`` command.
Args:
timeout (int): Request timeout
condition (str): Wait until a container state reaches the given
condition, either ``not-running`` (default), ``next-exit``,
or ``removed``
Returns:
(dict): The API's response as a Python dictionary, including
the container's exit code under the ``StatusCode`` attribute.
Raises:
:py:class:`requests.exceptions.ReadTimeout`
If the timeout is exceeded.
:py:class:`docker.errors.APIError`
If the server returns an error.
### Response:
def wait(self, **kwargs):
"""
Block until the container stops, then return its exit code. Similar to
the ``docker wait`` command.
Args:
timeout (int): Request timeout
condition (str): Wait until a container state reaches the given
condition, either ``not-running`` (default), ``next-exit``,
or ``removed``
Returns:
(dict): The API's response as a Python dictionary, including
the container's exit code under the ``StatusCode`` attribute.
Raises:
:py:class:`requests.exceptions.ReadTimeout`
If the timeout is exceeded.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.wait(self.id, **kwargs) |
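For context, a minimal usage sketch of the `wait` method above, assuming the Docker SDK for Python is installed and a daemon is reachable; the image name and timeout are illustrative:

```python
import docker

client = docker.from_env()
# Start a short-lived container in the background.
container = client.containers.run("alpine", "sleep 2", detach=True)
# Block until it exits (or the request times out), then read the exit code.
result = container.wait(timeout=30)
print(result["StatusCode"])  # 0 on a clean exit
```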
def search(lines, pattern):
"""
return all lines that match the pattern
#TODO: we need an example
:param lines:
:param pattern:
:return:
"""
p = pattern.replace("*", ".*")
test = re.compile(p)
result = []
for l in lines:
if test.search(l):
result.append(l)
return result | return all lines that match the pattern
#TODO: we need an example
:param lines:
:param pattern:
:return: | Below is the instruction that describes the task:
### Input:
return all lines that match the pattern
#TODO: we need an example
:param lines:
:param pattern:
:return:
### Response:
def search(lines, pattern):
"""
return all lines that match the pattern
#TODO: we need an example
:param lines:
:param pattern:
:return:
"""
p = pattern.replace("*", ".*")
test = re.compile(p)
result = []
for l in lines:
if test.search(l):
result.append(l)
return result |
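The docstring above asks for an example; here is a minimal sketch, assuming `re` is imported in the module that defines `search` and that `*` is intended as a shell-style wildcard:

```python
import re  # required by search() itself

lines = ["INFO start", "ERROR disk full", "INFO end"]
print(search(lines, "ERROR*full"))  # ['ERROR disk full']
print(search(lines, "INFO*"))       # ['INFO start', 'INFO end']
```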
def register(adapter):
'''Register a search adapter'''
# register the class in the catalog
if adapter.model and adapter.model not in adapter_catalog:
adapter_catalog[adapter.model] = adapter
# Automatically (re|un)index objects on save/delete
post_save.connect(reindex_model_on_save, sender=adapter.model)
post_delete.connect(unindex_model_on_delete, sender=adapter.model)
return adapter | Register a search adapter | Below is the instruction that describes the task:
### Input:
Register a search adapter
### Response:
def register(adapter):
'''Register a search adapter'''
# register the class in the catalog
if adapter.model and adapter.model not in adapter_catalog:
adapter_catalog[adapter.model] = adapter
# Automatically (re|un)index objects on save/delete
post_save.connect(reindex_model_on_save, sender=adapter.model)
post_delete.connect(unindex_model_on_delete, sender=adapter.model)
return adapter |
def rec_new(self, val):
"""Recursively add a new value and its children to me.
Args:
val (LispVal): The value to be added.
Returns:
LispVal: The added value.
"""
if val not in self.things:
for child in val.children():
self.rec_new(child)
self.new(val)
return val | Recursively add a new value and its children to me.
Args:
val (LispVal): The value to be added.
Returns:
LispVal: The added value. | Below is the instruction that describes the task:
### Input:
Recursively add a new value and its children to me.
Args:
val (LispVal): The value to be added.
Returns:
LispVal: The added value.
### Response:
def rec_new(self, val):
"""Recursively add a new value and its children to me.
Args:
val (LispVal): The value to be added.
Returns:
LispVal: The added value.
"""
if val not in self.things:
for child in val.children():
self.rec_new(child)
self.new(val)
return val |
def write_results(self, filename):
"""Writes samples, model stats, acceptance fraction, and random state
to the given file.
Parameters
-----------
filename : str
The file to write to. The file is opened using the ``io`` class
in an append state.
"""
with self.io(filename, 'a') as fp:
# write samples
fp.write_samples(self.samples, self.model.variable_params,
last_iteration=self.niterations)
# write stats
fp.write_samples(self.model_stats,
last_iteration=self.niterations)
# write acceptance
fp.write_acceptance_fraction(self._sampler.acceptance_fraction)
# write random state
fp.write_random_state(state=self._sampler.random_state) | Writes samples, model stats, acceptance fraction, and random state
to the given file.
Parameters
-----------
filename : str
The file to write to. The file is opened using the ``io`` class
in an append state. | Below is the instruction that describes the task:
### Input:
Writes samples, model stats, acceptance fraction, and random state
to the given file.
Parameters
-----------
filename : str
The file to write to. The file is opened using the ``io`` class
in an append state.
### Response:
def write_results(self, filename):
"""Writes samples, model stats, acceptance fraction, and random state
to the given file.
Parameters
-----------
filename : str
The file to write to. The file is opened using the ``io`` class
in an append state.
"""
with self.io(filename, 'a') as fp:
# write samples
fp.write_samples(self.samples, self.model.variable_params,
last_iteration=self.niterations)
# write stats
fp.write_samples(self.model_stats,
last_iteration=self.niterations)
# write acceptance
fp.write_acceptance_fraction(self._sampler.acceptance_fraction)
# write random state
fp.write_random_state(state=self._sampler.random_state) |
def _wrap_users(users, request):
"""
Returns a list with the given list of users and/or the currently logged in user, if the list
contains the magic item SELF.
"""
result = set()
for u in users:
if u is SELF and is_authenticated(request):
result.add(request.user.get_username())
else:
result.add(u)
return result | Returns a list with the given list of users and/or the currently logged in user, if the list
contains the magic item SELF. | Below is the instruction that describes the task:
### Input:
Returns a list with the given list of users and/or the currently logged in user, if the list
contains the magic item SELF.
### Response:
def _wrap_users(users, request):
"""
Returns a list with the given list of users and/or the currently logged in user, if the list
contains the magic item SELF.
"""
result = set()
for u in users:
if u is SELF and is_authenticated(request):
result.add(request.user.get_username())
else:
result.add(u)
return result |
def restore(self, filename):
"""Restore object from mat-file. TODO: determine format specification
"""
matfile = loadmat(filename)
if matfile['dim'] == 1:
matfile['solution'] = matfile['solution'][0, :]
self.elapsed_time = matfile['elapsed_time'][0, 0]
self.solution = matfile['solution']
return self | Restore object from mat-file. TODO: determine format specification | Below is the instruction that describes the task:
### Input:
Restore object from mat-file. TODO: determine format specification
### Response:
def restore(self, filename):
"""Restore object from mat-file. TODO: determine format specification
"""
matfile = loadmat(filename)
if matfile['dim'] == 1:
matfile['solution'] = matfile['solution'][0, :]
self.elapsed_time = matfile['elapsed_time'][0, 0]
self.solution = matfile['solution']
return self |
def _compute_H(self, t, index, t2, index2, update_derivatives=False, stationary=False):
"""Helper function for computing part of the ode1 covariance function.
:param t: first time input.
:type t: array
:param index: Indices of first output.
:type index: array of int
:param t2: second time input.
:type t2: array
:param index2: Indices of second output.
:type index2: array of int
:param update_derivatives: whether to update derivatives (default is False)
:return h : result of this subcomponent of the kernel for the given values.
:rtype: ndarray
"""
if stationary:
raise NotImplementedError("Error, stationary version of this covariance not yet implemented.")
# Vector of decays and delays associated with each output.
Decay = self.decay[index]
Decay2 = self.decay[index2]
t_mat = t[:, None]
t2_mat = t2[None, :]
if self.delay is not None:
Delay = self.delay[index]
Delay2 = self.delay[index2]
t_mat-=Delay[:, None]
t2_mat-=Delay2[None, :]
diff_t = (t_mat - t2_mat)
inv_sigma_diff_t = 1./self.sigma*diff_t
half_sigma_decay_i = 0.5*self.sigma*Decay[:, None]
ln_part_1, sign1 = ln_diff_erfs(half_sigma_decay_i + t2_mat/self.sigma,
half_sigma_decay_i - inv_sigma_diff_t,
return_sign=True)
ln_part_2, sign2 = ln_diff_erfs(half_sigma_decay_i,
half_sigma_decay_i - t_mat/self.sigma,
return_sign=True)
h = sign1*np.exp(half_sigma_decay_i
*half_sigma_decay_i
-Decay[:, None]*diff_t+ln_part_1
-np.log(Decay[:, None] + Decay2[None, :]))
h -= sign2*np.exp(half_sigma_decay_i*half_sigma_decay_i
-Decay[:, None]*t_mat-Decay2[None, :]*t2_mat+ln_part_2
-np.log(Decay[:, None] + Decay2[None, :]))
if update_derivatives:
sigma2 = self.sigma*self.sigma
# Update ith decay gradient
dh_ddecay = ((0.5*Decay[:, None]*sigma2*(Decay[:, None] + Decay2[None, :])-1)*h
+ (-diff_t*sign1*np.exp(
half_sigma_decay_i*half_sigma_decay_i-Decay[:, None]*diff_t+ln_part_1
)
+t_mat*sign2*np.exp(
half_sigma_decay_i*half_sigma_decay_i-Decay[:, None]*t_mat
- Decay2*t2_mat+ln_part_2))
+self.sigma/np.sqrt(np.pi)*(
-np.exp(
-diff_t*diff_t/sigma2
)+np.exp(
-t2_mat*t2_mat/sigma2-Decay[:, None]*t_mat
)+np.exp(
-t_mat*t_mat/sigma2-Decay2[None, :]*t2_mat
)-np.exp(
-(Decay[:, None]*t_mat + Decay2[None, :]*t2_mat)
)
))
self._dh_ddecay = (dh_ddecay/(Decay[:, None]+Decay2[None, :])).real
# Update jth decay gradient
dh_ddecay2 = (t2_mat*sign2
*np.exp(
half_sigma_decay_i*half_sigma_decay_i
-(Decay[:, None]*t_mat + Decay2[None, :]*t2_mat)
+ln_part_2
)
-h)
self._dh_ddecay2 = (dh_ddecay2/(Decay[:, None] + Decay2[None, :])).real
# Update sigma gradient
self._dh_dsigma = (half_sigma_decay_i*Decay[:, None]*h
+ 2/(np.sqrt(np.pi)
*(Decay[:, None]+Decay2[None, :]))
*((-diff_t/sigma2-Decay[:, None]/2)
*np.exp(-diff_t*diff_t/sigma2)
+ (-t2_mat/sigma2+Decay[:, None]/2)
*np.exp(-t2_mat*t2_mat/sigma2-Decay[:, None]*t_mat)
- (-t_mat/sigma2-Decay[:, None]/2)
*np.exp(-t_mat*t_mat/sigma2-Decay2[None, :]*t2_mat)
- Decay[:, None]/2
*np.exp(-(Decay[:, None]*t_mat+Decay2[None, :]*t2_mat))))
return h | Helper function for computing part of the ode1 covariance function.
:param t: first time input.
:type t: array
:param index: Indices of first output.
:type index: array of int
:param t2: second time input.
:type t2: array
:param index2: Indices of second output.
:type index2: array of int
:param update_derivatives: whether to update derivatives (default is False)
:return h : result of this subcomponent of the kernel for the given values.
:rtype: ndarray | Below is the instruction that describes the task:
### Input:
Helper function for computing part of the ode1 covariance function.
:param t: first time input.
:type t: array
:param index: Indices of first output.
:type index: array of int
:param t2: second time input.
:type t2: array
:param index2: Indices of second output.
:type index2: array of int
:param update_derivatives: whether to update derivatives (default is False)
:return h : result of this subcomponent of the kernel for the given values.
:rtype: ndarray
### Response:
def _compute_H(self, t, index, t2, index2, update_derivatives=False, stationary=False):
"""Helper function for computing part of the ode1 covariance function.
:param t: first time input.
:type t: array
:param index: Indices of first output.
:type index: array of int
:param t2: second time input.
:type t2: array
:param index2: Indices of second output.
:type index2: array of int
:param update_derivatives: whether to update derivatives (default is False)
:return h : result of this subcomponent of the kernel for the given values.
:rtype: ndarray
"""
if stationary:
raise NotImplementedError("Error, stationary version of this covariance not yet implemented.")
# Vector of decays and delays associated with each output.
Decay = self.decay[index]
Decay2 = self.decay[index2]
t_mat = t[:, None]
t2_mat = t2[None, :]
if self.delay is not None:
Delay = self.delay[index]
Delay2 = self.delay[index2]
t_mat-=Delay[:, None]
t2_mat-=Delay2[None, :]
diff_t = (t_mat - t2_mat)
inv_sigma_diff_t = 1./self.sigma*diff_t
half_sigma_decay_i = 0.5*self.sigma*Decay[:, None]
ln_part_1, sign1 = ln_diff_erfs(half_sigma_decay_i + t2_mat/self.sigma,
half_sigma_decay_i - inv_sigma_diff_t,
return_sign=True)
ln_part_2, sign2 = ln_diff_erfs(half_sigma_decay_i,
half_sigma_decay_i - t_mat/self.sigma,
return_sign=True)
h = sign1*np.exp(half_sigma_decay_i
*half_sigma_decay_i
-Decay[:, None]*diff_t+ln_part_1
-np.log(Decay[:, None] + Decay2[None, :]))
h -= sign2*np.exp(half_sigma_decay_i*half_sigma_decay_i
-Decay[:, None]*t_mat-Decay2[None, :]*t2_mat+ln_part_2
-np.log(Decay[:, None] + Decay2[None, :]))
if update_derivatives:
sigma2 = self.sigma*self.sigma
# Update ith decay gradient
dh_ddecay = ((0.5*Decay[:, None]*sigma2*(Decay[:, None] + Decay2[None, :])-1)*h
+ (-diff_t*sign1*np.exp(
half_sigma_decay_i*half_sigma_decay_i-Decay[:, None]*diff_t+ln_part_1
)
+t_mat*sign2*np.exp(
half_sigma_decay_i*half_sigma_decay_i-Decay[:, None]*t_mat
- Decay2*t2_mat+ln_part_2))
+self.sigma/np.sqrt(np.pi)*(
-np.exp(
-diff_t*diff_t/sigma2
)+np.exp(
-t2_mat*t2_mat/sigma2-Decay[:, None]*t_mat
)+np.exp(
-t_mat*t_mat/sigma2-Decay2[None, :]*t2_mat
)-np.exp(
-(Decay[:, None]*t_mat + Decay2[None, :]*t2_mat)
)
))
self._dh_ddecay = (dh_ddecay/(Decay[:, None]+Decay2[None, :])).real
# Update jth decay gradient
dh_ddecay2 = (t2_mat*sign2
*np.exp(
half_sigma_decay_i*half_sigma_decay_i
-(Decay[:, None]*t_mat + Decay2[None, :]*t2_mat)
+ln_part_2
)
-h)
self._dh_ddecay2 = (dh_ddecay2/(Decay[:, None] + Decay2[None, :])).real
# Update sigma gradient
self._dh_dsigma = (half_sigma_decay_i*Decay[:, None]*h
+ 2/(np.sqrt(np.pi)
*(Decay[:, None]+Decay2[None, :]))
*((-diff_t/sigma2-Decay[:, None]/2)
*np.exp(-diff_t*diff_t/sigma2)
+ (-t2_mat/sigma2+Decay[:, None]/2)
*np.exp(-t2_mat*t2_mat/sigma2-Decay[:, None]*t_mat)
- (-t_mat/sigma2-Decay[:, None]/2)
*np.exp(-t_mat*t_mat/sigma2-Decay2[None, :]*t2_mat)
- Decay[:, None]/2
*np.exp(-(Decay[:, None]*t_mat+Decay2[None, :]*t2_mat))))
return h |
def load_geo_adwords(filename='AdWords API Location Criteria 2017-06-26.csv.gz'):
""" WARN: Not a good source of city names. This table has many errors, even after cleaning"""
df = pd.read_csv(filename, header=0, index_col=0, low_memory=False)
df.columns = [c.replace(' ', '_').lower() for c in df.columns]
canonical = pd.DataFrame([list(row) for row in df.canonical_name.str.split(',').values])
def cleaner(row):
cleaned = pd.np.array(
[s for i, s in enumerate(row.values) if s not in ('Downtown', None) and (i > 3 or row[i + 1] != s)])
if len(cleaned) == 2:
cleaned = [cleaned[0], None, cleaned[1], None, None]
else:
cleaned = list(cleaned) + [None] * (5 - len(cleaned))
if not pd.np.all(pd.np.array(row.values)[:3] == pd.np.array(cleaned)[:3]):
logger.info('{} => {}'.format(row.values, cleaned))
return list(cleaned)
cleancanon = canonical.apply(cleaner, axis=1)
cleancanon.columns = 'city region country extra extra2'.split()
df['region'] = cleancanon.region
df['country'] = cleancanon.country
return df | WARN: Not a good source of city names. This table has many errors, even after cleaning | Below is the instruction that describes the task:
### Input:
WARN: Not a good source of city names. This table has many errors, even after cleaning
### Response:
def load_geo_adwords(filename='AdWords API Location Criteria 2017-06-26.csv.gz'):
""" WARN: Not a good source of city names. This table has many errors, even after cleaning"""
df = pd.read_csv(filename, header=0, index_col=0, low_memory=False)
df.columns = [c.replace(' ', '_').lower() for c in df.columns]
canonical = pd.DataFrame([list(row) for row in df.canonical_name.str.split(',').values])
def cleaner(row):
cleaned = pd.np.array(
[s for i, s in enumerate(row.values) if s not in ('Downtown', None) and (i > 3 or row[i + 1] != s)])
if len(cleaned) == 2:
cleaned = [cleaned[0], None, cleaned[1], None, None]
else:
cleaned = list(cleaned) + [None] * (5 - len(cleaned))
if not pd.np.all(pd.np.array(row.values)[:3] == pd.np.array(cleaned)[:3]):
logger.info('{} => {}'.format(row.values, cleaned))
return list(cleaned)
cleancanon = canonical.apply(cleaner, axis=1)
cleancanon.columns = 'city region country extra extra2'.split()
df['region'] = cleancanon.region
df['country'] = cleancanon.country
return df |
def buildIcon(icon):
"""
Builds an icon from the inputed information.
:param icon | <variant>
"""
if icon is None:
return QIcon()
if type(icon) == buffer:
try:
icon = QIcon(projexui.generatePixmap(icon))
except:
icon = QIcon()
else:
try:
icon = QIcon(icon)
except:
icon = QIcon()
return icon | Builds an icon from the inputted information.
:param icon | <variant> | Below is the instruction that describes the task:
### Input:
Builds an icon from the inputted information.
:param icon | <variant>
### Response:
def buildIcon(icon):
"""
Builds an icon from the inputted information.
:param icon | <variant>
"""
if icon is None:
return QIcon()
if type(icon) == buffer:
try:
icon = QIcon(projexui.generatePixmap(icon))
except:
icon = QIcon()
else:
try:
icon = QIcon(icon)
except:
icon = QIcon()
return icon |
def update(self, *data, **kwargs) -> 'Entity':
"""Update a Record in the repository.
Also performs unique validations before creating the entity.
Supports both dictionary and keyword argument updates to the entity::
dog.update({'age': 10})
dog.update(age=10)
:param data: Dictionary of values to be updated for the entity
:param kwargs: keyword arguments with key-value pairs to be updated
"""
logger.debug(f'Updating existing `{self.__class__.__name__}` object with id {self.id}')
# Fetch Model class and connected repository from Repository Factory
model_cls = repo_factory.get_model(self.__class__)
repository = repo_factory.get_repository(self.__class__)
try:
# Update entity's data attributes
self._update_data(*data, **kwargs)
# Do unique checks, update the record and return the Entity
self._validate_unique(create=False)
# Perform Pre-Save Actions
self.pre_save()
repository.update(model_cls.from_entity(self))
# Set Entity status to saved
self.state_.mark_saved()
# Perform Post-Save Actions
self.post_save()
return self
except Exception:
# FIXME Log Exception
raise | Update a Record in the repository.
Also performs unique validations before creating the entity.
Supports both dictionary and keyword argument updates to the entity::
dog.update({'age': 10})
dog.update(age=10)
:param data: Dictionary of values to be updated for the entity
:param kwargs: keyword arguments with key-value pairs to be updated | Below is the instruction that describes the task:
### Input:
Update a Record in the repository.
Also performs unique validations before creating the entity.
Supports both dictionary and keyword argument updates to the entity::
dog.update({'age': 10})
dog.update(age=10)
:param data: Dictionary of values to be updated for the entity
:param kwargs: keyword arguments with key-value pairs to be updated
### Response:
def update(self, *data, **kwargs) -> 'Entity':
"""Update a Record in the repository.
Also performs unique validations before creating the entity.
Supports both dictionary and keyword argument updates to the entity::
dog.update({'age': 10})
dog.update(age=10)
:param data: Dictionary of values to be updated for the entity
:param kwargs: keyword arguments with key-value pairs to be updated
"""
logger.debug(f'Updating existing `{self.__class__.__name__}` object with id {self.id}')
# Fetch Model class and connected repository from Repository Factory
model_cls = repo_factory.get_model(self.__class__)
repository = repo_factory.get_repository(self.__class__)
try:
# Update entity's data attributes
self._update_data(*data, **kwargs)
# Do unique checks, update the record and return the Entity
self._validate_unique(create=False)
# Perform Pre-Save Actions
self.pre_save()
repository.update(model_cls.from_entity(self))
# Set Entity status to saved
self.state_.mark_saved()
# Perform Post-Save Actions
self.post_save()
return self
except Exception:
# FIXME Log Exception
raise |
def _recursive_bezier(self, x1, y1, x2, y2, x3, y3, attr, row, level=0):
'from http://www.antigrain.com/research/adaptive_bezier/'
m_approximation_scale = 10.0
m_distance_tolerance = (0.5 / m_approximation_scale) ** 2
m_angle_tolerance = 1 * 2*math.pi/360 # 15 degrees in rads
curve_angle_tolerance_epsilon = 0.01
curve_recursion_limit = 32
curve_collinearity_epsilon = 1e-30
if level > curve_recursion_limit:
return
# Calculate all the mid-points of the line segments
x12 = (x1 + x2) / 2
y12 = (y1 + y2) / 2
x23 = (x2 + x3) / 2
y23 = (y2 + y3) / 2
x123 = (x12 + x23) / 2
y123 = (y12 + y23) / 2
dx = x3-x1
dy = y3-y1
d = abs(((x2 - x3) * dy - (y2 - y3) * dx))
if d > curve_collinearity_epsilon:
# Regular case
if d*d <= m_distance_tolerance * (dx*dx + dy*dy):
# If the curvature doesn't exceed the distance_tolerance value, we tend to finish subdivisions.
if m_angle_tolerance < curve_angle_tolerance_epsilon:
self.point(x123, y123, attr, row)
return
# Angle & Cusp Condition
da = abs(math.atan2(y3 - y2, x3 - x2) - math.atan2(y2 - y1, x2 - x1))
if da >= math.pi:
da = 2*math.pi - da
if da < m_angle_tolerance:
# Finally we can stop the recursion
self.point(x123, y123, attr, row)
return
else:
# Collinear case
dx = x123 - (x1 + x3) / 2
dy = y123 - (y1 + y3) / 2
if dx*dx + dy*dy <= m_distance_tolerance:
self.point(x123, y123, attr, row)
return
# Continue subdivision
self._recursive_bezier(x1, y1, x12, y12, x123, y123, attr, row, level + 1)
self._recursive_bezier(x123, y123, x23, y23, x3, y3, attr, row, level + 1) | from http://www.antigrain.com/research/adaptive_bezier/ | Below is the instruction that describes the task:
### Input:
from http://www.antigrain.com/research/adaptive_bezier/
### Response:
def _recursive_bezier(self, x1, y1, x2, y2, x3, y3, attr, row, level=0):
'from http://www.antigrain.com/research/adaptive_bezier/'
m_approximation_scale = 10.0
m_distance_tolerance = (0.5 / m_approximation_scale) ** 2
m_angle_tolerance = 1 * 2*math.pi/360 # 15 degrees in rads
curve_angle_tolerance_epsilon = 0.01
curve_recursion_limit = 32
curve_collinearity_epsilon = 1e-30
if level > curve_recursion_limit:
return
# Calculate all the mid-points of the line segments
x12 = (x1 + x2) / 2
y12 = (y1 + y2) / 2
x23 = (x2 + x3) / 2
y23 = (y2 + y3) / 2
x123 = (x12 + x23) / 2
y123 = (y12 + y23) / 2
dx = x3-x1
dy = y3-y1
d = abs(((x2 - x3) * dy - (y2 - y3) * dx))
if d > curve_collinearity_epsilon:
# Regular case
if d*d <= m_distance_tolerance * (dx*dx + dy*dy):
# If the curvature doesn't exceed the distance_tolerance value, we tend to finish subdivisions.
if m_angle_tolerance < curve_angle_tolerance_epsilon:
self.point(x123, y123, attr, row)
return
# Angle & Cusp Condition
da = abs(math.atan2(y3 - y2, x3 - x2) - math.atan2(y2 - y1, x2 - x1))
if da >= math.pi:
da = 2*math.pi - da
if da < m_angle_tolerance:
# Finally we can stop the recursion
self.point(x123, y123, attr, row)
return
else:
# Collinear case
dx = x123 - (x1 + x3) / 2
dy = y123 - (y1 + y3) / 2
if dx*dx + dy*dy <= m_distance_tolerance:
self.point(x123, y123, attr, row)
return
# Continue subdivision
self._recursive_bezier(x1, y1, x12, y12, x123, y123, attr, row, level + 1)
self._recursive_bezier(x123, y123, x23, y23, x3, y3, attr, row, level + 1) |
def run_validators(self, value):
"""
Make sure value is a string so it can run through django validators
"""
value = self.to_python(value)
value = self.value_to_string(value)
return super(RegexField, self).run_validators(value) | Make sure value is a string so it can run through django validators | Below is the instruction that describes the task:
### Input:
Make sure value is a string so it can run through django validators
### Response:
def run_validators(self, value):
"""
Make sure value is a string so it can run through django validators
"""
value = self.to_python(value)
value = self.value_to_string(value)
return super(RegexField, self).run_validators(value) |
def _accept_reflected_fn(simplex,
objective_values,
worst_index,
reflected,
objective_at_reflected):
"""Creates the condition function pair for a reflection to be accepted."""
def _replace_worst_with_reflected():
next_simplex = _replace_at_index(simplex, worst_index, reflected)
next_objective_values = _replace_at_index(objective_values, worst_index,
objective_at_reflected)
return False, next_simplex, next_objective_values, 0
return _replace_worst_with_reflected | Creates the condition function pair for a reflection to be accepted. | Below is the instruction that describes the task:
### Input:
Creates the condition function pair for a reflection to be accepted.
### Response:
def _accept_reflected_fn(simplex,
objective_values,
worst_index,
reflected,
objective_at_reflected):
"""Creates the condition function pair for a reflection to be accepted."""
def _replace_worst_with_reflected():
next_simplex = _replace_at_index(simplex, worst_index, reflected)
next_objective_values = _replace_at_index(objective_values, worst_index,
objective_at_reflected)
return False, next_simplex, next_objective_values, 0
return _replace_worst_with_reflected |
def are_equal_or_superset(superset_tree, base_tree):
"""Return True if ``superset_tree`` is equal to or a superset of ``base_tree``
- Checks that all elements and attributes in ``superset_tree`` are present and
contain the same values as in ``base_tree``. For elements, also checks that the
order is the same.
- Can be used for checking if one XML document is based on another, as long as all
the information in ``base_tree`` is also present and unmodified in
``superset_tree``.
"""
try:
_compare_attr(superset_tree, base_tree)
_compare_text(superset_tree, base_tree)
except CompareError as e:
logger.debug(str(e))
return False
return True | Return True if ``superset_tree`` is equal to or a superset of ``base_tree``
- Checks that all elements and attributes in ``superset_tree`` are present and
contain the same values as in ``base_tree``. For elements, also checks that the
order is the same.
- Can be used for checking if one XML document is based on another, as long as all
the information in ``base_tree`` is also present and unmodified in
``superset_tree``. | Below is the instruction that describes the task:
### Input:
Return True if ``superset_tree`` is equal to or a superset of ``base_tree``
- Checks that all elements and attributes in ``superset_tree`` are present and
contain the same values as in ``base_tree``. For elements, also checks that the
order is the same.
- Can be used for checking if one XML document is based on another, as long as all
the information in ``base_tree`` is also present and unmodified in
``superset_tree``.
### Response:
def are_equal_or_superset(superset_tree, base_tree):
"""Return True if ``superset_tree`` is equal to or a superset of ``base_tree``
- Checks that all elements and attributes in ``superset_tree`` are present and
contain the same values as in ``base_tree``. For elements, also checks that the
order is the same.
- Can be used for checking if one XML document is based on another, as long as all
the information in ``base_tree`` is also present and unmodified in
``superset_tree``.
"""
try:
_compare_attr(superset_tree, base_tree)
_compare_text(superset_tree, base_tree)
except CompareError as e:
logger.debug(str(e))
return False
return True |
def download(self, download_key, raise_exception_on_failure=False):
"""Download the file represented by the download_key."""
query = {"output": "json", "user_credentials": self.api_key}
resp = requests.get(
"%sdownload/%s" % (self._url, download_key),
params=query,
timeout=self._timeout,
)
if raise_exception_on_failure and resp.status_code != 200:
raise DocumentDownloadFailure(resp.content, resp.status_code)
return resp | Download the file represented by the download_key. | Below is the instruction that describes the task:
### Input:
Download the file represented by the download_key.
### Response:
def download(self, download_key, raise_exception_on_failure=False):
"""Download the file represented by the download_key."""
query = {"output": "json", "user_credentials": self.api_key}
resp = requests.get(
"%sdownload/%s" % (self._url, download_key),
params=query,
timeout=self._timeout,
)
if raise_exception_on_failure and resp.status_code != 200:
raise DocumentDownloadFailure(resp.content, resp.status_code)
return resp |
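An illustrative call, assuming `client` is an instance of the API wrapper class that defines `download()` and that `"a1b2c3"` is a hypothetical download key:

```python
resp = client.download("a1b2c3", raise_exception_on_failure=True)
with open("document.pdf", "wb") as fh:
    fh.write(resp.content)  # the raw response body returned by the service
```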
def _getTempFile(self, jobStoreID=None):
"""
:rtype : file-descriptor, string, string is the absolute path to a temporary file within
the given job's (referenced by jobStoreID's) temporary file directory. The file-descriptor
is integer pointing to open operating system file handle. Should be closed using os.close()
after writing some material to the file.
"""
if jobStoreID != None:
# Make a temporary file within the job's directory
self._checkJobStoreId(jobStoreID)
return tempfile.mkstemp(suffix=".tmp",
dir=os.path.join(self._getAbsPath(jobStoreID), "g"))
else:
# Make a temporary file within the temporary file structure
return tempfile.mkstemp(prefix="tmp", suffix=".tmp", dir=self._getTempSharedDir()) | :rtype : file-descriptor, string, string is the absolute path to a temporary file within
the given job's (referenced by jobStoreID's) temporary file directory. The file-descriptor
is integer pointing to open operating system file handle. Should be closed using os.close()
after writing some material to the file. | Below is the instruction that describes the task:
### Input:
:rtype : file-descriptor, string, string is the absolute path to a temporary file within
the given job's (referenced by jobStoreID's) temporary file directory. The file-descriptor
is integer pointing to open operating system file handle. Should be closed using os.close()
after writing some material to the file.
### Response:
def _getTempFile(self, jobStoreID=None):
"""
:rtype : file-descriptor, string, string is the absolute path to a temporary file within
the given job's (referenced by jobStoreID's) temporary file directory. The file-descriptor
is integer pointing to open operating system file handle. Should be closed using os.close()
after writing some material to the file.
"""
if jobStoreID != None:
# Make a temporary file within the job's directory
self._checkJobStoreId(jobStoreID)
return tempfile.mkstemp(suffix=".tmp",
dir=os.path.join(self._getAbsPath(jobStoreID), "g"))
else:
# Make a temporary file within the temporary file structure
return tempfile.mkstemp(prefix="tmp", suffix=".tmp", dir=self._getTempSharedDir()) |
def parse_blob_snapshot_parameter(url):
# type: (str) -> str
"""Retrieves the blob snapshot parameter from a url
:param url str: blob url
:rtype: str
:return: snapshot parameter
"""
if blob_is_snapshot(url):
tmp = url.split('?snapshot=')
if len(tmp) == 2:
return tmp[0], tmp[1]
return None | Retrieves the blob snapshot parameter from a url
:param url str: blob url
:rtype: str
:return: snapshot parameter | Below is the instruction that describes the task:
### Input:
Retrieves the blob snapshot parameter from a url
:param url str: blob url
:rtype: str
:return: snapshot parameter
### Response:
def parse_blob_snapshot_parameter(url):
# type: (str) -> str
"""Retrieves the blob snapshot parameter from a url
:param url str: blob url
:rtype: str
:return: snapshot parameter
"""
if blob_is_snapshot(url):
tmp = url.split('?snapshot=')
if len(tmp) == 2:
return tmp[0], tmp[1]
return None |
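A sketch of the expected behaviour, assuming the module's `blob_is_snapshot()` helper returns True for URLs carrying a `?snapshot=` query; note that the function returns a `(base_url, snapshot)` tuple rather than the single string its docstring suggests:

```python
url = ("https://account.blob.core.windows.net/container/blob"
       "?snapshot=2017-01-01T00:00:00.0000000Z")
print(parse_blob_snapshot_parameter(url))
# ('https://account.blob.core.windows.net/container/blob',
#  '2017-01-01T00:00:00.0000000Z')
```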
def show_filetypes(extensions):
"""
function to show valid file extensions
"""
for item in extensions.items():
val = item[1]
if type(item[1]) == list:
val = ", ".join(str(x) for x in item[1])
print("{0:4}: {1}".format(val, item[0])) | function to show valid file extensions | Below is the the instruction that describes the task:
### Input:
function to show valid file extensions
### Response:
def show_filetypes(extensions):
"""
function to show valid file extensions
"""
for item in extensions.items():
val = item[1]
if type(item[1]) == list:
val = ", ".join(str(x) for x in item[1])
print("{0:4}: {1}".format(val, item[0])) |
def get_pickling_errors(obj, seen=None):
"""Investigate pickling errors."""
if seen == None:
seen = []
if hasattr(obj, "__getstate__"):
state = obj.__getstate__()
#elif hasattr(obj, "__dict__"):
# state = obj.__dict__
else:
return None
#try:
# state = obj.__getstate__()
#except AttributeError as e:
# #state = obj.__dict__
# return str(e)
if state == None:
return 'object state is None'
if isinstance(state,tuple):
if not isinstance(state[0], dict):
state=state[1]
else:
state=state[0].update(state[1])
result = {}
for i in state:
try:
pickle.dumps(state[i], protocol=2)
except pickle.PicklingError as e:
if not state[i] in seen:
seen.append(state[i])
result[i]=get_pickling_errors(state[i],seen)
return result | Investigate pickling errors. | Below is the instruction that describes the task:
### Input:
Investigate pickling errors.
### Response:
def get_pickling_errors(obj, seen=None):
"""Investigate pickling errors."""
if seen == None:
seen = []
if hasattr(obj, "__getstate__"):
state = obj.__getstate__()
#elif hasattr(obj, "__dict__"):
# state = obj.__dict__
else:
return None
#try:
# state = obj.__getstate__()
#except AttributeError as e:
# #state = obj.__dict__
# return str(e)
if state == None:
return 'object state is None'
if isinstance(state,tuple):
if not isinstance(state[0], dict):
state=state[1]
else:
state=state[0].update(state[1])
result = {}
for i in state:
try:
pickle.dumps(state[i], protocol=2)
except pickle.PicklingError as e:
if not state[i] in seen:
seen.append(state[i])
result[i]=get_pickling_errors(state[i],seen)
return result |
def destroyTempDir(self, tempDir):
"""Removes a temporary directory in the temp file dir, checking its in the temp file tree.
The dir will be removed regardless of whether it is empty.
"""
#Do basic assertions for goodness of the function
assert os.path.isdir(tempDir)
assert os.path.commonprefix((self.rootDir, tempDir)) == self.rootDir #Checks file is part of tree
#Update stats.
self.tempFilesDestroyed += 1
#Do the actual removal
try:
os.rmdir(tempDir)
except OSError:
shutil.rmtree(tempDir)
#system("rm -rf %s" % tempDir)
self.__destroyFile(tempDir) | Removes a temporary directory in the temp file dir, checking it's in the temp file tree.
The dir will be removed regardless of whether it is empty. | Below is the instruction that describes the task:
### Input:
Removes a temporary directory in the temp file dir, checking it's in the temp file tree.
The dir will be removed regardless of whether it is empty.
### Response:
def destroyTempDir(self, tempDir):
"""Removes a temporary directory in the temp file dir, checking its in the temp file tree.
The dir will be removed regardless of whether it is empty.
"""
#Do basic assertions for goodness of the function
assert os.path.isdir(tempDir)
assert os.path.commonprefix((self.rootDir, tempDir)) == self.rootDir #Checks file is part of tree
#Update stats.
self.tempFilesDestroyed += 1
#Do the actual removal
try:
os.rmdir(tempDir)
except OSError:
shutil.rmtree(tempDir)
#system("rm -rf %s" % tempDir)
self.__destroyFile(tempDir) |
def difference(self, instrument1, instrument2, bounds, data_labels,
cost_function):
"""
Calculates the difference in signals from multiple
instruments within the given bounds.
Parameters
----------
instrument1 : Instrument
Information must already be loaded into the
instrument.
instrument2 : Instrument
Information must already be loaded into the
instrument.
bounds : list of tuples in the form (inst1_label, inst2_label,
min, max, max_difference)
inst1_label and inst2_label are labels for the data in
instrument1 and instrument2
min and max are bounds on the data considered
max_difference is the maximum difference between two points
for the difference to be calculated
data_labels : list of tuples of data labels
The first key is used to access data in s1
and the second data in s2.
cost_function : function
function that operates on two rows of the instrument data.
used to determine the distance between two points for finding
closest points
Returns
-------
data_df: pandas DataFrame
Each row has a point from instrument1, with the keys
preceded by '1_', and a point within bounds on that point
from instrument2 with the keys preceded by '2_', and the
difference between the instruments' data for all the labels
in data_labels
Created as part of a Spring 2018 UTDesign project.
"""
"""
Draft Pseudocode
----------------
Check integrity of inputs.
Let STD_LABELS be the constant tuple:
("time", "lat", "long", "alt")
Note: modify so that user can override labels for time,
lat, long, data for each satellite.
// We only care about the data currently loaded
into each object.
Let start be the later of the datetime of the
first piece of data loaded into s1, the first
piece of data loaded into s2, and the user
supplied start bound.
Let end be the later of the datetime of the first
piece of data loaded into s1, the first piece
of data loaded into s2, and the user supplied
end bound.
If start is after end, raise an error.
// Let data be the 2D array of deques holding each piece
// of data, sorted into bins by lat/long/alt.
Let s1_data (resp s2_data) be data from s1.data, s2.data
filtered by user-provided lat/long/alt bounds, time bounds
calculated.
Let data be a dictionary of lists with the keys
[ dl1 for dl1, dl2 in data_labels ] +
STD_LABELS +
[ lb+"2" for lb in STD_LABELS ]
For each piece of data s1_point in s1_data:
# Hopefully np.where is very good, because this
# runs O(n) times.
# We could try reusing selections, maybe, if needed.
# This would probably involve binning.
Let s2_near be the data from s2.data within certain
bounds on lat/long/alt/time using 8 statements to
numpy.where. We can probably get those defaults from
the user or handy constants / config?
# We could try a different algorithm for closest pairs
# of points.
Let distance be the numpy array representing the
distance between s1_point and each point in s2_near.
# S: Difference for others: change this line.
For each of those, calculate the spatial difference
from the s1 using lat/long/alt. If s2_near is
empty; break loop.
Let s2_nearest be the point in s2_near corresponding
to the lowest distance.
Append to data: a point, indexed by the time from
s1_point, containing the following data:
# note
Let n be the length of data["time"].
For each key in data:
Assert len(data[key]) == n
End for.
# Create data row to pass to pandas.
Let row be an empty dict.
For dl1, dl2 in data_labels:
Append s1_point[dl1] - s2_nearest[dl2] to data[dl1].
For key in STD_LABELS:
Append s1_point[translate[key]] to data[key]
key = key+"2"
Append s2_nearest[translate[key]] to data[key]
Let data_df be a pandas dataframe created from the data
in data.
return { 'data': data_df, 'start':start, 'end':end }
"""
labels = [dl1 for dl1, dl2 in data_labels] + ['1_'+b[0] for b in bounds] + ['2_'+b[1] for b in bounds] + ['dist']
data = {label: [] for label in labels}
# Apply bounds
inst1 = instrument1.data
inst2 = instrument2.data
for b in bounds:
label1 = b[0]
label2 = b[1]
low = b[2]
high = b[3]
data1 = inst1[label1]
ind1 = np.where((data1 >= low) & (data1 < high))
inst1 = inst1.iloc[ind1]
data2 = inst2[label2]
ind2 = np.where((data2 >= low) & (data2 < high))
inst2 = inst2.iloc[ind2]
for i, s1_point in inst1.iterrows():
# Gets points in instrument2 within the given bounds
s2_near = instrument2.data
for b in bounds:
label1 = b[0]
label2 = b[1]
s1_val = s1_point[label1]
max_dist = b[4]
minbound = s1_val - max_dist
maxbound = s1_val + max_dist
data2 = s2_near[label2]
indices = np.where((data2 >= minbound) & (data2 < maxbound))
s2_near = s2_near.iloc[indices]
# Finds nearest point to s1_point in s2_near
s2_nearest = None
min_dist = float('NaN')
for j, s2_point in s2_near.iterrows():
dist = cost_function(s1_point, s2_point)
if dist < min_dist or min_dist != min_dist:
min_dist = dist
s2_nearest = s2_point
data['dist'].append(min_dist)
# Append difference to data dict
for dl1, dl2 in data_labels:
if s2_nearest is not None:
data[dl1].append(s1_point[dl1] - s2_nearest[dl2])
else:
data[dl1].append(float('NaN'))
# Append the rest of the row
for b in bounds:
label1 = b[0]
label2 = b[1]
data['1_'+label1].append(s1_point[label1])
if s2_nearest is not None:
data['2_'+label2].append(s2_nearest[label2])
else:
data['2_'+label2].append(float('NaN'))
data_df = pds.DataFrame(data=data)
return data_df | Calculates the difference in signals from multiple
instruments within the given bounds.
Parameters
----------
instrument1 : Instrument
Information must already be loaded into the
instrument.
instrument2 : Instrument
Information must already be loaded into the
instrument.
bounds : list of tuples in the form (inst1_label, inst2_label,
min, max, max_difference)
inst1_label and inst2_label are labels for the data in
instrument1 and instrument2
min and max are bounds on the data considered
max_difference is the maximum difference between two points
for the difference to be calculated
data_labels : list of tuples of data labels
The first key is used to access data in s1
and the second data in s2.
cost_function : function
function that operates on two rows of the instrument data.
used to determine the distance between two points for finding
closest points
Returns
-------
data_df: pandas DataFrame
Each row has a point from instrument1, with the keys
preceded by '1_', and a point within bounds on that point
from instrument2 with the keys preceded by '2_', and the
difference between the instruments' data for all the labels
in data_labels
Created as part of a Spring 2018 UTDesign project. | Below is the instruction that describes the task:
### Input:
Calculates the difference in signals from multiple
instruments within the given bounds.
Parameters
----------
instrument1 : Instrument
Information must already be loaded into the
instrument.
instrument2 : Instrument
Information must already be loaded into the
instrument.
bounds : list of tuples in the form (inst1_label, inst2_label,
min, max, max_difference)
inst1_label are inst2_label are labels for the data in
instrument1 and instrument2
min and max are bounds on the data considered
max_difference is the maximum difference between two points
for the difference to be calculated
data_labels : list of tuples of data labels
The first key is used to access data in s1
and the second data in s2.
cost_function : function
function that operates on two rows of the instrument data.
used to determine the distance between two points for finding
closest points
Returns
-------
data_df: pandas DataFrame
Each row has a point from instrument1, with the keys
preceded by '1_', and a point within bounds on that point
from instrument2 with the keys preceded by '2_', and the
difference between the instruments' data for all the labels
in data_labels
Created as part of a Spring 2018 UTDesign project.
### Response:
def difference(self, instrument1, instrument2, bounds, data_labels,
cost_function):
"""
Calculates the difference in signals from multiple
instruments within the given bounds.
Parameters
----------
instrument1 : Instrument
Information must already be loaded into the
instrument.
instrument2 : Instrument
Information must already be loaded into the
instrument.
bounds : list of tuples in the form (inst1_label, inst2_label,
min, max, max_difference)
inst1_label and inst2_label are labels for the data in
instrument1 and instrument2
min and max are bounds on the data considered
max_difference is the maximum difference between two points
for the difference to be calculated
data_labels : list of tuples of data labels
The first key is used to access data in s1
and the second data in s2.
cost_function : function
function that operates on two rows of the instrument data.
used to determine the distance between two points for finding
closest points
Returns
-------
data_df: pandas DataFrame
Each row has a point from instrument1, with the keys
preceded by '1_', and a point within bounds on that point
from instrument2 with the keys preceded by '2_', and the
difference between the instruments' data for all the labels
in data_labels
Created as part of a Spring 2018 UTDesign project.
"""
"""
Draft Pseudocode
----------------
Check integrity of inputs.
Let STD_LABELS be the constant tuple:
("time", "lat", "long", "alt")
Note: modify so that user can override labels for time,
lat, long, data for each satellite.
// We only care about the data currently loaded
into each object.
Let start be the later of the datetime of the
first piece of data loaded into s1, the first
piece of data loaded into s2, and the user
supplied start bound.
Let end be the later of the datetime of the first
piece of data loaded into s1, the first piece
of data loaded into s2, and the user supplied
end bound.
If start is after end, raise an error.
// Let data be the 2D array of deques holding each piece
// of data, sorted into bins by lat/long/alt.
Let s1_data (resp s2_data) be data from s1.data, s2.data
filtered by user-provided lat/long/alt bounds, time bounds
calculated.
Let data be a dictionary of lists with the keys
[ dl1 for dl1, dl2 in data_labels ] +
STD_LABELS +
[ lb+"2" for lb in STD_LABELS ]
For each piece of data s1_point in s1_data:
# Hopefully np.where is very good, because this
# runs O(n) times.
# We could try reusing selections, maybe, if needed.
# This would probably involve binning.
Let s2_near be the data from s2.data within certain
bounds on lat/long/alt/time using 8 statements to
numpy.where. We can probably get those defaults from
the user or handy constants / config?
# We could try a different algorithm for closest pairs
# of points.
Let distance be the numpy array representing the
distance between s1_point and each point in s2_near.
# S: Difference for others: change this line.
For each of those, calculate the spatial difference
from the s1 using lat/long/alt. If s2_near is
empty; break loop.
Let s2_nearest be the point in s2_near corresponding
to the lowest distance.
Append to data: a point, indexed by the time from
s1_point, containing the following data:
# note
Let n be the length of data["time"].
For each key in data:
Assert len(data[key]) == n
End for.
# Create data row to pass to pandas.
Let row be an empty dict.
For dl1, dl2 in data_labels:
Append s1_point[dl1] - s2_nearest[dl2] to data[dl1].
For key in STD_LABELS:
Append s1_point[translate[key]] to data[key]
key = key+"2"
Append s2_nearest[translate[key]] to data[key]
Let data_df be a pandas dataframe created from the data
in data.
return { 'data': data_df, 'start':start, 'end':end }
"""
labels = [dl1 for dl1, dl2 in data_labels] + ['1_'+b[0] for b in bounds] + ['2_'+b[1] for b in bounds] + ['dist']
data = {label: [] for label in labels}
# Apply bounds
inst1 = instrument1.data
inst2 = instrument2.data
for b in bounds:
label1 = b[0]
label2 = b[1]
low = b[2]
high = b[3]
data1 = inst1[label1]
ind1 = np.where((data1 >= low) & (data1 < high))
inst1 = inst1.iloc[ind1]
data2 = inst2[label2]
ind2 = np.where((data2 >= low) & (data2 < high))
inst2 = inst2.iloc[ind2]
for i, s1_point in inst1.iterrows():
# Gets points in instrument2 within the given bounds
s2_near = instrument2.data
for b in bounds:
label1 = b[0]
label2 = b[1]
s1_val = s1_point[label1]
max_dist = b[4]
minbound = s1_val - max_dist
maxbound = s1_val + max_dist
data2 = s2_near[label2]
indices = np.where((data2 >= minbound) & (data2 < maxbound))
s2_near = s2_near.iloc[indices]
# Finds nearest point to s1_point in s2_near
s2_nearest = None
min_dist = float('NaN')
for j, s2_point in s2_near.iterrows():
dist = cost_function(s1_point, s2_point)
if dist < min_dist or min_dist != min_dist:
min_dist = dist
s2_nearest = s2_point
data['dist'].append(min_dist)
# Append difference to data dict
for dl1, dl2 in data_labels:
if s2_nearest is not None:
data[dl1].append(s1_point[dl1] - s2_nearest[dl2])
else:
data[dl1].append(float('NaN'))
# Append the rest of the row
for b in bounds:
label1 = b[0]
label2 = b[1]
data['1_'+label1].append(s1_point[label1])
if s2_nearest is not None:
data['2_'+label2].append(s2_nearest[label2])
else:
data['2_'+label2].append(float('NaN'))
data_df = pds.DataFrame(data=data)
return data_df |
def chunks(items, chunksize):
"""Turn generator sequence into sequence of chunks."""
items = iter(items)
for first in items:
chunk = chain((first,), islice(items, chunksize - 1))
yield chunk
deque(chunk, 0) | Turn generator sequence into sequence of chunks. | Below is the instruction that describes the task:
### Input:
Turn generator sequence into sequence of chunks.
### Response:
def chunks(items, chunksize):
"""Turn generator sequence into sequence of chunks."""
items = iter(items)
for first in items:
chunk = chain((first,), islice(items, chunksize - 1))
yield chunk
deque(chunk, 0) |
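A minimal usage sketch; `chunks` relies on `chain` and `islice` from `itertools` and on `deque` from `collections`, and each yielded chunk is a lazy iterator that should be consumed before the next one is requested:

```python
from itertools import chain, islice  # dependencies of chunks()
from collections import deque

for chunk in chunks(range(7), 3):
    print(list(chunk))
# [0, 1, 2]
# [3, 4, 5]
# [6]
```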
def add(self, item):
"""
Add a file to the manifest.
:param item: The pathname to add. This can be relative to the base.
"""
if not item.startswith(self.prefix):
item = os.path.join(self.base, item)
self.files.add(os.path.normpath(item)) | Add a file to the manifest.
:param item: The pathname to add. This can be relative to the base. | Below is the instruction that describes the task:
### Input:
Add a file to the manifest.
:param item: The pathname to add. This can be relative to the base.
### Response:
def add(self, item):
"""
Add a file to the manifest.
:param item: The pathname to add. This can be relative to the base.
"""
if not item.startswith(self.prefix):
item = os.path.join(self.base, item)
self.files.add(os.path.normpath(item)) |
def save(self, path):
"""Save svg as file(.svg)
Args:
path (str): destination to save file
"""
with open(path, 'w') as f:
f.write(self.contents()) | Save svg as file(.svg)
Args:
path (str): destination to save file | Below is the instruction that describes the task:
### Input:
Save svg as file(.svg)
Args:
path (str): destination to save file
### Response:
def save(self, path):
"""Save svg as file(.svg)
Args:
path (str): destination to save file
"""
with open(path, 'w') as f:
f.write(self.contents()) |
def request(self, send_terminator = False):
"""Required request() override for v3 and standard method to read meter.
Args:
send_terminator (bool): Send termination string at end of read.
Returns:
bool: CRC request flag result from most recent read
"""
self.m_a_crc = False
start_context = self.getContext()
self.setContext("request[v3A]")
try:
self.m_serial_port.write("2f3f".decode("hex") +
self.m_meter_address +
"210d0a".decode("hex"))
self.m_raw_read_a = self.m_serial_port.getResponse(self.getContext())
unpacked_read_a = self.unpackStruct(self.m_raw_read_a, self.m_blk_a)
self.convertData(unpacked_read_a, self.m_blk_a, 1)
self.m_a_crc = self.crcMeterRead(self.m_raw_read_a, self.m_blk_a)
if send_terminator:
self.serialPostEnd()
self.calculateFields()
self.makeReturnFormat()
except:
ekm_log(traceback.format_exc(sys.exc_info()))
self.setContext(start_context)
return self.m_a_crc | Required request() override for v3 and standard method to read meter.
Args:
send_terminator (bool): Send termination string at end of read.
Returns:
bool: CRC request flag result from most recent read | Below is the instruction that describes the task:
### Input:
Required request() override for v3 and standard method to read meter.
Args:
send_terminator (bool): Send termination string at end of read.
Returns:
bool: CRC request flag result from most recent read
### Response:
def request(self, send_terminator = False):
"""Required request() override for v3 and standard method to read meter.
Args:
send_terminator (bool): Send termination string at end of read.
Returns:
bool: CRC request flag result from most recent read
"""
self.m_a_crc = False
start_context = self.getContext()
self.setContext("request[v3A]")
try:
self.m_serial_port.write("2f3f".decode("hex") +
self.m_meter_address +
"210d0a".decode("hex"))
self.m_raw_read_a = self.m_serial_port.getResponse(self.getContext())
unpacked_read_a = self.unpackStruct(self.m_raw_read_a, self.m_blk_a)
self.convertData(unpacked_read_a, self.m_blk_a, 1)
self.m_a_crc = self.crcMeterRead(self.m_raw_read_a, self.m_blk_a)
if send_terminator:
self.serialPostEnd()
self.calculateFields()
self.makeReturnFormat()
except:
ekm_log(traceback.format_exc(sys.exc_info()))
self.setContext(start_context)
return self.m_a_crc |
def get_user(self, login):
""" http://confluence.jetbrains.net/display/YTD2/GET+user
"""
return youtrack.User(self._get("/admin/user/" + urlquote(login.encode('utf8'))), self) | http://confluence.jetbrains.net/display/YTD2/GET+user | Below is the instruction that describes the task:
### Input:
http://confluence.jetbrains.net/display/YTD2/GET+user
### Response:
def get_user(self, login):
""" http://confluence.jetbrains.net/display/YTD2/GET+user
"""
return youtrack.User(self._get("/admin/user/" + urlquote(login.encode('utf8'))), self) |
def iterator(self):
"""
An iterator over the results from applying this QuerySet to the api.
"""
for item in self.query.results():
obj = self.resource(**item)
yield obj | An iterator over the results from applying this QuerySet to the api. | Below is the instruction that describes the task:
### Input:
An iterator over the results from applying this QuerySet to the api.
### Response:
def iterator(self):
"""
An iterator over the results from applying this QuerySet to the api.
"""
for item in self.query.results():
obj = self.resource(**item)
yield obj |
def load_febrl4(return_links=False):
"""Load the FEBRL 4 datasets.
The Freely Extensible Biomedical Record Linkage (Febrl) package is
distributed with a dataset generator and four datasets generated
with the generator. This function returns the fourth Febrl dataset
as a :class:`pandas.DataFrame`.
*"Generated as one data set with 10000 records (5000
originals and 5000 duplicates, with one duplicate per
original), the originals have been split from the
duplicates, into dataset4a.csv (containing the 5000
original records) and dataset4b.csv (containing the
5000 duplicate records) These two data sets can be
used for testing linkage procedures."*
Parameters
----------
return_links: bool
When True, the function returns also the true links.
Returns
-------
(pandas.DataFrame, pandas.DataFrame)
A :class:`pandas.DataFrame` with Febrl dataset4a.csv and a pandas
dataframe with Febrl dataset4b.csv. When return_links is True,
the function also returns the true links.
"""
df_a = _febrl_load_data('dataset4a.csv')
df_b = _febrl_load_data('dataset4b.csv')
if return_links:
links = pandas.MultiIndex.from_arrays([
["rec-{}-org".format(i) for i in range(0, 5000)],
["rec-{}-dup-0".format(i) for i in range(0, 5000)]]
)
return df_a, df_b, links
else:
return df_a, df_b | Load the FEBRL 4 datasets.
The Freely Extensible Biomedical Record Linkage (Febrl) package is
distributed with a dataset generator and four datasets generated
with the generator. This function returns the fourth Febrl dataset
as a :class:`pandas.DataFrame`.
*"Generated as one data set with 10000 records (5000
originals and 5000 duplicates, with one duplicate per
original), the originals have been split from the
duplicates, into dataset4a.csv (containing the 5000
original records) and dataset4b.csv (containing the
5000 duplicate records) These two data sets can be
used for testing linkage procedures."*
Parameters
----------
return_links: bool
When True, the function returns also the true links.
Returns
-------
(pandas.DataFrame, pandas.DataFrame)
A :class:`pandas.DataFrame` with Febrl dataset4a.csv and a pandas
dataframe with Febrl dataset4b.csv. When return_links is True,
the function returns also the true links. | Below is the instruction that describes the task:
### Input:
Load the FEBRL 4 datasets.
The Freely Extensible Biomedical Record Linkage (Febrl) package is
distributed with a dataset generator and four datasets generated
with the generator. This function returns the fourth Febrl dataset
as a :class:`pandas.DataFrame`.
*"Generated as one data set with 10000 records (5000
originals and 5000 duplicates, with one duplicate per
original), the originals have been split from the
duplicates, into dataset4a.csv (containing the 5000
original records) and dataset4b.csv (containing the
5000 duplicate records) These two data sets can be
used for testing linkage procedures."*
Parameters
----------
return_links: bool
When True, the function returns also the true links.
Returns
-------
(pandas.DataFrame, pandas.DataFrame)
A :class:`pandas.DataFrame` with Febrl dataset4a.csv and a pandas
dataframe with Febrl dataset4b.csv. When return_links is True,
the function returns also the true links.
### Response:
def load_febrl4(return_links=False):
"""Load the FEBRL 4 datasets.
The Freely Extensible Biomedical Record Linkage (Febrl) package is
distributed with a dataset generator and four datasets generated
with the generator. This function returns the fourth Febrl dataset
as a :class:`pandas.DataFrame`.
*"Generated as one data set with 10000 records (5000
originals and 5000 duplicates, with one duplicate per
original), the originals have been split from the
duplicates, into dataset4a.csv (containing the 5000
original records) and dataset4b.csv (containing the
5000 duplicate records) These two data sets can be
used for testing linkage procedures."*
Parameters
----------
return_links: bool
When True, the function returns also the true links.
Returns
-------
(pandas.DataFrame, pandas.DataFrame)
A :class:`pandas.DataFrame` with Febrl dataset4a.csv and a pandas
dataframe with Febrl dataset4b.csv. When return_links is True,
the function returns also the true links.
"""
df_a = _febrl_load_data('dataset4a.csv')
df_b = _febrl_load_data('dataset4b.csv')
if return_links:
links = pandas.MultiIndex.from_arrays([
["rec-{}-org".format(i) for i in range(0, 5000)],
["rec-{}-dup-0".format(i) for i in range(0, 5000)]]
)
return df_a, df_b, links
else:
return df_a, df_b |
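A short usage sketch. The import path below follows the recordlinkage package's public API, but treat it as an assumption rather than something stated in the source:
from recordlinkage.datasets import load_febrl4  # assumed import location
# Load both halves of the dataset together with the ground-truth links.
df_a, df_b, true_links = load_febrl4(return_links=True)
print(df_a.shape, df_b.shape)  # 5000 original and 5000 duplicate records
print(len(true_links))  # 5000 original/duplicate pairs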
def GT(classical_reg1, classical_reg2, classical_reg3):
"""
Produce a GT instruction.
:param classical_reg1: Memory address to which to store the comparison result.
:param classical_reg2: Left comparison operand.
:param classical_reg3: Right comparison operand.
:return: A ClassicalGreaterThan instance.
"""
classical_reg1, classical_reg2, classical_reg3 = prepare_ternary_operands(classical_reg1,
classical_reg2,
classical_reg3)
return ClassicalGreaterThan(classical_reg1, classical_reg2, classical_reg3) | Produce a GT instruction.
:param classical_reg1: Memory address to which to store the comparison result.
:param classical_reg2: Left comparison operand.
:param classical_reg3: Right comparison operand.
:return: A ClassicalGreaterThan instance. | Below is the instruction that describes the task:
### Input:
Produce a GT instruction.
:param classical_reg1: Memory address to which to store the comparison result.
:param classical_reg2: Left comparison operand.
:param classical_reg3: Right comparison operand.
:return: A ClassicalGreaterThan instance.
### Response:
def GT(classical_reg1, classical_reg2, classical_reg3):
"""
Produce a GT instruction.
:param classical_reg1: Memory address to which to store the comparison result.
:param classical_reg2: Left comparison operand.
:param classical_reg3: Right comparison operand.
:return: A ClassicalGreaterThan instance.
"""
classical_reg1, classical_reg2, classical_reg3 = prepare_ternary_operands(classical_reg1,
classical_reg2,
classical_reg3)
return ClassicalGreaterThan(classical_reg1, classical_reg2, classical_reg3) |
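A minimal sketch of calling the helper defined above; the plain integer register addresses are an illustrative assumption about what prepare_ternary_operands accepts:
# reg[2] = (reg[0] > reg[1]); addresses given as integers for illustration.
instr = GT(2, 0, 1)
print(instr)  # a ClassicalGreaterThan instruction ready to add to a program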
def _domain_event_io_error_cb(conn, domain, srcpath, devalias, action, reason, opaque):
'''
Domain I/O Error events handler
'''
_salt_send_domain_event(opaque, conn, domain, opaque['event'], {
'srcPath': srcpath,
'dev': devalias,
'action': _get_libvirt_enum_string('VIR_DOMAIN_EVENT_IO_ERROR_', action),
'reason': reason
}) | Domain I/O Error events handler | Below is the instruction that describes the task:
### Input:
Domain I/O Error events handler
### Response:
def _domain_event_io_error_cb(conn, domain, srcpath, devalias, action, reason, opaque):
'''
Domain I/O Error events handler
'''
_salt_send_domain_event(opaque, conn, domain, opaque['event'], {
'srcPath': srcpath,
'dev': devalias,
'action': _get_libvirt_enum_string('VIR_DOMAIN_EVENT_IO_ERROR_', action),
'reason': reason
}) |
def func_timeout(timeout, func, args=(), kwargs=None):
'''
func_timeout - Runs the given function for up to #timeout# seconds.
Raises any exceptions #func# would raise, returns what #func# would return (unless timeout is exceeded), in which case it raises FunctionTimedOut
@param timeout <float> - Maximum number of seconds to run #func# before terminating
@param func <function> - The function to call
@param args <tuple> - Any ordered arguments to pass to the function
@param kwargs <dict/None> - Keyword arguments to pass to the function.
@raises - FunctionTimedOut if #timeout# is exceeded, otherwise anything #func# could raise will be raised
If the timeout is exceeded, FunctionTimedOut will be raised within the context of the called function every two seconds until it terminates,
but will not block the calling thread (a new thread will be created to perform the join). If possible, you should try/except FunctionTimedOut
to return cleanly, but in most cases it will 'just work'.
@return - The return value that #func# gives
'''
if not kwargs:
kwargs = {}
if not args:
args = ()
ret = []
exception = []
isStopped = False
def funcwrap(args2, kwargs2):
try:
ret.append( func(*args2, **kwargs2) )
except FunctionTimedOut:
# Don't print traceback to stderr if we time out
pass
except Exception as e:
exc_info = sys.exc_info()
if isStopped is False:
# Assemble the alternate traceback, excluding this function
# from the trace (by going to next frame)
# Python3 reads native from __traceback__,
# python2 has a different form for "raise"
e.__traceback__ = exc_info[2].tb_next
exception.append( e )
thread = StoppableThread(target=funcwrap, args=(args, kwargs))
thread.daemon = True
thread.start()
thread.join(timeout)
stopException = None
if thread.isAlive():
isStopped = True
class FunctionTimedOutTempType(FunctionTimedOut):
def __init__(self):
return FunctionTimedOut.__init__(self, '', timeout, func, args, kwargs)
FunctionTimedOutTemp = type('FunctionTimedOut' + str( hash( "%d_%d_%d_%d" %(id(timeout), id(func), id(args), id(kwargs))) ), FunctionTimedOutTempType.__bases__, dict(FunctionTimedOutTempType.__dict__))
stopException = FunctionTimedOutTemp
thread._stopThread(stopException)
thread.join(min(.1, timeout / 50.0))
raise FunctionTimedOut('', timeout, func, args, kwargs)
else:
# We can still cleanup the thread here..
# Still give a timeout... just... cuz..
thread.join(.5)
if exception:
raise_exception(exception)
if ret:
return ret[0] | func_timeout - Runs the given function for up to #timeout# seconds.
Raises any exceptions #func# would raise, returns what #func# would return (unless timeout is exceeded), in which case it raises FunctionTimedOut
@param timeout <float> - Maximum number of seconds to run #func# before terminating
@param func <function> - The function to call
@param args <tuple> - Any ordered arguments to pass to the function
@param kwargs <dict/None> - Keyword arguments to pass to the function.
@raises - FunctionTimedOut if #timeout# is exceeded, otherwise anything #func# could raise will be raised
If the timeout is exceeded, FunctionTimedOut will be raised within the context of the called function every two seconds until it terminates,
but will not block the calling thread (a new thread will be created to perform the join). If possible, you should try/except FunctionTimedOut
to return cleanly, but in most cases it will 'just work'.
@return - The return value that #func# gives | Below is the instruction that describes the task:
### Input:
func_timeout - Runs the given function for up to #timeout# seconds.
Raises any exceptions #func# would raise, returns what #func# would return (unless timeout is exceeded), in which case it raises FunctionTimedOut
@param timeout <float> - Maximum number of seconds to run #func# before terminating
@param func <function> - The function to call
@param args <tuple> - Any ordered arguments to pass to the function
@param kwargs <dict/None> - Keyword arguments to pass to the function.
@raises - FunctionTimedOut if #timeout# is exceeded, otherwise anything #func# could raise will be raised
If the timeout is exceeded, FunctionTimedOut will be raised within the context of the called function every two seconds until it terminates,
but will not block the calling thread (a new thread will be created to perform the join). If possible, you should try/except FunctionTimedOut
to return cleanly, but in most cases it will 'just work'.
@return - The return value that #func# gives
### Response:
def func_timeout(timeout, func, args=(), kwargs=None):
'''
func_timeout - Runs the given function for up to #timeout# seconds.
Raises any exceptions #func# would raise, returns what #func# would return (unless timeout is exceeded), in which case it raises FunctionTimedOut
@param timeout <float> - Maximum number of seconds to run #func# before terminating
@param func <function> - The function to call
@param args <tuple> - Any ordered arguments to pass to the function
@param kwargs <dict/None> - Keyword arguments to pass to the function.
@raises - FunctionTimedOut if #timeout# is exceeded, otherwise anything #func# could raise will be raised
If the timeout is exceeded, FunctionTimedOut will be raised within the context of the called function every two seconds until it terminates,
but will not block the calling thread (a new thread will be created to perform the join). If possible, you should try/except FunctionTimedOut
to return cleanly, but in most cases it will 'just work'.
@return - The return value that #func# gives
'''
if not kwargs:
kwargs = {}
if not args:
args = ()
ret = []
exception = []
isStopped = False
def funcwrap(args2, kwargs2):
try:
ret.append( func(*args2, **kwargs2) )
except FunctionTimedOut:
# Don't print traceback to stderr if we time out
pass
except Exception as e:
exc_info = sys.exc_info()
if isStopped is False:
# Assemble the alternate traceback, excluding this function
# from the trace (by going to next frame)
# Python3 reads native from __traceback__,
# python2 has a different form for "raise"
e.__traceback__ = exc_info[2].tb_next
exception.append( e )
thread = StoppableThread(target=funcwrap, args=(args, kwargs))
thread.daemon = True
thread.start()
thread.join(timeout)
stopException = None
if thread.isAlive():
isStopped = True
class FunctionTimedOutTempType(FunctionTimedOut):
def __init__(self):
return FunctionTimedOut.__init__(self, '', timeout, func, args, kwargs)
FunctionTimedOutTemp = type('FunctionTimedOut' + str( hash( "%d_%d_%d_%d" %(id(timeout), id(func), id(args), id(kwargs))) ), FunctionTimedOutTempType.__bases__, dict(FunctionTimedOutTempType.__dict__))
stopException = FunctionTimedOutTemp
thread._stopThread(stopException)
thread.join(min(.1, timeout / 50.0))
raise FunctionTimedOut('', timeout, func, args, kwargs)
else:
# We can still cleanup the thread here..
# Still give a timeout... just... cuz..
thread.join(.5)
if exception:
raise_exception(exception)
if ret:
return ret[0] |
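A small, self-contained usage sketch; slow_add is a stand-in workload defined only for this example:
import time

def slow_add(a, b, delay=5):
    time.sleep(delay)  # simulate a long-running call
    return a + b

try:
    # Allow at most 2 seconds; slow_add sleeps for 5, so FunctionTimedOut is raised.
    result = func_timeout(2, slow_add, args=(1, 2))
except FunctionTimedOut:
    result = None  # fall back gracefully on timeout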
def register(self, category):
"""
Usage:
@metrics.register('finance')
def approved_funds(pronac, data):
return metric_from_data_and_pronac_number(data, pronac)
"""
def decorator(func):
name = func.__name__
key = f'{category}.{name}'
self._metrics[key] = func
return func
return decorator | Usage:
@metrics.register('finance')
def approved_funds(pronac, data):
return metric_from_data_and_pronac_number(data, pronac) | Below is the instruction that describes the task:
### Input:
Usage:
@metrics.register('finance')
def approved_funds(pronac, data):
return metric_from_data_and_pronac_number(data, pronac)
### Response:
def register(self, category):
"""
Usage:
@metrics.register('finance')
def approved_funds(pronac, data):
return metric_from_data_and_pronac_number(data, pronac)
"""
def decorator(func):
name = func.__name__
key = f'{category}.{name}'
self._metrics[key] = func
return func
return decorator |
def handleError(self, test, err):
"""
Baseclass override. Called when a test raises an exception.
If the test isn't going to be rerun again, then report the error
to the nose test result.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun; False, if nose should handle it.
:rtype:
`bool`
"""
# pylint:disable=invalid-name
want_error = self._handle_test_error_or_failure(test, err)
if not want_error and id(test) in self._tests_that_reran:
self._nose_result.addError(test, err)
return want_error or None | Baseclass override. Called when a test raises an exception.
If the test isn't going to be rerun again, then report the error
to the nose test result.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun; False, if nose should handle it.
:rtype:
`bool` | Below is the instruction that describes the task:
### Input:
Baseclass override. Called when a test raises an exception.
If the test isn't going to be rerun again, then report the error
to the nose test result.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun; False, if nose should handle it.
:rtype:
`bool`
### Response:
def handleError(self, test, err):
"""
Baseclass override. Called when a test raises an exception.
If the test isn't going to be rerun again, then report the error
to the nose test result.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun; False, if nose should handle it.
:rtype:
`bool`
"""
# pylint:disable=invalid-name
want_error = self._handle_test_error_or_failure(test, err)
if not want_error and id(test) in self._tests_that_reran:
self._nose_result.addError(test, err)
return want_error or None |
def get_voltage_at_bus_bar(grid, tree):
"""
Determine voltage level at bus bar of MV-LV substation
Parameters
----------
grid : LVGridDing0
Ding0 grid object
tree : :networkx:`NetworkX Graph Obj< >`
Tree of grid topology:
Returns
-------
:any:`list`
Voltage at bus bar. First item refers to load case, second item refers
to voltage in feedin (generation) case
"""
# voltage at substation bus bar
r_mv_grid, x_mv_grid = get_mv_impedance(grid)
r_trafo = sum([tr.r for tr in grid._station._transformers])
x_trafo = sum([tr.x for tr in grid._station._transformers])
cos_phi_load = cfg_ding0.get('assumptions', 'cos_phi_load')
cos_phi_feedin = cfg_ding0.get('assumptions', 'cos_phi_gen')
v_nom = cfg_ding0.get('assumptions', 'lv_nominal_voltage')
# loads and generators connected to bus bar
bus_bar_load = sum(
[node.peak_load for node in tree.successors(grid._station)
if isinstance(node, LVLoadDing0)]) / cos_phi_load
bus_bar_generation = sum(
[node.capacity for node in tree.successors(grid._station)
if isinstance(node, GeneratorDing0)]) / cos_phi_feedin
v_delta_load_case_bus_bar = voltage_delta_vde(v_nom,
bus_bar_load,
(r_mv_grid + r_trafo),
(x_mv_grid + x_trafo),
cos_phi_load)
v_delta_gen_case_bus_bar = voltage_delta_vde(v_nom,
bus_bar_generation,
(r_mv_grid + r_trafo),
-(x_mv_grid + x_trafo),
cos_phi_feedin)
return v_delta_load_case_bus_bar, v_delta_gen_case_bus_bar | Determine voltage level at bus bar of MV-LV substation
Parameters
----------
grid : LVGridDing0
Ding0 grid object
tree : :networkx:`NetworkX Graph Obj< >`
Tree of grid topology:
Returns
-------
:any:`list`
Voltage at bus bar. First item refers to load case, second item refers
to voltage in feedin (generation) case | Below is the instruction that describes the task:
### Input:
Determine voltage level at bus bar of MV-LV substation
Parameters
----------
grid : LVGridDing0
Ding0 grid object
tree : :networkx:`NetworkX Graph Obj< >`
Tree of grid topology:
Returns
-------
:any:`list`
Voltage at bus bar. First item refers to load case, second item refers
to voltage in feedin (generation) case
### Response:
def get_voltage_at_bus_bar(grid, tree):
"""
Determine voltage level at bus bar of MV-LV substation
Parameters
----------
grid : LVGridDing0
Ding0 grid object
tree : :networkx:`NetworkX Graph Obj< >`
Tree of grid topology:
Returns
-------
:any:`list`
Voltage at bus bar. First item refers to load case, second item refers
to voltage in feedin (generation) case
"""
# voltage at substation bus bar
r_mv_grid, x_mv_grid = get_mv_impedance(grid)
r_trafo = sum([tr.r for tr in grid._station._transformers])
x_trafo = sum([tr.x for tr in grid._station._transformers])
cos_phi_load = cfg_ding0.get('assumptions', 'cos_phi_load')
cos_phi_feedin = cfg_ding0.get('assumptions', 'cos_phi_gen')
v_nom = cfg_ding0.get('assumptions', 'lv_nominal_voltage')
# loads and generators connected to bus bar
bus_bar_load = sum(
[node.peak_load for node in tree.successors(grid._station)
if isinstance(node, LVLoadDing0)]) / cos_phi_load
bus_bar_generation = sum(
[node.capacity for node in tree.successors(grid._station)
if isinstance(node, GeneratorDing0)]) / cos_phi_feedin
v_delta_load_case_bus_bar = voltage_delta_vde(v_nom,
bus_bar_load,
(r_mv_grid + r_trafo),
(x_mv_grid + x_trafo),
cos_phi_load)
v_delta_gen_case_bus_bar = voltage_delta_vde(v_nom,
bus_bar_generation,
(r_mv_grid + r_trafo),
-(x_mv_grid + x_trafo),
cos_phi_feedin)
return v_delta_load_case_bus_bar, v_delta_gen_case_bus_bar |
def _visited_callback(self, state, pc, instr):
""" Maintain our own copy of the visited set
"""
pc = state.platform.current.PC
with self.locked_context('visited', dict) as ctx:
ctx[pc] = ctx.get(pc, 0) + 1 | Maintain our own copy of the visited set | Below is the instruction that describes the task:
### Input:
Maintain our own copy of the visited set
### Response:
def _visited_callback(self, state, pc, instr):
""" Maintain our own copy of the visited set
"""
pc = state.platform.current.PC
with self.locked_context('visited', dict) as ctx:
ctx[pc] = ctx.get(pc, 0) + 1 |
def main( gpu:Param("GPU to run on", str)=None ):
"""Distributed training of Imagenet. Fastest speed is if you run with: python -m fastai.launch"""
path = Path('/mnt/fe2_disk/')
tot_epochs,size,bs,lr = 60,224,256,3e-1
dirname = 'imagenet'
gpu = setup_distrib(gpu)
if gpu is None: bs *= torch.cuda.device_count()
n_gpus = num_distrib() or 1
workers = min(12, num_cpus()//n_gpus)
data = get_data(path/dirname, size, bs, workers)
b_its = len(data.train_dl)//n_gpus
# Using bs 256 on single GPU as baseline, scale the LR linearly
tot_bs = bs*n_gpus
bs_rat = tot_bs/256
lr *= bs_rat
ph1 = (TrainingPhase(tot_epochs*0.10*b_its)
.schedule_hp('lr', (lr/10,lr), anneal=annealing_cos))
ph2 = (TrainingPhase(tot_epochs*0.90*b_its)
.schedule_hp('lr', (lr,lr/1e5), anneal=annealing_cos))
opt_func = partial(optim.Adam, eps=0.1, betas=(0.9,0.99))
learn = Learner(data, models.xresnet50(), metrics=[accuracy,top_k_accuracy], wd=1e-3,
opt_func=opt_func, bn_wd=False, true_wd=True,
loss_func = LabelSmoothingCrossEntropy()).mixup(alpha=0.2)
learn.callback_fns += [
partial(GeneralScheduler, phases=(ph1,ph2)),
partial(SaveModelCallback, every='epoch', name='model')
]
learn.split(lambda m: (children(m)[-2],))
if gpu is None: learn.model = nn.DataParallel(learn.model)
else: learn.to_distributed(gpu)
learn.to_fp16(dynamic=True)
learn.fit(tot_epochs, 1)
if rank_distrib(): time.sleep(1)
learn.save('done') | Distributed training of Imagenet. Fastest speed is if you run with: python -m fastai.launch | Below is the instruction that describes the task:
### Input:
Distributed training of Imagenet. Fastest speed is if you run with: python -m fastai.launch
### Response:
def main( gpu:Param("GPU to run on", str)=None ):
"""Distributed training of Imagenet. Fastest speed is if you run with: python -m fastai.launch"""
path = Path('/mnt/fe2_disk/')
tot_epochs,size,bs,lr = 60,224,256,3e-1
dirname = 'imagenet'
gpu = setup_distrib(gpu)
if gpu is None: bs *= torch.cuda.device_count()
n_gpus = num_distrib() or 1
workers = min(12, num_cpus()//n_gpus)
data = get_data(path/dirname, size, bs, workers)
b_its = len(data.train_dl)//n_gpus
# Using bs 256 on single GPU as baseline, scale the LR linearly
tot_bs = bs*n_gpus
bs_rat = tot_bs/256
lr *= bs_rat
ph1 = (TrainingPhase(tot_epochs*0.10*b_its)
.schedule_hp('lr', (lr/10,lr), anneal=annealing_cos))
ph2 = (TrainingPhase(tot_epochs*0.90*b_its)
.schedule_hp('lr', (lr,lr/1e5), anneal=annealing_cos))
opt_func = partial(optim.Adam, eps=0.1, betas=(0.9,0.99))
learn = Learner(data, models.xresnet50(), metrics=[accuracy,top_k_accuracy], wd=1e-3,
opt_func=opt_func, bn_wd=False, true_wd=True,
loss_func = LabelSmoothingCrossEntropy()).mixup(alpha=0.2)
learn.callback_fns += [
partial(GeneralScheduler, phases=(ph1,ph2)),
partial(SaveModelCallback, every='epoch', name='model')
]
learn.split(lambda m: (children(m)[-2],))
if gpu is None: learn.model = nn.DataParallel(learn.model)
else: learn.to_distributed(gpu)
learn.to_fp16(dynamic=True)
learn.fit(tot_epochs, 1)
if rank_distrib(): time.sleep(1)
learn.save('done') |
def tables_in_schema(self, schema):
"""Get a listing of all tables in given schema
"""
sql = """SELECT table_name
FROM information_schema.tables
WHERE table_schema = %s"""
return [t[0] for t in self.query(sql, (schema,)).fetchall()] | Get a listing of all tables in given schema | Below is the instruction that describes the task:
### Input:
Get a listing of all tables in given schema
### Response:
def tables_in_schema(self, schema):
"""Get a listing of all tables in given schema
"""
sql = """SELECT table_name
FROM information_schema.tables
WHERE table_schema = %s"""
return [t[0] for t in self.query(sql, (schema,)).fetchall()] |
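A usage sketch, assuming `db` is an instance of the database wrapper class that provides tables_in_schema and query():
# List every table in the 'public' schema of the connected database.
for table_name in db.tables_in_schema('public'):
    print(table_name)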
def LoadSNPs(self, snps=[]):
"""Define the SNP inclusions (by RSID). This overrides true boundary \
definition.
:param snps: array of RSIDs
:return: None
This doesn't define RSID ranges, so it throws InvalidBoundarySpec if it
encounters what appears to be a range (SNP contains a "-")
"""
for snp in snps:
bounds = snp.split("-")
if len(bounds) == 1:
if bounds[0] != "":
self.target_rs.append(bounds[0])
else:
raise InvalidBoundarySpec(snp) | Define the SNP inclusions (by RSID). This overrides true boundary \
definition.
:param snps: array of RSIDs
:return: None
This doesn't define RSID ranges, so it throws InvalidBoundarySpec if it
encounters what appears to be a range (SNP contains a "-") | Below is the instruction that describes the task:
### Input:
Define the SNP inclusions (by RSID). This overrides true boundary \
definition.
:param snps: array of RSIDs
:return: None
This doesn't define RSID ranges, so it throws InvalidBoundarySpec if it
encounters what appears to be a range (SNP contains a "-")
### Response:
def LoadSNPs(self, snps=[]):
"""Define the SNP inclusions (by RSID). This overrides true boundary \
definition.
:param snps: array of RSIDs
:return: None
This doesn't define RSID ranges, so it throws InvalidBoundarySpec if it
encounters what appears to be a range (SNP contains a "-")
"""
for snp in snps:
bounds = snp.split("-")
if len(bounds) == 1:
if bounds[0] != "":
self.target_rs.append(bounds[0])
else:
raise InvalidBoundarySpec(snp) |
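A short sketch of the call pattern, assuming `boundary` is an instance of the class that defines LoadSNPs; the RSIDs are made up:
boundary.LoadSNPs(["rs12345", "rs67890"])  # both RSIDs are appended to target_rs
try:
    boundary.LoadSNPs(["rs100-rs200"])  # anything containing '-' is rejected
except InvalidBoundarySpec as exc:
    print("rejected:", exc)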
def _read_mesafile(filename,data_rows=0,only='all'):
""" private routine that is not directly called by the user"""
f=open(filename,'r')
vv=[]
v=[]
lines = []
line = ''
for i in range(0,6):
line = f.readline()
lines.extend([line])
hval = lines[2].split()
hlist = lines[1].split()
header_attr = {}
for a,b in zip(hlist,hval):
header_attr[a] = float(b)
if only == 'header_attr':
return header_attr
cols = {}
colnum = lines[4].split()
colname = lines[5].split()
for a,b in zip(colname,colnum):
cols[a] = int(b)
data = []
old_percent = 0
for i in range(data_rows):
# writing reading status
percent = int(i*100/np.max([1, data_rows-1]))
if percent >= old_percent + 5:
sys.stdout.flush()
sys.stdout.write("\r reading " + "...%d%%" % percent)
old_percent = percent
line = f.readline()
v=line.split()
try:
vv=np.array(v,dtype='float64')
except ValueError:
for item in v:
if item.__contains__('.') and not item.__contains__('E'):
v[v.index(item)]='0'
data.append(vv)
print(' \n')
f.close()
a=np.array(data)
data = []
return header_attr, cols, a | private routine that is not directly called by the user | Below is the instruction that describes the task:
### Input:
private routine that is not directly called by the user
### Response:
def _read_mesafile(filename,data_rows=0,only='all'):
""" private routine that is not directly called by the user"""
f=open(filename,'r')
vv=[]
v=[]
lines = []
line = ''
for i in range(0,6):
line = f.readline()
lines.extend([line])
hval = lines[2].split()
hlist = lines[1].split()
header_attr = {}
for a,b in zip(hlist,hval):
header_attr[a] = float(b)
if only == 'header_attr':
return header_attr
cols = {}
colnum = lines[4].split()
colname = lines[5].split()
for a,b in zip(colname,colnum):
cols[a] = int(b)
data = []
old_percent = 0
for i in range(data_rows):
# writing reading status
percent = int(i*100/np.max([1, data_rows-1]))
if percent >= old_percent + 5:
sys.stdout.flush()
sys.stdout.write("\r reading " + "...%d%%" % percent)
old_percent = percent
line = f.readline()
v=line.split()
try:
vv=np.array(v,dtype='float64')
except ValueError:
for item in v:
if item.__contains__('.') and not item.__contains__('E'):
v[v.index(item)]='0'
data.append(vv)
print(' \n')
f.close()
a=np.array(data)
data = []
return header_attr, cols, a |
def to_native(self, value):
"""Return the value as a dict, raising error if conversion to dict is not possible"""
if isinstance(value, dict):
return value
elif isinstance(value, six.string_types):
native_value = json.loads(value)
if isinstance(native_value, dict):
return native_value
else:
raise ConversionError(u'Cannot load value as a dict: {}'.format(value)) | Return the value as a dict, raising error if conversion to dict is not possible | Below is the instruction that describes the task:
### Input:
Return the value as a dict, raising error if conversion to dict is not possible
### Response:
def to_native(self, value):
"""Return the value as a dict, raising error if conversion to dict is not possible"""
if isinstance(value, dict):
return value
elif isinstance(value, six.string_types):
native_value = json.loads(value)
if isinstance(native_value, dict):
return native_value
else:
raise ConversionError(u'Cannot load value as a dict: {}'.format(value)) |
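A quick sketch of the conversion behaviour, assuming `field` is an instance of the dict-valued field class that defines to_native:
field.to_native({'a': 1})    # dicts pass through unchanged
field.to_native('{"a": 1}')  # JSON strings are parsed into a dict
field.to_native('[1, 2, 3]') # parses to a list, so ConversionError is raised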
def decompose_once_with_qubits(val: Any,
qubits: Iterable['cirq.Qid'],
default=RaiseTypeErrorIfNotProvided):
"""Decomposes a value into operations on the given qubits.
This method is used when decomposing gates, which don't know which qubits
they are being applied to unless told. It decomposes the gate exactly once,
instead of decomposing it and then continuing to decomposing the decomposed
operations recursively until some criteria is met.
Args:
val: The value to call `._decompose_(qubits=qubits)` on, if possible.
qubits: The value to pass into the named `qubits` parameter of
`val._decompose_`.
default: A default result to use if the value doesn't have a
`_decompose_` method or that method returns `NotImplemented` or
`None`. If not specified, undecomposable values cause a `TypeError`.
Returns:
The result of `val._decompose_(qubits=qubits)`, if `val` has a
`_decompose_` method and it didn't return `NotImplemented` or `None`.
Otherwise `default` is returned, if it was specified. Otherwise an error
is raised.
TypeError:
`val` didn't have a `_decompose_` method (or that method returned
`NotImplemented` or `None`) and `default` wasn't set.
"""
return decompose_once(val, default, qubits=tuple(qubits)) | Decomposes a value into operations on the given qubits.
This method is used when decomposing gates, which don't know which qubits
they are being applied to unless told. It decomposes the gate exactly once,
instead of decomposing it and then continuing to decompose the decomposed
operations recursively until some criterion is met.
Args:
val: The value to call `._decompose_(qubits=qubits)` on, if possible.
qubits: The value to pass into the named `qubits` parameter of
`val._decompose_`.
default: A default result to use if the value doesn't have a
`_decompose_` method or that method returns `NotImplemented` or
`None`. If not specified, undecomposable values cause a `TypeError`.
Returns:
The result of `val._decompose_(qubits=qubits)`, if `val` has a
`_decompose_` method and it didn't return `NotImplemented` or `None`.
Otherwise `default` is returned, if it was specified. Otherwise an error
is raised.
TypeError:
`val` didn't have a `_decompose_` method (or that method returned
`NotImplemented` or `None`) and `default` wasn't set. | Below is the instruction that describes the task:
### Input:
Decomposes a value into operations on the given qubits.
This method is used when decomposing gates, which don't know which qubits
they are being applied to unless told. It decomposes the gate exactly once,
instead of decomposing it and then continuing to decompose the decomposed
operations recursively until some criterion is met.
Args:
val: The value to call `._decompose_(qubits=qubits)` on, if possible.
qubits: The value to pass into the named `qubits` parameter of
`val._decompose_`.
default: A default result to use if the value doesn't have a
`_decompose_` method or that method returns `NotImplemented` or
`None`. If not specified, undecomposable values cause a `TypeError`.
Returns:
The result of `val._decompose_(qubits=qubits)`, if `val` has a
`_decompose_` method and it didn't return `NotImplemented` or `None`.
Otherwise `default` is returned, if it was specified. Otherwise an error
is raised.
TypeError:
`val` didn't have a `_decompose_` method (or that method returned
`NotImplemented` or `None`) and `default` wasn't set.
### Response:
def decompose_once_with_qubits(val: Any,
qubits: Iterable['cirq.Qid'],
default=RaiseTypeErrorIfNotProvided):
"""Decomposes a value into operations on the given qubits.
This method is used when decomposing gates, which don't know which qubits
they are being applied to unless told. It decomposes the gate exactly once,
instead of decomposing it and then continuing to decompose the decomposed
operations recursively until some criterion is met.
Args:
val: The value to call `._decompose_(qubits=qubits)` on, if possible.
qubits: The value to pass into the named `qubits` parameter of
`val._decompose_`.
default: A default result to use if the value doesn't have a
`_decompose_` method or that method returns `NotImplemented` or
`None`. If not specified, undecomposable values cause a `TypeError`.
Returns:
The result of `val._decompose_(qubits=qubits)`, if `val` has a
`_decompose_` method and it didn't return `NotImplemented` or `None`.
Otherwise `default` is returned, if it was specified. Otherwise an error
is raised.
TypeError:
`val` didn't have a `_decompose_` method (or that method returned
`NotImplemented` or `None`) and `default` wasn't set.
"""
return decompose_once(val, default, qubits=tuple(qubits)) |
def is_imap(self, model):
"""
Checks whether the given BayesianModel is Imap of JointProbabilityDistribution
Parameters
-----------
model : An instance of BayesianModel Class, for which you want to
check the Imap
Returns
--------
boolean : True if given bayesian model is Imap for Joint Probability Distribution
False otherwise
Examples
--------
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.factors.discrete import JointProbabilityDistribution
>>> bm = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
>>> diff_cpd = TabularCPD('diff', 2, [[0.2], [0.8]])
>>> intel_cpd = TabularCPD('intel', 3, [[0.5], [0.3], [0.2]])
>>> grade_cpd = TabularCPD('grade', 3,
... [[0.1,0.1,0.1,0.1,0.1,0.1],
... [0.1,0.1,0.1,0.1,0.1,0.1],
... [0.8,0.8,0.8,0.8,0.8,0.8]],
... evidence=['diff', 'intel'],
... evidence_card=[2, 3])
>>> bm.add_cpds(diff_cpd, intel_cpd, grade_cpd)
>>> val = [0.01, 0.01, 0.08, 0.006, 0.006, 0.048, 0.004, 0.004, 0.032,
0.04, 0.04, 0.32, 0.024, 0.024, 0.192, 0.016, 0.016, 0.128]
>>> JPD = JointProbabilityDistribution(['diff', 'intel', 'grade'], [2, 3, 3], val)
>>> JPD.is_imap(bm)
True
"""
from pgmpy.models import BayesianModel
if not isinstance(model, BayesianModel):
raise TypeError("model must be an instance of BayesianModel")
factors = [cpd.to_factor() for cpd in model.get_cpds()]
factor_prod = six.moves.reduce(mul, factors)
JPD_fact = DiscreteFactor(self.variables, self.cardinality, self.values)
if JPD_fact == factor_prod:
return True
else:
return False | Checks whether the given BayesianModel is Imap of JointProbabilityDistribution
Parameters
-----------
model : An instance of BayesianModel Class, for which you want to
check the Imap
Returns
--------
boolean : True if given bayesian model is Imap for Joint Probability Distribution
False otherwise
Examples
--------
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.factors.discrete import JointProbabilityDistribution
>>> bm = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
>>> diff_cpd = TabularCPD('diff', 2, [[0.2], [0.8]])
>>> intel_cpd = TabularCPD('intel', 3, [[0.5], [0.3], [0.2]])
>>> grade_cpd = TabularCPD('grade', 3,
... [[0.1,0.1,0.1,0.1,0.1,0.1],
... [0.1,0.1,0.1,0.1,0.1,0.1],
... [0.8,0.8,0.8,0.8,0.8,0.8]],
... evidence=['diff', 'intel'],
... evidence_card=[2, 3])
>>> bm.add_cpds(diff_cpd, intel_cpd, grade_cpd)
>>> val = [0.01, 0.01, 0.08, 0.006, 0.006, 0.048, 0.004, 0.004, 0.032,
0.04, 0.04, 0.32, 0.024, 0.024, 0.192, 0.016, 0.016, 0.128]
>>> JPD = JointProbabilityDistribution(['diff', 'intel', 'grade'], [2, 3, 3], val)
>>> JPD.is_imap(bm)
True | Below is the instruction that describes the task:
### Input:
Checks whether the given BayesianModel is Imap of JointProbabilityDistribution
Parameters
-----------
model : An instance of BayesianModel Class, for which you want to
check the Imap
Returns
--------
boolean : True if given bayesian model is Imap for Joint Probability Distribution
False otherwise
Examples
--------
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.factors.discrete import JointProbabilityDistribution
>>> bm = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
>>> diff_cpd = TabularCPD('diff', 2, [[0.2], [0.8]])
>>> intel_cpd = TabularCPD('intel', 3, [[0.5], [0.3], [0.2]])
>>> grade_cpd = TabularCPD('grade', 3,
... [[0.1,0.1,0.1,0.1,0.1,0.1],
... [0.1,0.1,0.1,0.1,0.1,0.1],
... [0.8,0.8,0.8,0.8,0.8,0.8]],
... evidence=['diff', 'intel'],
... evidence_card=[2, 3])
>>> bm.add_cpds(diff_cpd, intel_cpd, grade_cpd)
>>> val = [0.01, 0.01, 0.08, 0.006, 0.006, 0.048, 0.004, 0.004, 0.032,
0.04, 0.04, 0.32, 0.024, 0.024, 0.192, 0.016, 0.016, 0.128]
>>> JPD = JointProbabilityDistribution(['diff', 'intel', 'grade'], [2, 3, 3], val)
>>> JPD.is_imap(bm)
True
### Response:
def is_imap(self, model):
"""
Checks whether the given BayesianModel is Imap of JointProbabilityDistribution
Parameters
-----------
model : An instance of BayesianModel Class, for which you want to
check the Imap
Returns
--------
boolean : True if given bayesian model is Imap for Joint Probability Distribution
False otherwise
Examples
--------
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.factors.discrete import JointProbabilityDistribution
>>> bm = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
>>> diff_cpd = TabularCPD('diff', 2, [[0.2], [0.8]])
>>> intel_cpd = TabularCPD('intel', 3, [[0.5], [0.3], [0.2]])
>>> grade_cpd = TabularCPD('grade', 3,
... [[0.1,0.1,0.1,0.1,0.1,0.1],
... [0.1,0.1,0.1,0.1,0.1,0.1],
... [0.8,0.8,0.8,0.8,0.8,0.8]],
... evidence=['diff', 'intel'],
... evidence_card=[2, 3])
>>> bm.add_cpds(diff_cpd, intel_cpd, grade_cpd)
>>> val = [0.01, 0.01, 0.08, 0.006, 0.006, 0.048, 0.004, 0.004, 0.032,
0.04, 0.04, 0.32, 0.024, 0.024, 0.192, 0.016, 0.016, 0.128]
>>> JPD = JointProbabilityDistribution(['diff', 'intel', 'grade'], [2, 3, 3], val)
>>> JPD.is_imap(bm)
True
"""
from pgmpy.models import BayesianModel
if not isinstance(model, BayesianModel):
raise TypeError("model must be an instance of BayesianModel")
factors = [cpd.to_factor() for cpd in model.get_cpds()]
factor_prod = six.moves.reduce(mul, factors)
JPD_fact = DiscreteFactor(self.variables, self.cardinality, self.values)
if JPD_fact == factor_prod:
return True
else:
return False |
def distance(a, b):
"""Calculates distance between two latitude-longitude coordinates."""
R = 3963 # radius of Earth (miles)
lat1, lon1 = math.radians(a[0]), math.radians(a[1])
lat2, lon2 = math.radians(b[0]), math.radians(b[1])
return math.acos(math.sin(lat1) * math.sin(lat2) +
math.cos(lat1) * math.cos(lat2) * math.cos(lon1 - lon2)) * R | Calculates distance between two latitude-longitude coordinates. | Below is the instruction that describes the task:
### Input:
Calculates distance between two latitude-longitude coordinates.
### Response:
def distance(a, b):
"""Calculates distance between two latitude-longitude coordinates."""
R = 3963 # radius of Earth (miles)
lat1, lon1 = math.radians(a[0]), math.radians(a[1])
lat2, lon2 = math.radians(b[0]), math.radians(b[1])
return math.acos(math.sin(lat1) * math.sin(lat2) +
math.cos(lat1) * math.cos(lat2) * math.cos(lon1 - lon2)) * R |
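A worked example with approximate coordinates:
new_york = (40.7128, -74.0060)  # (latitude, longitude) in degrees
chicago = (41.8781, -87.6298)
miles = distance(new_york, chicago)
print(round(miles))  # roughly 710 miles by the spherical law of cosines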
def _parse_json(cls, resources, exactly_one=True):
"""
Parse display name, latitude, and longitude from a JSON response.
"""
if not len(resources['features']): # pragma: no cover
return None
if exactly_one:
return cls.parse_resource(resources['features'][0])
else:
return [cls.parse_resource(resource) for resource
in resources['features']] | Parse display name, latitude, and longitude from a JSON response. | Below is the instruction that describes the task:
### Input:
Parse display name, latitude, and longitude from a JSON response.
### Response:
def _parse_json(cls, resources, exactly_one=True):
"""
Parse display name, latitude, and longitude from a JSON response.
"""
if not len(resources['features']): # pragma: no cover
return None
if exactly_one:
return cls.parse_resource(resources['features'][0])
else:
return [cls.parse_resource(resource) for resource
in resources['features']] |
def get_next_objective(self):
"""Gets the next Objective in this list.
return: (osid.learning.Objective) - the next Objective in this
list. The has_next() method should be used to test that
a next Objective is available before calling this
method.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
compliance: mandatory - This method must be implemented.
"""
try:
next_object = next(self)
except StopIteration:
raise IllegalState('no more elements available in this list')
except Exception: # Need to specify exceptions here!
raise OperationFailed()
else:
return next_object | Gets the next Objective in this list.
return: (osid.learning.Objective) - the next Objective in this
list. The has_next() method should be used to test that
a next Objective is available before calling this
method.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
compliance: mandatory - This method must be implemented. | Below is the instruction that describes the task:
### Input:
Gets the next Objective in this list.
return: (osid.learning.Objective) - the next Objective in this
list. The has_next() method should be used to test that
a next Objective is available before calling this
method.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
compliance: mandatory - This method must be implemented.
### Response:
def get_next_objective(self):
"""Gets the next Objective in this list.
return: (osid.learning.Objective) - the next Objective in this
list. The has_next() method should be used to test that
a next Objective is available before calling this
method.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
compliance: mandatory - This method must be implemented.
"""
try:
next_object = next(self)
except StopIteration:
raise IllegalState('no more elements available in this list')
except Exception: # Need to specify exceptions here!
raise OperationFailed()
else:
return next_object |
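A sketch of the iteration pattern the docstring describes, assuming `objective_list` is an ObjectiveList instance that also exposes has_next():
while objective_list.has_next():  # has_next() is assumed per the docstring
    objective = objective_list.get_next_objective()
    print(objective)  # an osid.learning.Objective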
def refweights(self):
"""A |numpy| |numpy.ndarray| with equal weights for all segment
junctions.
>>> from hydpy.models.hstream import *
>>> parameterstep('1d')
>>> states.qjoints.shape = 5
>>> states.qjoints.refweights
array([ 0.2, 0.2, 0.2, 0.2, 0.2])
"""
# pylint: disable=unsubscriptable-object
# due to a pylint bug (see https://github.com/PyCQA/pylint/issues/870)
return numpy.full(self.shape, 1./self.shape[0], dtype=float) | A |numpy| |numpy.ndarray| with equal weights for all segment
junctions.
>>> from hydpy.models.hstream import *
>>> parameterstep('1d')
>>> states.qjoints.shape = 5
>>> states.qjoints.refweights
array([ 0.2, 0.2, 0.2, 0.2, 0.2]) | Below is the instruction that describes the task:
### Input:
A |numpy| |numpy.ndarray| with equal weights for all segment
junctions.
>>> from hydpy.models.hstream import *
>>> parameterstep('1d')
>>> states.qjoints.shape = 5
>>> states.qjoints.refweights
array([ 0.2, 0.2, 0.2, 0.2, 0.2])
### Response:
def refweights(self):
"""A |numpy| |numpy.ndarray| with equal weights for all segment
junctions.
>>> from hydpy.models.hstream import *
>>> parameterstep('1d')
>>> states.qjoints.shape = 5
>>> states.qjoints.refweights
array([ 0.2, 0.2, 0.2, 0.2, 0.2])
"""
# pylint: disable=unsubscriptable-object
# due to a pylint bug (see https://github.com/PyCQA/pylint/issues/870)
return numpy.full(self.shape, 1./self.shape[0], dtype=float) |
def load_file(folder_path, idx, corpus):
"""
Load speaker, file, utterance, labels for the file with the given id.
"""
xml_path = os.path.join(folder_path, '{}.xml'.format(idx))
wav_paths = glob.glob(os.path.join(folder_path, '{}_*.wav'.format(idx)))
if len(wav_paths) == 0:
return []
xml_file = open(xml_path, 'r', encoding='utf-8')
soup = BeautifulSoup(xml_file, 'lxml')
transcription = soup.recording.cleaned_sentence.string
transcription_raw = soup.recording.sentence.string
gender = soup.recording.gender.string
is_native = soup.recording.muttersprachler.string
age_class = soup.recording.ageclass.string
speaker_idx = soup.recording.speaker_id.string
if speaker_idx not in corpus.issuers.keys():
start_age_class = int(age_class.split('-')[0])
if start_age_class < 12:
age_group = issuers.AgeGroup.CHILD
elif start_age_class < 18:
age_group = issuers.AgeGroup.YOUTH
elif start_age_class < 65:
age_group = issuers.AgeGroup.ADULT
else:
age_group = issuers.AgeGroup.SENIOR
native_lang = None
if is_native == 'Ja':
native_lang = 'deu'
issuer = issuers.Speaker(speaker_idx,
gender=issuers.Gender(gender),
age_group=age_group,
native_language=native_lang)
corpus.import_issuers(issuer)
utt_ids = []
for wav_path in wav_paths:
wav_name = os.path.split(wav_path)[1]
wav_idx = os.path.splitext(wav_name)[0]
corpus.new_file(wav_path, wav_idx)
utt = corpus.new_utterance(wav_idx, wav_idx, speaker_idx)
utt.set_label_list(annotations.LabelList.create_single(
transcription,
idx=audiomate.corpus.LL_WORD_TRANSCRIPT
))
utt.set_label_list(annotations.LabelList.create_single(
transcription_raw,
idx=audiomate.corpus.LL_WORD_TRANSCRIPT_RAW
))
utt_ids.append(wav_idx)
return utt_ids | Load speaker, file, utterance, labels for the file with the given id. | Below is the instruction that describes the task:
### Input:
Load speaker, file, utterance, labels for the file with the given id.
### Response:
def load_file(folder_path, idx, corpus):
"""
Load speaker, file, utterance, labels for the file with the given id.
"""
xml_path = os.path.join(folder_path, '{}.xml'.format(idx))
wav_paths = glob.glob(os.path.join(folder_path, '{}_*.wav'.format(idx)))
if len(wav_paths) == 0:
return []
xml_file = open(xml_path, 'r', encoding='utf-8')
soup = BeautifulSoup(xml_file, 'lxml')
transcription = soup.recording.cleaned_sentence.string
transcription_raw = soup.recording.sentence.string
gender = soup.recording.gender.string
is_native = soup.recording.muttersprachler.string
age_class = soup.recording.ageclass.string
speaker_idx = soup.recording.speaker_id.string
if speaker_idx not in corpus.issuers.keys():
start_age_class = int(age_class.split('-')[0])
if start_age_class < 12:
age_group = issuers.AgeGroup.CHILD
elif start_age_class < 18:
age_group = issuers.AgeGroup.YOUTH
elif start_age_class < 65:
age_group = issuers.AgeGroup.ADULT
else:
age_group = issuers.AgeGroup.SENIOR
native_lang = None
if is_native == 'Ja':
native_lang = 'deu'
issuer = issuers.Speaker(speaker_idx,
gender=issuers.Gender(gender),
age_group=age_group,
native_language=native_lang)
corpus.import_issuers(issuer)
utt_ids = []
for wav_path in wav_paths:
wav_name = os.path.split(wav_path)[1]
wav_idx = os.path.splitext(wav_name)[0]
corpus.new_file(wav_path, wav_idx)
utt = corpus.new_utterance(wav_idx, wav_idx, speaker_idx)
utt.set_label_list(annotations.LabelList.create_single(
transcription,
idx=audiomate.corpus.LL_WORD_TRANSCRIPT
))
utt.set_label_list(annotations.LabelList.create_single(
transcription_raw,
idx=audiomate.corpus.LL_WORD_TRANSCRIPT_RAW
))
utt_ids.append(wav_idx)
return utt_ids |
def response(self, response_data):
'''
called by the event handler with the result data
:param response_data: result data
:return:
'''
if "address" not in response_data:
return None, "address missing from response_data payload"
if "function" not in response_data:
return None, "method missing from response_data payload"
if "params" not in response_data:
return None, "params missing from response_data payload"
if "target" not in response_data:
return None, "target missing from response_data payload"
address = self._ezo.w3.toChecksumAddress(response_data["address"])
account = self._ezo.w3.toChecksumAddress(get_account(self._ezo.config, response_data["target"]))
self._ezo.w3.eth.accounts[0] = account
tx_dict = dict()
tx_dict["account"] = account
tx_dict["from"] = account
password = os.environ['EZO_PASSWORD'] if 'EZO_PASSWORD' in os.environ else None
u_state = self._ezo.w3.personal.unlockAccount(account, password)
if not self.contract_obj:
try:
self.contract_obj = self._ezo.w3.eth.contract(address=address, abi=self.abi)
except Exception as e:
return None, e
method = response_data["function"]
params = response_data["params"]
contract_func = self.contract_obj.functions[method]
try:
if not params:
tx_dict["gas"] = contract_func().estimateGas() + 1000
tx_hash = contract_func().transact(tx_dict)
else:
tx_dict["gas"] = contract_func(*params).estimateGas() + 1000
tx_hash = contract_func(*params).transact(tx_dict)
receipt = self._ezo.w3.eth.waitForTransactionReceipt(tx_hash)
except Exception as e:
return None, "error executing transaction: {}".format(e)
# finally:
# self._ezo.w3.personal.lockAccount(account)
return receipt, None | called by the event handler with the result data
:param response_data: result data
:return: | Below is the instruction that describes the task:
### Input:
called by the event handler with the result data
:param response_data: result data
:return:
### Response:
def response(self, response_data):
'''
called by the event handler with the result data
:param response_data: result data
:return:
'''
if "address" not in response_data:
return None, "address missing from response_data payload"
if "function" not in response_data:
return None, "method missing from response_data payload"
if "params" not in response_data:
return None, "params missing from response_data payload"
if "target" not in response_data:
return None, "target missing from response_data payload"
address = self._ezo.w3.toChecksumAddress(response_data["address"])
account = self._ezo.w3.toChecksumAddress(get_account(self._ezo.config, response_data["target"]))
self._ezo.w3.eth.accounts[0] = account
tx_dict = dict()
tx_dict["account"] = account
tx_dict["from"] = account
password = os.environ['EZO_PASSWORD'] if 'EZO_PASSWORD' in os.environ else None
u_state = self._ezo.w3.personal.unlockAccount(account, password)
if not self.contract_obj:
try:
self.contract_obj = self._ezo.w3.eth.contract(address=address, abi=self.abi)
except Exception as e:
return None, e
method = response_data["function"]
params = response_data["params"]
contract_func = self.contract_obj.functions[method]
try:
if not params:
tx_dict["gas"] = contract_func().estimateGas() + 1000
tx_hash = contract_func().transact(tx_dict)
else:
tx_dict["gas"] = contract_func(*params).estimateGas() + 1000
tx_hash = contract_func(*params).transact(tx_dict)
receipt = self._ezo.w3.eth.waitForTransactionReceipt(tx_hash)
except Exception as e:
return None, "error executing transaction: {}".format(e)
# finally:
# self._ezo.w3.personal.lockAccount(account)
return receipt, None |
def generate_psk(self, security_key):
"""
Generate and set a psk from the security key.
"""
if not self._psk:
# Backup the real identity.
existing_psk_id = self._psk_id
# Set the default identity and security key for generation.
self._psk_id = 'Client_identity'
self._psk = security_key
# Ask the Gateway to generate the psk for the identity.
self._psk = self.request(Gateway().generate_psk(existing_psk_id))
# Restore the real identity.
self._psk_id = existing_psk_id
return self._psk | Generate and set a psk from the security key. | Below is the instruction that describes the task:
### Input:
Generate and set a psk from the security key.
### Response:
def generate_psk(self, security_key):
"""
Generate and set a psk from the security key.
"""
if not self._psk:
# Backup the real identity.
existing_psk_id = self._psk_id
# Set the default identity and security key for generation.
self._psk_id = 'Client_identity'
self._psk = security_key
# Ask the Gateway to generate the psk for the identity.
self._psk = self.request(Gateway().generate_psk(existing_psk_id))
# Restore the real identity.
self._psk_id = existing_psk_id
return self._psk |
def listTasks(self, opts={}, queryOpts={}):
"""
Get information about all Koji tasks.
Calls "listTasks" XML-RPC.
:param dict opts: Eg. {'state': [task_states.OPEN]}
:param dict queryOpts: Eg. {'order' : 'priority,create_time'}
:returns: deferred that when fired returns a list of Task objects.
"""
opts['decode'] = True # decode xmlrpc data in "request"
data = yield self.call('listTasks', opts, queryOpts)
tasks = []
for tdata in data:
task = Task.fromDict(tdata)
task.connection = self
tasks.append(task)
defer.returnValue(tasks) | Get information about all Koji tasks.
Calls "listTasks" XML-RPC.
:param dict opts: Eg. {'state': [task_states.OPEN]}
:param dict queryOpts: Eg. {'order' : 'priority,create_time'}
:returns: deferred that when fired returns a list of Task objects. | Below is the instruction that describes the task:
### Input:
Get information about all Koji tasks.
Calls "listTasks" XML-RPC.
:param dict opts: Eg. {'state': [task_states.OPEN]}
:param dict queryOpts: Eg. {'order' : 'priority,create_time'}
:returns: deferred that when fired returns a list of Task objects.
### Response:
def listTasks(self, opts={}, queryOpts={}):
"""
Get information about all Koji tasks.
Calls "listTasks" XML-RPC.
:param dict opts: Eg. {'state': [task_states.OPEN]}
:param dict queryOpts: Eg. {'order' : 'priority,create_time'}
:returns: deferred that when fired returns a list of Task objects.
"""
opts['decode'] = True # decode xmlrpc data in "request"
data = yield self.call('listTasks', opts, queryOpts)
tasks = []
for tdata in data:
task = Task.fromDict(tdata)
task.connection = self
tasks.append(task)
defer.returnValue(tasks) |
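A hedged sketch using Twisted's inlineCallbacks, assuming `session` is a connected instance of the client class that defines listTasks; the numeric state value is an assumption standing in for task_states.OPEN:
from twisted.internet import defer

@defer.inlineCallbacks
def print_open_tasks(session):
    # 1 stands in for task_states.OPEN here; import the real constant in practice.
    tasks = yield session.listTasks({'state': [1]},
                                    {'order': 'priority,create_time'})
    for task in tasks:
        print(task)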
def handle(client, request):
"""
Handle format request
request struct:
{
'data': 'data_need_format',
'formaters': [
{
'name': 'formater_name',
'config': {} # None or dict
},
... # formaters
]
}
if no formaters, use autopep8 formater and its default config
"""
formaters = request.get('formaters', None)
if not formaters:
formaters = [{'name': 'autopep8'}]
logging.debug('formaters: ' + json.dumps(formaters, indent=4))
data = request.get('data', None)
if not isinstance(data, str):
return send(client, 'invalid data', None)
max_line_length = None
for formater in formaters:
max_line_length = formater.get('config', {}).get('max_line_length')
if max_line_length:
break
for formater in formaters:
name = formater.get('name', None)
config = formater.get('config', {})
if name not in FORMATERS:
return send(client, 'formater {} not support'.format(name), None)
formater = FORMATERS[name]
if formater is None:
return send(client, 'formater {} not installed'.format(name), None)
if name == 'isort' and max_line_length:
config.setdefault('line_length', max_line_length)
data = formater(data, **config)
return send(client, None, data) | Handle format request
request struct:
{
'data': 'data_need_format',
'formaters': [
{
'name': 'formater_name',
'config': {} # None or dict
},
... # formaters
]
}
if no formaters, use autopep8 formater and its default config | Below is the instruction that describes the task:
### Input:
Handle format request
request struct:
{
'data': 'data_need_format',
'formaters': [
{
'name': 'formater_name',
'config': {} # None or dict
},
... # formaters
]
}
if no formaters, use autopep8 formater and its default config
### Response:
def handle(client, request):
"""
Handle format request
request struct:
{
'data': 'data_need_format',
'formaters': [
{
'name': 'formater_name',
'config': {} # None or dict
},
... # formaters
]
}
if no formaters, use autopep8 formater and its default config
"""
formaters = request.get('formaters', None)
if not formaters:
formaters = [{'name': 'autopep8'}]
logging.debug('formaters: ' + json.dumps(formaters, indent=4))
data = request.get('data', None)
if not isinstance(data, str):
return send(client, 'invalid data', None)
max_line_length = None
for formater in formaters:
max_line_length = formater.get('config', {}).get('max_line_length')
if max_line_length:
break
for formater in formaters:
name = formater.get('name', None)
config = formater.get('config', {})
if name not in FORMATERS:
return send(client, 'formater {} not support'.format(name), None)
formater = FORMATERS[name]
if formater is None:
return send(client, 'formater {} not installed'.format(name), None)
if name == 'isort' and max_line_length:
config.setdefault('line_length', max_line_length)
data = formater(data, **config)
return send(client, None, data) |
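An illustrative request payload for the handler above; `client` stands for the socket-like connection object the surrounding server passes in, and the formatter configs are made up:
request = {
    'data': 'import os,sys\nx=1',  # source text to format
    'formaters': [
        {'name': 'isort', 'config': {}},
        {'name': 'autopep8', 'config': {'max_line_length': 100}},
    ],
}
handle(client, request)  # replies with the formatted text via send()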
def add_element(self, elt):
"""Helper to add a element to the current section. The Element name
will be used as an identifier."""
if not isinstance(elt, Element):
raise TypeError("argument should be a subclass of Element")
self.elements[elt.get_name()] = elt
return elt | Helper to add an element to the current section. The Element name
will be used as an identifier. | Below is the instruction that describes the task:
### Input:
Helper to add an element to the current section. The Element name
will be used as an identifier.
### Response:
def add_element(self, elt):
"""Helper to add a element to the current section. The Element name
will be used as an identifier."""
if not isinstance(elt, Element):
raise TypeError("argument should be a subclass of Element")
self.elements[elt.get_name()] = elt
return elt |
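A minimal sketch, assuming `section` is an instance of the class that defines add_element and that `Element('title')` is a valid way to build an element exposing get_name():
title = Element('title')  # hypothetical constructor call
section.add_element(title)  # stored under the key title.get_name() returns
section.add_element("not an element")  # raises TypeError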
def sunrise(self, date=None, local=True, use_elevation=True):
"""Return sunrise time.
Calculates the time in the morning when the sun is 0.833 degrees
below the horizon. This is to account for refraction.
:param date: The date for which to calculate the sunrise time.
If no date is specified then the current date will be used.
:type date: :class:`~datetime.date`
:param local: True = Time to be returned in location's time zone;
False = Time to be returned in UTC.
If not specified then the time will be returned in local time
:type local: bool
:param use_elevation: True = Return times that allow for the location's elevation;
False = Return times that don't use elevation.
If not specified then times will take elevation into account.
:type use_elevation: bool
:returns: The date and time at which sunrise occurs.
:rtype: :class:`~datetime.datetime`
"""
if local and self.timezone is None:
raise ValueError("Local time requested but Location has no timezone set.")
if self.astral is None:
self.astral = Astral()
if date is None:
date = datetime.date.today()
elevation = self.elevation if use_elevation else 0
sunrise = self.astral.sunrise_utc(date, self.latitude, self.longitude, elevation)
if local:
return sunrise.astimezone(self.tz)
else:
return sunrise | Return sunrise time.
Calculates the time in the morning when the sun is 0.833 degrees
below the horizon. This is to account for refraction.
:param date: The date for which to calculate the sunrise time.
If no date is specified then the current date will be used.
:type date: :class:`~datetime.date`
:param local: True = Time to be returned in location's time zone;
False = Time to be returned in UTC.
If not specified then the time will be returned in local time
:type local: bool
:param use_elevation: True = Return times that allow for the location's elevation;
False = Return times that don't use elevation.
If not specified then times will take elevation into account.
:type use_elevation: bool
:returns: The date and time at which sunrise occurs.
:rtype: :class:`~datetime.datetime` | Below is the the instruction that describes the task:
### Input:
Return sunrise time.
Calculates the time in the morning when the sun is 0.833 degrees
below the horizon. This is to account for refraction.
:param date: The date for which to calculate the sunrise time.
If no date is specified then the current date will be used.
:type date: :class:`~datetime.date`
:param local: True = Time to be returned in location's time zone;
False = Time to be returned in UTC.
If not specified then the time will be returned in local time
:type local: bool
:param use_elevation: True = Return times that allow for the location's elevation;
False = Return times that don't use elevation.
If not specified then times will take elevation into account.
:type use_elevation: bool
:returns: The date and time at which sunrise occurs.
:rtype: :class:`~datetime.datetime`
### Response:
def sunrise(self, date=None, local=True, use_elevation=True):
"""Return sunrise time.
Calculates the time in the morning when the sun is 0.833 degrees
below the horizon. This is to account for refraction.
:param date: The date for which to calculate the sunrise time.
If no date is specified then the current date will be used.
:type date: :class:`~datetime.date`
:param local: True = Time to be returned in location's time zone;
False = Time to be returned in UTC.
If not specified then the time will be returned in local time
:type local: bool
:param use_elevation: True = Return times that allow for the location's elevation;
False = Return times that don't use elevation.
If not specified then times will take elevation into account.
:type use_elevation: bool
:returns: The date and time at which sunrise occurs.
:rtype: :class:`~datetime.datetime`
"""
if local and self.timezone is None:
raise ValueError("Local time requested but Location has no timezone set.")
if self.astral is None:
self.astral = Astral()
if date is None:
date = datetime.date.today()
elevation = self.elevation if use_elevation else 0
sunrise = self.astral.sunrise_utc(date, self.latitude, self.longitude, elevation)
if local:
return sunrise.astimezone(self.tz)
else:
return sunrise |
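A usage sketch, assuming location is an instance of this class with latitude, longitude, elevation and timezone already configured:
import datetime
utc_rise = location.sunrise(date=datetime.date(2019, 6, 21), local=False)
local_rise = location.sunrise()  # today's date, converted to location.tz because local defaults to True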
def _writable(method):
"""Check that record is in defined status.
:param method: Method to be decorated.
:returns: Function decorated.
"""
@wraps(method)
def wrapper(self, *args, **kwargs):
"""Send record for indexing.
:returns: Execution result of the decorated method.
:raises InvalidOperationError: It occurs when the bucket is locked or
deleted.
"""
if self.bucket.locked or self.bucket.deleted:
raise InvalidOperationError()
return method(self, *args, **kwargs)
return wrapper | Check that record is in defined status.
:param method: Method to be decorated.
:returns: Function decorated. | Below is the the instruction that describes the task:
### Input:
Check that record is in defined status.
:param method: Method to be decorated.
:returns: Function decorated.
### Response:
def _writable(method):
"""Check that record is in defined status.
:param method: Method to be decorated.
:returns: Function decorated.
"""
@wraps(method)
def wrapper(self, *args, **kwargs):
"""Send record for indexing.
:returns: Execution result of the decorated method.
:raises InvalidOperationError: It occurs when the bucket is locked or
deleted.
"""
if self.bucket.locked or self.bucket.deleted:
raise InvalidOperationError()
return method(self, *args, **kwargs)
return wrapper |
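A sketch of how the decorator above is typically applied inside the model class (the delete body is hypothetical):
class Record(object):
    # self.bucket carries the .locked / .deleted flags checked by _writable
    @_writable
    def delete(self, *args, **kwargs):
        # only reached when the bucket is neither locked nor deleted
        return self._do_delete(*args, **kwargs)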
def type_object_attrgetter(obj, attr, *defargs):
"""
This implements an improved attrgetter for type objects (i.e. classes)
that can handle class attributes that are implemented as properties on
a metaclass.
Normally `getattr` on a class with a `property` (say, "foo"), would return
the `property` object itself. However, if the class has a metaclass which
*also* defines a `property` named "foo", ``getattr(cls, 'foo')`` will find
the "foo" property on the metaclass and resolve it. For the purposes of
autodoc we just want to document the "foo" property defined on the class,
not on the metaclass.
For example::
>>> class Meta(type):
... @property
... def foo(cls):
... return 'foo'
...
>>> class MyClass(metaclass=Meta):
... @property
... def foo(self):
... \"\"\"Docstring for MyClass.foo property.\"\"\"
... return 'myfoo'
...
>>> getattr(MyClass, 'foo')
'foo'
>>> type_object_attrgetter(MyClass, 'foo')
<property at 0x...>
>>> type_object_attrgetter(MyClass, 'foo').__doc__
'Docstring for MyClass.foo property.'
The last line of the example shows the desired behavior for the purposes
of autodoc.
"""
for base in obj.__mro__:
if attr in base.__dict__:
if isinstance(base.__dict__[attr], property):
# Note, this should only be used for properties--for any other
# type of descriptor (classmethod, for example) this can mess
# up existing expectations of what getattr(cls, ...) returns
return base.__dict__[attr]
break
return getattr(obj, attr, *defargs) | This implements an improved attrgetter for type objects (i.e. classes)
that can handle class attributes that are implemented as properties on
a metaclass.
Normally `getattr` on a class with a `property` (say, "foo"), would return
the `property` object itself. However, if the class has a metaclass which
*also* defines a `property` named "foo", ``getattr(cls, 'foo')`` will find
the "foo" property on the metaclass and resolve it. For the purposes of
autodoc we just want to document the "foo" property defined on the class,
not on the metaclass.
For example::
>>> class Meta(type):
... @property
... def foo(cls):
... return 'foo'
...
>>> class MyClass(metaclass=Meta):
... @property
... def foo(self):
... \"\"\"Docstring for MyClass.foo property.\"\"\"
... return 'myfoo'
...
>>> getattr(MyClass, 'foo')
'foo'
>>> type_object_attrgetter(MyClass, 'foo')
<property at 0x...>
>>> type_object_attrgetter(MyClass, 'foo').__doc__
'Docstring for MyClass.foo property.'
The last line of the example shows the desired behavior for the purposes
of autodoc. | Below is the the instruction that describes the task:
### Input:
This implements an improved attrgetter for type objects (i.e. classes)
that can handle class attributes that are implemented as properties on
a metaclass.
Normally `getattr` on a class with a `property` (say, "foo"), would return
the `property` object itself. However, if the class has a metaclass which
*also* defines a `property` named "foo", ``getattr(cls, 'foo')`` will find
the "foo" property on the metaclass and resolve it. For the purposes of
autodoc we just want to document the "foo" property defined on the class,
not on the metaclass.
For example::
>>> class Meta(type):
... @property
... def foo(cls):
... return 'foo'
...
>>> class MyClass(metaclass=Meta):
... @property
... def foo(self):
... \"\"\"Docstring for MyClass.foo property.\"\"\"
... return 'myfoo'
...
>>> getattr(MyClass, 'foo')
'foo'
>>> type_object_attrgetter(MyClass, 'foo')
<property at 0x...>
>>> type_object_attrgetter(MyClass, 'foo').__doc__
'Docstring for MyClass.foo property.'
The last line of the example shows the desired behavior for the purposes
of autodoc.
### Response:
def type_object_attrgetter(obj, attr, *defargs):
"""
This implements an improved attrgetter for type objects (i.e. classes)
that can handle class attributes that are implemented as properties on
a metaclass.
Normally `getattr` on a class with a `property` (say, "foo"), would return
the `property` object itself. However, if the class has a metaclass which
*also* defines a `property` named "foo", ``getattr(cls, 'foo')`` will find
the "foo" property on the metaclass and resolve it. For the purposes of
autodoc we just want to document the "foo" property defined on the class,
not on the metaclass.
For example::
>>> class Meta(type):
... @property
... def foo(cls):
... return 'foo'
...
>>> class MyClass(metaclass=Meta):
... @property
... def foo(self):
... \"\"\"Docstring for MyClass.foo property.\"\"\"
... return 'myfoo'
...
>>> getattr(MyClass, 'foo')
'foo'
>>> type_object_attrgetter(MyClass, 'foo')
<property at 0x...>
>>> type_object_attrgetter(MyClass, 'foo').__doc__
'Docstring for MyClass.foo property.'
The last line of the example shows the desired behavior for the purposes
of autodoc.
"""
for base in obj.__mro__:
if attr in base.__dict__:
if isinstance(base.__dict__[attr], property):
# Note, this should only be used for properties--for any other
# type of descriptor (classmethod, for example) this can mess
# up existing expectations of what getattr(cls, ...) returns
return base.__dict__[attr]
break
return getattr(obj, attr, *defargs) |
def setCurrentRegItem(self, regItem):
""" Sets the current registry item.
"""
rowIndex = self.model().indexFromItem(regItem)
if not rowIndex.isValid():
logger.warn("Can't select {!r} in table".format(regItem))
self.setCurrentIndex(rowIndex) | Sets the current registry item. | Below is the the instruction that describes the task:
### Input:
Sets the current registry item.
### Response:
def setCurrentRegItem(self, regItem):
""" Sets the current registry item.
"""
rowIndex = self.model().indexFromItem(regItem)
if not rowIndex.isValid():
logger.warn("Can't select {!r} in table".format(regItem))
self.setCurrentIndex(rowIndex) |
def recipe_status(self, kitchen, recipe, local_dir=None):
"""
gets the status of a recipe
:param self: DKCloudAPI
:param kitchen: string
:param recipe: string
:param local_dir: string --
:rtype: dict
"""
rc = DKReturnCode()
if kitchen is None or isinstance(kitchen, basestring) is False:
rc.set(rc.DK_FAIL, 'issue with kitchen parameter')
return rc
if recipe is None or isinstance(recipe, basestring) is False:
rc.set(rc.DK_FAIL, 'issue with recipe parameter')
return rc
url = '%s/v2/recipe/tree/%s/%s' % (self.get_url_for_direct_rest_call(),
kitchen, recipe)
try:
response = requests.get(url, headers=self._get_common_headers())
rdict = self._get_json(response)
pass
except (RequestException, ValueError, TypeError), c:
s = "get_recipe: exception: %s" % str(c)
rc.set(rc.DK_FAIL, s)
return rc
if DKCloudAPI._valid_response(response):
# Now get the local sha.
if local_dir is None:
check_path = os.getcwd()
else:
if os.path.isdir(local_dir) is False:
print 'Local path %s does not exist' % local_dir
return None
else:
check_path = local_dir
local_sha = get_directory_sha(check_path)
remote_sha = rdict['recipes'][recipe]
rv = compare_sha(remote_sha, local_sha)
rc.set(rc.DK_SUCCESS, None, rv)
else:
arc = DKAPIReturnCode(rdict, response)
rc.set(rc.DK_FAIL, arc.get_message())
return rc | gets the status of a recipe
:param self: DKCloudAPI
:param kitchen: string
:param recipe: string
:param local_dir: string --
:rtype: dict | Below is the the instruction that describes the task:
### Input:
gets the status of a recipe
:param self: DKCloudAPI
:param kitchen: string
:param recipe: string
:param local_dir: string --
:rtype: dict
### Response:
def recipe_status(self, kitchen, recipe, local_dir=None):
"""
gets the status of a recipe
:param self: DKCloudAPI
:param kitchen: string
:param recipe: string
:param local_dir: string --
:rtype: dict
"""
rc = DKReturnCode()
if kitchen is None or isinstance(kitchen, basestring) is False:
rc.set(rc.DK_FAIL, 'issue with kitchen parameter')
return rc
if recipe is None or isinstance(recipe, basestring) is False:
rc.set(rc.DK_FAIL, 'issue with recipe parameter')
return rc
url = '%s/v2/recipe/tree/%s/%s' % (self.get_url_for_direct_rest_call(),
kitchen, recipe)
try:
response = requests.get(url, headers=self._get_common_headers())
rdict = self._get_json(response)
pass
except (RequestException, ValueError, TypeError), c:
s = "get_recipe: exception: %s" % str(c)
rc.set(rc.DK_FAIL, s)
return rc
if DKCloudAPI._valid_response(response):
# Now get the local sha.
if local_dir is None:
check_path = os.getcwd()
else:
if os.path.isdir(local_dir) is False:
print 'Local path %s does not exist' % local_dir
return None
else:
check_path = local_dir
local_sha = get_directory_sha(check_path)
remote_sha = rdict['recipes'][recipe]
rv = compare_sha(remote_sha, local_sha)
rc.set(rc.DK_SUCCESS, None, rv)
else:
arc = DKAPIReturnCode(rdict, response)
rc.set(rc.DK_FAIL, arc.get_message())
return rc |
async def add(self, setname, ip, timeout):
"""
Adds the given IP address to the specified set.
If timeout is specified, the IP will stay in the set for the given
duration. Else it will stay in the set during the set default timeout.
timeout must be given in seconds.
The resulting command looks like this:
``nft add element inet firewall ellis_blacklist4 { 192.0.2.10 timeout 30s }``
"""
# We have to double-quote the '{' '}' at both ends for `format` to work.
if timeout > 0:
to_ban = "{{ {0} timeout {1}s }}".format(ip, timeout)
else:
to_ban = "{{ {0} }}".format(ip)
args = ['add', 'element', self.table_family, self.table_name, setname, to_ban]
return await self.start(__class__.CMD, *args) | Adds the given IP address to the specified set.
If timeout is specified, the IP will stay in the set for the given
duration. Else it will stay in the set during the set default timeout.
timeout must be given in seconds.
The resulting command looks like this:
``nft add element inet firewall ellis_blacklist4 { 192.0.2.10 timeout 30s }`` | Below is the the instruction that describes the task:
### Input:
Adds the given IP address to the specified set.
If timeout is specified, the IP will stay in the set for the given
duration. Else it will stay in the set during the set default timeout.
timeout must be given in seconds.
The resulting command looks like this:
``nft add element inet firewall ellis_blacklist4 { 192.0.2.10 timeout 30s }``
### Response:
async def add(self, setname, ip, timeout):
"""
Adds the given IP address to the specified set.
If timeout is specified, the IP will stay in the set for the given
duration. Else it will stay in the set during the set default timeout.
timeout must be given in seconds.
The resulting command looks like this:
``nft add element inet firewall ellis_blacklist4 { 192.0.2.10 timeout 30s }``
"""
# We have to double-quote the '{' '}' at both ends for `format` to work.
if timeout > 0:
to_ban = "{{ {0} timeout {1}s }}".format(ip, timeout)
else:
to_ban = "{{ {0} }}".format(ip)
args = ['add', 'element', self.table_family, self.table_name, setname, to_ban]
return await self.start(__class__.CMD, *args) |
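A hypothetical call site for the coroutine above (must be awaited from another coroutine; fw is an instance of the surrounding class):
# Ban 192.0.2.10 for 30 seconds in the ellis_blacklist4 set, matching the docstring example.
result = await fw.add('ellis_blacklist4', '192.0.2.10', 30)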
def small_abc_image_recognition():
"""!
@brief Trains network using letters 'A', 'B', 'C', and recognize each of them with and without noise.
"""
images = [];
images += IMAGE_SYMBOL_SAMPLES.LIST_IMAGES_SYMBOL_A;
images += IMAGE_SYMBOL_SAMPLES.LIST_IMAGES_SYMBOL_B;
images += IMAGE_SYMBOL_SAMPLES.LIST_IMAGES_SYMBOL_C;
template_recognition_image(images, 250, 25); | !
@brief Trains network using letters 'A', 'B', 'C', and recognize each of them with and without noise. | Below is the the instruction that describes the task:
### Input:
!
@brief Trains network using letters 'A', 'B', 'C', and recognize each of them with and without noise.
### Response:
def small_abc_image_recognition():
"""!
@brief Trains network using letters 'A', 'B', 'C', and recognize each of them with and without noise.
"""
images = [];
images += IMAGE_SYMBOL_SAMPLES.LIST_IMAGES_SYMBOL_A;
images += IMAGE_SYMBOL_SAMPLES.LIST_IMAGES_SYMBOL_B;
images += IMAGE_SYMBOL_SAMPLES.LIST_IMAGES_SYMBOL_C;
template_recognition_image(images, 250, 25); |
def post_refresh_system_metadata(request):
"""MNStorage.systemMetadataChanged(session, did, serialVersion,
dateSysMetaLastModified) → boolean."""
d1_gmn.app.views.assert_db.post_has_mime_parts(
request,
(
('field', 'pid'),
('field', 'serialVersion'),
('field', 'dateSysMetaLastModified'),
),
)
d1_gmn.app.views.assert_db.is_existing_object(request.POST['pid'])
d1_gmn.app.models.sysmeta_refresh_queue(
request.POST['pid'],
request.POST['serialVersion'],
request.POST['dateSysMetaLastModified'],
'queued',
).save()
return d1_gmn.app.views.util.http_response_with_boolean_true_type() | MNStorage.systemMetadataChanged(session, did, serialVersion,
dateSysMetaLastModified) → boolean. | Below is the the instruction that describes the task:
### Input:
MNStorage.systemMetadataChanged(session, did, serialVersion,
dateSysMetaLastModified) → boolean.
### Response:
def post_refresh_system_metadata(request):
"""MNStorage.systemMetadataChanged(session, did, serialVersion,
dateSysMetaLastModified) → boolean."""
d1_gmn.app.views.assert_db.post_has_mime_parts(
request,
(
('field', 'pid'),
('field', 'serialVersion'),
('field', 'dateSysMetaLastModified'),
),
)
d1_gmn.app.views.assert_db.is_existing_object(request.POST['pid'])
d1_gmn.app.models.sysmeta_refresh_queue(
request.POST['pid'],
request.POST['serialVersion'],
request.POST['dateSysMetaLastModified'],
'queued',
).save()
return d1_gmn.app.views.util.http_response_with_boolean_true_type() |
def mul(left, right):
"""
Distribution multiplication.
Args:
left (Dist, numpy.ndarray) : left hand side.
right (Dist, numpy.ndarray) : right hand side.
"""
from .mv_mul import MvMul
length = max(left, right)
if length == 1:
return Mul(left, right)
return MvMul(left, right) | Distribution multiplication.
Args:
left (Dist, numpy.ndarray) : left hand side.
right (Dist, numpy.ndarray) : right hand side. | Below is the the instruction that describes the task:
### Input:
Distribution multiplication.
Args:
left (Dist, numpy.ndarray) : left hand side.
right (Dist, numpy.ndarray) : right hand side.
### Response:
def mul(left, right):
"""
Distribution multiplication.
Args:
left (Dist, numpy.ndarray) : left hand side.
right (Dist, numpy.ndarray) : right hand side.
"""
from .mv_mul import MvMul
length = max(left, right)
if length == 1:
return Mul(left, right)
return MvMul(left, right) |
def borrow_readwrite_instance(cls, working_dir, block_number, expected_snapshots={}):
"""
Get a read/write database handle to the blockstack db.
At most one such handle can exist within the program.
When the caller is done with the handle, it should call release_readwrite_instance()
Returns the handle on success
Returns None if we can't set up the db.
Aborts if there is another read/write handle out there somewhere.
"""
global blockstack_db, blockstack_db_lastblock, blockstack_db_lock
import virtualchain_hooks
db_path = virtualchain.get_db_filename(virtualchain_hooks, working_dir)
blockstack_db_lock.acquire()
try:
assert blockstack_db is None, "Borrowing violation"
except Exception, e:
log.exception(e)
log.error("FATAL: Borrowing violation")
os.abort()
db = BlockstackDB(db_path, DISPOSITION_RW, working_dir, get_genesis_block(), expected_snapshots=expected_snapshots)
rc = db.db_setup()
if not rc:
db.close()
blockstack_db_lock.release()
log.error("Failed to set up virtualchain state engine")
return None
blockstack_db = db
blockstack_db_lastblock = block_number
blockstack_db_lock.release()
return blockstack_db | Get a read/write database handle to the blockstack db.
At most one such handle can exist within the program.
When the caller is done with the handle, it should call release_readwrite_instance()
Returns the handle on success
Returns None if we can't set up the db.
Aborts if there is another read/write handle out there somewhere. | Below is the the instruction that describes the task:
### Input:
Get a read/write database handle to the blockstack db.
At most one such handle can exist within the program.
When the caller is done with the handle, it should call release_readwrite_instance()
Returns the handle on success
Returns None if we can't set up the db.
Aborts if there is another read/write handle out there somewhere.
### Response:
def borrow_readwrite_instance(cls, working_dir, block_number, expected_snapshots={}):
"""
Get a read/write database handle to the blockstack db.
At most one such handle can exist within the program.
When the caller is done with the handle, it should call release_readwrite_instance()
Returns the handle on success
Returns None if we can't set up the db.
Aborts if there is another read/write handle out there somewhere.
"""
global blockstack_db, blockstack_db_lastblock, blockstack_db_lock
import virtualchain_hooks
db_path = virtualchain.get_db_filename(virtualchain_hooks, working_dir)
blockstack_db_lock.acquire()
try:
assert blockstack_db is None, "Borrowing violation"
except Exception, e:
log.exception(e)
log.error("FATAL: Borrowing violation")
os.abort()
db = BlockstackDB(db_path, DISPOSITION_RW, working_dir, get_genesis_block(), expected_snapshots=expected_snapshots)
rc = db.db_setup()
if not rc:
db.close()
blockstack_db_lock.release()
log.error("Failed to set up virtualchain state engine")
return None
blockstack_db = db
blockstack_db_lastblock = block_number
blockstack_db_lock.release()
return blockstack_db |
def parse(self, input):
"""
Parses a time delta from the input.
See :py:class:`TimeDeltaParameter` for details on supported formats.
"""
result = self._parseIso8601(input)
if not result:
result = self._parseSimple(input)
if result is not None:
return result
else:
raise ParameterException("Invalid time delta - could not parse %s" % input) | Parses a time delta from the input.
See :py:class:`TimeDeltaParameter` for details on supported formats. | Below is the the instruction that describes the task:
### Input:
Parses a time delta from the input.
See :py:class:`TimeDeltaParameter` for details on supported formats.
### Response:
def parse(self, input):
"""
Parses a time delta from the input.
See :py:class:`TimeDeltaParameter` for details on supported formats.
"""
result = self._parseIso8601(input)
if not result:
result = self._parseSimple(input)
if result is not None:
return result
else:
raise ParameterException("Invalid time delta - could not parse %s" % input) |
def _eval_xpath(self, xpath):
"""
Evaluates xpath expressions.
Either string or XPath object.
"""
if isinstance(xpath, etree.XPath):
result = xpath(self._dataObject)
else:
result = self._dataObject.xpath(xpath,namespaces=self._namespaces)
#print 'Xpath expression:', xpath
#print etree.tostring(self._dataObject)
#print 'Got Result: \n%s\n End Result' % result
return result | Evaluates xpath expressions.
Either string or XPath object. | Below is the the instruction that describes the task:
### Input:
Evaluates xpath expressions.
Either string or XPath object.
### Response:
def _eval_xpath(self, xpath):
"""
Evaluates xpath expressions.
Either string or XPath object.
"""
if isinstance(xpath, etree.XPath):
result = xpath(self._dataObject)
else:
result = self._dataObject.xpath(xpath,namespaces=self._namespaces)
#print 'Xpath expression:', xpath
#print etree.tostring(self._dataObject)
#print 'Got Result: \n%s\n End Result' % result
return result |
def setup_client_rpc(self):
"""Setup RPC client for dfa agent."""
# Setup RPC client.
self.clnt = rpc.DfaRpcClient(self._url, constants.DFA_SERVER_QUEUE,
exchange=constants.DFA_EXCHANGE) | Setup RPC client for dfa agent. | Below is the the instruction that describes the task:
### Input:
Setup RPC client for dfa agent.
### Response:
def setup_client_rpc(self):
"""Setup RPC client for dfa agent."""
# Setup RPC client.
self.clnt = rpc.DfaRpcClient(self._url, constants.DFA_SERVER_QUEUE,
exchange=constants.DFA_EXCHANGE) |
def query_form_data(self):
"""
Get the formdata stored in the database for existing slice.
params: slice_id: integer
"""
form_data = {}
slice_id = request.args.get('slice_id')
if slice_id:
slc = db.session.query(models.Slice).filter_by(id=slice_id).one_or_none()
if slc:
form_data = slc.form_data.copy()
update_time_range(form_data)
return json.dumps(form_data) | Get the formdata stored in the database for existing slice.
params: slice_id: integer | Below is the the instruction that describes the task:
### Input:
Get the formdata stored in the database for existing slice.
params: slice_id: integer
### Response:
def query_form_data(self):
"""
Get the formdata stored in the database for existing slice.
params: slice_id: integer
"""
form_data = {}
slice_id = request.args.get('slice_id')
if slice_id:
slc = db.session.query(models.Slice).filter_by(id=slice_id).one_or_none()
if slc:
form_data = slc.form_data.copy()
update_time_range(form_data)
return json.dumps(form_data) |
def _parse_conf(conf_file=None, in_mem=False, family='ipv4'):
'''
If a file is not passed in, and the correct one for this OS is not
detected, return False
'''
if _conf() and not conf_file and not in_mem:
conf_file = _conf(family)
rules = ''
if conf_file:
with salt.utils.files.fopen(conf_file, 'r') as ifile:
rules = ifile.read()
elif in_mem:
cmd = '{0}-save' . format(_iptables_cmd(family))
rules = __salt__['cmd.run'](cmd)
else:
raise SaltException('A file was not found to parse')
ret = {}
table = ''
parser = _parser()
for line in rules.splitlines():
line = salt.utils.stringutils.to_unicode(line)
if line.startswith('*'):
table = line.replace('*', '')
ret[table] = {}
elif line.startswith(':'):
comps = line.split()
chain = comps[0].replace(':', '')
ret[table][chain] = {}
ret[table][chain]['policy'] = comps[1]
counters = comps[2].replace('[', '').replace(']', '')
(pcount, bcount) = counters.split(':')
ret[table][chain]['packet count'] = pcount
ret[table][chain]['byte count'] = bcount
ret[table][chain]['rules'] = []
ret[table][chain]['rules_comment'] = {}
elif line.startswith('-A'):
args = salt.utils.args.shlex_split(line)
index = 0
while index + 1 < len(args):
swap = args[index] == '!' and args[index + 1].startswith('-')
if swap:
args[index], args[index + 1] = args[index + 1], args[index]
if args[index].startswith('-'):
index += 1
if args[index].startswith('-') or (args[index] == '!' and
not swap):
args.insert(index, '')
else:
while (index + 1 < len(args) and
args[index + 1] != '!' and
not args[index + 1].startswith('-')):
args[index] += ' {0}'.format(args.pop(index + 1))
index += 1
if args[-1].startswith('-'):
args.append('')
parsed_args = []
opts, _ = parser.parse_known_args(args)
parsed_args = vars(opts)
ret_args = {}
chain = parsed_args['append']
for arg in parsed_args:
if parsed_args[arg] and arg is not 'append':
ret_args[arg] = parsed_args[arg]
if parsed_args['comment'] is not None:
comment = parsed_args['comment'][0].strip('"')
ret[table][chain[0]]['rules_comment'][comment] = ret_args
ret[table][chain[0]]['rules'].append(ret_args)
return ret | If a file is not passed in, and the correct one for this OS is not
detected, return False | Below is the the instruction that describes the task:
### Input:
If a file is not passed in, and the correct one for this OS is not
detected, return False
### Response:
def _parse_conf(conf_file=None, in_mem=False, family='ipv4'):
'''
If a file is not passed in, and the correct one for this OS is not
detected, return False
'''
if _conf() and not conf_file and not in_mem:
conf_file = _conf(family)
rules = ''
if conf_file:
with salt.utils.files.fopen(conf_file, 'r') as ifile:
rules = ifile.read()
elif in_mem:
cmd = '{0}-save' . format(_iptables_cmd(family))
rules = __salt__['cmd.run'](cmd)
else:
raise SaltException('A file was not found to parse')
ret = {}
table = ''
parser = _parser()
for line in rules.splitlines():
line = salt.utils.stringutils.to_unicode(line)
if line.startswith('*'):
table = line.replace('*', '')
ret[table] = {}
elif line.startswith(':'):
comps = line.split()
chain = comps[0].replace(':', '')
ret[table][chain] = {}
ret[table][chain]['policy'] = comps[1]
counters = comps[2].replace('[', '').replace(']', '')
(pcount, bcount) = counters.split(':')
ret[table][chain]['packet count'] = pcount
ret[table][chain]['byte count'] = bcount
ret[table][chain]['rules'] = []
ret[table][chain]['rules_comment'] = {}
elif line.startswith('-A'):
args = salt.utils.args.shlex_split(line)
index = 0
while index + 1 < len(args):
swap = args[index] == '!' and args[index + 1].startswith('-')
if swap:
args[index], args[index + 1] = args[index + 1], args[index]
if args[index].startswith('-'):
index += 1
if args[index].startswith('-') or (args[index] == '!' and
not swap):
args.insert(index, '')
else:
while (index + 1 < len(args) and
args[index + 1] != '!' and
not args[index + 1].startswith('-')):
args[index] += ' {0}'.format(args.pop(index + 1))
index += 1
if args[-1].startswith('-'):
args.append('')
parsed_args = []
opts, _ = parser.parse_known_args(args)
parsed_args = vars(opts)
ret_args = {}
chain = parsed_args['append']
for arg in parsed_args:
if parsed_args[arg] and arg is not 'append':
ret_args[arg] = parsed_args[arg]
if parsed_args['comment'] is not None:
comment = parsed_args['comment'][0].strip('"')
ret[table][chain[0]]['rules_comment'][comment] = ret_args
ret[table][chain[0]]['rules'].append(ret_args)
return ret |
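A sketch of the structure built above, assuming a conventional saved-rules path:
parsed = _parse_conf(conf_file='/etc/iptables/rules.v4', family='ipv4')
# parsed['filter']['INPUT']['policy']        -> e.g. 'ACCEPT'
# parsed['filter']['INPUT']['packet count']  -> taken from the [pkts:bytes] counters
# parsed['filter']['INPUT']['rules']         -> list of dicts keyed by parsed option names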
def is_isomorphic_to(self, other):
"""
Returns true if all fields of other struct are isomorphic to this
struct's fields
"""
return (isinstance(other, self.__class__)
and
len(self.fields) == len(other.fields)
and
all(a.is_isomorphic_to(b) for a, b in zip(self.fields,
other.fields))) | Returns true if all fields of other struct are isomorphic to this
struct's fields | Below is the the instruction that describes the task:
### Input:
Returns true if all fields of other struct are isomorphic to this
struct's fields
### Response:
def is_isomorphic_to(self, other):
"""
Returns true if all fields of other struct are isomorphic to this
struct's fields
"""
return (isinstance(other, self.__class__)
and
len(self.fields) == len(other.fields)
and
all(a.is_isomorphic_to(b) for a, b in zip(self.fields,
other.fields))) |
def can_add_new_content(self, block, file_info):
"""
new content from file_info can be added into block iff
- file count limit hasn't been reached for the block
- there is enough space to completely fit the info into the block
- OR the info can be split and some info can fit into the block
"""
return ((self._max_files_per_container == 0 or self._max_files_per_container > len(block.content_file_infos))
and (self.does_content_fit(file_info, block)
or
# check if we can fit some content by splitting the file
# Note: if max size was unlimited, does_content_fit would have been True
(block.content_size < self._max_container_content_size_in_bytes
and (self._should_split_small_files or not self._is_small_file(file_info))))) | new content from file_info can be added into block iff
- file count limit hasn't been reached for the block
- there is enough space to completely fit the info into the block
- OR the info can be split and some info can fit into the block | Below is the the instruction that describes the task:
### Input:
new content from file_info can be added into block iff
- file count limit hasn't been reached for the block
- there is enough space to completely fit the info into the block
- OR the info can be split and some info can fit into the block
### Response:
def can_add_new_content(self, block, file_info):
"""
new content from file_info can be added into block iff
- file count limit hasn't been reached for the block
- there is enough space to completely fit the info into the block
- OR the info can be split and some info can fit into the block
"""
return ((self._max_files_per_container == 0 or self._max_files_per_container > len(block.content_file_infos))
and (self.does_content_fit(file_info, block)
or
# check if we can fit some content by splitting the file
# Note: if max size was unlimited, does_content_fit would have been True
(block.content_size < self._max_container_content_size_in_bytes
and (self._should_split_small_files or not self._is_small_file(file_info))))) |
def validate_arg(f,
arg_name,
*validation_func, # type: ValidationFuncs
**kwargs
):
# type: (...) -> Callable
"""
A decorator to apply function input validation for the given argument name, with the provided base validation
function(s). You may use several such decorators on a given function as long as they are stacked on top of each
other (no external decorator in the middle)
:param arg_name:
:param validation_func: the base validation function or list of base validation functions to use. A callable, a
tuple(callable, help_msg_str), a tuple(callable, failure_type), or a list of several such elements. Nested lists
are supported and indicate an implicit `and_` (such as the main list). Tuples indicate an implicit
`_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used instead
of callables, they will be transformed to functions automatically.
:param error_type: a subclass of ValidationError to raise in case of validation failure. By default a
ValidationError will be raised with the provided help_msg
:param help_msg: an optional help message to be used in the raised error in case of validation failure.
:param none_policy: describes how None values should be handled. See `NoneArgPolicy` for the various
possibilities. Default is `NoneArgPolicy.ACCEPT_IF_OPTIONAl_ELSE_VALIDATE`.
:param kw_context_args: optional contextual information to store in the exception, and that may be also used
to format the help message
:return: a function decorator, able to transform a function into a function that will perform input validation
before executing the function's code everytime it is executed.
"""
return decorate_with_validation(f, arg_name, *validation_func, **kwargs) | A decorator to apply function input validation for the given argument name, with the provided base validation
function(s). You may use several such decorators on a given function as long as they are stacked on top of each
other (no external decorator in the middle)
:param arg_name:
:param validation_func: the base validation function or list of base validation functions to use. A callable, a
tuple(callable, help_msg_str), a tuple(callable, failure_type), or a list of several such elements. Nested lists
are supported and indicate an implicit `and_` (such as the main list). Tuples indicate an implicit
`_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used instead
of callables, they will be transformed to functions automatically.
:param error_type: a subclass of ValidationError to raise in case of validation failure. By default a
ValidationError will be raised with the provided help_msg
:param help_msg: an optional help message to be used in the raised error in case of validation failure.
:param none_policy: describes how None values should be handled. See `NoneArgPolicy` for the various
possibilities. Default is `NoneArgPolicy.ACCEPT_IF_OPTIONAl_ELSE_VALIDATE`.
:param kw_context_args: optional contextual information to store in the exception, and that may be also used
to format the help message
:return: a function decorator, able to transform a function into a function that will perform input validation
before executing the function's code every time it is executed.
### Input:
A decorator to apply function input validation for the given argument name, with the provided base validation
function(s). You may use several such decorators on a given function as long as they are stacked on top of each
other (no external decorator in the middle)
:param arg_name:
:param validation_func: the base validation function or list of base validation functions to use. A callable, a
tuple(callable, help_msg_str), a tuple(callable, failure_type), or a list of several such elements. Nested lists
are supported and indicate an implicit `and_` (such as the main list). Tuples indicate an implicit
`_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used instead
of callables, they will be transformed to functions automatically.
:param error_type: a subclass of ValidationError to raise in case of validation failure. By default a
ValidationError will be raised with the provided help_msg
:param help_msg: an optional help message to be used in the raised error in case of validation failure.
:param none_policy: describes how None values should be handled. See `NoneArgPolicy` for the various
possibilities. Default is `NoneArgPolicy.ACCEPT_IF_OPTIONAl_ELSE_VALIDATE`.
:param kw_context_args: optional contextual information to store in the exception, and that may be also used
to format the help message
:return: a function decorator, able to transform a function into a function that will perform input validation
before executing the function's code every time it is executed.
### Response:
def validate_arg(f,
arg_name,
*validation_func, # type: ValidationFuncs
**kwargs
):
# type: (...) -> Callable
"""
A decorator to apply function input validation for the given argument name, with the provided base validation
function(s). You may use several such decorators on a given function as long as they are stacked on top of each
other (no external decorator in the middle)
:param arg_name:
:param validation_func: the base validation function or list of base validation functions to use. A callable, a
tuple(callable, help_msg_str), a tuple(callable, failure_type), or a list of several such elements. Nested lists
are supported and indicate an implicit `and_` (such as the main list). Tuples indicate an implicit
`_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used instead
of callables, they will be transformed to functions automatically.
:param error_type: a subclass of ValidationError to raise in case of validation failure. By default a
ValidationError will be raised with the provided help_msg
:param help_msg: an optional help message to be used in the raised error in case of validation failure.
:param none_policy: describes how None values should be handled. See `NoneArgPolicy` for the various
possibilities. Default is `NoneArgPolicy.ACCEPT_IF_OPTIONAl_ELSE_VALIDATE`.
:param kw_context_args: optional contextual information to store in the exception, and that may be also used
to format the help message
:return: a function decorator, able to transform a function into a function that will perform input validation
before executing the function's code every time it is executed.
"""
return decorate_with_validation(f, arg_name, *validation_func, **kwargs) |
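A hypothetical sketch of wrapping an existing function so one argument is validated on every call (the function and validator names are illustrative, not from the original source):
def is_positive(x):
    return x > 0
def double(x):
    return 2 * x
double = validate_arg(double, 'x', is_positive, help_msg='x must be strictly positive')
double(3)   # passes validation
double(-1)  # expected to raise the configured ValidationError subclass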
def read_geo(fid, key):
"""Read geolocation and related datasets."""
dsid = GEO_NAMES[key.name]
add_epoch = False
if "time" in key.name:
days = fid["/L1C/" + dsid["day"]].value
msecs = fid["/L1C/" + dsid["msec"]].value
data = _form_datetimes(days, msecs)
add_epoch = True
dtype = np.float64
else:
data = fid["/L1C/" + dsid].value
dtype = np.float32
data = xr.DataArray(da.from_array(data, chunks=CHUNK_SIZE),
name=key.name, dims=['y', 'x']).astype(dtype)
if add_epoch:
data.attrs['sensing_time_epoch'] = EPOCH
return data | Read geolocation and related datasets. | Below is the the instruction that describes the task:
### Input:
Read geolocation and related datasets.
### Response:
def read_geo(fid, key):
"""Read geolocation and related datasets."""
dsid = GEO_NAMES[key.name]
add_epoch = False
if "time" in key.name:
days = fid["/L1C/" + dsid["day"]].value
msecs = fid["/L1C/" + dsid["msec"]].value
data = _form_datetimes(days, msecs)
add_epoch = True
dtype = np.float64
else:
data = fid["/L1C/" + dsid].value
dtype = np.float32
data = xr.DataArray(da.from_array(data, chunks=CHUNK_SIZE),
name=key.name, dims=['y', 'x']).astype(dtype)
if add_epoch:
data.attrs['sensing_time_epoch'] = EPOCH
return data |
def last_index_of(self, item):
"""
Returns the last index of the specified item's occurrences in this list. If specified item is not present in this
list, returns -1.
:param item: (object), the specified item to be searched for.
:return: (int), the last index of the specified item's occurrences, -1 if item is not present in this list.
"""
check_not_none(item, "Value can't be None")
item_data = self._to_data(item)
return self._encode_invoke(list_last_index_of_codec, value=item_data) | Returns the last index of the specified item's occurrences in this list. If specified item is not present in this
list, returns -1.
:param item: (object), the specified item to be searched for.
:return: (int), the last index of the specified item's occurrences, -1 if item is not present in this list. | Below is the the instruction that describes the task:
### Input:
Returns the last index of the specified item's occurrences in this list. If specified item is not present in this
list, returns -1.
:param item: (object), the specified item to be searched for.
:return: (int), the last index of the specified item's occurrences, -1 if item is not present in this list.
### Response:
def last_index_of(self, item):
"""
Returns the last index of the specified item's occurrences in this list. If specified item is not present in this
list, returns -1.
:param item: (object), the specified item to be searched for.
:return: (int), the last index of the specified item's occurrences, -1 if item is not present in this list.
"""
check_not_none(item, "Value can't be None")
item_data = self._to_data(item)
return self._encode_invoke(list_last_index_of_codec, value=item_data) |
def fix_surrogates(text):
"""
Replace 16-bit surrogate codepoints with the characters they represent
(when properly paired), or with \ufffd otherwise.
>>> high_surrogate = chr(0xd83d)
>>> low_surrogate = chr(0xdca9)
>>> print(fix_surrogates(high_surrogate + low_surrogate))
💩
>>> print(fix_surrogates(low_surrogate + high_surrogate))
��
The above doctest had to be very carefully written, because even putting
the Unicode escapes of the surrogates in the docstring was causing
various tools to fail, which I think just goes to show why this fixer is
necessary.
"""
if SURROGATE_RE.search(text):
text = SURROGATE_PAIR_RE.sub(convert_surrogate_pair, text)
text = SURROGATE_RE.sub('\ufffd', text)
return text | Replace 16-bit surrogate codepoints with the characters they represent
(when properly paired), or with \ufffd otherwise.
>>> high_surrogate = chr(0xd83d)
>>> low_surrogate = chr(0xdca9)
>>> print(fix_surrogates(high_surrogate + low_surrogate))
💩
>>> print(fix_surrogates(low_surrogate + high_surrogate))
��
The above doctest had to be very carefully written, because even putting
the Unicode escapes of the surrogates in the docstring was causing
various tools to fail, which I think just goes to show why this fixer is
necessary. | Below is the the instruction that describes the task:
### Input:
Replace 16-bit surrogate codepoints with the characters they represent
(when properly paired), or with \ufffd otherwise.
>>> high_surrogate = chr(0xd83d)
>>> low_surrogate = chr(0xdca9)
>>> print(fix_surrogates(high_surrogate + low_surrogate))
💩
>>> print(fix_surrogates(low_surrogate + high_surrogate))
��
The above doctest had to be very carefully written, because even putting
the Unicode escapes of the surrogates in the docstring was causing
various tools to fail, which I think just goes to show why this fixer is
necessary.
### Response:
def fix_surrogates(text):
"""
Replace 16-bit surrogate codepoints with the characters they represent
(when properly paired), or with \ufffd otherwise.
>>> high_surrogate = chr(0xd83d)
>>> low_surrogate = chr(0xdca9)
>>> print(fix_surrogates(high_surrogate + low_surrogate))
💩
>>> print(fix_surrogates(low_surrogate + high_surrogate))
��
The above doctest had to be very carefully written, because even putting
the Unicode escapes of the surrogates in the docstring was causing
various tools to fail, which I think just goes to show why this fixer is
necessary.
"""
if SURROGATE_RE.search(text):
text = SURROGATE_PAIR_RE.sub(convert_surrogate_pair, text)
text = SURROGATE_RE.sub('\ufffd', text)
return text |
def parse_time(time_input):
""" Parse input time/date string into ISO 8601 string
:param time_input: time/date to parse
:type time_input: str or datetime.date or datetime.datetime
:return: parsed string in ISO 8601 format
:rtype: str
"""
if isinstance(time_input, datetime.date):
return time_input.isoformat() # datetime.date only returns date, datetime.datetime also returns time
if len(time_input) < 8:
raise ValueError('Invalid time string {}.\n'
'Please specify time in formats YYYY-MM-DD or YYYY-MM-DDTHH:MM:SS'.format(time_input))
time = dateutil.parser.parse(time_input)
if len(time_input) <= 10:
return time.date().isoformat()
return time.isoformat() | Parse input time/date string into ISO 8601 string
:param time_input: time/date to parse
:type time_input: str or datetime.date or datetime.datetime
:return: parsed string in ISO 8601 format
:rtype: str | Below is the the instruction that describes the task:
### Input:
Parse input time/date string into ISO 8601 string
:param time_input: time/date to parse
:type time_input: str or datetime.date or datetime.datetime
:return: parsed string in ISO 8601 format
:rtype: str
### Response:
def parse_time(time_input):
""" Parse input time/date string into ISO 8601 string
:param time_input: time/date to parse
:type time_input: str or datetime.date or datetime.datetime
:return: parsed string in ISO 8601 format
:rtype: str
"""
if isinstance(time_input, datetime.date):
return time_input.isoformat() # datetime.date only returns date, datetime.datetime also returns time
if len(time_input) < 8:
raise ValueError('Invalid time string {}.\n'
'Please specify time in formats YYYY-MM-DD or YYYY-MM-DDTHH:MM:SS'.format(time_input))
time = dateutil.parser.parse(time_input)
if len(time_input) <= 10:
return time.date().isoformat()
return time.isoformat() |
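Behaviour sketch derived from the branches above:
import datetime
parse_time('2019-01-31')                # -> '2019-01-31' (length <= 10 keeps a date-only result)
parse_time('2019-01-31T14:05:00')       # -> '2019-01-31T14:05:00'
parse_time(datetime.date(2019, 1, 31))  # -> '2019-01-31'
parse_time('1.1.19')                    # raises ValueError: fewer than 8 characters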
def _render_val_with_prev(self, w, n, current_val, symbol_len):
"""Return a string encoding the given value in a waveform.
:param w: The WireVector we are rendering to a waveform
:param n: An integer from 0 to segment_len-1
:param current_val: the value to be rendered
:param symbol_len: an integer for how big to draw the current value
Returns a string of printed length symbol_len that will draw the
representation of current_val. The input prior_val is used to
render transitions.
"""
sl = symbol_len-1
if len(w) > 1:
out = self._revstart
if current_val != self.prior_val:
out += self._x + hex(current_val).rstrip('L').ljust(sl)[:sl]
elif n == 0:
out += hex(current_val).rstrip('L').ljust(symbol_len)[:symbol_len]
else:
out += ' '*symbol_len
out += self._revstop
else:
pretty_map = {
(0, 0): self._low + self._low * sl,
(0, 1): self._up + self._high * sl,
(1, 0): self._down + self._low * sl,
(1, 1): self._high + self._high * sl,
}
out = pretty_map[(self.prior_val, current_val)]
return out | Return a string encoding the given value in a waveform.
:param w: The WireVector we are rendering to a waveform
:param n: An integer from 0 to segment_len-1
:param current_val: the value to be rendered
:param symbol_len: an integer for how big to draw the current value
Returns a string of printed length symbol_len that will draw the
representation of current_val. The input prior_val is used to
render transitions. | Below is the the instruction that describes the task:
### Input:
Return a string encoding the given value in a waveform.
:param w: The WireVector we are rendering to a waveform
:param n: An integer from 0 to segment_len-1
:param current_val: the value to be rendered
:param symbol_len: an integer for how big to draw the current value
Returns a string of printed length symbol_len that will draw the
representation of current_val. The input prior_val is used to
render transitions.
### Response:
def _render_val_with_prev(self, w, n, current_val, symbol_len):
"""Return a string encoding the given value in a waveform.
:param w: The WireVector we are rendering to a waveform
:param n: An integer from 0 to segment_len-1
:param current_val: the value to be rendered
:param symbol_len: an integer for how big to draw the current value
Returns a string of printed length symbol_len that will draw the
representation of current_val. The input prior_val is used to
render transitions.
"""
sl = symbol_len-1
if len(w) > 1:
out = self._revstart
if current_val != self.prior_val:
out += self._x + hex(current_val).rstrip('L').ljust(sl)[:sl]
elif n == 0:
out += hex(current_val).rstrip('L').ljust(symbol_len)[:symbol_len]
else:
out += ' '*symbol_len
out += self._revstop
else:
pretty_map = {
(0, 0): self._low + self._low * sl,
(0, 1): self._up + self._high * sl,
(1, 0): self._down + self._low * sl,
(1, 1): self._high + self._high * sl,
}
out = pretty_map[(self.prior_val, current_val)]
return out |
def no_counterpart_found(string, options, rc_so_far):
"""Takes action determined by options.else_action. Unless told to
raise an exception, this function returns the errno that is supposed
to be returned in this case.
:param string: The lookup string.
:param options: ArgumentParser or equivalent to provide
options.else_action, options.else_errno, options.no_newline
:param rc_so_far: Becomes set to the value set in options.
"""
logger.debug("options.else_action: %s", options.else_action)
if options.else_action == "passthrough":
format_list = [string]
output_fd = sys.stdout
elif options.else_action == "exception":
raise KeyError("No counterpart found for: %s" % (string))
elif options.else_action == "error":
format_list = ["# No counterpart found for: %s" % (string)]
output_fd = sys.stderr
if not options.no_newline:
format_list.append("\n")
output_fd.write("".join(format_list))
return options.else_errno | Takes action determined by options.else_action. Unless told to
raise an exception, this function returns the errno that is supposed
to be returned in this case.
:param string: The lookup string.
:param options: ArgumentParser or equivalent to provide
options.else_action, options.else_errno, options.no_newline
:param rc_so_far: Becomes set to the value set in options. | Below is the the instruction that describes the task:
### Input:
Takes action determined by options.else_action. Unless told to
raise an exception, this function returns the errno that is supposed
to be returned in this case.
:param string: The lookup string.
:param options: ArgumentParser or equivalent to provide
options.else_action, options.else_errno, options.no_newline
:param rc_so_far: Becomes set to the value set in options.
### Response:
def no_counterpart_found(string, options, rc_so_far):
"""Takes action determined by options.else_action. Unless told to
raise an exception, this function returns the errno that is supposed
to be returned in this case.
:param string: The lookup string.
:param options: ArgumentParser or equivalent to provide
options.else_action, options.else_errno, options.no_newline
:param rc_so_far: Becomes set to the value set in options.
"""
logger.debug("options.else_action: %s", options.else_action)
if options.else_action == "passthrough":
format_list = [string]
output_fd = sys.stdout
elif options.else_action == "exception":
raise KeyError("No counterpart found for: %s" % (string))
elif options.else_action == "error":
format_list = ["# No counterpart found for: %s" % (string)]
output_fd = sys.stderr
if not options.no_newline:
format_list.append("\n")
output_fd.write("".join(format_list))
return options.else_errno |
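A hypothetical invocation using an argparse-style options object with the attributes read above:
from argparse import Namespace
opts = Namespace(else_action='error', else_errno=3, no_newline=False)
rc = no_counterpart_found('unknown-key', opts, rc_so_far=0)
# writes '# No counterpart found for: unknown-key' plus a newline to stderr and returns 3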
def ensure_workspace(self, target):
"""Ensures that an up-to-date Go workspace exists for the given target.
Creates any necessary symlinks to source files based on the target and its transitive
dependencies, and removes any symlinks which do not correspond to any needed dep.
"""
gopath = self.get_gopath(target)
for d in ('bin', 'pkg', 'src'):
safe_mkdir(os.path.join(gopath, d))
required_links = set()
for dep in target.closure():
if not isinstance(dep, GoTarget):
continue
if self.is_remote_lib(dep):
self._symlink_remote_lib(gopath, dep, required_links)
else:
self._symlink_local_src(gopath, dep, required_links)
self.remove_unused_links(os.path.join(gopath, 'src'), required_links) | Ensures that an up-to-date Go workspace exists for the given target.
Creates any necessary symlinks to source files based on the target and its transitive
dependencies, and removes any symlinks which do not correspond to any needed dep. | Below is the the instruction that describes the task:
### Input:
Ensures that an up-to-date Go workspace exists for the given target.
Creates any necessary symlinks to source files based on the target and its transitive
dependencies, and removes any symlinks which do not correspond to any needed dep.
### Response:
def ensure_workspace(self, target):
"""Ensures that an up-to-date Go workspace exists for the given target.
Creates any necessary symlinks to source files based on the target and its transitive
dependencies, and removes any symlinks which do not correspond to any needed dep.
"""
gopath = self.get_gopath(target)
for d in ('bin', 'pkg', 'src'):
safe_mkdir(os.path.join(gopath, d))
required_links = set()
for dep in target.closure():
if not isinstance(dep, GoTarget):
continue
if self.is_remote_lib(dep):
self._symlink_remote_lib(gopath, dep, required_links)
else:
self._symlink_local_src(gopath, dep, required_links)
self.remove_unused_links(os.path.join(gopath, 'src'), required_links) |
def timezone(self, value):
"""Set the timezone."""
self._timezone = (value if isinstance(value, datetime.tzinfo)
else tz.gettz(value)) | Set the timezone. | Below is the the instruction that describes the task:
### Input:
Set the timezone.
### Response:
def timezone(self, value):
"""Set the timezone."""
self._timezone = (value if isinstance(value, datetime.tzinfo)
else tz.gettz(value)) |
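Assignment sketch (loc is a hypothetical instance of the owning class; the setter accepts either a tzinfo object or a zone name string):
import datetime
loc.timezone = 'Europe/London'        # strings are resolved through dateutil.tz.gettz
loc.timezone = datetime.timezone.utc  # tzinfo instances are stored unchanged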
def constraint_from_parent_conflicts(self):
"""
Given a resolved entry with multiple parent dependencies with different
constraints, searches for the resolution that satisfies all of the parent
constraints.
:return: A new **InstallRequirement** satisfying all parent constraints
:raises: :exc:`~pipenv.exceptions.DependencyConflict` if resolution is impossible
"""
# ensure that we satisfy the parent dependencies of this dep
from pipenv.vendor.packaging.specifiers import Specifier
parent_dependencies = set()
has_mismatch = False
can_use_original = True
for p in self.parent_deps:
# updated dependencies should be satisfied since they were resolved already
if p.is_updated:
continue
# parents with no requirements can't conflict
if not p.requirements:
continue
needed = p.requirements.get("dependencies", [])
entry_ref = p.get_dependency(self.name)
required = entry_ref.get("required_version", "*")
required = self.clean_specifier(required)
parent_requires = self.make_requirement(self.name, required)
parent_dependencies.add("{0} => {1} ({2})".format(p.name, self.name, required))
if not parent_requires.requirement.specifier.contains(self.original_version):
can_use_original = False
if not parent_requires.requirement.specifier.contains(self.updated_version):
has_mismatch = True
if has_mismatch and not can_use_original:
from pipenv.exceptions import DependencyConflict
msg = (
"Cannot resolve {0} ({1}) due to conflicting parent dependencies: "
"\n\t{2}".format(
self.name, self.updated_version, "\n\t".join(parent_dependencies)
)
)
raise DependencyConflict(msg)
elif can_use_original:
return self.lockfile_entry.as_ireq()
return self.entry.as_ireq() | Given a resolved entry with multiple parent dependencies with different
constraints, searches for the resolution that satisfies all of the parent
constraints.
:return: A new **InstallRequirement** satisfying all parent constraints
:raises: :exc:`~pipenv.exceptions.DependencyConflict` if resolution is impossible | Below is the the instruction that describes the task:
### Input:
Given a resolved entry with multiple parent dependencies with different
constraints, searches for the resolution that satisfies all of the parent
constraints.
:return: A new **InstallRequirement** satisfying all parent constraints
:raises: :exc:`~pipenv.exceptions.DependencyConflict` if resolution is impossible
### Response:
def constraint_from_parent_conflicts(self):
"""
Given a resolved entry with multiple parent dependencies with different
constraints, searches for the resolution that satisfies all of the parent
constraints.
:return: A new **InstallRequirement** satisfying all parent constraints
:raises: :exc:`~pipenv.exceptions.DependencyConflict` if resolution is impossible
"""
# ensure that we satisfy the parent dependencies of this dep
from pipenv.vendor.packaging.specifiers import Specifier
parent_dependencies = set()
has_mismatch = False
can_use_original = True
for p in self.parent_deps:
# updated dependencies should be satisfied since they were resolved already
if p.is_updated:
continue
# parents with no requirements can't conflict
if not p.requirements:
continue
needed = p.requirements.get("dependencies", [])
entry_ref = p.get_dependency(self.name)
required = entry_ref.get("required_version", "*")
required = self.clean_specifier(required)
parent_requires = self.make_requirement(self.name, required)
parent_dependencies.add("{0} => {1} ({2})".format(p.name, self.name, required))
if not parent_requires.requirement.specifier.contains(self.original_version):
can_use_original = False
if not parent_requires.requirement.specifier.contains(self.updated_version):
has_mismatch = True
if has_mismatch and not can_use_original:
from pipenv.exceptions import DependencyConflict
msg = (
"Cannot resolve {0} ({1}) due to conflicting parent dependencies: "
"\n\t{2}".format(
self.name, self.updated_version, "\n\t".join(parent_dependencies)
)
)
raise DependencyConflict(msg)
elif can_use_original:
return self.lockfile_entry.as_ireq()
return self.entry.as_ireq() |
def get_interface_detail_input_request_type_get_next_request_last_rcvd_interface_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_interface_detail = ET.Element("get_interface_detail")
config = get_interface_detail
input = ET.SubElement(get_interface_detail, "input")
request_type = ET.SubElement(input, "request-type")
get_next_request = ET.SubElement(request_type, "get-next-request")
last_rcvd_interface = ET.SubElement(get_next_request, "last-rcvd-interface")
interface_name = ET.SubElement(last_rcvd_interface, "interface-name")
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_interface_detail_input_request_type_get_next_request_last_rcvd_interface_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_interface_detail = ET.Element("get_interface_detail")
config = get_interface_detail
input = ET.SubElement(get_interface_detail, "input")
request_type = ET.SubElement(input, "request-type")
get_next_request = ET.SubElement(request_type, "get-next-request")
last_rcvd_interface = ET.SubElement(get_next_request, "last-rcvd-interface")
interface_name = ET.SubElement(last_rcvd_interface, "interface-name")
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def get_shots(self):
"""Returns the shot chart data as a pandas DataFrame."""
shots = self.response.json()['resultSets'][0]['rowSet']
headers = self.response.json()['resultSets'][0]['headers']
return pd.DataFrame(shots, columns=headers) | Returns the shot chart data as a pandas DataFrame. | Below is the instruction that describes the task:
### Input:
Returns the shot chart data as a pandas DataFrame.
### Response:
def get_shots(self):
"""Returns the shot chart data as a pandas DataFrame."""
shots = self.response.json()['resultSets'][0]['rowSet']
headers = self.response.json()['resultSets'][0]['headers']
return pd.DataFrame(shots, columns=headers) |
def save_state(state, output_dir, keep=False):
"""Save State and optionally gin config."""
params_file = os.path.join(output_dir, "model.pkl")
with gfile.GFile(params_file, "wb") as f:
pickle.dump((state.params, state.step, state.history), f)
if keep:
params_file = os.path.join(output_dir, "model_{}.pkl".format(state.step))
with gfile.GFile(params_file, "wb") as f:
pickle.dump((state.params, state.step, state.history), f)
log("Model saved to %s" % params_file, stdout=False) | Save State and optionally gin config. | Below is the the instruction that describes the task:
### Input:
Save State and optionally gin config.
### Response:
def save_state(state, output_dir, keep=False):
"""Save State and optionally gin config."""
params_file = os.path.join(output_dir, "model.pkl")
with gfile.GFile(params_file, "wb") as f:
pickle.dump((state.params, state.step, state.history), f)
if keep:
params_file = os.path.join(output_dir, "model_{}.pkl".format(state.step))
with gfile.GFile(params_file, "wb") as f:
pickle.dump((state.params, state.step, state.history), f)
log("Model saved to %s" % params_file, stdout=False) |
def get_dataframe(self, tickers,
startDate=None, endDate=None, metric_name=None, frequency='daily'):
""" Return a pandas.DataFrame of historical prices for one or more ticker symbols.
By default, return latest EOD Composite Price for a list of stock tickers.
On average, each feed contains 3 data sources.
Supported tickers + Available Day Ranges are here:
https://apimedia.tiingo.com/docs/tiingo/daily/supported_tickers.zip
or from the TiingoClient.list_tickers() method.
Args:
tickers (string/list): One or more unique identifiers for a stock ticker.
startDate (string): Start of ticker range in YYYY-MM-DD format.
endDate (string): End of ticker range in YYYY-MM-DD format.
metric_name (string): Optional parameter specifying metric to be returned for each
ticker. In the event of a single ticker, this is optional and if not specified
all of the available data will be returned. In the event of a list of tickers,
this parameter is required.
frequency (string): Resample frequency (defaults to daily).
"""
valid_columns = ['open', 'high', 'low', 'close', 'volume', 'adjOpen', 'adjHigh', 'adjLow',
'adjClose', 'adjVolume', 'divCash', 'splitFactor']
if metric_name is not None and metric_name not in valid_columns:
raise APIColumnNameError('Valid data items are: ' + str(valid_columns))
params = {
'format': 'json',
'resampleFreq': frequency
}
if startDate:
params['startDate'] = startDate
if endDate:
params['endDate'] = endDate
if pandas_is_installed:
if type(tickers) is str:
stock = tickers
url = self._get_url(stock, frequency)
response = self._request('GET', url, params=params)
df = pd.DataFrame(response.json())
if metric_name is not None:
prices = df[metric_name]
prices.index = df['date']
else:
prices = df
prices.index = df['date']
del (prices['date'])
else:
prices = pd.DataFrame()
for stock in tickers:
url = self._get_url(stock, frequency)
response = self._request('GET', url, params=params)
df = pd.DataFrame(response.json())
df.index = df['date']
df.rename(index=str, columns={metric_name: stock}, inplace=True)
prices = pd.concat([prices, df[stock]], axis=1)
prices.index = pd.to_datetime(prices.index)
return prices
else:
error_message = ("Pandas is not installed, but .get_ticker_price() was "
"called with fmt=pandas. In order to install tiingo with "
"pandas, reinstall with pandas as an optional dependency. \n"
"Install tiingo with pandas dependency: \'pip install tiingo[pandas]\'\n"
"Alternatively, just install pandas: pip install pandas.")
raise InstallPandasException(error_message) | Return a pandas.DataFrame of historical prices for one or more ticker symbols.
By default, return latest EOD Composite Price for a list of stock tickers.
On average, each feed contains 3 data sources.
Supported tickers + Available Day Ranges are here:
https://apimedia.tiingo.com/docs/tiingo/daily/supported_tickers.zip
or from the TiingoClient.list_tickers() method.
Args:
tickers (string/list): One or more unique identifiers for a stock ticker.
startDate (string): Start of ticker range in YYYY-MM-DD format.
endDate (string): End of ticker range in YYYY-MM-DD format.
metric_name (string): Optional parameter specifying metric to be returned for each
ticker. In the event of a single ticker, this is optional and if not specified
all of the available data will be returned. In the event of a list of tickers,
this parameter is required.
frequency (string): Resample frequency (defaults to daily). | Below is the instruction that describes the task:
### Input:
Return a pandas.DataFrame of historical prices for one or more ticker symbols.
By default, return latest EOD Composite Price for a list of stock tickers.
On average, each feed contains 3 data sources.
Supported tickers + Available Day Ranges are here:
https://apimedia.tiingo.com/docs/tiingo/daily/supported_tickers.zip
or from the TiingoClient.list_tickers() method.
Args:
tickers (string/list): One or more unique identifiers for a stock ticker.
startDate (string): Start of ticker range in YYYY-MM-DD format.
endDate (string): End of ticker range in YYYY-MM-DD format.
metric_name (string): Optional parameter specifying metric to be returned for each
ticker. In the event of a single ticker, this is optional and if not specified
all of the available data will be returned. In the event of a list of tickers,
this parameter is required.
frequency (string): Resample frequency (defaults to daily).
### Response:
def get_dataframe(self, tickers,
startDate=None, endDate=None, metric_name=None, frequency='daily'):
""" Return a pandas.DataFrame of historical prices for one or more ticker symbols.
By default, return latest EOD Composite Price for a list of stock tickers.
On average, each feed contains 3 data sources.
Supported tickers + Available Day Ranges are here:
https://apimedia.tiingo.com/docs/tiingo/daily/supported_tickers.zip
or from the TiingoClient.list_tickers() method.
Args:
tickers (string/list): One or more unique identifiers for a stock ticker.
startDate (string): Start of ticker range in YYYY-MM-DD format.
endDate (string): End of ticker range in YYYY-MM-DD format.
metric_name (string): Optional parameter specifying metric to be returned for each
ticker. In the event of a single ticker, this is optional and if not specified
all of the available data will be returned. In the event of a list of tickers,
this parameter is required.
frequency (string): Resample frequency (defaults to daily).
"""
valid_columns = ['open', 'high', 'low', 'close', 'volume', 'adjOpen', 'adjHigh', 'adjLow',
'adjClose', 'adjVolume', 'divCash', 'splitFactor']
if metric_name is not None and metric_name not in valid_columns:
raise APIColumnNameError('Valid data items are: ' + str(valid_columns))
params = {
'format': 'json',
'resampleFreq': frequency
}
if startDate:
params['startDate'] = startDate
if endDate:
params['endDate'] = endDate
if pandas_is_installed:
if type(tickers) is str:
stock = tickers
url = self._get_url(stock, frequency)
response = self._request('GET', url, params=params)
df = pd.DataFrame(response.json())
if metric_name is not None:
prices = df[metric_name]
prices.index = df['date']
else:
prices = df
prices.index = df['date']
del (prices['date'])
else:
prices = pd.DataFrame()
for stock in tickers:
url = self._get_url(stock, frequency)
response = self._request('GET', url, params=params)
df = pd.DataFrame(response.json())
df.index = df['date']
df.rename(index=str, columns={metric_name: stock}, inplace=True)
prices = pd.concat([prices, df[stock]], axis=1)
prices.index = pd.to_datetime(prices.index)
return prices
else:
error_message = ("Pandas is not installed, but .get_ticker_price() was "
"called with fmt=pandas. In order to install tiingo with "
"pandas, reinstall with pandas as an optional dependency. \n"
"Install tiingo with pandas dependency: \'pip install tiingo[pandas]\'\n"
"Alternatively, just install pandas: pip install pandas.")
raise InstallPandasException(error_message) |
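A hedged usage sketch for get_dataframe above, assuming a TiingoClient built from a config dict holding an API key (the exact configuration keys may vary by client version).
from tiingo import TiingoClient

client = TiingoClient({"api_key": "YOUR_TIINGO_API_KEY"})  # assumed config key

# Single ticker: a full price frame indexed by date.
aapl = client.get_dataframe("AAPL", startDate="2018-01-01", endDate="2018-06-30")

# Multiple tickers: metric_name is required and yields one column per ticker.
closes = client.get_dataframe(["AAPL", "GOOGL"],
                              startDate="2018-01-01",
                              endDate="2018-06-30",
                              metric_name="adjClose",
                              frequency="daily")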
def _longestCommonPrefix(seq1, seq2, start1=0, start2=0):
"""
Returns the length of the longest common prefix of seq1
starting at offset start1 and seq2 starting at offset start2.
>>> _longestCommonPrefix("abcdef", "abcghj")
3
>>> _longestCommonPrefix("abcghj", "abcdef")
3
>>> _longestCommonPrefix("miss", "")
0
>>> _longestCommonPrefix("", "mr")
0
>>> _longestCommonPrefix(range(128), range(128))
128
>>> _longestCommonPrefix("abcabcabc", "abcdefabcdef", 0, 6)
3
>>> _longestCommonPrefix("abcdefabcdef", "abcabcabc", 6, 0)
3
>>> _longestCommonPrefix("abc", "abcabc", 1, 4)
2
>>> _longestCommonPrefix("abcabc", "abc", 4, 1)
2
"""
len1 = len(seq1) - start1
len2 = len(seq2) - start2
# We set seq2 as the shortest sequence
if len1 < len2:
seq1, seq2 = seq2, seq1
start1, start2 = start2, start1
len1, len2 = len2, len1
# if seq2 is empty returns 0
if len2 == 0:
return 0
i = 0
pos2 = start2
for i in range(min(len1, len2)):
# print seq1, seq2, start1, start2
if seq1[start1 + i] != seq2[start2 + i]:
return i
# we have reached the end of seq2 (need to increment i)
return i + 1 | Returns the length of the longest common prefix of seq1
starting at offset start1 and seq2 starting at offset start2.
>>> _longestCommonPrefix("abcdef", "abcghj")
3
>>> _longestCommonPrefix("abcghj", "abcdef")
3
>>> _longestCommonPrefix("miss", "")
0
>>> _longestCommonPrefix("", "mr")
0
>>> _longestCommonPrefix(range(128), range(128))
128
>>> _longestCommonPrefix("abcabcabc", "abcdefabcdef", 0, 6)
3
>>> _longestCommonPrefix("abcdefabcdef", "abcabcabc", 6, 0)
3
>>> _longestCommonPrefix("abc", "abcabc", 1, 4)
2
>>> _longestCommonPrefix("abcabc", "abc", 4, 1)
2 | Below is the instruction that describes the task:
### Input:
Returns the length of the longest common prefix of seq1
starting at offset start1 and seq2 starting at offset start2.
>>> _longestCommonPrefix("abcdef", "abcghj")
3
>>> _longestCommonPrefix("abcghj", "abcdef")
3
>>> _longestCommonPrefix("miss", "")
0
>>> _longestCommonPrefix("", "mr")
0
>>> _longestCommonPrefix(range(128), range(128))
128
>>> _longestCommonPrefix("abcabcabc", "abcdefabcdef", 0, 6)
3
>>> _longestCommonPrefix("abcdefabcdef", "abcabcabc", 6, 0)
3
>>> _longestCommonPrefix("abc", "abcabc", 1, 4)
2
>>> _longestCommonPrefix("abcabc", "abc", 4, 1)
2
### Response:
def _longestCommonPrefix(seq1, seq2, start1=0, start2=0):
"""
Returns the length of the longest common prefix of seq1
starting at offset start1 and seq2 starting at offset start2.
>>> _longestCommonPrefix("abcdef", "abcghj")
3
>>> _longestCommonPrefix("abcghj", "abcdef")
3
>>> _longestCommonPrefix("miss", "")
0
>>> _longestCommonPrefix("", "mr")
0
>>> _longestCommonPrefix(range(128), range(128))
128
>>> _longestCommonPrefix("abcabcabc", "abcdefabcdef", 0, 6)
3
>>> _longestCommonPrefix("abcdefabcdef", "abcabcabc", 6, 0)
3
>>> _longestCommonPrefix("abc", "abcabc", 1, 4)
2
>>> _longestCommonPrefix("abcabc", "abc", 4, 1)
2
"""
len1 = len(seq1) - start1
len2 = len(seq2) - start2
# We set seq2 as the shortest sequence
if len1 < len2:
seq1, seq2 = seq2, seq1
start1, start2 = start2, start1
len1, len2 = len2, len1
# if seq2 is empty returns 0
if len2 == 0:
return 0
i = 0
pos2 = start2
for i in range(min(len1, len2)):
# print seq1, seq2, start1, start2
if seq1[start1 + i] != seq2[start2 + i]:
return i
# we have reached the end of seq2 (need to increment i)
return i + 1 |
def compressBWTPoolProcess(tup):
'''
During compression, each available process will calculate a subportion of the BWT independently using this
function. This process takes the chunk and rewrites it into a given filename using the technique described
in the compressBWT(...) function header
'''
#pull the tuple info
inputFN = tup[0]
startIndex = tup[1]
endIndex = tup[2]
tempFN = tup[3]
#this shouldn't happen
if startIndex == endIndex:
print 'ERROR: EQUAL INDICES'
return None
#load the file
bwt = np.load(inputFN, 'r')
#create bit spacings
letterBits = 3
numberBits = 8-letterBits
numPower = 2**numberBits
mask = 255 >> letterBits
#search for the places they're different
whereSol = np.add(startIndex+1, np.where(bwt[startIndex:endIndex-1] != bwt[startIndex+1:endIndex])[0])
#this is the difference between two adjacent ones
deltas = np.zeros(dtype='<u4', shape=(whereSol.shape[0]+1,))
if whereSol.shape[0] == 0:
deltas[0] = endIndex-startIndex
else:
deltas[0] = whereSol[0]-startIndex
deltas[1:-1] = np.subtract(whereSol[1:], whereSol[0:-1])
deltas[-1] = endIndex - whereSol[-1]
#calculate the number of bytes we need to store this information
size = 0
byteCount = 0
lastCount = 1
while lastCount > 0:
lastCount = np.where(deltas >= 2**(numberBits*byteCount))[0].shape[0]
size += lastCount
byteCount += 1
#create the file
ret = np.lib.format.open_memmap(tempFN, 'w+', '<u1', (size,))
retIndex = 0
c = bwt[startIndex]
startChar = c
delta = deltas[0]
while delta > 0:
ret[retIndex] = ((delta & mask) << letterBits)+c
delta /= numPower
retIndex += 1
#fill in the values based on the bit functions
for i in xrange(0, whereSol.shape[0]):
c = bwt[whereSol[i]]
delta = deltas[i+1]
while delta > 0:
ret[retIndex] = ((delta & mask) << letterBits)+c
delta /= numPower
retIndex += 1
endChar = c
#return a lot of information so we can easily combine the results
return (size, startChar, deltas[0], endChar, deltas[-1], tempFN) | During compression, each available process will calculate a subportion of the BWT independently using this
function. This process takes the chunk and rewrites it into a given filename using the technique described
in the compressBWT(...) function header | Below is the instruction that describes the task:
### Input:
During compression, each available process will calculate a subportion of the BWT independently using this
function. This process takes the chunk and rewrites it into a given filename using the technique described
in the compressBWT(...) function header
### Response:
def compressBWTPoolProcess(tup):
'''
During compression, each available process will calculate a subportion of the BWT independently using this
function. This process takes the chunk and rewrites it into a given filename using the technique described
in the compressBWT(...) function header
'''
#pull the tuple info
inputFN = tup[0]
startIndex = tup[1]
endIndex = tup[2]
tempFN = tup[3]
#this shouldn't happen
if startIndex == endIndex:
print 'ERROR: EQUAL INDICES'
return None
#load the file
bwt = np.load(inputFN, 'r')
#create bit spacings
letterBits = 3
numberBits = 8-letterBits
numPower = 2**numberBits
mask = 255 >> letterBits
#search for the places they're different
whereSol = np.add(startIndex+1, np.where(bwt[startIndex:endIndex-1] != bwt[startIndex+1:endIndex])[0])
#this is the difference between two adjacent ones
deltas = np.zeros(dtype='<u4', shape=(whereSol.shape[0]+1,))
if whereSol.shape[0] == 0:
deltas[0] = endIndex-startIndex
else:
deltas[0] = whereSol[0]-startIndex
deltas[1:-1] = np.subtract(whereSol[1:], whereSol[0:-1])
deltas[-1] = endIndex - whereSol[-1]
#calculate the number of bytes we need to store this information
size = 0
byteCount = 0
lastCount = 1
while lastCount > 0:
lastCount = np.where(deltas >= 2**(numberBits*byteCount))[0].shape[0]
size += lastCount
byteCount += 1
#create the file
ret = np.lib.format.open_memmap(tempFN, 'w+', '<u1', (size,))
retIndex = 0
c = bwt[startIndex]
startChar = c
delta = deltas[0]
while delta > 0:
ret[retIndex] = ((delta & mask) << letterBits)+c
delta /= numPower
retIndex += 1
#fill in the values based on the bit functions
for i in xrange(0, whereSol.shape[0]):
c = bwt[whereSol[i]]
delta = deltas[i+1]
while delta > 0:
ret[retIndex] = ((delta & mask) << letterBits)+c
delta /= numPower
retIndex += 1
endChar = c
#return a lot of information so we can easily combine the results
return (size, startChar, deltas[0], endChar, deltas[-1], tempFN) |
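The byte layout above keeps the 3-bit symbol in the low bits of every byte and spreads the run length across the remaining 5 bits, least-significant chunk first. A standalone sketch of that packing and its inverse (illustrative helper names, not part of the original module):
LETTER_BITS = 3
NUMBER_BITS = 8 - LETTER_BITS      # 5 bits of run length per byte
NUM_POWER = 2 ** NUMBER_BITS       # 32
MASK = 255 >> LETTER_BITS          # 0b00011111

def pack_run(symbol, run_length):
    """Encode (symbol, run_length) as the little-endian base-32 byte list used above."""
    out = []
    while run_length > 0:
        out.append(((run_length & MASK) << LETTER_BITS) + symbol)
        run_length //= NUM_POWER
    return out

def unpack_run(encoded):
    """Recover (symbol, run_length) from bytes produced by pack_run."""
    symbol = encoded[0] & (2 ** LETTER_BITS - 1)
    run_length = 0
    for i, b in enumerate(encoded):
        run_length += (b >> LETTER_BITS) << (NUMBER_BITS * i)
    return symbol, run_length

assert unpack_run(pack_run(4, 1000)) == (4, 1000)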
def flush_one(self, process_name, ignore_priority=False):
""" method iterates over the reprocessing queue for the given process
and re-submits UOW whose waiting time has expired """
q = self.reprocess_uows[process_name]
self._flush_queue(q, ignore_priority) | method iterates over the reprocessing queue for the given process
and re-submits UOW whose waiting time has expired | Below is the instruction that describes the task:
### Input:
method iterates over the reprocessing queue for the given process
and re-submits UOW whose waiting time has expired
### Response:
def flush_one(self, process_name, ignore_priority=False):
""" method iterates over the reprocessing queue for the given process
and re-submits UOW whose waiting time has expired """
q = self.reprocess_uows[process_name]
self._flush_queue(q, ignore_priority) |
def record(self):
# type: () -> bytes
'''
A method to generate the string representing this UDF Primary Volume
Descriptor.
Parameters:
None.
Returns:
A string representing this UDF Primary Volume Descriptor.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Primary Volume Descriptor not initialized')
rec = struct.pack(self.FMT, b'\x00' * 16,
self.vol_desc_seqnum, self.desc_num,
self.vol_ident, 1, 1, 2, self.max_interchange_level, 1, 1,
self.vol_set_ident,
self.desc_char_set, self.explanatory_char_set,
self.vol_abstract_length, self.vol_abstract_extent,
self.vol_copyright_length, self.vol_copyright_extent,
self.app_ident.record(), self.recording_date.record(),
self.impl_ident.record(), self.implementation_use,
self.predecessor_vol_desc_location, 0, b'\x00' * 22)[16:]
return self.desc_tag.record(rec) + rec | A method to generate the string representing this UDF Primary Volume
Descriptor.
Parameters:
None.
Returns:
A string representing this UDF Primary Volume Descriptor. | Below is the instruction that describes the task:
### Input:
A method to generate the string representing this UDF Primary Volume
Descriptor.
Parameters:
None.
Returns:
A string representing this UDF Primary Volume Descriptor.
### Response:
def record(self):
# type: () -> bytes
'''
A method to generate the string representing this UDF Primary Volume
Descriptor.
Parameters:
None.
Returns:
A string representing this UDF Primary Volume Descriptor.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Primary Volume Descriptor not initialized')
rec = struct.pack(self.FMT, b'\x00' * 16,
self.vol_desc_seqnum, self.desc_num,
self.vol_ident, 1, 1, 2, self.max_interchange_level, 1, 1,
self.vol_set_ident,
self.desc_char_set, self.explanatory_char_set,
self.vol_abstract_length, self.vol_abstract_extent,
self.vol_copyright_length, self.vol_copyright_extent,
self.app_ident.record(), self.recording_date.record(),
self.impl_ident.record(), self.implementation_use,
self.predecessor_vol_desc_location, 0, b'\x00' * 22)[16:]
return self.desc_tag.record(rec) + rec |
def do_worker(self, arg):
"""
Usage:
worker approve (--all | --hit <hit_id> ... | <assignment_id> ...) [--all-studies] [--force]
worker reject (--hit <hit_id> | <assignment_id> ...)
worker unreject (--hit <hit_id> | <assignment_id> ...)
worker bonus (--amount <amount> | --auto) (--hit <hit_id> | <assignment_id> ...)
worker list [--submitted | --approved | --rejected] [(--hit <hit_id>)] [--all-studies]
worker help
"""
if arg['approve']:
self.worker_approve(arg['--all'], arg['<hit_id>'], arg['<assignment_id>'], arg['--all-studies'], arg['--force'])
elif arg['reject']:
self.amt_services_wrapper.worker_reject(arg['<hit_id>'], arg['<assignment_id>'])
elif arg['unreject']:
self.amt_services_wrapper.worker_unreject(arg['<hit_id>'], arg['<assignment_id>'])
elif arg['list']:
self.worker_list(arg['--submitted'], arg['--approved'], arg['--rejected'], arg['<hit_id>'], arg['--all-studies'])
elif arg['bonus']:
self.amt_services_wrapper.worker_bonus(arg['<hit_id>'], arg['--auto'], arg['<amount>'], '',
arg['<assignment_id>'])
else:
self.help_worker() | Usage:
worker approve (--all | --hit <hit_id> ... | <assignment_id> ...) [--all-studies] [--force]
worker reject (--hit <hit_id> | <assignment_id> ...)
worker unreject (--hit <hit_id> | <assignment_id> ...)
worker bonus (--amount <amount> | --auto) (--hit <hit_id> | <assignment_id> ...)
worker list [--submitted | --approved | --rejected] [(--hit <hit_id>)] [--all-studies]
worker help | Below is the instruction that describes the task:
### Input:
Usage:
worker approve (--all | --hit <hit_id> ... | <assignment_id> ...) [--all-studies] [--force]
worker reject (--hit <hit_id> | <assignment_id> ...)
worker unreject (--hit <hit_id> | <assignment_id> ...)
worker bonus (--amount <amount> | --auto) (--hit <hit_id> | <assignment_id> ...)
worker list [--submitted | --approved | --rejected] [(--hit <hit_id>)] [--all-studies]
worker help
### Response:
def do_worker(self, arg):
"""
Usage:
worker approve (--all | --hit <hit_id> ... | <assignment_id> ...) [--all-studies] [--force]
worker reject (--hit <hit_id> | <assignment_id> ...)
worker unreject (--hit <hit_id> | <assignment_id> ...)
worker bonus (--amount <amount> | --auto) (--hit <hit_id> | <assignment_id> ...)
worker list [--submitted | --approved | --rejected] [(--hit <hit_id>)] [--all-studies]
worker help
"""
if arg['approve']:
self.worker_approve(arg['--all'], arg['<hit_id>'], arg['<assignment_id>'], arg['--all-studies'], arg['--force'])
elif arg['reject']:
self.amt_services_wrapper.worker_reject(arg['<hit_id>'], arg['<assignment_id>'])
elif arg['unreject']:
self.amt_services_wrapper.worker_unreject(arg['<hit_id>'], arg['<assignment_id>'])
elif arg['list']:
self.worker_list(arg['--submitted'], arg['--approved'], arg['--rejected'], arg['<hit_id>'], arg['--all-studies'])
elif arg['bonus']:
self.amt_services_wrapper.worker_bonus(arg['<hit_id>'], arg['--auto'], arg['<amount>'], '',
arg['<assignment_id>'])
else:
self.help_worker() |
def serializeG1(x, compress=True):
"""
Converts G1 element @x into an array of bytes. If @compress is True,
the point will be compressed resulting in a much shorter string of bytes.
"""
assertType(x, G1Element)
return _serialize(x, compress, librelic.g1_size_bin_abi,
librelic.g1_write_bin_abi) | Converts G1 element @x into an array of bytes. If @compress is True,
the point will be compressed resulting in a much shorter string of bytes. | Below is the instruction that describes the task:
### Input:
Converts G1 element @x into an array of bytes. If @compress is True,
the point will be compressed resulting in a much shorter string of bytes.
### Response:
def serializeG1(x, compress=True):
"""
Converts G1 element @x into an array of bytes. If @compress is True,
the point will be compressed resulting in a much shorter string of bytes.
"""
assertType(x, G1Element)
return _serialize(x, compress, librelic.g1_size_bin_abi,
librelic.g1_write_bin_abi) |
def parse_quantitationesultsline(self, line):
""" Parses quantitation result lines
Please see samples/GC-MS output.txt
[MS Quantitative Results] section
"""
if line == ',,,,,,,,,,,,,,,,,,':
return 0
if line.startswith('SampleID'):
self._end_header = True
self._quantitationresultsheader = [token.strip() for token
in line.split(self.COMMAS)
if token.strip()]
return 0
splitted = [token.strip() for token in line.split(self.COMMAS)]
quantitation = {'DefaultResult': 'FinalConcentration'}
for colname in self._quantitationresultsheader:
quantitation[colname] = ''
for i in range(len(splitted)):
token = splitted[i]
if i < len(self._quantitationresultsheader):
colname = self._quantitationresultsheader[i]
if colname in self.QUANTITATIONRESULTS_NUMERICHEADERS:
try:
quantitation[colname] = float(token)
except ValueError:
self.warn(
"No valid number ${token} in column "
"${index} (${column_name})",
mapping={"token": token,
"index": str(i + 1),
"column_name": colname},
numline=self._numline, line=line)
quantitation[colname] = token
else:
quantitation[colname] = token
elif token:
self.err("Orphan value in column ${index} (${token})",
mapping={"index": str(i+1),
"token": token},
numline=self._numline, line=line)
result = quantitation[quantitation['DefaultResult']]
column_name = quantitation['DefaultResult']
result = self.zeroValueDefaultInstrumentResults(column_name,
result, line)
quantitation[quantitation['DefaultResult']] = result
d = datetime.strptime(quantitation['AcqDateTime'], "%m/%d/%Y %H:%M")
quantitation['AcqDateTime'] = d
val = re.sub(r"\W", "", quantitation['Compound'])
self._addRawResult(quantitation['DataFileName'],
values={val: quantitation},
override=False) | Parses quantitation result lines
Please see samples/GC-MS output.txt
[MS Quantitative Results] section | Below is the instruction that describes the task:
### Input:
Parses quantitation result lines
Please see samples/GC-MS output.txt
[MS Quantitative Results] section
### Response:
def parse_quantitationesultsline(self, line):
""" Parses quantitation result lines
Please see samples/GC-MS output.txt
[MS Quantitative Results] section
"""
if line == ',,,,,,,,,,,,,,,,,,':
return 0
if line.startswith('SampleID'):
self._end_header = True
self._quantitationresultsheader = [token.strip() for token
in line.split(self.COMMAS)
if token.strip()]
return 0
splitted = [token.strip() for token in line.split(self.COMMAS)]
quantitation = {'DefaultResult': 'FinalConcentration'}
for colname in self._quantitationresultsheader:
quantitation[colname] = ''
for i in range(len(splitted)):
token = splitted[i]
if i < len(self._quantitationresultsheader):
colname = self._quantitationresultsheader[i]
if colname in self.QUANTITATIONRESULTS_NUMERICHEADERS:
try:
quantitation[colname] = float(token)
except ValueError:
self.warn(
"No valid number ${token} in column "
"${index} (${column_name})",
mapping={"token": token,
"index": str(i + 1),
"column_name": colname},
numline=self._numline, line=line)
quantitation[colname] = token
else:
quantitation[colname] = token
elif token:
self.err("Orphan value in column ${index} (${token})",
mapping={"index": str(i+1),
"token": token},
numline=self._numline, line=line)
result = quantitation[quantitation['DefaultResult']]
column_name = quantitation['DefaultResult']
result = self.zeroValueDefaultInstrumentResults(column_name,
result, line)
quantitation[quantitation['DefaultResult']] = result
d = datetime.strptime(quantitation['AcqDateTime'], "%m/%d/%Y %H:%M")
quantitation['AcqDateTime'] = d
val = re.sub(r"\W", "", quantitation['Compound'])
self._addRawResult(quantitation['DataFileName'],
values={val: quantitation},
override=False) |
def respond_list_directory(self, dir_path, query=None):
"""
Respond to the client with an HTML page listing the contents of
the specified directory.
:param str dir_path: The path of the directory to list the contents of.
"""
del query
try:
dir_contents = os.listdir(dir_path)
except os.error:
self.respond_not_found()
return
if os.path.normpath(dir_path) != self.__config['serve_files_root']:
dir_contents.append('..')
dir_contents.sort(key=lambda a: a.lower())
displaypath = html.escape(urllib.parse.unquote(self.path), quote=True)
f = io.BytesIO()
encoding = sys.getfilesystemencoding()
f.write(b'<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n')
f.write(b'<html>\n<title>Directory listing for ' + displaypath.encode(encoding) + b'</title>\n')
f.write(b'<body>\n<h2>Directory listing for ' + displaypath.encode(encoding) + b'</h2>\n')
f.write(b'<hr>\n<ul>\n')
for name in dir_contents:
fullname = os.path.join(dir_path, name)
displayname = linkname = name
# Append / for directories or @ for symbolic links
if os.path.isdir(fullname):
displayname = name + "/"
linkname = name + "/"
if os.path.islink(fullname):
displayname = name + "@"
# Note: a link to a directory displays with @ and links with /
f.write(('<li><a href="' + urllib.parse.quote(linkname) + '">' + html.escape(displayname, quote=True) + '</a>\n').encode(encoding))
f.write(b'</ul>\n<hr>\n</body>\n</html>\n')
length = f.tell()
f.seek(0)
self.send_response(200)
self.send_header('Content-Type', 'text/html; charset=' + encoding)
self.send_header('Content-Length', length)
self.end_headers()
shutil.copyfileobj(f, self.wfile)
f.close()
return | Respond to the client with an HTML page listing the contents of
the specified directory.
:param str dir_path: The path of the directory to list the contents of. | Below is the instruction that describes the task:
### Input:
Respond to the client with an HTML page listing the contents of
the specified directory.
:param str dir_path: The path of the directory to list the contents of.
### Response:
def respond_list_directory(self, dir_path, query=None):
"""
Respond to the client with an HTML page listing the contents of
the specified directory.
:param str dir_path: The path of the directory to list the contents of.
"""
del query
try:
dir_contents = os.listdir(dir_path)
except os.error:
self.respond_not_found()
return
if os.path.normpath(dir_path) != self.__config['serve_files_root']:
dir_contents.append('..')
dir_contents.sort(key=lambda a: a.lower())
displaypath = html.escape(urllib.parse.unquote(self.path), quote=True)
f = io.BytesIO()
encoding = sys.getfilesystemencoding()
f.write(b'<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n')
f.write(b'<html>\n<title>Directory listing for ' + displaypath.encode(encoding) + b'</title>\n')
f.write(b'<body>\n<h2>Directory listing for ' + displaypath.encode(encoding) + b'</h2>\n')
f.write(b'<hr>\n<ul>\n')
for name in dir_contents:
fullname = os.path.join(dir_path, name)
displayname = linkname = name
# Append / for directories or @ for symbolic links
if os.path.isdir(fullname):
displayname = name + "/"
linkname = name + "/"
if os.path.islink(fullname):
displayname = name + "@"
# Note: a link to a directory displays with @ and links with /
f.write(('<li><a href="' + urllib.parse.quote(linkname) + '">' + html.escape(displayname, quote=True) + '</a>\n').encode(encoding))
f.write(b'</ul>\n<hr>\n</body>\n</html>\n')
length = f.tell()
f.seek(0)
self.send_response(200)
self.send_header('Content-Type', 'text/html; charset=' + encoding)
self.send_header('Content-Length', length)
self.end_headers()
shutil.copyfileobj(f, self.wfile)
f.close()
return |
def read_list_from_csv(filepath, dict_form=False, headers=None, **kwargs):
# type: (str, bool, Union[int, List[int], List[str], None], Any) -> List[Union[Dict, List]]
"""Read a list of rows in dict or list form from a csv. (The headers argument is either a row
number or list of row numbers (in case of multi-line headers) to be considered as headers
(rows start counting at 1), or the actual headers defined a list of strings. If not set,
all rows will be treated as containing values.)
Args:
filepath (str): Path to read from
dict_form (bool): Return in dict form. Defaults to False.
headers (Union[int, List[int], List[str], None]): Row number of headers. Defaults to None.
**kwargs: Other arguments to pass to Tabulator Stream
Returns:
List[Union[Dict, List]]: List of rows in dict or list form
"""
stream = Stream(filepath, headers=headers, **kwargs)
stream.open()
result = stream.read(keyed=dict_form)
stream.close()
return result | Read a list of rows in dict or list form from a csv. (The headers argument is either a row
number or list of row numbers (in case of multi-line headers) to be considered as headers
(rows start counting at 1), or the actual headers defined a list of strings. If not set,
all rows will be treated as containing values.)
Args:
filepath (str): Path to read from
dict_form (bool): Return in dict form. Defaults to False.
headers (Union[int, List[int], List[str], None]): Row number of headers. Defaults to None.
**kwargs: Other arguments to pass to Tabulator Stream
Returns:
List[Union[Dict, List]]: List of rows in dict or list form | Below is the instruction that describes the task:
### Input:
Read a list of rows in dict or list form from a csv. (The headers argument is either a row
number or list of row numbers (in case of multi-line headers) to be considered as headers
(rows start counting at 1), or the actual headers defined a list of strings. If not set,
all rows will be treated as containing values.)
Args:
filepath (str): Path to read from
dict_form (bool): Return in dict form. Defaults to False.
headers (Union[int, List[int], List[str], None]): Row number of headers. Defaults to None.
**kwargs: Other arguments to pass to Tabulator Stream
Returns:
List[Union[Dict, List]]: List of rows in dict or list form
### Response:
def read_list_from_csv(filepath, dict_form=False, headers=None, **kwargs):
# type: (str, bool, Union[int, List[int], List[str], None], Any) -> List[Union[Dict, List]]
"""Read a list of rows in dict or list form from a csv. (The headers argument is either a row
number or list of row numbers (in case of multi-line headers) to be considered as headers
(rows start counting at 1), or the actual headers defined a list of strings. If not set,
all rows will be treated as containing values.)
Args:
filepath (str): Path to read from
dict_form (bool): Return in dict form. Defaults to False.
headers (Union[int, List[int], List[str], None]): Row number of headers. Defaults to None.
**kwargs: Other arguments to pass to Tabulator Stream
Returns:
List[Union[Dict, List]]: List of rows in dict or list form
"""
stream = Stream(filepath, headers=headers, **kwargs)
stream.open()
result = stream.read(keyed=dict_form)
stream.close()
return result |
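A brief usage sketch, assuming a local data.csv whose first row holds the column names.
# Keyed rows (dicts), treating row 1 as the header row.
rows = read_list_from_csv("data.csv", dict_form=True, headers=1)

# Plain list-of-lists with no header handling.
raw_rows = read_list_from_csv("data.csv")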
def compute(datetimes, to_np=None): # @NoSelf
"""
Computes the provided date/time components into CDF epoch value(s).
For CDF_EPOCH:
For computing into CDF_EPOCH value, each date/time elements should
have exactly seven (7) components, as year, month, day, hour, minute,
second and millisecond, in a list. For example:
[[2017,1,1,1,1,1,111],[2017,2,2,2,2,2,222]]
Or, call function compute_epoch directly, instead, with at least three
(3) first (up to seven) components. The last component, if
not the 7th, can be a float that can have a fraction of the unit.
For CDF_EPOCH16:
They should have exactly ten (10) components, as year,
month, day, hour, minute, second, millisecond, microsecond, nanosecond
and picosecond, in a list. For example:
[[2017,1,1,1,1,1,123,456,789,999],[2017,2,2,2,2,2,987,654,321,999]]
Or, call function compute_epoch directly, instead, with at least three
(3) first (up to ten) components. The last component, if
not the 10th, can be a float that can have a fraction of the unit.
For TT2000:
Each TT2000 typed date/time should have exactly nine (9) components, as
year, month, day, hour, minute, second, millisecond, microsecond,
and nanosecond, in a list. For example:
[[2017,1,1,1,1,1,123,456,789],[2017,2,2,2,2,2,987,654,321]]
Or, call function compute_tt2000 directly, instead, with at least three
(3) first (up to nine) components. The last component, if
not the 9th, can be a float that can have a fraction of the unit.
Specify to_np to True, if the result should be in numpy class.
"""
if not isinstance(datetimes, (list, tuple, np.ndarray)):
raise TypeError('datetime must be in list form')
if isinstance(datetimes[0], numbers.Number):
items = len(datetimes)
elif isinstance(datetimes[0], (list, tuple, np.ndarray)):
items = len(datetimes[0])
else:
print('Unknown input')
return
if (items == 7):
return CDFepoch.compute_epoch(datetimes, to_np)
elif (items == 10):
return CDFepoch.compute_epoch16(datetimes, to_np)
elif (items == 9):
return CDFepoch.compute_tt2000(datetimes, to_np)
else:
print('Unknown input')
return | Computes the provided date/time components into CDF epoch value(s).
For CDF_EPOCH:
For computing into CDF_EPOCH value, each date/time elements should
have exactly seven (7) components, as year, month, day, hour, minute,
second and millisecond, in a list. For example:
[[2017,1,1,1,1,1,111],[2017,2,2,2,2,2,222]]
Or, call function compute_epoch directly, instead, with at least three
(3) first (up to seven) components. The last component, if
not the 7th, can be a float that can have a fraction of the unit.
For CDF_EPOCH16:
They should have exactly ten (10) components, as year,
month, day, hour, minute, second, millisecond, microsecond, nanosecond
and picosecond, in a list. For example:
[[2017,1,1,1,1,1,123,456,789,999],[2017,2,2,2,2,2,987,654,321,999]]
Or, call function compute_epoch directly, instead, with at least three
(3) first (up to ten) components. The last component, if
not the 10th, can be a float that can have a fraction of the unit.
For TT2000:
Each TT2000 typed date/time should have exactly nine (9) components, as
year, month, day, hour, minute, second, millisecond, microsecond,
and nanosecond, in a list. For example:
[[2017,1,1,1,1,1,123,456,789],[2017,2,2,2,2,2,987,654,321]]
Or, call function compute_tt2000 directly, instead, with at least three
(3) first (up to nine) components. The last component, if
not the 9th, can be a float that can have a fraction of the unit.
Specify to_np to True, if the result should be in numpy class. | Below is the instruction that describes the task:
### Input:
Computes the provided date/time components into CDF epoch value(s).
For CDF_EPOCH:
For computing into CDF_EPOCH value, each date/time elements should
have exactly seven (7) components, as year, month, day, hour, minute,
second and millisecond, in a list. For example:
[[2017,1,1,1,1,1,111],[2017,2,2,2,2,2,222]]
Or, call function compute_epoch directly, instead, with at least three
(3) first (up to seven) components. The last component, if
not the 7th, can be a float that can have a fraction of the unit.
For CDF_EPOCH16:
They should have exactly ten (10) components, as year,
month, day, hour, minute, second, millisecond, microsecond, nanosecond
and picosecond, in a list. For example:
[[2017,1,1,1,1,1,123,456,789,999],[2017,2,2,2,2,2,987,654,321,999]]
Or, call function compute_epoch directly, instead, with at least three
(3) first (up to ten) components. The last component, if
not the 10th, can be a float that can have a fraction of the unit.
For TT2000:
Each TT2000 typed date/time should have exactly nine (9) components, as
year, month, day, hour, minute, second, millisecond, microsecond,
and nanosecond, in a list. For example:
[[2017,1,1,1,1,1,123,456,789],[2017,2,2,2,2,2,987,654,321]]
Or, call function compute_tt2000 directly, instead, with at least three
(3) first (up to nine) components. The last component, if
not the 9th, can be a float that can have a fraction of the unit.
Specify to_np to True, if the result should be in numpy class.
### Response:
def compute(datetimes, to_np=None): # @NoSelf
"""
Computes the provided date/time components into CDF epoch value(s).
For CDF_EPOCH:
For computing into CDF_EPOCH value, each date/time elements should
have exactly seven (7) components, as year, month, day, hour, minute,
second and millisecond, in a list. For example:
[[2017,1,1,1,1,1,111],[2017,2,2,2,2,2,222]]
Or, call function compute_epoch directly, instead, with at least three
(3) first (up to seven) components. The last component, if
not the 7th, can be a float that can have a fraction of the unit.
For CDF_EPOCH16:
They should have exactly ten (10) components, as year,
month, day, hour, minute, second, millisecond, microsecond, nanosecond
and picosecond, in a list. For example:
[[2017,1,1,1,1,1,123,456,789,999],[2017,2,2,2,2,2,987,654,321,999]]
Or, call function compute_epoch directly, instead, with at least three
(3) first (up to ten) components. The last component, if
not the 10th, can be a float that can have a fraction of the unit.
For TT2000:
Each TT2000 typed date/time should have exactly nine (9) components, as
year, month, day, hour, minute, second, millisecond, microsecond,
and nanosecond, in a list. For example:
[[2017,1,1,1,1,1,123,456,789],[2017,2,2,2,2,2,987,654,321]]
Or, call function compute_tt2000 directly, instead, with at least three
(3) first (up to nine) components. The last component, if
not the 9th, can be a float that can have a fraction of the unit.
Specify to_np to True, if the result should be in numpy class.
"""
if not isinstance(datetimes, (list, tuple, np.ndarray)):
raise TypeError('datetime must be in list form')
if isinstance(datetimes[0], numbers.Number):
items = len(datetimes)
elif isinstance(datetimes[0], (list, tuple, np.ndarray)):
items = len(datetimes[0])
else:
print('Unknown input')
return
if (items == 7):
return CDFepoch.compute_epoch(datetimes, to_np)
elif (items == 10):
return CDFepoch.compute_epoch16(datetimes, to_np)
elif (items == 9):
return CDFepoch.compute_tt2000(datetimes, to_np)
else:
print('Unknown input')
return |
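A usage sketch; the import path assumes the cdflib layout, and the component count selects which epoch type is computed.
from cdflib.epochs import CDFepoch  # assumed import path

# 7 components -> CDF_EPOCH (milliseconds since year 0)
epoch = CDFepoch.compute([[2017, 1, 1, 1, 1, 1, 111],
                          [2017, 2, 2, 2, 2, 2, 222]])

# 9 components -> CDF_TIME_TT2000 (nanoseconds, leap-second aware)
tt2000 = CDFepoch.compute([[2017, 1, 1, 1, 1, 1, 123, 456, 789]])

# 10 components -> CDF_EPOCH16 (seconds plus picoseconds as a pair of doubles)
epoch16 = CDFepoch.compute([[2017, 1, 1, 1, 1, 1, 123, 456, 789, 999]], to_np=True)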
def register_model(self, key, *models, **kwargs):
"""
Register a cache_group with this manager.
Use this method to register more simple
groups where all models share the same parameters.
Any arguments are treated as models that you would like
to register.
Any keyword arguments received are passed to the
register method when registering each model.
:param key: The key to register this group as. \
Raises an exception if the key is already registered.
"""
cache_group = CacheGroup(key)
for model in models:
cache_group.register(model, **kwargs)
self.register_cache(cache_group) | Register a cache_group with this manager.
Use this method to register more simple
groups where all models share the same parameters.
Any arguments are treated as models that you would like
to register.
Any keyword arguments received are passed to the
register method when registering each model.
:param key: The key to register this group as. \
Raises an exception if the key is already registered. | Below is the instruction that describes the task:
### Input:
Register a cache_group with this manager.
Use this method to register more simple
groups where all models share the same parameters.
Any arguments are treated as models that you would like
to register.
Any keyword arguments received are passed to the
register method when registering each model.
:param key: The key to register this group as. \
Raises an exception if the key is already registered.
### Response:
def register_model(self, key, *models, **kwargs):
"""
Register a cache_group with this manager.
Use this method to register more simple
groups where all models share the same parameters.
Any arguments are treated as models that you would like
to register.
Any keyword arguments received are passed to the
register method when registering each model.
:param key: The key to register this group as. \
Raises an exception if the key is already registered.
"""
cache_group = CacheGroup(key)
for model in models:
cache_group.register(model, **kwargs)
self.register_cache(cache_group) |
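A hedged sketch of how such a manager might be used; the `cache_manager` instance, the `User`/`Profile` models, and the `timeout` keyword are all illustrative assumptions.
# Shorthand: one call registers several models under one key.
cache_manager.register_model("accounts", User, Profile, timeout=300)

# Equivalent long-hand form using an explicit CacheGroup.
group = CacheGroup("accounts")
group.register(User, timeout=300)
group.register(Profile, timeout=300)
cache_manager.register_cache(group)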
def save(self, obj):
"""Required functionality."""
if not obj.id:
obj.id = uuid()
stored_data = {
'id': obj.id,
'value': obj.to_data()
}
index_vals = obj.indexes() or {}
for key in obj.__class__.index_names() or []:
val = index_vals.get(key, '')
stored_data[key] = DynamoMappings.map_index_val(val)
table = self.get_class_table(obj.__class__)
item = Item(table, data=stored_data)
item.save(overwrite=True) | Required functionality. | Below is the instruction that describes the task:
### Input:
Required functionality.
### Response:
def save(self, obj):
"""Required functionality."""
if not obj.id:
obj.id = uuid()
stored_data = {
'id': obj.id,
'value': obj.to_data()
}
index_vals = obj.indexes() or {}
for key in obj.__class__.index_names() or []:
val = index_vals.get(key, '')
stored_data[key] = DynamoMappings.map_index_val(val)
table = self.get_class_table(obj.__class__)
item = Item(table, data=stored_data)
item.save(overwrite=True) |
def close(self) -> None:
"""
Close the server and terminate connections with close code 1001.
This method is idempotent.
"""
if self.close_task is None:
self.close_task = self.loop.create_task(self._close()) | Close the server and terminate connections with close code 1001.
This method is idempotent. | Below is the instruction that describes the task:
### Input:
Close the server and terminate connections with close code 1001.
This method is idempotent.
### Response:
def close(self) -> None:
"""
Close the server and terminate connections with close code 1001.
This method is idempotent.
"""
if self.close_task is None:
self.close_task = self.loop.create_task(self._close()) |
def _run_atexit():
'''Hook frameworks must invoke this after the main hook body has
successfully completed. Do not invoke it if the hook fails.'''
global _atexit
for callback, args, kwargs in reversed(_atexit):
callback(*args, **kwargs)
del _atexit[:] | Hook frameworks must invoke this after the main hook body has
successfully completed. Do not invoke it if the hook fails. | Below is the instruction that describes the task:
### Input:
Hook frameworks must invoke this after the main hook body has
successfully completed. Do not invoke it if the hook fails.
### Response:
def _run_atexit():
'''Hook frameworks must invoke this after the main hook body has
successfully completed. Do not invoke it if the hook fails.'''
global _atexit
for callback, args, kwargs in reversed(_atexit):
callback(*args, **kwargs)
del _atexit[:] |
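A self-contained sketch of the register/run pair this function belongs to; the `atexit` registration helper shown here is an assumption modeled on the usual pattern.
_atexit = []

def atexit(callback, *args, **kwargs):
    """Queue a callback to run after the main hook body succeeds (assumed API)."""
    _atexit.append((callback, args, kwargs))

def _run_atexit():
    """Run queued callbacks in reverse registration order, then clear the queue."""
    global _atexit
    for callback, args, kwargs in reversed(_atexit):
        callback(*args, **kwargs)
    del _atexit[:]

atexit(print, "registered first, runs last")
atexit(print, "registered last, runs first")
_run_atexit()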
def dispatch(self, *args, **kwargs):
""" Only staff members can access this view """
return super(GetAppListJsonView, self).dispatch(*args, **kwargs) | Only staff members can access this view | Below is the instruction that describes the task:
### Input:
Only staff members can access this view
### Response:
def dispatch(self, *args, **kwargs):
""" Only staff members can access this view """
return super(GetAppListJsonView, self).dispatch(*args, **kwargs) |
def _calculate_optimal_column_widths(self):
"""Calculates widths of columns
:return: Length of longest data in each column (labels and data)
"""
columns = len(self.data[0]) # number of columns
str_labels = [parse_colorama(str(l)) for l in
self.labels] # labels as strings
str_data = [[parse_colorama(str(col)) for col in row] for row in
self.data]
# values as strings
widths = [0] * columns # length of longest string in each column
for row in str_data: # calculate max width in each column
widths = [max(w, len(c)) for w, c in zip(widths, row)]
# check if label name is longer than data
for col, label in enumerate(str_labels):
if len(label) > widths[col]:
widths[col] = len(label)
self.widths = widths | Calculates widths of columns
:return: Length of longest data in each column (labels and data) | Below is the instruction that describes the task:
### Input:
Calculates widths of columns
:return: Length of longest data in each column (labels and data)
### Response:
def _calculate_optimal_column_widths(self):
"""Calculates widths of columns
:return: Length of longest data in each column (labels and data)
"""
columns = len(self.data[0]) # number of columns
str_labels = [parse_colorama(str(l)) for l in
self.labels] # labels as strings
str_data = [[parse_colorama(str(col)) for col in row] for row in
self.data]
# values as strings
widths = [0] * columns # length of longest string in each column
for row in str_data: # calculate max width in each column
widths = [max(w, len(c)) for w, c in zip(widths, row)]
# check if label name is longer than data
for col, label in enumerate(str_labels):
if len(label) > widths[col]:
widths[col] = len(label)
self.widths = widths |
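The same width computation reduced to a standalone function over plain strings (without the colour-code parsing), as a sketch.
def optimal_column_widths(labels, data):
    """Return the longest cell length per column across both labels and rows."""
    labels = [str(label) for label in labels]
    rows = [[str(cell) for cell in row] for row in data]
    widths = [0] * len(rows[0])
    for row in rows:
        widths = [max(w, len(c)) for w, c in zip(widths, row)]
    return [max(w, len(label)) for w, label in zip(widths, labels)]

assert optimal_column_widths(["name", "id"], [["alice", "1"], ["bo", "42"]]) == [5, 2]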