code (string, 75-104k chars) | docstring (string, 1-46.9k chars) | text (string, 164-112k chars)
---|---|---
def _augment_observation_files(self, e):
"""
Augment all the file records in an event
:internal:
"""
e.file_records = [self._augment_file(f) for f in e.file_records]
return e | Augment all the file records in an event
:internal: | Below is the instruction that describes the task:
### Input:
Augment all the file records in an event
:internal:
### Response:
def _augment_observation_files(self, e):
"""
Augment all the file records in an event
:internal:
"""
e.file_records = [self._augment_file(f) for f in e.file_records]
return e |
def Backup(self, duration=0):
'''
method to use when a backup tag is encountered in musicXML. Moves back in the bar by <duration>
:param duration:
:return:
'''
total = 0
duration_total = duration * 4
children = self.GetChildrenIndexes()
notes = 0
for voice in children:
v = self.GetChild(voice)
indexes = v.GetChildrenIndexes()
if len(indexes) > 1:
indexes.reverse()
for index in indexes:
notes += 1
note = v.GetChild(index)
if hasattr(note, "duration"):
total += note.duration
if total >= duration_total:
break
gap = [
v.GetChild(i).duration for i in range(
0,
self.index -
notes) if hasattr(
v.GetChild(i),
"duration")]
previous = 0
for item in gap:
if item == previous:
self.gap -= previous
item = item / 2
self.gap += item
previous = item
#self.gap = sum([])
self.index -= notes | method to use when a backup tag is encountered in musicXML. Moves back in the bar by <duration>
:param duration:
:return: | Below is the instruction that describes the task:
### Input:
method to use when a backup tag is encountered in musicXML. Moves back in the bar by <duration>
:param duration:
:return:
### Response:
def Backup(self, duration=0):
'''
method to use when a backup tag is encountered in musicXML. Moves back in the bar by <duration>
:param duration:
:return:
'''
total = 0
duration_total = duration * 4
children = self.GetChildrenIndexes()
notes = 0
for voice in children:
v = self.GetChild(voice)
indexes = v.GetChildrenIndexes()
if len(indexes) > 1:
indexes.reverse()
for index in indexes:
notes += 1
note = v.GetChild(index)
if hasattr(note, "duration"):
total += note.duration
if total >= duration_total:
break
gap = [
v.GetChild(i).duration for i in range(
0,
self.index -
notes) if hasattr(
v.GetChild(i),
"duration")]
previous = 0
for item in gap:
if item == previous:
self.gap -= previous
item = item / 2
self.gap += item
previous = item
#self.gap = sum([])
self.index -= notes |
def pickle_encode(session_dict):
"Returns the given session dictionary pickled and encoded as a string."
pickled = pickle.dumps(session_dict, pickle.HIGHEST_PROTOCOL)
return base64.encodestring(pickled + get_query_hash(pickled).encode()) | Returns the given session dictionary pickled and encoded as a string. | Below is the instruction that describes the task:
### Input:
Returns the given session dictionary pickled and encoded as a string.
### Response:
def pickle_encode(session_dict):
"Returns the given session dictionary pickled and encoded as a string."
pickled = pickle.dumps(session_dict, pickle.HIGHEST_PROTOCOL)
return base64.encodestring(pickled + get_query_hash(pickled).encode()) |
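The encoder appends a hash of the pickled payload before base64-encoding, so the decoder can detect tampering. A minimal round-trip sketch of that idea follows; the `get_query_hash` helper here is a hypothetical HMAC-SHA1 stand-in (the real one is not shown), and `base64.encodebytes` replaces the long-deprecated `encodestring`.

import base64
import hashlib
import hmac
import pickle

SECRET = b"example-secret"  # hypothetical key; the project's real helper is not shown

def get_query_hash(payload: bytes) -> str:
    # Assumed shape of the helper: an HMAC hex digest over the pickled bytes.
    return hmac.new(SECRET, payload, hashlib.sha1).hexdigest()

def pickle_encode(session_dict):
    pickled = pickle.dumps(session_dict, pickle.HIGHEST_PROTOCOL)
    return base64.encodebytes(pickled + get_query_hash(pickled).encode())

def pickle_decode(encoded):
    raw = base64.decodebytes(encoded)
    digest_len = len(get_query_hash(b""))  # hex digest length is constant
    pickled, mac = raw[:-digest_len], raw[-digest_len:]
    if not hmac.compare_digest(mac.decode(), get_query_hash(pickled)):
        raise ValueError("session data appears to have been tampered with")
    return pickle.loads(pickled)

assert pickle_decode(pickle_encode({"user": 1})) == {"user": 1}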
def transform(self, y):
"""
Transform features per specified math function.
:param y:
:return:
"""
if isinstance(y, pd.DataFrame):
x = y.ix[:,0]
y = y.ix[:,1]
else:
x = y[:,0]
y = y[:,1]
if self.transform_type == 'add':
return pd.DataFrame(np.add(x, y))
elif self.transform_type == 'sub':
return pd.DataFrame(np.subtract(x, y))
elif self.transform_type == 'mul':
return pd.DataFrame(np.multiply(x, y))
elif self.transform_type == 'div':
return pd.DataFrame(np.divide(x, y))
elif self.transform_type == 'rem':
return pd.DataFrame(np.remainder(x, y))
elif self.transform_type == 'pow':
return pd.DataFrame(x**y) | Transform features per specified math function.
:param y:
:return: | Below is the instruction that describes the task:
### Input:
Transform features per specified math function.
:param y:
:return:
### Response:
def transform(self, y):
"""
Transform features per specified math function.
:param y:
:return:
"""
if isinstance(y, pd.DataFrame):
x = y.ix[:,0]
y = y.ix[:,1]
else:
x = y[:,0]
y = y[:,1]
if self.transform_type == 'add':
return pd.DataFrame(np.add(x, y))
elif self.transform_type == 'sub':
return pd.DataFrame(np.subtract(x, y))
elif self.transform_type == 'mul':
return pd.DataFrame(np.multiply(x, y))
elif self.transform_type == 'div':
return pd.DataFrame(np.divide(x, y))
elif self.transform_type == 'rem':
return pd.DataFrame(np.remainder(x, y))
elif self.transform_type == 'pow':
return pd.DataFrame(x**y) |
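The transformer above dispatches on `transform_type` to combine the first two columns with a NumPy ufunc; note that `DataFrame.ix` has since been removed from pandas. A small standalone sketch of the same dispatch idea, assuming a two-column DataFrame and using `iloc`:

import numpy as np
import pandas as pd

# Hypothetical mapping mirroring the transform_type branches above.
OPS = {'add': np.add, 'sub': np.subtract, 'mul': np.multiply,
       'div': np.divide, 'rem': np.remainder, 'pow': np.power}

def pairwise_transform(frame: pd.DataFrame, transform_type: str) -> pd.DataFrame:
    # Take the first two columns positionally, as the original does.
    x, y = frame.iloc[:, 0], frame.iloc[:, 1]
    return pd.DataFrame(OPS[transform_type](x, y))

frame = pd.DataFrame({'a': [1.0, 2.0, 3.0], 'b': [4.0, 5.0, 6.0]})
print(pairwise_transform(frame, 'add'))  # 5.0, 7.0, 9.0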
def find_harpoon_options(self, configuration, args_dict):
"""Return us all the harpoon options"""
d = lambda r: {} if r in (None, "", NotSpecified) else r
return MergedOptions.using(
dict(d(configuration.get('harpoon')).items())
, dict(d(args_dict.get("harpoon")).items())
).as_dict() | Return us all the harpoon options | Below is the instruction that describes the task:
### Input:
Return us all the harpoon options
### Response:
def find_harpoon_options(self, configuration, args_dict):
"""Return us all the harpoon options"""
d = lambda r: {} if r in (None, "", NotSpecified) else r
return MergedOptions.using(
dict(d(configuration.get('harpoon')).items())
, dict(d(args_dict.get("harpoon")).items())
).as_dict() |
def hash_file_contents(requirements_option: RequirementsOptions, path: Path) -> str:
""" Returns a SHA256 hash of the contents of ``path`` combined with the Arca version.
"""
return hashlib.sha256(path.read_bytes() + bytes(
requirements_option.name + arca.__version__, "utf-8"
)).hexdigest() | Returns a SHA256 hash of the contents of ``path`` combined with the Arca version. | Below is the instruction that describes the task:
### Input:
Returns a SHA256 hash of the contents of ``path`` combined with the Arca version.
### Response:
def hash_file_contents(requirements_option: RequirementsOptions, path: Path) -> str:
""" Returns a SHA256 hash of the contents of ``path`` combined with the Arca version.
"""
return hashlib.sha256(path.read_bytes() + bytes(
requirements_option.name + arca.__version__, "utf-8"
)).hexdigest() |
def is_valid_camel(cls, input_string, strcmp=None, ignore=''):
""" Checks to see if an input string is valid for use in camel casing
This assumes that all lowercase strings are not valid camel case situations and no camel string
can just be a capitalized word. Took ideas from here:
http://stackoverflow.com/questions/29916065/how-to-do-camelcase-split-in-python
:param input_string: str, input word
:param strcmp: str, force detection on a substring just in case its undetectable
(e.g. part of a section of text that's all lowercase)
:param ignore: str, what kind of string to ignore in the regex search
:return: bool, whether it is valid or not
"""
# clear any non chars from the string
if not input_string:
return False
input_string = ''.join([c for c in input_string if c.isalpha()])
matches = cls._get_regex_search(input_string,
cls.REGEX_CAMEL.format(SEP=cls.REGEX_SEPARATORS),
match_index=0,
ignore=ignore)
if matches or input_string == strcmp:
if strcmp:
index = input_string.find(strcmp) - 1
is_camel = strcmp[0].isupper() and input_string[index].islower()
is_input = strcmp == input_string
is_start = index + 1 == 0
return is_camel or is_input or is_start
return True
elif len(input_string) == 1:
return True
return False | Checks to see if an input string is valid for use in camel casing
This assumes that all lowercase strings are not valid camel case situations and no camel string
can just be a capitalized word. Took ideas from here:
http://stackoverflow.com/questions/29916065/how-to-do-camelcase-split-in-python
:param input_string: str, input word
:param strcmp: str, force detection on a substring just in case its undetectable
(e.g. part of a section of text that's all lowercase)
:param ignore: str, what kind of string to ignore in the regex search
:return: bool, whether it is valid or not | Below is the instruction that describes the task:
### Input:
Checks to see if an input string is valid for use in camel casing
This assumes that all lowercase strings are not valid camel case situations and no camel string
can just be a capitalized word. Took ideas from here:
http://stackoverflow.com/questions/29916065/how-to-do-camelcase-split-in-python
:param input_string: str, input word
:param strcmp: str, force detection on a substring just in case its undetectable
(e.g. part of a section of text that's all lowercase)
:param ignore: str, what kind of string to ignore in the regex search
:return: bool, whether it is valid or not
### Response:
def is_valid_camel(cls, input_string, strcmp=None, ignore=''):
""" Checks to see if an input string is valid for use in camel casing
This assumes that all lowercase strings are not valid camel case situations and no camel string
can just be a capitalized word. Took ideas from here:
http://stackoverflow.com/questions/29916065/how-to-do-camelcase-split-in-python
:param input_string: str, input word
:param strcmp: str, force detection on a substring just in case its undetectable
(e.g. part of a section of text that's all lowercase)
:param ignore: str, what kind of string to ignore in the regex search
:return: bool, whether it is valid or not
"""
# clear any non chars from the string
if not input_string:
return False
input_string = ''.join([c for c in input_string if c.isalpha()])
matches = cls._get_regex_search(input_string,
cls.REGEX_CAMEL.format(SEP=cls.REGEX_SEPARATORS),
match_index=0,
ignore=ignore)
if matches or input_string == strcmp:
if strcmp:
index = input_string.find(strcmp) - 1
is_camel = strcmp[0].isupper() and input_string[index].islower()
is_input = strcmp == input_string
is_start = index + 1 == 0
return is_camel or is_input or is_start
return True
elif len(input_string) == 1:
return True
return False |
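The class's actual `REGEX_CAMEL` and separator pattern are defined elsewhere, so as a rough illustration of the splitting idea from the cited Stack Overflow thread, here is a minimal sketch using an assumed zero-width lookahead pattern; it is not the project's real validator.

import re

# Assumed pattern: split before every interior uppercase letter.
CAMEL_SPLIT = re.compile(r'(?<!^)(?=[A-Z])')

def looks_like_camel(word: str) -> bool:
    word = ''.join(c for c in word if c.isalpha())
    if not word:
        return False
    parts = CAMEL_SPLIT.split(word)
    # Needs at least two pieces, and an all-caps word does not count.
    return len(parts) > 1 and not word.isupper()

print(looks_like_camel('inputString'))  # True
print(looks_like_camel('Capitalized'))  # False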
def ring_is_planar(ring, r_atoms):
"""Given a set of ring atoms, check if the ring is sufficiently planar
to be considered aromatic"""
normals = []
for a in r_atoms:
adj = pybel.ob.OBAtomAtomIter(a.OBAtom)
# Check for neighboring atoms in the ring
n_coords = [pybel.Atom(neigh).coords for neigh in adj if ring.IsMember(neigh)]
vec1, vec2 = vector(a.coords, n_coords[0]), vector(a.coords, n_coords[1])
normals.append(np.cross(vec1, vec2))
# Given all normals of ring atoms and their neighbors, the angle between any has to be 5.0 deg or less
for n1, n2 in itertools.product(normals, repeat=2):
arom_angle = vecangle(n1, n2)
if all([arom_angle > config.AROMATIC_PLANARITY, arom_angle < 180.0 - config.AROMATIC_PLANARITY]):
return False
return True | Given a set of ring atoms, check if the ring is sufficiently planar
to be considered aromatic | Below is the instruction that describes the task:
### Input:
Given a set of ring atoms, check if the ring is sufficiently planar
to be considered aromatic
### Response:
def ring_is_planar(ring, r_atoms):
"""Given a set of ring atoms, check if the ring is sufficiently planar
to be considered aromatic"""
normals = []
for a in r_atoms:
adj = pybel.ob.OBAtomAtomIter(a.OBAtom)
# Check for neighboring atoms in the ring
n_coords = [pybel.Atom(neigh).coords for neigh in adj if ring.IsMember(neigh)]
vec1, vec2 = vector(a.coords, n_coords[0]), vector(a.coords, n_coords[1])
normals.append(np.cross(vec1, vec2))
# Given all normals of ring atoms and their neighbors, the angle between any has to be 5.0 deg or less
for n1, n2 in itertools.product(normals, repeat=2):
arom_angle = vecangle(n1, n2)
if all([arom_angle > config.AROMATIC_PLANARITY, arom_angle < 180.0 - config.AROMATIC_PLANARITY]):
return False
return True |
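The geometric core of the test is that all per-atom normals of a flat ring are either parallel or antiparallel. A small NumPy sketch of that check, with a stand-in `vecangle` and an assumed 5-degree threshold in place of `config.AROMATIC_PLANARITY`; the Open Babel plumbing is omitted.

import itertools
import numpy as np

AROMATIC_PLANARITY = 5.0  # degrees; assumed to match config.AROMATIC_PLANARITY

def vecangle(v1, v2):
    # Angle between two vectors, in degrees.
    cosang = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
    return np.degrees(np.arccos(np.clip(cosang, -1.0, 1.0)))

def normals_are_coplanar(normals):
    # Every pair must be nearly parallel (angle ~0) or antiparallel (angle ~180).
    for n1, n2 in itertools.combinations(normals, 2):
        angle = vecangle(n1, n2)
        if AROMATIC_PLANARITY < angle < 180.0 - AROMATIC_PLANARITY:
            return False
    return True

flat = [np.array([0.0, 0.0, 1.0]), np.array([0.0, 0.0, -1.0]), np.array([0.0, 0.0, 1.0])]
print(normals_are_coplanar(flat))  # True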
def coil_combine(data, w_idx=[1,2,3], coil_dim=2, sampling_rate=5000.):
"""
Combine data across coils based on the amplitude of the water peak,
according to:
.. math::
X = \sum_{i}{w_i S_i}
Where X is the resulting combined signal, $S_i$ are the individual coil
signals and $w_i$ are calculated as:
.. math::
w_i = mean(S_i) / var (S_i)
following [Hall2013]_. In addition, we apply a phase-correction, so that
all the phases of the signals from each coil are 0
Parameters
----------
data : float array
The data as it comes from the scanner, with shape (transients, echos,
coils, time points)
w_idx : list
The indices to the non-water-suppressed transients. Per default we take
the 2nd-4th transients. We dump the first one, because it seems to be
quite different than the rest of them...
coil_dim : int
The dimension on which the coils are represented. Default: 2
sampling rate : float
The sampling rate in Hz. Default : 5000.
References
----------
.. [Hall2013] Emma L. Hall, Mary C. Stephenson, Darren Price, Peter
G. Morris (2013). Methodology for improved detection of low
concentration metabolites in MRS: Optimised combination of signals from
multi-element coil arrays. Neuroimage 86: 35-42.
.. [Wald1997] Wald, L. and Wright, S. (1997). Theory and application of
array coils in MR spectroscopy. NMR in Biomedicine, 10: 394-410.
.. [Keeler2005] Keeler, J (2005). Understanding NMR spectroscopy, second
edition. Wiley (West Sussex, UK).
"""
w_data, w_supp_data = separate_signals(data, w_idx)
fft_w = np.fft.fftshift(fft.fft(w_data))
fft_w_supp = np.fft.fftshift(fft.fft(w_supp_data))
freqs_w = np.linspace(-sampling_rate/2.0,
sampling_rate/2.0,
w_data.shape[-1])
# To determine phase and amplitude, fit a Lorentzian line-shape to each
# coils data in each trial:
# No bounds except for on the phase:
bounds = [(None,None),
(0,None),
(0,None),
(-np.pi, np.pi),
(None,None),
(None, None)]
n_params = len(bounds)
params = np.zeros(fft_w.shape[:-1] + (n_params,))
# Let's fit a Lorentzian line-shape to each one of these:
for repeat in range(w_data.shape[0]):
for echo in range(w_data.shape[1]):
for coil in range(w_data.shape[2]):
sig = fft_w[repeat, echo, coil]
# Use the private function to do this:
params[repeat, echo, coil] = _do_lorentzian_fit(freqs_w,
sig, bounds)
# The area parameter stands for the magnitude:
area_w = params[..., 1]
# In each coil, we derive S/(N^2):
s = np.mean(area_w.reshape(-1, area_w.shape[-1]), 0)
n = np.var(area_w.reshape(-1, area_w.shape[-1]), 0)
amp_weight = s/n
# Normalize to sum to 1:
amp_weight = amp_weight / np.sum(amp_weight)
# Next, we make sure that all the coils have the same phase. We will use
# the phase of the Lorentzian to align the phases:
phase_param = params[..., 3]
zero_phi_w = np.mean(phase_param.reshape(-1, phase_param.shape[-1]),0)
# This recalculates the weight with the phase alignment (see page 397 in
# Wald paper):
weight = amp_weight * np.exp(-1j * zero_phi_w)
# Multiply each one of the signals by its coil-weights and average across
# coils:
na = np.newaxis # Short-hand
# Collapse across coils for the combination in both the water
weighted_w_data = np.mean(np.fft.ifft(np.fft.fftshift(
weight[na, na, :, na] * fft_w)), coil_dim)
weighted_w_supp_data = np.mean(np.fft.ifft(np.fft.fftshift(
weight[na, na, : ,na] * fft_w_supp)) , coil_dim)
# Normalize each series by the sqrt(rms):
def normalize_this(x):
return x * (x.shape[-1] / (np.sum(np.abs(x))))
weighted_w_data = normalize_this(weighted_w_data)
weighted_w_supp_data = normalize_this(weighted_w_supp_data)
# Squeeze in case that some extraneous dimensions were introduced (can
# happen for SV data, for example)
return weighted_w_data.squeeze(), weighted_w_supp_data.squeeze() | Combine data across coils based on the amplitude of the water peak,
according to:
.. math::
X = \sum_{i}{w_i S_i}
Where X is the resulting combined signal, $S_i$ are the individual coil
signals and $w_i$ are calculated as:
.. math::
w_i = mean(S_i) / var (S_i)
following [Hall2013]_. In addition, we apply a phase-correction, so that
all the phases of the signals from each coil are 0
Parameters
----------
data : float array
The data as it comes from the scanner, with shape (transients, echos,
coils, time points)
w_idx : list
The indices to the non-water-suppressed transients. Per default we take
the 2nd-4th transients. We dump the first one, because it seems to be
quite different than the rest of them...
coil_dim : int
The dimension on which the coils are represented. Default: 2
sampling rate : float
The sampling rate in Hz. Default : 5000.
References
----------
.. [Hall2013] Emma L. Hall, Mary C. Stephenson, Darren Price, Peter
G. Morris (2013). Methodology for improved detection of low
concentration metabolites in MRS: Optimised combination of signals from
multi-element coil arrays. Neuroimage 86: 35-42.
.. [Wald1997] Wald, L. and Wright, S. (1997). Theory and application of
array coils in MR spectroscopy. NMR in Biomedicine, 10: 394-410.
.. [Keeler2005] Keeler, J (2005). Understanding NMR spectroscopy, second
edition. Wiley (West Sussex, UK). | Below is the instruction that describes the task:
### Input:
Combine data across coils based on the amplitude of the water peak,
according to:
.. math::
X = \sum_{i}{w_i S_i}
Where X is the resulting combined signal, $S_i$ are the individual coil
signals and $w_i$ are calculated as:
.. math::
w_i = mean(S_i) / var (S_i)
following [Hall2013]_. In addition, we apply a phase-correction, so that
all the phases of the signals from each coil are 0
Parameters
----------
data : float array
The data as it comes from the scanner, with shape (transients, echos,
coils, time points)
w_idx : list
The indices to the non-water-suppressed transients. Per default we take
the 2nd-4th transients. We dump the first one, because it seems to be
quite different than the rest of them...
coil_dim : int
The dimension on which the coils are represented. Default: 2
sampling rate : float
The sampling rate in Hz. Default : 5000.
References
----------
.. [Hall2013] Emma L. Hall, Mary C. Stephenson, Darren Price, Peter
G. Morris (2013). Methodology for improved detection of low
concentration metabolites in MRS: Optimised combination of signals from
multi-element coil arrays. Neuroimage 86: 35-42.
.. [Wald1997] Wald, L. and Wright, S. (1997). Theory and application of
array coils in MR spectroscopy. NMR in Biomedicine, 10: 394-410.
.. [Keeler2005] Keeler, J (2005). Understanding NMR spectroscopy, second
edition. Wiley (West Sussex, UK).
### Response:
def coil_combine(data, w_idx=[1,2,3], coil_dim=2, sampling_rate=5000.):
"""
Combine data across coils based on the amplitude of the water peak,
according to:
.. math::
X = \sum_{i}{w_i S_i}
Where X is the resulting combined signal, $S_i$ are the individual coil
signals and $w_i$ are calculated as:
.. math::
w_i = mean(S_i) / var (S_i)
following [Hall2013]_. In addition, we apply a phase-correction, so that
all the phases of the signals from each coil are 0
Parameters
----------
data : float array
The data as it comes from the scanner, with shape (transients, echos,
coils, time points)
w_idx : list
The indices to the non-water-suppressed transients. Per default we take
the 2nd-4th transients. We dump the first one, because it seems to be
quite different than the rest of them...
coil_dim : int
The dimension on which the coils are represented. Default: 2
sampling rate : float
The sampling rate in Hz. Default : 5000.
References
----------
.. [Hall2013] Emma L. Hall, Mary C. Stephenson, Darren Price, Peter
G. Morris (2013). Methodology for improved detection of low
concentration metabolites in MRS: Optimised combination of signals from
multi-element coil arrays. Neuroimage 86: 35-42.
.. [Wald1997] Wald, L. and Wright, S. (1997). Theory and application of
array coils in MR spectroscopy. NMR in Biomedicine, 10: 394-410.
.. [Keeler2005] Keeler, J (2005). Understanding NMR spectroscopy, second
edition. Wiley (West Sussex, UK).
"""
w_data, w_supp_data = separate_signals(data, w_idx)
fft_w = np.fft.fftshift(fft.fft(w_data))
fft_w_supp = np.fft.fftshift(fft.fft(w_supp_data))
freqs_w = np.linspace(-sampling_rate/2.0,
sampling_rate/2.0,
w_data.shape[-1])
# To determine phase and amplitude, fit a Lorentzian line-shape to each
# coils data in each trial:
# No bounds except for on the phase:
bounds = [(None,None),
(0,None),
(0,None),
(-np.pi, np.pi),
(None,None),
(None, None)]
n_params = len(bounds)
params = np.zeros(fft_w.shape[:-1] + (n_params,))
# Let's fit a Lorentzian line-shape to each one of these:
for repeat in range(w_data.shape[0]):
for echo in range(w_data.shape[1]):
for coil in range(w_data.shape[2]):
sig = fft_w[repeat, echo, coil]
# Use the private function to do this:
params[repeat, echo, coil] = _do_lorentzian_fit(freqs_w,
sig, bounds)
# The area parameter stands for the magnitude:
area_w = params[..., 1]
# In each coil, we derive S/(N^2):
s = np.mean(area_w.reshape(-1, area_w.shape[-1]), 0)
n = np.var(area_w.reshape(-1, area_w.shape[-1]), 0)
amp_weight = s/n
# Normalize to sum to 1:
amp_weight = amp_weight / np.sum(amp_weight)
# Next, we make sure that all the coils have the same phase. We will use
# the phase of the Lorentzian to align the phases:
phase_param = params[..., 3]
zero_phi_w = np.mean(phase_param.reshape(-1, phase_param.shape[-1]),0)
# This recalculates the weight with the phase alignment (see page 397 in
# Wald paper):
weight = amp_weight * np.exp(-1j * zero_phi_w)
# Multiply each one of the signals by its coil-weights and average across
# coils:
na = np.newaxis # Short-hand
# Collapse across coils for the combination in both the water
weighted_w_data = np.mean(np.fft.ifft(np.fft.fftshift(
weight[na, na, :, na] * fft_w)), coil_dim)
weighted_w_supp_data = np.mean(np.fft.ifft(np.fft.fftshift(
weight[na, na, : ,na] * fft_w_supp)) , coil_dim)
# Normalize each series by the sqrt(rms):
def normalize_this(x):
return x * (x.shape[-1] / (np.sum(np.abs(x))))
weighted_w_data = normalize_this(weighted_w_data)
weighted_w_supp_data = normalize_this(weighted_w_supp_data)
# Squeeze in case that some extraneous dimensions were introduced (can
# happen for SV data, for example)
return weighted_w_data.squeeze(), weighted_w_supp_data.squeeze() |
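Once the Lorentzian fits have produced per-coil water-peak areas and phases, the combination step itself is short: amplitude weights are mean/variance across repeats, normalised to sum to one, and each coil is rotated by its mean phase before averaging. A toy NumPy sketch of just that step, with made-up fit results standing in for the real data:

import numpy as np

rng = np.random.default_rng(0)
n_repeats, n_coils, n_points = 8, 4, 128

# Pretend per-coil water-peak areas and phases from the Lorentzian fits:
area = rng.normal(loc=[10.0, 8.0, 5.0, 2.0], scale=0.5, size=(n_repeats, n_coils))
phase = rng.normal(loc=[0.1, -0.3, 0.5, 1.0], scale=0.01, size=(n_repeats, n_coils))

# w_i = mean(S_i) / var(S_i), normalised to sum to 1:
amp_weight = area.mean(axis=0) / area.var(axis=0)
amp_weight /= amp_weight.sum()

# Phase-align each coil, then combine the complex coil signals:
weight = amp_weight * np.exp(-1j * phase.mean(axis=0))
signals = rng.normal(size=(n_coils, n_points)) + 1j * rng.normal(size=(n_coils, n_points))
combined = np.mean(weight[:, np.newaxis] * signals, axis=0)
print(combined.shape)  # (128,)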
def GetSOAPHeaders(self, create_method):
"""Returns the SOAP headers required for request authorization.
Args:
create_method: The SOAP library specific method used to instantiate SOAP
objects.
Returns:
A SOAP object containing the headers.
"""
header = create_method(self._SOAP_HEADER_CLASS)
header.networkCode = self._ad_manager_client.network_code
header.applicationName = ''.join([
self._ad_manager_client.application_name,
googleads.common.GenerateLibSig(self._PRODUCT_SIG)])
return header | Returns the SOAP headers required for request authorization.
Args:
create_method: The SOAP library specific method used to instantiate SOAP
objects.
Returns:
A SOAP object containing the headers. | Below is the instruction that describes the task:
### Input:
Returns the SOAP headers required for request authorization.
Args:
create_method: The SOAP library specific method used to instantiate SOAP
objects.
Returns:
A SOAP object containing the headers.
### Response:
def GetSOAPHeaders(self, create_method):
"""Returns the SOAP headers required for request authorization.
Args:
create_method: The SOAP library specific method used to instantiate SOAP
objects.
Returns:
A SOAP object containing the headers.
"""
header = create_method(self._SOAP_HEADER_CLASS)
header.networkCode = self._ad_manager_client.network_code
header.applicationName = ''.join([
self._ad_manager_client.application_name,
googleads.common.GenerateLibSig(self._PRODUCT_SIG)])
return header |
def immutable_worker(worker, state, pre_state, created):
"""Converts to an immutable slots class to handle internally."""
# noinspection PyUnresolvedReferences,PyProtectedMember
return WorkerData._make(chain(
(getattr(worker, f) for f in WORKER_OWN_FIELDS),
(state, pre_state, created),
(worker.heartbeats[-1] if worker.heartbeats else None,),
)) | Converts to an immutable slots class to handle internally. | Below is the instruction that describes the task:
### Input:
Converts to an immutable slots class to handle internally.
### Response:
def immutable_worker(worker, state, pre_state, created):
"""Converts to an immutable slots class to handle internally."""
# noinspection PyUnresolvedReferences,PyProtectedMember
return WorkerData._make(chain(
(getattr(worker, f) for f in WORKER_OWN_FIELDS),
(state, pre_state, created),
(worker.heartbeats[-1] if worker.heartbeats else None,),
)) |
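`WorkerData` and `WORKER_OWN_FIELDS` are defined elsewhere; the pattern is simply chaining several field sources into `namedtuple._make`. A minimal sketch with an assumed, much shorter field list:

from collections import namedtuple
from itertools import chain

WORKER_OWN_FIELDS = ('hostname', 'pid')  # assumed subset of the real field list
WorkerData = namedtuple('WorkerData',
                        WORKER_OWN_FIELDS + ('state', 'pre_state', 'created', 'last_heartbeat'))

class FakeWorker:
    hostname = 'worker-1'
    pid = 4242
    heartbeats = [1700000000.0, 1700000060.0]

worker = FakeWorker()
record = WorkerData._make(chain(
    (getattr(worker, f) for f in WORKER_OWN_FIELDS),
    ('ONLINE', 'OFFLINE', True),
    (worker.heartbeats[-1] if worker.heartbeats else None,),
))
print(record.last_heartbeat)  # 1700000060.0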
def _get_asam_configuration(driver_url=''):
'''
Return the configuration read from the master configuration
file or directory
'''
asam_config = __opts__['asam'] if 'asam' in __opts__ else None
if asam_config:
try:
for asam_server, service_config in six.iteritems(asam_config):
username = service_config.get('username', None)
password = service_config.get('password', None)
protocol = service_config.get('protocol', 'https')
port = service_config.get('port', 3451)
if not username or not password:
log.error(
'Username or Password has not been specified in the '
'master configuration for %s', asam_server
)
return False
ret = {
'platform_edit_url': "{0}://{1}:{2}/config/PlatformEdit.html".format(protocol, asam_server, port),
'platform_config_url': "{0}://{1}:{2}/config/PlatformConfig.html".format(protocol, asam_server, port),
'platformset_edit_url': "{0}://{1}:{2}/config/PlatformSetEdit.html".format(protocol, asam_server, port),
'platformset_config_url': "{0}://{1}:{2}/config/PlatformSetConfig.html".format(protocol, asam_server, port),
'username': username,
'password': password
}
if (not driver_url) or (driver_url == asam_server):
return ret
except Exception as exc:
log.error('Exception encountered: %s', exc)
return False
if driver_url:
log.error(
'Configuration for %s has not been specified in the master '
'configuration', driver_url
)
return False
return False | Return the configuration read from the master configuration
file or directory | Below is the instruction that describes the task:
### Input:
Return the configuration read from the master configuration
file or directory
### Response:
def _get_asam_configuration(driver_url=''):
'''
Return the configuration read from the master configuration
file or directory
'''
asam_config = __opts__['asam'] if 'asam' in __opts__ else None
if asam_config:
try:
for asam_server, service_config in six.iteritems(asam_config):
username = service_config.get('username', None)
password = service_config.get('password', None)
protocol = service_config.get('protocol', 'https')
port = service_config.get('port', 3451)
if not username or not password:
log.error(
'Username or Password has not been specified in the '
'master configuration for %s', asam_server
)
return False
ret = {
'platform_edit_url': "{0}://{1}:{2}/config/PlatformEdit.html".format(protocol, asam_server, port),
'platform_config_url': "{0}://{1}:{2}/config/PlatformConfig.html".format(protocol, asam_server, port),
'platformset_edit_url': "{0}://{1}:{2}/config/PlatformSetEdit.html".format(protocol, asam_server, port),
'platformset_config_url': "{0}://{1}:{2}/config/PlatformSetConfig.html".format(protocol, asam_server, port),
'username': username,
'password': password
}
if (not driver_url) or (driver_url == asam_server):
return ret
except Exception as exc:
log.error('Exception encountered: %s', exc)
return False
if driver_url:
log.error(
'Configuration for %s has not been specified in the master '
'configuration', driver_url
)
return False
return False |
def _increment(self, n=1):
"""Move forward n tokens in the stream."""
if self._cur_position >= self.num_tokens-1:
self._cur_position = self.num_tokens - 1
self._finished = True
else:
self._cur_position += n | Move forward n tokens in the stream. | Below is the instruction that describes the task:
### Input:
Move forward n tokens in the stream.
### Response:
def _increment(self, n=1):
"""Move forward n tokens in the stream."""
if self._cur_position >= self.num_tokens-1:
self._cur_position = self.num_tokens - 1
self._finished = True
else:
self._cur_position += n |
def _get_result_constructor(self):
""" Returns a function that will be used to instantiate query results """
if not self._values_list: # we want models
return self.model._construct_instance
elif self._flat_values_list: # the user has requested flattened list (1 value per row)
key = self._only_fields[0]
return lambda row: row[key]
else:
return lambda row: [row[f] for f in self._only_fields] | Returns a function that will be used to instantiate query results | Below is the instruction that describes the task:
### Input:
Returns a function that will be used to instantiate query results
### Response:
def _get_result_constructor(self):
""" Returns a function that will be used to instantiate query results """
if not self._values_list: # we want models
return self.model._construct_instance
elif self._flat_values_list: # the user has requested flattened list (1 value per row)
key = self._only_fields[0]
return lambda row: row[key]
else:
return lambda row: [row[f] for f in self._only_fields] |
def save_diskspace(fname, reason, config):
"""Overwrite a file in place with a short message to save disk.
This keeps files as a sanity check on processes working, but saves
disk by replacing them with a short message.
"""
if config["algorithm"].get("save_diskspace", False):
for ext in ["", ".bai"]:
if os.path.exists(fname + ext):
with open(fname + ext, "w") as out_handle:
out_handle.write("File removed to save disk space: %s" % reason) | Overwrite a file in place with a short message to save disk.
This keeps files as a sanity check on processes working, but saves
disk by replacing them with a short message. | Below is the instruction that describes the task:
### Input:
Overwrite a file in place with a short message to save disk.
This keeps files as a sanity check on processes working, but saves
disk by replacing them with a short message.
### Response:
def save_diskspace(fname, reason, config):
"""Overwrite a file in place with a short message to save disk.
This keeps files as a sanity check on processes working, but saves
disk by replacing them with a short message.
"""
if config["algorithm"].get("save_diskspace", False):
for ext in ["", ".bai"]:
if os.path.exists(fname + ext):
with open(fname + ext, "w") as out_handle:
out_handle.write("File removed to save disk space: %s" % reason) |
def cast(self, method, args={}, declare=None, retry=None,
retry_policy=None, type=None, exchange=None, **props):
"""Send message to actor. Discarding replies."""
retry = self.retry if retry is None else retry
body = {'class': self.name, 'method': method, 'args': args}
_retry_policy = self.retry_policy
if retry_policy: # merge default and custom policies.
_retry_policy = dict(_retry_policy, **retry_policy)
if type and type not in self.types:
raise ValueError('Unsupported type: {0}'.format(type))
elif not type:
type = ACTOR_TYPE.DIRECT
props.setdefault('routing_key', self.routing_key)
props.setdefault('serializer', self.serializer)
exchange = exchange or self.type_to_exchange[type]()
declare = (maybe_list(declare) or []) + [exchange]
with producers[self._connection].acquire(block=True) as producer:
return producer.publish(body, exchange=exchange, declare=declare,
retry=retry, retry_policy=retry_policy,
**props) | Send message to actor. Discarding replies. | Below is the instruction that describes the task:
### Input:
Send message to actor. Discarding replies.
### Response:
def cast(self, method, args={}, declare=None, retry=None,
retry_policy=None, type=None, exchange=None, **props):
"""Send message to actor. Discarding replies."""
retry = self.retry if retry is None else retry
body = {'class': self.name, 'method': method, 'args': args}
_retry_policy = self.retry_policy
if retry_policy: # merge default and custom policies.
_retry_policy = dict(_retry_policy, **retry_policy)
if type and type not in self.types:
raise ValueError('Unsupported type: {0}'.format(type))
elif not type:
type = ACTOR_TYPE.DIRECT
props.setdefault('routing_key', self.routing_key)
props.setdefault('serializer', self.serializer)
exchange = exchange or self.type_to_exchange[type]()
declare = (maybe_list(declare) or []) + [exchange]
with producers[self._connection].acquire(block=True) as producer:
return producer.publish(body, exchange=exchange, declare=declare,
retry=retry, retry_policy=retry_policy,
**props) |
def pass_to_pipeline_if_article(
self,
response,
source_domain,
original_url,
rss_title=None
):
"""
Responsible for passing a NewscrawlerItem to the pipeline if the
response contains an article.
:param obj response: the scrapy response to work on
:param str source_domain: the response's domain as set for the crawler
:param str original_url: the url set in the json file
:param str rss_title: the title extracted by an rssCrawler
:return NewscrawlerItem: NewscrawlerItem to pass to the pipeline
"""
if self.helper.heuristics.is_article(response, original_url):
return self.pass_to_pipeline(
response, source_domain, rss_title=None) | Responsible for passing a NewscrawlerItem to the pipeline if the
response contains an article.
:param obj response: the scrapy response to work on
:param str source_domain: the response's domain as set for the crawler
:param str original_url: the url set in the json file
:param str rss_title: the title extracted by an rssCrawler
:return NewscrawlerItem: NewscrawlerItem to pass to the pipeline | Below is the instruction that describes the task:
### Input:
Responsible for passing a NewscrawlerItem to the pipeline if the
response contains an article.
:param obj response: the scrapy response to work on
:param str source_domain: the response's domain as set for the crawler
:param str original_url: the url set in the json file
:param str rss_title: the title extracted by an rssCrawler
:return NewscrawlerItem: NewscrawlerItem to pass to the pipeline
### Response:
def pass_to_pipeline_if_article(
self,
response,
source_domain,
original_url,
rss_title=None
):
"""
Responsible for passing a NewscrawlerItem to the pipeline if the
response contains an article.
:param obj response: the scrapy response to work on
:param str source_domain: the response's domain as set for the crawler
:param str original_url: the url set in the json file
:param str rss_title: the title extracted by an rssCrawler
:return NewscrawlerItem: NewscrawlerItem to pass to the pipeline
"""
if self.helper.heuristics.is_article(response, original_url):
return self.pass_to_pipeline(
response, source_domain, rss_title=None) |
def update_service(self, service_id, **kwargs):
"""Update a service."""
body = self._formdata(kwargs, FastlyService.FIELDS)
content = self._fetch("/service/%s" % service_id, method="PUT", body=body)
return FastlyService(self, content) | Update a service. | Below is the instruction that describes the task:
### Input:
Update a service.
### Response:
def update_service(self, service_id, **kwargs):
"""Update a service."""
body = self._formdata(kwargs, FastlyService.FIELDS)
content = self._fetch("/service/%s" % service_id, method="PUT", body=body)
return FastlyService(self, content) |
def get_template_name(self):
"""
Get the template name of this page if defined or if a closer
parent has a defined template or
:data:`pages.settings.PAGE_DEFAULT_TEMPLATE` otherwise.
"""
template = self.get_template()
page_templates = settings.get_page_templates()
for t in page_templates:
if t[0] == template:
return t[1]
return template | Get the template name of this page if defined or if a closer
parent has a defined template or
:data:`pages.settings.PAGE_DEFAULT_TEMPLATE` otherwise. | Below is the instruction that describes the task:
### Input:
Get the template name of this page if defined or if a closer
parent has a defined template or
:data:`pages.settings.PAGE_DEFAULT_TEMPLATE` otherwise.
### Response:
def get_template_name(self):
"""
Get the template name of this page if defined or if a closer
parent has a defined template or
:data:`pages.settings.PAGE_DEFAULT_TEMPLATE` otherwise.
"""
template = self.get_template()
page_templates = settings.get_page_templates()
for t in page_templates:
if t[0] == template:
return t[1]
return template |
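`settings.get_page_templates()` returns (template path, display name) pairs, and the method maps the resolved template path to its label, falling back to the raw path. A standalone sketch of that lookup with a hypothetical template list:

# Hypothetical (path, label) pairs as returned by settings.get_page_templates().
PAGE_TEMPLATES = [
    ('pages/default.html', 'Default template'),
    ('pages/two-columns.html', 'Two columns'),
]

def template_display_name(template, choices=PAGE_TEMPLATES):
    # Fall back to the raw template path when no label is registered for it.
    return next((label for path, label in choices if path == template), template)

print(template_display_name('pages/two-columns.html'))  # Two columns
print(template_display_name('pages/custom.html'))       # pages/custom.html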
def score(package_path):
"""
Runs pylint on a package and returns a score
Lower score is better
:param package_path: path of the package to score
:return: number of score
"""
python_files = find_files(package_path, '*.py')
total_counter = Counter()
for python_file in python_files:
output = run_pylint(python_file)
counter = parse_pylint_output(output)
total_counter += counter
score_value = 0
for stat, count in total_counter.items():
score_value += SCORING_VALUES[stat] * count
return score_value / 5 | Runs pylint on a package and returns a score
Lower score is better
:param package_path: path of the package to score
:return: number of score | Below is the instruction that describes the task:
### Input:
Runs pylint on a package and returns a score
Lower score is better
:param package_path: path of the package to score
:return: number of score
### Response:
def score(package_path):
"""
Runs pylint on a package and returns a score
Lower score is better
:param package_path: path of the package to score
:return: number of score
"""
python_files = find_files(package_path, '*.py')
total_counter = Counter()
for python_file in python_files:
output = run_pylint(python_file)
counter = parse_pylint_output(output)
total_counter += counter
score_value = 0
for stat, count in total_counter.items():
score_value += SCORING_VALUES[stat] * count
return score_value / 5 |
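The score is just a weighted sum of pylint message counts divided by five. A toy sketch of that arithmetic with hypothetical weights, since `SCORING_VALUES` is defined elsewhere:

from collections import Counter

# Hypothetical per-category weights; the real SCORING_VALUES lives elsewhere.
SCORING_VALUES = {'convention': 1, 'refactor': 2, 'warning': 5, 'error': 10}

def weighted_score(counter: Counter) -> float:
    return sum(SCORING_VALUES[stat] * count for stat, count in counter.items()) / 5

counts = Counter({'convention': 12, 'warning': 3, 'error': 1})
print(weighted_score(counts))  # (12*1 + 3*5 + 1*10) / 5 = 7.4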
def replace_vcf_info(keyword, annotation, variant_line=None, variant_dict=None):
"""Replace the information of a info field of a vcf variant line or a
variant dict.
Arguments:
variant_line (str): A vcf formatted variant line
variant_dict (dict): A variant dictionary
keyword (str): The info field key
annotation (str): If the annotation is a key, value pair
this is the string that represents the value
Returns:
variant_line (str): An annotated variant line
"""
new_info = '{0}={1}'.format(keyword, annotation)
logger.debug("Replacing the variant information {0}".format(new_info))
fixed_variant = None
new_info_list = []
if variant_line:
logger.debug("Adding information to a variant line")
splitted_variant = variant_line.rstrip('\n').split('\t')
logger.debug("Adding information to splitted variant line")
old_info = splitted_variant[7]
if old_info == '.':
new_info_string = new_info
else:
splitted_info_string = old_info.split(';')
for info in splitted_info_string:
splitted_info_entry = info.split('=')
if splitted_info_entry[0] == keyword:
new_info_list.append(new_info)
else:
new_info_list.append(info)
new_info_string = ';'.join(new_info_list)
splitted_variant[7] = new_info_string
fixed_variant = '\t'.join(splitted_variant)
elif variant_dict:
logger.debug("Adding information to a variant dict")
old_info = variant_dict['INFO']
if old_info == '.':
variant_dict['INFO'] = new_info
else:
for info in old_info.split(';'):
splitted_info_entry = info.split('=')
if splitted_info_entry[0] == keyword:
new_info_list.append(new_info)
else:
new_info_list.append(info)
new_info_string = ';'.join(new_info_list)
variant_dict['INFO'] = new_info_string
fixed_variant = variant_dict
return fixed_variant | Replace the information of an info field of a vcf variant line or a
variant dict.
Arguments:
variant_line (str): A vcf formatted variant line
variant_dict (dict): A variant dictionary
keyword (str): The info field key
annotation (str): If the annotation is a key, value pair
this is the string that represents the value
Returns:
variant_line (str): An annotated variant line | Below is the instruction that describes the task:
### Input:
Replace the information of an info field of a vcf variant line or a
variant dict.
Arguments:
variant_line (str): A vcf formatted variant line
variant_dict (dict): A variant dictionary
keyword (str): The info field key
annotation (str): If the annotation is a key, value pair
this is the string that represents the value
Returns:
variant_line (str): An annotated variant line
### Response:
def replace_vcf_info(keyword, annotation, variant_line=None, variant_dict=None):
"""Replace the information of a info field of a vcf variant line or a
variant dict.
Arguments:
variant_line (str): A vcf formatted variant line
variant_dict (dict): A variant dictionary
keyword (str): The info field key
annotation (str): If the annotation is a key, value pair
this is the string that represents the value
Returns:
variant_line (str): An annotated variant line
"""
new_info = '{0}={1}'.format(keyword, annotation)
logger.debug("Replacing the variant information {0}".format(new_info))
fixed_variant = None
new_info_list = []
if variant_line:
logger.debug("Adding information to a variant line")
splitted_variant = variant_line.rstrip('\n').split('\t')
logger.debug("Adding information to splitted variant line")
old_info = splitted_variant[7]
if old_info == '.':
new_info_string = new_info
else:
splitted_info_string = old_info.split(';')
for info in splitted_info_string:
splitted_info_entry = info.split('=')
if splitted_info_entry[0] == keyword:
new_info_list.append(new_info)
else:
new_info_list.append(info)
new_info_string = ';'.join(new_info_list)
splitted_variant[7] = new_info_string
fixed_variant = '\t'.join(splitted_variant)
elif variant_dict:
logger.debug("Adding information to a variant dict")
old_info = variant_dict['INFO']
if old_info == '.':
variant_dict['INFO'] = new_info
else:
for info in old_info.split(';'):
splitted_info_entry = info.split('=')
if splitted_info_entry[0] == keyword:
new_info_list.append(new_info)
else:
new_info_list.append(info)
new_info_string = ';'.join(new_info_list)
variant_dict['INFO'] = new_info_string
fixed_variant = variant_dict
return fixed_variant |
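For the variant-line branch, the whole operation is string surgery: split the tab-separated columns, rewrite the matching key=value pair in the eighth (INFO) column, and re-join. A compact standalone sketch of that step on a sample line; logging and the dict branch are omitted.

def replace_info_key(variant_line: str, keyword: str, annotation: str) -> str:
    columns = variant_line.rstrip('\n').split('\t')
    new_entry = '{0}={1}'.format(keyword, annotation)
    if columns[7] == '.':
        columns[7] = new_entry
    else:
        entries = [new_entry if entry.split('=')[0] == keyword else entry
                   for entry in columns[7].split(';')]
        columns[7] = ';'.join(entries)
    return '\t'.join(columns)

line = '1\t100\t.\tA\tT\t30\tPASS\tDP=10;AF=0.5'
print(replace_info_key(line, 'AF', '0.9'))  # INFO column becomes DP=10;AF=0.9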
def primitive(self, primitive):
"""Entry from Python primitive."""
self.entry_number = primitive['entry-number']
self.item_hash = primitive['item-hash']
self.timestamp = primitive['timestamp'] | Entry from Python primitive. | Below is the instruction that describes the task:
### Input:
Entry from Python primitive.
### Response:
def primitive(self, primitive):
"""Entry from Python primitive."""
self.entry_number = primitive['entry-number']
self.item_hash = primitive['item-hash']
self.timestamp = primitive['timestamp'] |
def interpolate_exe(self, testString):
"""
Replace testString with a path to an executable based on the format.
If this looks like
${which:lalapps_tmpltbank}
it will return the equivalent of which(lalapps_tmpltbank)
Otherwise it will return an unchanged string.
Parameters
-----------
testString : string
The input string
Returns
--------
newString : string
The output string.
"""
# First check if any interpolation is needed and abort if not
testString = testString.strip()
if not (testString.startswith('${') and testString.endswith('}')):
return testString
# This may not be an exe interpolation, so even if it has ${XXX} form
# I may not have to do anything
newString = testString
# Strip the ${ and }
testString = testString[2:-1]
testList = testString.split(':')
# Maybe we can add a few different possibilities for substitution
if len(testList) == 2:
if testList[0] == 'which':
newString = distutils.spawn.find_executable(testList[1])
if not newString:
errmsg = "Cannot find exe %s in your path " %(testList[1])
errmsg += "and you specified ${which:%s}." %(testList[1])
raise ValueError(errmsg)
return newString | Replace testString with a path to an executable based on the format.
If this looks like
${which:lalapps_tmpltbank}
it will return the equivalent of which(lalapps_tmpltbank)
Otherwise it will return an unchanged string.
Parameters
-----------
testString : string
The input string
Returns
--------
newString : string
The output string. | Below is the instruction that describes the task:
### Input:
Replace testString with a path to an executable based on the format.
If this looks like
${which:lalapps_tmpltbank}
it will return the equivalent of which(lalapps_tmpltbank)
Otherwise it will return an unchanged string.
Parameters
-----------
testString : string
The input string
Returns
--------
newString : string
The output string.
### Response:
def interpolate_exe(self, testString):
"""
Replace testString with a path to an executable based on the format.
If this looks like
${which:lalapps_tmpltbank}
it will return the equivalent of which(lalapps_tmpltbank)
Otherwise it will return an unchanged string.
Parameters
-----------
testString : string
The input string
Returns
--------
newString : string
The output string.
"""
# First check if any interpolation is needed and abort if not
testString = testString.strip()
if not (testString.startswith('${') and testString.endswith('}')):
return testString
# This may not be an exe interpolation, so even if it has ${XXX} form
# I may not have to do anything
newString = testString
# Strip the ${ and }
testString = testString[2:-1]
testList = testString.split(':')
# Maybe we can add a few different possibilities for substitution
if len(testList) == 2:
if testList[0] == 'which':
newString = distutils.spawn.find_executable(testList[1])
if not newString:
errmsg = "Cannot find exe %s in your path " %(testList[1])
errmsg += "and you specified ${which:%s}." %(testList[1])
raise ValueError(errmsg)
return newString |
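The same `${which:...}` substitution can be written more compactly with `shutil.which`, the modern replacement for `distutils.spawn.find_executable`; this is a sketch of the idea rather than the workflow class's actual method.

import shutil

def interpolate_exe(test_string: str) -> str:
    test_string = test_string.strip()
    if not (test_string.startswith('${') and test_string.endswith('}')):
        return test_string
    parts = test_string[2:-1].split(':')
    if len(parts) == 2 and parts[0] == 'which':
        found = shutil.which(parts[1])
        if not found:
            raise ValueError('Cannot find exe %s in your path' % parts[1])
        return found
    return test_string

print(interpolate_exe('${which:python3}'))  # e.g. /usr/bin/python3 (raises if not on PATH)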
def client_get(self, url, **kwargs):
"""Send GET request with given url."""
response = requests.get(self.make_url(url), headers=self.headers)
if not response.ok:
raise Exception(
'{status}: {reason}.\nCircleCI Status NOT OK'.format(
status=response.status_code, reason=response.reason))
return response.json() | Send GET request with given url. | Below is the instruction that describes the task:
### Input:
Send GET request with given url.
### Response:
def client_get(self, url, **kwargs):
"""Send GET request with given url."""
response = requests.get(self.make_url(url), headers=self.headers)
if not response.ok:
raise Exception(
'{status}: {reason}.\nCircleCI Status NOT OK'.format(
status=response.status_code, reason=response.reason))
return response.json() |
def fstab_present(name, fs_file, fs_vfstype, fs_mntops='defaults',
fs_freq=0, fs_passno=0, mount_by=None,
config='/etc/fstab', mount=True, match_on='auto'):
'''
Makes sure that a fstab mount point is present.
name
The name of block device. Can be any valid fs_spec value.
fs_file
Mount point (target) for the filesystem.
fs_vfstype
The type of the filesystem (e.g. ext4, xfs, btrfs, ...)
fs_mntops
The mount options associated with the filesystem. Default is
``defaults``.
fs_freq
Field is used by dump to determine which fs need to be
dumped. Default is ``0``
fs_passno
Field is used by fsck to determine the order in which
filesystem checks are done at boot time. Default is ``0``
mount_by
Select the final value for fs_spec. Can be [``None``,
``device``, ``label``, ``uuid``, ``partlabel``,
``partuuid``]. If ``None``, the value for fs_spec will be the
parameter ``name``; otherwise it will search for the correct
value based on the device name. For example, for ``uuid``, the
value for fs_spec will be of type 'UUID=xxx' instead of the
device name set in ``name``.
config
Place where the fstab file lives. Default is ``/etc/fstab``
mount
Set if the mount should be mounted immediately. Default is
``True``
match_on
A name or list of fstab properties on which this state should
be applied. Default is ``auto``, a special value indicating
to guess based on fstype. In general, ``auto`` matches on
name for recognized special devices and device otherwise.
'''
ret = {
'name': name,
'result': False,
'changes': {},
'comment': [],
}
# Adjust fs_mntops based on the OS
if fs_mntops == 'defaults':
if __grains__['os'] in ['MacOS', 'Darwin']:
fs_mntops = 'noowners'
elif __grains__['os'] == 'AIX':
fs_mntops = ''
# Adjust the config file based on the OS
if config == '/etc/fstab':
if __grains__['os'] in ['MacOS', 'Darwin']:
config = '/etc/auto_salt'
elif __grains__['os'] == 'AIX':
config = '/etc/filesystems'
if not fs_file == '/':
fs_file = fs_file.rstrip('/')
fs_spec = _convert_to(name, mount_by)
# Validate that the device is valid after the conversion
if not fs_spec:
msg = 'Device {} cannot be converted to {}'
ret['comment'].append(msg.format(name, mount_by))
return ret
if __opts__['test']:
if __grains__['os'] in ['MacOS', 'Darwin']:
out = __salt__['mount.set_automaster'](name=fs_file,
device=fs_spec,
fstype=fs_vfstype,
opts=fs_mntops,
config=config,
test=True)
elif __grains__['os'] == 'AIX':
out = __salt__['mount.set_filesystems'](name=fs_file,
device=fs_spec,
fstype=fs_vfstype,
opts=fs_mntops,
mount=mount,
config=config,
test=True,
match_on=match_on)
else:
out = __salt__['mount.set_fstab'](name=fs_file,
device=fs_spec,
fstype=fs_vfstype,
opts=fs_mntops,
dump=fs_freq,
pass_num=fs_passno,
config=config,
test=True,
match_on=match_on)
ret['result'] = None
if out == 'present':
msg = '{} entry is already in {}.'
ret['comment'].append(msg.format(fs_file, config))
elif out == 'new':
msg = '{} entry will be written in {}.'
ret['comment'].append(msg.format(fs_file, config))
elif out == 'change':
msg = '{} entry will be updated in {}.'
ret['comment'].append(msg.format(fs_file, config))
else:
ret['result'] = False
msg = '{} entry cannot be created in {}: {}.'
ret['comment'].append(msg.format(fs_file, config, out))
return ret
if __grains__['os'] in ['MacOS', 'Darwin']:
out = __salt__['mount.set_automaster'](name=fs_file,
device=fs_spec,
fstype=fs_vfstype,
opts=fs_mntops,
config=config)
elif __grains__['os'] == 'AIX':
out = __salt__['mount.set_filesystems'](name=fs_file,
device=fs_spec,
fstype=fs_vfstype,
opts=fs_mntops,
mount=mount,
config=config,
match_on=match_on)
else:
out = __salt__['mount.set_fstab'](name=fs_file,
device=fs_spec,
fstype=fs_vfstype,
opts=fs_mntops,
dump=fs_freq,
pass_num=fs_passno,
config=config,
match_on=match_on)
ret['result'] = True
if out == 'present':
msg = '{} entry was already in {}.'
ret['comment'].append(msg.format(fs_file, config))
elif out == 'new':
ret['changes']['persist'] = out
msg = '{} entry added in {}.'
ret['comment'].append(msg.format(fs_file, config))
elif out == 'change':
ret['changes']['persist'] = out
msg = '{} entry updated in {}.'
ret['comment'].append(msg.format(fs_file, config))
else:
ret['result'] = False
msg = '{} entry cannot be changed in {}: {}.'
ret['comment'].append(msg.format(fs_file, config, out))
return ret | Makes sure that a fstab mount point is present.
name
The name of block device. Can be any valid fs_spec value.
fs_file
Mount point (target) for the filesystem.
fs_vfstype
The type of the filesystem (e.g. ext4, xfs, btrfs, ...)
fs_mntops
The mount options associated with the filesystem. Default is
``defaults``.
fs_freq
Field is used by dump to determine which fs need to be
dumped. Default is ``0``
fs_passno
Field is used by fsck to determine the order in which
filesystem checks are done at boot time. Default is ``0``
mount_by
Select the final value for fs_spec. Can be [``None``,
``device``, ``label``, ``uuid``, ``partlabel``,
``partuuid``]. If ``None``, the value for fs_spec will be the
parameter ``name``; otherwise it will search for the correct
value based on the device name. For example, for ``uuid``, the
value for fs_spec will be of type 'UUID=xxx' instead of the
device name set in ``name``.
config
Place where the fstab file lives. Default is ``/etc/fstab``
mount
Set if the mount should be mounted immediately. Default is
``True``
match_on
A name or list of fstab properties on which this state should
be applied. Default is ``auto``, a special value indicating
to guess based on fstype. In general, ``auto`` matches on
name for recognized special devices and device otherwise. | Below is the instruction that describes the task:
### Input:
Makes sure that a fstab mount point is present.
name
The name of block device. Can be any valid fs_spec value.
fs_file
Mount point (target) for the filesystem.
fs_vfstype
The type of the filesystem (e.g. ext4, xfs, btrfs, ...)
fs_mntops
The mount options associated with the filesystem. Default is
``defaults``.
fs_freq
Field is used by dump to determine which fs need to be
dumped. Default is ``0``
fs_passno
Field is used by fsck to determine the order in which
filesystem checks are done at boot time. Default is ``0``
mount_by
Select the final value for fs_spec. Can be [``None``,
``device``, ``label``, ``uuid``, ``partlabel``,
``partuuid``]. If ``None``, the value for fs_spec will be the
parameter ``name``; otherwise it will search for the correct
value based on the device name. For example, for ``uuid``, the
value for fs_spec will be of type 'UUID=xxx' instead of the
device name set in ``name``.
config
Place where the fstab file lives. Default is ``/etc/fstab``
mount
Set if the mount should be mounted immediately. Default is
``True``
match_on
A name or list of fstab properties on which this state should
be applied. Default is ``auto``, a special value indicating
to guess based on fstype. In general, ``auto`` matches on
name for recognized special devices and device otherwise.
### Response:
def fstab_present(name, fs_file, fs_vfstype, fs_mntops='defaults',
fs_freq=0, fs_passno=0, mount_by=None,
config='/etc/fstab', mount=True, match_on='auto'):
'''
Makes sure that a fstab mount point is present.
name
The name of block device. Can be any valid fs_spec value.
fs_file
Mount point (target) for the filesystem.
fs_vfstype
The type of the filesystem (e.g. ext4, xfs, btrfs, ...)
fs_mntops
The mount options associated with the filesystem. Default is
``defaults``.
fs_freq
Field is used by dump to determine which fs need to be
dumped. Default is ``0``
fs_passno
Field is used by fsck to determine the order in which
filesystem checks are done at boot time. Default is ``0``
mount_by
Select the final value for fs_spec. Can be [``None``,
``device``, ``label``, ``uuid``, ``partlabel``,
``partuuid``]. If ``None``, the value for fs_spec will be the
parameter ``name``; otherwise it will search for the correct
value based on the device name. For example, for ``uuid``, the
value for fs_spec will be of type 'UUID=xxx' instead of the
device name set in ``name``.
config
Place where the fstab file lives. Default is ``/etc/fstab``
mount
Set if the mount should be mounted immediately. Default is
``True``
match_on
A name or list of fstab properties on which this state should
be applied. Default is ``auto``, a special value indicating
to guess based on fstype. In general, ``auto`` matches on
name for recognized special devices and device otherwise.
'''
ret = {
'name': name,
'result': False,
'changes': {},
'comment': [],
}
# Adjust fs_mntops based on the OS
if fs_mntops == 'defaults':
if __grains__['os'] in ['MacOS', 'Darwin']:
fs_mntops = 'noowners'
elif __grains__['os'] == 'AIX':
fs_mntops = ''
# Adjust the config file based on the OS
if config == '/etc/fstab':
if __grains__['os'] in ['MacOS', 'Darwin']:
config = '/etc/auto_salt'
elif __grains__['os'] == 'AIX':
config = '/etc/filesystems'
if not fs_file == '/':
fs_file = fs_file.rstrip('/')
fs_spec = _convert_to(name, mount_by)
# Validate that the device is valid after the conversion
if not fs_spec:
msg = 'Device {} cannot be converted to {}'
ret['comment'].append(msg.format(name, mount_by))
return ret
if __opts__['test']:
if __grains__['os'] in ['MacOS', 'Darwin']:
out = __salt__['mount.set_automaster'](name=fs_file,
device=fs_spec,
fstype=fs_vfstype,
opts=fs_mntops,
config=config,
test=True)
elif __grains__['os'] == 'AIX':
out = __salt__['mount.set_filesystems'](name=fs_file,
device=fs_spec,
fstype=fs_vfstype,
opts=fs_mntops,
mount=mount,
config=config,
test=True,
match_on=match_on)
else:
out = __salt__['mount.set_fstab'](name=fs_file,
device=fs_spec,
fstype=fs_vfstype,
opts=fs_mntops,
dump=fs_freq,
pass_num=fs_passno,
config=config,
test=True,
match_on=match_on)
ret['result'] = None
if out == 'present':
msg = '{} entry is already in {}.'
ret['comment'].append(msg.format(fs_file, config))
elif out == 'new':
msg = '{} entry will be written in {}.'
ret['comment'].append(msg.format(fs_file, config))
elif out == 'change':
msg = '{} entry will be updated in {}.'
ret['comment'].append(msg.format(fs_file, config))
else:
ret['result'] = False
msg = '{} entry cannot be created in {}: {}.'
ret['comment'].append(msg.format(fs_file, config, out))
return ret
if __grains__['os'] in ['MacOS', 'Darwin']:
out = __salt__['mount.set_automaster'](name=fs_file,
device=fs_spec,
fstype=fs_vfstype,
opts=fs_mntops,
config=config)
elif __grains__['os'] == 'AIX':
out = __salt__['mount.set_filesystems'](name=fs_file,
device=fs_spec,
fstype=fs_vfstype,
opts=fs_mntops,
mount=mount,
config=config,
match_on=match_on)
else:
out = __salt__['mount.set_fstab'](name=fs_file,
device=fs_spec,
fstype=fs_vfstype,
opts=fs_mntops,
dump=fs_freq,
pass_num=fs_passno,
config=config,
match_on=match_on)
ret['result'] = True
if out == 'present':
msg = '{} entry was already in {}.'
ret['comment'].append(msg.format(fs_file, config))
elif out == 'new':
ret['changes']['persist'] = out
msg = '{} entry added in {}.'
ret['comment'].append(msg.format(fs_file, config))
elif out == 'change':
ret['changes']['persist'] = out
msg = '{} entry updated in {}.'
ret['comment'].append(msg.format(fs_file, config))
else:
ret['result'] = False
msg = '{} entry cannot be changed in {}: {}.'
ret['comment'].append(msg.format(fs_file, config, out))
return ret |
def do_PROPPATCH(self, environ, start_response):
"""Handle PROPPATCH request to set or remove a property.
@see http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH
"""
path = environ["PATH_INFO"]
res = self._davProvider.get_resource_inst(path, environ)
# Only accept Depth: 0 (but assume this, if omitted)
environ.setdefault("HTTP_DEPTH", "0")
if environ["HTTP_DEPTH"] != "0":
self._fail(HTTP_BAD_REQUEST, "Depth must be '0'.")
if res is None:
self._fail(HTTP_NOT_FOUND)
self._evaluate_if_headers(res, environ)
self._check_write_permission(res, "0", environ)
# Parse request
requestEL = util.parse_xml_body(environ)
if requestEL.tag != "{DAV:}propertyupdate":
self._fail(HTTP_BAD_REQUEST)
# Create a list of update request tuples: (name, value)
propupdatelist = []
for ppnode in requestEL:
propupdatemethod = None
if ppnode.tag == "{DAV:}remove":
propupdatemethod = "remove"
elif ppnode.tag == "{DAV:}set":
propupdatemethod = "set"
else:
self._fail(
HTTP_BAD_REQUEST, "Unknown tag (expected 'set' or 'remove')."
)
for propnode in ppnode:
if propnode.tag != "{DAV:}prop":
self._fail(HTTP_BAD_REQUEST, "Unknown tag (expected 'prop').")
for propertynode in propnode:
propvalue = None
if propupdatemethod == "remove":
propvalue = None # Mark as 'remove'
if len(propertynode) > 0:
# 14.23: All the XML elements in a 'prop' XML
# element inside of a 'remove' XML element MUST be
# empty
self._fail(
HTTP_BAD_REQUEST,
"prop element must be empty for 'remove'.",
)
else:
propvalue = propertynode
propupdatelist.append((propertynode.tag, propvalue))
# Apply updates in SIMULATION MODE and create a result list (name,
# result)
successflag = True
writeresultlist = []
for (name, propvalue) in propupdatelist:
try:
res.set_property_value(name, propvalue, dry_run=True)
except Exception as e:
writeresult = as_DAVError(e)
else:
writeresult = "200 OK"
writeresultlist.append((name, writeresult))
successflag = successflag and writeresult == "200 OK"
# Generate response list of 2-tuples (name, value)
# <value> is None on success, or an instance of DAVError
propResponseList = []
responsedescription = []
if not successflag:
# If dry run failed: convert all OK to FAILED_DEPENDENCY.
for (name, result) in writeresultlist:
if result == "200 OK":
result = DAVError(HTTP_FAILED_DEPENDENCY)
elif isinstance(result, DAVError):
responsedescription.append(result.get_user_info())
propResponseList.append((name, result))
else:
# Dry-run succeeded: set properties again, this time in 'real' mode
# In theory, there should be no exceptions thrown here, but this is
            # real life...
for (name, propvalue) in propupdatelist:
try:
res.set_property_value(name, propvalue, dry_run=False)
# Set value to None, so the response xml contains empty tags
propResponseList.append((name, None))
except Exception as e:
e = as_DAVError(e)
propResponseList.append((name, e))
responsedescription.append(e.get_user_info())
# Generate response XML
multistatusEL = xml_tools.make_multistatus_el()
href = res.get_href()
util.add_property_response(multistatusEL, href, propResponseList)
if responsedescription:
etree.SubElement(
multistatusEL, "{DAV:}responsedescription"
).text = "\n".join(responsedescription)
# Send response
return util.send_multi_status_response(environ, start_response, multistatusEL) | Handle PROPPATCH request to set or remove a property.
@see http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH | Below is the the instruction that describes the task:
### Input:
Handle PROPPATCH request to set or remove a property.
@see http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH
### Response:
def do_PROPPATCH(self, environ, start_response):
"""Handle PROPPATCH request to set or remove a property.
@see http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH
"""
path = environ["PATH_INFO"]
res = self._davProvider.get_resource_inst(path, environ)
# Only accept Depth: 0 (but assume this, if omitted)
environ.setdefault("HTTP_DEPTH", "0")
if environ["HTTP_DEPTH"] != "0":
self._fail(HTTP_BAD_REQUEST, "Depth must be '0'.")
if res is None:
self._fail(HTTP_NOT_FOUND)
self._evaluate_if_headers(res, environ)
self._check_write_permission(res, "0", environ)
# Parse request
requestEL = util.parse_xml_body(environ)
if requestEL.tag != "{DAV:}propertyupdate":
self._fail(HTTP_BAD_REQUEST)
# Create a list of update request tuples: (name, value)
propupdatelist = []
for ppnode in requestEL:
propupdatemethod = None
if ppnode.tag == "{DAV:}remove":
propupdatemethod = "remove"
elif ppnode.tag == "{DAV:}set":
propupdatemethod = "set"
else:
self._fail(
HTTP_BAD_REQUEST, "Unknown tag (expected 'set' or 'remove')."
)
for propnode in ppnode:
if propnode.tag != "{DAV:}prop":
self._fail(HTTP_BAD_REQUEST, "Unknown tag (expected 'prop').")
for propertynode in propnode:
propvalue = None
if propupdatemethod == "remove":
propvalue = None # Mark as 'remove'
if len(propertynode) > 0:
# 14.23: All the XML elements in a 'prop' XML
# element inside of a 'remove' XML element MUST be
# empty
self._fail(
HTTP_BAD_REQUEST,
"prop element must be empty for 'remove'.",
)
else:
propvalue = propertynode
propupdatelist.append((propertynode.tag, propvalue))
# Apply updates in SIMULATION MODE and create a result list (name,
# result)
successflag = True
writeresultlist = []
for (name, propvalue) in propupdatelist:
try:
res.set_property_value(name, propvalue, dry_run=True)
except Exception as e:
writeresult = as_DAVError(e)
else:
writeresult = "200 OK"
writeresultlist.append((name, writeresult))
successflag = successflag and writeresult == "200 OK"
# Generate response list of 2-tuples (name, value)
# <value> is None on success, or an instance of DAVError
propResponseList = []
responsedescription = []
if not successflag:
# If dry run failed: convert all OK to FAILED_DEPENDENCY.
for (name, result) in writeresultlist:
if result == "200 OK":
result = DAVError(HTTP_FAILED_DEPENDENCY)
elif isinstance(result, DAVError):
responsedescription.append(result.get_user_info())
propResponseList.append((name, result))
else:
# Dry-run succeeded: set properties again, this time in 'real' mode
# In theory, there should be no exceptions thrown here, but this is
            # real life...
for (name, propvalue) in propupdatelist:
try:
res.set_property_value(name, propvalue, dry_run=False)
# Set value to None, so the response xml contains empty tags
propResponseList.append((name, None))
except Exception as e:
e = as_DAVError(e)
propResponseList.append((name, e))
responsedescription.append(e.get_user_info())
# Generate response XML
multistatusEL = xml_tools.make_multistatus_el()
href = res.get_href()
util.add_property_response(multistatusEL, href, propResponseList)
if responsedescription:
etree.SubElement(
multistatusEL, "{DAV:}responsedescription"
).text = "\n".join(responsedescription)
# Send response
return util.send_multi_status_response(environ, start_response, multistatusEL) |
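For reference, a propertyupdate body of the shape this handler parses is sketched below as a plain Python string; the property names and the non-DAV namespace are invented for illustration and are not taken from the code above.
# Illustrative PROPPATCH request body matching what do_PROPPATCH expects:
# a {DAV:}propertyupdate element holding {DAV:}set / {DAV:}remove blocks,
# each wrapping a {DAV:}prop element (remove props must be empty).
proppatch_body = (
    '<?xml version="1.0" encoding="utf-8"?>'
    '<D:propertyupdate xmlns:D="DAV:" xmlns:Z="urn:example:props">'
    '<D:set><D:prop><Z:author>Jane Doe</Z:author></D:prop></D:set>'
    '<D:remove><D:prop><Z:obsolete/></D:prop></D:remove>'
    '</D:propertyupdate>'
)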
def _add_image_part(self, image):
"""
Return an |ImagePart| instance newly created from image and appended
to the collection.
"""
partname = self._next_image_partname(image.ext)
image_part = ImagePart.from_image(image, partname)
self.append(image_part)
return image_part | Return an |ImagePart| instance newly created from image and appended
to the collection. | Below is the the instruction that describes the task:
### Input:
Return an |ImagePart| instance newly created from image and appended
to the collection.
### Response:
def _add_image_part(self, image):
"""
Return an |ImagePart| instance newly created from image and appended
to the collection.
"""
partname = self._next_image_partname(image.ext)
image_part = ImagePart.from_image(image, partname)
self.append(image_part)
return image_part |
def generate_ucsm_handle(hostname, username, password):
""" Creates UCS Manager handle object and establishes a session with
UCS Manager.
:param hostname: UCS Manager hostname or IP-address
:param username: Username to login to UCS Manager
:param password: Login user password
:raises UcsConnectionError: In case of error.
"""
ucs_handle = UcsHandle()
try:
success = ucs_handle.Login(hostname, username, password)
except UcsException as e:
print("Cisco client exception %(msg)s" % (e.message))
raise exception.UcsConnectionError(message=e.message)
return success, ucs_handle | Creates UCS Manager handle object and establishes a session with
UCS Manager.
:param hostname: UCS Manager hostname or IP-address
:param username: Username to login to UCS Manager
:param password: Login user password
:raises UcsConnectionError: In case of error. | Below is the the instruction that describes the task:
### Input:
Creates UCS Manager handle object and establishes a session with
UCS Manager.
:param hostname: UCS Manager hostname or IP-address
:param username: Username to login to UCS Manager
:param password: Login user password
:raises UcsConnectionError: In case of error.
### Response:
def generate_ucsm_handle(hostname, username, password):
""" Creates UCS Manager handle object and establishes a session with
UCS Manager.
:param hostname: UCS Manager hostname or IP-address
:param username: Username to login to UCS Manager
:param password: Login user password
:raises UcsConnectionError: In case of error.
"""
ucs_handle = UcsHandle()
try:
success = ucs_handle.Login(hostname, username, password)
except UcsException as e:
print("Cisco client exception %(msg)s" % (e.message))
raise exception.UcsConnectionError(message=e.message)
return success, ucs_handle |
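A minimal usage sketch for the helper above; the hostname and credentials are placeholders, and the Logout() call assumes the usual counterpart to Login() in the Cisco UcsSdk.
# Hypothetical call; values are placeholders for illustration only.
success, handle = generate_ucsm_handle('ucsm.example.com', 'admin', 'secret')
if success:
    handle.Logout()  # assumed UcsSdk counterpart to Login(); closes the session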
def recursive_cov(self, cov, length, mean, chain, scaling=1, epsilon=0):
r"""Compute the covariance recursively.
Return the new covariance and the new mean.
.. math::
C_k & = \frac{1}{k-1} (\sum_{i=1}^k x_i x_i^T - k\bar{x_k}\bar{x_k}^T)
C_n & = \frac{1}{n-1} (\sum_{i=1}^k x_i x_i^T + \sum_{i=k+1}^n x_i x_i^T - n\bar{x_n}\bar{x_n}^T)
& = \frac{1}{n-1} ((k-1)C_k + k\bar{x_k}\bar{x_k}^T + \sum_{i=k+1}^n x_i x_i^T - n\bar{x_n}\bar{x_n}^T)
:Parameters:
- cov : matrix
Previous covariance matrix.
- length : int
Length of chain used to compute the previous covariance.
- mean : array
Previous mean.
- chain : array
Sample used to update covariance.
- scaling : float
Scaling parameter
- epsilon : float
Set to a small value to avoid singular matrices.
"""
n = length + len(chain)
k = length
new_mean = self.recursive_mean(mean, length, chain)
t0 = k * np.outer(mean, mean)
t1 = np.dot(chain.T, chain)
t2 = n * np.outer(new_mean, new_mean)
t3 = epsilon * np.eye(cov.shape[0])
        new_cov = (k - 1) / (n - 1.) * cov + scaling / (n - 1.) * (t0 + t1 - t2 + t3)
return new_cov, new_mean | r"""Compute the covariance recursively.
Return the new covariance and the new mean.
.. math::
C_k & = \frac{1}{k-1} (\sum_{i=1}^k x_i x_i^T - k\bar{x_k}\bar{x_k}^T)
C_n & = \frac{1}{n-1} (\sum_{i=1}^k x_i x_i^T + \sum_{i=k+1}^n x_i x_i^T - n\bar{x_n}\bar{x_n}^T)
& = \frac{1}{n-1} ((k-1)C_k + k\bar{x_k}\bar{x_k}^T + \sum_{i=k+1}^n x_i x_i^T - n\bar{x_n}\bar{x_n}^T)
:Parameters:
- cov : matrix
Previous covariance matrix.
- length : int
Length of chain used to compute the previous covariance.
- mean : array
Previous mean.
- chain : array
Sample used to update covariance.
- scaling : float
Scaling parameter
- epsilon : float
Set to a small value to avoid singular matrices. | Below is the the instruction that describes the task:
### Input:
r"""Compute the covariance recursively.
Return the new covariance and the new mean.
.. math::
C_k & = \frac{1}{k-1} (\sum_{i=1}^k x_i x_i^T - k\bar{x_k}\bar{x_k}^T)
C_n & = \frac{1}{n-1} (\sum_{i=1}^k x_i x_i^T + \sum_{i=k+1}^n x_i x_i^T - n\bar{x_n}\bar{x_n}^T)
& = \frac{1}{n-1} ((k-1)C_k + k\bar{x_k}\bar{x_k}^T + \sum_{i=k+1}^n x_i x_i^T - n\bar{x_n}\bar{x_n}^T)
:Parameters:
- cov : matrix
Previous covariance matrix.
- length : int
Length of chain used to compute the previous covariance.
- mean : array
Previous mean.
- chain : array
Sample used to update covariance.
- scaling : float
Scaling parameter
- epsilon : float
Set to a small value to avoid singular matrices.
### Response:
def recursive_cov(self, cov, length, mean, chain, scaling=1, epsilon=0):
r"""Compute the covariance recursively.
Return the new covariance and the new mean.
.. math::
C_k & = \frac{1}{k-1} (\sum_{i=1}^k x_i x_i^T - k\bar{x_k}\bar{x_k}^T)
C_n & = \frac{1}{n-1} (\sum_{i=1}^k x_i x_i^T + \sum_{i=k+1}^n x_i x_i^T - n\bar{x_n}\bar{x_n}^T)
& = \frac{1}{n-1} ((k-1)C_k + k\bar{x_k}\bar{x_k}^T + \sum_{i=k+1}^n x_i x_i^T - n\bar{x_n}\bar{x_n}^T)
:Parameters:
- cov : matrix
Previous covariance matrix.
- length : int
Length of chain used to compute the previous covariance.
- mean : array
Previous mean.
- chain : array
Sample used to update covariance.
- scaling : float
Scaling parameter
- epsilon : float
Set to a small value to avoid singular matrices.
"""
n = length + len(chain)
k = length
new_mean = self.recursive_mean(mean, length, chain)
t0 = k * np.outer(mean, mean)
t1 = np.dot(chain.T, chain)
t2 = n * np.outer(new_mean, new_mean)
t3 = epsilon * np.eye(cov.shape[0])
        new_cov = (k - 1) / (n - 1.) * cov + scaling / (n - 1.) * (t0 + t1 - t2 + t3)
return new_cov, new_mean |
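As a sanity check of the recursion (with scaling=1 and epsilon=0), a standalone numpy sketch, independent of the class above, reproduces the batch sample covariance; the data here are made up.
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(50, 3))
k = 30
head, tail = x[:k], x[k:]

cov_k = np.cov(head, rowvar=False)            # C_k over the first k samples
mean_k = head.mean(axis=0)
n, mean_n = len(x), x.mean(axis=0)

t0 = k * np.outer(mean_k, mean_k)
t1 = tail.T @ tail
t2 = n * np.outer(mean_n, mean_n)
cov_n = ((k - 1) * cov_k + t0 + t1 - t2) / (n - 1)

assert np.allclose(cov_n, np.cov(x, rowvar=False))   # matches the batch C_n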
def get_next_step(self):
"""Find the proper step when user clicks the Next button.
:returns: The step to be switched to
:rtype: WizardStep instance or None
"""
if self.parent.is_selected_layer_keywordless:
# insert keyword creation thread here
self.parent.parent_step = self
self.parent.existing_keywords = None
self.parent.set_mode_label_to_keywords_creation()
new_step = self.parent.step_kw_purpose
else:
if layers_intersect(self.parent.hazard_layer,
self.parent.exposure_layer):
new_step = self.parent.step_fc_agglayer_origin
else:
new_step = self.parent.step_fc_disjoint_layers
return new_step | Find the proper step when user clicks the Next button.
:returns: The step to be switched to
:rtype: WizardStep instance or None | Below is the the instruction that describes the task:
### Input:
Find the proper step when user clicks the Next button.
:returns: The step to be switched to
:rtype: WizardStep instance or None
### Response:
def get_next_step(self):
"""Find the proper step when user clicks the Next button.
:returns: The step to be switched to
:rtype: WizardStep instance or None
"""
if self.parent.is_selected_layer_keywordless:
# insert keyword creation thread here
self.parent.parent_step = self
self.parent.existing_keywords = None
self.parent.set_mode_label_to_keywords_creation()
new_step = self.parent.step_kw_purpose
else:
if layers_intersect(self.parent.hazard_layer,
self.parent.exposure_layer):
new_step = self.parent.step_fc_agglayer_origin
else:
new_step = self.parent.step_fc_disjoint_layers
return new_step |
def printer(self, message, color_level='info'):
"""Print Messages and Log it.
:param message: item to print to screen
"""
if self.job_args.get('colorized'):
print(cloud_utils.return_colorized(msg=message, color=color_level))
else:
print(message) | Print Messages and Log it.
:param message: item to print to screen | Below is the the instruction that describes the task:
### Input:
Print Messages and Log it.
:param message: item to print to screen
### Response:
def printer(self, message, color_level='info'):
"""Print Messages and Log it.
:param message: item to print to screen
"""
if self.job_args.get('colorized'):
print(cloud_utils.return_colorized(msg=message, color=color_level))
else:
print(message) |
def _get_col_dimstr(tdim, is_string=False):
"""
not for variable length
"""
dimstr = ''
if tdim is None:
dimstr = 'array[bad TDIM]'
else:
if is_string:
if len(tdim) > 1:
dimstr = [str(d) for d in tdim[1:]]
else:
if len(tdim) > 1 or tdim[0] > 1:
dimstr = [str(d) for d in tdim]
if dimstr != '':
dimstr = ','.join(dimstr)
dimstr = 'array[%s]' % dimstr
return dimstr | not for variable length | Below is the the instruction that describes the task:
### Input:
not for variable length
### Response:
def _get_col_dimstr(tdim, is_string=False):
"""
not for variable length
"""
dimstr = ''
if tdim is None:
dimstr = 'array[bad TDIM]'
else:
if is_string:
if len(tdim) > 1:
dimstr = [str(d) for d in tdim[1:]]
else:
if len(tdim) > 1 or tdim[0] > 1:
dimstr = [str(d) for d in tdim]
if dimstr != '':
dimstr = ','.join(dimstr)
dimstr = 'array[%s]' % dimstr
return dimstr |
def show_disk(name=None, kwargs=None, call=None): # pylint: disable=W0613
'''
Show the details of an existing disk.
CLI Example:
.. code-block:: bash
salt-cloud -a show_disk myinstance disk_name=mydisk
salt-cloud -f show_disk gce disk_name=mydisk
'''
if not kwargs or 'disk_name' not in kwargs:
log.error(
'Must specify disk_name.'
)
return False
conn = get_conn()
return _expand_disk(conn.ex_get_volume(kwargs['disk_name'])) | Show the details of an existing disk.
CLI Example:
.. code-block:: bash
salt-cloud -a show_disk myinstance disk_name=mydisk
salt-cloud -f show_disk gce disk_name=mydisk | Below is the the instruction that describes the task:
### Input:
Show the details of an existing disk.
CLI Example:
.. code-block:: bash
salt-cloud -a show_disk myinstance disk_name=mydisk
salt-cloud -f show_disk gce disk_name=mydisk
### Response:
def show_disk(name=None, kwargs=None, call=None): # pylint: disable=W0613
'''
Show the details of an existing disk.
CLI Example:
.. code-block:: bash
salt-cloud -a show_disk myinstance disk_name=mydisk
salt-cloud -f show_disk gce disk_name=mydisk
'''
if not kwargs or 'disk_name' not in kwargs:
log.error(
'Must specify disk_name.'
)
return False
conn = get_conn()
return _expand_disk(conn.ex_get_volume(kwargs['disk_name'])) |
def get_environment(self):
"""
Get environment facts.
power and fan are currently not implemented
cpu is using 1-minute average
cpu hard-coded to cpu0 (i.e. only a single CPU)
"""
environment = {}
cpu_cmd = 'show proc cpu'
mem_cmd = 'show memory statistics'
temp_cmd = 'show env temperature status'
output = self._send_command(cpu_cmd)
environment.setdefault('cpu', {})
environment['cpu'][0] = {}
environment['cpu'][0]['%usage'] = 0.0
for line in output.splitlines():
if 'CPU utilization' in line:
# CPU utilization for five seconds: 2%/0%; one minute: 2%; five minutes: 1%
cpu_regex = r'^.*one minute: (\d+)%; five.*$'
match = re.search(cpu_regex, line)
environment['cpu'][0]['%usage'] = float(match.group(1))
break
output = self._send_command(mem_cmd)
for line in output.splitlines():
if 'Processor' in line:
_, _, _, proc_used_mem, proc_free_mem = line.split()[:5]
elif 'I/O' in line or 'io' in line:
_, _, _, io_used_mem, io_free_mem = line.split()[:5]
used_mem = int(proc_used_mem) + int(io_used_mem)
free_mem = int(proc_free_mem) + int(io_free_mem)
environment.setdefault('memory', {})
environment['memory']['used_ram'] = used_mem
environment['memory']['available_ram'] = free_mem
environment.setdefault('temperature', {})
# The 'show env temperature status' is not ubiquitous in Cisco IOS
output = self._send_command(temp_cmd)
if '% Invalid' not in output:
for line in output.splitlines():
if 'System Temperature Value' in line:
system_temp = float(line.split(':')[1].split()[0])
elif 'Yellow Threshold' in line:
system_temp_alert = float(line.split(':')[1].split()[0])
elif 'Red Threshold' in line:
system_temp_crit = float(line.split(':')[1].split()[0])
env_value = {'is_alert': system_temp >= system_temp_alert,
'is_critical': system_temp >= system_temp_crit, 'temperature': system_temp}
environment['temperature']['system'] = env_value
else:
env_value = {'is_alert': False, 'is_critical': False, 'temperature': -1.0}
environment['temperature']['invalid'] = env_value
# Initialize 'power' and 'fan' to default values (not implemented)
environment.setdefault('power', {})
environment['power']['invalid'] = {'status': True, 'output': -1.0, 'capacity': -1.0}
environment.setdefault('fans', {})
environment['fans']['invalid'] = {'status': True}
return environment | Get environment facts.
power and fan are currently not implemented
cpu is using 1-minute average
cpu hard-coded to cpu0 (i.e. only a single CPU) | Below is the the instruction that describes the task:
### Input:
Get environment facts.
power and fan are currently not implemented
cpu is using 1-minute average
cpu hard-coded to cpu0 (i.e. only a single CPU)
### Response:
def get_environment(self):
"""
Get environment facts.
power and fan are currently not implemented
cpu is using 1-minute average
cpu hard-coded to cpu0 (i.e. only a single CPU)
"""
environment = {}
cpu_cmd = 'show proc cpu'
mem_cmd = 'show memory statistics'
temp_cmd = 'show env temperature status'
output = self._send_command(cpu_cmd)
environment.setdefault('cpu', {})
environment['cpu'][0] = {}
environment['cpu'][0]['%usage'] = 0.0
for line in output.splitlines():
if 'CPU utilization' in line:
# CPU utilization for five seconds: 2%/0%; one minute: 2%; five minutes: 1%
cpu_regex = r'^.*one minute: (\d+)%; five.*$'
match = re.search(cpu_regex, line)
environment['cpu'][0]['%usage'] = float(match.group(1))
break
output = self._send_command(mem_cmd)
for line in output.splitlines():
if 'Processor' in line:
_, _, _, proc_used_mem, proc_free_mem = line.split()[:5]
elif 'I/O' in line or 'io' in line:
_, _, _, io_used_mem, io_free_mem = line.split()[:5]
used_mem = int(proc_used_mem) + int(io_used_mem)
free_mem = int(proc_free_mem) + int(io_free_mem)
environment.setdefault('memory', {})
environment['memory']['used_ram'] = used_mem
environment['memory']['available_ram'] = free_mem
environment.setdefault('temperature', {})
# The 'show env temperature status' is not ubiquitous in Cisco IOS
output = self._send_command(temp_cmd)
if '% Invalid' not in output:
for line in output.splitlines():
if 'System Temperature Value' in line:
system_temp = float(line.split(':')[1].split()[0])
elif 'Yellow Threshold' in line:
system_temp_alert = float(line.split(':')[1].split()[0])
elif 'Red Threshold' in line:
system_temp_crit = float(line.split(':')[1].split()[0])
env_value = {'is_alert': system_temp >= system_temp_alert,
'is_critical': system_temp >= system_temp_crit, 'temperature': system_temp}
environment['temperature']['system'] = env_value
else:
env_value = {'is_alert': False, 'is_critical': False, 'temperature': -1.0}
environment['temperature']['invalid'] = env_value
# Initialize 'power' and 'fan' to default values (not implemented)
environment.setdefault('power', {})
environment['power']['invalid'] = {'status': True, 'output': -1.0, 'capacity': -1.0}
environment.setdefault('fans', {})
environment['fans']['invalid'] = {'status': True}
return environment |
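To make the CPU parsing concrete, the regex above applied to a typical (made-up) IOS output line extracts the one-minute figure.
import re

line = 'CPU utilization for five seconds: 2%/0%; one minute: 7%; five minutes: 1%'
match = re.search(r'^.*one minute: (\d+)%; five.*$', line)
print(float(match.group(1)))  # 7.0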
def loadScopeGroupbyID(self, id, callback=None, errback=None):
"""
Load an existing Scope Group by ID into a high level Scope Group object
:param int id: id of an existing ScopeGroup
"""
import ns1.ipam
scope_group = ns1.ipam.Scopegroup(self.config, id=id)
return scope_group.load(callback=callback, errback=errback) | Load an existing Scope Group by ID into a high level Scope Group object
:param int id: id of an existing ScopeGroup | Below is the the instruction that describes the task:
### Input:
Load an existing Scope Group by ID into a high level Scope Group object
:param int id: id of an existing ScopeGroup
### Response:
def loadScopeGroupbyID(self, id, callback=None, errback=None):
"""
Load an existing Scope Group by ID into a high level Scope Group object
:param int id: id of an existing ScopeGroup
"""
import ns1.ipam
scope_group = ns1.ipam.Scopegroup(self.config, id=id)
return scope_group.load(callback=callback, errback=errback) |
def mean(self, values, axis=0, weights=None, dtype=None):
"""compute the mean over each group
Parameters
----------
values : array_like, [keys, ...]
values to take average of per group
axis : int, optional
alternative reduction axis for values
weights : ndarray, [keys, ...], optional
weight to use for each value
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
if weights is None:
result = self.reduce(values, axis=axis, dtype=dtype)
shape = [1] * values.ndim
shape[axis] = self.groups
weights = self.count.reshape(shape)
else:
weights = np.asarray(weights)
result = self.reduce(values * weights, axis=axis, dtype=dtype)
weights = self.reduce(weights, axis=axis, dtype=dtype)
return self.unique, result / weights | compute the mean over each group
Parameters
----------
values : array_like, [keys, ...]
values to take average of per group
axis : int, optional
alternative reduction axis for values
weights : ndarray, [keys, ...], optional
weight to use for each value
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups | Below is the the instruction that describes the task:
### Input:
compute the mean over each group
Parameters
----------
values : array_like, [keys, ...]
values to take average of per group
axis : int, optional
alternative reduction axis for values
weights : ndarray, [keys, ...], optional
weight to use for each value
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
### Response:
def mean(self, values, axis=0, weights=None, dtype=None):
"""compute the mean over each group
Parameters
----------
values : array_like, [keys, ...]
values to take average of per group
axis : int, optional
alternative reduction axis for values
weights : ndarray, [keys, ...], optional
weight to use for each value
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
if weights is None:
result = self.reduce(values, axis=axis, dtype=dtype)
shape = [1] * values.ndim
shape[axis] = self.groups
weights = self.count.reshape(shape)
else:
weights = np.asarray(weights)
result = self.reduce(values * weights, axis=axis, dtype=dtype)
weights = self.reduce(weights, axis=axis, dtype=dtype)
return self.unique, result / weights |
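The weighted branch can be illustrated without the grouping class itself; a rough standalone sketch of a weighted per-group mean using np.bincount, with made-up keys, values and weights.
import numpy as np

keys = np.array([0, 0, 1, 1, 1])
values = np.array([1.0, 3.0, 2.0, 4.0, 6.0])
weights = np.array([1.0, 1.0, 2.0, 1.0, 1.0])

weighted_sums = np.bincount(keys, weights=values * weights)
weight_totals = np.bincount(keys, weights=weights)
print(weighted_sums / weight_totals)  # [2.  3.5]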
def create(self, name, data):
"""Create a Job Binary Internal.
:param str data: raw data of script text
"""
return self._update('/job-binary-internals/%s' %
urlparse.quote(name.encode('utf-8')), data,
'job_binary_internal', dump_json=False) | Create a Job Binary Internal.
:param str data: raw data of script text | Below is the the instruction that describes the task:
### Input:
Create a Job Binary Internal.
:param str data: raw data of script text
### Response:
def create(self, name, data):
"""Create a Job Binary Internal.
:param str data: raw data of script text
"""
return self._update('/job-binary-internals/%s' %
urlparse.quote(name.encode('utf-8')), data,
'job_binary_internal', dump_json=False) |
def update(request, ident, stateless=False, **kwargs):
'Generate update json response'
dash_app, app = DashApp.locate_item(ident, stateless)
request_body = json.loads(request.body.decode('utf-8'))
if app.use_dash_dispatch():
# Force call through dash
view_func = app.locate_endpoint_function('dash-update-component')
import flask
with app.test_request_context():
# Fudge request object
# pylint: disable=protected-access
flask.request._cached_json = (request_body, flask.request._cached_json[True])
resp = view_func()
else:
# Use direct dispatch with extra arguments in the argMap
app_state = request.session.get("django_plotly_dash", dict())
arg_map = {'dash_app_id': ident,
'dash_app': dash_app,
'user': request.user,
'session_state': app_state}
resp = app.dispatch_with_args(request_body, arg_map)
request.session['django_plotly_dash'] = app_state
dash_app.handle_current_state()
# Special for ws-driven edge case
if str(resp) == 'EDGECASEEXIT':
return HttpResponse("")
# Change in returned value type
try:
rdata = resp.data
rtype = resp.mimetype
except:
rdata = resp
rtype = "application/json"
return HttpResponse(rdata,
content_type=rtype) | Generate update json response | Below is the the instruction that describes the task:
### Input:
Generate update json response
### Response:
def update(request, ident, stateless=False, **kwargs):
'Generate update json response'
dash_app, app = DashApp.locate_item(ident, stateless)
request_body = json.loads(request.body.decode('utf-8'))
if app.use_dash_dispatch():
# Force call through dash
view_func = app.locate_endpoint_function('dash-update-component')
import flask
with app.test_request_context():
# Fudge request object
# pylint: disable=protected-access
flask.request._cached_json = (request_body, flask.request._cached_json[True])
resp = view_func()
else:
# Use direct dispatch with extra arguments in the argMap
app_state = request.session.get("django_plotly_dash", dict())
arg_map = {'dash_app_id': ident,
'dash_app': dash_app,
'user': request.user,
'session_state': app_state}
resp = app.dispatch_with_args(request_body, arg_map)
request.session['django_plotly_dash'] = app_state
dash_app.handle_current_state()
# Special for ws-driven edge case
if str(resp) == 'EDGECASEEXIT':
return HttpResponse("")
# Change in returned value type
try:
rdata = resp.data
rtype = resp.mimetype
except:
rdata = resp
rtype = "application/json"
return HttpResponse(rdata,
content_type=rtype) |
def processes(self, processes):
'''Set the number of concurrent processes the ABC will utilize for
fitness function evaluation; if <= 1, single process is used
Args:
processes (int): number of concurrent processes
'''
if self._processes > 1:
self._pool.close()
self._pool.join()
self._pool = multiprocessing.Pool(processes)
else:
self._pool = None
self._logger.log('debug', 'Number of processes set to {}'.format(
processes
)) | Set the number of concurrent processes the ABC will utilize for
fitness function evaluation; if <= 1, single process is used
Args:
processes (int): number of concurrent processes | Below is the the instruction that describes the task:
### Input:
Set the number of concurrent processes the ABC will utilize for
fitness function evaluation; if <= 1, single process is used
Args:
processes (int): number of concurrent processes
### Response:
def processes(self, processes):
'''Set the number of concurrent processes the ABC will utilize for
fitness function evaluation; if <= 1, single process is used
Args:
processes (int): number of concurrent processes
'''
if self._processes > 1:
self._pool.close()
self._pool.join()
self._pool = multiprocessing.Pool(processes)
else:
self._pool = None
self._logger.log('debug', 'Number of processes set to {}'.format(
processes
)) |
def load_path(path, overrides=None, **kwargs):
"""
Convenience function for loading a YAML configuration from a file.
Parameters
----------
path : str
The path to the file to load on disk.
overrides : dict, optional
A dictionary containing overrides to apply. The location of
the override is specified in the key as a dot-delimited path
to the desired parameter, e.g. "model.corruptor.corruption_level".
Returns
-------
graph : dict or object
The dictionary or object (if the top-level element specified an
Python object to instantiate).
Notes
-----
Other keyword arguments are passed on to `yaml.load`.
"""
f = open(path, 'r')
content = ''.join(f.readlines())
f.close()
if not isinstance(content, str):
raise AssertionError("Expected content to be of type str but it is "+str(type(content)))
return load(content, **kwargs) | Convenience function for loading a YAML configuration from a file.
Parameters
----------
path : str
The path to the file to load on disk.
overrides : dict, optional
A dictionary containing overrides to apply. The location of
the override is specified in the key as a dot-delimited path
to the desired parameter, e.g. "model.corruptor.corruption_level".
Returns
-------
graph : dict or object
The dictionary or object (if the top-level element specified an
Python object to instantiate).
Notes
-----
Other keyword arguments are passed on to `yaml.load`. | Below is the the instruction that describes the task:
### Input:
Convenience function for loading a YAML configuration from a file.
Parameters
----------
path : str
The path to the file to load on disk.
overrides : dict, optional
A dictionary containing overrides to apply. The location of
the override is specified in the key as a dot-delimited path
to the desired parameter, e.g. "model.corruptor.corruption_level".
Returns
-------
graph : dict or object
The dictionary or object (if the top-level element specified an
Python object to instantiate).
Notes
-----
Other keyword arguments are passed on to `yaml.load`.
### Response:
def load_path(path, overrides=None, **kwargs):
"""
Convenience function for loading a YAML configuration from a file.
Parameters
----------
path : str
The path to the file to load on disk.
overrides : dict, optional
A dictionary containing overrides to apply. The location of
the override is specified in the key as a dot-delimited path
to the desired parameter, e.g. "model.corruptor.corruption_level".
Returns
-------
graph : dict or object
The dictionary or object (if the top-level element specified an
Python object to instantiate).
Notes
-----
Other keyword arguments are passed on to `yaml.load`.
"""
f = open(path, 'r')
content = ''.join(f.readlines())
f.close()
if not isinstance(content, str):
raise AssertionError("Expected content to be of type str but it is "+str(type(content)))
return load(content, **kwargs) |
def stop_app(self):
"""Overrides superclass."""
try:
if self._conn:
# Be polite; let the dest know we're shutting down.
try:
self.closeSl4aSession()
except:
self.log.exception('Failed to gracefully shut down %s.',
self.app_name)
# Close the socket connection.
self.disconnect()
self.stop_event_dispatcher()
# Terminate the app
self._adb.shell('am force-stop com.googlecode.android_scripting')
finally:
# Always clean up the adb port
self.clear_host_port() | Overrides superclass. | Below is the the instruction that describes the task:
### Input:
Overrides superclass.
### Response:
def stop_app(self):
"""Overrides superclass."""
try:
if self._conn:
# Be polite; let the dest know we're shutting down.
try:
self.closeSl4aSession()
except:
self.log.exception('Failed to gracefully shut down %s.',
self.app_name)
# Close the socket connection.
self.disconnect()
self.stop_event_dispatcher()
# Terminate the app
self._adb.shell('am force-stop com.googlecode.android_scripting')
finally:
# Always clean up the adb port
self.clear_host_port() |
def get_evidence_by_hash(self, evidence_hash: str) -> Optional[Evidence]:
"""Look up an evidence by its hash."""
return self.session.query(Evidence).filter(Evidence.sha512 == evidence_hash).one_or_none() | Look up an evidence by its hash. | Below is the the instruction that describes the task:
### Input:
Look up an evidence by its hash.
### Response:
def get_evidence_by_hash(self, evidence_hash: str) -> Optional[Evidence]:
"""Look up an evidence by its hash."""
return self.session.query(Evidence).filter(Evidence.sha512 == evidence_hash).one_or_none() |
def reduce_data_frame (df, chunk_slicers,
avg_cols=(),
uavg_cols=(),
minmax_cols=(),
nchunk_colname='nchunk',
uncert_prefix='u',
min_points_per_chunk=3):
""""Reduce" a DataFrame by collapsing rows in grouped chunks. Returns another
DataFrame with similar columns but fewer rows.
Arguments:
df
The input :class:`pandas.DataFrame`.
chunk_slicers
An iterable that returns values that are used to slice *df* with its
:meth:`pandas.DataFrame.iloc` indexer. An example value might be the
generator returned from :func:`slice_evenly_with_gaps`.
avg_cols
An iterable of names of columns that are to be reduced by taking the mean.
uavg_cols
An iterable of names of columns that are to be reduced by taking a
weighted mean.
minmax_cols
An iterable of names of columns that are to be reduced by reporting minimum
and maximum values.
nchunk_colname
The name of a column to create reporting the number of rows contributing
to each chunk.
uncert_prefix
The column name prefix for locating uncertainty estimates. By default, the
uncertainty on the column ``"temp"`` is given in the column ``"utemp"``.
min_points_per_chunk
Require at least this many rows in each chunk. Smaller chunks are discarded.
Returns a new :class:`pandas.DataFrame`.
"""
subds = [df.iloc[idx] for idx in chunk_slicers]
subds = [sd for sd in subds if sd.shape[0] >= min_points_per_chunk]
chunked = df.__class__ ({nchunk_colname: np.zeros (len (subds), dtype=np.int)})
# Some future-proofing: allow possibility of different ways of mapping
# from a column giving a value to a column giving its uncertainty.
uncert_col_name = lambda c: uncert_prefix + c
for i, subd in enumerate (subds):
label = chunked.index[i]
chunked.loc[label,nchunk_colname] = subd.shape[0]
for col in avg_cols:
chunked.loc[label,col] = subd[col].mean ()
for col in uavg_cols:
ucol = uncert_col_name (col)
v, u = weighted_mean (subd[col], subd[ucol])
chunked.loc[label,col] = v
chunked.loc[label,ucol] = u
for col in minmax_cols:
chunked.loc[label, 'min_'+col] = subd[col].min ()
chunked.loc[label, 'max_'+col] = subd[col].max ()
return chunked | Reduce" a DataFrame by collapsing rows in grouped chunks. Returns another
DataFrame with similar columns but fewer rows.
Arguments:
df
The input :class:`pandas.DataFrame`.
chunk_slicers
An iterable that returns values that are used to slice *df* with its
:meth:`pandas.DataFrame.iloc` indexer. An example value might be the
generator returned from :func:`slice_evenly_with_gaps`.
avg_cols
An iterable of names of columns that are to be reduced by taking the mean.
uavg_cols
An iterable of names of columns that are to be reduced by taking a
weighted mean.
minmax_cols
An iterable of names of columns that are to be reduced by reporting minimum
and maximum values.
nchunk_colname
The name of a column to create reporting the number of rows contributing
to each chunk.
uncert_prefix
The column name prefix for locating uncertainty estimates. By default, the
uncertainty on the column ``"temp"`` is given in the column ``"utemp"``.
min_points_per_chunk
Require at least this many rows in each chunk. Smaller chunks are discarded.
Returns a new :class:`pandas.DataFrame`. | Below is the the instruction that describes the task:
### Input:
Reduce" a DataFrame by collapsing rows in grouped chunks. Returns another
DataFrame with similar columns but fewer rows.
Arguments:
df
The input :class:`pandas.DataFrame`.
chunk_slicers
An iterable that returns values that are used to slice *df* with its
:meth:`pandas.DataFrame.iloc` indexer. An example value might be the
generator returned from :func:`slice_evenly_with_gaps`.
avg_cols
An iterable of names of columns that are to be reduced by taking the mean.
uavg_cols
An iterable of names of columns that are to be reduced by taking a
weighted mean.
minmax_cols
An iterable of names of columns that are to be reduced by reporting minimum
and maximum values.
nchunk_colname
The name of a column to create reporting the number of rows contributing
to each chunk.
uncert_prefix
The column name prefix for locating uncertainty estimates. By default, the
uncertainty on the column ``"temp"`` is given in the column ``"utemp"``.
min_points_per_chunk
Require at least this many rows in each chunk. Smaller chunks are discarded.
Returns a new :class:`pandas.DataFrame`.
### Response:
def reduce_data_frame (df, chunk_slicers,
avg_cols=(),
uavg_cols=(),
minmax_cols=(),
nchunk_colname='nchunk',
uncert_prefix='u',
min_points_per_chunk=3):
""""Reduce" a DataFrame by collapsing rows in grouped chunks. Returns another
DataFrame with similar columns but fewer rows.
Arguments:
df
The input :class:`pandas.DataFrame`.
chunk_slicers
An iterable that returns values that are used to slice *df* with its
:meth:`pandas.DataFrame.iloc` indexer. An example value might be the
generator returned from :func:`slice_evenly_with_gaps`.
avg_cols
An iterable of names of columns that are to be reduced by taking the mean.
uavg_cols
An iterable of names of columns that are to be reduced by taking a
weighted mean.
minmax_cols
An iterable of names of columns that are to be reduced by reporting minimum
and maximum values.
nchunk_colname
The name of a column to create reporting the number of rows contributing
to each chunk.
uncert_prefix
The column name prefix for locating uncertainty estimates. By default, the
uncertainty on the column ``"temp"`` is given in the column ``"utemp"``.
min_points_per_chunk
Require at least this many rows in each chunk. Smaller chunks are discarded.
Returns a new :class:`pandas.DataFrame`.
"""
subds = [df.iloc[idx] for idx in chunk_slicers]
subds = [sd for sd in subds if sd.shape[0] >= min_points_per_chunk]
chunked = df.__class__ ({nchunk_colname: np.zeros (len (subds), dtype=np.int)})
# Some future-proofing: allow possibility of different ways of mapping
# from a column giving a value to a column giving its uncertainty.
uncert_col_name = lambda c: uncert_prefix + c
for i, subd in enumerate (subds):
label = chunked.index[i]
chunked.loc[label,nchunk_colname] = subd.shape[0]
for col in avg_cols:
chunked.loc[label,col] = subd[col].mean ()
for col in uavg_cols:
ucol = uncert_col_name (col)
v, u = weighted_mean (subd[col], subd[ucol])
chunked.loc[label,col] = v
chunked.loc[label,ucol] = u
for col in minmax_cols:
chunked.loc[label, 'min_'+col] = subd[col].min ()
chunked.loc[label, 'max_'+col] = subd[col].max ()
return chunked |
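A hedged usage sketch for reduce_data_frame; the slicers below are plain slice objects rather than the output of slice_evenly_with_gaps, and the column names are invented.
import numpy as np
import pandas as pd

df = pd.DataFrame({'t': np.arange(10.0), 'flux': np.random.rand(10)})
chunks = [slice(0, 5), slice(5, 10)]
reduced = reduce_data_frame(df, chunks, avg_cols=('t',), minmax_cols=('flux',))
print(reduced[['nchunk', 't', 'min_flux', 'max_flux']])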
def invoke_ssh_shell(cls, *args, **kwargs):
"""invoke_ssh(arguments..., pty=False, echo=False)
Star a new shell on a remote server. It first calls
:meth:`Flow.connect_ssh` using all positional and keyword
arguments, then calls :meth:`SSHClient.invoke_shell` with the
pty / echo options.
Args:
arguments...: The options for the SSH connection.
pty(bool): Request a pseudo-terminal from the server.
echo(bool): Whether to echo read/written data to stdout by default.
Returns:
:class:`Flow`: A Flow instance initialised with the SSH channel.
"""
pty = kwargs.pop('pty', True)
echo = kwargs.pop('echo', False)
client = cls.connect_ssh(*args, **kwargs)
f = client.invoke_shell(pty=pty, echo=echo)
f.client = client
        return f | invoke_ssh_shell(arguments..., pty=True, echo=False)
Start a new shell on a remote server. It first calls
:meth:`Flow.connect_ssh` using all positional and keyword
arguments, then calls :meth:`SSHClient.invoke_shell` with the
pty / echo options.
Args:
arguments...: The options for the SSH connection.
pty(bool): Request a pseudo-terminal from the server.
echo(bool): Whether to echo read/written data to stdout by default.
Returns:
:class:`Flow`: A Flow instance initialised with the SSH channel. | Below is the the instruction that describes the task:
### Input:
invoke_ssh_shell(arguments..., pty=True, echo=False)
Start a new shell on a remote server. It first calls
:meth:`Flow.connect_ssh` using all positional and keyword
arguments, then calls :meth:`SSHClient.invoke_shell` with the
pty / echo options.
Args:
arguments...: The options for the SSH connection.
pty(bool): Request a pseudo-terminal from the server.
echo(bool): Whether to echo read/written data to stdout by default.
Returns:
:class:`Flow`: A Flow instance initialised with the SSH channel.
### Response:
def invoke_ssh_shell(cls, *args, **kwargs):
"""invoke_ssh(arguments..., pty=False, echo=False)
Star a new shell on a remote server. It first calls
:meth:`Flow.connect_ssh` using all positional and keyword
arguments, then calls :meth:`SSHClient.invoke_shell` with the
pty / echo options.
Args:
arguments...: The options for the SSH connection.
pty(bool): Request a pseudo-terminal from the server.
echo(bool): Whether to echo read/written data to stdout by default.
Returns:
:class:`Flow`: A Flow instance initialised with the SSH channel.
"""
pty = kwargs.pop('pty', True)
echo = kwargs.pop('echo', False)
client = cls.connect_ssh(*args, **kwargs)
f = client.invoke_shell(pty=pty, echo=echo)
f.client = client
return f |
def post_fork_child(self):
"""Post-fork() child callback for ProcessManager.daemon_spawn()."""
entry_point = '{}:launch'.format(__name__)
exec_env = combined_dict(os.environ, dict(PANTS_ENTRYPOINT=entry_point))
# Pass all of sys.argv so that we can proxy arg flags e.g. `-ldebug`.
cmd = [sys.executable] + sys.argv
self._logger.debug('cmd is: PANTS_ENTRYPOINT={} {}'.format(entry_point, ' '.join(cmd)))
# TODO: Improve error handling on launch failures.
os.spawnve(os.P_NOWAIT, sys.executable, cmd, env=exec_env) | Post-fork() child callback for ProcessManager.daemon_spawn(). | Below is the the instruction that describes the task:
### Input:
Post-fork() child callback for ProcessManager.daemon_spawn().
### Response:
def post_fork_child(self):
"""Post-fork() child callback for ProcessManager.daemon_spawn()."""
entry_point = '{}:launch'.format(__name__)
exec_env = combined_dict(os.environ, dict(PANTS_ENTRYPOINT=entry_point))
# Pass all of sys.argv so that we can proxy arg flags e.g. `-ldebug`.
cmd = [sys.executable] + sys.argv
self._logger.debug('cmd is: PANTS_ENTRYPOINT={} {}'.format(entry_point, ' '.join(cmd)))
# TODO: Improve error handling on launch failures.
os.spawnve(os.P_NOWAIT, sys.executable, cmd, env=exec_env) |
def indication(self, pdu):
"""Message going downstream."""
if _debug: StreamToPacket._debug("indication %r", pdu)
# hack it up into chunks
for packet in self.packetize(pdu, self.downstreamBuffer):
self.request(packet) | Message going downstream. | Below is the the instruction that describes the task:
### Input:
Message going downstream.
### Response:
def indication(self, pdu):
"""Message going downstream."""
if _debug: StreamToPacket._debug("indication %r", pdu)
# hack it up into chunks
for packet in self.packetize(pdu, self.downstreamBuffer):
self.request(packet) |
def assignMgtKey(self, CorpNum, MgtKeyType, ItemKey, MgtKey, UserID=None):
""" 관리번호할당
args
CorpNum : 팝빌회원 사업자번호
MgtKeyType : 세금계산서 유형, SELL-매출, BUY-매입, TRUSTEE-위수탁
ItemKey : 아이템키 (Search API로 조회 가능)
MgtKey : 세금계산서에 할당할 파트너 관리 번호
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKeyType == None or MgtKeyType == '':
raise PopbillException(-99999999, "세금계산서 발행유형이 입력되지 않았습니다.")
if ItemKey == None or ItemKey == '':
raise PopbillException(-99999999, "아이템키가 입력되지 않았습니다.")
if MgtKey == None or MgtKey == '':
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
postDate = "MgtKey=" + MgtKey
return self._httppost('/Taxinvoice/' + ItemKey + '/' + MgtKeyType, postDate, CorpNum, UserID, "",
"application/x-www-form-urlencoded; charset=utf-8") | 관리번호할당
args
CorpNum : 팝빌회원 사업자번호
MgtKeyType : 세금계산서 유형, SELL-매출, BUY-매입, TRUSTEE-위수탁
ItemKey : 아이템키 (Search API로 조회 가능)
MgtKey : 세금계산서에 할당할 파트너 관리 번호
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException | Below is the the instruction that describes the task:
### Input:
Assign management key
args
    CorpNum : Popbill member business registration number
    MgtKeyType : tax invoice type, SELL-sales, BUY-purchase, TRUSTEE-consignment
    ItemKey : item key (can be retrieved via the Search API)
    MgtKey : partner management key to assign to the tax invoice
    UserID : Popbill member ID
return
    processing result. consists of code and message
raise
PopbillException
### Response:
def assignMgtKey(self, CorpNum, MgtKeyType, ItemKey, MgtKey, UserID=None):
""" 관리번호할당
args
CorpNum : 팝빌회원 사업자번호
MgtKeyType : 세금계산서 유형, SELL-매출, BUY-매입, TRUSTEE-위수탁
ItemKey : 아이템키 (Search API로 조회 가능)
MgtKey : 세금계산서에 할당할 파트너 관리 번호
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if MgtKeyType == None or MgtKeyType == '':
raise PopbillException(-99999999, "세금계산서 발행유형이 입력되지 않았습니다.")
if ItemKey == None or ItemKey == '':
raise PopbillException(-99999999, "아이템키가 입력되지 않았습니다.")
if MgtKey == None or MgtKey == '':
raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
postDate = "MgtKey=" + MgtKey
return self._httppost('/Taxinvoice/' + ItemKey + '/' + MgtKeyType, postDate, CorpNum, UserID, "",
"application/x-www-form-urlencoded; charset=utf-8") |
def get_conditional_probs(self, source=None):
"""Returns the full conditional probabilities table as a numpy array,
where row i*(k+1) + ly is the conditional probabilities of source i
        emitting label ly (including abstains 0), conditioned on different
values of Y, i.e.:
c_probs[i*(k+1) + ly, y] = P(\lambda_i = ly | Y = y)
Note that this simply involves inferring the kth row by law of total
probability and adding in to mu.
If `source` is not None, returns only the corresponding block.
"""
c_probs = np.zeros((self.m * (self.k + 1), self.k))
mu = self.mu.detach().clone().numpy()
for i in range(self.m):
# si = self.c_data[(i,)]['start_index']
# ei = self.c_data[(i,)]['end_index']
# mu_i = mu[si:ei, :]
mu_i = mu[i * self.k : (i + 1) * self.k, :]
c_probs[i * (self.k + 1) + 1 : (i + 1) * (self.k + 1), :] = mu_i
# The 0th row (corresponding to abstains) is the difference between
# the sums of the other rows and one, by law of total prob
c_probs[i * (self.k + 1), :] = 1 - mu_i.sum(axis=0)
c_probs = np.clip(c_probs, 0.01, 0.99)
if source is not None:
return c_probs[source * (self.k + 1) : (source + 1) * (self.k + 1)]
else:
return c_probs | Returns the full conditional probabilities table as a numpy array,
where row i*(k+1) + ly is the conditional probabilities of source i
emitting label ly (including abstains 0), conditioned on different
values of Y, i.e.:
c_probs[i*(k+1) + ly, y] = P(\lambda_i = ly | Y = y)
Note that this simply involves inferring the kth row by law of total
probability and adding in to mu.
If `source` is not None, returns only the corresponding block. | Below is the the instruction that describes the task:
### Input:
Returns the full conditional probabilities table as a numpy array,
where row i*(k+1) + ly is the conditional probabilities of source i
emitting label ly (including abstains 0), conditioned on different
values of Y, i.e.:
c_probs[i*(k+1) + ly, y] = P(\lambda_i = ly | Y = y)
Note that this simply involves inferring the kth row by law of total
probability and adding in to mu.
If `source` is not None, returns only the corresponding block.
### Response:
def get_conditional_probs(self, source=None):
"""Returns the full conditional probabilities table as a numpy array,
where row i*(k+1) + ly is the conditional probabilities of source i
        emitting label ly (including abstains 0), conditioned on different
values of Y, i.e.:
c_probs[i*(k+1) + ly, y] = P(\lambda_i = ly | Y = y)
Note that this simply involves inferring the kth row by law of total
probability and adding in to mu.
If `source` is not None, returns only the corresponding block.
"""
c_probs = np.zeros((self.m * (self.k + 1), self.k))
mu = self.mu.detach().clone().numpy()
for i in range(self.m):
# si = self.c_data[(i,)]['start_index']
# ei = self.c_data[(i,)]['end_index']
# mu_i = mu[si:ei, :]
mu_i = mu[i * self.k : (i + 1) * self.k, :]
c_probs[i * (self.k + 1) + 1 : (i + 1) * (self.k + 1), :] = mu_i
# The 0th row (corresponding to abstains) is the difference between
# the sums of the other rows and one, by law of total prob
c_probs[i * (self.k + 1), :] = 1 - mu_i.sum(axis=0)
c_probs = np.clip(c_probs, 0.01, 0.99)
if source is not None:
return c_probs[source * (self.k + 1) : (source + 1) * (self.k + 1)]
else:
return c_probs |
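A tiny numeric illustration of the law-of-total-probability step described above, for a single source with k=2 classes; the numbers are made up.
import numpy as np

mu_i = np.array([[0.7, 0.2],   # P(lambda_i = 1 | Y = y)
                 [0.1, 0.6]])  # P(lambda_i = 2 | Y = y)
abstain_row = 1 - mu_i.sum(axis=0)  # P(lambda_i = 0 | Y = y), the 0th row
print(abstain_row)  # [0.2 0.2]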
def _build_register_function(universe: bool, in_place: bool): # noqa: D202
"""Build a decorator function to tag transformation functions.
:param universe: Does the first positional argument of this function correspond to a universe graph?
:param in_place: Does this function return a new graph, or just modify it in-place?
"""
def register(func):
"""Tag a transformation function.
:param func: A function
:return: The same function, with additional properties added
"""
return _register_function(func.__name__, func, universe, in_place)
return register | Build a decorator function to tag transformation functions.
:param universe: Does the first positional argument of this function correspond to a universe graph?
:param in_place: Does this function return a new graph, or just modify it in-place? | Below is the the instruction that describes the task:
### Input:
Build a decorator function to tag transformation functions.
:param universe: Does the first positional argument of this function correspond to a universe graph?
:param in_place: Does this function return a new graph, or just modify it in-place?
### Response:
def _build_register_function(universe: bool, in_place: bool): # noqa: D202
"""Build a decorator function to tag transformation functions.
:param universe: Does the first positional argument of this function correspond to a universe graph?
:param in_place: Does this function return a new graph, or just modify it in-place?
"""
def register(func):
"""Tag a transformation function.
:param func: A function
:return: The same function, with additional properties added
"""
return _register_function(func.__name__, func, universe, in_place)
return register |
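A hypothetical use of the factory above; the decorator and function names are invented, and _register_function is assumed to be defined elsewhere in the same module.
in_place_transformation = _build_register_function(universe=False, in_place=True)

@in_place_transformation
def prune_isolated_nodes(graph):
    """Remove nodes without edges from the graph (illustrative stub)."""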
def p_expr_usr(p):
""" bexpr : USR bexpr %prec UMINUS
"""
if p[2].type_ == TYPE.string:
p[0] = make_builtin(p.lineno(1), 'USR_STR', p[2], type_=TYPE.uinteger)
else:
p[0] = make_builtin(p.lineno(1), 'USR',
make_typecast(TYPE.uinteger, p[2], p.lineno(1)),
type_=TYPE.uinteger) | bexpr : USR bexpr %prec UMINUS | Below is the the instruction that describes the task:
### Input:
bexpr : USR bexpr %prec UMINUS
### Response:
def p_expr_usr(p):
""" bexpr : USR bexpr %prec UMINUS
"""
if p[2].type_ == TYPE.string:
p[0] = make_builtin(p.lineno(1), 'USR_STR', p[2], type_=TYPE.uinteger)
else:
p[0] = make_builtin(p.lineno(1), 'USR',
make_typecast(TYPE.uinteger, p[2], p.lineno(1)),
type_=TYPE.uinteger) |
def WaitHotKeyReleased(hotkey: tuple) -> None:
"""hotkey: tuple, two ints tuple(modifierKey, key)"""
mod = {ModifierKey.Alt: Keys.VK_MENU,
ModifierKey.Control: Keys.VK_CONTROL,
ModifierKey.Shift: Keys.VK_SHIFT,
ModifierKey.Win: Keys.VK_LWIN
}
while True:
time.sleep(0.05)
if IsKeyPressed(hotkey[1]):
continue
for k, v in mod.items():
if k & hotkey[0]:
if IsKeyPressed(v):
break
else:
break | hotkey: tuple, two ints tuple(modifierKey, key) | Below is the the instruction that describes the task:
### Input:
hotkey: tuple, two ints tuple(modifierKey, key)
### Response:
def WaitHotKeyReleased(hotkey: tuple) -> None:
"""hotkey: tuple, two ints tuple(modifierKey, key)"""
mod = {ModifierKey.Alt: Keys.VK_MENU,
ModifierKey.Control: Keys.VK_CONTROL,
ModifierKey.Shift: Keys.VK_SHIFT,
ModifierKey.Win: Keys.VK_LWIN
}
while True:
time.sleep(0.05)
if IsKeyPressed(hotkey[1]):
continue
for k, v in mod.items():
if k & hotkey[0]:
if IsKeyPressed(v):
break
else:
break |
def _separate(self):
"""
get a width of separator for current column
:return: int
"""
if self.total_free_space is None:
return 0
else:
sepa = self.default_column_space
# we need to distribute remainders
if self.default_column_space_remainder > 0:
sepa += 1
self.default_column_space_remainder -= 1
logger.debug("remainder: %d, separator: %d",
self.default_column_space_remainder, sepa)
return sepa | get a width of separator for current column
:return: int | Below is the the instruction that describes the task:
### Input:
get a width of separator for current column
:return: int
### Response:
def _separate(self):
"""
get a width of separator for current column
:return: int
"""
if self.total_free_space is None:
return 0
else:
sepa = self.default_column_space
# we need to distribute remainders
if self.default_column_space_remainder > 0:
sepa += 1
self.default_column_space_remainder -= 1
logger.debug("remainder: %d, separator: %d",
self.default_column_space_remainder, sepa)
return sepa |
def p_expression_sla(self, p):
'expression : expression LSHIFTA expression'
p[0] = Sll(p[1], p[3], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | expression : expression LSHIFTA expression | Below is the the instruction that describes the task:
### Input:
expression : expression LSHIFTA expression
### Response:
def p_expression_sla(self, p):
'expression : expression LSHIFTA expression'
p[0] = Sll(p[1], p[3], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) |
def _cb_inform_interface_change(self, msg):
"""Update the sensors and requests available."""
self._logger.debug('cb_inform_interface_change(%s)', msg)
self._interface_changed.set() | Update the sensors and requests available. | Below is the the instruction that describes the task:
### Input:
Update the sensors and requests available.
### Response:
def _cb_inform_interface_change(self, msg):
"""Update the sensors and requests available."""
self._logger.debug('cb_inform_interface_change(%s)', msg)
self._interface_changed.set() |
def to_dict(mapreduce_yaml):
"""Converts a MapReduceYaml file into a JSON-encodable dictionary.
For use in user-visible UI and internal methods for interfacing with
  user code (like param validation).
Args:
    mapreduce_yaml: The Python representation of the mapreduce.yaml document.
Returns:
A list of configuration dictionaries.
"""
all_configs = []
for config in mapreduce_yaml.mapreduce:
out = {
"name": config.name,
"mapper_input_reader": config.mapper.input_reader,
"mapper_handler": config.mapper.handler,
}
if config.mapper.params_validator:
out["mapper_params_validator"] = config.mapper.params_validator
if config.mapper.params:
param_defaults = {}
for param in config.mapper.params:
param_defaults[param.name] = param.default or param.value
out["mapper_params"] = param_defaults
if config.params:
param_defaults = {}
for param in config.params:
param_defaults[param.name] = param.default or param.value
out["params"] = param_defaults
if config.mapper.output_writer:
out["mapper_output_writer"] = config.mapper.output_writer
all_configs.append(out)
return all_configs | Converts a MapReduceYaml file into a JSON-encodable dictionary.
For use in user-visible UI and internal methods for interfacing with
    user code (like param validation), as a list.
    Args:
    mapreduce_yaml: The Python representation of the mapreduce.yaml document.
    Returns:
    A list of configuration dictionaries. | Below is the instruction that describes the task:
### Input:
Converts a MapReduceYaml file into a JSON-encodable dictionary.
For use in user-visible UI and internal methods for interfacing with
    user code (like param validation), as a list.
    Args:
    mapreduce_yaml: The Python representation of the mapreduce.yaml document.
Returns:
A list of configuration dictionaries.
### Response:
def to_dict(mapreduce_yaml):
"""Converts a MapReduceYaml file into a JSON-encodable dictionary.
For use in user-visible UI and internal methods for interfacing with
    user code (like param validation), as a list.
    Args:
    mapreduce_yaml: The Python representation of the mapreduce.yaml document.
Returns:
A list of configuration dictionaries.
"""
all_configs = []
for config in mapreduce_yaml.mapreduce:
out = {
"name": config.name,
"mapper_input_reader": config.mapper.input_reader,
"mapper_handler": config.mapper.handler,
}
if config.mapper.params_validator:
out["mapper_params_validator"] = config.mapper.params_validator
if config.mapper.params:
param_defaults = {}
for param in config.mapper.params:
param_defaults[param.name] = param.default or param.value
out["mapper_params"] = param_defaults
if config.params:
param_defaults = {}
for param in config.params:
param_defaults[param.name] = param.default or param.value
out["params"] = param_defaults
if config.mapper.output_writer:
out["mapper_output_writer"] = config.mapper.output_writer
all_configs.append(out)
return all_configs |
def explained_variance(pred:Tensor, targ:Tensor)->Rank0Tensor:
"Explained variance between `pred` and `targ`."
pred,targ = flatten_check(pred,targ)
var_pct = torch.var(targ - pred) / torch.var(targ)
    return 1 - var_pct | Explained variance between `pred` and `targ`. | Below is the instruction that describes the task:
### Input:
Explained variance between `pred` and `targ`.
### Response:
def explained_variance(pred:Tensor, targ:Tensor)->Rank0Tensor:
"Explained variance between `pred` and `targ`."
pred,targ = flatten_check(pred,targ)
var_pct = torch.var(targ - pred) / torch.var(targ)
return 1 - var_pct |
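The metric itself is just 1 - Var(targ - pred) / Var(targ); a plain NumPy re-statement of the formula (NumPy stands in here for the PyTorch tensors and the fastai flatten_check helper used above):

import numpy as np

def explained_variance_np(pred, targ):
    # 1 - Var(residual) / Var(target); 1.0 means a perfect fit, 0.0 means the
    # predictions explain none of the target's variance.
    return 1.0 - np.var(targ - pred) / np.var(targ)

targ = np.array([1.0, 2.0, 3.0, 4.0])
pred = np.array([1.1, 1.9, 3.2, 3.8])
print(explained_variance_np(pred, targ))  # ~0.98 for this close fit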
def tracefunc_xml(func):
"""
Causes output of function to be printed in an XML style block
"""
funcname = meta_util_six.get_funcname(func)
def wrp_tracefunc2(*args, **kwargs):
verbose = kwargs.get('verbose', True)
if verbose:
print('<%s>' % (funcname,))
with util_print.Indenter(' '):
ret = func(*args, **kwargs)
if verbose:
print('</%s>' % (funcname,))
return ret
wrp_tracefunc2_ = ignores_exc_tb(wrp_tracefunc2)
wrp_tracefunc2_ = preserve_sig(wrp_tracefunc2_, func)
    return wrp_tracefunc2_ | Causes output of function to be printed in an XML style block | Below is the instruction that describes the task:
### Input:
Causes output of function to be printed in an XML style block
### Response:
def tracefunc_xml(func):
"""
Causes output of function to be printed in an XML style block
"""
funcname = meta_util_six.get_funcname(func)
def wrp_tracefunc2(*args, **kwargs):
verbose = kwargs.get('verbose', True)
if verbose:
print('<%s>' % (funcname,))
with util_print.Indenter(' '):
ret = func(*args, **kwargs)
if verbose:
print('</%s>' % (funcname,))
return ret
wrp_tracefunc2_ = ignores_exc_tb(wrp_tracefunc2)
wrp_tracefunc2_ = preserve_sig(wrp_tracefunc2_, func)
return wrp_tracefunc2_ |
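The library-specific wrappers (ignores_exc_tb, preserve_sig, util_print.Indenter) handle traceback and signature bookkeeping; the core pattern is an ordinary decorator that brackets the call with opening and closing tags. A minimal standalone sketch of just that part:

import functools

def tracefunc_xml_minimal(func):
    # Prints <name> before the call and </name> after it, nothing else.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        print('<%s>' % func.__name__)
        result = func(*args, **kwargs)
        print('</%s>' % func.__name__)
        return result
    return wrapper

@tracefunc_xml_minimal
def add(a, b):
    return a + b

add(1, 2)  # prints <add> then </add>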
def tokenize_sents(string):
"""
Tokenize input text to sentences.
:param string: Text to tokenize
:type string: str or unicode
:return: sentences
:rtype: list of strings
"""
string = six.text_type(string)
spans = []
for match in re.finditer('[^\s]+', string):
spans.append(match)
spans_count = len(spans)
rez = []
off = 0
for i in range(spans_count):
tok = string[spans[i].start():spans[i].end()]
if i == spans_count - 1:
rez.append(string[off:spans[i].end()])
elif tok[-1] in ['.', '!', '?', '…', '»']:
tok1 = tok[re.search('[.!?…»]', tok).start()-1]
next_tok = string[spans[i + 1].start():spans[i + 1].end()]
if (next_tok[0].isupper()
and not tok1.isupper()
and not (tok[-1] != '.'
or tok1[0] == '('
or tok in ABBRS)):
rez.append(string[off:spans[i].end()])
off = spans[i + 1].start()
return rez | Tokenize input text to sentences.
:param string: Text to tokenize
:type string: str or unicode
:return: sentences
    :rtype: list of strings | Below is the instruction that describes the task:
### Input:
Tokenize input text to sentences.
:param string: Text to tokenize
:type string: str or unicode
:return: sentences
:rtype: list of strings
### Response:
def tokenize_sents(string):
"""
Tokenize input text to sentences.
:param string: Text to tokenize
:type string: str or unicode
:return: sentences
:rtype: list of strings
"""
string = six.text_type(string)
spans = []
for match in re.finditer('[^\s]+', string):
spans.append(match)
spans_count = len(spans)
rez = []
off = 0
for i in range(spans_count):
tok = string[spans[i].start():spans[i].end()]
if i == spans_count - 1:
rez.append(string[off:spans[i].end()])
elif tok[-1] in ['.', '!', '?', '…', '»']:
tok1 = tok[re.search('[.!?…»]', tok).start()-1]
next_tok = string[spans[i + 1].start():spans[i + 1].end()]
if (next_tok[0].isupper()
and not tok1.isupper()
and not (tok[-1] != '.'
or tok1[0] == '('
or tok in ABBRS)):
rez.append(string[off:spans[i].end()])
off = spans[i + 1].start()
return rez |
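The function first records a re match object per whitespace-separated token, then walks those spans and cuts the original string at sentence-final punctuation, using the abbreviation and capitalisation checks above to avoid false splits. A tiny sketch of the span-collection step on its own (English sample text, purely for illustration):

import re

text = "First sentence. Second one!"
spans = list(re.finditer(r'[^\s]+', text))
# Each match keeps character offsets into the original string, which is what
# lets the sentence splitter slice the untouched input between boundaries.
print([(m.group(), m.start(), m.end()) for m in spans])
# [('First', 0, 5), ('sentence.', 6, 15), ('Second', 16, 22), ('one!', 23, 27)]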
def turb_ice(turbice: [str], unit: str = 'ft') -> str: # type: ignore
"""
    Translate the list of turbulence or icing into a readable sentence
Ex: Occasional moderate turbulence in clouds from 3000ft to 14000ft
"""
if not turbice:
return ''
    # Determine turbulence or icing
if turbice[0][0] == '5':
conditions = TURBULANCE_CONDITIONS
elif turbice[0][0] == '6':
conditions = ICING_CONDITIONS
else:
return ''
# Create list of split items (type, floor, height)
split = []
for item in turbice:
if len(item) == 6:
split.append([item[1:2], item[2:5], item[5]])
# Combine items that cover a layer greater than 9000ft
for i in reversed(range(len(split) - 1)):
if split[i][2] == '9' and split[i][0] == split[i + 1][0] \
and int(split[i + 1][1]) == (int(split[i][1]) + int(split[i][2]) * 10):
split[i][2] = str(int(split[i][2]) + int(split[i + 1][2]))
split.pop(i + 1)
# Return joined, formatted string from split items
return ', '.join(['{conditions} from {low_alt}{unit} to {high_alt}{unit}'.format(
conditions=conditions[item[0]], low_alt=int(item[1]) * 100,
        high_alt=int(item[1]) * 100 + int(item[2]) * 1000, unit=unit) for item in split]) | Translate the list of turbulence or icing into a readable sentence
    Ex: Occasional moderate turbulence in clouds from 3000ft to 14000ft | Below is the instruction that describes the task:
### Input:
Translate the list of turbulence or icing into a readable sentence
Ex: Occasional moderate turbulence in clouds from 3000ft to 14000ft
### Response:
def turb_ice(turbice: [str], unit: str = 'ft') -> str: # type: ignore
"""
    Translate the list of turbulence or icing into a readable sentence
Ex: Occasional moderate turbulence in clouds from 3000ft to 14000ft
"""
if not turbice:
return ''
    # Determine turbulence or icing
if turbice[0][0] == '5':
conditions = TURBULANCE_CONDITIONS
elif turbice[0][0] == '6':
conditions = ICING_CONDITIONS
else:
return ''
# Create list of split items (type, floor, height)
split = []
for item in turbice:
if len(item) == 6:
split.append([item[1:2], item[2:5], item[5]])
# Combine items that cover a layer greater than 9000ft
for i in reversed(range(len(split) - 1)):
if split[i][2] == '9' and split[i][0] == split[i + 1][0] \
and int(split[i + 1][1]) == (int(split[i][1]) + int(split[i][2]) * 10):
split[i][2] = str(int(split[i][2]) + int(split[i + 1][2]))
split.pop(i + 1)
# Return joined, formatted string from split items
return ', '.join(['{conditions} from {low_alt}{unit} to {high_alt}{unit}'.format(
conditions=conditions[item[0]], low_alt=int(item[1]) * 100,
high_alt=int(item[1]) * 100 + int(item[2]) * 1000, unit=unit) for item in split]) |
def by_position(self, position):
"""Like `.get()`, but by position number."""
# don't use .first(), so that MultipleResultsFound can be raised
try:
return self.filter_by(position=position).one()
except sa.orm.exc.NoResultFound:
            return None | Like `.get()`, but by position number. | Below is the instruction that describes the task:
### Input:
Like `.get()`, but by position number.
### Response:
def by_position(self, position):
"""Like `.get()`, but by position number."""
# don't use .first(), so that MultipleResultsFound can be raised
try:
return self.filter_by(position=position).one()
except sa.orm.exc.NoResultFound:
return None |
def get_tables(self):
""" Returns a collection of this worksheet tables"""
url = self.build_url(self._endpoints.get('get_tables'))
response = self.session.get(url)
if not response:
return []
data = response.json()
return [self.table_constructor(parent=self, **{self._cloud_data_key: table})
                for table in data.get('value', [])] | Returns a collection of this worksheet's tables | Below is the instruction that describes the task:
### Input:
 Returns a collection of this worksheet's tables
### Response:
def get_tables(self):
""" Returns a collection of this worksheet tables"""
url = self.build_url(self._endpoints.get('get_tables'))
response = self.session.get(url)
if not response:
return []
data = response.json()
return [self.table_constructor(parent=self, **{self._cloud_data_key: table})
for table in data.get('value', [])] |
def to_vcf(self, path, rename=None, number=None, description=None,
fill=None, write_header=True):
r"""Write to a variant call format (VCF) file.
Parameters
----------
path : string
File path.
rename : dict, optional
Rename these columns in the VCF.
number : dict, optional
Override the number specified in INFO headers.
description : dict, optional
Descriptions for the INFO and FILTER headers.
fill : dict, optional
Fill values used for missing data in the table.
write_header : bool, optional
If True write VCF header.
Examples
--------
Setup a variant table to write out::
>>> import allel
>>> chrom = [b'chr1', b'chr1', b'chr2', b'chr2', b'chr3']
>>> pos = [2, 6, 3, 8, 1]
>>> ids = ['a', 'b', 'c', 'd', 'e']
>>> ref = [b'A', b'C', b'T', b'G', b'N']
>>> alt = [(b'T', b'.'),
... (b'G', b'.'),
... (b'A', b'C'),
... (b'C', b'A'),
... (b'X', b'.')]
>>> qual = [1.2, 2.3, 3.4, 4.5, 5.6]
>>> filter_qd = [True, True, True, False, False]
>>> filter_dp = [True, False, True, False, False]
>>> dp = [12, 23, 34, 45, 56]
>>> qd = [12.3, 23.4, 34.5, 45.6, 56.7]
>>> flg = [True, False, True, False, True]
>>> ac = [(1, -1), (3, -1), (5, 6), (7, 8), (9, -1)]
>>> xx = [(1.2, 2.3), (3.4, 4.5), (5.6, 6.7), (7.8, 8.9),
... (9.0, 9.9)]
>>> columns = [chrom, pos, ids, ref, alt, qual, filter_dp,
... filter_qd, dp, qd, flg, ac, xx]
>>> records = list(zip(*columns))
>>> dtype = [('CHROM', 'S4'),
... ('POS', 'u4'),
... ('ID', 'S1'),
... ('REF', 'S1'),
... ('ALT', ('S1', 2)),
... ('qual', 'f4'),
... ('filter_dp', bool),
... ('filter_qd', bool),
... ('dp', int),
... ('qd', float),
... ('flg', bool),
... ('ac', (int, 2)),
... ('xx', (float, 2))]
>>> vt = allel.VariantTable(records, dtype=dtype)
Now write out to VCF and inspect the result::
>>> rename = {'dp': 'DP', 'qd': 'QD', 'filter_qd': 'QD'}
>>> fill = {'ALT': b'.', 'ac': -1}
>>> number = {'ac': 'A'}
>>> description = {'ac': 'Allele counts', 'filter_dp': 'Low depth'}
>>> vt.to_vcf('example.vcf', rename=rename, fill=fill,
... number=number, description=description)
>>> print(open('example.vcf').read())
##fileformat=VCFv4.1
##fileDate=...
##source=...
##INFO=<ID=DP,Number=1,Type=Integer,Description="">
##INFO=<ID=QD,Number=1,Type=Float,Description="">
##INFO=<ID=ac,Number=A,Type=Integer,Description="Allele counts">
##INFO=<ID=flg,Number=0,Type=Flag,Description="">
##INFO=<ID=xx,Number=2,Type=Float,Description="">
##FILTER=<ID=QD,Description="">
##FILTER=<ID=dp,Description="Low depth">
#CHROM POS ID REF ALT QUAL FILTER INFO
chr1 2 a A T 1.2 QD;dp DP=12;QD=12.3;ac=1;flg;xx=...
chr1 6 b C G 2.3 QD DP=23;QD=23.4;ac=3;xx=3.4,4.5
chr2 3 c T A,C 3.4 QD;dp DP=34;QD=34.5;ac=5,6;flg;x...
chr2 8 d G C,A 4.5 PASS DP=45;QD=45.6;ac=7,8;xx=7...
chr3 1 e N X 5.6 PASS DP=56;QD=56.7;ac=9;flg;xx=...
"""
write_vcf(path, callset=self, rename=rename, number=number,
description=description, fill=fill,
write_header=write_header) | r"""Write to a variant call format (VCF) file.
Parameters
----------
path : string
File path.
rename : dict, optional
Rename these columns in the VCF.
number : dict, optional
Override the number specified in INFO headers.
description : dict, optional
Descriptions for the INFO and FILTER headers.
fill : dict, optional
Fill values used for missing data in the table.
write_header : bool, optional
If True write VCF header.
Examples
--------
Setup a variant table to write out::
>>> import allel
>>> chrom = [b'chr1', b'chr1', b'chr2', b'chr2', b'chr3']
>>> pos = [2, 6, 3, 8, 1]
>>> ids = ['a', 'b', 'c', 'd', 'e']
>>> ref = [b'A', b'C', b'T', b'G', b'N']
>>> alt = [(b'T', b'.'),
... (b'G', b'.'),
... (b'A', b'C'),
... (b'C', b'A'),
... (b'X', b'.')]
>>> qual = [1.2, 2.3, 3.4, 4.5, 5.6]
>>> filter_qd = [True, True, True, False, False]
>>> filter_dp = [True, False, True, False, False]
>>> dp = [12, 23, 34, 45, 56]
>>> qd = [12.3, 23.4, 34.5, 45.6, 56.7]
>>> flg = [True, False, True, False, True]
>>> ac = [(1, -1), (3, -1), (5, 6), (7, 8), (9, -1)]
>>> xx = [(1.2, 2.3), (3.4, 4.5), (5.6, 6.7), (7.8, 8.9),
... (9.0, 9.9)]
>>> columns = [chrom, pos, ids, ref, alt, qual, filter_dp,
... filter_qd, dp, qd, flg, ac, xx]
>>> records = list(zip(*columns))
>>> dtype = [('CHROM', 'S4'),
... ('POS', 'u4'),
... ('ID', 'S1'),
... ('REF', 'S1'),
... ('ALT', ('S1', 2)),
... ('qual', 'f4'),
... ('filter_dp', bool),
... ('filter_qd', bool),
... ('dp', int),
... ('qd', float),
... ('flg', bool),
... ('ac', (int, 2)),
... ('xx', (float, 2))]
>>> vt = allel.VariantTable(records, dtype=dtype)
Now write out to VCF and inspect the result::
>>> rename = {'dp': 'DP', 'qd': 'QD', 'filter_qd': 'QD'}
>>> fill = {'ALT': b'.', 'ac': -1}
>>> number = {'ac': 'A'}
>>> description = {'ac': 'Allele counts', 'filter_dp': 'Low depth'}
>>> vt.to_vcf('example.vcf', rename=rename, fill=fill,
... number=number, description=description)
>>> print(open('example.vcf').read())
##fileformat=VCFv4.1
##fileDate=...
##source=...
##INFO=<ID=DP,Number=1,Type=Integer,Description="">
##INFO=<ID=QD,Number=1,Type=Float,Description="">
##INFO=<ID=ac,Number=A,Type=Integer,Description="Allele counts">
##INFO=<ID=flg,Number=0,Type=Flag,Description="">
##INFO=<ID=xx,Number=2,Type=Float,Description="">
##FILTER=<ID=QD,Description="">
##FILTER=<ID=dp,Description="Low depth">
#CHROM POS ID REF ALT QUAL FILTER INFO
chr1 2 a A T 1.2 QD;dp DP=12;QD=12.3;ac=1;flg;xx=...
chr1 6 b C G 2.3 QD DP=23;QD=23.4;ac=3;xx=3.4,4.5
chr2 3 c T A,C 3.4 QD;dp DP=34;QD=34.5;ac=5,6;flg;x...
chr2 8 d G C,A 4.5 PASS DP=45;QD=45.6;ac=7,8;xx=7...
    chr3 1 e N X 5.6 PASS DP=56;QD=56.7;ac=9;flg;xx=... | Below is the instruction that describes the task:
### Input:
r"""Write to a variant call format (VCF) file.
Parameters
----------
path : string
File path.
rename : dict, optional
Rename these columns in the VCF.
number : dict, optional
Override the number specified in INFO headers.
description : dict, optional
Descriptions for the INFO and FILTER headers.
fill : dict, optional
Fill values used for missing data in the table.
write_header : bool, optional
If True write VCF header.
Examples
--------
Setup a variant table to write out::
>>> import allel
>>> chrom = [b'chr1', b'chr1', b'chr2', b'chr2', b'chr3']
>>> pos = [2, 6, 3, 8, 1]
>>> ids = ['a', 'b', 'c', 'd', 'e']
>>> ref = [b'A', b'C', b'T', b'G', b'N']
>>> alt = [(b'T', b'.'),
... (b'G', b'.'),
... (b'A', b'C'),
... (b'C', b'A'),
... (b'X', b'.')]
>>> qual = [1.2, 2.3, 3.4, 4.5, 5.6]
>>> filter_qd = [True, True, True, False, False]
>>> filter_dp = [True, False, True, False, False]
>>> dp = [12, 23, 34, 45, 56]
>>> qd = [12.3, 23.4, 34.5, 45.6, 56.7]
>>> flg = [True, False, True, False, True]
>>> ac = [(1, -1), (3, -1), (5, 6), (7, 8), (9, -1)]
>>> xx = [(1.2, 2.3), (3.4, 4.5), (5.6, 6.7), (7.8, 8.9),
... (9.0, 9.9)]
>>> columns = [chrom, pos, ids, ref, alt, qual, filter_dp,
... filter_qd, dp, qd, flg, ac, xx]
>>> records = list(zip(*columns))
>>> dtype = [('CHROM', 'S4'),
... ('POS', 'u4'),
... ('ID', 'S1'),
... ('REF', 'S1'),
... ('ALT', ('S1', 2)),
... ('qual', 'f4'),
... ('filter_dp', bool),
... ('filter_qd', bool),
... ('dp', int),
... ('qd', float),
... ('flg', bool),
... ('ac', (int, 2)),
... ('xx', (float, 2))]
>>> vt = allel.VariantTable(records, dtype=dtype)
Now write out to VCF and inspect the result::
>>> rename = {'dp': 'DP', 'qd': 'QD', 'filter_qd': 'QD'}
>>> fill = {'ALT': b'.', 'ac': -1}
>>> number = {'ac': 'A'}
>>> description = {'ac': 'Allele counts', 'filter_dp': 'Low depth'}
>>> vt.to_vcf('example.vcf', rename=rename, fill=fill,
... number=number, description=description)
>>> print(open('example.vcf').read())
##fileformat=VCFv4.1
##fileDate=...
##source=...
##INFO=<ID=DP,Number=1,Type=Integer,Description="">
##INFO=<ID=QD,Number=1,Type=Float,Description="">
##INFO=<ID=ac,Number=A,Type=Integer,Description="Allele counts">
##INFO=<ID=flg,Number=0,Type=Flag,Description="">
##INFO=<ID=xx,Number=2,Type=Float,Description="">
##FILTER=<ID=QD,Description="">
##FILTER=<ID=dp,Description="Low depth">
#CHROM POS ID REF ALT QUAL FILTER INFO
chr1 2 a A T 1.2 QD;dp DP=12;QD=12.3;ac=1;flg;xx=...
chr1 6 b C G 2.3 QD DP=23;QD=23.4;ac=3;xx=3.4,4.5
chr2 3 c T A,C 3.4 QD;dp DP=34;QD=34.5;ac=5,6;flg;x...
chr2 8 d G C,A 4.5 PASS DP=45;QD=45.6;ac=7,8;xx=7...
chr3 1 e N X 5.6 PASS DP=56;QD=56.7;ac=9;flg;xx=...
### Response:
def to_vcf(self, path, rename=None, number=None, description=None,
fill=None, write_header=True):
r"""Write to a variant call format (VCF) file.
Parameters
----------
path : string
File path.
rename : dict, optional
Rename these columns in the VCF.
number : dict, optional
Override the number specified in INFO headers.
description : dict, optional
Descriptions for the INFO and FILTER headers.
fill : dict, optional
Fill values used for missing data in the table.
write_header : bool, optional
If True write VCF header.
Examples
--------
Setup a variant table to write out::
>>> import allel
>>> chrom = [b'chr1', b'chr1', b'chr2', b'chr2', b'chr3']
>>> pos = [2, 6, 3, 8, 1]
>>> ids = ['a', 'b', 'c', 'd', 'e']
>>> ref = [b'A', b'C', b'T', b'G', b'N']
>>> alt = [(b'T', b'.'),
... (b'G', b'.'),
... (b'A', b'C'),
... (b'C', b'A'),
... (b'X', b'.')]
>>> qual = [1.2, 2.3, 3.4, 4.5, 5.6]
>>> filter_qd = [True, True, True, False, False]
>>> filter_dp = [True, False, True, False, False]
>>> dp = [12, 23, 34, 45, 56]
>>> qd = [12.3, 23.4, 34.5, 45.6, 56.7]
>>> flg = [True, False, True, False, True]
>>> ac = [(1, -1), (3, -1), (5, 6), (7, 8), (9, -1)]
>>> xx = [(1.2, 2.3), (3.4, 4.5), (5.6, 6.7), (7.8, 8.9),
... (9.0, 9.9)]
>>> columns = [chrom, pos, ids, ref, alt, qual, filter_dp,
... filter_qd, dp, qd, flg, ac, xx]
>>> records = list(zip(*columns))
>>> dtype = [('CHROM', 'S4'),
... ('POS', 'u4'),
... ('ID', 'S1'),
... ('REF', 'S1'),
... ('ALT', ('S1', 2)),
... ('qual', 'f4'),
... ('filter_dp', bool),
... ('filter_qd', bool),
... ('dp', int),
... ('qd', float),
... ('flg', bool),
... ('ac', (int, 2)),
... ('xx', (float, 2))]
>>> vt = allel.VariantTable(records, dtype=dtype)
Now write out to VCF and inspect the result::
>>> rename = {'dp': 'DP', 'qd': 'QD', 'filter_qd': 'QD'}
>>> fill = {'ALT': b'.', 'ac': -1}
>>> number = {'ac': 'A'}
>>> description = {'ac': 'Allele counts', 'filter_dp': 'Low depth'}
>>> vt.to_vcf('example.vcf', rename=rename, fill=fill,
... number=number, description=description)
>>> print(open('example.vcf').read())
##fileformat=VCFv4.1
##fileDate=...
##source=...
##INFO=<ID=DP,Number=1,Type=Integer,Description="">
##INFO=<ID=QD,Number=1,Type=Float,Description="">
##INFO=<ID=ac,Number=A,Type=Integer,Description="Allele counts">
##INFO=<ID=flg,Number=0,Type=Flag,Description="">
##INFO=<ID=xx,Number=2,Type=Float,Description="">
##FILTER=<ID=QD,Description="">
##FILTER=<ID=dp,Description="Low depth">
#CHROM POS ID REF ALT QUAL FILTER INFO
chr1 2 a A T 1.2 QD;dp DP=12;QD=12.3;ac=1;flg;xx=...
chr1 6 b C G 2.3 QD DP=23;QD=23.4;ac=3;xx=3.4,4.5
chr2 3 c T A,C 3.4 QD;dp DP=34;QD=34.5;ac=5,6;flg;x...
chr2 8 d G C,A 4.5 PASS DP=45;QD=45.6;ac=7,8;xx=7...
chr3 1 e N X 5.6 PASS DP=56;QD=56.7;ac=9;flg;xx=...
"""
write_vcf(path, callset=self, rename=rename, number=number,
description=description, fill=fill,
write_header=write_header) |
def scale_dtype(arr, dtype):
"""Convert an array from 0..1 to dtype, scaling up linearly
"""
max_int = np.iinfo(dtype).max
    return (arr * max_int).astype(dtype) | Convert an array from 0..1 to dtype, scaling up linearly | Below is the instruction that describes the task:
### Input:
Convert an array from 0..1 to dtype, scaling up linearly
### Response:
def scale_dtype(arr, dtype):
"""Convert an array from 0..1 to dtype, scaling up linearly
"""
max_int = np.iinfo(dtype).max
return (arr * max_int).astype(dtype) |
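A quick check of the scaling with a concrete dtype (uint8 picked arbitrarily); the two-line body is restated so the snippet runs on its own:

import numpy as np

def scale_dtype(arr, dtype):
    # Stretch values in 0..1 to the full integer range of the target dtype.
    max_int = np.iinfo(dtype).max
    return (arr * max_int).astype(dtype)

print(scale_dtype(np.array([0.0, 0.5, 1.0]), np.uint8))  # [  0 127 255]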
def index():
"""Display a list of all user institutes."""
institute_objs = user_institutes(store, current_user)
institutes_count = ((institute_obj, store.cases(collaborator=institute_obj['_id']).count())
for institute_obj in institute_objs if institute_obj)
    return dict(institutes=institutes_count) | Display a list of all user institutes. | Below is the instruction that describes the task:
### Input:
Display a list of all user institutes.
### Response:
def index():
"""Display a list of all user institutes."""
institute_objs = user_institutes(store, current_user)
institutes_count = ((institute_obj, store.cases(collaborator=institute_obj['_id']).count())
for institute_obj in institute_objs if institute_obj)
return dict(institutes=institutes_count) |
def conv_elems_1d(x, factor, out_depth=None):
"""Decrease the length and change the dimensionality.
Merge/restore/compress factors positions of dim depth of the input into
a single position of dim out_depth.
This is basically just a strided convolution without overlap
between each strides. The original length has to be divided by factor.
Args:
x (tf.Tensor): shape [batch_size, length, depth]
factor (int): Length compression factor.
out_depth (int): Output depth
Returns:
tf.Tensor: shape [batch_size, length//factor, out_depth]
"""
out_depth = out_depth or x.get_shape().as_list()[-1]
# with tf.control_dependencies( # Dynamic assertion
# [tf.assert_equal(tf.shape(x)[1] % factor, 0)]):
x = tf.expand_dims(x, 1) # [batch_size, 1, length, depth]
x = layers().Conv2D(
filters=out_depth,
kernel_size=(1, factor),
strides=(1, factor),
padding="valid",
data_format="channels_last",
)(x) # [batch_size, 1, length//factor, out_depth]
x = tf.squeeze(x, 1) # [batch_size, length//factor, depth]
return x | Decrease the length and change the dimensionality.
Merge/restore/compress factors positions of dim depth of the input into
a single position of dim out_depth.
This is basically just a strided convolution without overlap
between each strides. The original length has to be divided by factor.
Args:
x (tf.Tensor): shape [batch_size, length, depth]
factor (int): Length compression factor.
out_depth (int): Output depth
Returns:
    tf.Tensor: shape [batch_size, length//factor, out_depth] | Below is the instruction that describes the task:
### Input:
Decrease the length and change the dimensionality.
Merge/restore/compress factors positions of dim depth of the input into
a single position of dim out_depth.
This is basically just a strided convolution without overlap
between each strides. The original length has to be divided by factor.
Args:
x (tf.Tensor): shape [batch_size, length, depth]
factor (int): Length compression factor.
out_depth (int): Output depth
Returns:
tf.Tensor: shape [batch_size, length//factor, out_depth]
### Response:
def conv_elems_1d(x, factor, out_depth=None):
"""Decrease the length and change the dimensionality.
Merge/restore/compress factors positions of dim depth of the input into
a single position of dim out_depth.
This is basically just a strided convolution without overlap
between each strides. The original length has to be divided by factor.
Args:
x (tf.Tensor): shape [batch_size, length, depth]
factor (int): Length compression factor.
out_depth (int): Output depth
Returns:
tf.Tensor: shape [batch_size, length//factor, out_depth]
"""
out_depth = out_depth or x.get_shape().as_list()[-1]
# with tf.control_dependencies( # Dynamic assertion
# [tf.assert_equal(tf.shape(x)[1] % factor, 0)]):
x = tf.expand_dims(x, 1) # [batch_size, 1, length, depth]
x = layers().Conv2D(
filters=out_depth,
kernel_size=(1, factor),
strides=(1, factor),
padding="valid",
data_format="channels_last",
)(x) # [batch_size, 1, length//factor, out_depth]
x = tf.squeeze(x, 1) # [batch_size, length//factor, depth]
return x |
def _make_request(session, url, argument=None, params=None, raw=False):
"""Make a request to API endpoint."""
if not params:
params = {}
params['key'] = session.auth.key
try:
if argument:
request_url = '{}{}{}{}'.format(session.auth.base_url, VOOBLY_API_URL, url, argument)
else:
request_url = '{}{}'.format(VOOBLY_API_URL, url)
resp = session.get(request_url, params=params)
except RequestException:
raise VooblyError('failed to connect')
if resp.text == 'bad-key':
raise VooblyError('bad api key')
elif resp.text == 'too-busy':
raise VooblyError('service too busy')
elif not resp.text:
raise VooblyError('no data returned')
if raw:
return resp.text
try:
return tablib.Dataset().load(resp.text).dict
except UnsupportedFormat:
        raise VooblyError('unexpected error {}'.format(resp.text)) | Make a request to API endpoint. | Below is the instruction that describes the task:
### Input:
Make a request to API endpoint.
### Response:
def _make_request(session, url, argument=None, params=None, raw=False):
"""Make a request to API endpoint."""
if not params:
params = {}
params['key'] = session.auth.key
try:
if argument:
request_url = '{}{}{}{}'.format(session.auth.base_url, VOOBLY_API_URL, url, argument)
else:
request_url = '{}{}'.format(VOOBLY_API_URL, url)
resp = session.get(request_url, params=params)
except RequestException:
raise VooblyError('failed to connect')
if resp.text == 'bad-key':
raise VooblyError('bad api key')
elif resp.text == 'too-busy':
raise VooblyError('service too busy')
elif not resp.text:
raise VooblyError('no data returned')
if raw:
return resp.text
try:
return tablib.Dataset().load(resp.text).dict
except UnsupportedFormat:
raise VooblyError('unexpected error {}'.format(resp.text)) |
def tokenize(self, s, pattern=None, active=None):
"""
Rewrite and tokenize the input string *s*.
Args:
s (str): the input string to process
pattern (str, optional): the regular expression pattern on
which to split tokens; defaults to `[ \t]+`
active (optional): a collection of external module names
that may be applied if called
Returns:
a :class:`~delphin.tokens.YyTokenLattice` containing the
tokens and their characterization information
"""
if pattern is None:
if self.tokenize_pattern is None:
pattern = r'[ \t]+'
else:
pattern = self.tokenize_pattern
if active is None:
active = self.active
return self.group.tokenize(s, pattern=pattern, active=active) | Rewrite and tokenize the input string *s*.
Args:
s (str): the input string to process
pattern (str, optional): the regular expression pattern on
which to split tokens; defaults to `[ \t]+`
active (optional): a collection of external module names
that may be applied if called
Returns:
a :class:`~delphin.tokens.YyTokenLattice` containing the
        tokens and their characterization information | Below is the instruction that describes the task:
### Input:
Rewrite and tokenize the input string *s*.
Args:
s (str): the input string to process
pattern (str, optional): the regular expression pattern on
which to split tokens; defaults to `[ \t]+`
active (optional): a collection of external module names
that may be applied if called
Returns:
a :class:`~delphin.tokens.YyTokenLattice` containing the
tokens and their characterization information
### Response:
def tokenize(self, s, pattern=None, active=None):
"""
Rewrite and tokenize the input string *s*.
Args:
s (str): the input string to process
pattern (str, optional): the regular expression pattern on
which to split tokens; defaults to `[ \t]+`
active (optional): a collection of external module names
that may be applied if called
Returns:
a :class:`~delphin.tokens.YyTokenLattice` containing the
tokens and their characterization information
"""
if pattern is None:
if self.tokenize_pattern is None:
pattern = r'[ \t]+'
else:
pattern = self.tokenize_pattern
if active is None:
active = self.active
return self.group.tokenize(s, pattern=pattern, active=active) |
def conv_block(name, x, mid_channels, dilations=None, activation="relu",
dropout=0.0):
"""2 layer conv block used in the affine coupling layer.
Args:
name: variable scope.
x: 4-D or 5-D Tensor.
mid_channels: Output channels of the second layer.
dilations: Optional, list of integers.
activation: relu or gatu.
If relu, the second layer is relu(W*x)
If gatu, the second layer is tanh(W1*x) * sigmoid(W2*x)
dropout: Dropout probability.
Returns:
x: 4-D Tensor: Output activations.
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
x_shape = common_layers.shape_list(x)
is_2d = len(x_shape) == 4
num_steps = x_shape[1]
if is_2d:
first_filter = [3, 3]
second_filter = [1, 1]
else:
# special case when number of steps equal 1 to avoid
# padding.
if num_steps == 1:
first_filter = [1, 3, 3]
else:
first_filter = [2, 3, 3]
second_filter = [1, 1, 1]
# Edge Padding + conv2d + actnorm + relu:
# [output: 512 channels]
x = conv("1_1", x, output_channels=mid_channels, filter_size=first_filter,
dilations=dilations)
x = tf.nn.relu(x)
x = get_dropout(x, rate=dropout)
# Padding + conv2d + actnorm + activation.
# [input, output: 512 channels]
if activation == "relu":
x = conv("1_2", x, output_channels=mid_channels,
filter_size=second_filter, dilations=dilations)
x = tf.nn.relu(x)
elif activation == "gatu":
# x = tanh(w1*x) * sigm(w2*x)
x_tanh = conv("1_tanh", x, output_channels=mid_channels,
filter_size=second_filter, dilations=dilations)
x_sigm = conv("1_sigm", x, output_channels=mid_channels,
filter_size=second_filter, dilations=dilations)
x = tf.nn.tanh(x_tanh) * tf.nn.sigmoid(x_sigm)
x = get_dropout(x, rate=dropout)
return x | 2 layer conv block used in the affine coupling layer.
Args:
name: variable scope.
x: 4-D or 5-D Tensor.
mid_channels: Output channels of the second layer.
dilations: Optional, list of integers.
activation: relu or gatu.
If relu, the second layer is relu(W*x)
If gatu, the second layer is tanh(W1*x) * sigmoid(W2*x)
dropout: Dropout probability.
Returns:
    x: 4-D Tensor: Output activations. | Below is the instruction that describes the task:
### Input:
2 layer conv block used in the affine coupling layer.
Args:
name: variable scope.
x: 4-D or 5-D Tensor.
mid_channels: Output channels of the second layer.
dilations: Optional, list of integers.
activation: relu or gatu.
If relu, the second layer is relu(W*x)
If gatu, the second layer is tanh(W1*x) * sigmoid(W2*x)
dropout: Dropout probability.
Returns:
x: 4-D Tensor: Output activations.
### Response:
def conv_block(name, x, mid_channels, dilations=None, activation="relu",
dropout=0.0):
"""2 layer conv block used in the affine coupling layer.
Args:
name: variable scope.
x: 4-D or 5-D Tensor.
mid_channels: Output channels of the second layer.
dilations: Optional, list of integers.
activation: relu or gatu.
If relu, the second layer is relu(W*x)
If gatu, the second layer is tanh(W1*x) * sigmoid(W2*x)
dropout: Dropout probability.
Returns:
x: 4-D Tensor: Output activations.
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
x_shape = common_layers.shape_list(x)
is_2d = len(x_shape) == 4
num_steps = x_shape[1]
if is_2d:
first_filter = [3, 3]
second_filter = [1, 1]
else:
# special case when number of steps equal 1 to avoid
# padding.
if num_steps == 1:
first_filter = [1, 3, 3]
else:
first_filter = [2, 3, 3]
second_filter = [1, 1, 1]
# Edge Padding + conv2d + actnorm + relu:
# [output: 512 channels]
x = conv("1_1", x, output_channels=mid_channels, filter_size=first_filter,
dilations=dilations)
x = tf.nn.relu(x)
x = get_dropout(x, rate=dropout)
# Padding + conv2d + actnorm + activation.
# [input, output: 512 channels]
if activation == "relu":
x = conv("1_2", x, output_channels=mid_channels,
filter_size=second_filter, dilations=dilations)
x = tf.nn.relu(x)
elif activation == "gatu":
# x = tanh(w1*x) * sigm(w2*x)
x_tanh = conv("1_tanh", x, output_channels=mid_channels,
filter_size=second_filter, dilations=dilations)
x_sigm = conv("1_sigm", x, output_channels=mid_channels,
filter_size=second_filter, dilations=dilations)
x = tf.nn.tanh(x_tanh) * tf.nn.sigmoid(x_sigm)
x = get_dropout(x, rate=dropout)
return x |
def inverse_transform(self, maps):
""" This function transforms from component masses and cartesian spins to
    mass-weighted spin parameters perpendicular to the angular momentum.
Parameters
----------
maps : a mapping object
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
of transformed values.
"""
# convert
out = {}
xi1 = conversions.primary_xi(
maps[parameters.mass1], maps[parameters.mass2],
maps[parameters.spin1x], maps[parameters.spin1y],
maps[parameters.spin2x], maps[parameters.spin2y])
xi2 = conversions.secondary_xi(
maps[parameters.mass1], maps[parameters.mass2],
maps[parameters.spin1x], maps[parameters.spin1y],
maps[parameters.spin2x], maps[parameters.spin2y])
out["phi_a"] = conversions.phi_a(
maps[parameters.mass1], maps[parameters.mass2],
maps[parameters.spin1x], maps[parameters.spin1y],
maps[parameters.spin2x], maps[parameters.spin2y])
out["phi_s"] = conversions.phi_s(
maps[parameters.spin1x], maps[parameters.spin1y],
maps[parameters.spin2x], maps[parameters.spin2y])
# map parameters from primary/secondary to indices
if isinstance(xi1, numpy.ndarray):
mass1, mass2 = map(numpy.array, [maps[parameters.mass1],
maps[parameters.mass2]])
mask_mass1_gte_mass2 = mass1 >= mass2
mask_mass1_lt_mass2 = mass1 < mass2
out["xi1"] = numpy.concatenate((
xi1[mask_mass1_gte_mass2],
xi2[mask_mass1_lt_mass2]))
out["xi2"] = numpy.concatenate((
xi1[mask_mass1_gte_mass2],
xi2[mask_mass1_lt_mass2]))
elif maps["mass1"] > maps["mass2"]:
out["xi1"] = xi1
out["xi2"] = xi2
else:
out["xi1"] = xi2
out["xi2"] = xi1
return self.format_output(maps, out) | This function transforms from component masses and cartesian spins to
    mass-weighted spin parameters perpendicular to the angular momentum.
Parameters
----------
maps : a mapping object
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
        of transformed values. | Below is the instruction that describes the task:
### Input:
This function transforms from component masses and cartesian spins to
    mass-weighted spin parameters perpendicular to the angular momentum.
Parameters
----------
maps : a mapping object
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
of transformed values.
### Response:
def inverse_transform(self, maps):
""" This function transforms from component masses and cartesian spins to
    mass-weighted spin parameters perpendicular to the angular momentum.
Parameters
----------
maps : a mapping object
Returns
-------
out : dict
A dict with key as parameter name and value as numpy.array or float
of transformed values.
"""
# convert
out = {}
xi1 = conversions.primary_xi(
maps[parameters.mass1], maps[parameters.mass2],
maps[parameters.spin1x], maps[parameters.spin1y],
maps[parameters.spin2x], maps[parameters.spin2y])
xi2 = conversions.secondary_xi(
maps[parameters.mass1], maps[parameters.mass2],
maps[parameters.spin1x], maps[parameters.spin1y],
maps[parameters.spin2x], maps[parameters.spin2y])
out["phi_a"] = conversions.phi_a(
maps[parameters.mass1], maps[parameters.mass2],
maps[parameters.spin1x], maps[parameters.spin1y],
maps[parameters.spin2x], maps[parameters.spin2y])
out["phi_s"] = conversions.phi_s(
maps[parameters.spin1x], maps[parameters.spin1y],
maps[parameters.spin2x], maps[parameters.spin2y])
# map parameters from primary/secondary to indices
if isinstance(xi1, numpy.ndarray):
mass1, mass2 = map(numpy.array, [maps[parameters.mass1],
maps[parameters.mass2]])
mask_mass1_gte_mass2 = mass1 >= mass2
mask_mass1_lt_mass2 = mass1 < mass2
out["xi1"] = numpy.concatenate((
xi1[mask_mass1_gte_mass2],
xi2[mask_mass1_lt_mass2]))
out["xi2"] = numpy.concatenate((
xi1[mask_mass1_gte_mass2],
xi2[mask_mass1_lt_mass2]))
elif maps["mass1"] > maps["mass2"]:
out["xi1"] = xi1
out["xi2"] = xi2
else:
out["xi1"] = xi2
out["xi2"] = xi1
return self.format_output(maps, out) |
def run(*args):
'''Run the normal shovel functionality'''
import os
import sys
import argparse
import pkg_resources
# First off, read the arguments
parser = argparse.ArgumentParser(prog='shovel', description='Rake, for Python')
parser.add_argument('method', help='The task to run')
parser.add_argument('--verbose', dest='verbose', action='store_true',
help='Be extra talkative')
parser.add_argument('--dry-run', dest='dryRun', action='store_true',
help='Show the args that would be used')
ver = pkg_resources.require('shovel')[0].version
parser.add_argument('--version', action='version',
version='Shovel v %s' % ver, help='print the version of Shovel.')
# Parse our arguments
if args:
clargs, remaining = parser.parse_known_args(args=args)
else: # pragma: no cover
clargs, remaining = parser.parse_known_args()
if clargs.verbose:
logger.setLevel(logging.DEBUG)
args, kwargs = parse(remaining)
# Import all of the files we want
shovel = Shovel()
# Read in any tasks that have already been defined
shovel.extend(Task.clear())
for path in [
os.path.expanduser('~/.shovel.py'),
os.path.expanduser('~/.shovel')]:
if os.path.exists(path): # pragma: no cover
shovel.read(path, os.path.expanduser('~/'))
shovel_home = os.environ.get('SHOVEL_HOME')
if shovel_home and os.path.exists(shovel_home):
shovel.read(shovel_home, shovel_home)
for path in ['shovel.py', 'shovel']:
if os.path.exists(path):
shovel.read(path)
# If it's help we're looking for, look no further
if clargs.method == 'help':
print(help.shovel_help(shovel, *args, **kwargs))
elif clargs.method == 'tasks':
tasks = list(v for _, v in shovel.items())
if not tasks:
print('No tasks found!')
else:
names = list(t.fullname for t in tasks)
docs = list(t.doc for t in tasks)
# The width of the screen
width = 80
import shutil
try:
width, _ = shutil.get_terminal_size(fallback=(0, width))
except AttributeError:
pass
# Create the format with padding for the longest name, and to
            # accommodate the screen width
format = '%%-%is # %%-%is' % (
max(len(name) for name in names), width)
for name, doc in zip(names, docs):
print(format % (name, doc))
elif clargs.method:
# Try to get the first command provided
try:
tasks = shovel.tasks(clargs.method)
except KeyError:
print('Could not find task "%s"' % clargs.method, file=sys.stderr)
exit(1)
if len(tasks) > 1:
print('Specifier "%s" matches multiple tasks:' % clargs.method, file=sys.stderr)
for task in tasks:
print('\t%s' % task.fullname, file=sys.stderr)
exit(2)
task = tasks[0]
if clargs.dryRun:
print(task.dry(*args, **kwargs))
else:
            task(*args, **kwargs) | Run the normal shovel functionality | Below is the instruction that describes the task:
### Input:
Run the normal shovel functionality
### Response:
def run(*args):
'''Run the normal shovel functionality'''
import os
import sys
import argparse
import pkg_resources
# First off, read the arguments
parser = argparse.ArgumentParser(prog='shovel', description='Rake, for Python')
parser.add_argument('method', help='The task to run')
parser.add_argument('--verbose', dest='verbose', action='store_true',
help='Be extra talkative')
parser.add_argument('--dry-run', dest='dryRun', action='store_true',
help='Show the args that would be used')
ver = pkg_resources.require('shovel')[0].version
parser.add_argument('--version', action='version',
version='Shovel v %s' % ver, help='print the version of Shovel.')
# Parse our arguments
if args:
clargs, remaining = parser.parse_known_args(args=args)
else: # pragma: no cover
clargs, remaining = parser.parse_known_args()
if clargs.verbose:
logger.setLevel(logging.DEBUG)
args, kwargs = parse(remaining)
# Import all of the files we want
shovel = Shovel()
# Read in any tasks that have already been defined
shovel.extend(Task.clear())
for path in [
os.path.expanduser('~/.shovel.py'),
os.path.expanduser('~/.shovel')]:
if os.path.exists(path): # pragma: no cover
shovel.read(path, os.path.expanduser('~/'))
shovel_home = os.environ.get('SHOVEL_HOME')
if shovel_home and os.path.exists(shovel_home):
shovel.read(shovel_home, shovel_home)
for path in ['shovel.py', 'shovel']:
if os.path.exists(path):
shovel.read(path)
# If it's help we're looking for, look no further
if clargs.method == 'help':
print(help.shovel_help(shovel, *args, **kwargs))
elif clargs.method == 'tasks':
tasks = list(v for _, v in shovel.items())
if not tasks:
print('No tasks found!')
else:
names = list(t.fullname for t in tasks)
docs = list(t.doc for t in tasks)
# The width of the screen
width = 80
import shutil
try:
width, _ = shutil.get_terminal_size(fallback=(0, width))
except AttributeError:
pass
# Create the format with padding for the longest name, and to
            # accommodate the screen width
format = '%%-%is # %%-%is' % (
max(len(name) for name in names), width)
for name, doc in zip(names, docs):
print(format % (name, doc))
elif clargs.method:
# Try to get the first command provided
try:
tasks = shovel.tasks(clargs.method)
except KeyError:
print('Could not find task "%s"' % clargs.method, file=sys.stderr)
exit(1)
if len(tasks) > 1:
print('Specifier "%s" matches multiple tasks:' % clargs.method, file=sys.stderr)
for task in tasks:
print('\t%s' % task.fullname, file=sys.stderr)
exit(2)
task = tasks[0]
if clargs.dryRun:
print(task.dry(*args, **kwargs))
else:
task(*args, **kwargs) |
def remove(self, first, count):
"""
Remove a range of count consecutive ids starting at id first
from all the ranges in the set.
"""
# Avoid trivialities
if first < 0 or count < 1:
return
new_range = []
last = first + count - 1
for r in self.__range:
if first <= r.last and r.first <= last:
# There is an overlap
if r.first < first:
new_range.append(IdRange(r.first, first-r.first))
if last < r.last:
new_range.append(IdRange(last+1, r.last-last))
else:
# No overlap, range is kept
new_range.append(r)
self.__range = new_range | Remove a range of count consecutive ids starting at id first
    from all the ranges in the set. | Below is the instruction that describes the task:
### Input:
Remove a range of count consecutive ids starting at id first
from all the ranges in the set.
### Response:
def remove(self, first, count):
"""
Remove a range of count consecutive ids starting at id first
from all the ranges in the set.
"""
# Avoid trivialities
if first < 0 or count < 1:
return
new_range = []
last = first + count - 1
for r in self.__range:
if first <= r.last and r.first <= last:
# There is an overlap
if r.first < first:
new_range.append(IdRange(r.first, first-r.first))
if last < r.last:
new_range.append(IdRange(last+1, r.last-last))
else:
# No overlap, range is kept
new_range.append(r)
self.__range = new_range |
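The splitting rule is easier to follow on bare inclusive (first, last) pairs; this self-contained sketch mirrors the overlap logic above, with plain tuples standing in for the IdRange objects of the original class:

def remove_span(ranges, first, count):
    # ranges: list of inclusive (first, last) pairs; removes ids first..first+count-1.
    if first < 0 or count < 1:
        return ranges
    last = first + count - 1
    out = []
    for lo, hi in ranges:
        if first <= hi and lo <= last:       # the range overlaps the removed span
            if lo < first:
                out.append((lo, first - 1))  # keep the part left of the span
            if last < hi:
                out.append((last + 1, hi))   # keep the part right of the span
        else:
            out.append((lo, hi))             # no overlap, keep the range as-is
    return out

print(remove_span([(0, 10)], 5, 3))  # [(0, 4), (8, 10)]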
def _get_current_object(self):
"""Return the current object. This is useful if you want the real
object behind the proxy at a time for performance reasons or because
you want to pass the object into a different context.
"""
if not hasattr(self.__local, '__release_local__'):
return self.__local()
try:
return getattr(self.__local, self.__name__)
except AttributeError:
raise RuntimeError('no object bound to %s' % self.__name__) | Return the current object. This is useful if you want the real
object behind the proxy at a time for performance reasons or because
    you want to pass the object into a different context. | Below is the instruction that describes the task:
### Input:
Return the current object. This is useful if you want the real
object behind the proxy at a time for performance reasons or because
you want to pass the object into a different context.
### Response:
def _get_current_object(self):
"""Return the current object. This is useful if you want the real
object behind the proxy at a time for performance reasons or because
you want to pass the object into a different context.
"""
if not hasattr(self.__local, '__release_local__'):
return self.__local()
try:
return getattr(self.__local, self.__name__)
except AttributeError:
raise RuntimeError('no object bound to %s' % self.__name__) |
def days_in_month(year, month):
"""
returns number of days for the given year and month
:param int year: calendar year
:param int month: calendar month
:return int:
"""
eom = _days_per_month[month - 1]
if is_leap_year(year) and month == 2:
eom += 1
return eom | returns number of days for the given year and month
:param int year: calendar year
:param int month: calendar month
    :return int: | Below is the instruction that describes the task:
### Input:
returns number of days for the given year and month
:param int year: calendar year
:param int month: calendar month
:return int:
### Response:
def days_in_month(year, month):
"""
returns number of days for the given year and month
:param int year: calendar year
:param int month: calendar month
:return int:
"""
eom = _days_per_month[month - 1]
if is_leap_year(year) and month == 2:
eom += 1
return eom |
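Assuming the usual Gregorian rule behind is_leap_year and the standard 12-entry month table, the result matches the standard library, which makes for a cheap sanity check:

import calendar

_days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]

def is_leap_year(year):
    # Standard Gregorian rule; assumed to match the module's own helper.
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)

def days_in_month(year, month):
    eom = _days_per_month[month - 1]
    if is_leap_year(year) and month == 2:
        eom += 1
    return eom

assert days_in_month(2024, 2) == calendar.monthrange(2024, 2)[1] == 29
assert days_in_month(1900, 2) == calendar.monthrange(1900, 2)[1] == 28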
def get_map_values(self, lons, lats, ibin=None):
"""Return the map values corresponding to a set of coordinates.
Parameters
----------
lons : array-like
'Longitudes' (RA or GLON)
lats : array-like
        'Latitudes' (DEC or GLAT)
ibin : int or array-like
Extract data only for a given energy bin. None -> extract data for all bins
Returns
----------
vals : numpy.ndarray((n))
Values of pixels in the flattened map, np.nan used to flag
coords outside of map
"""
pix_idxs = self.get_pixel_indices(lons, lats, ibin)
idxs = copy.copy(pix_idxs)
m = np.empty_like(idxs[0], dtype=bool)
m.fill(True)
for i, p in enumerate(pix_idxs):
m &= (pix_idxs[i] >= 0) & (pix_idxs[i] < self._npix[i])
idxs[i][~m] = 0
vals = self.counts.T[idxs]
vals[~m] = np.nan
return vals | Return the map values corresponding to a set of coordinates.
Parameters
----------
lons : array-like
'Longitudes' (RA or GLON)
lats : array-like
        'Latitudes' (DEC or GLAT)
ibin : int or array-like
Extract data only for a given energy bin. None -> extract data for all bins
Returns
----------
vals : numpy.ndarray((n))
Values of pixels in the flattened map, np.nan used to flag
        coords outside of map | Below is the instruction that describes the task:
### Input:
Return the map values corresponding to a set of coordinates.
Parameters
----------
lons : array-like
'Longitudes' (RA or GLON)
lats : array-like
        'Latitudes' (DEC or GLAT)
ibin : int or array-like
Extract data only for a given energy bin. None -> extract data for all bins
Returns
----------
vals : numpy.ndarray((n))
Values of pixels in the flattened map, np.nan used to flag
coords outside of map
### Response:
def get_map_values(self, lons, lats, ibin=None):
"""Return the map values corresponding to a set of coordinates.
Parameters
----------
lons : array-like
'Longitudes' (RA or GLON)
lats : array-like
        'Latitudes' (DEC or GLAT)
ibin : int or array-like
Extract data only for a given energy bin. None -> extract data for all bins
Returns
----------
vals : numpy.ndarray((n))
Values of pixels in the flattened map, np.nan used to flag
coords outside of map
"""
pix_idxs = self.get_pixel_indices(lons, lats, ibin)
idxs = copy.copy(pix_idxs)
m = np.empty_like(idxs[0], dtype=bool)
m.fill(True)
for i, p in enumerate(pix_idxs):
m &= (pix_idxs[i] >= 0) & (pix_idxs[i] < self._npix[i])
idxs[i][~m] = 0
vals = self.counts.T[idxs]
vals[~m] = np.nan
return vals |
def _add_url(self, chunk):
""" Add a 'url' property to a chunk and return it """
if 'url' in chunk:
return chunk
public_path = chunk.get('publicPath')
if public_path:
chunk['url'] = public_path
else:
fullpath = posixpath.join(self.state.static_view_path,
chunk['name'])
chunk['url'] = self._request.static_url(fullpath)
        return chunk | Add a 'url' property to a chunk and return it | Below is the instruction that describes the task:
### Input:
Add a 'url' property to a chunk and return it
### Response:
def _add_url(self, chunk):
""" Add a 'url' property to a chunk and return it """
if 'url' in chunk:
return chunk
public_path = chunk.get('publicPath')
if public_path:
chunk['url'] = public_path
else:
fullpath = posixpath.join(self.state.static_view_path,
chunk['name'])
chunk['url'] = self._request.static_url(fullpath)
return chunk |
def chunks(iterable, n):
"""Yield successive n-sized chunks from iterable object. https://stackoverflow.com/a/312464 """
for i in range(0, len(iterable), n):
        yield iterable[i:i + n] | Yield successive n-sized chunks from iterable object. https://stackoverflow.com/a/312464  | Below is the instruction that describes the task:
### Input:
Yield successive n-sized chunks from iterable object. https://stackoverflow.com/a/312464
### Response:
def chunks(iterable, n):
"""Yield successive n-sized chunks from iterable object. https://stackoverflow.com/a/312464 """
for i in range(0, len(iterable), n):
yield iterable[i:i + n] |
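Because the generator only uses len() and slicing, any sliceable sequence works, and the final chunk simply comes out shorter when the length is not a multiple of n. Restated so it runs standalone:

def chunks(iterable, n):
    # Yield successive n-sized slices of a sequence; the last one may be short.
    for i in range(0, len(iterable), n):
        yield iterable[i:i + n]

print(list(chunks([1, 2, 3, 4, 5, 6, 7], 3)))  # [[1, 2, 3], [4, 5, 6], [7]]
print(list(chunks("abcdef", 2)))               # ['ab', 'cd', 'ef']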
def unmodified_isinstance(*bases):
"""When called in the form
MyOverrideClass(unmodified_isinstance(BuiltInClass))
    it allows calls against passed in built in instances to pass even if they're not a subclass
"""
class UnmodifiedIsInstance(type):
if sys.version_info[0] == 2 and sys.version_info[1] <= 6:
@classmethod
def __instancecheck__(cls, instance):
if cls.__name__ in (str(base.__name__) for base in bases):
return isinstance(instance, bases)
subclass = getattr(instance, '__class__', None)
subtype = type(instance)
instance_type = getattr(abc, '_InstanceType', None)
if not instance_type:
class test_object:
pass
instance_type = type(test_object)
if subtype is instance_type:
subtype = subclass
if subtype is subclass or subclass is None:
return cls.__subclasscheck__(subtype)
return (cls.__subclasscheck__(subclass) or cls.__subclasscheck__(subtype))
else:
@classmethod
def __instancecheck__(cls, instance):
if cls.__name__ in (str(base.__name__) for base in bases):
return isinstance(instance, bases)
return type.__instancecheck__(cls, instance)
return with_metaclass(UnmodifiedIsInstance, *bases) | When called in the form
MyOverrideClass(unmodified_isinstance(BuiltInClass))
    it allows calls against passed in built in instances to pass even if they're not a subclass | Below is the instruction that describes the task:
### Input:
When called in the form
MyOverrideClass(unmodified_isinstance(BuiltInClass))
    it allows calls against passed in built in instances to pass even if they're not a subclass
### Response:
def unmodified_isinstance(*bases):
"""When called in the form
MyOverrideClass(unmodified_isinstance(BuiltInClass))
    it allows calls against passed in built in instances to pass even if they're not a subclass
"""
class UnmodifiedIsInstance(type):
if sys.version_info[0] == 2 and sys.version_info[1] <= 6:
@classmethod
def __instancecheck__(cls, instance):
if cls.__name__ in (str(base.__name__) for base in bases):
return isinstance(instance, bases)
subclass = getattr(instance, '__class__', None)
subtype = type(instance)
instance_type = getattr(abc, '_InstanceType', None)
if not instance_type:
class test_object:
pass
instance_type = type(test_object)
if subtype is instance_type:
subtype = subclass
if subtype is subclass or subclass is None:
return cls.__subclasscheck__(subtype)
return (cls.__subclasscheck__(subclass) or cls.__subclasscheck__(subtype))
else:
@classmethod
def __instancecheck__(cls, instance):
if cls.__name__ in (str(base.__name__) for base in bases):
return isinstance(instance, bases)
return type.__instancecheck__(cls, instance)
return with_metaclass(UnmodifiedIsInstance, *bases) |
def write(self, buffer):
"""Write a string to the socket, doubling any IAC characters.
Can block if the connection is blocked. May raise
socket.error if the connection is closed.
"""
if type(buffer) == type(0):
buffer = chr(buffer)
elif not isinstance(buffer, bytes):
buffer = buffer.encode(self.encoding)
if IAC in buffer:
buffer = buffer.replace(IAC, IAC+IAC)
self.msg("send %s", repr(buffer))
self.sock.send(buffer) | Write a string to the socket, doubling any IAC characters.
Can block if the connection is blocked. May raise
    socket.error if the connection is closed. | Below is the instruction that describes the task:
### Input:
Write a string to the socket, doubling any IAC characters.
Can block if the connection is blocked. May raise
socket.error if the connection is closed.
### Response:
def write(self, buffer):
"""Write a string to the socket, doubling any IAC characters.
Can block if the connection is blocked. May raise
socket.error if the connection is closed.
"""
if type(buffer) == type(0):
buffer = chr(buffer)
elif not isinstance(buffer, bytes):
buffer = buffer.encode(self.encoding)
if IAC in buffer:
buffer = buffer.replace(IAC, IAC+IAC)
self.msg("send %s", repr(buffer))
self.sock.send(buffer) |
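The only telnet-specific step is doubling the IAC byte (0xFF) so the peer does not mistake a data byte for a command introducer; the escaping itself is a one-line bytes.replace, shown here without the socket and encoding handling of the method above:

IAC = bytes([255])  # telnet "Interpret As Command" byte

payload = b'hello\xffworld'
escaped = payload.replace(IAC, IAC + IAC)
print(escaped)  # b'hello\xff\xffworld' -- the receiver collapses the pair back into one data byte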
def first_interval_starting(self, start: datetime.datetime) -> \
Optional[Interval]:
"""
Returns our first interval that starts with the ``start`` parameter, or
``None``.
"""
for i in self.intervals:
if i.start == start:
return i
return None | Returns our first interval that starts with the ``start`` parameter, or
    ``None``. | Below is the instruction that describes the task:
### Input:
Returns our first interval that starts with the ``start`` parameter, or
``None``.
### Response:
def first_interval_starting(self, start: datetime.datetime) -> \
Optional[Interval]:
"""
Returns our first interval that starts with the ``start`` parameter, or
``None``.
"""
for i in self.intervals:
if i.start == start:
return i
return None |
def _convert(cls, record):
"""
Core method of the converter. Converts a single dictionary into another dictionary.
"""
if not record:
return {}
converted_dict = {}
for field in cls.conversion:
key = field[0]
if len(field) >= 2 and field[1]:
converted_key = field[1]
else:
converted_key = key
if len(field) >= 3 and field[2]:
conversion_method = field[2]
else:
conversion_method = cls.default_conversion_method
if len(field) >= 4:
converter = field[3]
else:
converter = None
try:
value = conversion_method(record[key])
except KeyError:
continue
if converter:
value = converter._convert_internal(value)
if converted_key is APPEND:
if isinstance(value, list):
for v in value:
converted_dict.update(v)
else:
converted_dict.update(value)
else:
converted_dict[converted_key] = value
        return converted_dict | Core method of the converter. Converts a single dictionary into another dictionary. | Below is the instruction that describes the task:
### Input:
Core method of the converter. Converts a single dictionary into another dictionary.
### Response:
def _convert(cls, record):
"""
Core method of the converter. Converts a single dictionary into another dictionary.
"""
if not record:
return {}
converted_dict = {}
for field in cls.conversion:
key = field[0]
if len(field) >= 2 and field[1]:
converted_key = field[1]
else:
converted_key = key
if len(field) >= 3 and field[2]:
conversion_method = field[2]
else:
conversion_method = cls.default_conversion_method
if len(field) >= 4:
converter = field[3]
else:
converter = None
try:
value = conversion_method(record[key])
except KeyError:
continue
if converter:
value = converter._convert_internal(value)
if converted_key is APPEND:
if isinstance(value, list):
for v in value:
converted_dict.update(v)
else:
converted_dict.update(value)
else:
converted_dict[converted_key] = value
return converted_dict |
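Each entry of cls.conversion is a tuple (source_key, target_key, conversion_method, nested_converter) with the later slots optional. A self-contained sketch of the same table-driven mapping, leaving out the APPEND and nested-converter branches (the field names below are invented for illustration):

def convert_record(record, conversion, default=lambda v: v):
    out = {}
    for field in conversion:
        key = field[0]
        target = field[1] if len(field) >= 2 and field[1] else key
        method = field[2] if len(field) >= 3 and field[2] else default
        try:
            out[target] = method(record[key])
        except KeyError:
            continue  # missing source keys are skipped, as in _convert
    return out

conversion = [
    ("id",),                 # kept under the same key, value passed through
    ("name", "full_name"),   # renamed
    ("age", "age", int),     # coerced to int
]
print(convert_record({"id": 7, "name": "Ada", "age": "36"}, conversion))
# {'id': 7, 'full_name': 'Ada', 'age': 36}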
def _set_link_error_disable(self, v, load=False):
"""
Setter method for link_error_disable, mapped from YANG variable /interface/ethernet/link_error_disable (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_link_error_disable is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_link_error_disable() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=link_error_disable.link_error_disable, is_container='container', presence=False, yang_name="link-error-disable", rest_name="link-error-disable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'port link dampening', u'callpoint': u'Pld', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-pld', defining_module='brocade-pld', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """link_error_disable must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=link_error_disable.link_error_disable, is_container='container', presence=False, yang_name="link-error-disable", rest_name="link-error-disable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'port link dampening', u'callpoint': u'Pld', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-pld', defining_module='brocade-pld', yang_type='container', is_config=True)""",
})
self.__link_error_disable = t
if hasattr(self, '_set'):
self._set() | Setter method for link_error_disable, mapped from YANG variable /interface/ethernet/link_error_disable (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_link_error_disable is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_link_error_disable() directly. | Below is the instruction that describes the task:
### Input:
Setter method for link_error_disable, mapped from YANG variable /interface/ethernet/link_error_disable (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_link_error_disable is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_link_error_disable() directly.
### Response:
def _set_link_error_disable(self, v, load=False):
"""
Setter method for link_error_disable, mapped from YANG variable /interface/ethernet/link_error_disable (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_link_error_disable is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_link_error_disable() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=link_error_disable.link_error_disable, is_container='container', presence=False, yang_name="link-error-disable", rest_name="link-error-disable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'port link dampening', u'callpoint': u'Pld', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-pld', defining_module='brocade-pld', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """link_error_disable must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=link_error_disable.link_error_disable, is_container='container', presence=False, yang_name="link-error-disable", rest_name="link-error-disable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'port link dampening', u'callpoint': u'Pld', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-pld', defining_module='brocade-pld', yang_type='container', is_config=True)""",
})
self.__link_error_disable = t
if hasattr(self, '_set'):
self._set() |
def _locate_free_sectors(self, ignore_chunk=None):
"""Return a list of booleans, indicating the free sectors."""
sectors = self._sectors(ignore_chunk=ignore_chunk)
# Sectors are considered free, if the value is an empty list.
        return [not i for i in sectors] | Return a list of booleans, indicating the free sectors. | Below is the instruction that describes the task:
### Input:
Return a list of booleans, indicating the free sectors.
### Response:
def _locate_free_sectors(self, ignore_chunk=None):
"""Return a list of booleans, indicating the free sectors."""
sectors = self._sectors(ignore_chunk=ignore_chunk)
# Sectors are considered free, if the value is an empty list.
return [not i for i in sectors] |
def handleGetValue(self, topContainer):
""" This method overrides ValueGetterBase's "pure virtual" method. It
returns the referenced value. The derived class is NOT responsible for
fully resolving the reference'd value in the event the value resolves to
another ValueGetterBase-based instance -- this is handled automatically
within ValueGetterBase implementation.
topContainer: The top-level container (dict, tuple, or list [sub-]instance)
within whose context the value-getter is applied. If
self.__referenceDict is None, then topContainer will be used
as the reference dictionary for resolving our dictionary key
chain.
Returns: The value referenced by this instance (which may be another
value-getter instance)
"""
value = self.__referenceDict if self.__referenceDict is not None else topContainer
for key in self.__dictKeyChain:
value = value[key]
return value | This method overrides ValueGetterBase's "pure virtual" method. It
returns the referenced value. The derived class is NOT responsible for
fully resolving the reference'd value in the event the value resolves to
another ValueGetterBase-based instance -- this is handled automatically
within ValueGetterBase implementation.
topContainer: The top-level container (dict, tuple, or list [sub-]instance)
within whose context the value-getter is applied. If
self.__referenceDict is None, then topContainer will be used
as the reference dictionary for resolving our dictionary key
chain.
Returns: The value referenced by this instance (which may be another
                  value-getter instance) | Below is the instruction that describes the task:
### Input:
This method overrides ValueGetterBase's "pure virtual" method. It
returns the referenced value. The derived class is NOT responsible for
fully resolving the reference'd value in the event the value resolves to
another ValueGetterBase-based instance -- this is handled automatically
within ValueGetterBase implementation.
topContainer: The top-level container (dict, tuple, or list [sub-]instance)
within whose context the value-getter is applied. If
self.__referenceDict is None, then topContainer will be used
as the reference dictionary for resolving our dictionary key
chain.
Returns: The value referenced by this instance (which may be another
value-getter instance)
### Response:
def handleGetValue(self, topContainer):
""" This method overrides ValueGetterBase's "pure virtual" method. It
returns the referenced value. The derived class is NOT responsible for
fully resolving the reference'd value in the event the value resolves to
another ValueGetterBase-based instance -- this is handled automatically
within ValueGetterBase implementation.
topContainer: The top-level container (dict, tuple, or list [sub-]instance)
within whose context the value-getter is applied. If
self.__referenceDict is None, then topContainer will be used
as the reference dictionary for resolving our dictionary key
chain.
Returns: The value referenced by this instance (which may be another
value-getter instance)
"""
value = self.__referenceDict if self.__referenceDict is not None else topContainer
for key in self.__dictKeyChain:
value = value[key]
return value |
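The key-chain traversal above reduces to a plain nested lookup; a standalone sketch (the real instance stores the chain privately, so it is inlined here):

    top = {"model": {"params": {"alpha": 0.1}}}
    value = top
    for key in ["model", "params", "alpha"]:   # the stored __dictKeyChain
        value = value[key]
    # value == 0.1, the referenced leaf inside the container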
def installed(name,
cyg_arch='x86_64',
mirrors=None):
'''
Make sure that a package is installed.
name
The name of the package to install
cyg_arch : x86_64
The cygwin architecture to install the package into.
Current options are x86 and x86_64
mirrors : None
List of mirrors to check.
None will use a default mirror (kernel.org)
CLI Example:
.. code-block:: yaml
rsync:
cyg.installed:
- mirrors:
- http://mirror/without/public/key: ""
- http://mirror/with/public/key: http://url/of/public/key
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
if cyg_arch not in ['x86', 'x86_64']:
ret['result'] = False
ret['comment'] = 'The \'cyg_arch\' argument must\
be one of \'x86\' or \'x86_64\''
return ret
LOG.debug('Installed State: Initial Mirror list: %s', mirrors)
if not __salt__['cyg.check_valid_package'](name,
cyg_arch=cyg_arch,
mirrors=mirrors):
ret['result'] = False
ret['comment'] = 'Invalid package name.'
return ret
pkgs = __salt__['cyg.list'](name, cyg_arch)
if name in pkgs:
ret['result'] = True
ret['comment'] = 'Package is already installed.'
return ret
if __opts__['test']:
ret['comment'] = 'The package {0} would\
have been installed'.format(name)
return ret
if __salt__['cyg.install'](name,
cyg_arch=cyg_arch,
mirrors=mirrors):
ret['result'] = True
ret['changes'][name] = 'Installed'
ret['comment'] = 'Package was successfully installed'
else:
ret['result'] = False
ret['comment'] = 'Could not install package.'
return ret | Make sure that a package is installed.
name
The name of the package to install
cyg_arch : x86_64
The cygwin architecture to install the package into.
Current options are x86 and x86_64
mirrors : None
List of mirrors to check.
None will use a default mirror (kernel.org)
CLI Example:
.. code-block:: yaml
rsync:
cyg.installed:
- mirrors:
- http://mirror/without/public/key: ""
            - http://mirror/with/public/key: http://url/of/public/key | Below is the instruction that describes the task:
### Input:
Make sure that a package is installed.
name
The name of the package to install
cyg_arch : x86_64
The cygwin architecture to install the package into.
Current options are x86 and x86_64
mirrors : None
List of mirrors to check.
None will use a default mirror (kernel.org)
CLI Example:
.. code-block:: yaml
rsync:
cyg.installed:
- mirrors:
- http://mirror/without/public/key: ""
- http://mirror/with/public/key: http://url/of/public/key
### Response:
def installed(name,
cyg_arch='x86_64',
mirrors=None):
'''
Make sure that a package is installed.
name
The name of the package to install
cyg_arch : x86_64
The cygwin architecture to install the package into.
Current options are x86 and x86_64
mirrors : None
List of mirrors to check.
None will use a default mirror (kernel.org)
CLI Example:
.. code-block:: yaml
rsync:
cyg.installed:
- mirrors:
- http://mirror/without/public/key: ""
- http://mirror/with/public/key: http://url/of/public/key
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
if cyg_arch not in ['x86', 'x86_64']:
ret['result'] = False
ret['comment'] = 'The \'cyg_arch\' argument must\
be one of \'x86\' or \'x86_64\''
return ret
LOG.debug('Installed State: Initial Mirror list: %s', mirrors)
if not __salt__['cyg.check_valid_package'](name,
cyg_arch=cyg_arch,
mirrors=mirrors):
ret['result'] = False
ret['comment'] = 'Invalid package name.'
return ret
pkgs = __salt__['cyg.list'](name, cyg_arch)
if name in pkgs:
ret['result'] = True
ret['comment'] = 'Package is already installed.'
return ret
if __opts__['test']:
ret['comment'] = 'The package {0} would\
have been installed'.format(name)
return ret
if __salt__['cyg.install'](name,
cyg_arch=cyg_arch,
mirrors=mirrors):
ret['result'] = True
ret['changes'][name] = 'Installed'
ret['comment'] = 'Package was successfully installed'
else:
ret['result'] = False
ret['comment'] = 'Could not install package.'
return ret |
def loads(cls, s):
"""
Load an instance of this class from YAML.
"""
with closing(StringIO(s)) as fileobj:
            return cls.load(fileobj) | Load an instance of this class from YAML. | Below is the instruction that describes the task:
### Input:
Load an instance of this class from YAML.
### Response:
def loads(cls, s):
"""
Load an instance of this class from YAML.
"""
with closing(StringIO(s)) as fileobj:
return cls.load(fileobj) |
def cmd_func(self, command: str) -> Optional[Callable]:
"""
Get the function for a command
:param command: the name of the command
"""
func_name = self.cmd_func_name(command)
if func_name:
return getattr(self, func_name) | Get the function for a command
        :param command: the name of the command | Below is the instruction that describes the task:
### Input:
Get the function for a command
:param command: the name of the command
### Response:
def cmd_func(self, command: str) -> Optional[Callable]:
"""
Get the function for a command
:param command: the name of the command
"""
func_name = self.cmd_func_name(command)
if func_name:
return getattr(self, func_name) |
def get_vcs_root(path):
"""Return VCS root directory path
Return None if path is not within a supported VCS repository"""
previous_path = path
while get_vcs_info(path) is None:
path = abspardir(path)
if path == previous_path:
return
else:
previous_path = path
return osp.abspath(path) | Return VCS root directory path
    Return None if path is not within a supported VCS repository | Below is the instruction that describes the task:
### Input:
Return VCS root directory path
Return None if path is not within a supported VCS repository
### Response:
def get_vcs_root(path):
"""Return VCS root directory path
Return None if path is not within a supported VCS repository"""
previous_path = path
while get_vcs_info(path) is None:
path = abspardir(path)
if path == previous_path:
return
else:
previous_path = path
return osp.abspath(path) |
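Usage sketch; the paths are illustrative and the result depends on get_vcs_info() recognising VCS metadata somewhere up the tree:

    root = get_vcs_root("/home/user/project/src/utils")
    # e.g. "/home/user/project" if that directory is a Git/Mercurial checkout, otherwise None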
def _do_multipart_upload(self, stream, metadata, size, num_retries):
"""Perform a multipart upload.
:type stream: IO[bytes]
:param stream: A bytes IO object open for reading.
:type metadata: dict
:param metadata: The metadata associated with the upload.
:type size: int
:param size: The number of bytes to be uploaded (which will be read
from ``stream``). If not provided, the upload will be
concluded once ``stream`` is exhausted (or :data:`None`).
:type num_retries: int
:param num_retries: Number of upload retries. (Deprecated: This
argument will be removed in a future release.)
:rtype: :class:`~requests.Response`
:returns: The "200 OK" response object returned after the multipart
upload request.
:raises: :exc:`ValueError` if the ``stream`` has fewer than ``size``
bytes remaining.
"""
data = stream.read(size)
if len(data) < size:
msg = _READ_LESS_THAN_SIZE.format(size, len(data))
raise ValueError(msg)
headers = _get_upload_headers(self._connection.USER_AGENT)
upload_url = _MULTIPART_URL_TEMPLATE.format(project=self.project)
upload = MultipartUpload(upload_url, headers=headers)
if num_retries is not None:
upload._retry_strategy = resumable_media.RetryStrategy(
max_retries=num_retries
)
response = upload.transmit(self._http, data, metadata, _GENERIC_CONTENT_TYPE)
return response | Perform a multipart upload.
:type stream: IO[bytes]
:param stream: A bytes IO object open for reading.
:type metadata: dict
:param metadata: The metadata associated with the upload.
:type size: int
:param size: The number of bytes to be uploaded (which will be read
from ``stream``). If not provided, the upload will be
concluded once ``stream`` is exhausted (or :data:`None`).
:type num_retries: int
:param num_retries: Number of upload retries. (Deprecated: This
argument will be removed in a future release.)
:rtype: :class:`~requests.Response`
:returns: The "200 OK" response object returned after the multipart
upload request.
:raises: :exc:`ValueError` if the ``stream`` has fewer than ``size``
            bytes remaining. | Below is the instruction that describes the task:
### Input:
Perform a multipart upload.
:type stream: IO[bytes]
:param stream: A bytes IO object open for reading.
:type metadata: dict
:param metadata: The metadata associated with the upload.
:type size: int
:param size: The number of bytes to be uploaded (which will be read
from ``stream``). If not provided, the upload will be
concluded once ``stream`` is exhausted (or :data:`None`).
:type num_retries: int
:param num_retries: Number of upload retries. (Deprecated: This
argument will be removed in a future release.)
:rtype: :class:`~requests.Response`
:returns: The "200 OK" response object returned after the multipart
upload request.
:raises: :exc:`ValueError` if the ``stream`` has fewer than ``size``
bytes remaining.
### Response:
def _do_multipart_upload(self, stream, metadata, size, num_retries):
"""Perform a multipart upload.
:type stream: IO[bytes]
:param stream: A bytes IO object open for reading.
:type metadata: dict
:param metadata: The metadata associated with the upload.
:type size: int
:param size: The number of bytes to be uploaded (which will be read
from ``stream``). If not provided, the upload will be
concluded once ``stream`` is exhausted (or :data:`None`).
:type num_retries: int
:param num_retries: Number of upload retries. (Deprecated: This
argument will be removed in a future release.)
:rtype: :class:`~requests.Response`
:returns: The "200 OK" response object returned after the multipart
upload request.
:raises: :exc:`ValueError` if the ``stream`` has fewer than ``size``
bytes remaining.
"""
data = stream.read(size)
if len(data) < size:
msg = _READ_LESS_THAN_SIZE.format(size, len(data))
raise ValueError(msg)
headers = _get_upload_headers(self._connection.USER_AGENT)
upload_url = _MULTIPART_URL_TEMPLATE.format(project=self.project)
upload = MultipartUpload(upload_url, headers=headers)
if num_retries is not None:
upload._retry_strategy = resumable_media.RetryStrategy(
max_retries=num_retries
)
response = upload.transmit(self._http, data, metadata, _GENERIC_CONTENT_TYPE)
return response |
def to_dict(self):
"""Generate a dict for this object's attributes.
:return: A dict representing an :class:`Asset`
"""
rv = {'code': self.code}
if not self.is_native():
rv['issuer'] = self.issuer
rv['type'] = self.type
else:
rv['type'] = 'native'
return rv | Generate a dict for this object's attributes.
        :return: A dict representing an :class:`Asset` | Below is the instruction that describes the task:
### Input:
Generate a dict for this object's attributes.
:return: A dict representing an :class:`Asset`
### Response:
def to_dict(self):
"""Generate a dict for this object's attributes.
:return: A dict representing an :class:`Asset`
"""
rv = {'code': self.code}
if not self.is_native():
rv['issuer'] = self.issuer
rv['type'] = self.type
else:
rv['type'] = 'native'
return rv |
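Sketch of the two output shapes; the Asset constructor details (code plus optional issuer, with issuer-less XLM treated as native) are assumptions, not taken from the record:

    Asset("USD", issuer="GABC...EXAMPLE").to_dict()
    # -> {'code': 'USD', 'issuer': 'GABC...EXAMPLE', 'type': <self.type, e.g. 'credit_alphanum4'>}
    Asset("XLM").to_dict()
    # -> {'code': 'XLM', 'type': 'native'}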
def cli(debug, cache, incremental):
"""Crawler framework for documents and structured scrapers."""
settings.HTTP_CACHE = cache
settings.INCREMENTAL = incremental
settings.DEBUG = debug
if settings.DEBUG:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
    init_memorious() | Crawler framework for documents and structured scrapers. | Below is the instruction that describes the task:
### Input:
Crawler framework for documents and structured scrapers.
### Response:
def cli(debug, cache, incremental):
"""Crawler framework for documents and structured scrapers."""
settings.HTTP_CACHE = cache
settings.INCREMENTAL = incremental
settings.DEBUG = debug
if settings.DEBUG:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
init_memorious() |
def ensure_dir(self, mode=0777):
"""
Make sure the directory exists, create if necessary.
"""
if not self.exists() or not self.isdir():
        os.makedirs(self, mode) | Make sure the directory exists, create if necessary. | Below is the instruction that describes the task:
### Input:
Make sure the directory exists, create if necessary.
### Response:
def ensure_dir(self, mode=0777):
"""
Make sure the directory exists, create if necessary.
"""
if not self.exists() or not self.isdir():
os.makedirs(self, mode) |
def feed_interval_get(feed_id, parameters):
'Get adaptive interval between checks for a feed.'
val = cache.get(getkey( T_INTERVAL,
key=feed_interval_key(feed_id, parameters) ))
    return val if isinstance(val, tuple) else (val, None) | Get adaptive interval between checks for a feed. | Below is the instruction that describes the task:
### Input:
Get adaptive interval between checks for a feed.
### Response:
def feed_interval_get(feed_id, parameters):
'Get adaptive interval between checks for a feed.'
val = cache.get(getkey( T_INTERVAL,
key=feed_interval_key(feed_id, parameters) ))
return val if isinstance(val, tuple) else (val, None) |
def impulse_noise(x, severity=1):
"""Impulse noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added impulse noise.
"""
c = [.03, .06, .09, 0.17, 0.27][severity - 1]
x = tfds.core.lazy_imports.skimage.util.random_noise(
np.array(x) / 255., mode='s&p', amount=c)
x_clip = np.clip(x, 0, 1) * 255
return around_and_astype(x_clip) | Impulse noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
    numpy array, image with uint8 pixels in [0,255]. Added impulse noise. | Below is the instruction that describes the task:
### Input:
Impulse noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added impulse noise.
### Response:
def impulse_noise(x, severity=1):
"""Impulse noise corruption to images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Added impulse noise.
"""
c = [.03, .06, .09, 0.17, 0.27][severity - 1]
x = tfds.core.lazy_imports.skimage.util.random_noise(
np.array(x) / 255., mode='s&p', amount=c)
x_clip = np.clip(x, 0, 1) * 255
return around_and_astype(x_clip) |
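A quick check on a synthetic image; numpy is assumed to be imported as np, matching the code above:

    rng = np.random.RandomState(0)
    img = rng.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)
    noisy = impulse_noise(img, severity=3)   # salt-and-pepper noise on roughly 9% of the pixels
    # noisy has the same shape as img, with uint8 values in [0, 255]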
def _open(self, name=None, fileobj=None, mymap=None, block=None):
"""
The _open function takes some form of file identifier and creates
an :py:class:`CpioFile` instance from it.
:param :py:class:`str` name: a file name
:param :py:class:`file` fileobj: if given, this overrides *name*
:param :py:class:`mmap.mmap` mymap: if given, this overrides *fileobj*
:param :py:class:`bytes` block: file contents in a block of memory, (if given, this overrides *mymap*)
The file to be used can be specified in any of four different
forms, (in reverse precedence):
#. a file name
#. :py:class:`file` object
#. :py:mod:`mmap.mmap`, or
#. a block of memory
"""
if block is not None:
if not name:
name = '<unknown>'
self.unpack_from(block)
if fileobj:
fileobj.close()
return self
if mymap is not None:
block = mymap
elif fileobj:
try:
mymap = mmap.mmap(fileobj.fileno(), 0,
mmap.MAP_SHARED, mmap.PROT_READ)
# pylint: disable=W0702
except:
mymap = 0
block = fileobj.read()
elif name:
fileobj = io.open(os.path.normpath(os.path.expanduser(name)), 'rb')
else:
assert False
return self._open(name=name,
fileobj=fileobj,
mymap=mymap,
block=block) | The _open function takes some form of file identifier and creates
an :py:class:`CpioFile` instance from it.
:param :py:class:`str` name: a file name
:param :py:class:`file` fileobj: if given, this overrides *name*
:param :py:class:`mmap.mmap` mymap: if given, this overrides *fileobj*
:param :py:class:`bytes` block: file contents in a block of memory, (if given, this overrides *mymap*)
The file to be used can be specified in any of four different
forms, (in reverse precedence):
#. a file name
#. :py:class:`file` object
#. :py:mod:`mmap.mmap`, or
    #. a block of memory | Below is the instruction that describes the task:
### Input:
The _open function takes some form of file identifier and creates
an :py:class:`CpioFile` instance from it.
:param :py:class:`str` name: a file name
:param :py:class:`file` fileobj: if given, this overrides *name*
:param :py:class:`mmap.mmap` mymap: if given, this overrides *fileobj*
:param :py:class:`bytes` block: file contents in a block of memory, (if given, this overrides *mymap*)
The file to be used can be specified in any of four different
forms, (in reverse precedence):
#. a file name
#. :py:class:`file` object
#. :py:mod:`mmap.mmap`, or
#. a block of memory
### Response:
def _open(self, name=None, fileobj=None, mymap=None, block=None):
"""
The _open function takes some form of file identifier and creates
an :py:class:`CpioFile` instance from it.
:param :py:class:`str` name: a file name
:param :py:class:`file` fileobj: if given, this overrides *name*
:param :py:class:`mmap.mmap` mymap: if given, this overrides *fileobj*
:param :py:class:`bytes` block: file contents in a block of memory, (if given, this overrides *mymap*)
The file to be used can be specified in any of four different
forms, (in reverse precedence):
#. a file name
#. :py:class:`file` object
#. :py:mod:`mmap.mmap`, or
#. a block of memory
"""
if block is not None:
if not name:
name = '<unknown>'
self.unpack_from(block)
if fileobj:
fileobj.close()
return self
if mymap is not None:
block = mymap
elif fileobj:
try:
mymap = mmap.mmap(fileobj.fileno(), 0,
mmap.MAP_SHARED, mmap.PROT_READ)
# pylint: disable=W0702
except:
mymap = 0
block = fileobj.read()
elif name:
fileobj = io.open(os.path.normpath(os.path.expanduser(name)), 'rb')
else:
assert False
return self._open(name=name,
fileobj=fileobj,
mymap=mymap,
block=block) |
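The four invocation forms described in the docstring, in increasing order of precedence; the CpioFile construction and the f/m/raw_bytes placeholders are hypothetical:

    archive = CpioFile()
    archive._open(name="~/initrd.img")       # 1. by file name
    archive._open(fileobj=f)                 # 2. by open file object (mmap'd if possible, else read)
    archive._open(fileobj=f, mymap=m)        # 3. by mmap, overriding the file object
    archive._open(block=raw_bytes)           # 4. by in-memory block, overriding everything else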
def vectorize(self, sentence_list):
'''
Args:
sentence_list: The list of tokenized sentences.
[[`token`, `token`, `token`, ...],
[`token`, `token`, `token`, ...],
[`token`, `token`, `token`, ...]]
Returns:
`np.ndarray` of tokens.
[vector of token, vector of token, vector of token]
'''
test_observed_arr = self.__setup_dataset(sentence_list, self.__token_master_list, self.__seq_len)
inferenced_arr = self.__rbm.inference(
test_observed_arr,
training_count=1,
r_batch_size=-1
)
return inferenced_arr | Args:
sentence_list: The list of tokenized sentences.
[[`token`, `token`, `token`, ...],
[`token`, `token`, `token`, ...],
[`token`, `token`, `token`, ...]]
Returns:
`np.ndarray` of tokens.
        [vector of token, vector of token, vector of token] | Below is the instruction that describes the task:
### Input:
Args:
sentence_list: The list of tokenized sentences.
[[`token`, `token`, `token`, ...],
[`token`, `token`, `token`, ...],
[`token`, `token`, `token`, ...]]
Returns:
`np.ndarray` of tokens.
[vector of token, vector of token, vector of token]
### Response:
def vectorize(self, sentence_list):
'''
Args:
sentence_list: The list of tokenized sentences.
[[`token`, `token`, `token`, ...],
[`token`, `token`, `token`, ...],
[`token`, `token`, `token`, ...]]
Returns:
`np.ndarray` of tokens.
[vector of token, vector of token, vector of token]
'''
test_observed_arr = self.__setup_dataset(sentence_list, self.__token_master_list, self.__seq_len)
inferenced_arr = self.__rbm.inference(
test_observed_arr,
training_count=1,
r_batch_size=-1
)
return inferenced_arr |
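Usage sketch, assuming a vectorizer instance whose token master list, sequence length and RBM were set up elsewhere:

    sentences = [["deep", "boltzmann", "machine"], ["hello", "world"]]
    feature_arr = vectorizer.vectorize(sentences)   # np.ndarray of inferred token vectors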
def obj(self):
"""Returns the value of :meth:`ObjectMixin.get_object` and sets a private
property called _obj. This property ensures the logic around allow_none
is enforced across Endpoints using the Object interface.
:raises: :class:`werkzeug.exceptions.BadRequest`
:returns: The result of :meth:ObjectMixin.get_object`
"""
if not getattr(self, '_obj', None):
self._obj = self.get_object()
if self._obj is None and not self.allow_none:
self.return_error(404)
return self._obj | Returns the value of :meth:`ObjectMixin.get_object` and sets a private
property called _obj. This property ensures the logic around allow_none
is enforced across Endpoints using the Object interface.
:raises: :class:`werkzeug.exceptions.BadRequest`
        :returns: The result of :meth:ObjectMixin.get_object` | Below is the instruction that describes the task:
### Input:
Returns the value of :meth:`ObjectMixin.get_object` and sets a private
property called _obj. This property ensures the logic around allow_none
is enforced across Endpoints using the Object interface.
:raises: :class:`werkzeug.exceptions.BadRequest`
:returns: The result of :meth:ObjectMixin.get_object`
### Response:
def obj(self):
"""Returns the value of :meth:`ObjectMixin.get_object` and sets a private
property called _obj. This property ensures the logic around allow_none
is enforced across Endpoints using the Object interface.
:raises: :class:`werkzeug.exceptions.BadRequest`
:returns: The result of :meth:ObjectMixin.get_object`
"""
if not getattr(self, '_obj', None):
self._obj = self.get_object()
if self._obj is None and not self.allow_none:
self.return_error(404)
return self._obj |
def relabel_non_zero(label_image, start = 1):
r"""
Relabel the regions of a label image.
Re-processes the labels to make them consecutively and starting from start.
Keeps all zero (0) labels, as they are considered background.
Parameters
----------
label_image : array_like
A nD label map.
start : integer
The id of the first label to assign
Returns
-------
relabel_map : ndarray
The relabelled label map.
See also
--------
relabel
"""
if start <= 0: raise ArgumentError('The starting value can not be 0 or lower.')
l = list(scipy.unique(label_image))
if 0 in l: l.remove(0)
mapping = dict()
mapping[0] = 0
for key, item in zip(l, list(range(start, len(l) + start))):
mapping[key] = item
return relabel_map(label_image, mapping) | r"""
Relabel the regions of a label image.
Re-processes the labels to make them consecutively and starting from start.
Keeps all zero (0) labels, as they are considered background.
Parameters
----------
label_image : array_like
A nD label map.
start : integer
The id of the first label to assign
Returns
-------
relabel_map : ndarray
The relabelled label map.
See also
--------
    relabel | Below is the instruction that describes the task:
### Input:
r"""
Relabel the regions of a label image.
Re-processes the labels to make them consecutively and starting from start.
Keeps all zero (0) labels, as they are considered background.
Parameters
----------
label_image : array_like
A nD label map.
start : integer
The id of the first label to assign
Returns
-------
relabel_map : ndarray
The relabelled label map.
See also
--------
relabel
### Response:
def relabel_non_zero(label_image, start = 1):
r"""
Relabel the regions of a label image.
Re-processes the labels to make them consecutively and starting from start.
Keeps all zero (0) labels, as they are considered background.
Parameters
----------
label_image : array_like
A nD label map.
start : integer
The id of the first label to assign
Returns
-------
relabel_map : ndarray
The relabelled label map.
See also
--------
relabel
"""
if start <= 0: raise ArgumentError('The starting value can not be 0 or lower.')
l = list(scipy.unique(label_image))
if 0 in l: l.remove(0)
mapping = dict()
mapping[0] = 0
for key, item in zip(l, list(range(start, len(l) + start))):
mapping[key] = item
return relabel_map(label_image, mapping) |
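A small worked example, assuming relabel_map() (from the same module) applies the mapping elementwise, and using the module's old-style scipy alias for numpy:

    lbl = scipy.array([[0, 7, 7],
                       [0, 3, 9]])
    relabel_non_zero(lbl, start=1)
    # background stays 0; labels {3, 7, 9} become {1, 2, 3} in sorted order:
    # [[0, 2, 2],
    #  [0, 1, 3]]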
def register(self, request, **cleaned_data):
"""
Given a username, email address and password, register a new
user account, which will initially be inactive.
Along with the new ``User`` object, a new
``registration.models.RegistrationProfile`` will be created,
tied to that ``User``, containing the activation key which
will be used for this account.
Two emails will be sent. First one to the admin; this email should
contain an activation link and a resume of the new user infos.
Second one, to the user, for inform him that his request is pending.
After the ``User`` and ``RegistrationProfile`` are created and
the activation email is sent, the signal
``registration.signals.user_registered`` will be sent, with
the new ``User`` as the keyword argument ``user`` and the
class of this backend as the sender.
"""
if Site._meta.installed:
site = Site.objects.get_current()
else:
site = RequestSite(request)
create_user = RegistrationProfile.objects.create_inactive_user
new_user = create_user(
cleaned_data['username'],
cleaned_data['email'],
cleaned_data['password1'],
site,
send_email=False
)
new_user.first_name = cleaned_data['first_name']
new_user.last_name = cleaned_data['last_name']
new_user.save()
user_info = UserInfo(
user=new_user,
company=cleaned_data['company'],
function=cleaned_data['function'],
address=cleaned_data['address'],
postal_code=cleaned_data['postal_code'],
city=cleaned_data['city'],
country=cleaned_data['country'],
phone=cleaned_data['phone'],
)
user_info.save()
send_activation_email(new_user, site, user_info)
send_activation_pending_email(new_user, site, user_info)
signals.user_registered.send(sender=self.__class__, user=new_user, request=request)
return new_user | Given a username, email address and password, register a new
user account, which will initially be inactive.
Along with the new ``User`` object, a new
``registration.models.RegistrationProfile`` will be created,
tied to that ``User``, containing the activation key which
will be used for this account.
Two emails will be sent. First one to the admin; this email should
contain an activation link and a resume of the new user infos.
Second one, to the user, for inform him that his request is pending.
After the ``User`` and ``RegistrationProfile`` are created and
the activation email is sent, the signal
``registration.signals.user_registered`` will be sent, with
the new ``User`` as the keyword argument ``user`` and the
    class of this backend as the sender. | Below is the instruction that describes the task:
### Input:
Given a username, email address and password, register a new
user account, which will initially be inactive.
Along with the new ``User`` object, a new
``registration.models.RegistrationProfile`` will be created,
tied to that ``User``, containing the activation key which
will be used for this account.
Two emails will be sent. First one to the admin; this email should
contain an activation link and a resume of the new user infos.
Second one, to the user, for inform him that his request is pending.
After the ``User`` and ``RegistrationProfile`` are created and
the activation email is sent, the signal
``registration.signals.user_registered`` will be sent, with
the new ``User`` as the keyword argument ``user`` and the
class of this backend as the sender.
### Response:
def register(self, request, **cleaned_data):
"""
Given a username, email address and password, register a new
user account, which will initially be inactive.
Along with the new ``User`` object, a new
``registration.models.RegistrationProfile`` will be created,
tied to that ``User``, containing the activation key which
will be used for this account.
Two emails will be sent. First one to the admin; this email should
contain an activation link and a resume of the new user infos.
Second one, to the user, for inform him that his request is pending.
After the ``User`` and ``RegistrationProfile`` are created and
the activation email is sent, the signal
``registration.signals.user_registered`` will be sent, with
the new ``User`` as the keyword argument ``user`` and the
class of this backend as the sender.
"""
if Site._meta.installed:
site = Site.objects.get_current()
else:
site = RequestSite(request)
create_user = RegistrationProfile.objects.create_inactive_user
new_user = create_user(
cleaned_data['username'],
cleaned_data['email'],
cleaned_data['password1'],
site,
send_email=False
)
new_user.first_name = cleaned_data['first_name']
new_user.last_name = cleaned_data['last_name']
new_user.save()
user_info = UserInfo(
user=new_user,
company=cleaned_data['company'],
function=cleaned_data['function'],
address=cleaned_data['address'],
postal_code=cleaned_data['postal_code'],
city=cleaned_data['city'],
country=cleaned_data['country'],
phone=cleaned_data['phone'],
)
user_info.save()
send_activation_email(new_user, site, user_info)
send_activation_pending_email(new_user, site, user_info)
signals.user_registered.send(sender=self.__class__, user=new_user, request=request)
return new_user |
def _join_factory(cls, gap, pad):
"""Build a joiner for the given cls, and the given padding options
"""
if issubclass(cls, dict):
def _join(data):
out = cls()
data = list(data)
while data:
tsd = data.pop(0)
out.append(tsd, gap=gap, pad=pad)
del tsd
return out
else:
from .. import TimeSeriesBaseList
def _join(arrays):
list_ = TimeSeriesBaseList(*arrays)
return list_.join(pad=pad, gap=gap)
    return _join | Build a joiner for the given cls, and the given padding options | Below is the instruction that describes the task:
### Input:
Build a joiner for the given cls, and the given padding options
### Response:
def _join_factory(cls, gap, pad):
"""Build a joiner for the given cls, and the given padding options
"""
if issubclass(cls, dict):
def _join(data):
out = cls()
data = list(data)
while data:
tsd = data.pop(0)
out.append(tsd, gap=gap, pad=pad)
del tsd
return out
else:
from .. import TimeSeriesBaseList
def _join(arrays):
list_ = TimeSeriesBaseList(*arrays)
return list_.join(pad=pad, gap=gap)
return _join |
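Sketch of how the returned joiner is used; TimeSeriesDict and the chunk objects are hypothetical GWpy-style inputs (the dict branch expects each element to support append(tsd, gap=..., pad=...)):

    join = _join_factory(TimeSeriesDict, gap="pad", pad=0.0)
    combined = join([chunk1, chunk2, chunk3])   # chunks merged into one container, gaps zero-padded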
def chi2(T1, T2):
"""
chi-squared test of difference between two transition matrices.
Parameters
----------
T1 : array
(k, k), matrix of transitions (counts).
T2 : array
(k, k), matrix of transitions (counts) to use to form the
probabilities under the null.
Returns
-------
: tuple
(3 elements).
(chi2 value, pvalue, degrees of freedom).
Examples
--------
>>> import libpysal
>>> from giddy.markov import Spatial_Markov, chi2
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> years = list(range(1929, 2010))
>>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
>>> rpci = pci/(pci.mean(axis=0))
>>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
>>> w.transform='r'
>>> sm = Spatial_Markov(rpci, w, fixed=True)
>>> T1 = sm.T[0]
>>> T1
array([[562., 22., 1., 0.],
[ 12., 201., 22., 0.],
[ 0., 17., 97., 4.],
[ 0., 0., 3., 19.]])
>>> T2 = sm.transitions
>>> T2
array([[884., 77., 4., 0.],
[ 68., 794., 87., 3.],
[ 1., 92., 815., 51.],
[ 1., 0., 60., 903.]])
>>> chi2(T1,T2)
(23.39728441473295, 0.005363116704861337, 9)
Notes
-----
Second matrix is used to form the probabilities under the null.
Marginal sums from first matrix are distributed across these probabilities
under the null. In other words the observed transitions are taken from T1
while the expected transitions are formed as follows
.. math::
E_{i,j} = \sum_j T1_{i,j} * T2_{i,j}/\sum_j T2_{i,j}
Degrees of freedom corrected for any rows in either T1 or T2 that have
zero total transitions.
"""
rs2 = T2.sum(axis=1)
rs1 = T1.sum(axis=1)
rs2nz = rs2 > 0
rs1nz = rs1 > 0
dof1 = sum(rs1nz)
dof2 = sum(rs2nz)
rs2 = rs2 + (rs2 == 0)
dof = (dof1 - 1) * (dof2 - 1)
p = np.diag(1 / rs2) * np.matrix(T2)
E = np.diag(rs1) * np.matrix(p)
num = T1 - E
num = np.multiply(num, num)
E = E + (E == 0)
chi2 = num / E
chi2 = chi2.sum()
pvalue = 1 - stats.chi2.cdf(chi2, dof)
return chi2, pvalue, dof | chi-squared test of difference between two transition matrices.
Parameters
----------
T1 : array
(k, k), matrix of transitions (counts).
T2 : array
(k, k), matrix of transitions (counts) to use to form the
probabilities under the null.
Returns
-------
: tuple
(3 elements).
(chi2 value, pvalue, degrees of freedom).
Examples
--------
>>> import libpysal
>>> from giddy.markov import Spatial_Markov, chi2
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> years = list(range(1929, 2010))
>>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
>>> rpci = pci/(pci.mean(axis=0))
>>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
>>> w.transform='r'
>>> sm = Spatial_Markov(rpci, w, fixed=True)
>>> T1 = sm.T[0]
>>> T1
array([[562., 22., 1., 0.],
[ 12., 201., 22., 0.],
[ 0., 17., 97., 4.],
[ 0., 0., 3., 19.]])
>>> T2 = sm.transitions
>>> T2
array([[884., 77., 4., 0.],
[ 68., 794., 87., 3.],
[ 1., 92., 815., 51.],
[ 1., 0., 60., 903.]])
>>> chi2(T1,T2)
(23.39728441473295, 0.005363116704861337, 9)
Notes
-----
Second matrix is used to form the probabilities under the null.
Marginal sums from first matrix are distributed across these probabilities
under the null. In other words the observed transitions are taken from T1
while the expected transitions are formed as follows
.. math::
E_{i,j} = \sum_j T1_{i,j} * T2_{i,j}/\sum_j T2_{i,j}
Degrees of freedom corrected for any rows in either T1 or T2 that have
    zero total transitions. | Below is the instruction that describes the task:
### Input:
chi-squared test of difference between two transition matrices.
Parameters
----------
T1 : array
(k, k), matrix of transitions (counts).
T2 : array
(k, k), matrix of transitions (counts) to use to form the
probabilities under the null.
Returns
-------
: tuple
(3 elements).
(chi2 value, pvalue, degrees of freedom).
Examples
--------
>>> import libpysal
>>> from giddy.markov import Spatial_Markov, chi2
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> years = list(range(1929, 2010))
>>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
>>> rpci = pci/(pci.mean(axis=0))
>>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
>>> w.transform='r'
>>> sm = Spatial_Markov(rpci, w, fixed=True)
>>> T1 = sm.T[0]
>>> T1
array([[562., 22., 1., 0.],
[ 12., 201., 22., 0.],
[ 0., 17., 97., 4.],
[ 0., 0., 3., 19.]])
>>> T2 = sm.transitions
>>> T2
array([[884., 77., 4., 0.],
[ 68., 794., 87., 3.],
[ 1., 92., 815., 51.],
[ 1., 0., 60., 903.]])
>>> chi2(T1,T2)
(23.39728441473295, 0.005363116704861337, 9)
Notes
-----
Second matrix is used to form the probabilities under the null.
Marginal sums from first matrix are distributed across these probabilities
under the null. In other words the observed transitions are taken from T1
while the expected transitions are formed as follows
.. math::
E_{i,j} = \sum_j T1_{i,j} * T2_{i,j}/\sum_j T2_{i,j}
Degrees of freedom corrected for any rows in either T1 or T2 that have
zero total transitions.
### Response:
def chi2(T1, T2):
"""
chi-squared test of difference between two transition matrices.
Parameters
----------
T1 : array
(k, k), matrix of transitions (counts).
T2 : array
(k, k), matrix of transitions (counts) to use to form the
probabilities under the null.
Returns
-------
: tuple
(3 elements).
(chi2 value, pvalue, degrees of freedom).
Examples
--------
>>> import libpysal
>>> from giddy.markov import Spatial_Markov, chi2
>>> f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
>>> years = list(range(1929, 2010))
>>> pci = np.array([f.by_col[str(y)] for y in years]).transpose()
>>> rpci = pci/(pci.mean(axis=0))
>>> w = libpysal.io.open(libpysal.examples.get_path("states48.gal")).read()
>>> w.transform='r'
>>> sm = Spatial_Markov(rpci, w, fixed=True)
>>> T1 = sm.T[0]
>>> T1
array([[562., 22., 1., 0.],
[ 12., 201., 22., 0.],
[ 0., 17., 97., 4.],
[ 0., 0., 3., 19.]])
>>> T2 = sm.transitions
>>> T2
array([[884., 77., 4., 0.],
[ 68., 794., 87., 3.],
[ 1., 92., 815., 51.],
[ 1., 0., 60., 903.]])
>>> chi2(T1,T2)
(23.39728441473295, 0.005363116704861337, 9)
Notes
-----
Second matrix is used to form the probabilities under the null.
Marginal sums from first matrix are distributed across these probabilities
under the null. In other words the observed transitions are taken from T1
while the expected transitions are formed as follows
.. math::
E_{i,j} = \sum_j T1_{i,j} * T2_{i,j}/\sum_j T2_{i,j}
Degrees of freedom corrected for any rows in either T1 or T2 that have
zero total transitions.
"""
rs2 = T2.sum(axis=1)
rs1 = T1.sum(axis=1)
rs2nz = rs2 > 0
rs1nz = rs1 > 0
dof1 = sum(rs1nz)
dof2 = sum(rs2nz)
rs2 = rs2 + (rs2 == 0)
dof = (dof1 - 1) * (dof2 - 1)
p = np.diag(1 / rs2) * np.matrix(T2)
E = np.diag(rs1) * np.matrix(p)
num = T1 - E
num = np.multiply(num, num)
E = E + (E == 0)
chi2 = num / E
chi2 = chi2.sum()
pvalue = 1 - stats.chi2.cdf(chi2, dof)
return chi2, pvalue, dof |