code (stringlengths 75–104k) | docstring (stringlengths 1–46.9k) | text (stringlengths 164–112k)
---|---|---|
def convertPath(srcpath, dstdir):
"""Given `srcpath`, return a corresponding path within `dstdir`"""
bits = srcpath.split("/")
bits.pop(0)
# Strip out leading 'unsigned' from paths like unsigned/update/win32/...
if bits[0] == 'unsigned':
bits.pop(0)
return os.path.join(dstdir, *bits) | Given `srcpath`, return a corresponding path within `dstdir` | Below is the instruction that describes the task:
### Input:
Given `srcpath`, return a corresponding path within `dstdir`
### Response:
def convertPath(srcpath, dstdir):
"""Given `srcpath`, return a corresponding path within `dstdir`"""
bits = srcpath.split("/")
bits.pop(0)
# Strip out leading 'unsigned' from paths like unsigned/update/win32/...
if bits[0] == 'unsigned':
bits.pop(0)
return os.path.join(dstdir, *bits) |
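A quick usage sketch of convertPath with illustrative paths only, assuming the surrounding module imports os and that forward-slash paths are used: the first path component is always dropped, and a leading 'unsigned' component after it is stripped as well.

import os  # convertPath relies on os.path.join being available in its module

print(convertPath("ftp/unsigned/update/win32/foo.complete.mar", "/builds/signed"))
# -> /builds/signed/update/win32/foo.complete.mar
print(convertPath("ftp/update/linux-i686/foo.mar", "/builds/signed"))
# -> /builds/signed/update/linux-i686/foo.mar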
def render_graph(result, cfg, **kwargs):
"""
Render to output a result that can be parsed as an RDF graph
"""
# Mapping from MIME types to formats accepted by RDFlib
rdflib_formats = {'text/rdf+n3': 'n3',
'text/turtle': 'turtle',
'application/x-turtle': 'turtle',
'text/turtle': 'turtle',
'application/rdf+xml': 'xml',
'text/rdf': 'xml',
'application/rdf+xml': 'xml'}
try:
got = kwargs.get('format', 'text/rdf+n3')
fmt = rdflib_formats[got]
except KeyError:
raise KrnlException('Unsupported format for graph processing: {!s}', got)
g = ConjunctiveGraph()
g.load(StringInputSource(result), format=fmt)
display = cfg.dis[0] if is_collection(cfg.dis) else cfg.dis
if display in ('png', 'svg'):
try:
literal = len(cfg.dis) > 1 and cfg.dis[1].startswith('withlit')
opt = {'lang': cfg.lan, 'literal': literal, 'graphviz': []}
data, metadata = draw_graph(g, fmt=display, options=opt)
return {'data': data,
'metadata': metadata}
except Exception as e:
raise KrnlException('Exception while drawing graph: {!r}', e)
elif display == 'table':
it = rdf_iterator(g, set(cfg.lan), add_vtype=cfg.typ)
n, data = html_table(it, limit=cfg.lmt, withtype=cfg.typ)
data += div('Shown: {}, Total rows: {}', n if cfg.lmt else 'all',
len(g), css="tinfo")
data = {'text/html': div(data)}
elif len(g) == 0:
data = {'text/html': div(div('empty graph', css='krn-warn'))}
else:
data = {'text/plain': g.serialize(format='nt').decode('utf-8')}
return {'data': data,
'metadata': {}} | Render to output a result that can be parsed as an RDF graph | Below is the instruction that describes the task:
### Input:
Render to output a result that can be parsed as an RDF graph
### Response:
def render_graph(result, cfg, **kwargs):
"""
Render to output a result that can be parsed as an RDF graph
"""
# Mapping from MIME types to formats accepted by RDFlib
rdflib_formats = {'text/rdf+n3': 'n3',
'text/turtle': 'turtle',
'application/x-turtle': 'turtle',
'text/turtle': 'turtle',
'application/rdf+xml': 'xml',
'text/rdf': 'xml',
'application/rdf+xml': 'xml'}
try:
got = kwargs.get('format', 'text/rdf+n3')
fmt = rdflib_formats[got]
except KeyError:
raise KrnlException('Unsupported format for graph processing: {!s}', got)
g = ConjunctiveGraph()
g.load(StringInputSource(result), format=fmt)
display = cfg.dis[0] if is_collection(cfg.dis) else cfg.dis
if display in ('png', 'svg'):
try:
literal = len(cfg.dis) > 1 and cfg.dis[1].startswith('withlit')
opt = {'lang': cfg.lan, 'literal': literal, 'graphviz': []}
data, metadata = draw_graph(g, fmt=display, options=opt)
return {'data': data,
'metadata': metadata}
except Exception as e:
raise KrnlException('Exception while drawing graph: {!r}', e)
elif display == 'table':
it = rdf_iterator(g, set(cfg.lan), add_vtype=cfg.typ)
n, data = html_table(it, limit=cfg.lmt, withtype=cfg.typ)
data += div('Shown: {}, Total rows: {}', n if cfg.lmt else 'all',
len(g), css="tinfo")
data = {'text/html': div(data)}
elif len(g) == 0:
data = {'text/html': div(div('empty graph', css='krn-warn'))}
else:
data = {'text/plain': g.serialize(format='nt').decode('utf-8')}
return {'data': data,
'metadata': {}} |
def _system_call(cmd, stdoutfilename=None):
"""Execute the command `cmd`
Parameters
----------
cmd : str
The string containing the command to be run.
stdoutfilename : str
Name of the file to save stdout to or None
(default) to not save to file
stderrfilename : str
Name of the file to save stderr to or None
(default) to not save to file
Returns
-------
tuple of (str, str, int)
The standard output, standard error and exit status of the
executed command
Notes
-----
This function is ported and modified from QIIME
(http://www.qiime.org), previously named
qiime_system_call. QIIME is a GPL project, but we obtained permission from
the authors of this function to port it to Qiita and keep it under BSD
license.
"""
logger = logging.getLogger(__name__)
logger.debug('system call: %s' % cmd)
if stdoutfilename:
with open(stdoutfilename, 'w') as f:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=f,
stderr=subprocess.PIPE)
else:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Communicate pulls all stdout/stderr from the PIPEs
# This call blocks until the command is done
stdout, stderr = proc.communicate()
return_value = proc.returncode
return stdout, stderr, return_value | Execute the command `cmd`
Parameters
----------
cmd : str
The string containing the command to be run.
stdoutfilename : str
Name of the file to save stdout to or None
(default) to not save to file
stderrfilename : str
Name of the file to save stderr to or None
(default) to not save to file
Returns
-------
tuple of (str, str, int)
The standard output, standard error and exit status of the
executed command
Notes
-----
This function is ported and modified from QIIME
(http://www.qiime.org), previously named
qiime_system_call. QIIME is a GPL project, but we obtained permission from
the authors of this function to port it to Qiita and keep it under BSD
license. | Below is the instruction that describes the task:
### Input:
Execute the command `cmd`
Parameters
----------
cmd : str
The string containing the command to be run.
stdoutfilename : str
Name of the file to save stdout to or None
(default) to not save to file
stderrfilename : str
Name of the file to save stderr to or None
(default) to not save to file
Returns
-------
tuple of (str, str, int)
The standard output, standard error and exit status of the
executed command
Notes
-----
This function is ported and modified from QIIME
(http://www.qiime.org), previously named
qiime_system_call. QIIME is a GPL project, but we obtained permission from
the authors of this function to port it to Qiita and keep it under BSD
license.
### Response:
def _system_call(cmd, stdoutfilename=None):
"""Execute the command `cmd`
Parameters
----------
cmd : str
The string containing the command to be run.
stdoutfilename : str
Name of the file to save stdout to or None
(default) to not save to file
stderrfilename : str
Name of the file to save stderr to or None
(default) to not save to file
Returns
-------
tuple of (str, str, int)
The standard output, standard error and exit status of the
executed command
Notes
-----
This function is ported and modified from QIIME
(http://www.qiime.org), previously named
qiime_system_call. QIIME is a GPL project, but we obtained permission from
the authors of this function to port it to Qiita and keep it under BSD
license.
"""
logger = logging.getLogger(__name__)
logger.debug('system call: %s' % cmd)
if stdoutfilename:
with open(stdoutfilename, 'w') as f:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=f,
stderr=subprocess.PIPE)
else:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Communicate pulls all stdout/stderr from the PIPEs
# This call blocks until the command is done
stdout, stderr = proc.communicate()
return_value = proc.returncode
return stdout, stderr, return_value |
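A hedged usage sketch. Because the call passes shell=False, the command goes straight to subprocess.Popen, so an argument list is the safest thing to hand in (the docstring's 'str' wording notwithstanding); the command and file name below are illustrative.

stdout, stderr, rc = _system_call(['ls', '-l', '/tmp'])
if rc != 0:
    print('command failed: %s' % stderr)

# Redirect stdout to a file instead of capturing it in memory.
_system_call(['ls', '-l', '/tmp'], stdoutfilename='listing.txt')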
def keywords(self):
'''Generator which returns all keywords in the suite'''
for table in self.tables:
if isinstance(table, KeywordTable):
for keyword in table.keywords:
yield keyword | Generator which returns all keywords in the suite | Below is the instruction that describes the task:
### Input:
Generator which returns all keywords in the suite
### Response:
def keywords(self):
'''Generator which returns all keywords in the suite'''
for table in self.tables:
if isinstance(table, KeywordTable):
for keyword in table.keywords:
yield keyword |
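A small iteration sketch, assuming suite is a parsed suite object exposing the tables attribute used above. It is called as a plain method here; if the original class declares keywords as a property, drop the parentheses. The name attribute on each keyword is assumed for illustration only.

for kw in suite.keywords():
    print(kw.name)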
def _parse_key_val(stream):
"""Parse key, value combination
return (tuple):
Parsed key (string)
Parsed value (either a string, array, or dict)
"""
logger.debug("parsing key/val")
key = _parse_key(stream)
val = _parse_val(stream)
logger.debug("parsed key/val")
logger.debug("%s", fmt_green(key))
logger.debug("%s", fmt_green(val))
return key, val | Parse key, value combination
return (tuple):
Parsed key (string)
Parsed value (either a string, array, or dict) | Below is the instruction that describes the task:
### Input:
Parse key, value combination
return (tuple):
Parsed key (string)
Parsed value (either a string, array, or dict)
### Response:
def _parse_key_val(stream):
"""Parse key, value combination
return (tuple):
Parsed key (string)
Parsed value (either a string, array, or dict)
"""
logger.debug("parsing key/val")
key = _parse_key(stream)
val = _parse_val(stream)
logger.debug("parsed key/val")
logger.debug("%s", fmt_green(key))
logger.debug("%s", fmt_green(val))
return key, val |
def fCZ_std_errs(self):
"""
Get a dictionary of the standard errors of the CZ fidelities from the specs,
keyed by targets (qubit-qubit pairs).
:return: A dictionary of CZ fidelities, normalized to unity.
:rtype: Dict[tuple(int, int), float]
"""
return {tuple(es.targets): es.fCZ_std_err for es in self.edges_specs} | Get a dictionary of the standard errors of the CZ fidelities from the specs,
keyed by targets (qubit-qubit pairs).
:return: A dictionary of CZ fidelities, normalized to unity.
:rtype: Dict[tuple(int, int), float] | Below is the instruction that describes the task:
### Input:
Get a dictionary of the standard errors of the CZ fidelities from the specs,
keyed by targets (qubit-qubit pairs).
:return: A dictionary of CZ fidelities, normalized to unity.
:rtype: Dict[tuple(int, int), float]
### Response:
def fCZ_std_errs(self):
"""
Get a dictionary of the standard errors of the CZ fidelities from the specs,
keyed by targets (qubit-qubit pairs).
:return: A dictionary of CZ fidelities, normalized to unity.
:rtype: Dict[tuple(int, int), float]
"""
return {tuple(es.targets): es.fCZ_std_err for es in self.edges_specs} |
def _do_synchronise_jobs(walltime, machines):
""" This returns a common reservation date for all the jobs.
This reservation date is really only a hint and will be supplied to each
oar server. Without this *common* reservation_date, one oar server can
decide to postpone the start of the job while the others are already
running. But this doesn't prevent the start of a job on one site to drift
(e.g. because the machines need to be restarted.) But this shouldn't exceed
few minutes.
"""
offset = SYNCHRONISATION_OFFSET
start = time.time() + offset
_t = time.strptime(walltime, "%H:%M:%S")
_walltime = _t.tm_hour * 3600 + _t.tm_min * 60 + _t.tm_sec
# Compute the demand for each cluster
demands = defaultdict(int)
for machine in machines:
cluster = machine["cluster"]
demands[cluster] += machine["nodes"]
# Early leave if only one cluster is there
if len(list(demands.keys())) <= 1:
logger.debug("Only one cluster detected: no synchronisation needed")
return None
clusters = clusters_sites_obj(list(demands.keys()))
# Early leave if only one site is concerned
sites = set(list(clusters.values()))
if len(sites) <= 1:
logger.debug("Only one site detected: no synchronisation needed")
return None
# Test the proposed reservation_date
ok = True
for cluster, nodes in demands.items():
cluster_status = clusters[cluster].status.list()
ok = ok and can_start_on_cluster(cluster_status.nodes,
nodes,
start,
_walltime)
if not ok:
break
if ok:
# The proposed reservation_date fits
logger.info("Reservation_date=%s (%s)" % (_date2h(start), sites))
return start
if start is None:
raise EnosG5kSynchronisationError(sites) | This returns a common reservation date for all the jobs.
This reservation date is really only a hint and will be supplied to each
oar server. Without this *common* reservation_date, one oar server can
decide to postpone the start of the job while the others are already
running. But this doesn't prevent the start of a job on one site to drift
(e.g. because the machines need to be restarted.) But this shouldn't exceed
few minutes. | Below is the instruction that describes the task:
### Input:
This returns a common reservation date for all the jobs.
This reservation date is really only a hint and will be supplied to each
oar server. Without this *common* reservation_date, one oar server can
decide to postpone the start of the job while the others are already
running. But this doesn't prevent the start of a job on one site to drift
(e.g. because the machines need to be restarted.) But this shouldn't exceed
few minutes.
### Response:
def _do_synchronise_jobs(walltime, machines):
""" This returns a common reservation date for all the jobs.
This reservation date is really only a hint and will be supplied to each
oar server. Without this *common* reservation_date, one oar server can
decide to postpone the start of the job while the others are already
running. But this doesn't prevent the start of a job on one site to drift
(e.g. because the machines need to be restarted.) But this shouldn't exceed
few minutes.
"""
offset = SYNCHRONISATION_OFFSET
start = time.time() + offset
_t = time.strptime(walltime, "%H:%M:%S")
_walltime = _t.tm_hour * 3600 + _t.tm_min * 60 + _t.tm_sec
# Compute the demand for each cluster
demands = defaultdict(int)
for machine in machines:
cluster = machine["cluster"]
demands[cluster] += machine["nodes"]
# Early leave if only one cluster is there
if len(list(demands.keys())) <= 1:
logger.debug("Only one cluster detected: no synchronisation needed")
return None
clusters = clusters_sites_obj(list(demands.keys()))
# Early leave if only one site is concerned
sites = set(list(clusters.values()))
if len(sites) <= 1:
logger.debug("Only one site detected: no synchronisation needed")
return None
# Test the proposed reservation_date
ok = True
for cluster, nodes in demands.items():
cluster_status = clusters[cluster].status.list()
ok = ok and can_start_on_cluster(cluster_status.nodes,
nodes,
start,
_walltime)
if not ok:
break
if ok:
# The proposed reservation_date fits
logger.info("Reservation_date=%s (%s)" % (_date2h(start), sites))
return start
if start is None:
raise EnosG5kSynchronisationError(sites) |
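A hedged sketch of how this helper might be called. The cluster names are placeholders, and the call only makes sense with live access to the Grid'5000 API that clusters_sites_obj and the status queries rely on.

machines = [
    {"cluster": "cluster-a", "nodes": 4},   # placeholder cluster names
    {"cluster": "cluster-b", "nodes": 2},
]
reservation = _do_synchronise_jobs("02:00:00", machines)
if reservation is not None:
    print("proposed common start: %s" % _date2h(reservation))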
def process_all(self, texts:Collection[str]) -> List[List[str]]:
"Process a list of `texts`."
if self.n_cpus <= 1: return self._process_all_1(texts)
with ProcessPoolExecutor(self.n_cpus) as e:
return sum(e.map(self._process_all_1, partition_by_cores(texts, self.n_cpus)), []) | Process a list of `texts`. | Below is the instruction that describes the task:
### Input:
Process a list of `texts`.
### Response:
def process_all(self, texts:Collection[str]) -> List[List[str]]:
"Process a list of `texts`."
if self.n_cpus <= 1: return self._process_all_1(texts)
with ProcessPoolExecutor(self.n_cpus) as e:
return sum(e.map(self._process_all_1, partition_by_cores(texts, self.n_cpus)), []) |
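Usage sketch, assuming processor is an instance of the surrounding class (only its n_cpus attribute and _process_all_1 method are needed):

texts = ['first document', 'second document', 'third document']
token_lists = processor.process_all(texts)   # one token list per input text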
def read_core_registers_raw(self, reg_list):
"""
Read one or more core registers
Read core registers in reg_list and return a list of values.
If any register in reg_list is a string, find the number
associated to this register in the lookup table CORE_REGISTER.
"""
# convert to index only
reg_list = [register_name_to_index(reg) for reg in reg_list]
# Sanity check register values
for reg in reg_list:
if reg not in CORE_REGISTER.values():
raise ValueError("unknown reg: %d" % reg)
elif is_fpu_register(reg) and (not self.has_fpu):
raise ValueError("attempt to read FPU register without FPU")
# Handle doubles.
doubles = [reg for reg in reg_list if is_double_float_register(reg)]
hasDoubles = len(doubles) > 0
if hasDoubles:
originalRegList = reg_list
# Strip doubles from reg_list.
reg_list = [reg for reg in reg_list if not is_double_float_register(reg)]
# Read float regs required to build doubles.
singleRegList = []
for reg in doubles:
singleRegList += (-reg, -reg + 1)
singleValues = self.read_core_registers_raw(singleRegList)
# Begin all reads and writes
dhcsr_cb_list = []
reg_cb_list = []
for reg in reg_list:
if is_cfbp_subregister(reg):
reg = CORE_REGISTER['cfbp']
elif is_psr_subregister(reg):
reg = CORE_REGISTER['xpsr']
# write id in DCRSR
self.write_memory(CortexM.DCRSR, reg)
# Technically, we need to poll S_REGRDY in DHCSR here before reading DCRDR. But
# we're running so slow compared to the target that it's not necessary.
# Read it and assert that S_REGRDY is set
dhcsr_cb = self.read_memory(CortexM.DHCSR, now=False)
reg_cb = self.read_memory(CortexM.DCRDR, now=False)
dhcsr_cb_list.append(dhcsr_cb)
reg_cb_list.append(reg_cb)
# Read all results
reg_vals = []
for reg, reg_cb, dhcsr_cb in zip(reg_list, reg_cb_list, dhcsr_cb_list):
dhcsr_val = dhcsr_cb()
assert dhcsr_val & CortexM.S_REGRDY
val = reg_cb()
# Special handling for registers that are combined into a single DCRSR number.
if is_cfbp_subregister(reg):
val = (val >> ((-reg - 1) * 8)) & 0xff
elif is_psr_subregister(reg):
val &= sysm_to_psr_mask(reg)
reg_vals.append(val)
# Merge double regs back into result list.
if hasDoubles:
results = []
for reg in originalRegList:
# Double
if is_double_float_register(reg):
doubleIndex = doubles.index(reg)
singleLow = singleValues[doubleIndex * 2]
singleHigh = singleValues[doubleIndex * 2 + 1]
double = (singleHigh << 32) | singleLow
results.append(double)
# Other register
else:
results.append(reg_vals[reg_list.index(reg)])
reg_vals = results
return reg_vals | Read one or more core registers
Read core registers in reg_list and return a list of values.
If any register in reg_list is a string, find the number
associated to this register in the lookup table CORE_REGISTER. | Below is the instruction that describes the task:
### Input:
Read one or more core registers
Read core registers in reg_list and return a list of values.
If any register in reg_list is a string, find the number
associated to this register in the lookup table CORE_REGISTER.
### Response:
def read_core_registers_raw(self, reg_list):
"""
Read one or more core registers
Read core registers in reg_list and return a list of values.
If any register in reg_list is a string, find the number
associated to this register in the lookup table CORE_REGISTER.
"""
# convert to index only
reg_list = [register_name_to_index(reg) for reg in reg_list]
# Sanity check register values
for reg in reg_list:
if reg not in CORE_REGISTER.values():
raise ValueError("unknown reg: %d" % reg)
elif is_fpu_register(reg) and (not self.has_fpu):
raise ValueError("attempt to read FPU register without FPU")
# Handle doubles.
doubles = [reg for reg in reg_list if is_double_float_register(reg)]
hasDoubles = len(doubles) > 0
if hasDoubles:
originalRegList = reg_list
# Strip doubles from reg_list.
reg_list = [reg for reg in reg_list if not is_double_float_register(reg)]
# Read float regs required to build doubles.
singleRegList = []
for reg in doubles:
singleRegList += (-reg, -reg + 1)
singleValues = self.read_core_registers_raw(singleRegList)
# Begin all reads and writes
dhcsr_cb_list = []
reg_cb_list = []
for reg in reg_list:
if is_cfbp_subregister(reg):
reg = CORE_REGISTER['cfbp']
elif is_psr_subregister(reg):
reg = CORE_REGISTER['xpsr']
# write id in DCRSR
self.write_memory(CortexM.DCRSR, reg)
# Technically, we need to poll S_REGRDY in DHCSR here before reading DCRDR. But
# we're running so slow compared to the target that it's not necessary.
# Read it and assert that S_REGRDY is set
dhcsr_cb = self.read_memory(CortexM.DHCSR, now=False)
reg_cb = self.read_memory(CortexM.DCRDR, now=False)
dhcsr_cb_list.append(dhcsr_cb)
reg_cb_list.append(reg_cb)
# Read all results
reg_vals = []
for reg, reg_cb, dhcsr_cb in zip(reg_list, reg_cb_list, dhcsr_cb_list):
dhcsr_val = dhcsr_cb()
assert dhcsr_val & CortexM.S_REGRDY
val = reg_cb()
# Special handling for registers that are combined into a single DCRSR number.
if is_cfbp_subregister(reg):
val = (val >> ((-reg - 1) * 8)) & 0xff
elif is_psr_subregister(reg):
val &= sysm_to_psr_mask(reg)
reg_vals.append(val)
# Merge double regs back into result list.
if hasDoubles:
results = []
for reg in originalRegList:
# Double
if is_double_float_register(reg):
doubleIndex = doubles.index(reg)
singleLow = singleValues[doubleIndex * 2]
singleHigh = singleValues[doubleIndex * 2 + 1]
double = (singleHigh << 32) | singleLow
results.append(double)
# Other register
else:
results.append(reg_vals[reg_list.index(reg)])
reg_vals = results
return reg_vals |
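A usage sketch against a hypothetical connected CortexM target instance; the register names are resolved through CORE_REGISTER as described above, and the reads are batched before the results are collected.

r0, sp, xpsr = target.read_core_registers_raw(['r0', 'sp', 'xpsr'])
print('r0=0x%08x sp=0x%08x xpsr=0x%08x' % (r0, sp, xpsr))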
def neighbors(self, type=None, direction="to", failed=None):
"""Get a node's neighbors - nodes that are directly connected to it.
Type specifies the class of neighbour and must be a subclass of
Node (default is Node).
Connection is the direction of the connections and can be "to"
(default), "from", "either", or "both".
"""
# get type
if type is None:
type = Node
if not issubclass(type, Node):
raise ValueError(
"{} is not a valid neighbor type,"
"needs to be a subclass of Node.".format(type)
)
# get direction
if direction not in ["both", "either", "from", "to"]:
raise ValueError(
"{} not a valid neighbor connection."
"Should be both, either, to or from.".format(direction)
)
if failed is not None:
raise ValueError(
"You should not pass a failed argument to neighbors(). "
"Neighbors is "
"unusual in that a failed argument cannot be passed. This is "
"because there is inherent uncertainty in what it means for a "
"neighbor to be failed. The neighbors function will only ever "
"return not-failed nodes connected to you via not-failed "
"vectors. If you want to do more elaborate queries, for "
"example, getting not-failed nodes connected to you via failed"
" vectors, you should do so via sql queries."
)
neighbors = []
# get the neighbours
if direction == "to":
outgoing_vectors = (
Vector.query.with_entities(Vector.destination_id)
.filter_by(origin_id=self.id, failed=False)
.all()
)
neighbor_ids = [v.destination_id for v in outgoing_vectors]
if neighbor_ids:
neighbors = Node.query.filter(Node.id.in_(neighbor_ids)).all()
neighbors = [n for n in neighbors if isinstance(n, type)]
if direction == "from":
incoming_vectors = (
Vector.query.with_entities(Vector.origin_id)
.filter_by(destination_id=self.id, failed=False)
.all()
)
neighbor_ids = [v.origin_id for v in incoming_vectors]
if neighbor_ids:
neighbors = Node.query.filter(Node.id.in_(neighbor_ids)).all()
neighbors = [n for n in neighbors if isinstance(n, type)]
if direction == "either":
neighbors = list(
set(
self.neighbors(type=type, direction="to")
+ self.neighbors(type=type, direction="from")
)
)
if direction == "both":
neighbors = list(
set(self.neighbors(type=type, direction="to"))
& set(self.neighbors(type=type, direction="from"))
)
return neighbors | Get a node's neighbors - nodes that are directly connected to it.
Type specifies the class of neighbour and must be a subclass of
Node (default is Node).
Connection is the direction of the connections and can be "to"
(default), "from", "either", or "both". | Below is the the instruction that describes the task:
### Input:
Get a node's neighbors - nodes that are directly connected to it.
Type specifies the class of neighbour and must be a subclass of
Node (default is Node).
Connection is the direction of the connections and can be "to"
(default), "from", "either", or "both".
### Response:
def neighbors(self, type=None, direction="to", failed=None):
"""Get a node's neighbors - nodes that are directly connected to it.
Type specifies the class of neighbour and must be a subclass of
Node (default is Node).
Connection is the direction of the connections and can be "to"
(default), "from", "either", or "both".
"""
# get type
if type is None:
type = Node
if not issubclass(type, Node):
raise ValueError(
"{} is not a valid neighbor type,"
"needs to be a subclass of Node.".format(type)
)
# get direction
if direction not in ["both", "either", "from", "to"]:
raise ValueError(
"{} not a valid neighbor connection."
"Should be both, either, to or from.".format(direction)
)
if failed is not None:
raise ValueError(
"You should not pass a failed argument to neighbors(). "
"Neighbors is "
"unusual in that a failed argument cannot be passed. This is "
"because there is inherent uncertainty in what it means for a "
"neighbor to be failed. The neighbors function will only ever "
"return not-failed nodes connected to you via not-failed "
"vectors. If you want to do more elaborate queries, for "
"example, getting not-failed nodes connected to you via failed"
" vectors, you should do so via sql queries."
)
neighbors = []
# get the neighbours
if direction == "to":
outgoing_vectors = (
Vector.query.with_entities(Vector.destination_id)
.filter_by(origin_id=self.id, failed=False)
.all()
)
neighbor_ids = [v.destination_id for v in outgoing_vectors]
if neighbor_ids:
neighbors = Node.query.filter(Node.id.in_(neighbor_ids)).all()
neighbors = [n for n in neighbors if isinstance(n, type)]
if direction == "from":
incoming_vectors = (
Vector.query.with_entities(Vector.origin_id)
.filter_by(destination_id=self.id, failed=False)
.all()
)
neighbor_ids = [v.origin_id for v in incoming_vectors]
if neighbor_ids:
neighbors = Node.query.filter(Node.id.in_(neighbor_ids)).all()
neighbors = [n for n in neighbors if isinstance(n, type)]
if direction == "either":
neighbors = list(
set(
self.neighbors(type=type, direction="to")
+ self.neighbors(type=type, direction="from")
)
)
if direction == "both":
neighbors = list(
set(self.neighbors(type=type, direction="to"))
& set(self.neighbors(type=type, direction="from"))
)
return neighbors |
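A usage sketch; node is an existing Node instance and Agent stands in for any Node subclass you want to filter on (both are placeholders here).

outgoing = node.neighbors(type=Agent, direction="to")   # not-failed nodes this node points at
mutual = node.neighbors(direction="both")               # nodes connected in both directions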
def get_connections_by_dests(self, dests):
'''Search for all connections involving this and all other ports.'''
with self._mutex:
res = []
for c in self.connections:
if not c.has_port(self):
continue
has_dest = False
for d in dests:
if c.has_port(d):
has_dest = True
break
if has_dest:
res.append(c)
return res | Search for all connections involving this and all other ports. | Below is the instruction that describes the task:
### Input:
Search for all connections involving this and all other ports.
### Response:
def get_connections_by_dests(self, dests):
'''Search for all connections involving this and all other ports.'''
with self._mutex:
res = []
for c in self.connections:
if not c.has_port(self):
continue
has_dest = False
for d in dests:
if c.has_port(d):
has_dest = True
break
if has_dest:
res.append(c)
return res |
def delete(self, account_id, user_id):
""" Only the primary on the account can add or remove user's access to an account
:param account_id: int of the account_id for the account
:param user_id: int of the user_id to grant access
:return: Access dict
"""
return self.connection.delete('account/access', account_id=account_id, user_id=user_id) | Only the primary on the account can add or remove user's access to an account
:param account_id: int of the account_id for the account
:param user_id: int of the user_id to grant access
:return: Access dict | Below is the instruction that describes the task:
### Input:
Only the primary on the account can add or remove user's access to an account
:param account_id: int of the account_id for the account
:param user_id: int of the user_id to grant access
:return: Access dict
### Response:
def delete(self, account_id, user_id):
""" Only the primary on the account can add or remove user's access to an account
:param account_id: int of the account_id for the account
:param user_id: int of the user_id to grant access
:return: Access dict
"""
return self.connection.delete('account/access', account_id=account_id, user_id=user_id) |
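Usage sketch with placeholder IDs; access_client is assumed to be an instance of the surrounding class with its connection already configured.

result = access_client.delete(account_id=1001, user_id=42)   # revoke user 42's access to account 1001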
def lloyd_aggregation(C, ratio=0.03, distance='unit', maxiter=10):
"""Aggregate nodes using Lloyd Clustering.
Parameters
----------
C : csr_matrix
strength of connection matrix
ratio : scalar
Fraction of the nodes which will be seeds.
distance : ['unit','abs','inv',None]
Distance assigned to each edge of the graph G used in Lloyd clustering
For each nonzero value C[i,j]:
======= ===========================
'unit' G[i,j] = 1
'abs' G[i,j] = abs(C[i,j])
'inv' G[i,j] = 1.0/abs(C[i,j])
'same' G[i,j] = C[i,j]
'sub' G[i,j] = C[i,j] - min(C)
======= ===========================
maxiter : int
Maximum number of iterations to perform
Returns
-------
AggOp : csr_matrix
aggregation operator which determines the sparsity pattern
of the tentative prolongator
seeds : array
array of Cpts, i.e., Cpts[i] = root node of aggregate i
See Also
--------
amg_core.standard_aggregation
Examples
--------
>>> from scipy.sparse import csr_matrix
>>> from pyamg.gallery import poisson
>>> from pyamg.aggregation.aggregate import lloyd_aggregation
>>> A = poisson((4,), format='csr') # 1D mesh with 4 vertices
>>> A.todense()
matrix([[ 2., -1., 0., 0.],
[-1., 2., -1., 0.],
[ 0., -1., 2., -1.],
[ 0., 0., -1., 2.]])
>>> lloyd_aggregation(A)[0].todense() # one aggregate
matrix([[1],
[1],
[1],
[1]], dtype=int8)
>>> # more seeding for two aggregates
>>> Agg = lloyd_aggregation(A,ratio=0.5)[0].todense()
"""
if ratio <= 0 or ratio > 1:
raise ValueError('ratio must be > 0.0 and <= 1.0')
if not (isspmatrix_csr(C) or isspmatrix_csc(C)):
raise TypeError('expected csr_matrix or csc_matrix')
if distance == 'unit':
data = np.ones_like(C.data).astype(float)
elif distance == 'abs':
data = abs(C.data)
elif distance == 'inv':
data = 1.0/abs(C.data)
elif distance == 'same':
data = C.data
elif distance == 'min':
data = C.data - C.data.min()
else:
raise ValueError('unrecognized value distance=%s' % distance)
if C.dtype == complex:
data = np.real(data)
assert(data.min() >= 0)
G = C.__class__((data, C.indices, C.indptr), shape=C.shape)
num_seeds = int(min(max(ratio * G.shape[0], 1), G.shape[0]))
distances, clusters, seeds = lloyd_cluster(G, num_seeds, maxiter=maxiter)
row = (clusters >= 0).nonzero()[0]
col = clusters[row]
data = np.ones(len(row), dtype='int8')
AggOp = coo_matrix((data, (row, col)),
shape=(G.shape[0], num_seeds)).tocsr()
return AggOp, seeds | Aggregate nodes using Lloyd Clustering.
Parameters
----------
C : csr_matrix
strength of connection matrix
ratio : scalar
Fraction of the nodes which will be seeds.
distance : ['unit','abs','inv',None]
Distance assigned to each edge of the graph G used in Lloyd clustering
For each nonzero value C[i,j]:
======= ===========================
'unit' G[i,j] = 1
'abs' G[i,j] = abs(C[i,j])
'inv' G[i,j] = 1.0/abs(C[i,j])
'same' G[i,j] = C[i,j]
'sub' G[i,j] = C[i,j] - min(C)
======= ===========================
maxiter : int
Maximum number of iterations to perform
Returns
-------
AggOp : csr_matrix
aggregation operator which determines the sparsity pattern
of the tentative prolongator
seeds : array
array of Cpts, i.e., Cpts[i] = root node of aggregate i
See Also
--------
amg_core.standard_aggregation
Examples
--------
>>> from scipy.sparse import csr_matrix
>>> from pyamg.gallery import poisson
>>> from pyamg.aggregation.aggregate import lloyd_aggregation
>>> A = poisson((4,), format='csr') # 1D mesh with 4 vertices
>>> A.todense()
matrix([[ 2., -1., 0., 0.],
[-1., 2., -1., 0.],
[ 0., -1., 2., -1.],
[ 0., 0., -1., 2.]])
>>> lloyd_aggregation(A)[0].todense() # one aggregate
matrix([[1],
[1],
[1],
[1]], dtype=int8)
>>> # more seeding for two aggregates
>>> Agg = lloyd_aggregation(A,ratio=0.5)[0].todense() | Below is the instruction that describes the task:
### Input:
Aggregate nodes using Lloyd Clustering.
Parameters
----------
C : csr_matrix
strength of connection matrix
ratio : scalar
Fraction of the nodes which will be seeds.
distance : ['unit','abs','inv',None]
Distance assigned to each edge of the graph G used in Lloyd clustering
For each nonzero value C[i,j]:
======= ===========================
'unit' G[i,j] = 1
'abs' G[i,j] = abs(C[i,j])
'inv' G[i,j] = 1.0/abs(C[i,j])
'same' G[i,j] = C[i,j]
'sub' G[i,j] = C[i,j] - min(C)
======= ===========================
maxiter : int
Maximum number of iterations to perform
Returns
-------
AggOp : csr_matrix
aggregation operator which determines the sparsity pattern
of the tentative prolongator
seeds : array
array of Cpts, i.e., Cpts[i] = root node of aggregate i
See Also
--------
amg_core.standard_aggregation
Examples
--------
>>> from scipy.sparse import csr_matrix
>>> from pyamg.gallery import poisson
>>> from pyamg.aggregation.aggregate import lloyd_aggregation
>>> A = poisson((4,), format='csr') # 1D mesh with 4 vertices
>>> A.todense()
matrix([[ 2., -1., 0., 0.],
[-1., 2., -1., 0.],
[ 0., -1., 2., -1.],
[ 0., 0., -1., 2.]])
>>> lloyd_aggregation(A)[0].todense() # one aggregate
matrix([[1],
[1],
[1],
[1]], dtype=int8)
>>> # more seeding for two aggregates
>>> Agg = lloyd_aggregation(A,ratio=0.5)[0].todense()
### Response:
def lloyd_aggregation(C, ratio=0.03, distance='unit', maxiter=10):
"""Aggregate nodes using Lloyd Clustering.
Parameters
----------
C : csr_matrix
strength of connection matrix
ratio : scalar
Fraction of the nodes which will be seeds.
distance : ['unit','abs','inv',None]
Distance assigned to each edge of the graph G used in Lloyd clustering
For each nonzero value C[i,j]:
======= ===========================
'unit' G[i,j] = 1
'abs' G[i,j] = abs(C[i,j])
'inv' G[i,j] = 1.0/abs(C[i,j])
'same' G[i,j] = C[i,j]
'sub' G[i,j] = C[i,j] - min(C)
======= ===========================
maxiter : int
Maximum number of iterations to perform
Returns
-------
AggOp : csr_matrix
aggregation operator which determines the sparsity pattern
of the tentative prolongator
seeds : array
array of Cpts, i.e., Cpts[i] = root node of aggregate i
See Also
--------
amg_core.standard_aggregation
Examples
--------
>>> from scipy.sparse import csr_matrix
>>> from pyamg.gallery import poisson
>>> from pyamg.aggregation.aggregate import lloyd_aggregation
>>> A = poisson((4,), format='csr') # 1D mesh with 4 vertices
>>> A.todense()
matrix([[ 2., -1., 0., 0.],
[-1., 2., -1., 0.],
[ 0., -1., 2., -1.],
[ 0., 0., -1., 2.]])
>>> lloyd_aggregation(A)[0].todense() # one aggregate
matrix([[1],
[1],
[1],
[1]], dtype=int8)
>>> # more seeding for two aggregates
>>> Agg = lloyd_aggregation(A,ratio=0.5)[0].todense()
"""
if ratio <= 0 or ratio > 1:
raise ValueError('ratio must be > 0.0 and <= 1.0')
if not (isspmatrix_csr(C) or isspmatrix_csc(C)):
raise TypeError('expected csr_matrix or csc_matrix')
if distance == 'unit':
data = np.ones_like(C.data).astype(float)
elif distance == 'abs':
data = abs(C.data)
elif distance == 'inv':
data = 1.0/abs(C.data)
elif distance == 'same':
data = C.data
elif distance == 'min':
data = C.data - C.data.min()
else:
raise ValueError('unrecognized value distance=%s' % distance)
if C.dtype == complex:
data = np.real(data)
assert(data.min() >= 0)
G = C.__class__((data, C.indices, C.indptr), shape=C.shape)
num_seeds = int(min(max(ratio * G.shape[0], 1), G.shape[0]))
distances, clusters, seeds = lloyd_cluster(G, num_seeds, maxiter=maxiter)
row = (clusters >= 0).nonzero()[0]
col = clusters[row]
data = np.ones(len(row), dtype='int8')
AggOp = coo_matrix((data, (row, col)),
shape=(G.shape[0], num_seeds)).tocsr()
return AggOp, seeds |
def get_edge_type(self, edge_type):
"""Returns all edges with the specified edge type.
Parameters
----------
edge_type : int
An integer specifying what type of edges to return.
Returns
-------
out : list of 2-tuples
A list of 2-tuples representing the edges in the graph
with the specified edge type.
Examples
--------
Let's get type 2 edges from the following graph
>>> import queueing_tool as qt
>>> adjacency = {
... 0: {1: {'edge_type': 2}},
... 1: {2: {'edge_type': 1},
... 3: {'edge_type': 4}},
... 2: {0: {'edge_type': 2}},
... 3: {3: {'edge_type': 0}}
... }
>>> G = qt.QueueNetworkDiGraph(adjacency)
>>> ans = G.get_edge_type(2)
>>> ans.sort()
>>> ans
[(0, 1), (2, 0)]
"""
edges = []
for e in self.edges():
if self.adj[e[0]][e[1]].get('edge_type') == edge_type:
edges.append(e)
return edges | Returns all edges with the specified edge type.
Parameters
----------
edge_type : int
An integer specifying what type of edges to return.
Returns
-------
out : list of 2-tuples
A list of 2-tuples representing the edges in the graph
with the specified edge type.
Examples
--------
Let's get type 2 edges from the following graph
>>> import queueing_tool as qt
>>> adjacency = {
... 0: {1: {'edge_type': 2}},
... 1: {2: {'edge_type': 1},
... 3: {'edge_type': 4}},
... 2: {0: {'edge_type': 2}},
... 3: {3: {'edge_type': 0}}
... }
>>> G = qt.QueueNetworkDiGraph(adjacency)
>>> ans = G.get_edge_type(2)
>>> ans.sort()
>>> ans
[(0, 1), (2, 0)] | Below is the instruction that describes the task:
### Input:
Returns all edges with the specified edge type.
Parameters
----------
edge_type : int
An integer specifying what type of edges to return.
Returns
-------
out : list of 2-tuples
A list of 2-tuples representing the edges in the graph
with the specified edge type.
Examples
--------
Let's get type 2 edges from the following graph
>>> import queueing_tool as qt
>>> adjacency = {
... 0: {1: {'edge_type': 2}},
... 1: {2: {'edge_type': 1},
... 3: {'edge_type': 4}},
... 2: {0: {'edge_type': 2}},
... 3: {3: {'edge_type': 0}}
... }
>>> G = qt.QueueNetworkDiGraph(adjacency)
>>> ans = G.get_edge_type(2)
>>> ans.sort()
>>> ans
[(0, 1), (2, 0)]
### Response:
def get_edge_type(self, edge_type):
"""Returns all edges with the specified edge type.
Parameters
----------
edge_type : int
An integer specifying what type of edges to return.
Returns
-------
out : list of 2-tuples
A list of 2-tuples representing the edges in the graph
with the specified edge type.
Examples
--------
Let's get type 2 edges from the following graph
>>> import queueing_tool as qt
>>> adjacency = {
... 0: {1: {'edge_type': 2}},
... 1: {2: {'edge_type': 1},
... 3: {'edge_type': 4}},
... 2: {0: {'edge_type': 2}},
... 3: {3: {'edge_type': 0}}
... }
>>> G = qt.QueueNetworkDiGraph(adjacency)
>>> ans = G.get_edge_type(2)
>>> ans.sort()
>>> ans
[(0, 1), (2, 0)]
"""
edges = []
for e in self.edges():
if self.adj[e[0]][e[1]].get('edge_type') == edge_type:
edges.append(e)
return edges |
def object(self, session):
'''Instance of :attr:`model_type` with id :attr:`object_id`.'''
if not hasattr(self, '_object'):
pkname = self.model_type._meta.pkname()
query = session.query(self.model_type).filter(**{pkname:
self.object_id})
return query.items(callback=self.__set_object)
else:
return self._object | Instance of :attr:`model_type` with id :attr:`object_id`. | Below is the instruction that describes the task:
### Input:
Instance of :attr:`model_type` with id :attr:`object_id`.
### Response:
def object(self, session):
'''Instance of :attr:`model_type` with id :attr:`object_id`.'''
if not hasattr(self, '_object'):
pkname = self.model_type._meta.pkname()
query = session.query(self.model_type).filter(**{pkname:
self.object_id})
return query.items(callback=self.__set_object)
else:
return self._object |
def __search_files(self, files):
"""
Searches in given files.
:param files: Files.
:type files: list
"""
for file in files:
if self.__interrupt:
return
if not foundations.common.path_exists(file):
continue
if foundations.io.is_readable(file):
if foundations.io.is_binary_file(file):
continue
LOGGER.info("{0} | Searching '{1}' file!".format(self.__class__.__name__, file))
cache_data = self.__container.files_cache.get_content(file)
if not cache_data:
reader = foundations.io.File(file)
content = reader.read()
if content is None:
LOGGER.warning("!> Error occured while reading '{0}' file proceeding to next one!".format(file))
continue
self.__container.files_cache.add_content(**{file: CacheData(content=content, document=None)})
else:
content = cache_data.content
occurrences = self.__search_document(QTextDocument(QString(content)), self.__pattern, self.__settings)
occurrences and self.__search_results.append(SearchResult(file=file,
pattern=self.__pattern,
settings=self.__settings,
occurrences=occurrences)) | Searches in given files.
:param files: Files.
:type files: list | Below is the instruction that describes the task:
### Input:
Searches in given files.
:param files: Files.
:type files: list
### Response:
def __search_files(self, files):
"""
Searches in given files.
:param files: Files.
:type files: list
"""
for file in files:
if self.__interrupt:
return
if not foundations.common.path_exists(file):
continue
if foundations.io.is_readable(file):
if foundations.io.is_binary_file(file):
continue
LOGGER.info("{0} | Searching '{1}' file!".format(self.__class__.__name__, file))
cache_data = self.__container.files_cache.get_content(file)
if not cache_data:
reader = foundations.io.File(file)
content = reader.read()
if content is None:
LOGGER.warning("!> Error occured while reading '{0}' file proceeding to next one!".format(file))
continue
self.__container.files_cache.add_content(**{file: CacheData(content=content, document=None)})
else:
content = cache_data.content
occurrences = self.__search_document(QTextDocument(QString(content)), self.__pattern, self.__settings)
occurrences and self.__search_results.append(SearchResult(file=file,
pattern=self.__pattern,
settings=self.__settings,
occurrences=occurrences)) |
def pkcs12_kdf(hash_algorithm, password, salt, iterations, key_length, id_):
"""
KDF from RFC7292 appendix b.2 - https://tools.ietf.org/html/rfc7292#page-19
:param hash_algorithm:
The string name of the hash algorithm to use: "md5", "sha1", "sha224",
"sha256", "sha384", "sha512"
:param password:
A byte string of the password to use an input to the KDF
:param salt:
A cryptographic random byte string
:param iterations:
The numbers of iterations to use when deriving the key
:param key_length:
The length of the desired key in bytes
:param id_:
The ID of the usage - 1 for key, 2 for iv, 3 for mac
:return:
The derived key as a byte string
"""
if not isinstance(password, byte_cls):
raise TypeError(pretty_message(
'''
password must be a byte string, not %s
''',
type_name(password)
))
if not isinstance(salt, byte_cls):
raise TypeError(pretty_message(
'''
salt must be a byte string, not %s
''',
type_name(salt)
))
if not isinstance(iterations, int_types):
raise TypeError(pretty_message(
'''
iterations must be an integer, not %s
''',
type_name(iterations)
))
if iterations < 1:
raise ValueError(pretty_message(
'''
iterations must be greater than 0 - is %s
''',
repr(iterations)
))
if not isinstance(key_length, int_types):
raise TypeError(pretty_message(
'''
key_length must be an integer, not %s
''',
type_name(key_length)
))
if key_length < 1:
raise ValueError(pretty_message(
'''
key_length must be greater than 0 - is %s
''',
repr(key_length)
))
if hash_algorithm not in set(['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512']):
raise ValueError(pretty_message(
'''
hash_algorithm must be one of "md5", "sha1", "sha224", "sha256",
"sha384", "sha512", not %s
''',
repr(hash_algorithm)
))
if id_ not in set([1, 2, 3]):
raise ValueError(pretty_message(
'''
id_ must be one of 1, 2, 3, not %s
''',
repr(id_)
))
utf16_password = password.decode('utf-8').encode('utf-16be') + b'\x00\x00'
algo = getattr(hashlib, hash_algorithm)
# u and v values are bytes (not bits as in the RFC)
u = {
'md5': 16,
'sha1': 20,
'sha224': 28,
'sha256': 32,
'sha384': 48,
'sha512': 64
}[hash_algorithm]
if hash_algorithm in ['sha384', 'sha512']:
v = 128
else:
v = 64
# Step 1
d = chr_cls(id_) * v
# Step 2
s = b''
if salt != b'':
s_len = v * int(math.ceil(float(len(salt)) / v))
while len(s) < s_len:
s += salt
s = s[0:s_len]
# Step 3
p = b''
if utf16_password != b'':
p_len = v * int(math.ceil(float(len(utf16_password)) / v))
while len(p) < p_len:
p += utf16_password
p = p[0:p_len]
# Step 4
i = s + p
# Step 5
c = int(math.ceil(float(key_length) / u))
a = b'\x00' * (c * u)
for num in range(1, c + 1):
# Step 6A
a2 = algo(d + i).digest()
for _ in range(2, iterations + 1):
a2 = algo(a2).digest()
if num < c:
# Step 6B
b = b''
while len(b) < v:
b += a2
b = int_from_bytes(b[0:v]) + 1
# Step 6C
for num2 in range(0, len(i) // v):
start = num2 * v
end = (num2 + 1) * v
i_num2 = i[start:end]
i_num2 = int_to_bytes(int_from_bytes(i_num2) + b)
# Ensure the new slice is the right size
i_num2_l = len(i_num2)
if i_num2_l > v:
i_num2 = i_num2[i_num2_l - v:]
i = i[0:start] + i_num2 + i[end:]
# Step 7 (one piece at a time)
begin = (num - 1) * u
to_copy = min(key_length, u)
a = a[0:begin] + a2[0:to_copy] + a[begin + to_copy:]
return a[0:key_length] | KDF from RFC7292 appendix b.2 - https://tools.ietf.org/html/rfc7292#page-19
:param hash_algorithm:
The string name of the hash algorithm to use: "md5", "sha1", "sha224",
"sha256", "sha384", "sha512"
:param password:
A byte string of the password to use an input to the KDF
:param salt:
A cryptographic random byte string
:param iterations:
The numbers of iterations to use when deriving the key
:param key_length:
The length of the desired key in bytes
:param id_:
The ID of the usage - 1 for key, 2 for iv, 3 for mac
:return:
The derived key as a byte string | Below is the instruction that describes the task:
### Input:
KDF from RFC7292 appendix b.2 - https://tools.ietf.org/html/rfc7292#page-19
:param hash_algorithm:
The string name of the hash algorithm to use: "md5", "sha1", "sha224",
"sha256", "sha384", "sha512"
:param password:
A byte string of the password to use an input to the KDF
:param salt:
A cryptographic random byte string
:param iterations:
The numbers of iterations to use when deriving the key
:param key_length:
The length of the desired key in bytes
:param id_:
The ID of the usage - 1 for key, 2 for iv, 3 for mac
:return:
The derived key as a byte string
### Response:
def pkcs12_kdf(hash_algorithm, password, salt, iterations, key_length, id_):
"""
KDF from RFC7292 appendix b.2 - https://tools.ietf.org/html/rfc7292#page-19
:param hash_algorithm:
The string name of the hash algorithm to use: "md5", "sha1", "sha224",
"sha256", "sha384", "sha512"
:param password:
A byte string of the password to use an input to the KDF
:param salt:
A cryptographic random byte string
:param iterations:
The numbers of iterations to use when deriving the key
:param key_length:
The length of the desired key in bytes
:param id_:
The ID of the usage - 1 for key, 2 for iv, 3 for mac
:return:
The derived key as a byte string
"""
if not isinstance(password, byte_cls):
raise TypeError(pretty_message(
'''
password must be a byte string, not %s
''',
type_name(password)
))
if not isinstance(salt, byte_cls):
raise TypeError(pretty_message(
'''
salt must be a byte string, not %s
''',
type_name(salt)
))
if not isinstance(iterations, int_types):
raise TypeError(pretty_message(
'''
iterations must be an integer, not %s
''',
type_name(iterations)
))
if iterations < 1:
raise ValueError(pretty_message(
'''
iterations must be greater than 0 - is %s
''',
repr(iterations)
))
if not isinstance(key_length, int_types):
raise TypeError(pretty_message(
'''
key_length must be an integer, not %s
''',
type_name(key_length)
))
if key_length < 1:
raise ValueError(pretty_message(
'''
key_length must be greater than 0 - is %s
''',
repr(key_length)
))
if hash_algorithm not in set(['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512']):
raise ValueError(pretty_message(
'''
hash_algorithm must be one of "md5", "sha1", "sha224", "sha256",
"sha384", "sha512", not %s
''',
repr(hash_algorithm)
))
if id_ not in set([1, 2, 3]):
raise ValueError(pretty_message(
'''
id_ must be one of 1, 2, 3, not %s
''',
repr(id_)
))
utf16_password = password.decode('utf-8').encode('utf-16be') + b'\x00\x00'
algo = getattr(hashlib, hash_algorithm)
# u and v values are bytes (not bits as in the RFC)
u = {
'md5': 16,
'sha1': 20,
'sha224': 28,
'sha256': 32,
'sha384': 48,
'sha512': 64
}[hash_algorithm]
if hash_algorithm in ['sha384', 'sha512']:
v = 128
else:
v = 64
# Step 1
d = chr_cls(id_) * v
# Step 2
s = b''
if salt != b'':
s_len = v * int(math.ceil(float(len(salt)) / v))
while len(s) < s_len:
s += salt
s = s[0:s_len]
# Step 3
p = b''
if utf16_password != b'':
p_len = v * int(math.ceil(float(len(utf16_password)) / v))
while len(p) < p_len:
p += utf16_password
p = p[0:p_len]
# Step 4
i = s + p
# Step 5
c = int(math.ceil(float(key_length) / u))
a = b'\x00' * (c * u)
for num in range(1, c + 1):
# Step 6A
a2 = algo(d + i).digest()
for _ in range(2, iterations + 1):
a2 = algo(a2).digest()
if num < c:
# Step 6B
b = b''
while len(b) < v:
b += a2
b = int_from_bytes(b[0:v]) + 1
# Step 6C
for num2 in range(0, len(i) // v):
start = num2 * v
end = (num2 + 1) * v
i_num2 = i[start:end]
i_num2 = int_to_bytes(int_from_bytes(i_num2) + b)
# Ensure the new slice is the right size
i_num2_l = len(i_num2)
if i_num2_l > v:
i_num2 = i_num2[i_num2_l - v:]
i = i[0:start] + i_num2 + i[end:]
# Step 7 (one piece at a time)
begin = (num - 1) * u
to_copy = min(key_length, u)
a = a[0:begin] + a2[0:to_copy] + a[begin + to_copy:]
return a[0:key_length] |
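A hedged usage sketch deriving PKCS#12 key and IV material; the salt, iteration count and output lengths below are illustrative (sized here for a 3DES key and an 8-byte IV), and the password must be a UTF-8 byte string as the function expects.

import os

salt = os.urandom(8)
# id_=1 derives key material, id_=2 an IV, id_=3 a MAC key.
key = pkcs12_kdf('sha1', b'correct horse battery staple', salt, 2048, 24, 1)
iv = pkcs12_kdf('sha1', b'correct horse battery staple', salt, 2048, 8, 2)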
def get_similarity_measures(self):
"""Helper function for computing similarity measures."""
if not self.quiet:
print
print "Computing", self.current_similarity_measure, "similarity..."
self.compute_similarity_scores() | Helper function for computing similarity measures. | Below is the instruction that describes the task:
### Input:
Helper function for computing similarity measures.
### Response:
def get_similarity_measures(self):
"""Helper function for computing similarity measures."""
if not self.quiet:
print
print "Computing", self.current_similarity_measure, "similarity..."
self.compute_similarity_scores() |
def acell(self, label, value_render_option='FORMATTED_VALUE'):
"""Returns an instance of a :class:`gspread.models.Cell`.
:param label: Cell label in A1 notation
Letter case is ignored.
:type label: str
:param value_render_option: (optional) Determines how values should be
rendered in the output. See
`ValueRenderOption`_ in the Sheets API.
:type value_render_option: str
.. _ValueRenderOption: https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption
Example:
>>> worksheet.acell('A1')
<Cell R1C1 "I'm cell A1">
"""
return self.cell(
*(a1_to_rowcol(label)),
value_render_option=value_render_option
) | Returns an instance of a :class:`gspread.models.Cell`.
:param label: Cell label in A1 notation
Letter case is ignored.
:type label: str
:param value_render_option: (optional) Determines how values should be
rendered in the output. See
`ValueRenderOption`_ in the Sheets API.
:type value_render_option: str
.. _ValueRenderOption: https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption
Example:
>>> worksheet.acell('A1')
<Cell R1C1 "I'm cell A1"> | Below is the instruction that describes the task:
### Input:
Returns an instance of a :class:`gspread.models.Cell`.
:param label: Cell label in A1 notation
Letter case is ignored.
:type label: str
:param value_render_option: (optional) Determines how values should be
rendered in the the output. See
`ValueRenderOption`_ in the Sheets API.
:type value_render_option: str
.. _ValueRenderOption: https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption
Example:
>>> worksheet.acell('A1')
<Cell R1C1 "I'm cell A1">
### Response:
def acell(self, label, value_render_option='FORMATTED_VALUE'):
"""Returns an instance of a :class:`gspread.models.Cell`.
:param label: Cell label in A1 notation
Letter case is ignored.
:type label: str
:param value_render_option: (optional) Determines how values should be
rendered in the output. See
`ValueRenderOption`_ in the Sheets API.
:type value_render_option: str
.. _ValueRenderOption: https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption
Example:
>>> worksheet.acell('A1')
<Cell R1C1 "I'm cell A1">
"""
return self.cell(
*(a1_to_rowcol(label)),
value_render_option=value_render_option
) |
def DownloadFile(hUcs, source, destination):
"""
Method provides the functionality to download file from the UCS. This method is used in BackupUcs and GetTechSupport to
download the files from the Ucs.
"""
import urllib2
from sys import stdout
from time import sleep
httpAddress = "%s/%s" % (hUcs.Uri(), source)
file_name = httpAddress.split('/')[-1]
req = urllib2.Request(httpAddress) # send the new url with the cookie.
req.add_header('Cookie', 'ucsm-cookie=%s' % (hUcs._cookie))
res = urllib2.urlopen(req)
meta = res.info()
file_size = int(meta.getheaders("Content-Length")[0])
print "Downloading: %s Bytes: %s" % (file_name, file_size)
f = open(destination, 'wb')
file_size_dl = 0
block_sz = 8192
while True:
rBuffer = res.read(block_sz)
if not rBuffer:
break
file_size_dl += len(rBuffer)
f.write(rBuffer)
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
status = status + chr(8) * (len(status) + 1)
stdout.write("\r%s" % status)
stdout.flush()
# print status
f.close() | Method provides the functionality to download file from the UCS. This method is used in BackupUcs and GetTechSupport to
download the files from the Ucs. | Below is the instruction that describes the task:
### Input:
Method provides the functionality to download file from the UCS. This method is used in BackupUcs and GetTechSupport to
download the files from the Ucs.
### Response:
def DownloadFile(hUcs, source, destination):
"""
Method provides the functionality to download file from the UCS. This method is used in BackupUcs and GetTechSupport to
download the files from the Ucs.
"""
import urllib2
from sys import stdout
from time import sleep
httpAddress = "%s/%s" % (hUcs.Uri(), source)
file_name = httpAddress.split('/')[-1]
req = urllib2.Request(httpAddress) # send the new url with the cookie.
req.add_header('Cookie', 'ucsm-cookie=%s' % (hUcs._cookie))
res = urllib2.urlopen(req)
meta = res.info()
file_size = int(meta.getheaders("Content-Length")[0])
print "Downloading: %s Bytes: %s" % (file_name, file_size)
f = open(destination, 'wb')
file_size_dl = 0
block_sz = 8192
while True:
rBuffer = res.read(block_sz)
if not rBuffer:
break
file_size_dl += len(rBuffer)
f.write(rBuffer)
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
status = status + chr(8) * (len(status) + 1)
stdout.write("\r%s" % status)
stdout.flush()
# print status
f.close() |
def debug_tag(self, tag):
"""Setter for the debug tag.
By default, the tag is the serial of the device, but sometimes it may
be more descriptive to use a different tag of the user's choice.
Changing debug tag changes part of the prefix of debug info emitted by
this object, like log lines and the message of DeviceError.
Example:
By default, the device's serial number is used:
'INFO [AndroidDevice|abcdefg12345] One pending call ringing.'
The tag can be customized with `ad.debug_tag = 'Caller'`:
'INFO [AndroidDevice|Caller] One pending call ringing.'
"""
self.log.info('Logging debug tag set to "%s"', tag)
self._debug_tag = tag
self.log.extra['tag'] = tag | Setter for the debug tag.
By default, the tag is the serial of the device, but sometimes it may
be more descriptive to use a different tag of the user's choice.
Changing debug tag changes part of the prefix of debug info emitted by
this object, like log lines and the message of DeviceError.
Example:
By default, the device's serial number is used:
'INFO [AndroidDevice|abcdefg12345] One pending call ringing.'
The tag can be customized with `ad.debug_tag = 'Caller'`:
'INFO [AndroidDevice|Caller] One pending call ringing.' | Below is the instruction that describes the task:
### Input:
Setter for the debug tag.
By default, the tag is the serial of the device, but sometimes it may
be more descriptive to use a different tag of the user's choice.
Changing debug tag changes part of the prefix of debug info emitted by
this object, like log lines and the message of DeviceError.
Example:
By default, the device's serial number is used:
'INFO [AndroidDevice|abcdefg12345] One pending call ringing.'
The tag can be customized with `ad.debug_tag = 'Caller'`:
'INFO [AndroidDevice|Caller] One pending call ringing.'
### Response:
def debug_tag(self, tag):
"""Setter for the debug tag.
By default, the tag is the serial of the device, but sometimes it may
be more descriptive to use a different tag of the user's choice.
Changing debug tag changes part of the prefix of debug info emitted by
this object, like log lines and the message of DeviceError.
Example:
By default, the device's serial number is used:
'INFO [AndroidDevice|abcdefg12345] One pending call ringing.'
The tag can be customized with `ad.debug_tag = 'Caller'`:
'INFO [AndroidDevice|Caller] One pending call ringing.'
"""
self.log.info('Logging debug tag set to "%s"', tag)
self._debug_tag = tag
self.log.extra['tag'] = tag |
def CirrusIamUserReady(iam_aws_id, iam_aws_secret):
""" Returns true if provided IAM credentials are ready to use. """
is_ready = False
try:
s3 = core.CreateTestedS3Connection(iam_aws_id, iam_aws_secret)
if s3:
if core.CirrusAccessIdMetadata(s3, iam_aws_id).IsInitialized():
is_ready = True
except boto.exception.BotoServerError as e:
print e
return is_ready | Returns true if provided IAM credentials are ready to use. | Below is the the instruction that describes the task:
### Input:
Returns true if provided IAM credentials are ready to use.
### Response:
def CirrusIamUserReady(iam_aws_id, iam_aws_secret):
""" Returns true if provided IAM credentials are ready to use. """
is_ready = False
try:
s3 = core.CreateTestedS3Connection(iam_aws_id, iam_aws_secret)
if s3:
if core.CirrusAccessIdMetadata(s3, iam_aws_id).IsInitialized():
is_ready = True
except boto.exception.BotoServerError as e:
print e
return is_ready |
def _calc_size_stats(self):
"""
get the size in bytes and num records of the content
"""
self.total_records = 0
self.total_length = 0
self.total_nodes = 0
if type(self.content['data']) is dict:
self.total_length += len(str(self.content['data']))
self.total_records += 1
self.total_nodes = sum(len(x) for x in self.content['data'].values())
elif hasattr(self.content['data'], '__iter__') and type(self.content['data']) is not str:
self._get_size_recursive(self.content['data'])
else:
self.total_records += 1
self.total_length += len(str(self.content['data']))
return str(self.total_records) + ' records [or ' + str(self.total_nodes) + ' nodes], taking ' + str(self.total_length) + ' bytes' | get the size in bytes and num records of the content | Below is the the instruction that describes the task:
### Input:
get the size in bytes and num records of the content
### Response:
def _calc_size_stats(self):
"""
get the size in bytes and num records of the content
"""
self.total_records = 0
self.total_length = 0
self.total_nodes = 0
if type(self.content['data']) is dict:
self.total_length += len(str(self.content['data']))
self.total_records += 1
self.total_nodes = sum(len(x) for x in self.content['data'].values())
elif hasattr(self.content['data'], '__iter__') and type(self.content['data']) is not str:
self._get_size_recursive(self.content['data'])
else:
self.total_records += 1
self.total_length += len(str(self.content['data']))
return str(self.total_records) + ' records [or ' + str(self.total_nodes) + ' nodes], taking ' + str(self.total_length) + ' bytes' |
def activate(self, branches, exclusive=False):
"""
Activate branches
Parameters
----------
branches : str or list
branch or list of branches to activate
exclusive : bool, optional (default=False)
if True deactivate the remaining branches
"""
if exclusive:
self.SetBranchStatus('*', 0)
if isinstance(branches, string_types):
branches = [branches]
for branch in branches:
if '*' in branch:
matched_branches = self.glob(branch)
for b in matched_branches:
self.SetBranchStatus(b, 1)
elif self.has_branch(branch):
self.SetBranchStatus(branch, 1) | Activate branches
Parameters
----------
branches : str or list
branch or list of branches to activate
exclusive : bool, optional (default=False)
if True deactivate the remaining branches | Below is the the instruction that describes the task:
### Input:
Activate branches
Parameters
----------
branches : str or list
branch or list of branches to activate
exclusive : bool, optional (default=False)
if True deactivate the remaining branches
### Response:
def activate(self, branches, exclusive=False):
"""
Activate branches
Parameters
----------
branches : str or list
branch or list of branches to activate
exclusive : bool, optional (default=False)
if True deactivate the remaining branches
"""
if exclusive:
self.SetBranchStatus('*', 0)
if isinstance(branches, string_types):
branches = [branches]
for branch in branches:
if '*' in branch:
matched_branches = self.glob(branch)
for b in matched_branches:
self.SetBranchStatus(b, 1)
elif self.has_branch(branch):
self.SetBranchStatus(branch, 1) |
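A short usage sketch: `chain` stands for whatever Tree/TreeChain object carries this activate() method, and the branch names are illustrative only.
# Enable a single branch by name.
chain.activate('mu_pt')
# Enable every branch matching the glob plus 'met', and deactivate all others.
chain.activate(['jet_*', 'met'], exclusive=True)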
def queue_it(queue=g_queue, **put_args):
"""
Wrapper. Instead of returning the result of the function, add it to a queue.
.. code:: python
import reusables
import queue
my_queue = queue.Queue()
@reusables.queue_it(my_queue)
def func(a):
return a
func(10)
print(my_queue.get())
# 10
:param queue: Queue to add result into
"""
def func_wrapper(func):
@wraps(func)
def wrapper(*args, **kwargs):
queue.put(func(*args, **kwargs), **put_args)
return wrapper
return func_wrapper | Wrapper. Instead of returning the result of the function, add it to a queue.
.. code:: python
import reusables
import queue
my_queue = queue.Queue()
@reusables.queue_it(my_queue)
def func(a):
return a
func(10)
print(my_queue.get())
# 10
:param queue: Queue to add result into | Below is the the instruction that describes the task:
### Input:
Wrapper. Instead of returning the result of the function, add it to a queue.
.. code:: python
import reusables
import queue
my_queue = queue.Queue()
@reusables.queue_it(my_queue)
def func(a):
return a
func(10)
print(my_queue.get())
# 10
:param queue: Queue to add result into
### Response:
def queue_it(queue=g_queue, **put_args):
"""
Wrapper. Instead of returning the result of the function, add it to a queue.
.. code:: python
import reusables
import queue
my_queue = queue.Queue()
@reusables.queue_it(my_queue)
def func(a):
return a
func(10)
print(my_queue.get())
# 10
:param queue: Queue to add result into
"""
def func_wrapper(func):
@wraps(func)
def wrapper(*args, **kwargs):
queue.put(func(*args, **kwargs), **put_args)
return wrapper
return func_wrapper |
def extract_causal_relations(self):
"""Extract causal relations as Statements."""
# Get the extractions that are labeled as directed and causal
relations = [e for e in self.doc.extractions if
'DirectedRelation' in e['labels'] and
'Causal' in e['labels']]
# For each relation, we try to extract an INDRA Statement and
# save it if it's valid
for relation in relations:
stmt = self.get_causal_relation(relation)
if stmt is not None:
self.statements.append(stmt) | Extract causal relations as Statements. | Below is the the instruction that describes the task:
### Input:
Extract causal relations as Statements.
### Response:
def extract_causal_relations(self):
"""Extract causal relations as Statements."""
# Get the extractions that are labeled as directed and causal
relations = [e for e in self.doc.extractions if
'DirectedRelation' in e['labels'] and
'Causal' in e['labels']]
# For each relation, we try to extract an INDRA Statement and
# save it if it's valid
for relation in relations:
stmt = self.get_causal_relation(relation)
if stmt is not None:
self.statements.append(stmt) |
def dump_np_vars(self, store_format='csv', delimiter=','):
"""
Dump the TDS simulation data to files by calling subroutines `write_lst` and
`write_np_dat`.
Parameters
-----------
store_format : str
dump format in `('csv', 'txt', 'hdf5')`
delimiter : str
delimiter for the `csv` and `txt` format
Returns
-------
bool: success flag
"""
ret = False
if self.system.files.no_output is True:
logger.debug('no_output is True, thus no TDS dump saved ')
return True
if self.write_lst() and self.write_np_dat(store_format=store_format, delimiter=delimiter):
ret = True
return ret | Dump the TDS simulation data to files by calling subroutines `write_lst` and
`write_np_dat`.
Parameters
-----------
store_format : str
dump format in `('csv', 'txt', 'hdf5')`
delimiter : str
delimiter for the `csv` and `txt` format
Returns
-------
bool: success flag | Below is the the instruction that describes the task:
### Input:
Dump the TDS simulation data to files by calling subroutines `write_lst` and
`write_np_dat`.
Parameters
-----------
store_format : str
dump format in `('csv', 'txt', 'hdf5')`
delimiter : str
delimiter for the `csv` and `txt` format
Returns
-------
bool: success flag
### Response:
def dump_np_vars(self, store_format='csv', delimiter=','):
"""
Dump the TDS simulation data to files by calling subroutines `write_lst` and
`write_np_dat`.
Parameters
-----------
store_format : str
dump format in `('csv', 'txt', 'hdf5')`
delimiter : str
delimiter for the `csv` and `txt` format
Returns
-------
bool: success flag
"""
ret = False
if self.system.files.no_output is True:
logger.debug('no_output is True, thus no TDS dump saved ')
return True
if self.write_lst() and self.write_np_dat(store_format=store_format, delimiter=delimiter):
ret = True
return ret |
def calculateFields(self):
"""Write calculated fields for read buffer."""
pf1 = self.m_blk_b[Field.Cos_Theta_Ln_1][MeterData.StringValue]
pf2 = self.m_blk_b[Field.Cos_Theta_Ln_2][MeterData.StringValue]
pf3 = self.m_blk_b[Field.Cos_Theta_Ln_3][MeterData.StringValue]
pf1_int = self.calcPF(pf1)
pf2_int = self.calcPF(pf2)
pf3_int = self.calcPF(pf3)
self.m_blk_b[Field.Power_Factor_Ln_1][MeterData.StringValue] = str(pf1_int)
self.m_blk_b[Field.Power_Factor_Ln_2][MeterData.StringValue] = str(pf2_int)
self.m_blk_b[Field.Power_Factor_Ln_3][MeterData.StringValue] = str(pf3_int)
self.m_blk_b[Field.Power_Factor_Ln_1][MeterData.NativeValue] = pf1_int
self.m_blk_b[Field.Power_Factor_Ln_2][MeterData.NativeValue] = pf2_int
self.m_blk_b[Field.Power_Factor_Ln_3][MeterData.NativeValue] = pf3_int
rms_watts_1 = self.m_blk_b[Field.RMS_Watts_Ln_1][MeterData.NativeValue]
rms_watts_2 = self.m_blk_b[Field.RMS_Watts_Ln_2][MeterData.NativeValue]
rms_watts_3 = self.m_blk_b[Field.RMS_Watts_Ln_3][MeterData.NativeValue]
sign_rms_watts_1 = 1
sign_rms_watts_2 = 1
sign_rms_watts_3 = 1
direction_byte = self.m_blk_a[Field.State_Watts_Dir][MeterData.NativeValue]
if direction_byte == DirectionFlag.ForwardForwardForward:
# all good
pass
if direction_byte == DirectionFlag.ForwardForwardReverse:
sign_rms_watts_3 = -1
pass
if direction_byte == DirectionFlag.ForwardReverseForward:
sign_rms_watts_2 = -1
pass
if direction_byte == DirectionFlag.ReverseForwardForward:
sign_rms_watts_1 = -1
pass
if direction_byte == DirectionFlag.ForwardReverseReverse:
sign_rms_watts_2 = -1
sign_rms_watts_3 = -1
pass
if direction_byte == DirectionFlag.ReverseForwardReverse:
sign_rms_watts_1 = -1
sign_rms_watts_3 = -1
pass
if direction_byte == DirectionFlag.ReverseReverseForward:
sign_rms_watts_1 = -1
sign_rms_watts_2 = -1
pass
if direction_byte == DirectionFlag.ReverseReverseReverse:
sign_rms_watts_1 = -1
sign_rms_watts_2 = -1
sign_rms_watts_3 = -1
pass
net_watts_1 = rms_watts_1 * sign_rms_watts_1
net_watts_2 = rms_watts_2 * sign_rms_watts_2
net_watts_3 = rms_watts_3 * sign_rms_watts_3
net_watts_tot = net_watts_1 + net_watts_2 + net_watts_3
self.m_blk_b[Field.Net_Calc_Watts_Ln_1][MeterData.NativeValue] = net_watts_1
self.m_blk_b[Field.Net_Calc_Watts_Ln_2][MeterData.NativeValue] = net_watts_2
self.m_blk_b[Field.Net_Calc_Watts_Ln_3][MeterData.NativeValue] = net_watts_3
self.m_blk_b[Field.Net_Calc_Watts_Tot][MeterData.NativeValue] = net_watts_tot
self.m_blk_b[Field.Net_Calc_Watts_Ln_1][MeterData.StringValue] = str(net_watts_1)
self.m_blk_b[Field.Net_Calc_Watts_Ln_2][MeterData.StringValue] = str(net_watts_2)
self.m_blk_b[Field.Net_Calc_Watts_Ln_3][MeterData.StringValue] = str(net_watts_3)
self.m_blk_b[Field.Net_Calc_Watts_Tot][MeterData.StringValue] = str(net_watts_tot)
pass | Write calculated fields for read buffer. | Below is the the instruction that describes the task:
### Input:
Write calculated fields for read buffer.
### Response:
def calculateFields(self):
"""Write calculated fields for read buffer."""
pf1 = self.m_blk_b[Field.Cos_Theta_Ln_1][MeterData.StringValue]
pf2 = self.m_blk_b[Field.Cos_Theta_Ln_2][MeterData.StringValue]
pf3 = self.m_blk_b[Field.Cos_Theta_Ln_3][MeterData.StringValue]
pf1_int = self.calcPF(pf1)
pf2_int = self.calcPF(pf2)
pf3_int = self.calcPF(pf3)
self.m_blk_b[Field.Power_Factor_Ln_1][MeterData.StringValue] = str(pf1_int)
self.m_blk_b[Field.Power_Factor_Ln_2][MeterData.StringValue] = str(pf2_int)
self.m_blk_b[Field.Power_Factor_Ln_3][MeterData.StringValue] = str(pf3_int)
self.m_blk_b[Field.Power_Factor_Ln_1][MeterData.NativeValue] = pf1_int
self.m_blk_b[Field.Power_Factor_Ln_2][MeterData.NativeValue] = pf2_int
self.m_blk_b[Field.Power_Factor_Ln_3][MeterData.NativeValue] = pf3_int
rms_watts_1 = self.m_blk_b[Field.RMS_Watts_Ln_1][MeterData.NativeValue]
rms_watts_2 = self.m_blk_b[Field.RMS_Watts_Ln_2][MeterData.NativeValue]
rms_watts_3 = self.m_blk_b[Field.RMS_Watts_Ln_3][MeterData.NativeValue]
sign_rms_watts_1 = 1
sign_rms_watts_2 = 1
sign_rms_watts_3 = 1
direction_byte = self.m_blk_a[Field.State_Watts_Dir][MeterData.NativeValue]
if direction_byte == DirectionFlag.ForwardForwardForward:
# all good
pass
if direction_byte == DirectionFlag.ForwardForwardReverse:
sign_rms_watts_3 = -1
pass
if direction_byte == DirectionFlag.ForwardReverseForward:
sign_rms_watts_2 = -1
pass
if direction_byte == DirectionFlag.ReverseForwardForward:
sign_rms_watts_1 = -1
pass
if direction_byte == DirectionFlag.ForwardReverseReverse:
sign_rms_watts_2 = -1
sign_rms_watts_3 = -1
pass
if direction_byte == DirectionFlag.ReverseForwardReverse:
sign_rms_watts_1 = -1
sign_rms_watts_3 = -1
pass
if direction_byte == DirectionFlag.ReverseReverseForward:
sign_rms_watts_1 = -1
sign_rms_watts_2 = -1
pass
if direction_byte == DirectionFlag.ReverseReverseReverse:
sign_rms_watts_1 = -1
sign_rms_watts_2 = -1
sign_rms_watts_3 = -1
pass
net_watts_1 = rms_watts_1 * sign_rms_watts_1
net_watts_2 = rms_watts_2 * sign_rms_watts_2
net_watts_3 = rms_watts_3 * sign_rms_watts_3
net_watts_tot = net_watts_1 + net_watts_2 + net_watts_3
self.m_blk_b[Field.Net_Calc_Watts_Ln_1][MeterData.NativeValue] = net_watts_1
self.m_blk_b[Field.Net_Calc_Watts_Ln_2][MeterData.NativeValue] = net_watts_2
self.m_blk_b[Field.Net_Calc_Watts_Ln_3][MeterData.NativeValue] = net_watts_3
self.m_blk_b[Field.Net_Calc_Watts_Tot][MeterData.NativeValue] = net_watts_tot
self.m_blk_b[Field.Net_Calc_Watts_Ln_1][MeterData.StringValue] = str(net_watts_1)
self.m_blk_b[Field.Net_Calc_Watts_Ln_2][MeterData.StringValue] = str(net_watts_2)
self.m_blk_b[Field.Net_Calc_Watts_Ln_3][MeterData.StringValue] = str(net_watts_3)
self.m_blk_b[Field.Net_Calc_Watts_Tot][MeterData.StringValue] = str(net_watts_tot)
pass |
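The eight direction_byte branches above each choose one sign per line; the same mapping reads off a single lookup table. The sketch below is only an illustrative, behaviour-equivalent rewrite that reuses the DirectionFlag names already referenced above.
# (line 1, line 2, line 3) signs applied to the RMS watt readings.
SIGNS_BY_DIRECTION = {
    DirectionFlag.ForwardForwardForward: (1, 1, 1),
    DirectionFlag.ForwardForwardReverse: (1, 1, -1),
    DirectionFlag.ForwardReverseForward: (1, -1, 1),
    DirectionFlag.ReverseForwardForward: (-1, 1, 1),
    DirectionFlag.ForwardReverseReverse: (1, -1, -1),
    DirectionFlag.ReverseForwardReverse: (-1, 1, -1),
    DirectionFlag.ReverseReverseForward: (-1, -1, 1),
    DirectionFlag.ReverseReverseReverse: (-1, -1, -1),
}
sign_rms_watts_1, sign_rms_watts_2, sign_rms_watts_3 = SIGNS_BY_DIRECTION.get(
    direction_byte, (1, 1, 1))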
def _find_files(dirpath: str) -> 'Iterable[str]':
"""Find files recursively.
Returns a generator that yields paths in no particular order.
"""
for dirpath, dirnames, filenames in os.walk(dirpath, topdown=True,
followlinks=True):
if os.path.basename(dirpath).startswith('.'):
del dirnames[:]
for filename in filenames:
yield os.path.join(dirpath, filename) | Find files recursively.
Returns a generator that yields paths in no particular order. | Below is the the instruction that describes the task:
### Input:
Find files recursively.
Returns a generator that yields paths in no particular order.
### Response:
def _find_files(dirpath: str) -> 'Iterable[str]':
"""Find files recursively.
Returns a generator that yields paths in no particular order.
"""
for dirpath, dirnames, filenames in os.walk(dirpath, topdown=True,
followlinks=True):
if os.path.basename(dirpath).startswith('.'):
del dirnames[:]
for filename in filenames:
yield os.path.join(dirpath, filename) |
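Usage sketch (the directory path is illustrative). Because the walk is top-down and dirnames is cleared in place, the generator never descends below a dot-directory, although files sitting directly inside that dot-directory are still yielded.
for path in _find_files('/srv/project'):
    print(path)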
def _colors_to_code(self, fg_color, bg_color):
" Return a tuple with the vt100 values that represent this color. "
# When requesting ANSI colors only, and both fg/bg color were converted
# to ANSI, ensure that the foreground and background color are not the
# same. (Unless they were explicitly defined to be the same color.)
fg_ansi = [()]
def get(color, bg):
table = BG_ANSI_COLORS if bg else FG_ANSI_COLORS
if color is None:
return ()
# 16 ANSI colors. (Given by name.)
elif color in table:
return (table[color], )
# RGB colors. (Defined as 'ffffff'.)
else:
try:
rgb = self._color_name_to_rgb(color)
except ValueError:
return ()
# When only 16 colors are supported, use that.
if self.ansi_colors_only():
if bg: # Background.
if fg_color != bg_color:
exclude = (fg_ansi[0], )
else:
exclude = ()
code, name = _16_bg_colors.get_code(rgb, exclude=exclude)
return (code, )
else: # Foreground.
code, name = _16_fg_colors.get_code(rgb)
fg_ansi[0] = name
return (code, )
# True colors. (Only when this feature is enabled.)
elif self.true_color:
r, g, b = rgb
return (48 if bg else 38, 2, r, g, b)
# 256 RGB colors.
else:
return (48 if bg else 38, 5, _256_colors[rgb])
result = []
result.extend(get(fg_color, False))
result.extend(get(bg_color, True))
return map(six.text_type, result) | Return a tuple with the vt100 values that represent this color. | Below is the the instruction that describes the task:
### Input:
Return a tuple with the vt100 values that represent this color.
### Response:
def _colors_to_code(self, fg_color, bg_color):
" Return a tuple with the vt100 values that represent this color. "
# When requesting ANSI colors only, and both fg/bg color were converted
# to ANSI, ensure that the foreground and background color are not the
# same. (Unless they were explicitly defined to be the same color.)
fg_ansi = [()]
def get(color, bg):
table = BG_ANSI_COLORS if bg else FG_ANSI_COLORS
if color is None:
return ()
# 16 ANSI colors. (Given by name.)
elif color in table:
return (table[color], )
# RGB colors. (Defined as 'ffffff'.)
else:
try:
rgb = self._color_name_to_rgb(color)
except ValueError:
return ()
# When only 16 colors are supported, use that.
if self.ansi_colors_only():
if bg: # Background.
if fg_color != bg_color:
exclude = (fg_ansi[0], )
else:
exclude = ()
code, name = _16_bg_colors.get_code(rgb, exclude=exclude)
return (code, )
else: # Foreground.
code, name = _16_fg_colors.get_code(rgb)
fg_ansi[0] = name
return (code, )
# True colors. (Only when this feature is enabled.)
elif self.true_color:
r, g, b = rgb
return (48 if bg else 38, 2, r, g, b)
# 256 RGB colors.
else:
return (48 if bg else 38, 5, _256_colors[rgb])
result = []
result.extend(get(fg_color, False))
result.extend(get(bg_color, True))
return map(six.text_type, result) |
def add_ip(self, family='IPv4'):
"""
Allocate a new (random) IP-address to the Server.
"""
IP = self.cloud_manager.attach_ip(self.uuid, family)
self.ip_addresses.append(IP)
return IP | Allocate a new (random) IP-address to the Server. | Below is the the instruction that describes the task:
### Input:
Allocate a new (random) IP-address to the Server.
### Response:
def add_ip(self, family='IPv4'):
"""
Allocate a new (random) IP-address to the Server.
"""
IP = self.cloud_manager.attach_ip(self.uuid, family)
self.ip_addresses.append(IP)
return IP |
def onMessage(self, payload, isBinary):
"""
Send the payload onto the slack.{payload['type']} channel.
The message is translated from IDs to human-readable identifiers.
Note: The slack API only sends JSON, isBinary will always be false.
"""
msg = self.translate(unpack(payload))
if 'type' in msg:
channel_name = 'slack.{}'.format(msg['type'])
print('Sending on {}'.format(channel_name))
channels.Channel(channel_name).send({'text': pack(msg)}) | Send the payload onto the slack.{payload['type']} channel.
The message is translated from IDs to human-readable identifiers.
Note: The slack API only sends JSON, isBinary will always be false. | Below is the the instruction that describes the task:
### Input:
Send the payload onto the slack.{payload['type']} channel.
The message is translated from IDs to human-readable identifiers.
Note: The slack API only sends JSON, isBinary will always be false.
### Response:
def onMessage(self, payload, isBinary):
"""
Send the payload onto the slack.{payload['type']} channel.
The message is translated from IDs to human-readable identifiers.
Note: The slack API only sends JSON, isBinary will always be false.
"""
msg = self.translate(unpack(payload))
if 'type' in msg:
channel_name = 'slack.{}'.format(msg['type'])
print('Sending on {}'.format(channel_name))
channels.Channel(channel_name).send({'text': pack(msg)}) |
async def close_interface(self, conn_id, interface):
"""Close an interface on this IOTile device.
See :meth:`AbstractDeviceAdapter.close_interface`.
"""
resp = await self._execute(self._adapter.close_interface_sync, conn_id, interface)
_raise_error(conn_id, 'close_interface', resp) | Close an interface on this IOTile device.
See :meth:`AbstractDeviceAdapter.close_interface`. | Below is the the instruction that describes the task:
### Input:
Close an interface on this IOTile device.
See :meth:`AbstractDeviceAdapter.close_interface`.
### Response:
async def close_interface(self, conn_id, interface):
"""Close an interface on this IOTile device.
See :meth:`AbstractDeviceAdapter.close_interface`.
"""
resp = await self._execute(self._adapter.close_interface_sync, conn_id, interface)
_raise_error(conn_id, 'close_interface', resp) |
def hessianFreqs(self,jr,jphi,jz,**kwargs):
"""
NAME:
hessianFreqs
PURPOSE:
return the Hessian d Omega / d J and frequencies Omega corresponding to a torus
INPUT:
jr - radial action (scalar)
jphi - azimuthal action (scalar)
jz - vertical action (scalar)
tol= (object-wide value) goal for |dJ|/|J| along the torus
dJ= (object-wide value) action difference when computing derivatives (Hessian or Jacobian)
nosym= (False) if True, don't explicitly symmetrize the Hessian (good to check errors)
OUTPUT:
(dO/dJ,Omegar,Omegaphi,Omegaz,Autofit error message)
HISTORY:
2016-07-15 - Written - Bovy (UofT)
"""
out= actionAngleTorus_c.actionAngleTorus_hessian_c(\
self._pot,
jr,jphi,jz,
tol=kwargs.get('tol',self._tol),
dJ=kwargs.get('dJ',self._dJ))
if out[4] != 0:
warnings.warn("actionAngleTorus' AutoFit exited with non-zero return status %i: %s" % (out[4],_autofit_errvals[out[4]]),
galpyWarning)
# Re-arrange frequencies and actions to r,phi,z
out[0][:,:]= out[0][:,[0,2,1]]
out[0][:,:]= out[0][[0,2,1]]
if kwargs.get('nosym',False):
return out
else :# explicitly symmetrize
return (0.5*(out[0]+out[0].T),out[1],out[2],out[3],out[4]) | NAME:
hessianFreqs
PURPOSE:
return the Hessian d Omega / d J and frequencies Omega corresponding to a torus
INPUT:
jr - radial action (scalar)
jphi - azimuthal action (scalar)
jz - vertical action (scalar)
tol= (object-wide value) goal for |dJ|/|J| along the torus
dJ= (object-wide value) action difference when computing derivatives (Hessian or Jacobian)
nosym= (False) if True, don't explicitly symmetrize the Hessian (good to check errors)
OUTPUT:
(dO/dJ,Omegar,Omegaphi,Omegaz,Autofit error message)
HISTORY:
2016-07-15 - Written - Bovy (UofT) | Below is the the instruction that describes the task:
### Input:
NAME:
hessianFreqs
PURPOSE:
return the Hessian d Omega / d J and frequencies Omega corresponding to a torus
INPUT:
jr - radial action (scalar)
jphi - azimuthal action (scalar)
jz - vertical action (scalar)
tol= (object-wide value) goal for |dJ|/|J| along the torus
dJ= (object-wide value) action difference when computing derivatives (Hessian or Jacobian)
nosym= (False) if True, don't explicitly symmetrize the Hessian (good to check errors)
OUTPUT:
(dO/dJ,Omegar,Omegaphi,Omegaz,Autofit error message)
HISTORY:
2016-07-15 - Written - Bovy (UofT)
### Response:
def hessianFreqs(self,jr,jphi,jz,**kwargs):
"""
NAME:
hessianFreqs
PURPOSE:
return the Hessian d Omega / d J and frequencies Omega corresponding to a torus
INPUT:
jr - radial action (scalar)
jphi - azimuthal action (scalar)
jz - vertical action (scalar)
tol= (object-wide value) goal for |dJ|/|J| along the torus
dJ= (object-wide value) action difference when computing derivatives (Hessian or Jacobian)
nosym= (False) if True, don't explicitly symmetrize the Hessian (good to check errors)
OUTPUT:
(dO/dJ,Omegar,Omegaphi,Omegaz,Autofit error message)
HISTORY:
2016-07-15 - Written - Bovy (UofT)
"""
out= actionAngleTorus_c.actionAngleTorus_hessian_c(\
self._pot,
jr,jphi,jz,
tol=kwargs.get('tol',self._tol),
dJ=kwargs.get('dJ',self._dJ))
if out[4] != 0:
warnings.warn("actionAngleTorus' AutoFit exited with non-zero return status %i: %s" % (out[4],_autofit_errvals[out[4]]),
galpyWarning)
# Re-arrange frequencies and actions to r,phi,z
out[0][:,:]= out[0][:,[0,2,1]]
out[0][:,:]= out[0][[0,2,1]]
if kwargs.get('nosym',False):
return out
else :# explicitly symmetrize
return (0.5*(out[0]+out[0].T),out[1],out[2],out[3],out[4]) |
def ds9_objects_to_string(regions, coordsys='fk5', fmt='.6f', radunit='deg'):
"""
Converts a `list` of `~regions.Region` to DS9 region string.
Parameters
----------
regions : `list`
List of `~regions.Region` objects
coordsys : `str`, optional
This overrides the coordinate system frame for all regions.
Default is 'fk5'.
fmt : `str`, optional
A python string format defining the output precision. Default is .6f,
which is accurate to 0.0036 arcseconds.
radunit : `str`, optional
This denotes the unit of the radius. Default is 'deg'(degrees)
Returns
-------
region_string : `str`
DS9 region string
Examples
--------
>>> from astropy import units as u
>>> from astropy.coordinates import SkyCoord
>>> from regions import CircleSkyRegion, ds9_objects_to_string
>>> reg_sky = CircleSkyRegion(SkyCoord(1 * u.deg, 2 * u.deg), 5 * u.deg)
>>> print(ds9_objects_to_string([reg_sky]))
# Region file format: DS9 astropy/regions
fk5
circle(1.000007,2.000002,5.000000)
"""
shapelist = to_shape_list(regions, coordsys)
return shapelist.to_ds9(coordsys, fmt, radunit) | Converts a `list` of `~regions.Region` to DS9 region string.
Parameters
----------
regions : `list`
List of `~regions.Region` objects
coordsys : `str`, optional
This overrides the coordinate system frame for all regions.
Default is 'fk5'.
fmt : `str`, optional
A python string format defining the output precision. Default is .6f,
which is accurate to 0.0036 arcseconds.
radunit : `str`, optional
This denotes the unit of the radius. Default is 'deg'(degrees)
Returns
-------
region_string : `str`
DS9 region string
Examples
--------
>>> from astropy import units as u
>>> from astropy.coordinates import SkyCoord
>>> from regions import CircleSkyRegion, ds9_objects_to_string
>>> reg_sky = CircleSkyRegion(SkyCoord(1 * u.deg, 2 * u.deg), 5 * u.deg)
>>> print(ds9_objects_to_string([reg_sky]))
# Region file format: DS9 astropy/regions
fk5
circle(1.000007,2.000002,5.000000) | Below is the the instruction that describes the task:
### Input:
Converts a `list` of `~regions.Region` to DS9 region string.
Parameters
----------
regions : `list`
List of `~regions.Region` objects
coordsys : `str`, optional
This overrides the coordinate system frame for all regions.
Default is 'fk5'.
fmt : `str`, optional
A python string format defining the output precision. Default is .6f,
which is accurate to 0.0036 arcseconds.
radunit : `str`, optional
This denotes the unit of the radius. Default is 'deg'(degrees)
Returns
-------
region_string : `str`
DS9 region string
Examples
--------
>>> from astropy import units as u
>>> from astropy.coordinates import SkyCoord
>>> from regions import CircleSkyRegion, ds9_objects_to_string
>>> reg_sky = CircleSkyRegion(SkyCoord(1 * u.deg, 2 * u.deg), 5 * u.deg)
>>> print(ds9_objects_to_string([reg_sky]))
# Region file format: DS9 astropy/regions
fk5
circle(1.000007,2.000002,5.000000)
### Response:
def ds9_objects_to_string(regions, coordsys='fk5', fmt='.6f', radunit='deg'):
"""
Converts a `list` of `~regions.Region` to DS9 region string.
Parameters
----------
regions : `list`
List of `~regions.Region` objects
coordsys : `str`, optional
This overrides the coordinate system frame for all regions.
Default is 'fk5'.
fmt : `str`, optional
A python string format defining the output precision. Default is .6f,
which is accurate to 0.0036 arcseconds.
radunit : `str`, optional
This denotes the unit of the radius. Default is 'deg'(degrees)
Returns
-------
region_string : `str`
DS9 region string
Examples
--------
>>> from astropy import units as u
>>> from astropy.coordinates import SkyCoord
>>> from regions import CircleSkyRegion, ds9_objects_to_string
>>> reg_sky = CircleSkyRegion(SkyCoord(1 * u.deg, 2 * u.deg), 5 * u.deg)
>>> print(ds9_objects_to_string([reg_sky]))
# Region file format: DS9 astropy/regions
fk5
circle(1.000007,2.000002,5.000000)
"""
shapelist = to_shape_list(regions, coordsys)
return shapelist.to_ds9(coordsys, fmt, radunit) |
def itemFromTag( self, tag ):
"""
Returns the item assigned to the given tag.
:param tag | <str>
:return <XMultiTagItem> || None
"""
for row in range(self.count() - 1):
item = self.item(row)
if ( item and item.text() == tag ):
return item
return None | Returns the item assigned to the given tag.
:param tag | <str>
:return <XMultiTagItem> || None | Below is the the instruction that describes the task:
### Input:
Returns the item assigned to the given tag.
:param tag | <str>
:return <XMultiTagItem> || None
### Response:
def itemFromTag( self, tag ):
"""
Returns the item assigned to the given tag.
:param tag | <str>
:return <XMultiTagItem> || None
"""
for row in range(self.count() - 1):
item = self.item(row)
if ( item and item.text() == tag ):
return item
return None |
def _find_workflows(mcs, attrs):
"""Find workflow definition(s) in a WorkflowEnabled definition.
This method overrides the default behavior from xworkflows in order to
use our custom StateField objects.
"""
workflows = {}
for k, v in attrs.items():
if isinstance(v, StateField):
workflows[k] = v
return workflows | Find workflow definition(s) in a WorkflowEnabled definition.
This method overrides the default behavior from xworkflows in order to
use our custom StateField objects. | Below is the the instruction that describes the task:
### Input:
Find workflow definition(s) in a WorkflowEnabled definition.
This method overrides the default behavior from xworkflows in order to
use our custom StateField objects.
### Response:
def _find_workflows(mcs, attrs):
"""Find workflow definition(s) in a WorkflowEnabled definition.
This method overrides the default behavior from xworkflows in order to
use our custom StateField objects.
"""
workflows = {}
for k, v in attrs.items():
if isinstance(v, StateField):
workflows[k] = v
return workflows |
def sorted_migrations(self):
"""
Sort migrations if necessary and store in self._sorted_migrations
"""
if not self._sorted_migrations:
self._sorted_migrations = sorted(
self.migration_registry.items(),
# sort on the key... the migration number
key=lambda migration_tuple: migration_tuple[0])
return self._sorted_migrations | Sort migrations if necessary and store in self._sorted_migrations | Below is the the instruction that describes the task:
### Input:
Sort migrations if necessary and store in self._sorted_migrations
### Response:
def sorted_migrations(self):
"""
Sort migrations if necessary and store in self._sorted_migrations
"""
if not self._sorted_migrations:
self._sorted_migrations = sorted(
self.migration_registry.items(),
# sort on the key... the migration number
key=lambda migration_tuple: migration_tuple[0])
return self._sorted_migrations |
def goto_time(timeval):
'''Go to a specific time (in nanoseconds) in the current
trajectory.
'''
i = bisect.bisect(viewer.frame_times, timeval * 1000)
goto_frame(i) | Go to a specific time (in nanoseconds) in the current
trajectory. | Below is the the instruction that describes the task:
### Input:
Go to a specific time (in nanoseconds) in the current
trajectory.
### Response:
def goto_time(timeval):
'''Go to a specific time (in nanoseconds) in the current
trajectory.
'''
i = bisect.bisect(viewer.frame_times, timeval * 1000)
goto_frame(i) |
def create(*context, **kwargs):
"""
Build a ContextStack instance from a sequence of context-like items.
This factory-style method is more general than the ContextStack class's
constructor in that, unlike the constructor, the argument list
can itself contain ContextStack instances.
Here is an example illustrating various aspects of this method:
>>> obj1 = {'animal': 'cat', 'vegetable': 'carrot', 'mineral': 'copper'}
>>> obj2 = ContextStack({'vegetable': 'spinach', 'mineral': 'silver'})
>>>
>>> context = ContextStack.create(obj1, None, obj2, mineral='gold')
>>>
>>> context.get('animal')
'cat'
>>> context.get('vegetable')
'spinach'
>>> context.get('mineral')
'gold'
Arguments:
*context: zero or more dictionaries, ContextStack instances, or objects
with which to populate the initial context stack. None
arguments will be skipped. Items in the *context list are
added to the stack in order so that later items in the argument
list take precedence over earlier items. This behavior is the
same as the constructor's.
**kwargs: additional key-value data to add to the context stack.
As these arguments appear after all items in the *context list,
in the case of key conflicts these values take precedence over
all items in the *context list. This behavior is the same as
the constructor's.
"""
items = context
context = ContextStack()
for item in items:
if item is None:
continue
if isinstance(item, ContextStack):
context._stack.extend(item._stack)
else:
context.push(item)
if kwargs:
context.push(kwargs)
return context | Build a ContextStack instance from a sequence of context-like items.
This factory-style method is more general than the ContextStack class's
constructor in that, unlike the constructor, the argument list
can itself contain ContextStack instances.
Here is an example illustrating various aspects of this method:
>>> obj1 = {'animal': 'cat', 'vegetable': 'carrot', 'mineral': 'copper'}
>>> obj2 = ContextStack({'vegetable': 'spinach', 'mineral': 'silver'})
>>>
>>> context = ContextStack.create(obj1, None, obj2, mineral='gold')
>>>
>>> context.get('animal')
'cat'
>>> context.get('vegetable')
'spinach'
>>> context.get('mineral')
'gold'
Arguments:
*context: zero or more dictionaries, ContextStack instances, or objects
with which to populate the initial context stack. None
arguments will be skipped. Items in the *context list are
added to the stack in order so that later items in the argument
list take precedence over earlier items. This behavior is the
same as the constructor's.
**kwargs: additional key-value data to add to the context stack.
As these arguments appear after all items in the *context list,
in the case of key conflicts these values take precedence over
all items in the *context list. This behavior is the same as
the constructor's. | Below is the the instruction that describes the task:
### Input:
Build a ContextStack instance from a sequence of context-like items.
This factory-style method is more general than the ContextStack class's
constructor in that, unlike the constructor, the argument list
can itself contain ContextStack instances.
Here is an example illustrating various aspects of this method:
>>> obj1 = {'animal': 'cat', 'vegetable': 'carrot', 'mineral': 'copper'}
>>> obj2 = ContextStack({'vegetable': 'spinach', 'mineral': 'silver'})
>>>
>>> context = ContextStack.create(obj1, None, obj2, mineral='gold')
>>>
>>> context.get('animal')
'cat'
>>> context.get('vegetable')
'spinach'
>>> context.get('mineral')
'gold'
Arguments:
*context: zero or more dictionaries, ContextStack instances, or objects
with which to populate the initial context stack. None
arguments will be skipped. Items in the *context list are
added to the stack in order so that later items in the argument
list take precedence over earlier items. This behavior is the
same as the constructor's.
**kwargs: additional key-value data to add to the context stack.
As these arguments appear after all items in the *context list,
in the case of key conflicts these values take precedence over
all items in the *context list. This behavior is the same as
the constructor's.
### Response:
def create(*context, **kwargs):
"""
Build a ContextStack instance from a sequence of context-like items.
This factory-style method is more general than the ContextStack class's
constructor in that, unlike the constructor, the argument list
can itself contain ContextStack instances.
Here is an example illustrating various aspects of this method:
>>> obj1 = {'animal': 'cat', 'vegetable': 'carrot', 'mineral': 'copper'}
>>> obj2 = ContextStack({'vegetable': 'spinach', 'mineral': 'silver'})
>>>
>>> context = ContextStack.create(obj1, None, obj2, mineral='gold')
>>>
>>> context.get('animal')
'cat'
>>> context.get('vegetable')
'spinach'
>>> context.get('mineral')
'gold'
Arguments:
*context: zero or more dictionaries, ContextStack instances, or objects
with which to populate the initial context stack. None
arguments will be skipped. Items in the *context list are
added to the stack in order so that later items in the argument
list take precedence over earlier items. This behavior is the
same as the constructor's.
**kwargs: additional key-value data to add to the context stack.
As these arguments appear after all items in the *context list,
in the case of key conflicts these values take precedence over
all items in the *context list. This behavior is the same as
the constructor's.
"""
items = context
context = ContextStack()
for item in items:
if item is None:
continue
if isinstance(item, ContextStack):
context._stack.extend(item._stack)
else:
context.push(item)
if kwargs:
context.push(kwargs)
return context |
def coerce_many(schema=str):
"""Expect the input to be a sequence of items which conform to `schema`."""
def validate(val):
"""Apply schema check/version to each item."""
return [volup.Coerce(schema)(x) for x in val]
return validate | Expect the input to be a sequence of items which conform to `schema`. | Below is the the instruction that describes the task:
### Input:
Expect the input to be a sequence of items which conform to `schema`.
### Response:
def coerce_many(schema=str):
"""Expect the input to be a sequence of items which conform to `schema`."""
def validate(val):
"""Apply schema check/version to each item."""
return [volup.Coerce(schema)(x) for x in val]
return validate |
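A usage sketch, assuming `volup` is the voluptuous module as imported elsewhere in the surrounding file:
validate_ints = coerce_many(int)
validate_ints(['1', '2', '3'])   # -> [1, 2, 3]; each element goes through volup.Coerce(int)
validate_ints(('4', '5'))        # -> [4, 5]; any iterable of coercible items works
# A non-coercible element (e.g. 'abc') raises a voluptuous Invalid error.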
def get_image_by_kind(self, kind):
""" returns a image of a specific kind """
for ss in self.images:
if ss.kind == kind:
return ss
return None | returns an image of a specific kind | Below is the the instruction that describes the task:
### Input:
returns an image of a specific kind
### Response:
def get_image_by_kind(self, kind):
""" returns a image of a specific kind """
for ss in self.images:
if ss.kind == kind:
return ss
return None |
def sendCMD (self, vel):
'''
Sends CMDVel.
@param vel: CMDVel to publish
@type vel: CMDVel
'''
self.lock.acquire()
self.vel = vel
self.lock.release() | Sends CMDVel.
@param vel: CMDVel to publish
@type vel: CMDVel | Below is the the instruction that describes the task:
### Input:
Sends CMDVel.
@param vel: CMDVel to publish
@type vel: CMDVel
### Response:
def sendCMD (self, vel):
'''
Sends CMDVel.
@param vel: CMDVel to publish
@type vel: CMDVel
'''
self.lock.acquire()
self.vel = vel
self.lock.release() |
def density_2d(self, x, y, rho0, Ra, Rs, center_x=0, center_y=0):
"""
projected density
:param x:
:param y:
:param rho0:
:param Ra:
:param Rs:
:param center_x:
:param center_y:
:return:
"""
Ra, Rs = self._sort_ra_rs(Ra, Rs)
x_ = x - center_x
y_ = y - center_y
r = np.sqrt(x_**2 + y_**2)
sigma0 = self.rho2sigma(rho0, Ra, Rs)
sigma = sigma0 * Ra * Rs / (Rs - Ra) * (1 / np.sqrt(Ra ** 2 + r ** 2) - 1 / np.sqrt(Rs ** 2 + r ** 2))
return sigma | projected density
:param x:
:param y:
:param rho0:
:param Ra:
:param Rs:
:param center_x:
:param center_y:
:return: | Below is the the instruction that describes the task:
### Input:
projected density
:param x:
:param y:
:param rho0:
:param Ra:
:param Rs:
:param center_x:
:param center_y:
:return:
### Response:
def density_2d(self, x, y, rho0, Ra, Rs, center_x=0, center_y=0):
"""
projected density
:param x:
:param y:
:param rho0:
:param Ra:
:param Rs:
:param center_x:
:param center_y:
:return:
"""
Ra, Rs = self._sort_ra_rs(Ra, Rs)
x_ = x - center_x
y_ = y - center_y
r = np.sqrt(x_**2 + y_**2)
sigma0 = self.rho2sigma(rho0, Ra, Rs)
sigma = sigma0 * Ra * Rs / (Rs - Ra) * (1 / np.sqrt(Ra ** 2 + r ** 2) - 1 / np.sqrt(Rs ** 2 + r ** 2))
return sigma |
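A sanity check on the closed form: at r = 0 the bracket equals 1/Ra - 1/Rs = (Rs - Ra)/(Ra*Rs), so the prefactor cancels and density_2d(0, 0, ...) reduces to the central surface density returned by rho2sigma. Illustrative only; the profile instance and parameter values are made up.
import numpy as np
sigma_centre = profile.density_2d(0.0, 0.0, rho0=1.0, Ra=0.5, Rs=2.0)
assert np.isclose(sigma_centre, profile.rho2sigma(1.0, 0.5, 2.0))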
def page(self, from_=values.unset, to=values.unset,
date_created_on_or_before=values.unset,
date_created_after=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of FaxInstance records from the API.
Request is executed immediately
:param unicode from_: Retrieve only those faxes sent from this phone number
:param unicode to: Retrieve only those faxes sent to this phone number
:param datetime date_created_on_or_before: Retrieve only faxes created on or before this date
:param datetime date_created_after: Retrieve only faxes created after this date
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of FaxInstance
:rtype: twilio.rest.fax.v1.fax.FaxPage
"""
params = values.of({
'From': from_,
'To': to,
'DateCreatedOnOrBefore': serialize.iso8601_datetime(date_created_on_or_before),
'DateCreatedAfter': serialize.iso8601_datetime(date_created_after),
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return FaxPage(self._version, response, self._solution) | Retrieve a single page of FaxInstance records from the API.
Request is executed immediately
:param unicode from_: Retrieve only those faxes sent from this phone number
:param unicode to: Retrieve only those faxes sent to this phone number
:param datetime date_created_on_or_before: Retrieve only faxes created on or before this date
:param datetime date_created_after: Retrieve only faxes created after this date
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of FaxInstance
:rtype: twilio.rest.fax.v1.fax.FaxPage | Below is the the instruction that describes the task:
### Input:
Retrieve a single page of FaxInstance records from the API.
Request is executed immediately
:param unicode from_: Retrieve only those faxes sent from this phone number
:param unicode to: Retrieve only those faxes sent to this phone number
:param datetime date_created_on_or_before: Retrieve only faxes created on or before this date
:param datetime date_created_after: Retrieve only faxes created after this date
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of FaxInstance
:rtype: twilio.rest.fax.v1.fax.FaxPage
### Response:
def page(self, from_=values.unset, to=values.unset,
date_created_on_or_before=values.unset,
date_created_after=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of FaxInstance records from the API.
Request is executed immediately
:param unicode from_: Retrieve only those faxes sent from this phone number
:param unicode to: Retrieve only those faxes sent to this phone number
:param datetime date_created_on_or_before: Retrieve only faxes created on or before this date
:param datetime date_created_after: Retrieve only faxes created after this date
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of FaxInstance
:rtype: twilio.rest.fax.v1.fax.FaxPage
"""
params = values.of({
'From': from_,
'To': to,
'DateCreatedOnOrBefore': serialize.iso8601_datetime(date_created_on_or_before),
'DateCreatedAfter': serialize.iso8601_datetime(date_created_after),
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return FaxPage(self._version, response, self._solution) |
def lookup_field_label(self, context, field, default=None):
"""
Figures out what the field label should be for the passed in field name.
We overload this so as to use our form to see if there is a label set there. If so
then we'll pass that as the default instead of having our parent derive
the field from the name.
"""
default = None
for form_field in self.form:
if form_field.name == field:
default = form_field.label
break
return super(SmartFormMixin, self).lookup_field_label(context, field, default=default) | Figures out what the field label should be for the passed in field name.
We overload this so as to use our form to see if there is a label set there. If so
then we'll pass that as the default instead of having our parent derive
the field from the name. | Below is the the instruction that describes the task:
### Input:
Figures out what the field label should be for the passed in field name.
We overload this so as to use our form to see if there is a label set there. If so
then we'll pass that as the default instead of having our parent derive
the field from the name.
### Response:
def lookup_field_label(self, context, field, default=None):
"""
Figures out what the field label should be for the passed in field name.
We overload this so as to use our form to see if there is a label set there. If so
then we'll pass that as the default instead of having our parent derive
the field from the name.
"""
default = None
for form_field in self.form:
if form_field.name == field:
default = form_field.label
break
return super(SmartFormMixin, self).lookup_field_label(context, field, default=default) |
def sentiment(self):
"""Return a tuple of form (polarity, subjectivity ) where polarity
is a float within the range [-1.0, 1.0] and subjectivity is a float
within the range [0.0, 1.0] where 0.0 is very objective and 1.0 is
very subjective.
:rtype: named tuple of the form ``Sentiment(polarity=0.0, subjectivity=0.0)``
"""
#: Enhancement Issue #2
#: adapted from 'textblob.en.sentiments.py'
#: Return type declaration
_RETURN_TYPE = namedtuple('Sentiment', ['polarity', 'subjectivity'])
_polarity = 0
_subjectivity = 0
for s in self.sentences:
_polarity += s.polarity
_subjectivity += s.subjectivity
try:
polarity = _polarity / len(self.sentences)
except ZeroDivisionError:
polarity = 0.0
try:
subjectivity = _subjectivity / len(self.sentences)
except ZeroDivisionError:
subjectivity = 0.0
return _RETURN_TYPE(polarity, subjectivity) | Return a tuple of form (polarity, subjectivity ) where polarity
is a float within the range [-1.0, 1.0] and subjectivity is a float
within the range [0.0, 1.0] where 0.0 is very objective and 1.0 is
very subjective.
:rtype: named tuple of the form ``Sentiment(polarity=0.0, subjectivity=0.0)`` | Below is the the instruction that describes the task:
### Input:
Return a tuple of form (polarity, subjectivity ) where polarity
is a float within the range [-1.0, 1.0] and subjectivity is a float
within the range [0.0, 1.0] where 0.0 is very objective and 1.0 is
very subjective.
:rtype: named tuple of the form ``Sentiment(polarity=0.0, subjectivity=0.0)``
### Response:
def sentiment(self):
"""Return a tuple of form (polarity, subjectivity ) where polarity
is a float within the range [-1.0, 1.0] and subjectivity is a float
within the range [0.0, 1.0] where 0.0 is very objective and 1.0 is
very subjective.
:rtype: named tuple of the form ``Sentiment(polarity=0.0, subjectivity=0.0)``
"""
#: Enhancement Issue #2
#: adapted from 'textblob.en.sentiments.py'
#: Return type declaration
_RETURN_TYPE = namedtuple('Sentiment', ['polarity', 'subjectivity'])
_polarity = 0
_subjectivity = 0
for s in self.sentences:
_polarity += s.polarity
_subjectivity += s.subjectivity
try:
polarity = _polarity / len(self.sentences)
except ZeroDivisionError:
polarity = 0.0
try:
subjectivity = _subjectivity / len(self.sentences)
except ZeroDivisionError:
subjectivity = 0.0
return _RETURN_TYPE(polarity, subjectivity) |
async def send(self, metric):
"""Transform metric to JSON bytestring and send to server.
Args:
metric (dict): Complete metric to send as JSON.
"""
message = json.dumps(metric).encode('utf-8')
await self.loop.create_datagram_endpoint(
lambda: UDPClientProtocol(message),
remote_addr=(self.ip, self.port)) | Transform metric to JSON bytestring and send to server.
Args:
metric (dict): Complete metric to send as JSON. | Below is the the instruction that describes the task:
### Input:
Transform metric to JSON bytestring and send to server.
Args:
metric (dict): Complete metric to send as JSON.
### Response:
async def send(self, metric):
"""Transform metric to JSON bytestring and send to server.
Args:
metric (dict): Complete metric to send as JSON.
"""
message = json.dumps(metric).encode('utf-8')
await self.loop.create_datagram_endpoint(
lambda: UDPClientProtocol(message),
remote_addr=(self.ip, self.port)) |
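A hedged driver sketch; the class name UDPMetricClient and its constructor are assumptions, since send() only requires the instance to expose .loop, .ip and .port.
import asyncio
client = UDPMetricClient(ip='127.0.0.1', port=8094, loop=asyncio.get_event_loop())
client.loop.run_until_complete(
    client.send({'measurement': 'requests', 'value': 12}))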
def _parse_depot_section(f):
"""Parse TSPLIB DEPOT_SECTION data part from file descriptor f
Args
----
f : str
File descriptor
Returns
-------
int
the single depot node
"""
depots = []
for line in f:
line = strip(line)
if line == '-1' or line == 'EOF': # End of section
break
else:
depots.append(line)
if len(depots) != 1:
raise ParseException('One and only one depot is supported')
return int(depots[0]) | Parse TSPLIB DEPOT_SECTION data part from file descriptor f
Args
----
f : str
File descriptor
Returns
-------
int
the single depot node | Below is the the instruction that describes the task:
### Input:
Parse TSPLIB DEPOT_SECTION data part from file descriptor f
Args
----
f : str
File descriptor
Returns
-------
int
the single depot node
### Response:
def _parse_depot_section(f):
"""Parse TSPLIB DEPOT_SECTION data part from file descriptor f
Args
----
f : str
File descriptor
Returns
-------
int
the single depot node
"""
depots = []
for line in f:
line = strip(line)
if line == '-1' or line == 'EOF': # End of section
break
else:
depots.append(line)
if len(depots) != 1:
raise ParseException('One and only one depot is supported')
return int(depots[0]) |
def pop_marker(self, reset):
""" Pop a marker off of the marker stack. If reset is True then the
iterator will be returned to the state it was in before the
corresponding call to push_marker().
"""
marker = self.markers.pop()
if reset:
# Make the values available to be read again
marker.extend(self.look_ahead)
self.look_ahead = marker
elif self.markers:
# Otherwise, reassign the values to the top marker
self.markers[-1].extend(marker)
else:
# If there are no more markers in the stack then discard the values
pass | Pop a marker off of the marker stack. If reset is True then the
iterator will be returned to the state it was in before the
corresponding call to push_marker(). | Below is the the instruction that describes the task:
### Input:
Pop a marker off of the marker stack. If reset is True then the
iterator will be returned to the state it was in before the
corresponding call to push_marker().
### Response:
def pop_marker(self, reset):
""" Pop a marker off of the marker stack. If reset is True then the
iterator will be returned to the state it was in before the
corresponding call to push_marker().
"""
marker = self.markers.pop()
if reset:
# Make the values available to be read again
marker.extend(self.look_ahead)
self.look_ahead = marker
elif self.markers:
# Otherwise, reassign the values to the top marker
self.markers[-1].extend(marker)
else:
# If there are no more markers in the stack then discard the values
pass |
def close(self):
'''Close Gdx file and free up resources.'''
h = self.gdx_handle
gdxcc.gdxClose(h)
gdxcc.gdxFree(h) | Close Gdx file and free up resources. | Below is the the instruction that describes the task:
### Input:
Close Gdx file and free up resources.
### Response:
def close(self):
'''Close Gdx file and free up resources.'''
h = self.gdx_handle
gdxcc.gdxClose(h)
gdxcc.gdxFree(h) |
def get_default_for(prop, value):
""" Ensures complex property types have the correct default values """
prop = prop.strip('_') # Handle alternate props (leading underscores)
val = reduce_value(value) # Filtering of value happens here
if prop in _COMPLEX_LISTS:
return wrap_value(val)
elif prop in _COMPLEX_STRUCTS:
return val or {}
else:
return u'' if val is None else val | Ensures complex property types have the correct default values | Below is the the instruction that describes the task:
### Input:
Ensures complex property types have the correct default values
### Response:
def get_default_for(prop, value):
""" Ensures complex property types have the correct default values """
prop = prop.strip('_') # Handle alternate props (leading underscores)
val = reduce_value(value) # Filtering of value happens here
if prop in _COMPLEX_LISTS:
return wrap_value(val)
elif prop in _COMPLEX_STRUCTS:
return val or {}
else:
return u'' if val is None else val |
def identify_id(id: str) -> bool:
"""
Try to identify whether this is an ActivityPub ID.
"""
return re.match(r'^https?://', id, flags=re.IGNORECASE) is not None | Try to identify whether this is an ActivityPub ID. | Below is the the instruction that describes the task:
### Input:
Try to identify whether this is an ActivityPub ID.
### Response:
def identify_id(id: str) -> bool:
"""
Try to identify whether this is an ActivityPub ID.
"""
return re.match(r'^https?://', id, flags=re.IGNORECASE) is not None |
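Behaviour sketch: the check is purely syntactic, so any string with an http(s) scheme passes.
identify_id('https://example.org/users/alice')   # -> True
identify_id('HTTP://EXAMPLE.ORG/actor')          # -> True (match is case-insensitive)
identify_id('acct:alice@example.org')            # -> False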
def set_fc_volume(self, port_id,
target_wwn, target_lun=0, boot_prio=1,
initiator_wwnn=None, initiator_wwpn=None):
"""Set FibreChannel volume information to configuration.
:param port_id: Physical port ID.
:param target_wwn: WWN of target.
:param target_lun: LUN number of target.
:param boot_prio: Boot priority of the volume. 1 indicates the highest
priority.
:param initiator_wwnn: Virtual WWNN for initiator if necessary.
:param initiator_wwpn: Virtual WWPN for initiator if necessary.
"""
port_handler = _parse_physical_port_id(port_id)
fc_target = elcm.FCTarget(target_wwn, target_lun)
fc_boot = elcm.FCBoot(boot_prio=boot_prio, boot_enable=True)
fc_boot.add_target(fc_target)
port = self._find_port(port_handler)
if port:
port_handler.set_fc_port(port, fc_boot,
wwnn=initiator_wwnn, wwpn=initiator_wwpn)
else:
port = port_handler.create_fc_port(fc_boot,
wwnn=initiator_wwnn,
wwpn=initiator_wwpn)
self._add_port(port_handler, port) | Set FibreChannel volume information to configuration.
:param port_id: Physical port ID.
:param target_wwn: WWN of target.
:param target_lun: LUN number of target.
:param boot_prio: Boot priority of the volume. 1 indicates the highest
priority.
:param initiator_wwnn: Virtual WWNN for initiator if necessary.
:param initiator_wwpn: Virtual WWPN for initiator if necessary. | Below is the the instruction that describes the task:
### Input:
Set FibreChannel volume information to configuration.
:param port_id: Physical port ID.
:param target_wwn: WWN of target.
:param target_lun: LUN number of target.
:param boot_prio: Boot priority of the volume. 1 indicates the highest
priority.
:param initiator_wwnn: Virtual WWNN for initiator if necessary.
:param initiator_wwpn: Virtual WWPN for initiator if necessary.
### Response:
def set_fc_volume(self, port_id,
target_wwn, target_lun=0, boot_prio=1,
initiator_wwnn=None, initiator_wwpn=None):
"""Set FibreChannel volume information to configuration.
:param port_id: Physical port ID.
:param target_wwn: WWN of target.
:param target_lun: LUN number of target.
:param boot_prio: Boot priority of the volume. 1 indicates the highest
priority.
:param initiator_wwnn: Virtual WWNN for initiator if necessary.
:param initiator_wwpn: Virtual WWPN for initiator if necessary.
"""
port_handler = _parse_physical_port_id(port_id)
fc_target = elcm.FCTarget(target_wwn, target_lun)
fc_boot = elcm.FCBoot(boot_prio=boot_prio, boot_enable=True)
fc_boot.add_target(fc_target)
port = self._find_port(port_handler)
if port:
port_handler.set_fc_port(port, fc_boot,
wwnn=initiator_wwnn, wwpn=initiator_wwpn)
else:
port = port_handler.create_fc_port(fc_boot,
wwnn=initiator_wwnn,
wwpn=initiator_wwpn)
self._add_port(port_handler, port) |
def requestFields(self, field_names, required=False, strict=False):
"""Add the given list of fields to the request
@param field_names: The simple registration data fields to request
@type field_names: [str]
@param required: Whether these values should be presented to
the user as required
@param strict: whether to raise an exception when a field is
added to a request more than once
@raise ValueError: when a field requested is not a simple
registration field or strict is set and a field was
requested more than once
"""
if isinstance(field_names, basestring):
raise TypeError('Fields should be passed as a list of '
'strings (not %r)' % (type(field_names),))
for field_name in field_names:
self.requestField(field_name, required, strict=strict) | Add the given list of fields to the request
@param field_names: The simple registration data fields to request
@type field_names: [str]
@param required: Whether these values should be presented to
the user as required
@param strict: whether to raise an exception when a field is
added to a request more than once
@raise ValueError: when a field requested is not a simple
registration field or strict is set and a field was
requested more than once | Below is the the instruction that describes the task:
### Input:
Add the given list of fields to the request
@param field_names: The simple registration data fields to request
@type field_names: [str]
@param required: Whether these values should be presented to
the user as required
@param strict: whether to raise an exception when a field is
added to a request more than once
@raise ValueError: when a field requested is not a simple
registration field or strict is set and a field was
requested more than once
### Response:
def requestFields(self, field_names, required=False, strict=False):
"""Add the given list of fields to the request
@param field_names: The simple registration data fields to request
@type field_names: [str]
@param required: Whether these values should be presented to
the user as required
@param strict: whether to raise an exception when a field is
added to a request more than once
@raise ValueError: when a field requested is not a simple
registration field or strict is set and a field was
requested more than once
"""
if isinstance(field_names, basestring):
raise TypeError('Fields should be passed as a list of '
'strings (not %r)' % (type(field_names),))
for field_name in field_names:
self.requestField(field_name, required, strict=strict) |
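A minimal usage sketch for the method above, assuming a python-openid style SRegRequest object; the object and field names are illustrative, not taken from the record:
# Hedged sketch: request two required fields and one optional field.
sreg_request = SRegRequest()
sreg_request.requestFields(['nickname', 'email'], required=True)
sreg_request.requestFields(['dob'])
# Passing a bare string such as 'email' would raise TypeError.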
def is_py_script(filename):
"Returns True if a file is a python executable."
    if not (os.path.exists(filename) and os.path.isfile(filename)):
return False
elif filename.endswith(".py"):
return True
elif not os.access(filename, os.X_OK):
return False
else:
try:
with open(filename, "r") as fp:
first_line = fp.readline().strip()
return "#!" in first_line and "python" in first_line
except StopIteration:
return False | Returns True if a file is a python executable. | Below is the the instruction that describes the task:
### Input:
Returns True if a file is a python executable.
### Response:
def is_py_script(filename):
"Returns True if a file is a python executable."
    if not (os.path.exists(filename) and os.path.isfile(filename)):
return False
elif filename.endswith(".py"):
return True
elif not os.access(filename, os.X_OK):
return False
else:
try:
with open(filename, "r") as fp:
first_line = fp.readline().strip()
return "#!" in first_line and "python" in first_line
except StopIteration:
return False |
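Illustrative calls for the predicate above; the paths are hypothetical and assumed to exist on disk:
print(is_py_script('setup.py'))            # True: existing file ending in .py
print(is_py_script('/usr/local/bin/tool')) # True only if executable with a python shebang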
def _get_token(self, token, http_conn_id):
"""
Given either a manually set token or a conn_id, return the webhook_token to use
:param token: The manually provided token
:type token: str
:param http_conn_id: The conn_id provided
:type http_conn_id: str
:return: webhook_token (str) to use
"""
if token:
return token
elif http_conn_id:
conn = self.get_connection(http_conn_id)
extra = conn.extra_dejson
return extra.get('webhook_token', '')
else:
raise AirflowException('Cannot get token: No valid Slack '
'webhook token nor conn_id supplied') | Given either a manually set token or a conn_id, return the webhook_token to use
:param token: The manually provided token
:type token: str
:param http_conn_id: The conn_id provided
:type http_conn_id: str
:return: webhook_token (str) to use | Below is the the instruction that describes the task:
### Input:
Given either a manually set token or a conn_id, return the webhook_token to use
:param token: The manually provided token
:type token: str
:param http_conn_id: The conn_id provided
:type http_conn_id: str
:return: webhook_token (str) to use
### Response:
def _get_token(self, token, http_conn_id):
"""
Given either a manually set token or a conn_id, return the webhook_token to use
:param token: The manually provided token
:type token: str
:param http_conn_id: The conn_id provided
:type http_conn_id: str
:return: webhook_token (str) to use
"""
if token:
return token
elif http_conn_id:
conn = self.get_connection(http_conn_id)
extra = conn.extra_dejson
return extra.get('webhook_token', '')
else:
raise AirflowException('Cannot get token: No valid Slack '
'webhook token nor conn_id supplied') |
def _parse_docstring(docstring):
"""
Using the sphinx RSTParse to parse __doc__ for argparse `parameters`, `help`, and `description`. The first
    rst paragraph encountered is treated as the argparse help text. Any param fields are treated as argparse
arguments. Any other text is combined and added to the argparse description.
example:
\"""
this will be the summary
:param name: describe the parameter called name.
this will be the descriptions
* more description
* more description
This will also be in the description
\"""
:param str docstring:
:return:
:rtype: dict
"""
settings = OptionParser(components=(RSTParser,)).get_default_values()
rstparser = RSTParser()
document = utils.new_document(' ', settings)
rstparser.parse(docstring, document)
if document.children[0].tagname != 'block_quote':
logger.warning("The first line of the docstring must be blank.")
else:
document = document.children[0]
def get_params(field_list_node, params):
for field in field_list_node.children:
name = field.children[0].rawsource.split(' ')
if 'param' == name[0]:
params[name[-1]] = field.children[1].astext()
method_args = {'summary': '', 'params': dict(), 'description': ''}
for node in document.children:
        if node.tagname == 'paragraph' and method_args['summary'] == '':
method_args['summary'] = node.astext()
        elif node.tagname == 'field_list':
get_params(node, method_args['params'])
else:
method_args['description'] += '\n' + node.astext()
return method_args | Using the sphinx RSTParse to parse __doc__ for argparse `parameters`, `help`, and `description`. The first
    rst paragraph encountered is treated as the argparse help text. Any param fields are treated as argparse
arguments. Any other text is combined and added to the argparse description.
example:
\"""
this will be the summary
:param name: describe the parameter called name.
this will be the descriptions
* more description
* more description
This will also be in the description
\"""
:param str docstring:
:return:
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Using the sphinx RSTParse to parse __doc__ for argparse `parameters`, `help`, and `description`. The first
    rst paragraph encountered is treated as the argparse help text. Any param fields are treated as argparse
arguments. Any other text is combined and added to the argparse description.
example:
\"""
this will be the summary
:param name: describe the parameter called name.
this will be the descriptions
* more description
* more description
This will also be in the description
\"""
:param str docstring:
:return:
:rtype: dict
### Response:
def _parse_docstring(docstring):
"""
Using the sphinx RSTParse to parse __doc__ for argparse `parameters`, `help`, and `description`. The first
    rst paragraph encountered is treated as the argparse help text. Any param fields are treated as argparse
arguments. Any other text is combined and added to the argparse description.
example:
\"""
this will be the summary
:param name: describe the parameter called name.
this will be the descriptions
* more description
* more description
This will also be in the description
\"""
:param str docstring:
:return:
:rtype: dict
"""
settings = OptionParser(components=(RSTParser,)).get_default_values()
rstparser = RSTParser()
document = utils.new_document(' ', settings)
rstparser.parse(docstring, document)
if document.children[0].tagname != 'block_quote':
logger.warning("The first line of the docstring must be blank.")
else:
document = document.children[0]
def get_params(field_list_node, params):
for field in field_list_node.children:
name = field.children[0].rawsource.split(' ')
if 'param' == name[0]:
params[name[-1]] = field.children[1].astext()
method_args = {'summary': '', 'params': dict(), 'description': ''}
for node in document.children:
        if node.tagname == 'paragraph' and method_args['summary'] == '':
method_args['summary'] = node.astext()
        elif node.tagname == 'field_list':
get_params(node, method_args['params'])
else:
method_args['description'] += '\n' + node.astext()
return method_args |
def cns_vwl_str_len_wb_sb(self):
"""
Return a new IPAString, containing only:
1. the consonants,
2. the vowels, and
3. the stress diacritics,
4. the length diacritics,
5. the word breaks, and
6. the syllable breaks
in the current string.
:rtype: IPAString
"""
return IPAString(ipa_chars=[c for c in self.ipa_chars if (c.is_letter) or (c.is_suprasegmental and (c.is_stress or c.is_length or c.is_word_break or c.is_syllable_break))]) | Return a new IPAString, containing only:
1. the consonants,
2. the vowels, and
3. the stress diacritics,
4. the length diacritics,
5. the word breaks, and
6. the syllable breaks
in the current string.
:rtype: IPAString | Below is the the instruction that describes the task:
### Input:
Return a new IPAString, containing only:
1. the consonants,
2. the vowels, and
3. the stress diacritics,
4. the length diacritics,
5. the word breaks, and
6. the syllable breaks
in the current string.
:rtype: IPAString
### Response:
def cns_vwl_str_len_wb_sb(self):
"""
Return a new IPAString, containing only:
1. the consonants,
2. the vowels, and
3. the stress diacritics,
4. the length diacritics,
5. the word breaks, and
6. the syllable breaks
in the current string.
:rtype: IPAString
"""
return IPAString(ipa_chars=[c for c in self.ipa_chars if (c.is_letter) or (c.is_suprasegmental and (c.is_stress or c.is_length or c.is_word_break or c.is_syllable_break))]) |
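A short hedged sketch of how the filter is typically used; it assumes ipapy's IPAString(unicode_string=...) constructor, which is not part of the record itself:
from ipapy.ipastring import IPAString
s = IPAString(unicode_string=u"ˈʔaː.bi.da")
print(s.cns_vwl_str_len_wb_sb)  # keeps letters plus stress, length and break marks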
def name_match(self, wfn):
"""
Accepts a set of CPE Names K and a candidate CPE Name X. It returns
'True' if X matches any member of K, and 'False' otherwise.
:param CPESet self: A set of m known CPE Names K = {K1, K2, …, Km}.
        :param CPE wfn: A candidate CPE Name X.
:returns: True if X matches K, otherwise False.
:rtype: boolean
"""
for N in self.K:
if CPESet2_3.cpe_superset(wfn, N):
return True
return False | Accepts a set of CPE Names K and a candidate CPE Name X. It returns
'True' if X matches any member of K, and 'False' otherwise.
:param CPESet self: A set of m known CPE Names K = {K1, K2, …, Km}.
        :param CPE wfn: A candidate CPE Name X.
:returns: True if X matches K, otherwise False.
:rtype: boolean | Below is the the instruction that describes the task:
### Input:
Accepts a set of CPE Names K and a candidate CPE Name X. It returns
'True' if X matches any member of K, and 'False' otherwise.
:param CPESet self: A set of m known CPE Names K = {K1, K2, …, Km}.
        :param CPE wfn: A candidate CPE Name X.
:returns: True if X matches K, otherwise False.
:rtype: boolean
### Response:
def name_match(self, wfn):
"""
Accepts a set of CPE Names K and a candidate CPE Name X. It returns
'True' if X matches any member of K, and 'False' otherwise.
:param CPESet self: A set of m known CPE Names K = {K1, K2, …, Km}.
        :param CPE wfn: A candidate CPE Name X.
:returns: True if X matches K, otherwise False.
:rtype: boolean
"""
for N in self.K:
if CPESet2_3.cpe_superset(wfn, N):
return True
return False |
def get_domain_connect_template_async_context(self, domain, provider_id, service_id, redirect_uri, params=None,
state=None, service_id_in_path=False):
"""Makes full Domain Connect discovery of a domain and returns full context to request async consent.
:param domain: str
:param provider_id: str
:param service_id: str
:param redirect_uri: str
:param params: dict
:param state: str
:param service_id_in_path: bool
:return: (DomainConnectAsyncContext, str)
asyncConsentUrl field of returned context shall be used to redirect the browser to
second field is an indication of error
:raises: NoDomainConnectRecordException
when no _domainconnect record found
:raises: NoDomainConnectSettingsException
when settings are not found
:raises: TemplateNotSupportedException
when template is not found
:raises: InvalidDomainConnectSettingsException
when parts of the settings are missing
:raises: DomainConnectException
on other domain connect issues
"""
if params is None:
params = {}
config = self.get_domain_config(domain)
self.check_template_supported(config, provider_id, service_id)
if config.urlAsyncUX is None:
raise InvalidDomainConnectSettingsException("No asynch UX URL in config")
if service_id_in_path:
if type(service_id) is list:
raise DomainConnectException("Multiple services are only supported with service_id_in_path=false")
async_url_format = '{0}/v2/domainTemplates/providers/{1}/services/{2}' \
'?client_id={1}&scope={2}&domain={3}&host={4}&{5}'
else:
if type(service_id) is list:
service_id = '+'.join(service_id)
async_url_format = '{0}/v2/domainTemplates/providers/{1}' \
'?client_id={1}&scope={2}&domain={3}&host={4}&{5}'
if redirect_uri is not None:
params["redirect_uri"] = redirect_uri
if state is not None:
params["state"] = state
ret = DomainConnectAsyncContext(config, provider_id, service_id, redirect_uri, params)
ret.asyncConsentUrl = async_url_format.format(config.urlAsyncUX, provider_id, service_id,
config.domain_root, config.host,
urllib.parse.urlencode(
sorted(params.items(), key=lambda val: val[0])))
return ret | Makes full Domain Connect discovery of a domain and returns full context to request async consent.
:param domain: str
:param provider_id: str
:param service_id: str
:param redirect_uri: str
:param params: dict
:param state: str
:param service_id_in_path: bool
:return: (DomainConnectAsyncContext, str)
asyncConsentUrl field of returned context shall be used to redirect the browser to
second field is an indication of error
:raises: NoDomainConnectRecordException
when no _domainconnect record found
:raises: NoDomainConnectSettingsException
when settings are not found
:raises: TemplateNotSupportedException
when template is not found
:raises: InvalidDomainConnectSettingsException
when parts of the settings are missing
:raises: DomainConnectException
on other domain connect issues | Below is the the instruction that describes the task:
### Input:
Makes full Domain Connect discovery of a domain and returns full context to request async consent.
:param domain: str
:param provider_id: str
:param service_id: str
:param redirect_uri: str
:param params: dict
:param state: str
:param service_id_in_path: bool
:return: (DomainConnectAsyncContext, str)
asyncConsentUrl field of returned context shall be used to redirect the browser to
second field is an indication of error
:raises: NoDomainConnectRecordException
when no _domainconnect record found
:raises: NoDomainConnectSettingsException
when settings are not found
:raises: TemplateNotSupportedException
when template is not found
:raises: InvalidDomainConnectSettingsException
when parts of the settings are missing
:raises: DomainConnectException
on other domain connect issues
### Response:
def get_domain_connect_template_async_context(self, domain, provider_id, service_id, redirect_uri, params=None,
state=None, service_id_in_path=False):
"""Makes full Domain Connect discovery of a domain and returns full context to request async consent.
:param domain: str
:param provider_id: str
:param service_id: str
:param redirect_uri: str
:param params: dict
:param state: str
:param service_id_in_path: bool
:return: (DomainConnectAsyncContext, str)
asyncConsentUrl field of returned context shall be used to redirect the browser to
second field is an indication of error
:raises: NoDomainConnectRecordException
when no _domainconnect record found
:raises: NoDomainConnectSettingsException
when settings are not found
:raises: TemplateNotSupportedException
when template is not found
:raises: InvalidDomainConnectSettingsException
when parts of the settings are missing
:raises: DomainConnectException
on other domain connect issues
"""
if params is None:
params = {}
config = self.get_domain_config(domain)
self.check_template_supported(config, provider_id, service_id)
if config.urlAsyncUX is None:
raise InvalidDomainConnectSettingsException("No asynch UX URL in config")
if service_id_in_path:
if type(service_id) is list:
raise DomainConnectException("Multiple services are only supported with service_id_in_path=false")
async_url_format = '{0}/v2/domainTemplates/providers/{1}/services/{2}' \
'?client_id={1}&scope={2}&domain={3}&host={4}&{5}'
else:
if type(service_id) is list:
service_id = '+'.join(service_id)
async_url_format = '{0}/v2/domainTemplates/providers/{1}' \
'?client_id={1}&scope={2}&domain={3}&host={4}&{5}'
if redirect_uri is not None:
params["redirect_uri"] = redirect_uri
if state is not None:
params["state"] = state
ret = DomainConnectAsyncContext(config, provider_id, service_id, redirect_uri, params)
ret.asyncConsentUrl = async_url_format.format(config.urlAsyncUX, provider_id, service_id,
config.domain_root, config.host,
urllib.parse.urlencode(
sorted(params.items(), key=lambda val: val[0])))
return ret |
def get_file(self, file_key):
'''Gets file information
Args:
file_key key for the file to get
return (status code, dict of file info)
'''
uri = '/'.join([
self.api_uri,
self.files_suffix,
file_key
])
return self._req('get', uri) | Gets file information
Args:
file_key key for the file to get
return (status code, dict of file info) | Below is the the instruction that describes the task:
### Input:
Gets file information
Args:
file_key key for the file to get
return (status code, dict of file info)
### Response:
def get_file(self, file_key):
'''Gets file information
Args:
file_key key for the file to get
return (status code, dict of file info)
'''
uri = '/'.join([
self.api_uri,
self.files_suffix,
file_key
])
return self._req('get', uri) |
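Hedged usage sketch for the method above; the client object and file key are hypothetical:
status, info = client.get_file('f_12345')
if status == 200:
    print(info.get('name'), info.get('size'))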
def tuple(self):
""" Tuple conversion to (value, dimensions), e.g.:
(123, {dimension_1: "foo", dimension_2: "bar"})
"""
return (self.value, {dv.id: dv.value for dv in self.dimensionvalues}) | Tuple conversion to (value, dimensions), e.g.:
(123, {dimension_1: "foo", dimension_2: "bar"}) | Below is the the instruction that describes the task:
### Input:
Tuple conversion to (value, dimensions), e.g.:
(123, {dimension_1: "foo", dimension_2: "bar"})
### Response:
def tuple(self):
""" Tuple conversion to (value, dimensions), e.g.:
(123, {dimension_1: "foo", dimension_2: "bar"})
"""
return (self.value, {dv.id: dv.value for dv in self.dimensionvalues}) |
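An illustrative conversion for the property above; the dimension ids and values are invented:
value, dims = result.tuple
# value -> 123
# dims  -> {'municipality': 'Stockholm', 'year': '2016'}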
def _parse_xmatch_catalog_header(xc, xk):
'''
This parses the header for a catalog file and returns it as a file object.
Parameters
----------
xc : str
The file name of an xmatch catalog prepared previously.
xk : list of str
This is a list of column names to extract from the xmatch catalog.
Returns
-------
tuple
The tuple returned is of the form::
(infd: the file object associated with the opened xmatch catalog,
catdefdict: a dict describing the catalog column definitions,
catcolinds: column number indices of the catalog,
catcoldtypes: the numpy dtypes of the catalog columns,
catcolnames: the names of each catalog column,
catcolunits: the units associated with each catalog column)
'''
catdef = []
# read in this catalog and transparently handle gzipped files
if xc.endswith('.gz'):
infd = gzip.open(xc,'rb')
else:
infd = open(xc,'rb')
# read in the defs
for line in infd:
if line.decode().startswith('#'):
catdef.append(
line.decode().replace('#','').strip().rstrip('\n')
)
if not line.decode().startswith('#'):
break
if not len(catdef) > 0:
LOGERROR("catalog definition not parseable "
"for catalog: %s, skipping..." % xc)
return None
catdef = ' '.join(catdef)
catdefdict = json.loads(catdef)
catdefkeys = [x['key'] for x in catdefdict['columns']]
catdefdtypes = [x['dtype'] for x in catdefdict['columns']]
catdefnames = [x['name'] for x in catdefdict['columns']]
catdefunits = [x['unit'] for x in catdefdict['columns']]
# get the correct column indices and dtypes for the requested columns
# from the catdefdict
catcolinds = []
catcoldtypes = []
catcolnames = []
catcolunits = []
for xkcol in xk:
if xkcol in catdefkeys:
xkcolind = catdefkeys.index(xkcol)
catcolinds.append(xkcolind)
catcoldtypes.append(catdefdtypes[xkcolind])
catcolnames.append(catdefnames[xkcolind])
catcolunits.append(catdefunits[xkcolind])
return (infd, catdefdict,
catcolinds, catcoldtypes, catcolnames, catcolunits) | This parses the header for a catalog file and returns it as a file object.
Parameters
----------
xc : str
The file name of an xmatch catalog prepared previously.
xk : list of str
This is a list of column names to extract from the xmatch catalog.
Returns
-------
tuple
The tuple returned is of the form::
(infd: the file object associated with the opened xmatch catalog,
catdefdict: a dict describing the catalog column definitions,
catcolinds: column number indices of the catalog,
catcoldtypes: the numpy dtypes of the catalog columns,
catcolnames: the names of each catalog column,
catcolunits: the units associated with each catalog column) | Below is the the instruction that describes the task:
### Input:
This parses the header for a catalog file and returns it as a file object.
Parameters
----------
xc : str
The file name of an xmatch catalog prepared previously.
xk : list of str
This is a list of column names to extract from the xmatch catalog.
Returns
-------
tuple
The tuple returned is of the form::
(infd: the file object associated with the opened xmatch catalog,
catdefdict: a dict describing the catalog column definitions,
catcolinds: column number indices of the catalog,
catcoldtypes: the numpy dtypes of the catalog columns,
catcolnames: the names of each catalog column,
catcolunits: the units associated with each catalog column)
### Response:
def _parse_xmatch_catalog_header(xc, xk):
'''
This parses the header for a catalog file and returns it as a file object.
Parameters
----------
xc : str
The file name of an xmatch catalog prepared previously.
xk : list of str
This is a list of column names to extract from the xmatch catalog.
Returns
-------
tuple
The tuple returned is of the form::
(infd: the file object associated with the opened xmatch catalog,
catdefdict: a dict describing the catalog column definitions,
catcolinds: column number indices of the catalog,
catcoldtypes: the numpy dtypes of the catalog columns,
catcolnames: the names of each catalog column,
catcolunits: the units associated with each catalog column)
'''
catdef = []
# read in this catalog and transparently handle gzipped files
if xc.endswith('.gz'):
infd = gzip.open(xc,'rb')
else:
infd = open(xc,'rb')
# read in the defs
for line in infd:
if line.decode().startswith('#'):
catdef.append(
line.decode().replace('#','').strip().rstrip('\n')
)
if not line.decode().startswith('#'):
break
if not len(catdef) > 0:
LOGERROR("catalog definition not parseable "
"for catalog: %s, skipping..." % xc)
return None
catdef = ' '.join(catdef)
catdefdict = json.loads(catdef)
catdefkeys = [x['key'] for x in catdefdict['columns']]
catdefdtypes = [x['dtype'] for x in catdefdict['columns']]
catdefnames = [x['name'] for x in catdefdict['columns']]
catdefunits = [x['unit'] for x in catdefdict['columns']]
# get the correct column indices and dtypes for the requested columns
# from the catdefdict
catcolinds = []
catcoldtypes = []
catcolnames = []
catcolunits = []
for xkcol in xk:
if xkcol in catdefkeys:
xkcolind = catdefkeys.index(xkcol)
catcolinds.append(xkcolind)
catcoldtypes.append(catdefdtypes[xkcolind])
catcolnames.append(catdefnames[xkcolind])
catcolunits.append(catdefunits[xkcolind])
return (infd, catdefdict,
catcolinds, catcoldtypes, catcolnames, catcolunits) |
def pxconfig(self, line):
"""configure default targets/blocking for %px magics"""
args = magic_arguments.parse_argstring(self.pxconfig, line)
if args.targets:
self.view.targets = self._eval_target_str(args.targets)
if args.block is not None:
self.view.block = args.block
if args.set_verbose is not None:
self.verbose = args.set_verbose | configure default targets/blocking for %px magics | Below is the the instruction that describes the task:
### Input:
configure default targets/blocking for %px magics
### Response:
def pxconfig(self, line):
"""configure default targets/blocking for %px magics"""
args = magic_arguments.parse_argstring(self.pxconfig, line)
if args.targets:
self.view.targets = self._eval_target_str(args.targets)
if args.block is not None:
self.view.block = args.block
if args.set_verbose is not None:
self.verbose = args.set_verbose |
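In an IPython session the magic above is typically driven as shown below; the exact flag spellings are assumptions inferred from the parsed attributes (targets, block, set_verbose):
# %pxconfig --targets 0:4 --block
# %pxconfig --targets all --verbose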
def _split_cluster_by_most_vote(c, p):
"""split cluster by most-vote strategy"""
old, new = c[p[0]], c[p[1]]
old_size = _get_seqs(old)
new_size = _get_seqs(new)
logger.debug("_most_vote: size of %s with %s - %s with %s" % (old.id, len(old_size), new.id, len(new_size)))
if len(old_size) > len(new_size):
keep, remove = old, new
else:
keep, remove = new, old
common = list(set(old_size).intersection(new_size))
logger.debug("_most_vote: keep %s remove %s with common %s" % (keep.id, remove.id, len(common)))
for idl in remove.loci2seq:
if len(common) > 0:
remove.loci2seq[idl] = list(set(remove.loci2seq[idl]) - set(common))
keep.loci2seq = {k: v for k, v in keep.loci2seq.iteritems() if len(v) > 0}
remove.loci2seq = {k: v for k, v in remove.loci2seq.iteritems() if len(v) > 0}
keep.update()
remove.update()
c[keep.id] = keep
c[remove.id] = remove
return c | split cluster by most-vote strategy | Below is the the instruction that describes the task:
### Input:
split cluster by most-vote strategy
### Response:
def _split_cluster_by_most_vote(c, p):
"""split cluster by most-vote strategy"""
old, new = c[p[0]], c[p[1]]
old_size = _get_seqs(old)
new_size = _get_seqs(new)
logger.debug("_most_vote: size of %s with %s - %s with %s" % (old.id, len(old_size), new.id, len(new_size)))
if len(old_size) > len(new_size):
keep, remove = old, new
else:
keep, remove = new, old
common = list(set(old_size).intersection(new_size))
logger.debug("_most_vote: keep %s remove %s with common %s" % (keep.id, remove.id, len(common)))
for idl in remove.loci2seq:
if len(common) > 0:
remove.loci2seq[idl] = list(set(remove.loci2seq[idl]) - set(common))
keep.loci2seq = {k: v for k, v in keep.loci2seq.iteritems() if len(v) > 0}
remove.loci2seq = {k: v for k, v in remove.loci2seq.iteritems() if len(v) > 0}
keep.update()
remove.update()
c[keep.id] = keep
c[remove.id] = remove
return c |
def _bstar_set(beta, alpha, yTBy, yTBX, yTBM, XTBX, XTBM, MTBM):
"""
Compute -2𝐲ᵀBEⱼ𝐛ⱼ + (𝐛ⱼEⱼ)ᵀBEⱼ𝐛ⱼ.
For 𝐛ⱼ = [𝜷ⱼᵀ 𝜶ⱼᵀ]ᵀ.
"""
from numpy_sugar import epsilon
r = yTBy
r -= 2 * add.reduce([i @ beta for i in yTBX])
r -= 2 * add.reduce([i @ alpha for i in yTBM])
r += add.reduce([beta.T @ i @ beta for i in XTBX])
r += 2 * add.reduce([beta.T @ i @ alpha for i in XTBM])
r += add.reduce([alpha.T @ i @ alpha for i in MTBM])
return clip(r, epsilon.tiny, inf) | Compute -2𝐲ᵀBEⱼ𝐛ⱼ + (𝐛ⱼEⱼ)ᵀBEⱼ𝐛ⱼ.
For 𝐛ⱼ = [𝜷ⱼᵀ 𝜶ⱼᵀ]ᵀ. | Below is the the instruction that describes the task:
### Input:
Compute -2𝐲ᵀBEⱼ𝐛ⱼ + (𝐛ⱼEⱼ)ᵀBEⱼ𝐛ⱼ.
For 𝐛ⱼ = [𝜷ⱼᵀ 𝜶ⱼᵀ]ᵀ.
### Response:
def _bstar_set(beta, alpha, yTBy, yTBX, yTBM, XTBX, XTBM, MTBM):
"""
Compute -2𝐲ᵀBEⱼ𝐛ⱼ + (𝐛ⱼEⱼ)ᵀBEⱼ𝐛ⱼ.
For 𝐛ⱼ = [𝜷ⱼᵀ 𝜶ⱼᵀ]ᵀ.
"""
from numpy_sugar import epsilon
r = yTBy
r -= 2 * add.reduce([i @ beta for i in yTBX])
r -= 2 * add.reduce([i @ alpha for i in yTBM])
r += add.reduce([beta.T @ i @ beta for i in XTBX])
r += 2 * add.reduce([beta.T @ i @ alpha for i in XTBM])
r += add.reduce([alpha.T @ i @ alpha for i in MTBM])
return clip(r, epsilon.tiny, inf) |
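A tiny numerical sketch of how the helper composes, using one block per list and taking B as the identity; all array names are made up:
import numpy as np
y = np.random.randn(5); X = np.random.randn(5, 2); M = np.random.randn(5, 1)
beta, alpha = np.zeros(2), np.zeros(1)
r = _bstar_set(beta, alpha, y @ y, [X.T @ y], [M.T @ y],
               [X.T @ X], [X.T @ M], [M.T @ M])
# with zero coefficients r reduces to yᵀy (clipped away from zero)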
def get_parquet_metadata(
self,
path='.'
):
"""
OUTPUT PARQUET METADATA COLUMNS
:param path: FOR INTERNAL USE
:return: LIST OF SchemaElement
"""
children = []
for name, child_schema in sort_using_key(self.more.items(), lambda p: p[0]):
children.extend(child_schema.get_parquet_metadata(concat_field(path, name)))
if path == '.':
return children
else:
self.element.num_children = len(children)
return [self.element] + children | OUTPUT PARQUET METADATA COLUMNS
:param path: FOR INTERNAL USE
:return: LIST OF SchemaElement | Below is the the instruction that describes the task:
### Input:
OUTPUT PARQUET METADATA COLUMNS
:param path: FOR INTERNAL USE
:return: LIST OF SchemaElement
### Response:
def get_parquet_metadata(
self,
path='.'
):
"""
OUTPUT PARQUET METADATA COLUMNS
:param path: FOR INTERNAL USE
:return: LIST OF SchemaElement
"""
children = []
for name, child_schema in sort_using_key(self.more.items(), lambda p: p[0]):
children.extend(child_schema.get_parquet_metadata(concat_field(path, name)))
if path == '.':
return children
else:
self.element.num_children = len(children)
return [self.element] + children |
def _check_args(logZ, f, x, samples, weights):
""" Sanity-check the arguments for :func:`fgivenx.drivers.compute_samples`.
Parameters
----------
f, x, samples, weights:
see arguments for :func:`fgivenx.drivers.compute_samples`
"""
# convert to arrays
if logZ is None:
logZ = [0]
f = [f]
samples = [samples]
weights = [weights]
# logZ
logZ = numpy.array(logZ, dtype='double')
    if len(logZ.shape) != 1:
raise ValueError("logZ should be a 1D array")
# x
x = numpy.array(x, dtype='double')
    if len(x.shape) != 1:
raise ValueError("x should be a 1D array")
# f
if len(logZ) != len(f):
raise ValueError("len(logZ) = %i != len(f)= %i"
% (len(logZ), len(f)))
for func in f:
if not callable(func):
raise ValueError("first argument f must be function"
"(or list of functions) of two variables")
# samples
if len(logZ) != len(samples):
raise ValueError("len(logZ) = %i != len(samples)= %i"
% (len(logZ), len(samples)))
samples = [numpy.array(s, dtype='double') for s in samples]
for s in samples:
        if len(s.shape) != 2:
raise ValueError("each set of samples should be a 2D array")
# weights
if len(logZ) != len(weights):
raise ValueError("len(logZ) = %i != len(weights)= %i"
% (len(logZ), len(weights)))
weights = [numpy.array(w, dtype='double') if w is not None
else numpy.ones(len(s), dtype='double')
for w, s in zip(weights, samples)]
for w, s in zip(weights, samples):
        if len(w.shape) != 1:
raise ValueError("each set of weights should be a 1D array")
if len(w) != len(s):
raise ValueError("len(w) = %i != len(s) = %i" % (len(s), len(w)))
return logZ, f, x, samples, weights | Sanity-check the arguments for :func:`fgivenx.drivers.compute_samples`.
Parameters
----------
f, x, samples, weights:
see arguments for :func:`fgivenx.drivers.compute_samples` | Below is the the instruction that describes the task:
### Input:
Sanity-check the arguments for :func:`fgivenx.drivers.compute_samples`.
Parameters
----------
f, x, samples, weights:
see arguments for :func:`fgivenx.drivers.compute_samples`
### Response:
def _check_args(logZ, f, x, samples, weights):
""" Sanity-check the arguments for :func:`fgivenx.drivers.compute_samples`.
Parameters
----------
f, x, samples, weights:
see arguments for :func:`fgivenx.drivers.compute_samples`
"""
# convert to arrays
if logZ is None:
logZ = [0]
f = [f]
samples = [samples]
weights = [weights]
# logZ
logZ = numpy.array(logZ, dtype='double')
    if len(logZ.shape) != 1:
raise ValueError("logZ should be a 1D array")
# x
x = numpy.array(x, dtype='double')
    if len(x.shape) != 1:
raise ValueError("x should be a 1D array")
# f
if len(logZ) != len(f):
raise ValueError("len(logZ) = %i != len(f)= %i"
% (len(logZ), len(f)))
for func in f:
if not callable(func):
raise ValueError("first argument f must be function"
"(or list of functions) of two variables")
# samples
if len(logZ) != len(samples):
raise ValueError("len(logZ) = %i != len(samples)= %i"
% (len(logZ), len(samples)))
samples = [numpy.array(s, dtype='double') for s in samples]
for s in samples:
        if len(s.shape) != 2:
raise ValueError("each set of samples should be a 2D array")
# weights
if len(logZ) != len(weights):
raise ValueError("len(logZ) = %i != len(weights)= %i"
% (len(logZ), len(weights)))
weights = [numpy.array(w, dtype='double') if w is not None
else numpy.ones(len(s), dtype='double')
for w, s in zip(weights, samples)]
for w, s in zip(weights, samples):
        if len(w.shape) != 1:
raise ValueError("each set of weights should be a 1D array")
if len(w) != len(s):
raise ValueError("len(w) = %i != len(s) = %i" % (len(s), len(w)))
return logZ, f, x, samples, weights |
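A hedged example of the single-run case, where logZ=None makes the helper wrap every argument in a one-element list; all inputs are invented:
import numpy
f = lambda x, theta: theta[0] * x + theta[1]
x = numpy.linspace(0, 1, 100)
samples = numpy.random.rand(50, 2)
logZ, fs, x, samples_list, weights = _check_args(None, f, x, samples, None)
# fs, samples_list and weights are now length-1 lists; weights defaults to ones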
def _pre_analysis(self):
"""
Executed before analysis starts. Necessary initializations are performed here.
:return: None
"""
l.debug("Starting from %#x", self._start)
# initialize the task stack
self._task_stack = [ ]
# initialize the execution counter dict
self._execution_counter = defaultdict(int)
# Generate a CFG if no CFG is provided
if not self._cfg:
l.debug("Generating a CFG, since none was given...")
# TODO: can we use a fast CFG instead? note that fast CFG does not care of context sensitivity at all, but
# TODO: for state merging, we also don't really care about context sensitivity.
self._cfg = self.project.analyses.CFGEmulated(context_sensitivity_level=self._context_sensitivity_level,
starts=(self._start,)
)
if not self._cfg.normalized:
l.warning("The given CFG is not normalized, which might impact the performance/accuracy of the VFG "
"analysis.")
# Prepare the state
initial_state = self._prepare_initial_state(self._start, self._initial_state)
initial_state.ip = self._start
if self.project.arch.name.startswith('MIPS'):
initial_state.regs.t9 = self._start
# clear function merge points cache
self._function_merge_points = {}
# Create the initial state
state = initial_state.copy()
if self._start_at_function:
# set the return address to an address so we can catch it and terminate the VSA analysis
# TODO: Properly pick an address that will not conflict with any existing code and data in the program
self._final_address = 0x4fff0000
self._set_return_address(state, self._final_address)
call_stack = None
if not self._start_at_function:
# we should build a custom call stack
call_stack = CallStack()
call_stack = call_stack.call(None, self._function_start, retn_target=self._final_address)
job = VFGJob(state.addr, state, self._context_sensitivity_level,
jumpkind='Ijk_Boring', final_return_address=self._final_address,
call_stack=call_stack
)
block_id = BlockID.new(state.addr, job.get_call_stack_suffix(), job.jumpkind)
job._block_id = block_id
self._insert_job(job)
# create the task
function_analysis_task = FunctionAnalysis(self._function_start, self._final_address)
function_analysis_task.jobs.append(job)
self._task_stack.append(function_analysis_task) | Executed before analysis starts. Necessary initializations are performed here.
:return: None | Below is the the instruction that describes the task:
### Input:
Executed before analysis starts. Necessary initializations are performed here.
:return: None
### Response:
def _pre_analysis(self):
"""
Executed before analysis starts. Necessary initializations are performed here.
:return: None
"""
l.debug("Starting from %#x", self._start)
# initialize the task stack
self._task_stack = [ ]
# initialize the execution counter dict
self._execution_counter = defaultdict(int)
# Generate a CFG if no CFG is provided
if not self._cfg:
l.debug("Generating a CFG, since none was given...")
# TODO: can we use a fast CFG instead? note that fast CFG does not care of context sensitivity at all, but
# TODO: for state merging, we also don't really care about context sensitivity.
self._cfg = self.project.analyses.CFGEmulated(context_sensitivity_level=self._context_sensitivity_level,
starts=(self._start,)
)
if not self._cfg.normalized:
l.warning("The given CFG is not normalized, which might impact the performance/accuracy of the VFG "
"analysis.")
# Prepare the state
initial_state = self._prepare_initial_state(self._start, self._initial_state)
initial_state.ip = self._start
if self.project.arch.name.startswith('MIPS'):
initial_state.regs.t9 = self._start
# clear function merge points cache
self._function_merge_points = {}
# Create the initial state
state = initial_state.copy()
if self._start_at_function:
# set the return address to an address so we can catch it and terminate the VSA analysis
# TODO: Properly pick an address that will not conflict with any existing code and data in the program
self._final_address = 0x4fff0000
self._set_return_address(state, self._final_address)
call_stack = None
if not self._start_at_function:
# we should build a custom call stack
call_stack = CallStack()
call_stack = call_stack.call(None, self._function_start, retn_target=self._final_address)
job = VFGJob(state.addr, state, self._context_sensitivity_level,
jumpkind='Ijk_Boring', final_return_address=self._final_address,
call_stack=call_stack
)
block_id = BlockID.new(state.addr, job.get_call_stack_suffix(), job.jumpkind)
job._block_id = block_id
self._insert_job(job)
# create the task
function_analysis_task = FunctionAnalysis(self._function_start, self._final_address)
function_analysis_task.jobs.append(job)
self._task_stack.append(function_analysis_task) |
def scan(self, cursor=0, match=None, count=None):
"""
Incrementally return lists of key names. Also return a cursor
indicating the scan position.
``match`` allows for filtering the keys by pattern
        ``count`` allows for hinting at the minimum number of keys to return
"""
pieces = [cursor]
if match is not None:
pieces.extend([Token.get_token('MATCH'), match])
if count is not None:
pieces.extend([Token.get_token('COUNT'), count])
return self.execute_command('SCAN', *pieces) | Incrementally return lists of key names. Also return a cursor
indicating the scan position.
``match`` allows for filtering the keys by pattern
        ``count`` allows for hinting at the minimum number of keys to return
### Input:
Incrementally return lists of key names. Also return a cursor
indicating the scan position.
``match`` allows for filtering the keys by pattern
        ``count`` allows for hinting at the minimum number of keys to return
### Response:
def scan(self, cursor=0, match=None, count=None):
"""
Incrementally return lists of key names. Also return a cursor
indicating the scan position.
``match`` allows for filtering the keys by pattern
        ``count`` allows for hinting at the minimum number of keys to return
"""
pieces = [cursor]
if match is not None:
pieces.extend([Token.get_token('MATCH'), match])
if count is not None:
pieces.extend([Token.get_token('COUNT'), count])
return self.execute_command('SCAN', *pieces) |
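Assuming a redis-py style client r, a typical incremental walk over matching keys looks like this; the key pattern is hypothetical:
cursor = 0
while True:
    cursor, keys = r.scan(cursor=cursor, match='user:*', count=500)
    for key in keys:
        pass  # process each key here
    if cursor == 0:
        break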
def make_public(self, client=None):
"""Update blob's ACL, granting read access to anonymous users.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
"""
self.acl.all().grant_read()
self.acl.save(client=client) | Update blob's ACL, granting read access to anonymous users.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket. | Below is the the instruction that describes the task:
### Input:
Update blob's ACL, granting read access to anonymous users.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
### Response:
def make_public(self, client=None):
"""Update blob's ACL, granting read access to anonymous users.
:type client: :class:`~google.cloud.storage.client.Client` or
``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
"""
self.acl.all().grant_read()
self.acl.save(client=client) |
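A hedged google-cloud-storage style sketch; the bucket and object names are invented:
blob = bucket.blob('reports/2019-summary.csv')
blob.make_public()
print(blob.public_url)  # now readable by anonymous users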
def fold_all(self):
"""Folds/unfolds all levels in the editor"""
line_count = self.GetLineCount()
expanding = True
# find out if we are folding or unfolding
for line_num in range(line_count):
if self.GetFoldLevel(line_num) & stc.STC_FOLDLEVELHEADERFLAG:
expanding = not self.GetFoldExpanded(line_num)
break
line_num = 0
while line_num < line_count:
level = self.GetFoldLevel(line_num)
if level & stc.STC_FOLDLEVELHEADERFLAG and \
(level & stc.STC_FOLDLEVELNUMBERMASK) == stc.STC_FOLDLEVELBASE:
if expanding:
self.SetFoldExpanded(line_num, True)
line_num = self.expand(line_num, True)
line_num = line_num - 1
else:
last_child = self.GetLastChild(line_num, -1)
self.SetFoldExpanded(line_num, False)
if last_child > line_num:
self.HideLines(line_num + 1, last_child)
line_num = line_num + 1 | Folds/unfolds all levels in the editor | Below is the the instruction that describes the task:
### Input:
Folds/unfolds all levels in the editor
### Response:
def fold_all(self):
"""Folds/unfolds all levels in the editor"""
line_count = self.GetLineCount()
expanding = True
# find out if we are folding or unfolding
for line_num in range(line_count):
if self.GetFoldLevel(line_num) & stc.STC_FOLDLEVELHEADERFLAG:
expanding = not self.GetFoldExpanded(line_num)
break
line_num = 0
while line_num < line_count:
level = self.GetFoldLevel(line_num)
if level & stc.STC_FOLDLEVELHEADERFLAG and \
(level & stc.STC_FOLDLEVELNUMBERMASK) == stc.STC_FOLDLEVELBASE:
if expanding:
self.SetFoldExpanded(line_num, True)
line_num = self.expand(line_num, True)
line_num = line_num - 1
else:
last_child = self.GetLastChild(line_num, -1)
self.SetFoldExpanded(line_num, False)
if last_child > line_num:
self.HideLines(line_num + 1, last_child)
line_num = line_num + 1 |
def tokenize(self, line: str, expand: bool = True) -> List[str]:
"""
Lex a string into a list of tokens. Shortcuts and aliases are expanded and comments are removed
:param line: the command line being lexed
:param expand: If True, then aliases and shortcuts will be expanded.
Set this to False if no expansion should occur because the command name is already known.
Otherwise the command could be expanded if it matched an alias name. This is for cases where
a do_* method was called manually (e.g do_help('alias').
:return: A list of tokens
:raises ValueError if there are unclosed quotation marks.
"""
# expand shortcuts and aliases
if expand:
line = self._expand(line)
# check if this line is a comment
if line.lstrip().startswith(constants.COMMENT_CHAR):
return []
# split on whitespace
tokens = shlex_split(line)
# custom lexing
tokens = self._split_on_punctuation(tokens)
return tokens | Lex a string into a list of tokens. Shortcuts and aliases are expanded and comments are removed
:param line: the command line being lexed
:param expand: If True, then aliases and shortcuts will be expanded.
Set this to False if no expansion should occur because the command name is already known.
Otherwise the command could be expanded if it matched an alias name. This is for cases where
a do_* method was called manually (e.g do_help('alias').
:return: A list of tokens
:raises ValueError if there are unclosed quotation marks. | Below is the the instruction that describes the task:
### Input:
Lex a string into a list of tokens. Shortcuts and aliases are expanded and comments are removed
:param line: the command line being lexed
:param expand: If True, then aliases and shortcuts will be expanded.
Set this to False if no expansion should occur because the command name is already known.
Otherwise the command could be expanded if it matched an alias name. This is for cases where
a do_* method was called manually (e.g do_help('alias').
:return: A list of tokens
:raises ValueError if there are unclosed quotation marks.
### Response:
def tokenize(self, line: str, expand: bool = True) -> List[str]:
"""
Lex a string into a list of tokens. Shortcuts and aliases are expanded and comments are removed
:param line: the command line being lexed
:param expand: If True, then aliases and shortcuts will be expanded.
Set this to False if no expansion should occur because the command name is already known.
Otherwise the command could be expanded if it matched an alias name. This is for cases where
a do_* method was called manually (e.g do_help('alias').
:return: A list of tokens
:raises ValueError if there are unclosed quotation marks.
"""
# expand shortcuts and aliases
if expand:
line = self._expand(line)
# check if this line is a comment
if line.lstrip().startswith(constants.COMMENT_CHAR):
return []
# split on whitespace
tokens = shlex_split(line)
# custom lexing
tokens = self._split_on_punctuation(tokens)
return tokens |
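Hedged illustration of the comment and expansion handling above; the app object and alias are made up:
app.tokenize('# just a comment')           # -> []
app.tokenize('greet "John Doe"')           # aliases/shortcuts expanded, then split
app.tokenize('help alias', expand=False)   # skip alias/shortcut expansion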
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(PostqueueCollector, self).get_default_config()
config.update({
'path': 'postqueue',
'bin': '/usr/bin/postqueue',
'use_sudo': False,
'sudo_cmd': '/usr/bin/sudo',
})
return config | Returns the default collector settings | Below is the the instruction that describes the task:
### Input:
Returns the default collector settings
### Response:
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(PostqueueCollector, self).get_default_config()
config.update({
'path': 'postqueue',
'bin': '/usr/bin/postqueue',
'use_sudo': False,
'sudo_cmd': '/usr/bin/sudo',
})
return config |
def sim_monge_elkan(src, tar, sim_func=sim_levenshtein, symmetric=False):
"""Return the Monge-Elkan similarity of two strings.
This is a wrapper for :py:meth:`MongeElkan.sim`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
sim_func : function
        The internal similarity metric to employ
symmetric : bool
Return a symmetric similarity measure
Returns
-------
float
Monge-Elkan similarity
Examples
--------
>>> sim_monge_elkan('cat', 'hat')
0.75
>>> round(sim_monge_elkan('Niall', 'Neil'), 12)
0.666666666667
>>> round(sim_monge_elkan('aluminum', 'Catalan'), 12)
0.388888888889
>>> sim_monge_elkan('ATCG', 'TAGC')
0.5
"""
return MongeElkan().sim(src, tar, sim_func, symmetric) | Return the Monge-Elkan similarity of two strings.
This is a wrapper for :py:meth:`MongeElkan.sim`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
sim_func : function
        The internal similarity metric to employ
symmetric : bool
Return a symmetric similarity measure
Returns
-------
float
Monge-Elkan similarity
Examples
--------
>>> sim_monge_elkan('cat', 'hat')
0.75
>>> round(sim_monge_elkan('Niall', 'Neil'), 12)
0.666666666667
>>> round(sim_monge_elkan('aluminum', 'Catalan'), 12)
0.388888888889
>>> sim_monge_elkan('ATCG', 'TAGC')
0.5 | Below is the the instruction that describes the task:
### Input:
Return the Monge-Elkan similarity of two strings.
This is a wrapper for :py:meth:`MongeElkan.sim`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
sim_func : function
        The internal similarity metric to employ
symmetric : bool
Return a symmetric similarity measure
Returns
-------
float
Monge-Elkan similarity
Examples
--------
>>> sim_monge_elkan('cat', 'hat')
0.75
>>> round(sim_monge_elkan('Niall', 'Neil'), 12)
0.666666666667
>>> round(sim_monge_elkan('aluminum', 'Catalan'), 12)
0.388888888889
>>> sim_monge_elkan('ATCG', 'TAGC')
0.5
### Response:
def sim_monge_elkan(src, tar, sim_func=sim_levenshtein, symmetric=False):
"""Return the Monge-Elkan similarity of two strings.
This is a wrapper for :py:meth:`MongeElkan.sim`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
sim_func : function
        The internal similarity metric to employ
symmetric : bool
Return a symmetric similarity measure
Returns
-------
float
Monge-Elkan similarity
Examples
--------
>>> sim_monge_elkan('cat', 'hat')
0.75
>>> round(sim_monge_elkan('Niall', 'Neil'), 12)
0.666666666667
>>> round(sim_monge_elkan('aluminum', 'Catalan'), 12)
0.388888888889
>>> sim_monge_elkan('ATCG', 'TAGC')
0.5
"""
return MongeElkan().sim(src, tar, sim_func, symmetric) |
def _make_query_from_terms(self, terms, limit=None):
""" Creates a query for dataset from decomposed search terms.
Args:
terms (dict or unicode or string):
Returns:
tuple of (TextClause, dict): First element is FTS query, second is parameters
of the query. Element of the execution of the query is pair: (vid, score).
"""
expanded_terms = self._expand_terms(terms)
if expanded_terms['doc']:
# create query with real score.
query_parts = ["SELECT vid, ts_rank_cd(setweight(doc,'C'), to_tsquery(:doc)) as score"]
if expanded_terms['doc'] and expanded_terms['keywords']:
query_parts = ["SELECT vid, ts_rank_cd(setweight(doc,'C'), to_tsquery(:doc)) "
" + ts_rank_cd(setweight(to_tsvector(coalesce(keywords::text,'')),'B'), to_tsquery(:keywords))"
' as score']
else:
# create query with score = 1 because query will not touch doc field.
query_parts = ['SELECT vid, 1 as score']
query_parts.append('FROM dataset_index')
query_params = {}
where_counter = 0
if expanded_terms['doc']:
where_counter += 1
query_parts.append('WHERE doc @@ to_tsquery(:doc)')
query_params['doc'] = self.backend._and_join(expanded_terms['doc'])
if expanded_terms['keywords']:
query_params['keywords'] = self.backend._and_join(expanded_terms['keywords'])
kw_q = "to_tsvector(coalesce(keywords::text,'')) @@ to_tsquery(:keywords)"
query_parts.append( ("AND " if where_counter else "WHERE ") + kw_q )
query_parts.append('ORDER BY score DESC')
if limit:
query_parts.append('LIMIT :limit')
query_params['limit'] = limit
query_parts.append(';')
deb_msg = 'Dataset terms conversion: `{}` terms converted to `{}` with `{}` params query.'\
.format(terms, query_parts, query_params)
logger.debug(deb_msg)
q = text('\n'.join(query_parts)), query_params
logger.debug('Dataset search query: {}'.format(q))
return q | Creates a query for dataset from decomposed search terms.
Args:
terms (dict or unicode or string):
Returns:
tuple of (TextClause, dict): First element is FTS query, second is parameters
of the query. Element of the execution of the query is pair: (vid, score). | Below is the the instruction that describes the task:
### Input:
Creates a query for dataset from decomposed search terms.
Args:
terms (dict or unicode or string):
Returns:
tuple of (TextClause, dict): First element is FTS query, second is parameters
of the query. Element of the execution of the query is pair: (vid, score).
### Response:
def _make_query_from_terms(self, terms, limit=None):
""" Creates a query for dataset from decomposed search terms.
Args:
terms (dict or unicode or string):
Returns:
tuple of (TextClause, dict): First element is FTS query, second is parameters
of the query. Element of the execution of the query is pair: (vid, score).
"""
expanded_terms = self._expand_terms(terms)
if expanded_terms['doc']:
# create query with real score.
query_parts = ["SELECT vid, ts_rank_cd(setweight(doc,'C'), to_tsquery(:doc)) as score"]
if expanded_terms['doc'] and expanded_terms['keywords']:
query_parts = ["SELECT vid, ts_rank_cd(setweight(doc,'C'), to_tsquery(:doc)) "
" + ts_rank_cd(setweight(to_tsvector(coalesce(keywords::text,'')),'B'), to_tsquery(:keywords))"
' as score']
else:
# create query with score = 1 because query will not touch doc field.
query_parts = ['SELECT vid, 1 as score']
query_parts.append('FROM dataset_index')
query_params = {}
where_counter = 0
if expanded_terms['doc']:
where_counter += 1
query_parts.append('WHERE doc @@ to_tsquery(:doc)')
query_params['doc'] = self.backend._and_join(expanded_terms['doc'])
if expanded_terms['keywords']:
query_params['keywords'] = self.backend._and_join(expanded_terms['keywords'])
kw_q = "to_tsvector(coalesce(keywords::text,'')) @@ to_tsquery(:keywords)"
query_parts.append( ("AND " if where_counter else "WHERE ") + kw_q )
query_parts.append('ORDER BY score DESC')
if limit:
query_parts.append('LIMIT :limit')
query_params['limit'] = limit
query_parts.append(';')
deb_msg = 'Dataset terms conversion: `{}` terms converted to `{}` with `{}` params query.'\
.format(terms, query_parts, query_params)
logger.debug(deb_msg)
q = text('\n'.join(query_parts)), query_params
logger.debug('Dataset search query: {}'.format(q))
return q |
def mark_featured(self, request, queryset):
"""
Mark selected as featured post.
"""
queryset.update(featured=True)
self.message_user(
request, _('Selected entries are now marked as featured.')) | Mark selected as featured post. | Below is the the instruction that describes the task:
### Input:
Mark selected as featured post.
### Response:
def mark_featured(self, request, queryset):
"""
Mark selected as featured post.
"""
queryset.update(featured=True)
self.message_user(
request, _('Selected entries are now marked as featured.')) |
def save_data(self, trigger_id, **data):
"""
get the data from the service
:param trigger_id: id of the trigger
:params data, dict
:rtype: dict
"""
status = False
service = TriggerService.objects.get(id=trigger_id)
desc = service.description
slack = Slack.objects.get(trigger_id=trigger_id)
title = self.set_title(data)
if title is None:
title = data.get('subject')
type_action = data.get('type_action', '')
# set the bot username of Slack to the name of the
# provider service
username = service.provider.name.name.split('Service')[1]
# 'build' a link
title_link = ''
if data.get('permalink'):
title_link = ': <' + data.get('permalink') + '|' + title + '>'
else:
title_link = ': <' + data.get('link') + '|' + title + '>'
data = '*' + desc + '*: ' + type_action + title_link
payload = {'username': username,
'text': data}
r = requests.post(slack.webhook_url, json=payload)
if r.status_code == requests.codes.ok:
status = True
# return the data
return status | get the data from the service
:param trigger_id: id of the trigger
:params data, dict
:rtype: dict | Below is the the instruction that describes the task:
### Input:
get the data from the service
:param trigger_id: id of the trigger
:params data, dict
:rtype: dict
### Response:
def save_data(self, trigger_id, **data):
"""
get the data from the service
:param trigger_id: id of the trigger
:params data, dict
:rtype: dict
"""
status = False
service = TriggerService.objects.get(id=trigger_id)
desc = service.description
slack = Slack.objects.get(trigger_id=trigger_id)
title = self.set_title(data)
if title is None:
title = data.get('subject')
type_action = data.get('type_action', '')
# set the bot username of Slack to the name of the
# provider service
username = service.provider.name.name.split('Service')[1]
# 'build' a link
title_link = ''
if data.get('permalink'):
title_link = ': <' + data.get('permalink') + '|' + title + '>'
else:
title_link = ': <' + data.get('link') + '|' + title + '>'
data = '*' + desc + '*: ' + type_action + title_link
payload = {'username': username,
'text': data}
r = requests.post(slack.webhook_url, json=payload)
if r.status_code == requests.codes.ok:
status = True
# return the data
return status |
def list_records_for_build_configuration(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
"""
List all BuildRecords for a given BuildConfiguration
"""
data = list_records_for_build_configuration_raw(id, name, page_size, page_index, sort, q)
if data:
return utils.format_json_list(data) | List all BuildRecords for a given BuildConfiguration | Below is the the instruction that describes the task:
### Input:
List all BuildRecords for a given BuildConfiguration
### Response:
def list_records_for_build_configuration(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
"""
List all BuildRecords for a given BuildConfiguration
"""
data = list_records_for_build_configuration_raw(id, name, page_size, page_index, sort, q)
if data:
return utils.format_json_list(data) |
def smoothing(labels, smoothing_window):
""" Applies a smoothing on VAD"""
if numpy.sum(labels)< smoothing_window:
return labels
segments = []
for k in range(1,len(labels)-1):
if labels[k]==0 and labels[k-1]==1 and labels[k+1]==1 :
labels[k]=1
for k in range(1,len(labels)-1):
if labels[k]==1 and labels[k-1]==0 and labels[k+1]==0 :
labels[k]=0
seg = numpy.array([0,0,labels[0]])
for k in range(1,len(labels)):
if labels[k] != labels[k-1]:
seg[1]=k-1
segments.append(seg)
seg = numpy.array([k,k,labels[k]])
seg[1]=len(labels)-1
segments.append(seg)
if len(segments) < 2:
return labels
curr = segments[0]
next = segments[1]
# Look at the first segment. If it's short enough, just change its labels
if (curr[1]-curr[0]+1) < smoothing_window and (next[1]-next[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
for k in range(1,len(segments)-1):
prev = segments[k-1]
curr = segments[k]
next = segments[k+1]
if (curr[1]-curr[0]+1) < smoothing_window and (prev[1]-prev[0]+1) > smoothing_window and (next[1]-next[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
prev = segments[-2]
curr = segments[-1]
if (curr[1]-curr[0]+1) < smoothing_window and (prev[1]-prev[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #if curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
return labels | Applies a smoothing on VAD | Below is the the instruction that describes the task:
### Input:
Applies a smoothing on VAD
### Response:
def smoothing(labels, smoothing_window):
""" Applies a smoothing on VAD"""
if numpy.sum(labels)< smoothing_window:
return labels
segments = []
for k in range(1,len(labels)-1):
if labels[k]==0 and labels[k-1]==1 and labels[k+1]==1 :
labels[k]=1
for k in range(1,len(labels)-1):
if labels[k]==1 and labels[k-1]==0 and labels[k+1]==0 :
labels[k]=0
seg = numpy.array([0,0,labels[0]])
for k in range(1,len(labels)):
if labels[k] != labels[k-1]:
seg[1]=k-1
segments.append(seg)
seg = numpy.array([k,k,labels[k]])
seg[1]=len(labels)-1
segments.append(seg)
if len(segments) < 2:
return labels
curr = segments[0]
next = segments[1]
# Look at the first segment. If it's short enough, just change its labels
if (curr[1]-curr[0]+1) < smoothing_window and (next[1]-next[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
for k in range(1,len(segments)-1):
prev = segments[k-1]
curr = segments[k]
next = segments[k+1]
if (curr[1]-curr[0]+1) < smoothing_window and (prev[1]-prev[0]+1) > smoothing_window and (next[1]-next[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
prev = segments[-2]
curr = segments[-1]
if (curr[1]-curr[0]+1) < smoothing_window and (prev[1]-prev[0]+1) > smoothing_window:
if curr[2]==1:
labels[curr[0] : (curr[1]+1)] = numpy.zeros(curr[1] - curr[0] + 1)
curr[2]=0
else: #if curr[2]==0
labels[curr[0] : (curr[1]+1)] = numpy.ones(curr[1] - curr[0] + 1)
curr[2]=1
return labels |
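A small usage sketch for the routine above with a synthetic label vector; numpy is the only dependency and the window length is chosen arbitrarily.
import numpy

# Synthetic VAD labels with an isolated drop-out and a short spurious segment.
labels = numpy.array([1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0])
smoothed = smoothing(labels.copy(), smoothing_window=3)
print(smoothed)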
def sendLocalVoiceClips(
self, clip_paths, message=None, thread_id=None, thread_type=ThreadType.USER
):
"""
Sends local voice clips to a thread
:param clip_paths: Paths of clips to upload and send
:param message: Additional message
:param thread_id: User/Group ID to send to. See :ref:`intro_threads`
:param thread_type: See :ref:`intro_threads`
:type thread_type: models.ThreadType
:return: :ref:`Message ID <intro_message_ids>` of the sent files
:raises: FBchatException if request failed
"""
clip_paths = require_list(clip_paths)
with get_files_from_paths(clip_paths) as x:
files = self._upload(x, voice_clip=True)
return self._sendFiles(
files=files, message=message, thread_id=thread_id, thread_type=thread_type
) | Sends local voice clips to a thread
:param clip_paths: Paths of clips to upload and send
:param message: Additional message
:param thread_id: User/Group ID to send to. See :ref:`intro_threads`
:param thread_type: See :ref:`intro_threads`
:type thread_type: models.ThreadType
:return: :ref:`Message ID <intro_message_ids>` of the sent files
:raises: FBchatException if request failed | Below is the the instruction that describes the task:
### Input:
Sends local voice clips to a thread
:param clip_paths: Paths of clips to upload and send
:param message: Additional message
:param thread_id: User/Group ID to send to. See :ref:`intro_threads`
:param thread_type: See :ref:`intro_threads`
:type thread_type: models.ThreadType
:return: :ref:`Message ID <intro_message_ids>` of the sent files
:raises: FBchatException if request failed
### Response:
def sendLocalVoiceClips(
self, clip_paths, message=None, thread_id=None, thread_type=ThreadType.USER
):
"""
Sends local voice clips to a thread
:param clip_paths: Paths of clips to upload and send
:param message: Additional message
:param thread_id: User/Group ID to send to. See :ref:`intro_threads`
:param thread_type: See :ref:`intro_threads`
:type thread_type: models.ThreadType
:return: :ref:`Message ID <intro_message_ids>` of the sent files
:raises: FBchatException if request failed
"""
clip_paths = require_list(clip_paths)
with get_files_from_paths(clip_paths) as x:
files = self._upload(x, voice_clip=True)
return self._sendFiles(
files=files, message=message, thread_id=thread_id, thread_type=thread_type
) |
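A hedged usage sketch, assuming a logged-in fbchat client and an existing thread; the e-mail, password, clip path and thread id below are all placeholders.
from fbchat import Client
from fbchat.models import ThreadType

client = Client('<email>', '<password>')            # placeholder credentials
client.sendLocalVoiceClips(['clip.aac'],            # placeholder local file
                           thread_id='<thread id>',
                           thread_type=ThreadType.USER)
client.logout()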
def get_file_network_traffic(self, resources):
"""Retrieves a report about the network traffic of a md5, sha1, and/or sha2 hash of
file, when it is executed.
Args:
resources: list of string hashes.
"""
api_name = 'virustotal-file-network-traffic'
api_endpoint = 'file/network-traffic'
return self._extract_all_responses(resources, api_endpoint, api_name) | Retrieves a report about the network traffic of a md5, sha1, and/or sha2 hash of
file, when it is executed.
Args:
resources: list of string hashes. | Below is the the instruction that describes the task:
### Input:
Retrieves a report about the network traffic of a md5, sha1, and/or sha2 hash of
file, when it is executed.
Args:
resources: list of string hashes.
### Response:
def get_file_network_traffic(self, resources):
"""Retrieves a report about the network traffic of a md5, sha1, and/or sha2 hash of
file, when it is executed.
Args:
resources: list of string hashes.
"""
api_name = 'virustotal-file-network-traffic'
api_endpoint = 'file/network-traffic'
return self._extract_all_responses(resources, api_endpoint, api_name) |
def refactor_docstring(self, input, filename):
"""Refactors a docstring, looking for doctests.
This returns a modified version of the input string. It looks
for doctests, which start with a ">>>" prompt, and may be
continued with "..." prompts, as long as the "..." is indented
the same as the ">>>".
(Unfortunately we can't use the doctest module's parser,
since, like most parsers, it is not geared towards preserving
the original source.)
"""
result = []
block = None
block_lineno = None
indent = None
lineno = 0
for line in input.splitlines(True):
lineno += 1
if line.lstrip().startswith(self.PS1):
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block_lineno = lineno
block = [line]
i = line.find(self.PS1)
indent = line[:i]
elif (indent is not None and
(line.startswith(indent + self.PS2) or
line == indent + self.PS2.rstrip() + u"\n")):
block.append(line)
else:
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block = None
indent = None
result.append(line)
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
return u"".join(result) | Refactors a docstring, looking for doctests.
This returns a modified version of the input string. It looks
for doctests, which start with a ">>>" prompt, and may be
continued with "..." prompts, as long as the "..." is indented
the same as the ">>>".
(Unfortunately we can't use the doctest module's parser,
since, like most parsers, it is not geared towards preserving
the original source.) | Below is the the instruction that describes the task:
### Input:
Refactors a docstring, looking for doctests.
This returns a modified version of the input string. It looks
for doctests, which start with a ">>>" prompt, and may be
continued with "..." prompts, as long as the "..." is indented
the same as the ">>>".
(Unfortunately we can't use the doctest module's parser,
since, like most parsers, it is not geared towards preserving
the original source.)
### Response:
def refactor_docstring(self, input, filename):
"""Refactors a docstring, looking for doctests.
This returns a modified version of the input string. It looks
for doctests, which start with a ">>>" prompt, and may be
continued with "..." prompts, as long as the "..." is indented
the same as the ">>>".
(Unfortunately we can't use the doctest module's parser,
since, like most parsers, it is not geared towards preserving
the original source.)
"""
result = []
block = None
block_lineno = None
indent = None
lineno = 0
for line in input.splitlines(True):
lineno += 1
if line.lstrip().startswith(self.PS1):
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block_lineno = lineno
block = [line]
i = line.find(self.PS1)
indent = line[:i]
elif (indent is not None and
(line.startswith(indent + self.PS2) or
line == indent + self.PS2.rstrip() + u"\n")):
block.append(line)
else:
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block = None
indent = None
result.append(line)
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
return u"".join(result) |
def init_board(self):
"""Init a valid board by given settings.
Parameters
----------
mine_map : numpy.ndarray
the map that defines the mine
0 is empty, 1 is mine
info_map : numpy.ndarray
the map that presents to gamer
            0-8 is number of mines in surrounding.
9 is flagged field.
10 is questioned field.
11 is undiscovered field.
12 is a mine field.
"""
self.mine_map = np.zeros((self.board_height, self.board_width),
dtype=np.uint8)
idx_list = np.random.permutation(self.board_width*self.board_height)
idx_list = idx_list[:self.num_mines]
for idx in idx_list:
idx_x = int(idx % self.board_width)
idx_y = int(idx / self.board_width)
self.mine_map[idx_y, idx_x] = 1
self.info_map = np.ones((self.board_height, self.board_width),
dtype=np.uint8)*11 | Init a valid board by given settings.
Parameters
----------
mine_map : numpy.ndarray
the map that defines the mine
0 is empty, 1 is mine
info_map : numpy.ndarray
the map that presents to gamer
    0-8 is number of mines in surrounding.
9 is flagged field.
10 is questioned field.
11 is undiscovered field.
12 is a mine field. | Below is the the instruction that describes the task:
### Input:
Init a valid board by given settings.
Parameters
----------
mine_map : numpy.ndarray
the map that defines the mine
0 is empty, 1 is mine
info_map : numpy.ndarray
the map that presents to gamer
    0-8 is number of mines in surrounding.
9 is flagged field.
10 is questioned field.
11 is undiscovered field.
12 is a mine field.
### Response:
def init_board(self):
"""Init a valid board by given settings.
Parameters
----------
mine_map : numpy.ndarray
the map that defines the mine
0 is empty, 1 is mine
info_map : numpy.ndarray
the map that presents to gamer
            0-8 is number of mines in surrounding.
9 is flagged field.
10 is questioned field.
11 is undiscovered field.
12 is a mine field.
"""
self.mine_map = np.zeros((self.board_height, self.board_width),
dtype=np.uint8)
idx_list = np.random.permutation(self.board_width*self.board_height)
idx_list = idx_list[:self.num_mines]
for idx in idx_list:
idx_x = int(idx % self.board_width)
idx_y = int(idx / self.board_width)
self.mine_map[idx_y, idx_x] = 1
self.info_map = np.ones((self.board_height, self.board_width),
dtype=np.uint8)*11 |
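To see the placement logic in isolation, a standalone sketch with assumed toy dimensions; the class attributes are replaced by plain variables.
import numpy as np

board_height, board_width, num_mines = 4, 5, 3   # assumed toy values
mine_map = np.zeros((board_height, board_width), dtype=np.uint8)
idx_list = np.random.permutation(board_width * board_height)[:num_mines]
for idx in idx_list:
    idx_x = int(idx % board_width)    # column
    idx_y = int(idx // board_width)   # row
    mine_map[idx_y, idx_x] = 1
info_map = np.ones((board_height, board_width), dtype=np.uint8) * 11
print(mine_map)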
def _register_rp(session, url_prefix, rp_name):
"""Synchronously register the RP is paremeter.
Return False if we have a reason to believe this didn't work
"""
post_url = "{}providers/{}/register?api-version=2016-02-01".format(url_prefix, rp_name)
get_url = "{}providers/{}?api-version=2016-02-01".format(url_prefix, rp_name)
_LOGGER.warning("Resource provider '%s' used by this operation is not "
"registered. We are registering for you.", rp_name)
post_response = session.post(post_url)
if post_response.status_code != 200:
_LOGGER.warning("Registration failed. Please register manually.")
return False
while True:
time.sleep(10)
rp_info = session.get(get_url).json()
if rp_info['registrationState'] == 'Registered':
_LOGGER.warning("Registration succeeded.")
        return True | Synchronously register the RP passed as parameter.
Return False if we have a reason to believe this didn't work | Below is the the instruction that describes the task:
### Input:
Synchronously register the RP passed as parameter.
Return False if we have a reason to believe this didn't work
### Response:
def _register_rp(session, url_prefix, rp_name):
"""Synchronously register the RP is paremeter.
Return False if we have a reason to believe this didn't work
"""
post_url = "{}providers/{}/register?api-version=2016-02-01".format(url_prefix, rp_name)
get_url = "{}providers/{}?api-version=2016-02-01".format(url_prefix, rp_name)
_LOGGER.warning("Resource provider '%s' used by this operation is not "
"registered. We are registering for you.", rp_name)
post_response = session.post(post_url)
if post_response.status_code != 200:
_LOGGER.warning("Registration failed. Please register manually.")
return False
while True:
time.sleep(10)
rp_info = session.get(get_url).json()
if rp_info['registrationState'] == 'Registered':
_LOGGER.warning("Registration succeeded.")
return True |
def type_name(value):
"""
Returns a user-readable name for the type of an object
:param value:
A value to get the type name of
:return:
A unicode string of the object's type name
"""
if inspect.isclass(value):
cls = value
else:
cls = value.__class__
if cls.__module__ in set(['builtins', '__builtin__']):
return cls.__name__
return '%s.%s' % (cls.__module__, cls.__name__) | Returns a user-readable name for the type of an object
:param value:
A value to get the type name of
:return:
A unicode string of the object's type name | Below is the the instruction that describes the task:
### Input:
Returns a user-readable name for the type of an object
:param value:
A value to get the type name of
:return:
A unicode string of the object's type name
### Response:
def type_name(value):
"""
Returns a user-readable name for the type of an object
:param value:
A value to get the type name of
:return:
A unicode string of the object's type name
"""
if inspect.isclass(value):
cls = value
else:
cls = value.__class__
if cls.__module__ in set(['builtins', '__builtin__']):
return cls.__name__
return '%s.%s' % (cls.__module__, cls.__name__) |
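A few quick calls against the helper above (it relies on the inspect module being imported alongside it); the expected strings follow directly from the builtins check.
import collections

print(type_name(True))                        # -> 'bool'
print(type_name(collections.OrderedDict))     # -> 'collections.OrderedDict'
print(type_name(collections.OrderedDict()))   # -> 'collections.OrderedDict'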
def patched_forkpty(self):
"""Fork a new process with a new pseudo-terminal as controlling tty."""
pid, master_fd = self.original_os_forkpty()
if not pid:
_LOG('Fork detected. Reinstalling Manhole.')
self.reinstall()
return pid, master_fd | Fork a new process with a new pseudo-terminal as controlling tty. | Below is the the instruction that describes the task:
### Input:
Fork a new process with a new pseudo-terminal as controlling tty.
### Response:
def patched_forkpty(self):
"""Fork a new process with a new pseudo-terminal as controlling tty."""
pid, master_fd = self.original_os_forkpty()
if not pid:
_LOG('Fork detected. Reinstalling Manhole.')
self.reinstall()
return pid, master_fd |
def check_pkgs_integrity(filelist, logger, ftp_connector,
timeout=120, sleep_time=10):
"""
Checks if files are not being uploaded to server.
@timeout - time after which the script will register an error.
"""
ref_1 = []
ref_2 = []
i = 1
print >> sys.stdout, "\nChecking packages integrity."
for filename in filelist:
# ref_1.append(self.get_remote_file_size(filename))
get_remote_file_size(ftp_connector, filename, ref_1)
print >> sys.stdout, "\nGoing to sleep for %i sec." % (sleep_time,)
time.sleep(sleep_time)
while sleep_time*i < timeout:
for filename in filelist:
# ref_2.append(self.get_remote_file_size(filename))
get_remote_file_size(ftp_connector, filename, ref_2)
if ref_1 == ref_2:
print >> sys.stdout, "\nIntegrity OK:)"
logger.info("Packages integrity OK.")
break
else:
print >> sys.stdout, "\nWaiting %d time for itegrity..." % (i,)
logger.info("\nWaiting %d time for itegrity..." % (i,))
i += 1
ref_1, ref_2 = ref_2, []
time.sleep(sleep_time)
else:
not_finished_files = []
for count, val1 in enumerate(ref_1):
if val1 != ref_2[count]:
not_finished_files.append(filelist[count])
print >> sys.stdout, "\nOMG, OMG something wrong with integrity."
logger.error("Integrity check faild for files %s"
% (not_finished_files,)) | Checks if files are not being uploaded to server.
@timeout - time after which the script will register an error. | Below is the the instruction that describes the task:
### Input:
Checks if files are not being uploaded to server.
@timeout - time after which the script will register an error.
### Response:
def check_pkgs_integrity(filelist, logger, ftp_connector,
timeout=120, sleep_time=10):
"""
Checks if files are not being uploaded to server.
@timeout - time after which the script will register an error.
"""
ref_1 = []
ref_2 = []
i = 1
print >> sys.stdout, "\nChecking packages integrity."
for filename in filelist:
# ref_1.append(self.get_remote_file_size(filename))
get_remote_file_size(ftp_connector, filename, ref_1)
print >> sys.stdout, "\nGoing to sleep for %i sec." % (sleep_time,)
time.sleep(sleep_time)
while sleep_time*i < timeout:
for filename in filelist:
# ref_2.append(self.get_remote_file_size(filename))
get_remote_file_size(ftp_connector, filename, ref_2)
if ref_1 == ref_2:
print >> sys.stdout, "\nIntegrity OK:)"
logger.info("Packages integrity OK.")
break
else:
print >> sys.stdout, "\nWaiting %d time for itegrity..." % (i,)
logger.info("\nWaiting %d time for itegrity..." % (i,))
i += 1
ref_1, ref_2 = ref_2, []
time.sleep(sleep_time)
else:
not_finished_files = []
for count, val1 in enumerate(ref_1):
if val1 != ref_2[count]:
not_finished_files.append(filelist[count])
print >> sys.stdout, "\nOMG, OMG something wrong with integrity."
logger.error("Integrity check faild for files %s"
% (not_finished_files,)) |
def request(self, method, *, path=None, json=None,
params=None, headers=None, timeout=None,
backoff_cap=None, **kwargs):
"""Performs an HTTP request with the given parameters.
Implements exponential backoff.
If `ConnectionError` occurs, a timestamp equal to now +
the default delay (`BACKOFF_DELAY`) is assigned to the object.
The timestamp is in UTC. Next time the function is called, it either
waits till the timestamp is passed or raises `TimeoutError`.
If `ConnectionError` occurs two or more times in a row,
the retry count is incremented and the new timestamp is calculated
as now + the default delay multiplied by two to the power of the
number of retries.
If a request is successful, the backoff timestamp is removed,
the retry count is back to zero.
Args:
method (str): HTTP method (e.g.: ``'GET'``).
path (str): API endpoint path (e.g.: ``'/transactions'``).
json (dict): JSON data to send along with the request.
params (dict): Dictionary of URL (query) parameters.
headers (dict): Optional headers to pass to the request.
timeout (int): Optional timeout in seconds.
backoff_cap (int): The maximal allowed backoff delay in seconds
to be assigned to a node.
kwargs: Optional keyword arguments.
"""
backoff_timedelta = self.get_backoff_timedelta()
if timeout is not None and timeout < backoff_timedelta:
raise TimeoutError
if backoff_timedelta > 0:
time.sleep(backoff_timedelta)
connExc = None
timeout = timeout if timeout is None else timeout - backoff_timedelta
try:
response = self._request(
method=method,
timeout=timeout,
url=self.node_url + path if path else self.node_url,
json=json,
params=params,
headers=headers,
**kwargs,
)
except ConnectionError as err:
connExc = err
raise err
finally:
self.update_backoff_time(success=connExc is None,
backoff_cap=backoff_cap)
return response | Performs an HTTP request with the given parameters.
Implements exponential backoff.
If `ConnectionError` occurs, a timestamp equal to now +
the default delay (`BACKOFF_DELAY`) is assigned to the object.
The timestamp is in UTC. Next time the function is called, it either
waits till the timestamp is passed or raises `TimeoutError`.
If `ConnectionError` occurs two or more times in a row,
the retry count is incremented and the new timestamp is calculated
as now + the default delay multiplied by two to the power of the
number of retries.
If a request is successful, the backoff timestamp is removed,
the retry count is back to zero.
Args:
method (str): HTTP method (e.g.: ``'GET'``).
path (str): API endpoint path (e.g.: ``'/transactions'``).
json (dict): JSON data to send along with the request.
params (dict): Dictionary of URL (query) parameters.
headers (dict): Optional headers to pass to the request.
timeout (int): Optional timeout in seconds.
backoff_cap (int): The maximal allowed backoff delay in seconds
to be assigned to a node.
kwargs: Optional keyword arguments. | Below is the the instruction that describes the task:
### Input:
Performs an HTTP request with the given parameters.
Implements exponential backoff.
If `ConnectionError` occurs, a timestamp equal to now +
the default delay (`BACKOFF_DELAY`) is assigned to the object.
The timestamp is in UTC. Next time the function is called, it either
waits till the timestamp is passed or raises `TimeoutError`.
If `ConnectionError` occurs two or more times in a row,
the retry count is incremented and the new timestamp is calculated
as now + the default delay multiplied by two to the power of the
number of retries.
If a request is successful, the backoff timestamp is removed,
the retry count is back to zero.
Args:
method (str): HTTP method (e.g.: ``'GET'``).
path (str): API endpoint path (e.g.: ``'/transactions'``).
json (dict): JSON data to send along with the request.
params (dict): Dictionary of URL (query) parameters.
headers (dict): Optional headers to pass to the request.
timeout (int): Optional timeout in seconds.
backoff_cap (int): The maximal allowed backoff delay in seconds
to be assigned to a node.
kwargs: Optional keyword arguments.
### Response:
def request(self, method, *, path=None, json=None,
params=None, headers=None, timeout=None,
backoff_cap=None, **kwargs):
"""Performs an HTTP request with the given parameters.
Implements exponential backoff.
If `ConnectionError` occurs, a timestamp equal to now +
the default delay (`BACKOFF_DELAY`) is assigned to the object.
The timestamp is in UTC. Next time the function is called, it either
waits till the timestamp is passed or raises `TimeoutError`.
If `ConnectionError` occurs two or more times in a row,
the retry count is incremented and the new timestamp is calculated
as now + the default delay multiplied by two to the power of the
number of retries.
If a request is successful, the backoff timestamp is removed,
the retry count is back to zero.
Args:
method (str): HTTP method (e.g.: ``'GET'``).
path (str): API endpoint path (e.g.: ``'/transactions'``).
json (dict): JSON data to send along with the request.
params (dict): Dictionary of URL (query) parameters.
headers (dict): Optional headers to pass to the request.
timeout (int): Optional timeout in seconds.
backoff_cap (int): The maximal allowed backoff delay in seconds
to be assigned to a node.
kwargs: Optional keyword arguments.
"""
backoff_timedelta = self.get_backoff_timedelta()
if timeout is not None and timeout < backoff_timedelta:
raise TimeoutError
if backoff_timedelta > 0:
time.sleep(backoff_timedelta)
connExc = None
timeout = timeout if timeout is None else timeout - backoff_timedelta
try:
response = self._request(
method=method,
timeout=timeout,
url=self.node_url + path if path else self.node_url,
json=json,
params=params,
headers=headers,
**kwargs,
)
except ConnectionError as err:
connExc = err
raise err
finally:
self.update_backoff_time(success=connExc is None,
backoff_cap=backoff_cap)
return response |
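The delay schedule described in the docstring grows as the base delay times 2**retries and is clipped by backoff_cap; a tiny illustration with an assumed 0.5 s base delay (the real constant lives in BACKOFF_DELAY).
BACKOFF_DELAY = 0.5   # assumed base delay in seconds
backoff_cap = 10      # assumed cap in seconds

for retries in range(6):
    delay = min(BACKOFF_DELAY * 2 ** retries, backoff_cap)
    print(retries, delay)   # 0.5, 1.0, 2.0, 4.0, 8.0, 10.0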
def _setSolidEdgeGeometry(self):
"""Sets the solid edge line geometry if needed"""
if self._lineLengthEdge is not None:
cr = self.contentsRect()
# contents margin usually gives 1
# cursor rectangle left edge for the very first character usually
# gives 4
x = self.fontMetrics().width('9' * self._lineLengthEdge) + \
self._totalMarginWidth + \
self.contentsMargins().left() + \
self.__cursorRect(self.firstVisibleBlock(), 0, offset=0).left()
self._solidEdgeLine.setGeometry(QRect(x, cr.top(), 1, cr.bottom())) | Sets the solid edge line geometry if needed | Below is the the instruction that describes the task:
### Input:
Sets the solid edge line geometry if needed
### Response:
def _setSolidEdgeGeometry(self):
"""Sets the solid edge line geometry if needed"""
if self._lineLengthEdge is not None:
cr = self.contentsRect()
# contents margin usually gives 1
# cursor rectangle left edge for the very first character usually
# gives 4
x = self.fontMetrics().width('9' * self._lineLengthEdge) + \
self._totalMarginWidth + \
self.contentsMargins().left() + \
self.__cursorRect(self.firstVisibleBlock(), 0, offset=0).left()
self._solidEdgeLine.setGeometry(QRect(x, cr.top(), 1, cr.bottom())) |
def makeProducer(self, request, fileForReading):
"""
Make a L{StaticProducer} that will produce the body of this response.
This method will also set the response code and Content-* headers.
@param request: The L{Request} object.
@param fileForReading: The file object containing the resource.
@return: A L{StaticProducer}. Calling C{.start()} on this will begin
producing the response.
"""
byteRange = request.getHeader(b'range')
if byteRange is None or not self.getFileSize():
self._setContentHeaders(request)
request.setResponseCode(http.OK)
return NoRangeStaticProducer(request, fileForReading)
try:
parsedRanges = self._parseRangeHeader(byteRange)
except ValueError:
logger.warning("Ignoring malformed Range header %r" % (byteRange,))
self._setContentHeaders(request)
request.setResponseCode(http.OK)
return NoRangeStaticProducer(request, fileForReading)
if len(parsedRanges) == 1:
offset, size = self._doSingleRangeRequest(
request, parsedRanges[0])
self._setContentHeaders(request, size)
return SingleRangeStaticProducer(
request, fileForReading, offset, size)
else:
rangeInfo = self._doMultipleRangeRequest(request, parsedRanges)
return MultipleRangeStaticProducer(
request, fileForReading, rangeInfo) | Make a L{StaticProducer} that will produce the body of this response.
This method will also set the response code and Content-* headers.
@param request: The L{Request} object.
@param fileForReading: The file object containing the resource.
@return: A L{StaticProducer}. Calling C{.start()} on this will begin
producing the response. | Below is the the instruction that describes the task:
### Input:
Make a L{StaticProducer} that will produce the body of this response.
This method will also set the response code and Content-* headers.
@param request: The L{Request} object.
@param fileForReading: The file object containing the resource.
@return: A L{StaticProducer}. Calling C{.start()} on this will begin
producing the response.
### Response:
def makeProducer(self, request, fileForReading):
"""
Make a L{StaticProducer} that will produce the body of this response.
This method will also set the response code and Content-* headers.
@param request: The L{Request} object.
@param fileForReading: The file object containing the resource.
@return: A L{StaticProducer}. Calling C{.start()} on this will begin
producing the response.
"""
byteRange = request.getHeader(b'range')
if byteRange is None or not self.getFileSize():
self._setContentHeaders(request)
request.setResponseCode(http.OK)
return NoRangeStaticProducer(request, fileForReading)
try:
parsedRanges = self._parseRangeHeader(byteRange)
except ValueError:
logger.warning("Ignoring malformed Range header %r" % (byteRange,))
self._setContentHeaders(request)
request.setResponseCode(http.OK)
return NoRangeStaticProducer(request, fileForReading)
if len(parsedRanges) == 1:
offset, size = self._doSingleRangeRequest(
request, parsedRanges[0])
self._setContentHeaders(request, size)
return SingleRangeStaticProducer(
request, fileForReading, offset, size)
else:
rangeInfo = self._doMultipleRangeRequest(request, parsedRanges)
return MultipleRangeStaticProducer(
request, fileForReading, rangeInfo) |
def import_yaml(file_name, **kwargs):
""" Imports curves and surfaces from files in YAML format.
.. note::
Requires `ruamel.yaml <https://pypi.org/project/ruamel.yaml/>`_ package.
Use ``jinja2=True`` to activate Jinja2 template processing. Please refer to the documentation for details.
:param file_name: name of the input file
:type file_name: str
:return: a list of rational spline geometries
:rtype: list
:raises GeomdlException: an error occurred reading the file
"""
def callback(data):
yaml = YAML()
return yaml.load(data)
# Check if it is possible to import 'ruamel.yaml'
try:
from ruamel.yaml import YAML
except ImportError:
raise exch.GeomdlException("Please install 'ruamel.yaml' package to use YAML format: pip install ruamel.yaml")
# Get keyword arguments
delta = kwargs.get('delta', -1.0)
use_template = kwargs.get('jinja2', False)
# Read file
file_src = exch.read_file(file_name)
# Import data
return exch.import_dict_str(file_src=file_src, delta=delta, callback=callback, tmpl=use_template) | Imports curves and surfaces from files in YAML format.
.. note::
Requires `ruamel.yaml <https://pypi.org/project/ruamel.yaml/>`_ package.
Use ``jinja2=True`` to activate Jinja2 template processing. Please refer to the documentation for details.
:param file_name: name of the input file
:type file_name: str
:return: a list of rational spline geometries
:rtype: list
:raises GeomdlException: an error occurred reading the file | Below is the the instruction that describes the task:
### Input:
Imports curves and surfaces from files in YAML format.
.. note::
Requires `ruamel.yaml <https://pypi.org/project/ruamel.yaml/>`_ package.
Use ``jinja2=True`` to activate Jinja2 template processing. Please refer to the documentation for details.
:param file_name: name of the input file
:type file_name: str
:return: a list of rational spline geometries
:rtype: list
:raises GeomdlException: an error occurred reading the file
### Response:
def import_yaml(file_name, **kwargs):
""" Imports curves and surfaces from files in YAML format.
.. note::
Requires `ruamel.yaml <https://pypi.org/project/ruamel.yaml/>`_ package.
Use ``jinja2=True`` to activate Jinja2 template processing. Please refer to the documentation for details.
:param file_name: name of the input file
:type file_name: str
:return: a list of rational spline geometries
:rtype: list
:raises GeomdlException: an error occurred reading the file
"""
def callback(data):
yaml = YAML()
return yaml.load(data)
# Check if it is possible to import 'ruamel.yaml'
try:
from ruamel.yaml import YAML
except ImportError:
raise exch.GeomdlException("Please install 'ruamel.yaml' package to use YAML format: pip install ruamel.yaml")
# Get keyword arguments
delta = kwargs.get('delta', -1.0)
use_template = kwargs.get('jinja2', False)
# Read file
file_src = exch.read_file(file_name)
# Import data
return exch.import_dict_str(file_src=file_src, delta=delta, callback=callback, tmpl=use_template) |
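A usage sketch, assuming a local geometry.yaml written in the library's expected format and ruamel.yaml installed; the file name is a placeholder.
# 'geometry.yaml' is a placeholder input file.
geoms = import_yaml('geometry.yaml')
for g in geoms:
    print(type(g).__name__, getattr(g, 'dimension', None))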
def setDisabledAlternateColor(self, color):
"""
Sets the alternate color used when drawing this node as disabled.
:param color | <QColor>
"""
color = QColor(color)
if self._palette is None:
self._palette = XNodePalette(self._scenePalette)
self._palette.setColor(self._palette.Disabled,
self._palette.NodeAlternateBackground,
color)
self.setDirty() | Sets the alternate color used when drawing this node as disabled.
:param color | <QColor> | Below is the the instruction that describes the task:
### Input:
Sets the alternate color used when drawing this node as disabled.
:param color | <QColor>
### Response:
def setDisabledAlternateColor(self, color):
"""
Sets the alternate color used when drawing this node as disabled.
:param color | <QColor>
"""
color = QColor(color)
if self._palette is None:
self._palette = XNodePalette(self._scenePalette)
self._palette.setColor(self._palette.Disabled,
self._palette.NodeAlternateBackground,
color)
self.setDirty() |
def remote_file(self, branch='master', filename=''):
"""Read the remote file on Git Server.
Args:
branch (str): Git Branch to find file.
filename (str): Name of file to retrieve relative to root of
repository.
Returns:
str: Contents of remote file.
Raises:
FileNotFoundError: Requested file missing.
"""
LOG.info('Retrieving "%s" from "%s".', filename, self.git_short)
file_contents = ''
try:
file_blob = self.project.files.get(file_path=filename, ref=branch)
except gitlab.exceptions.GitlabGetError:
file_blob = None
LOG.debug('GitLab file response:\n%s', file_blob)
if not file_blob:
msg = 'Project "{0}" is missing file "{1}" in "{2}" branch.'.format(self.git_short, filename, branch)
LOG.warning(msg)
raise FileNotFoundError(msg)
else:
file_contents = b64decode(file_blob.content).decode()
LOG.debug('Remote file contents:\n%s', file_contents)
return file_contents | Read the remote file on Git Server.
Args:
branch (str): Git Branch to find file.
filename (str): Name of file to retrieve relative to root of
repository.
Returns:
str: Contents of remote file.
Raises:
FileNotFoundError: Requested file missing. | Below is the the instruction that describes the task:
### Input:
Read the remote file on Git Server.
Args:
branch (str): Git Branch to find file.
filename (str): Name of file to retrieve relative to root of
repository.
Returns:
str: Contents of remote file.
Raises:
FileNotFoundError: Requested file missing.
### Response:
def remote_file(self, branch='master', filename=''):
"""Read the remote file on Git Server.
Args:
branch (str): Git Branch to find file.
filename (str): Name of file to retrieve relative to root of
repository.
Returns:
str: Contents of remote file.
Raises:
FileNotFoundError: Requested file missing.
"""
LOG.info('Retrieving "%s" from "%s".', filename, self.git_short)
file_contents = ''
try:
file_blob = self.project.files.get(file_path=filename, ref=branch)
except gitlab.exceptions.GitlabGetError:
file_blob = None
LOG.debug('GitLab file response:\n%s', file_blob)
if not file_blob:
msg = 'Project "{0}" is missing file "{1}" in "{2}" branch.'.format(self.git_short, filename, branch)
LOG.warning(msg)
raise FileNotFoundError(msg)
else:
file_contents = b64decode(file_blob.content).decode()
LOG.debug('Remote file contents:\n%s', file_contents)
return file_contents |
def build_if_needed(db):
"""Little helper method for making tables in SQL-Alchemy with SQLite"""
if len(db.engine.table_names()) == 0:
# import all classes here
from my_site.models.tables.user import User
db.create_all() | Little helper method for making tables in SQL-Alchemy with SQLite | Below is the the instruction that describes the task:
### Input:
Little helper method for making tables in SQL-Alchemy with SQLite
### Response:
def build_if_needed(db):
"""Little helper method for making tables in SQL-Alchemy with SQLite"""
if len(db.engine.table_names()) == 0:
# import all classes here
from my_site.models.tables.user import User
db.create_all() |
def _start(self):
"""
Starts the instantiation queue (called by its bundle activator)
"""
try:
# Try to register to factory events
with use_ipopo(self.__context) as ipopo:
ipopo.add_listener(self)
except BundleException:
# Service not yet present
pass
# Register the iPOPO service listener
self.__context.add_service_listener(self, specification=SERVICE_IPOPO) | Starts the instantiation queue (called by its bundle activator) | Below is the the instruction that describes the task:
### Input:
Starts the instantiation queue (called by its bundle activator)
### Response:
def _start(self):
"""
Starts the instantiation queue (called by its bundle activator)
"""
try:
# Try to register to factory events
with use_ipopo(self.__context) as ipopo:
ipopo.add_listener(self)
except BundleException:
# Service not yet present
pass
# Register the iPOPO service listener
self.__context.add_service_listener(self, specification=SERVICE_IPOPO) |
def parse_cookie(self, string):
'''
Parses a cookie string like returned in a Set-Cookie header
@param string: The cookie string
@return: the cookie dict
'''
    results = re.findall(r'([^=]+)=([^\;]+);?\s?', string)
my_dict = {}
for item in results:
my_dict[item[0]] = item[1]
return my_dict | Parses a cookie string like returned in a Set-Cookie header
@param string: The cookie string
@return: the cookie dict | Below is the the instruction that describes the task:
### Input:
Parses a cookie string like returned in a Set-Cookie header
@param string: The cookie string
@return: the cookie dict
### Response:
def parse_cookie(self, string):
'''
Parses a cookie string like returned in a Set-Cookie header
@param string: The cookie string
@return: the cookie dict
'''
    results = re.findall(r'([^=]+)=([^\;]+);?\s?', string)
my_dict = {}
for item in results:
my_dict[item[0]] = item[1]
return my_dict |
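Because the method is just a findall over name=value pairs, its behaviour can be checked with the pattern directly; the cookie string below is made up.
import re

cookie = 'sessionid=abc123; Path=/; Secure=1'
pairs = re.findall(r'([^=]+)=([^\;]+);?\s?', cookie)
print(dict(pairs))   # {'sessionid': 'abc123', 'Path': '/', 'Secure': '1'}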
def run(self, fnames=None):
"""Run Python scripts"""
if fnames is None:
fnames = self.get_selected_filenames()
for fname in fnames:
self.sig_run.emit(fname) | Run Python scripts | Below is the the instruction that describes the task:
### Input:
Run Python scripts
### Response:
def run(self, fnames=None):
"""Run Python scripts"""
if fnames is None:
fnames = self.get_selected_filenames()
for fname in fnames:
self.sig_run.emit(fname) |
def static_rev(path):
"""
Gets a joined path with the STATIC_URL setting, and applies revisioning
depending on DEBUG setting.
Usage::
{% load rev %}
{% static_rev "css/base.css" %}
Example::
{% static_rev "css/base.css" %}
On DEBUG=True will return: /static/css/base.css?d9wdjs
On DEBUG=False will return: /static/css/base-d9wdjs.css
"""
static_path = StaticNode.handle_simple(path)
if is_debug():
return dev_url(static_path)
return production_url(path, static_path) | Gets a joined path with the STATIC_URL setting, and applies revisioning
depending on DEBUG setting.
Usage::
{% load rev %}
{% static_rev "css/base.css" %}
Example::
{% static_rev "css/base.css" %}
On DEBUG=True will return: /static/css/base.css?d9wdjs
On DEBUG=False will return: /static/css/base-d9wdjs.css | Below is the the instruction that describes the task:
### Input:
Gets a joined path with the STATIC_URL setting, and applies revisioning
depending on DEBUG setting.
Usage::
{% load rev %}
{% static_rev "css/base.css" %}
Example::
{% static_rev "css/base.css" %}
On DEBUG=True will return: /static/css/base.css?d9wdjs
On DEBUG=False will return: /static/css/base-d9wdjs.css
### Response:
def static_rev(path):
"""
Gets a joined path with the STATIC_URL setting, and applies revisioning
depending on DEBUG setting.
Usage::
{% load rev %}
{% static_rev "css/base.css" %}
Example::
{% static_rev "css/base.css" %}
On DEBUG=True will return: /static/css/base.css?d9wdjs
On DEBUG=False will return: /static/css/base-d9wdjs.css
"""
static_path = StaticNode.handle_simple(path)
if is_debug():
return dev_url(static_path)
return production_url(path, static_path) |