code (string, lengths 75-104k) | docstring (string, lengths 1-46.9k) | text (string, lengths 164-112k) |
---|---|---|
def put_job(self, id, body, params=None):
"""
`<>`_
:arg id: The ID of the job to create
:arg body: The job configuration
"""
for param in (id, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request(
"PUT", _make_path("_rollup", "job", id), params=params, body=body
) | `<>`_
:arg id: The ID of the job to create
:arg body: The job configuration | Below is the instruction that describes the task:
### Input:
`<>`_
:arg id: The ID of the job to create
:arg body: The job configuration
### Response:
def put_job(self, id, body, params=None):
"""
`<>`_
:arg id: The ID of the job to create
:arg body: The job configuration
"""
for param in (id, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request(
"PUT", _make_path("_rollup", "job", id), params=params, body=body
) |
def flatMap(self, f, preservesPartitioning=False):
"""Apply function f and flatten.
:param f: mapping function
:rtype: DStream
"""
return self.mapPartitions(
lambda p: (e for pp in p for e in f(pp)),
preservesPartitioning,
) | Apply function f and flatten.
:param f: mapping function
:rtype: DStream | Below is the instruction that describes the task:
### Input:
Apply function f and flatten.
:param f: mapping function
:rtype: DStream
### Response:
def flatMap(self, f, preservesPartitioning=False):
"""Apply function f and flatten.
:param f: mapping function
:rtype: DStream
"""
return self.mapPartitions(
lambda p: (e for pp in p for e in f(pp)),
preservesPartitioning,
) |
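For reference, a minimal self-contained sketch of the flatten semantics the flatMap method above describes, written against plain Python iterables instead of a real DStream; the helper name and sample data are illustrative only:

```python
# Illustrative only: mimics "apply f to every element, then flatten" on
# ordinary nested iterables instead of a DStream.
def flat_map_partitions(partitions, f):
    # one flat generator over every partition and every output of f
    return (out for part in partitions for elem in part for out in f(elem))

partitions = [[1, 2], [3]]
print(list(flat_map_partitions(partitions, lambda x: (x, x * 10))))
# -> [1, 10, 2, 20, 3, 30]
```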
def get_template_filelist(repo_path, ignore_files=[], ignore_folders=[]):
"""
input: local repo path
output: path list of files which need to be rendered
"""
default_ignore_files = ['.gitignore']
default_ignore_folders = ['.git']
# rebind (rather than +=) so the shared default lists and any caller-supplied lists are not mutated
ignore_files = list(ignore_files) + default_ignore_files
ignore_folders = list(ignore_folders) + default_ignore_folders
filelist = []
for root, folders, files in os.walk(repo_path):
for ignore_file in ignore_files:
if ignore_file in files:
files.remove(ignore_file)
for ignore_folder in ignore_folders:
if ignore_folder in folders:
folders.remove(ignore_folder)
for file_name in files:
filelist.append( '%s/%s' % (root, file_name))
return filelist | input: local repo path
output: path list of files which need to be rendered | Below is the instruction that describes the task:
### Input:
input: local repo path
output: path list of files which need to be rendered
### Response:
def get_template_filelist(repo_path, ignore_files=[], ignore_folders=[]):
"""
input: local repo path
output: path list of files which need to be rendered
"""
default_ignore_files = ['.gitignore']
default_ignore_folders = ['.git']
# rebind (rather than +=) so the shared default lists and any caller-supplied lists are not mutated
ignore_files = list(ignore_files) + default_ignore_files
ignore_folders = list(ignore_folders) + default_ignore_folders
filelist = []
for root, folders, files in os.walk(repo_path):
for ignore_file in ignore_files:
if ignore_file in files:
files.remove(ignore_file)
for ignore_folder in ignore_folders:
if ignore_folder in folders:
folders.remove(ignore_folder)
for file_name in files:
filelist.append( '%s/%s' % (root, file_name))
return filelist |
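get_template_filelist relies on os.walk re-reading the `folders` list in place, so removing a name from that list prunes the corresponding subtree from the walk. A minimal standalone sketch of that pruning idea (the directory name is just an example):

```python
import os

# Removing an entry from `folders` in place stops os.walk from descending
# into that directory on later iterations, which is how the ignore_folders
# filtering above takes effect.
for root, folders, files in os.walk('.'):
    if '.git' in folders:
        folders.remove('.git')  # prune the .git subtree
    print(root, files)
```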
def push_sample(self, x, timestamp=0.0, pushthrough=True):
"""Push a sample into the outlet.
Each entry in the list corresponds to one channel.
Keyword arguments:
x -- A list of values to push (one per channel).
timestamp -- Optionally the capture time of the sample, in agreement
with local_clock(); if omitted, the current
time is used. (default 0.0)
pushthrough -- Whether to push the sample through to the receivers
instead of buffering it with subsequent samples.
Note that the chunk_size, if specified at outlet
construction, takes precedence over the pushthrough flag.
(default True)
"""
if len(x) == self.channel_count:
if self.channel_format == cf_string:
x = [v.encode('utf-8') for v in x]
handle_error(self.do_push_sample(self.obj, self.sample_type(*x),
c_double(timestamp),
c_int(pushthrough)))
else:
raise ValueError("length of the data must correspond to the "
"stream's channel count.") | Push a sample into the outlet.
Each entry in the list corresponds to one channel.
Keyword arguments:
x -- A list of values to push (one per channel).
timestamp -- Optionally the capture time of the sample, in agreement
with local_clock(); if omitted, the current
time is used. (default 0.0)
pushthrough -- Whether to push the sample through to the receivers
instead of buffering it with subsequent samples.
Note that the chunk_size, if specified at outlet
construction, takes precedence over the pushthrough flag.
(default True) | Below is the instruction that describes the task:
### Input:
Push a sample into the outlet.
Each entry in the list corresponds to one channel.
Keyword arguments:
x -- A list of values to push (one per channel).
timestamp -- Optionally the capture time of the sample, in agreement
with local_clock(); if omitted, the current
time is used. (default 0.0)
pushthrough -- Whether to push the sample through to the receivers
instead of buffering it with subsequent samples.
Note that the chunk_size, if specified at outlet
construction, takes precedence over the pushthrough flag.
(default True)
### Response:
def push_sample(self, x, timestamp=0.0, pushthrough=True):
"""Push a sample into the outlet.
Each entry in the list corresponds to one channel.
Keyword arguments:
x -- A list of values to push (one per channel).
timestamp -- Optionally the capture time of the sample, in agreement
with local_clock(); if omitted, the current
time is used. (default 0.0)
pushthrough -- Whether to push the sample through to the receivers
instead of buffering it with subsequent samples.
Note that the chunk_size, if specified at outlet
construction, takes precedence over the pushthrough flag.
(default True)
"""
if len(x) == self.channel_count:
if self.channel_format == cf_string:
x = [v.encode('utf-8') for v in x]
handle_error(self.do_push_sample(self.obj, self.sample_type(*x),
c_double(timestamp),
c_int(pushthrough)))
else:
raise ValueError("length of the data must correspond to the "
"stream's channel count.") |
def _eliminate_leafs(self, graph):
"""
Eliminate leaf objects - that are objects not referencing any other
objects in the list `graph`. Returns the list of objects without the
objects identified as leafs.
"""
result = []
idset = set([id(x) for x in graph])
for n in graph:
refset = set([id(x) for x in get_referents(n)])
if refset.intersection(idset):
result.append(n)
return result | Eliminate leaf objects - that are objects not referencing any other
objects in the list `graph`. Returns the list of objects without the
objects identified as leafs. | Below is the instruction that describes the task:
### Input:
Eliminate leaf objects - that are objects not referencing any other
objects in the list `graph`. Returns the list of objects without the
objects identified as leafs.
### Response:
def _eliminate_leafs(self, graph):
"""
Eliminate leaf objects - that are objects not referencing any other
objects in the list `graph`. Returns the list of objects without the
objects identified as leafs.
"""
result = []
idset = set([id(x) for x in graph])
for n in graph:
refset = set([id(x) for x in get_referents(n)])
if refset.intersection(idset):
result.append(n)
return result |
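The same leaf test in isolation, using gc.get_referents directly on a few toy objects (illustrative only): a node is kept only when at least one of its referents is itself a member of the graph.

```python
from gc import get_referents

a = [1, 2]
b = [a]                  # b references a, so b is not a leaf
graph = [a, b, 'leaf']   # a and 'leaf' reference nothing else in the graph

ids = {id(x) for x in graph}
kept = [n for n in graph if ids & {id(r) for r in get_referents(n)}]
print(kept)              # [[[1, 2]]] -- only b survives
```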
def pull_request(ctx, base_branch, open_pr, stop_timer):
"""Create a new pull request for this issue."""
lancet = ctx.obj
review_status = lancet.config.get("tracker", "review_status")
remote_name = lancet.config.get("repository", "remote_name")
if not base_branch:
base_branch = lancet.config.get("repository", "base_branch")
# Get the issue
issue = get_issue(lancet)
transition = get_transition(ctx, lancet, issue, review_status)
# Get the working branch
branch = get_branch(lancet, issue, create=False)
with taskstatus("Checking pre-requisites") as ts:
if not branch:
ts.abort("No working branch found")
if lancet.tracker.whoami() not in issue.assignees:
ts.abort("Issue currently not assigned to you")
# TODO: Check mergeability
# TODO: Check remote status (PR does not already exist)
# Push to remote
with taskstatus('Pushing to "{}"', remote_name) as ts:
remote = lancet.repo.lookup_remote(remote_name)
if not remote:
ts.abort('Remote "{}" not found', remote_name)
from ..git import CredentialsCallbacks
remote.push([branch.name], callbacks=CredentialsCallbacks())
ts.ok('Pushed latest changes to "{}"', remote_name)
# Create pull request
with taskstatus("Creating pull request") as ts:
template_path = lancet.config.get("repository", "pr_template")
message = edit_template(template_path, issue=issue)
if not message:
ts.abort("You didn't provide a title for the pull request")
title, body = message.split("\n", 1)
title = title.strip()
if not title:
ts.abort("You didn't provide a title for the pull request")
try:
pr = lancet.scm_manager.create_pull_request(
branch.branch_name, base_branch, title, body.strip("\n")
)
except PullRequestAlreadyExists as e:
pr = e.pull_request
ts.ok("Pull request already exists at {}", pr.link)
else:
ts.ok("Pull request created at {}", pr.link)
# Update issue
set_issue_status(lancet, issue, review_status, transition)
# TODO: Post to activity stream on JIRA?
# TODO: Post to Slack?
# Stop harvest timer
if stop_timer:
with taskstatus("Pausing harvest timer") as ts:
lancet.timer.pause()
ts.ok("Harvest timer paused")
# Open the pull request page in the browser if requested
if open_pr:
click.launch(pr.link) | Create a new pull request for this issue. | Below is the instruction that describes the task:
### Input:
Create a new pull request for this issue.
### Response:
def pull_request(ctx, base_branch, open_pr, stop_timer):
"""Create a new pull request for this issue."""
lancet = ctx.obj
review_status = lancet.config.get("tracker", "review_status")
remote_name = lancet.config.get("repository", "remote_name")
if not base_branch:
base_branch = lancet.config.get("repository", "base_branch")
# Get the issue
issue = get_issue(lancet)
transition = get_transition(ctx, lancet, issue, review_status)
# Get the working branch
branch = get_branch(lancet, issue, create=False)
with taskstatus("Checking pre-requisites") as ts:
if not branch:
ts.abort("No working branch found")
if lancet.tracker.whoami() not in issue.assignees:
ts.abort("Issue currently not assigned to you")
# TODO: Check mergeability
# TODO: Check remote status (PR does not already exist)
# Push to remote
with taskstatus('Pushing to "{}"', remote_name) as ts:
remote = lancet.repo.lookup_remote(remote_name)
if not remote:
ts.abort('Remote "{}" not found', remote_name)
from ..git import CredentialsCallbacks
remote.push([branch.name], callbacks=CredentialsCallbacks())
ts.ok('Pushed latest changes to "{}"', remote_name)
# Create pull request
with taskstatus("Creating pull request") as ts:
template_path = lancet.config.get("repository", "pr_template")
message = edit_template(template_path, issue=issue)
if not message:
ts.abort("You didn't provide a title for the pull request")
title, body = message.split("\n", 1)
title = title.strip()
if not title:
ts.abort("You didn't provide a title for the pull request")
try:
pr = lancet.scm_manager.create_pull_request(
branch.branch_name, base_branch, title, body.strip("\n")
)
except PullRequestAlreadyExists as e:
pr = e.pull_request
ts.ok("Pull request already exists at {}", pr.link)
else:
ts.ok("Pull request created at {}", pr.link)
# Update issue
set_issue_status(lancet, issue, review_status, transition)
# TODO: Post to activity stream on JIRA?
# TODO: Post to Slack?
# Stop harvest timer
if stop_timer:
with taskstatus("Pausing harvest timer") as ts:
lancet.timer.pause()
ts.ok("Harvest timer paused")
# Open the pull request page in the browser if requested
if open_pr:
click.launch(pr.link) |
def queued(values, qsize):
"""
Queues up readings from *values* (the number of readings queued is
determined by *qsize*) and begins yielding values only when the queue is
full. For example, to "cascade" values along a sequence of LEDs::
from gpiozero import LEDBoard, Button
from gpiozero.tools import queued
from signal import pause
leds = LEDBoard(5, 6, 13, 19, 26)
btn = Button(17)
for i in range(4):
leds[i].source = queued(leds[i + 1], 5)
leds[i].source_delay = 0.01
leds[4].source = btn
pause()
"""
values = [_normalize(v) for v in values]
if qsize < 1:
raise ValueError("qsize must be 1 or larger")
q = []
it = iter(values)
try:
for i in range(qsize):
q.append(next(it))
for i in cycle(range(qsize)):
yield q[i]
q[i] = next(it)
except StopIteration:
pass | Queues up readings from *values* (the number of readings queued is
determined by *qsize*) and begins yielding values only when the queue is
full. For example, to "cascade" values along a sequence of LEDs::
from gpiozero import LEDBoard, Button
from gpiozero.tools import queued
from signal import pause
leds = LEDBoard(5, 6, 13, 19, 26)
btn = Button(17)
for i in range(4):
leds[i].source = queued(leds[i + 1], 5)
leds[i].source_delay = 0.01
leds[4].source = btn
pause() | Below is the instruction that describes the task:
### Input:
Queues up readings from *values* (the number of readings queued is
determined by *qsize*) and begins yielding values only when the queue is
full. For example, to "cascade" values along a sequence of LEDs::
from gpiozero import LEDBoard, Button
from gpiozero.tools import queued
from signal import pause
leds = LEDBoard(5, 6, 13, 19, 26)
btn = Button(17)
for i in range(4):
leds[i].source = queued(leds[i + 1], 5)
leds[i].source_delay = 0.01
leds[4].source = btn
pause()
### Response:
def queued(values, qsize):
"""
Queues up readings from *values* (the number of readings queued is
determined by *qsize*) and begins yielding values only when the queue is
full. For example, to "cascade" values along a sequence of LEDs::
from gpiozero import LEDBoard, Button
from gpiozero.tools import queued
from signal import pause
leds = LEDBoard(5, 6, 13, 19, 26)
btn = Button(17)
for i in range(4):
leds[i].source = queued(leds[i + 1], 5)
leds[i].source_delay = 0.01
leds[4].source = btn
pause()
"""
values = [_normalize(v) for v in values]
if qsize < 1:
raise ValueError("qsize must be 1 or larger")
q = []
it = iter(values)
try:
for i in range(qsize):
q.append(next(it))
for i in cycle(range(qsize)):
yield q[i]
q[i] = next(it)
except StopIteration:
pass |
def vofile(filename, **kwargs):
"""
Open and return a handle on a VOSpace data connection
@param filename:
@param kwargs:
@return:
"""
basename = os.path.basename(filename)
if os.access(basename, os.R_OK):
return open(basename, 'r')
kwargs['view'] = kwargs.get('view', 'data')
return client.open(filename, **kwargs) | Open and return a handle on a VOSpace data connection
@param filename:
@param kwargs:
@return: | Below is the instruction that describes the task:
### Input:
Open and return a handle on a VOSpace data connection
@param filename:
@param kwargs:
@return:
### Response:
def vofile(filename, **kwargs):
"""
Open and return a handle on a VOSpace data connection
@param filename:
@param kwargs:
@return:
"""
basename = os.path.basename(filename)
if os.access(basename, os.R_OK):
return open(basename, 'r')
kwargs['view'] = kwargs.get('view', 'data')
return client.open(filename, **kwargs) |
def generate_id(self, agreement_id, types, values):
"""
Generate id for the condition.
:param agreement_id: id of the agreement, hex str
:param types: list of types
:param values: list of values
:return: id, str
"""
values_hash = utils.generate_multi_value_hash(types, values)
return utils.generate_multi_value_hash(
['bytes32', 'address', 'bytes32'],
[agreement_id, self.address, values_hash]
) | Generate id for the condition.
:param agreement_id: id of the agreement, hex str
:param types: list of types
:param values: list of values
:return: id, str | Below is the instruction that describes the task:
### Input:
Generate id for the condition.
:param agreement_id: id of the agreement, hex str
:param types: list of types
:param values: list of values
:return: id, str
### Response:
def generate_id(self, agreement_id, types, values):
"""
Generate id for the condition.
:param agreement_id: id of the agreement, hex str
:param types: list of types
:param values: list of values
:return: id, str
"""
values_hash = utils.generate_multi_value_hash(types, values)
return utils.generate_multi_value_hash(
['bytes32', 'address', 'bytes32'],
[agreement_id, self.address, values_hash]
) |
def get_changesets(self, start=None, end=None, start_date=None,
end_date=None, branch_name=None, reverse=False):
"""
Returns iterator of ``GitChangeset`` objects from start to end (both
are inclusive), in ascending date order (unless ``reverse`` is set).
:param start: changeset ID, as str; first returned changeset
:param end: changeset ID, as str; last returned changeset
:param start_date: if specified, changesets with commit date less than
``start_date`` would be filtered out from returned set
:param end_date: if specified, changesets with commit date greater than
``end_date`` would be filtered out from returned set
:param branch_name: if specified, changesets not reachable from given
branch would be filtered out from returned set
:param reverse: if ``True``, returned generator would be reversed
(meaning that returned changesets would have descending date order)
:raise BranchDoesNotExistError: If given ``branch_name`` does not
exist.
:raise ChangesetDoesNotExistError: If changeset for given ``start`` or
``end`` could not be found.
"""
if branch_name and branch_name not in self.branches:
raise BranchDoesNotExistError("Branch '%s' not found" \
% branch_name)
# %H in the format string means the full commit hash; initial hashes are retrieved
# in ascending date order
cmd_template = 'log --date-order --reverse --pretty=format:"%H"'
cmd_params = {}
if start_date:
cmd_template += ' --since "$since"'
cmd_params['since'] = start_date.strftime('%m/%d/%y %H:%M:%S')
if end_date:
cmd_template += ' --until "$until"'
cmd_params['until'] = end_date.strftime('%m/%d/%y %H:%M:%S')
if branch_name:
cmd_template += ' $branch_name'
cmd_params['branch_name'] = branch_name
else:
rev_filter = settings.GIT_REV_FILTER
cmd_template += ' %s' % (rev_filter)
cmd = string.Template(cmd_template).safe_substitute(**cmd_params)
revs = self.run_git_command(cmd)[0].splitlines()
start_pos = 0
end_pos = len(revs)
if start:
_start = self._get_revision(start)
try:
start_pos = revs.index(_start)
except ValueError:
pass
if end is not None:
_end = self._get_revision(end)
try:
end_pos = revs.index(_end)
except ValueError:
pass
if None not in [start, end] and start_pos > end_pos:
raise RepositoryError('start cannot be after end')
if end_pos is not None:
end_pos += 1
revs = revs[start_pos:end_pos]
if reverse:
revs = reversed(revs)
return CollectionGenerator(self, revs) | Returns iterator of ``GitChangeset`` objects from start to end (both
are inclusive), in ascending date order (unless ``reverse`` is set).
:param start: changeset ID, as str; first returned changeset
:param end: changeset ID, as str; last returned changeset
:param start_date: if specified, changesets with commit date less than
``start_date`` would be filtered out from returned set
:param end_date: if specified, changesets with commit date greater than
``end_date`` would be filtered out from returned set
:param branch_name: if specified, changesets not reachable from given
branch would be filtered out from returned set
:param reverse: if ``True``, returned generator would be reversed
(meaning that returned changesets would have descending date order)
:raise BranchDoesNotExistError: If given ``branch_name`` does not
exist.
:raise ChangesetDoesNotExistError: If changeset for given ``start`` or
``end`` could not be found. | Below is the instruction that describes the task:
### Input:
Returns iterator of ``GitChangeset`` objects from start to end (both
are inclusive), in ascending date order (unless ``reverse`` is set).
:param start: changeset ID, as str; first returned changeset
:param end: changeset ID, as str; last returned changeset
:param start_date: if specified, changesets with commit date less than
``start_date`` would be filtered out from returned set
:param end_date: if specified, changesets with commit date greater than
``end_date`` would be filtered out from returned set
:param branch_name: if specified, changesets not reachable from given
branch would be filtered out from returned set
:param reverse: if ``True``, returned generator would be reversed
(meaning that returned changesets would have descending date order)
:raise BranchDoesNotExistError: If given ``branch_name`` does not
exist.
:raise ChangesetDoesNotExistError: If changeset for given ``start`` or
``end`` could not be found.
### Response:
def get_changesets(self, start=None, end=None, start_date=None,
end_date=None, branch_name=None, reverse=False):
"""
Returns iterator of ``GitChangeset`` objects from start to end (both
are inclusive), in ascending date order (unless ``reverse`` is set).
:param start: changeset ID, as str; first returned changeset
:param end: changeset ID, as str; last returned changeset
:param start_date: if specified, changesets with commit date less than
``start_date`` would be filtered out from returned set
:param end_date: if specified, changesets with commit date greater than
``end_date`` would be filtered out from returned set
:param branch_name: if specified, changesets not reachable from given
branch would be filtered out from returned set
:param reverse: if ``True``, returned generator would be reversed
(meaning that returned changesets would have descending date order)
:raise BranchDoesNotExistError: If given ``branch_name`` does not
exist.
:raise ChangesetDoesNotExistError: If changeset for given ``start`` or
``end`` could not be found.
"""
if branch_name and branch_name not in self.branches:
raise BranchDoesNotExistError("Branch '%s' not found" \
% branch_name)
# %H in the format string means the full commit hash; initial hashes are retrieved
# in ascending date order
cmd_template = 'log --date-order --reverse --pretty=format:"%H"'
cmd_params = {}
if start_date:
cmd_template += ' --since "$since"'
cmd_params['since'] = start_date.strftime('%m/%d/%y %H:%M:%S')
if end_date:
cmd_template += ' --until "$until"'
cmd_params['until'] = end_date.strftime('%m/%d/%y %H:%M:%S')
if branch_name:
cmd_template += ' $branch_name'
cmd_params['branch_name'] = branch_name
else:
rev_filter = settings.GIT_REV_FILTER
cmd_template += ' %s' % (rev_filter)
cmd = string.Template(cmd_template).safe_substitute(**cmd_params)
revs = self.run_git_command(cmd)[0].splitlines()
start_pos = 0
end_pos = len(revs)
if start:
_start = self._get_revision(start)
try:
start_pos = revs.index(_start)
except ValueError:
pass
if end is not None:
_end = self._get_revision(end)
try:
end_pos = revs.index(_end)
except ValueError:
pass
if None not in [start, end] and start_pos > end_pos:
raise RepositoryError('start cannot be after end')
if end_pos is not None:
end_pos += 1
revs = revs[start_pos:end_pos]
if reverse:
revs = reversed(revs)
return CollectionGenerator(self, revs) |
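A hedged usage sketch for get_changesets; the import path, repository location, and attribute names on the returned changesets are assumptions about a vcs-style Git backend rather than details confirmed by the snippet above.

```python
from datetime import datetime
from vcs.backends.git import GitRepository   # assumed import path

repo = GitRepository('/path/to/local/clone')  # hypothetical repository
for cs in repo.get_changesets(start_date=datetime(2024, 1, 1),
                              branch_name='master',
                              reverse=True):
    print(cs.raw_id, cs.date)                 # assumed changeset attributes
```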
def epd_kepler_lightcurve(lcdict,
xccol='mom_centr1',
yccol='mom_centr2',
timestoignore=None,
filterflags=True,
writetodict=True,
epdsmooth=5):
'''This runs EPD on the Kepler light curve.
Following Huang et al. 2015, we fit the following EPD function to a smoothed
light curve, and then subtract it to obtain EPD corrected magnitudes::
f = c0 +
c1*sin(2*pi*x) + c2*cos(2*pi*x) + c3*sin(2*pi*y) + c4*cos(2*pi*y) +
c5*sin(4*pi*x) + c6*cos(4*pi*x) + c7*sin(4*pi*y) + c8*cos(4*pi*y) +
c9*bgv + c10*bge
By default, this function removes points in the Kepler LC that have ANY
quality flags set.
Parameters
----------
lcdict : lcdict
An `lcdict` produced by `consolidate_kepler_fitslc` or
`read_kepler_fitslc`.
xcol,ycol : str
Indicates the x and y coordinate column names to use from the Kepler LC
in the EPD fit.
timestoignore : list of tuples
This is of the form::
[(time1_start, time1_end), (time2_start, time2_end), ...]
and indicates the start and end times to mask out of the final
lcdict. Use this to remove anything that wasn't caught by the quality
flags.
filterflags : bool
If True, will remove any measurements that have non-zero quality flags
present. This usually indicates an issue with the instrument or
spacecraft.
writetodict : bool
If writetodict is True, adds the following columns to the lcdict::
epd_time = time array
epd_sapflux = uncorrected flux before EPD
epd_epdsapflux = corrected flux after EPD
epd_epdsapcorr = EPD flux corrections
epd_bkg = background array
epd_bkg_err = background errors array
epd_xcc = xcoord array
epd_ycc = ycoord array
epd_quality = quality flag array
and updates the 'columns' list in the lcdict as well.
epdsmooth : int
Sets the number of light curve points to smooth over when generating the
EPD fit function.
Returns
-------
tuple
Returns a tuple of the form: (times, epdfluxes, fitcoeffs, epdfit)
'''
times, fluxes, background, background_err = (lcdict['time'],
lcdict['sap']['sap_flux'],
lcdict['sap']['sap_bkg'],
lcdict['sap']['sap_bkg_err'])
xcc = lcdict[xccol]
ycc = lcdict[yccol]
flags = lcdict['sap_quality']
# filter all bad LC points as noted by quality flags
if filterflags:
nbefore = times.size
filterind = flags == 0
times = times[filterind]
fluxes = fluxes[filterind]
background = background[filterind]
background_err = background_err[filterind]
xcc = xcc[filterind]
ycc = ycc[filterind]
flags = flags[filterind]
nafter = times.size
LOGINFO('applied quality flag filter, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
# remove nans
find = (npisfinite(xcc) & npisfinite(ycc) &
npisfinite(times) & npisfinite(fluxes) &
npisfinite(background) & npisfinite(background_err))
nbefore = times.size
times = times[find]
fluxes = fluxes[find]
background = background[find]
background_err = background_err[find]
xcc = xcc[find]
ycc = ycc[find]
flags = flags[find]
nafter = times.size
LOGINFO('removed nans, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
# exclude all times in timestoignore
if (timestoignore and
isinstance(timestoignore, list) and
len(timestoignore) > 0):
exclind = npfull_like(times,True)
nbefore = times.size
# apply all the masks
for ignoretime in timestoignore:
time0, time1 = ignoretime[0], ignoretime[1]
thismask = (times > time0) & (times < time1)
exclind = exclind & thismask
# quantities after masks have been applied
times = times[exclind]
fluxes = fluxes[exclind]
background = background[exclind]
background_err = background_err[exclind]
xcc = xcc[exclind]
ycc = ycc[exclind]
flags = flags[exclind]
nafter = times.size
LOGINFO('removed timestoignore, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
# now that we're all done, we can do EPD
# first, smooth the light curve
smoothedfluxes = median_filter(fluxes, size=epdsmooth)
# initial fit coeffs
initcoeffs = npones(11)
# fit the smoothed mags and find better coeffs
leastsqfit = leastsq(_epd_residual,
initcoeffs,
args=(smoothedfluxes,
xcc, ycc,
background, background_err))
# if the fit succeeds, then get the EPD fluxes
if leastsqfit[-1] in (1,2,3,4):
fitcoeffs = leastsqfit[0]
epdfit = _epd_function(fitcoeffs,
fluxes,
xcc,
ycc,
background,
background_err)
epdfluxes = npmedian(fluxes) + fluxes - epdfit
# write these to the dictionary if requested
if writetodict:
lcdict['epd'] = {}
lcdict['epd']['time'] = times
lcdict['epd']['sapflux'] = fluxes
lcdict['epd']['epdsapflux'] = epdfluxes
lcdict['epd']['epdsapcorr'] = epdfit
lcdict['epd']['bkg'] = background
lcdict['epd']['bkg_err'] = background_err
lcdict['epd']['xcc'] = xcc
lcdict['epd']['ycc'] = ycc
lcdict['epd']['quality'] = flags
for newcol in ['epd.time','epd.sapflux',
'epd.epdsapflux','epd.epdsapcorr',
'epd.bkg','epd.bkg.err',
'epd.xcc','epd.ycc',
'epd.quality']:
if newcol not in lcdict['columns']:
lcdict['columns'].append(newcol)
return times, epdfluxes, fitcoeffs, epdfit
else:
LOGERROR('could not fit EPD function to light curve')
return None, None, None, None | This runs EPD on the Kepler light curve.
Following Huang et al. 2015, we fit the following EPD function to a smoothed
light curve, and then subtract it to obtain EPD corrected magnitudes::
f = c0 +
c1*sin(2*pi*x) + c2*cos(2*pi*x) + c3*sin(2*pi*y) + c4*cos(2*pi*y) +
c5*sin(4*pi*x) + c6*cos(4*pi*x) + c7*sin(4*pi*y) + c8*cos(4*pi*y) +
c9*bgv + c10*bge
By default, this function removes points in the Kepler LC that have ANY
quality flags set.
Parameters
----------
lcdict : lcdict
An `lcdict` produced by `consolidate_kepler_fitslc` or
`read_kepler_fitslc`.
xcol,ycol : str
Indicates the x and y coordinate column names to use from the Kepler LC
in the EPD fit.
timestoignore : list of tuples
This is of the form::
[(time1_start, time1_end), (time2_start, time2_end), ...]
and indicates the start and end times to mask out of the final
lcdict. Use this to remove anything that wasn't caught by the quality
flags.
filterflags : bool
If True, will remove any measurements that have non-zero quality flags
present. This usually indicates an issue with the instrument or
spacecraft.
writetodict : bool
If writetodict is True, adds the following columns to the lcdict::
epd_time = time array
epd_sapflux = uncorrected flux before EPD
epd_epdsapflux = corrected flux after EPD
epd_epdsapcorr = EPD flux corrections
epd_bkg = background array
epd_bkg_err = background errors array
epd_xcc = xcoord array
epd_ycc = ycoord array
epd_quality = quality flag array
and updates the 'columns' list in the lcdict as well.
epdsmooth : int
Sets the number of light curve points to smooth over when generating the
EPD fit function.
Returns
-------
tuple
Returns a tuple of the form: (times, epdfluxes, fitcoeffs, epdfit) | Below is the instruction that describes the task:
### Input:
This runs EPD on the Kepler light curve.
Following Huang et al. 2015, we fit the following EPD function to a smoothed
light curve, and then subtract it to obtain EPD corrected magnitudes::
f = c0 +
c1*sin(2*pi*x) + c2*cos(2*pi*x) + c3*sin(2*pi*y) + c4*cos(2*pi*y) +
c5*sin(4*pi*x) + c6*cos(4*pi*x) + c7*sin(4*pi*y) + c8*cos(4*pi*y) +
c9*bgv + c10*bge
By default, this function removes points in the Kepler LC that have ANY
quality flags set.
Parameters
----------
lcdict : lcdict
An `lcdict` produced by `consolidate_kepler_fitslc` or
`read_kepler_fitslc`.
xcol,ycol : str
Indicates the x and y coordinate column names to use from the Kepler LC
in the EPD fit.
timestoignore : list of tuples
This is of the form::
[(time1_start, time1_end), (time2_start, time2_end), ...]
and indicates the start and end times to mask out of the final
lcdict. Use this to remove anything that wasn't caught by the quality
flags.
filterflags : bool
If True, will remove any measurements that have non-zero quality flags
present. This usually indicates an issue with the instrument or
spacecraft.
writetodict : bool
If writetodict is True, adds the following columns to the lcdict::
epd_time = time array
epd_sapflux = uncorrected flux before EPD
epd_epdsapflux = corrected flux after EPD
epd_epdsapcorr = EPD flux corrections
epd_bkg = background array
epd_bkg_err = background errors array
epd_xcc = xcoord array
epd_ycc = ycoord array
epd_quality = quality flag array
and updates the 'columns' list in the lcdict as well.
epdsmooth : int
Sets the number of light curve points to smooth over when generating the
EPD fit function.
Returns
-------
tuple
Returns a tuple of the form: (times, epdfluxes, fitcoeffs, epdfit)
### Response:
def epd_kepler_lightcurve(lcdict,
xccol='mom_centr1',
yccol='mom_centr2',
timestoignore=None,
filterflags=True,
writetodict=True,
epdsmooth=5):
'''This runs EPD on the Kepler light curve.
Following Huang et al. 2015, we fit the following EPD function to a smoothed
light curve, and then subtract it to obtain EPD corrected magnitudes::
f = c0 +
c1*sin(2*pi*x) + c2*cos(2*pi*x) + c3*sin(2*pi*y) + c4*cos(2*pi*y) +
c5*sin(4*pi*x) + c6*cos(4*pi*x) + c7*sin(4*pi*y) + c8*cos(4*pi*y) +
c9*bgv + c10*bge
By default, this function removes points in the Kepler LC that have ANY
quality flags set.
Parameters
----------
lcdict : lcdict
An `lcdict` produced by `consolidate_kepler_fitslc` or
`read_kepler_fitslc`.
xcol,ycol : str
Indicates the x and y coordinate column names to use from the Kepler LC
in the EPD fit.
timestoignore : list of tuples
This is of the form::
[(time1_start, time1_end), (time2_start, time2_end), ...]
and indicates the start and end times to mask out of the final
lcdict. Use this to remove anything that wasn't caught by the quality
flags.
filterflags : bool
If True, will remove any measurements that have non-zero quality flags
present. This usually indicates an issue with the instrument or
spacecraft.
writetodict : bool
If writetodict is True, adds the following columns to the lcdict::
epd_time = time array
epd_sapflux = uncorrected flux before EPD
epd_epdsapflux = corrected flux after EPD
epd_epdsapcorr = EPD flux corrections
epd_bkg = background array
epd_bkg_err = background errors array
epd_xcc = xcoord array
epd_ycc = ycoord array
epd_quality = quality flag array
and updates the 'columns' list in the lcdict as well.
epdsmooth : int
Sets the number of light curve points to smooth over when generating the
EPD fit function.
Returns
-------
tuple
Returns a tuple of the form: (times, epdfluxes, fitcoeffs, epdfit)
'''
times, fluxes, background, background_err = (lcdict['time'],
lcdict['sap']['sap_flux'],
lcdict['sap']['sap_bkg'],
lcdict['sap']['sap_bkg_err'])
xcc = lcdict[xccol]
ycc = lcdict[yccol]
flags = lcdict['sap_quality']
# filter all bad LC points as noted by quality flags
if filterflags:
nbefore = times.size
filterind = flags == 0
times = times[filterind]
fluxes = fluxes[filterind]
background = background[filterind]
background_err = background_err[filterind]
xcc = xcc[filterind]
ycc = ycc[filterind]
flags = flags[filterind]
nafter = times.size
LOGINFO('applied quality flag filter, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
# remove nans
find = (npisfinite(xcc) & npisfinite(ycc) &
npisfinite(times) & npisfinite(fluxes) &
npisfinite(background) & npisfinite(background_err))
nbefore = times.size
times = times[find]
fluxes = fluxes[find]
background = background[find]
background_err = background_err[find]
xcc = xcc[find]
ycc = ycc[find]
flags = flags[find]
nafter = times.size
LOGINFO('removed nans, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
# exclude all times in timestoignore
if (timestoignore and
isinstance(timestoignore, list) and
len(timestoignore) > 0):
exclind = npfull_like(times,True)
nbefore = times.size
# apply all the masks
for ignoretime in timestoignore:
time0, time1 = ignoretime[0], ignoretime[1]
thismask = (times > time0) & (times < time1)
exclind = exclind & thismask
# quantities after masks have been applied
times = times[exclind]
fluxes = fluxes[exclind]
background = background[exclind]
background_err = background_err[exclind]
xcc = xcc[exclind]
ycc = ycc[exclind]
flags = flags[exclind]
nafter = times.size
LOGINFO('removed timestoignore, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
# now that we're all done, we can do EPD
# first, smooth the light curve
smoothedfluxes = median_filter(fluxes, size=epdsmooth)
# initial fit coeffs
initcoeffs = npones(11)
# fit the smoothed mags and find better coeffs
leastsqfit = leastsq(_epd_residual,
initcoeffs,
args=(smoothedfluxes,
xcc, ycc,
background, background_err))
# if the fit succeeds, then get the EPD fluxes
if leastsqfit[-1] in (1,2,3,4):
fitcoeffs = leastsqfit[0]
epdfit = _epd_function(fitcoeffs,
fluxes,
xcc,
ycc,
background,
background_err)
epdfluxes = npmedian(fluxes) + fluxes - epdfit
# write these to the dictionary if requested
if writetodict:
lcdict['epd'] = {}
lcdict['epd']['time'] = times
lcdict['epd']['sapflux'] = fluxes
lcdict['epd']['epdsapflux'] = epdfluxes
lcdict['epd']['epdsapcorr'] = epdfit
lcdict['epd']['bkg'] = background
lcdict['epd']['bkg_err'] = background_err
lcdict['epd']['xcc'] = xcc
lcdict['epd']['ycc'] = ycc
lcdict['epd']['quality'] = flags
for newcol in ['epd.time','epd.sapflux',
'epd.epdsapflux','epd.epdsapcorr',
'epd.bkg','epd.bkg.err',
'epd.xcc','epd.ycc',
'epd.quality']:
if newcol not in lcdict['columns']:
lcdict['columns'].append(newcol)
return times, epdfluxes, fitcoeffs, epdfit
else:
LOGERROR('could not fit EPD function to light curve')
return None, None, None, None |
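For reference, a hedged sketch of the EPD trend model implied by the formula quoted in the docstring; the real `_epd_function` in astrobase may differ in detail, and the helper name below is illustrative.

```python
import numpy as np

def epd_model(coeffs, xcc, ycc, bgv, bge):
    """EPD trend model from the docstring's formula (sketch only)."""
    c = coeffs
    return (c[0]
            + c[1]*np.sin(2*np.pi*xcc) + c[2]*np.cos(2*np.pi*xcc)
            + c[3]*np.sin(2*np.pi*ycc) + c[4]*np.cos(2*np.pi*ycc)
            + c[5]*np.sin(4*np.pi*xcc) + c[6]*np.cos(4*np.pi*xcc)
            + c[7]*np.sin(4*np.pi*ycc) + c[8]*np.cos(4*np.pi*ycc)
            + c[9]*bgv + c[10]*bge)
```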
def tostring(element):
"""Serialize an element and its child nodes to a string"""
rv = []
def serializeElement(element):
if not hasattr(element, "tag"):
if element.docinfo.internalDTD:
if element.docinfo.doctype:
dtd_str = element.docinfo.doctype
else:
dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
rv.append(dtd_str)
serializeElement(element.getroot())
elif element.tag == comment_type:
rv.append("<!--%s-->" % (element.text,))
else:
# This is assumed to be an ordinary element
if not element.attrib:
rv.append("<%s>" % (element.tag,))
else:
attr = " ".join(["%s=\"%s\"" % (name, value)
for name, value in element.attrib.items()])
rv.append("<%s %s>" % (element.tag, attr))
if element.text:
rv.append(element.text)
for child in element:
serializeElement(child)
rv.append("</%s>" % (element.tag,))
if hasattr(element, "tail") and element.tail:
rv.append(element.tail)
serializeElement(element)
return "".join(rv) | Serialize an element and its child nodes to a string | Below is the instruction that describes the task:
### Input:
Serialize an element and its child nodes to a string
### Response:
def tostring(element):
"""Serialize an element and its child nodes to a string"""
rv = []
def serializeElement(element):
if not hasattr(element, "tag"):
if element.docinfo.internalDTD:
if element.docinfo.doctype:
dtd_str = element.docinfo.doctype
else:
dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
rv.append(dtd_str)
serializeElement(element.getroot())
elif element.tag == comment_type:
rv.append("<!--%s-->" % (element.text,))
else:
# This is assumed to be an ordinary element
if not element.attrib:
rv.append("<%s>" % (element.tag,))
else:
attr = " ".join(["%s=\"%s\"" % (name, value)
for name, value in element.attrib.items()])
rv.append("<%s %s>" % (element.tag, attr))
if element.text:
rv.append(element.text)
for child in element:
serializeElement(child)
rv.append("</%s>" % (element.tag,))
if hasattr(element, "tail") and element.tail:
rv.append(element.tail)
serializeElement(element)
return "".join(rv) |
def pause_process(self, key):
"""Pause a specific process."""
if key in self.processes and key not in self.paused:
os.killpg(os.getpgid(self.processes[key].pid), signal.SIGSTOP)
self.queue[key]['status'] = 'paused'
self.paused.append(key)
return True
return False | Pause a specific process. | Below is the instruction that describes the task:
### Input:
Pause a specific process.
### Response:
def pause_process(self, key):
"""Pause a specific process."""
if key in self.processes and key not in self.paused:
os.killpg(os.getpgid(self.processes[key].pid), signal.SIGSTOP)
self.queue[key]['status'] = 'paused'
self.paused.append(key)
return True
return False |
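A standalone sketch of the SIGSTOP-based pausing used by pause_process, together with the matching SIGCONT resume; the `sleep` child process and timings are illustrative, and this is POSIX-only.

```python
import os
import signal
import subprocess
import time

# start the child in its own process group so killpg only touches that group
proc = subprocess.Popen(['sleep', '30'], preexec_fn=os.setsid)

os.killpg(os.getpgid(proc.pid), signal.SIGSTOP)   # pause the process group
time.sleep(1)
os.killpg(os.getpgid(proc.pid), signal.SIGCONT)   # resume it
proc.terminate()
```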
def _axis_levels(self, axis):
"""
Return the number of levels in the labels taking into account the axis.
Get the number of levels for the columns (0) or rows (1).
"""
ax = self._axis(axis)
return 1 if not hasattr(ax, 'levels') else len(ax.levels) | Return the number of levels in the labels taking into account the axis.
Get the number of levels for the columns (0) or rows (1). | Below is the instruction that describes the task:
### Input:
Return the number of levels in the labels taking into account the axis.
Get the number of levels for the columns (0) or rows (1).
### Response:
def _axis_levels(self, axis):
"""
Return the number of levels in the labels taking into account the axis.
Get the number of levels for the columns (0) or rows (1).
"""
ax = self._axis(axis)
return 1 if not hasattr(ax, 'levels') else len(ax.levels) |
def _ReadFixedSizeDataTypeDefinition(
self, definitions_registry, definition_values, data_type_definition_class,
definition_name, supported_attributes,
default_size=definitions.SIZE_NATIVE, default_units='bytes',
is_member=False, supported_size_values=None):
"""Reads a fixed-size data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
data_type_definition_class (str): data type definition class.
definition_name (str): name of the definition.
supported_attributes (set[str]): names of the supported attributes.
default_size (Optional[int]): default size.
default_units (Optional[str]): default units.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
supported_size_values (Optional[tuple[int]]): supported size values,
or None if not set.
Returns:
FixedSizeDataTypeDefinition: fixed-size data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
"""
definition_object = self._ReadStorageDataTypeDefinition(
definitions_registry, definition_values, data_type_definition_class,
definition_name, supported_attributes, is_member=is_member)
attributes = definition_values.get('attributes', None)
if attributes:
size = attributes.get('size', default_size)
if size != definitions.SIZE_NATIVE:
try:
int(size)
except ValueError:
error_message = 'unsupported size attribute: {0!s}'.format(size)
raise errors.DefinitionReaderError(definition_name, error_message)
if supported_size_values and size not in supported_size_values:
error_message = 'unsupported size value: {0!s}'.format(size)
raise errors.DefinitionReaderError(definition_name, error_message)
definition_object.size = size
definition_object.units = attributes.get('units', default_units)
return definition_object | Reads a fixed-size data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
data_type_definition_class (str): data type definition class.
definition_name (str): name of the definition.
supported_attributes (set[str]): names of the supported attributes.
default_size (Optional[int]): default size.
default_units (Optional[str]): default units.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
supported_size_values (Optional[tuple[int]]): supported size values,
or None if not set.
Returns:
FixedSizeDataTypeDefinition: fixed-size data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect. | Below is the instruction that describes the task:
### Input:
Reads a fixed-size data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
data_type_definition_class (str): data type definition class.
definition_name (str): name of the definition.
supported_attributes (set[str]): names of the supported attributes.
default_size (Optional[int]): default size.
default_units (Optional[str]): default units.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
supported_size_values (Optional[tuple[int]]): supported size values,
or None if not set.
Returns:
FixedSizeDataTypeDefinition: fixed-size data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
### Response:
def _ReadFixedSizeDataTypeDefinition(
self, definitions_registry, definition_values, data_type_definition_class,
definition_name, supported_attributes,
default_size=definitions.SIZE_NATIVE, default_units='bytes',
is_member=False, supported_size_values=None):
"""Reads a fixed-size data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
data_type_definition_class (str): data type definition class.
definition_name (str): name of the definition.
supported_attributes (set[str]): names of the supported attributes.
default_size (Optional[int]): default size.
default_units (Optional[str]): default units.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
supported_size_values (Optional[tuple[int]]): supported size values,
or None if not set.
Returns:
FixedSizeDataTypeDefinition: fixed-size data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
"""
definition_object = self._ReadStorageDataTypeDefinition(
definitions_registry, definition_values, data_type_definition_class,
definition_name, supported_attributes, is_member=is_member)
attributes = definition_values.get('attributes', None)
if attributes:
size = attributes.get('size', default_size)
if size != definitions.SIZE_NATIVE:
try:
int(size)
except ValueError:
error_message = 'unsupported size attribute: {0!s}'.format(size)
raise errors.DefinitionReaderError(definition_name, error_message)
if supported_size_values and size not in supported_size_values:
error_message = 'unsupported size value: {0!s}'.format(size)
raise errors.DefinitionReaderError(definition_name, error_message)
definition_object.size = size
definition_object.units = attributes.get('units', default_units)
return definition_object |
def QA_SU_save_stock_day(client=DATABASE, ui_log=None, ui_progress=None):
'''
save stock_day
保存日线数据
:param client:
:param ui_log: 给GUI qt 界面使用
:param ui_progress: 给GUI qt 界面使用
:param ui_progress_int_value: 给GUI qt 界面使用
'''
stock_list = QA_fetch_get_stock_list().code.unique().tolist()
coll_stock_day = client.stock_day
coll_stock_day.create_index(
[("code",
pymongo.ASCENDING),
("date_stamp",
pymongo.ASCENDING)]
)
err = []
# saving results
def __gen_param(stock_list, coll_stock_day, ip_list=[]):
results = []
count = len(ip_list)
total = len(stock_list)
for item in range(len(stock_list)):
try:
code = stock_list[item]
QA_util_log_info(
'##JOB01 Now Saving STOCK_DAY==== {}'.format(str(code)),
ui_log
)
# First, check whether the database already has data for this code
search_cond = {'code': str(code)[0:6]}
ref = coll_stock_day.find(search_cond)
end_date = str(now_time())[0:10]
ref_count = coll_stock_day.count_documents(search_cond)
# The database already contains data for this code, so continue with an incremental update
# This check is needed because a newly listed stock has no rows in the database yet, which would otherwise cause a negative-index problem
if ref_count > 0:
# Resume updating from the date of the last fetch
start_date = ref[ref_count - 1]['date']
# print("ref[ref.count() - 1]['date'] {} {}".format(ref.count(), coll_stock_day.count_documents({'code': str(code)[0:6]})))
else:
# No data for this code in the database yet; download everything starting from 1990-01-01
start_date = '1990-01-01'
QA_util_log_info(
'UPDATE_STOCK_DAY \n Trying updating {} from {} to {}'
.format(code,
start_date,
end_date),
ui_log
)
if start_date != end_date:
# Skip codes that are already up to date
results.extend([(code, start_date, end_date, '00', 'day', ip_list[item % count]['ip'],
ip_list[item % count]['port'], item, total, ui_log, ui_progress)])
except Exception as error0:
print('Exception:{}'.format(error0))
err.append(code)
return results
ips = get_ip_list_by_multi_process_ping(stock_ip_list, _type='stock')[:cpu_count() * 2 + 1]
param = __gen_param(stock_list, coll_stock_day, ips)
ps = QA_SU_save_stock_day_parallelism(processes=cpu_count() if len(ips) >= cpu_count() else len(ips),
client=client, ui_log=ui_log)
ps.add(do_saving_work, param)
ps.run()
if len(err) < 1:
QA_util_log_info('SUCCESS save stock day ^_^', ui_log)
else:
QA_util_log_info('ERROR CODE \n ', ui_log)
QA_util_log_info(err, ui_log) | save stock_day
Save daily bar data
:param client:
:param ui_log: used by the GUI (Qt) interface
:param ui_progress: used by the GUI (Qt) interface
:param ui_progress_int_value: used by the GUI (Qt) interface | Below is the instruction that describes the task:
### Input:
save stock_day
Save daily bar data
:param client:
:param ui_log: used by the GUI (Qt) interface
:param ui_progress: used by the GUI (Qt) interface
:param ui_progress_int_value: used by the GUI (Qt) interface
### Response:
def QA_SU_save_stock_day(client=DATABASE, ui_log=None, ui_progress=None):
'''
save stock_day
Save daily bar data
:param client:
:param ui_log: used by the GUI (Qt) interface
:param ui_progress: used by the GUI (Qt) interface
:param ui_progress_int_value: used by the GUI (Qt) interface
'''
stock_list = QA_fetch_get_stock_list().code.unique().tolist()
coll_stock_day = client.stock_day
coll_stock_day.create_index(
[("code",
pymongo.ASCENDING),
("date_stamp",
pymongo.ASCENDING)]
)
err = []
# saving results
def __gen_param(stock_list, coll_stock_day, ip_list=[]):
results = []
count = len(ip_list)
total = len(stock_list)
for item in range(len(stock_list)):
try:
code = stock_list[item]
QA_util_log_info(
'##JOB01 Now Saving STOCK_DAY==== {}'.format(str(code)),
ui_log
)
# First, check whether the database already has data for this code
search_cond = {'code': str(code)[0:6]}
ref = coll_stock_day.find(search_cond)
end_date = str(now_time())[0:10]
ref_count = coll_stock_day.count_documents(search_cond)
# The database already contains data for this code, so continue with an incremental update
# This check is needed because a newly listed stock has no rows in the database yet, which would otherwise cause a negative-index problem
if ref_count > 0:
# Resume updating from the date of the last fetch
start_date = ref[ref_count - 1]['date']
# print("ref[ref.count() - 1]['date'] {} {}".format(ref.count(), coll_stock_day.count_documents({'code': str(code)[0:6]})))
else:
# No data for this code in the database yet; download everything starting from 1990-01-01
start_date = '1990-01-01'
QA_util_log_info(
'UPDATE_STOCK_DAY \n Trying updating {} from {} to {}'
.format(code,
start_date,
end_date),
ui_log
)
if start_date != end_date:
# Skip codes that are already up to date
results.extend([(code, start_date, end_date, '00', 'day', ip_list[item % count]['ip'],
ip_list[item % count]['port'], item, total, ui_log, ui_progress)])
except Exception as error0:
print('Exception:{}'.format(error0))
err.append(code)
return results
ips = get_ip_list_by_multi_process_ping(stock_ip_list, _type='stock')[:cpu_count() * 2 + 1]
param = __gen_param(stock_list, coll_stock_day, ips)
ps = QA_SU_save_stock_day_parallelism(processes=cpu_count() if len(ips) >= cpu_count() else len(ips),
client=client, ui_log=ui_log)
ps.add(do_saving_work, param)
ps.run()
if len(err) < 1:
QA_util_log_info('SUCCESS save stock day ^_^', ui_log)
else:
QA_util_log_info('ERROR CODE \n ', ui_log)
QA_util_log_info(err, ui_log) |
def get_sql_state(self, state):
"""
Get SQLStateGraph from state.
"""
if not hasattr(state, 'sql_state'):
setattr(state, 'sql_state', SQLStateGraph())
return state.sql_state | Get SQLStateGraph from state. | Below is the instruction that describes the task:
### Input:
Get SQLStateGraph from state.
### Response:
def get_sql_state(self, state):
"""
Get SQLStateGraph from state.
"""
if not hasattr(state, 'sql_state'):
setattr(state, 'sql_state', SQLStateGraph())
return state.sql_state |
def reload(self):
"""Reload catalog if sufficient time has passed"""
if time.time() - self.updated > self.ttl:
self.force_reload() | Reload catalog if sufficient time has passed | Below is the the instruction that describes the task:
### Input:
Reload catalog if sufficient time has passed
### Response:
def reload(self):
"""Reload catalog if sufficient time has passed"""
if time.time() - self.updated > self.ttl:
self.force_reload() |
def intervalleftjoin(left, right, lstart='start', lstop='stop', rstart='start',
rstop='stop', lkey=None, rkey=None, include_stop=False,
missing=None, lprefix=None, rprefix=None):
"""
Like :func:`petl.transform.intervals.intervaljoin` but rows from the left
table without a match in the right table are also included. E.g.::
>>> import petl as etl
>>> left = [['begin', 'end', 'quux'],
... [1, 2, 'a'],
... [2, 4, 'b'],
... [2, 5, 'c'],
... [9, 14, 'd'],
... [1, 1, 'e'],
... [10, 10, 'f']]
>>> right = [['start', 'stop', 'value'],
... [1, 4, 'foo'],
... [3, 7, 'bar'],
... [4, 9, 'baz']]
>>> table1 = etl.intervalleftjoin(left, right,
... lstart='begin', lstop='end',
... rstart='start', rstop='stop')
>>> table1.lookall()
+-------+-----+------+-------+------+-------+
| begin | end | quux | start | stop | value |
+=======+=====+======+=======+======+=======+
| 1 | 2 | 'a' | 1 | 4 | 'foo' |
+-------+-----+------+-------+------+-------+
| 2 | 4 | 'b' | 1 | 4 | 'foo' |
+-------+-----+------+-------+------+-------+
| 2 | 4 | 'b' | 3 | 7 | 'bar' |
+-------+-----+------+-------+------+-------+
| 2 | 5 | 'c' | 1 | 4 | 'foo' |
+-------+-----+------+-------+------+-------+
| 2 | 5 | 'c' | 3 | 7 | 'bar' |
+-------+-----+------+-------+------+-------+
| 2 | 5 | 'c' | 4 | 9 | 'baz' |
+-------+-----+------+-------+------+-------+
| 9 | 14 | 'd' | None | None | None |
+-------+-----+------+-------+------+-------+
| 1 | 1 | 'e' | None | None | None |
+-------+-----+------+-------+------+-------+
| 10 | 10 | 'f' | None | None | None |
+-------+-----+------+-------+------+-------+
Note start coordinates are included and stop coordinates are excluded
from the interval. Use the `include_stop` keyword argument to include the
upper bound of the interval when finding overlaps.
"""
assert (lkey is None) == (rkey is None), \
'facet key field must be provided for both or neither table'
return IntervalLeftJoinView(left, right, lstart=lstart, lstop=lstop,
rstart=rstart, rstop=rstop, lkey=lkey,
rkey=rkey, include_stop=include_stop,
missing=missing, lprefix=lprefix,
rprefix=rprefix) | Like :func:`petl.transform.intervals.intervaljoin` but rows from the left
table without a match in the right table are also included. E.g.::
>>> import petl as etl
>>> left = [['begin', 'end', 'quux'],
... [1, 2, 'a'],
... [2, 4, 'b'],
... [2, 5, 'c'],
... [9, 14, 'd'],
... [1, 1, 'e'],
... [10, 10, 'f']]
>>> right = [['start', 'stop', 'value'],
... [1, 4, 'foo'],
... [3, 7, 'bar'],
... [4, 9, 'baz']]
>>> table1 = etl.intervalleftjoin(left, right,
... lstart='begin', lstop='end',
... rstart='start', rstop='stop')
>>> table1.lookall()
+-------+-----+------+-------+------+-------+
| begin | end | quux | start | stop | value |
+=======+=====+======+=======+======+=======+
| 1 | 2 | 'a' | 1 | 4 | 'foo' |
+-------+-----+------+-------+------+-------+
| 2 | 4 | 'b' | 1 | 4 | 'foo' |
+-------+-----+------+-------+------+-------+
| 2 | 4 | 'b' | 3 | 7 | 'bar' |
+-------+-----+------+-------+------+-------+
| 2 | 5 | 'c' | 1 | 4 | 'foo' |
+-------+-----+------+-------+------+-------+
| 2 | 5 | 'c' | 3 | 7 | 'bar' |
+-------+-----+------+-------+------+-------+
| 2 | 5 | 'c' | 4 | 9 | 'baz' |
+-------+-----+------+-------+------+-------+
| 9 | 14 | 'd' | None | None | None |
+-------+-----+------+-------+------+-------+
| 1 | 1 | 'e' | None | None | None |
+-------+-----+------+-------+------+-------+
| 10 | 10 | 'f' | None | None | None |
+-------+-----+------+-------+------+-------+
Note start coordinates are included and stop coordinates are excluded
from the interval. Use the `include_stop` keyword argument to include the
upper bound of the interval when finding overlaps. | Below is the instruction that describes the task:
### Input:
Like :func:`petl.transform.intervals.intervaljoin` but rows from the left
table without a match in the right table are also included. E.g.::
>>> import petl as etl
>>> left = [['begin', 'end', 'quux'],
... [1, 2, 'a'],
... [2, 4, 'b'],
... [2, 5, 'c'],
... [9, 14, 'd'],
... [1, 1, 'e'],
... [10, 10, 'f']]
>>> right = [['start', 'stop', 'value'],
... [1, 4, 'foo'],
... [3, 7, 'bar'],
... [4, 9, 'baz']]
>>> table1 = etl.intervalleftjoin(left, right,
... lstart='begin', lstop='end',
... rstart='start', rstop='stop')
>>> table1.lookall()
+-------+-----+------+-------+------+-------+
| begin | end | quux | start | stop | value |
+=======+=====+======+=======+======+=======+
| 1 | 2 | 'a' | 1 | 4 | 'foo' |
+-------+-----+------+-------+------+-------+
| 2 | 4 | 'b' | 1 | 4 | 'foo' |
+-------+-----+------+-------+------+-------+
| 2 | 4 | 'b' | 3 | 7 | 'bar' |
+-------+-----+------+-------+------+-------+
| 2 | 5 | 'c' | 1 | 4 | 'foo' |
+-------+-----+------+-------+------+-------+
| 2 | 5 | 'c' | 3 | 7 | 'bar' |
+-------+-----+------+-------+------+-------+
| 2 | 5 | 'c' | 4 | 9 | 'baz' |
+-------+-----+------+-------+------+-------+
| 9 | 14 | 'd' | None | None | None |
+-------+-----+------+-------+------+-------+
| 1 | 1 | 'e' | None | None | None |
+-------+-----+------+-------+------+-------+
| 10 | 10 | 'f' | None | None | None |
+-------+-----+------+-------+------+-------+
Note start coordinates are included and stop coordinates are excluded
from the interval. Use the `include_stop` keyword argument to include the
upper bound of the interval when finding overlaps.
### Response:
def intervalleftjoin(left, right, lstart='start', lstop='stop', rstart='start',
rstop='stop', lkey=None, rkey=None, include_stop=False,
missing=None, lprefix=None, rprefix=None):
"""
Like :func:`petl.transform.intervals.intervaljoin` but rows from the left
table without a match in the right table are also included. E.g.::
>>> import petl as etl
>>> left = [['begin', 'end', 'quux'],
... [1, 2, 'a'],
... [2, 4, 'b'],
... [2, 5, 'c'],
... [9, 14, 'd'],
... [1, 1, 'e'],
... [10, 10, 'f']]
>>> right = [['start', 'stop', 'value'],
... [1, 4, 'foo'],
... [3, 7, 'bar'],
... [4, 9, 'baz']]
>>> table1 = etl.intervalleftjoin(left, right,
... lstart='begin', lstop='end',
... rstart='start', rstop='stop')
>>> table1.lookall()
+-------+-----+------+-------+------+-------+
| begin | end | quux | start | stop | value |
+=======+=====+======+=======+======+=======+
| 1 | 2 | 'a' | 1 | 4 | 'foo' |
+-------+-----+------+-------+------+-------+
| 2 | 4 | 'b' | 1 | 4 | 'foo' |
+-------+-----+------+-------+------+-------+
| 2 | 4 | 'b' | 3 | 7 | 'bar' |
+-------+-----+------+-------+------+-------+
| 2 | 5 | 'c' | 1 | 4 | 'foo' |
+-------+-----+------+-------+------+-------+
| 2 | 5 | 'c' | 3 | 7 | 'bar' |
+-------+-----+------+-------+------+-------+
| 2 | 5 | 'c' | 4 | 9 | 'baz' |
+-------+-----+------+-------+------+-------+
| 9 | 14 | 'd' | None | None | None |
+-------+-----+------+-------+------+-------+
| 1 | 1 | 'e' | None | None | None |
+-------+-----+------+-------+------+-------+
| 10 | 10 | 'f' | None | None | None |
+-------+-----+------+-------+------+-------+
Note start coordinates are included and stop coordinates are excluded
from the interval. Use the `include_stop` keyword argument to include the
upper bound of the interval when finding overlaps.
"""
assert (lkey is None) == (rkey is None), \
'facet key field must be provided for both or neither table'
return IntervalLeftJoinView(left, right, lstart=lstart, lstop=lstop,
rstart=rstart, rstop=rstop, lkey=lkey,
rkey=rkey, include_stop=include_stop,
missing=missing, lprefix=lprefix,
rprefix=rprefix) |
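A quick hypothetical follow-up, reusing the `left` and `right` tables from the example above: passing `include_stop=True` also treats intervals that only touch at an upper bound as overlapping, so a zero-length row such as `[1, 1, 'e']` can pick up a match.
>>> table2 = etl.intervalleftjoin(left, right,
...                               lstart='begin', lstop='end',
...                               rstart='start', rstop='stop',
...                               include_stop=True)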
def copy_results(self, copy_to_dir, rename_model_to=None, force_rerun=False):
"""Copy the raw information from I-TASSER modeling to a new folder.
Copies all files in the list _attrs_to_copy.
Args:
copy_to_dir (str): Directory to copy the minimal set of results per sequence.
rename_model_to (str): New file name (without extension)
force_rerun (bool): If existing models and results should be overwritten.
"""
# Save path to the structure and copy it if specified
if not rename_model_to:
rename_model_to = self.model_to_use
new_model_path = op.join(copy_to_dir, '{}.pdb'.format(rename_model_to))
if self.structure_path:
if ssbio.utils.force_rerun(flag=force_rerun, outfile=new_model_path):
# Clean and save it
custom_clean = CleanPDB()
my_pdb = StructureIO(self.structure_path)
new_model_path = my_pdb.write_pdb(custom_selection=custom_clean,
custom_name=rename_model_to,
out_dir=copy_to_dir,
force_rerun=force_rerun)
# Update the structure_path to be the new clean file
self.load_structure_path(structure_path=new_model_path, file_type='pdb')
# Other modeling results - store in a new folder
dest_itasser_dir = op.join(copy_to_dir, '{}_itasser'.format(rename_model_to))
if not op.exists(dest_itasser_dir):
os.mkdir(dest_itasser_dir)
for attr in self._attrs_to_copy:
old_file_path = getattr(self, attr)
new_file_path = op.join(dest_itasser_dir, op.basename(old_file_path))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=new_file_path):
shutil.copy2(old_file_path, new_file_path)
log.debug('{}: copied from {}'.format(new_file_path, old_file_path))
else:
log.debug('{}: file already exists'.format(new_file_path))
setattr(self, attr, new_file_path) | Copy the raw information from I-TASSER modeling to a new folder.
Copies all files in the list _attrs_to_copy.
Args:
copy_to_dir (str): Directory to copy the minimal set of results per sequence.
rename_model_to (str): New file name (without extension)
force_rerun (bool): If existing models and results should be overwritten. | Below is the the instruction that describes the task:
### Input:
Copy the raw information from I-TASSER modeling to a new folder.
Copies all files in the list _attrs_to_copy.
Args:
copy_to_dir (str): Directory to copy the minimal set of results per sequence.
rename_model_to (str): New file name (without extension)
force_rerun (bool): If existing models and results should be overwritten.
### Response:
def copy_results(self, copy_to_dir, rename_model_to=None, force_rerun=False):
"""Copy the raw information from I-TASSER modeling to a new folder.
Copies all files in the list _attrs_to_copy.
Args:
copy_to_dir (str): Directory to copy the minimal set of results per sequence.
rename_model_to (str): New file name (without extension)
force_rerun (bool): If existing models and results should be overwritten.
"""
# Save path to the structure and copy it if specified
if not rename_model_to:
rename_model_to = self.model_to_use
new_model_path = op.join(copy_to_dir, '{}.pdb'.format(rename_model_to))
if self.structure_path:
if ssbio.utils.force_rerun(flag=force_rerun, outfile=new_model_path):
# Clean and save it
custom_clean = CleanPDB()
my_pdb = StructureIO(self.structure_path)
new_model_path = my_pdb.write_pdb(custom_selection=custom_clean,
custom_name=rename_model_to,
out_dir=copy_to_dir,
force_rerun=force_rerun)
# Update the structure_path to be the new clean file
self.load_structure_path(structure_path=new_model_path, file_type='pdb')
# Other modeling results - store in a new folder
dest_itasser_dir = op.join(copy_to_dir, '{}_itasser'.format(rename_model_to))
if not op.exists(dest_itasser_dir):
os.mkdir(dest_itasser_dir)
for attr in self._attrs_to_copy:
old_file_path = getattr(self, attr)
new_file_path = op.join(dest_itasser_dir, op.basename(old_file_path))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=new_file_path):
shutil.copy2(old_file_path, new_file_path)
log.debug('{}: copied from {}'.format(new_file_path, old_file_path))
else:
log.debug('{}: file already exists'.format(new_file_path))
setattr(self, attr, new_file_path) |
def reverseCommit(self):
"""
Replace the current widget content with the original text.
Note that the original text has styling information available,
whereas the new text does not.
"""
self.baseClass.setText(self.oldText)
self.qteWidget.SCISetStylingEx(0, 0, self.style) | Replace the current widget content with the original text.
Note that the original text has styling information available,
whereas the new text does not. | Below is the the instruction that describes the task:
### Input:
Replace the current widget content with the original text.
Note that the original text has styling information available,
whereas the new text does not.
### Response:
def reverseCommit(self):
"""
Replace the current widget content with the original text.
Note that the original text has styling information available,
whereas the new text does not.
"""
self.baseClass.setText(self.oldText)
self.qteWidget.SCISetStylingEx(0, 0, self.style) |
def simxGetOutMessageInfo(clientID, infoType):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
info = ct.c_int()
return c_GetOutMessageInfo(clientID, infoType, ct.byref(info)), info.value | Please have a look at the function description/documentation in the V-REP user manual | Below is the the instruction that describes the task:
### Input:
Please have a look at the function description/documentation in the V-REP user manual
### Response:
def simxGetOutMessageInfo(clientID, infoType):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
info = ct.c_int()
return c_GetOutMessageInfo(clientID, infoType, ct.byref(info)), info.value |
def change_resource_record_set_writer(connection, change_set, comment=None):
"""
Forms an XML string that we'll send to Route53 in order to change
record sets.
:param Route53Connection connection: The connection instance used to
query the API.
:param change_set.ChangeSet change_set: The ChangeSet object to create the
XML doc from.
:keyword str comment: An optional comment to go along with the request.
"""
e_root = etree.Element(
"ChangeResourceRecordSetsRequest",
xmlns=connection._xml_namespace
)
e_change_batch = etree.SubElement(e_root, "ChangeBatch")
if comment:
e_comment = etree.SubElement(e_change_batch, "Comment")
e_comment.text = comment
e_changes = etree.SubElement(e_change_batch, "Changes")
# Deletions need to come first in the change sets.
for change in change_set.deletions + change_set.creations:
e_changes.append(write_change(change))
e_tree = etree.ElementTree(element=e_root)
#print(prettyprint_xml(e_root))
fobj = BytesIO()
# This writes bytes.
e_tree.write(fobj, xml_declaration=True, encoding='utf-8', method="xml")
return fobj.getvalue().decode('utf-8') | Forms an XML string that we'll send to Route53 in order to change
record sets.
:param Route53Connection connection: The connection instance used to
query the API.
:param change_set.ChangeSet change_set: The ChangeSet object to create the
XML doc from.
:keyword str comment: An optional comment to go along with the request. | Below is the the instruction that describes the task:
### Input:
Forms an XML string that we'll send to Route53 in order to change
record sets.
:param Route53Connection connection: The connection instance used to
query the API.
:param change_set.ChangeSet change_set: The ChangeSet object to create the
XML doc from.
:keyword str comment: An optional comment to go along with the request.
### Response:
def change_resource_record_set_writer(connection, change_set, comment=None):
"""
Forms an XML string that we'll send to Route53 in order to change
record sets.
:param Route53Connection connection: The connection instance used to
query the API.
:param change_set.ChangeSet change_set: The ChangeSet object to create the
XML doc from.
:keyword str comment: An optional comment to go along with the request.
"""
e_root = etree.Element(
"ChangeResourceRecordSetsRequest",
xmlns=connection._xml_namespace
)
e_change_batch = etree.SubElement(e_root, "ChangeBatch")
if comment:
e_comment = etree.SubElement(e_change_batch, "Comment")
e_comment.text = comment
e_changes = etree.SubElement(e_change_batch, "Changes")
# Deletions need to come first in the change sets.
for change in change_set.deletions + change_set.creations:
e_changes.append(write_change(change))
e_tree = etree.ElementTree(element=e_root)
#print(prettyprint_xml(e_root))
fobj = BytesIO()
# This writes bytes.
e_tree.write(fobj, xml_declaration=True, encoding='utf-8', method="xml")
return fobj.getvalue().decode('utf-8') |
def deserialize(stream_or_string, **options):
'''
Deserialize any string or stream like object into a Python data structure.
:param stream_or_string: stream or string to deserialize.
:param options: options given to the underlying configparser module.
'''
if six.PY3:
cp = configparser.ConfigParser(**options)
else:
cp = configparser.SafeConfigParser(**options)
try:
if not isinstance(stream_or_string, (bytes, six.string_types)):
if six.PY3:
cp.read_file(stream_or_string)
else:
cp.readfp(stream_or_string)
else:
if six.PY3:
cp.read_file(six.moves.StringIO(stream_or_string))
else:
# python2's ConfigParser cannot parse a config from a string
cp.readfp(six.moves.StringIO(stream_or_string))
data = {}
for section_name in cp.sections():
section = {}
for k, v in cp.items(section_name):
section[k] = v
data[section_name] = section
return data
except Exception as error:
raise DeserializationError(error) | Deserialize any string or stream like object into a Python data structure.
:param stream_or_string: stream or string to deserialize.
:param options: options given to the underlying configparser module. | Below is the the instruction that describes the task:
### Input:
Deserialize any string or stream like object into a Python data structure.
:param stream_or_string: stream or string to deserialize.
:param options: options given to the underlying configparser module.
### Response:
def deserialize(stream_or_string, **options):
'''
Deserialize any string or stream like object into a Python data structure.
:param stream_or_string: stream or string to deserialize.
:param options: options given to the underlying configparser module.
'''
if six.PY3:
cp = configparser.ConfigParser(**options)
else:
cp = configparser.SafeConfigParser(**options)
try:
if not isinstance(stream_or_string, (bytes, six.string_types)):
if six.PY3:
cp.read_file(stream_or_string)
else:
cp.readfp(stream_or_string)
else:
if six.PY3:
cp.read_file(six.moves.StringIO(stream_or_string))
else:
# python2's ConfigParser cannot parse a config from a string
cp.readfp(six.moves.StringIO(stream_or_string))
data = {}
for section_name in cp.sections():
section = {}
for k, v in cp.items(section_name):
section[k] = v
data[section_name] = section
return data
except Exception as error:
raise DeserializationError(error) |
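A minimal usage sketch (hypothetical INI text; note that configparser returns every value as a string):
ini_text = "[server]\nhost = 127.0.0.1\nport = 8080\n"
data = deserialize(ini_text)
# data -> {'server': {'host': '127.0.0.1', 'port': '8080'}}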
def _download_vswhere():
"""
Download vswhere to DOWNLOAD_PATH.
"""
print('downloading from', _get_latest_release_url())
try:
from urllib.request import urlopen
with urlopen(_get_latest_release_url()) as response, open(DOWNLOAD_PATH, 'wb') as outfile:
shutil.copyfileobj(response, outfile)
except ImportError:
# Python 2
import urllib
urllib.urlretrieve(_get_latest_release_url(), DOWNLOAD_PATH) | Download vswhere to DOWNLOAD_PATH. | Below is the the instruction that describes the task:
### Input:
Download vswhere to DOWNLOAD_PATH.
### Response:
def _download_vswhere():
"""
Download vswhere to DOWNLOAD_PATH.
"""
print('downloading from', _get_latest_release_url())
try:
from urllib.request import urlopen
with urlopen(_get_latest_release_url()) as response, open(DOWNLOAD_PATH, 'wb') as outfile:
shutil.copyfileobj(response, outfile)
except ImportError:
# Python 2
import urllib
urllib.urlretrieve(_get_latest_release_url(), DOWNLOAD_PATH) |
def get_pmids(self):
"""Get list of all PMIDs associated with edges in the network."""
pmids = []
for ea in self._edge_attributes.values():
edge_pmids = ea.get('pmids')
if edge_pmids:
pmids += edge_pmids
return list(set(pmids)) | Get list of all PMIDs associated with edges in the network. | Below is the the instruction that describes the task:
### Input:
Get list of all PMIDs associated with edges in the network.
### Response:
def get_pmids(self):
"""Get list of all PMIDs associated with edges in the network."""
pmids = []
for ea in self._edge_attributes.values():
edge_pmids = ea.get('pmids')
if edge_pmids:
pmids += edge_pmids
return list(set(pmids)) |
def getPrioritySortkey(self):
"""
Returns the key that will be used to sort the current Analysis, from
highest priority to lowest priority.
:return: string used for sorting
"""
analysis_request = self.getRequest()
if analysis_request is None:
return None
ar_sort_key = analysis_request.getPrioritySortkey()
ar_id = analysis_request.getId().lower()
title = sortable_title(self)
if callable(title):
title = title()
return '{}.{}.{}'.format(ar_sort_key, ar_id, title) | Returns the key that will be used to sort the current Analysis, from
highest priority to lowest priority.
:return: string used for sorting | Below is the the instruction that describes the task:
### Input:
Returns the key that will be used to sort the current Analysis, from
highest priority to lowest priority.
:return: string used for sorting
### Response:
def getPrioritySortkey(self):
"""
Returns the key that will be used to sort the current Analysis, from
highest priority to lowest priority.
:return: string used for sorting
"""
analysis_request = self.getRequest()
if analysis_request is None:
return None
ar_sort_key = analysis_request.getPrioritySortkey()
ar_id = analysis_request.getId().lower()
title = sortable_title(self)
if callable(title):
title = title()
return '{}.{}.{}'.format(ar_sort_key, ar_id, title) |
def transform(self, X):
"""Transform X according to the fitted transformer.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape = [n_samples, n_components]
Transformed array.
"""
if not hasattr(self, '_best_programs'):
raise NotFittedError('SymbolicTransformer not fitted.')
X = check_array(X)
_, n_features = X.shape
if self.n_features_ != n_features:
raise ValueError('Number of features of the model must match the '
'input. Model n_features is %s and input '
'n_features is %s.'
% (self.n_features_, n_features))
X_new = np.array([gp.execute(X) for gp in self._best_programs]).T
return X_new | Transform X according to the fitted transformer.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape = [n_samples, n_components]
Transformed array. | Below is the the instruction that describes the task:
### Input:
Transform X according to the fitted transformer.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape = [n_samples, n_components]
Transformed array.
### Response:
def transform(self, X):
"""Transform X according to the fitted transformer.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape = [n_samples, n_components]
Transformed array.
"""
if not hasattr(self, '_best_programs'):
raise NotFittedError('SymbolicTransformer not fitted.')
X = check_array(X)
_, n_features = X.shape
if self.n_features_ != n_features:
raise ValueError('Number of features of the model must match the '
'input. Model n_features is %s and input '
'n_features is %s.'
% (self.n_features_, n_features))
X_new = np.array([gp.execute(X) for gp in self._best_programs]).T
return X_new |
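A hedged usage sketch, assuming this is gplearn's SymbolicTransformer (parameter values below are illustrative only):
import numpy as np
from gplearn.genetic import SymbolicTransformer

X = np.random.uniform(size=(100, 5))
y = X[:, 0] * X[:, 1] + X[:, 2]
gp = SymbolicTransformer(n_components=3, generations=5, random_state=0)
gp.fit(X, y)
X_new = gp.transform(X)  # shape (100, 3), one column per retained program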
def google_poem(self, message, topic):
"""make a poem about __: show a google poem about __"""
r = requests.get("http://www.google.com/complete/search?output=toolbar&q=" + topic + "%20")
xmldoc = minidom.parseString(r.text)
item_list = xmldoc.getElementsByTagName("suggestion")
context = {"topic": topic, "lines": [x.attributes["data"].value for x in item_list[:4]]}
self.say(rendered_template("gpoem.html", context), message, html=True) | make a poem about __: show a google poem about __ | Below is the the instruction that describes the task:
### Input:
make a poem about __: show a google poem about __
### Response:
def google_poem(self, message, topic):
"""make a poem about __: show a google poem about __"""
r = requests.get("http://www.google.com/complete/search?output=toolbar&q=" + topic + "%20")
xmldoc = minidom.parseString(r.text)
item_list = xmldoc.getElementsByTagName("suggestion")
context = {"topic": topic, "lines": [x.attributes["data"].value for x in item_list[:4]]}
self.say(rendered_template("gpoem.html", context), message, html=True) |
def append(self, report):
"""Append a new CSP report."""
assert report not in self.examples
self.count += 1
if len(self.examples) < self.top:
self.examples.append(report) | Append a new CSP report. | Below is the the instruction that describes the task:
### Input:
Append a new CSP report.
### Response:
def append(self, report):
"""Append a new CSP report."""
assert report not in self.examples
self.count += 1
if len(self.examples) < self.top:
self.examples.append(report) |
def getExim(exim_id):
"""Returns the instrument interface for the exim_id passed in
"""
interfaces = filter(lambda i: i[0]==exim_id, get_instrument_interfaces())
return interfaces and interfaces[0][1] or None | Returns the instrument interface for the exim_id passed in | Below is the the instruction that describes the task:
### Input:
Returns the instrument interface for the exim_id passed in
### Response:
def getExim(exim_id):
"""Returns the instrument interface for the exim_id passed in
"""
interfaces = filter(lambda i: i[0]==exim_id, get_instrument_interfaces())
return interfaces and interfaces[0][1] or None |
def delete_hook(self, id):
"""
:calls: `DELETE /orgs/:owner/hooks/:id <http://developer.github.com/v3/orgs/hooks>`_
:param id: integer
:rtype: None
"""
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
self.url + "/hooks/" + str(id)
) | :calls: `DELETE /orgs/:owner/hooks/:id <http://developer.github.com/v3/orgs/hooks>`_
:param id: integer
:rtype: None | Below is the the instruction that describes the task:
### Input:
:calls: `DELETE /orgs/:owner/hooks/:id <http://developer.github.com/v3/orgs/hooks>`_
:param id: integer
:rtype: None
### Response:
def delete_hook(self, id):
"""
:calls: `DELETE /orgs/:owner/hooks/:id <http://developer.github.com/v3/orgs/hooks>`_
:param id: integer
:rtype: None
"""
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
self.url + "/hooks/" + str(id)
) |
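A hypothetical usage sketch, assuming this is PyGithub's Organization.delete_hook (token, organization name and hook id are placeholders):
from github import Github

gh = Github("<token>")
org = gh.get_organization("my-org")
org.delete_hook(12345)  # removes the webhook with this id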
def repr_part(self):
"""String usable in a space's ``__repr__`` method."""
optargs = [('weighting', array_str(self.array, nprint=10), ''),
('exponent', self.exponent, 2.0)]
return signature_string([], optargs, sep=',\n',
mod=[[], ['!s', ':.4']]) | String usable in a space's ``__repr__`` method. | Below is the the instruction that describes the task:
### Input:
String usable in a space's ``__repr__`` method.
### Response:
def repr_part(self):
"""String usable in a space's ``__repr__`` method."""
optargs = [('weighting', array_str(self.array, nprint=10), ''),
('exponent', self.exponent, 2.0)]
return signature_string([], optargs, sep=',\n',
mod=[[], ['!s', ':.4']]) |
def calc_periods(hour=0, minute=0):
"""Returns a tuple of start_period and end_period.
Assumes that the period is 24-hrs.
Parameters:
- `hour`: the hour from 0 to 23 when the period ends
- `minute`: the minute from 0 to 59 when the period ends
This method will calculate the end of the period as the closest hour/minute
going backwards.
It will also calculate the start of the period as the passed hour/minute
but 24 hrs ago.
Example, if we pass 0, 0 - we will get the events from yesterday's 0:00 midnight
until just before today's midnight.
If we pass 2,0 - we will get the start time as 2am of the previous morning
till 2am of today's morning.
By default it's midnight.
"""
# Calculate the time intervals in a usable form
period_end = datetime.datetime.utcnow().replace(hour=hour,
minute=minute,
second=0,
microsecond=0)
period_start = period_end - datetime.timedelta(days=1)
# period end should be slightly before the midnight.
# hence, we subtract a second
# this will force period_end to store something like:
# datetime.datetime(2016, 5, 19, 23, 59, 59, 999999)
# instead of:
# datetime.datetime(2016, 5, 20, 0, 0, 0, 0)
period_end -= datetime.timedelta(seconds=1)
return (period_start, period_end) | Returns a tuple of start_period and end_period.
Assumes that the period is 24-hrs.
Parameters:
- `hour`: the hour from 0 to 23 when the period ends
- `minute`: the minute from 0 to 59 when the period ends
This method will calculate the end of the period as the closest hour/minute
going backwards.
It will also calculate the start of the period as the passed hour/minute
but 24 hrs ago.
Example, if we pass 0, 0 - we will get the events from yesterday's 0:00 midnight
until just before today's midnight.
If we pass 2,0 - we will get the start time as 2am of the previous morning
till 2am of today's morning.
By default it's midnight. | Below is the the instruction that describes the task:
### Input:
Returns a tuple of start_period and end_period.
Assumes that the period is 24-hrs.
Parameters:
- `hour`: the hour from 0 to 23 when the period ends
- `minute`: the minute from 0 to 59 when the period ends
This method will calculate the end of the period as the closest hour/minute
going backwards.
It will also calculate the start of the period as the passed hour/minute
but 24 hrs ago.
Example, if we pass 0, 0 - we will get the events from yesterday's 0:00 midnight
until just before today's midnight.
If we pass 2,0 - we will get the start time as 2am of the previous morning
till 2am of today's morning.
By default it's midnight.
### Response:
def calc_periods(hour=0, minute=0):
"""Returns a tuple of start_period and end_period.
Assumes that the period is 24-hrs.
Parameters:
- `hour`: the hour from 0 to 23 when the period ends
- `minute`: the minute from 0 to 59 when the period ends
This method will calculate the end of the period as the closest hour/minute
going backwards.
It will also calculate the start of the period as the passed hour/minute
but 24 hrs ago.
Example, if we pass 0, 0 - we will get the events from yesterday's 0:00 midnight
until just before today's midnight.
If we pass 2,0 - we will get the start time as 2am of the previous morning
till 2am of today's morning.
By default it's midnight.
"""
# Calculate the time intervals in a usable form
period_end = datetime.datetime.utcnow().replace(hour=hour,
minute=minute,
second=0,
microsecond=0)
period_start = period_end - datetime.timedelta(days=1)
# period end should be slightly before the midnight.
# hence, we subtract a second
# this will force period_end to store something like:
# datetime.datetime(2016, 5, 19, 23, 59, 59, 999999)
# instead of:
# datetime.datetime(2016, 5, 20, 0, 0, 0, 0)
period_end -= datetime.timedelta(seconds=1)
return (period_start, period_end) |
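A small illustrative call (the concrete dates below are made up; real values depend on the current UTC date):
start, end = calc_periods(hour=2, minute=0)
# start -> e.g. datetime.datetime(2016, 5, 19, 2, 0)        yesterday at 02:00 UTC
# end   -> e.g. datetime.datetime(2016, 5, 20, 1, 59, 59)   one second before today at 02:00 UTC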
def send_post(self, mri, method_name, **params):
"""Abstract method to dispatch a Post to the server
Args:
mri (str): The mri of the Block
method_name (str): The name of the Method within the Block
params: The parameters to send
Returns:
The return results from the server
"""
q = Queue()
request = Post(
path=[mri, method_name],
parameters=params)
request.set_callback(q.put)
IOLoopHelper.call(self._send_request, request)
response = q.get()
if isinstance(response, Error):
raise response.message
else:
return response.value | Abstract method to dispatch a Post to the server
Args:
mri (str): The mri of the Block
method_name (str): The name of the Method within the Block
params: The parameters to send
Returns:
The return results from the server | Below is the the instruction that describes the task:
### Input:
Abstract method to dispatch a Post to the server
Args:
mri (str): The mri of the Block
method_name (str): The name of the Method within the Block
params: The parameters to send
Returns:
The return results from the server
### Response:
def send_post(self, mri, method_name, **params):
"""Abstract method to dispatch a Post to the server
Args:
mri (str): The mri of the Block
method_name (str): The name of the Method within the Block
params: The parameters to send
Returns:
The return results from the server
"""
q = Queue()
request = Post(
path=[mri, method_name],
parameters=params)
request.set_callback(q.put)
IOLoopHelper.call(self._send_request, request)
response = q.get()
if isinstance(response, Error):
raise response.message
else:
return response.value |
def put_settings(self, app=None, index=None, settings=None, es=None):
"""Modify index settings.
Index must exist already.
"""
if not index:
index = self.index
if not app:
app = self.app
if not es:
es = self.es
if not settings:
return
for alias, old_settings in self.es.indices.get_settings(index=index).items():
try:
if test_settings_contain(old_settings['settings']['index'], settings['settings']):
return
except KeyError:
pass
es.indices.close(index=index)
es.indices.put_settings(index=index, body=settings)
es.indices.open(index=index) | Modify index settings.
Index must exist already. | Below is the the instruction that describes the task:
### Input:
Modify index settings.
Index must exist already.
### Response:
def put_settings(self, app=None, index=None, settings=None, es=None):
"""Modify index settings.
Index must exist already.
"""
if not index:
index = self.index
if not app:
app = self.app
if not es:
es = self.es
if not settings:
return
for alias, old_settings in self.es.indices.get_settings(index=index).items():
try:
if test_settings_contain(old_settings['settings']['index'], settings['settings']):
return
except KeyError:
pass
es.indices.close(index=index)
es.indices.put_settings(index=index, body=settings)
es.indices.open(index=index) |
def create_serv_obj(self, tenant_id):
"""Creates and stores the service object associated with a tenant. """
self.service_attr[tenant_id] = ServiceIpSegTenantMap()
self.store_tenant_obj(tenant_id, self.service_attr[tenant_id]) | Creates and stores the service object associated with a tenant. | Below is the the instruction that describes the task:
### Input:
Creates and stores the service object associated with a tenant.
### Response:
def create_serv_obj(self, tenant_id):
"""Creates and stores the service object associated with a tenant. """
self.service_attr[tenant_id] = ServiceIpSegTenantMap()
self.store_tenant_obj(tenant_id, self.service_attr[tenant_id]) |
def write_output_files(self, fh):
"""
Write as a comment into the DAG file the list of output files
for this DAG node.
@param fh: descriptor of open DAG file.
"""
for f in self.__output_files:
print >>fh, "## Job %s generates output file %s" % (self.__name, f) | Write as a comment into the DAG file the list of output files
for this DAG node.
@param fh: descriptor of open DAG file. | Below is the the instruction that describes the task:
### Input:
Write as a comment into the DAG file the list of output files
for this DAG node.
@param fh: descriptor of open DAG file.
### Response:
def write_output_files(self, fh):
"""
Write as a comment into the DAG file the list of output files
for this DAG node.
@param fh: descriptor of open DAG file.
"""
for f in self.__output_files:
print >>fh, "## Job %s generates output file %s" % (self.__name, f) |
def label(self) -> str:
"""A latex formatted label representing constant expression and united value."""
label = self.expression.replace("_", "\\;")
if self.units_kind:
symbol = wt_units.get_symbol(self.units)
for v in self.variables:
vl = "%s_{%s}" % (symbol, v.label)
vl = vl.replace("_{}", "") # label can be empty, no empty subscripts
label = label.replace(v.natural_name, vl)
val = (
round(self.value, self.round_spec)
if self.round_spec is not None
else self.value
)
label += r"\,=\,{}".format(format(val, self.format_spec))
if self.units_kind:
units_dictionary = getattr(wt_units, self.units_kind)
label += r"\,"
label += units_dictionary[self.units][2]
label = r"$\mathsf{%s}$" % label
return label | A latex formatted label representing constant expression and united value. | Below is the the instruction that describes the task:
### Input:
A latex formatted label representing constant expression and united value.
### Response:
def label(self) -> str:
"""A latex formatted label representing constant expression and united value."""
label = self.expression.replace("_", "\\;")
if self.units_kind:
symbol = wt_units.get_symbol(self.units)
for v in self.variables:
vl = "%s_{%s}" % (symbol, v.label)
vl = vl.replace("_{}", "") # label can be empty, no empty subscripts
label = label.replace(v.natural_name, vl)
val = (
round(self.value, self.round_spec)
if self.round_spec is not None
else self.value
)
label += r"\,=\,{}".format(format(val, self.format_spec))
if self.units_kind:
units_dictionary = getattr(wt_units, self.units_kind)
label += r"\,"
label += units_dictionary[self.units][2]
label = r"$\mathsf{%s}$" % label
return label |
def _urls(self):
"""Constructs the URLconf for Horizon from registered Dashboards."""
urlpatterns = self._get_default_urlpatterns()
self._autodiscover()
# Discover each dashboard's panels.
for dash in self._registry.values():
dash._autodiscover()
# Load the plugin-based panel configuration
self._load_panel_customization()
# Allow for override modules
if self._conf.get("customization_module", None):
customization_module = self._conf["customization_module"]
bits = customization_module.split('.')
mod_name = bits.pop()
package = '.'.join(bits)
mod = import_module(package)
try:
before_import_registry = copy.copy(self._registry)
import_module('%s.%s' % (package, mod_name))
except Exception:
self._registry = before_import_registry
if module_has_submodule(mod, mod_name):
raise
# Compile the dynamic urlconf.
for dash in self._registry.values():
urlpatterns.append(url(r'^%s/' % dash.slug,
_wrapped_include(dash._decorated_urls)))
# Return the three arguments to django.conf.urls.include
return urlpatterns, self.namespace, self.slug | Constructs the URLconf for Horizon from registered Dashboards. | Below is the the instruction that describes the task:
### Input:
Constructs the URLconf for Horizon from registered Dashboards.
### Response:
def _urls(self):
"""Constructs the URLconf for Horizon from registered Dashboards."""
urlpatterns = self._get_default_urlpatterns()
self._autodiscover()
# Discover each dashboard's panels.
for dash in self._registry.values():
dash._autodiscover()
# Load the plugin-based panel configuration
self._load_panel_customization()
# Allow for override modules
if self._conf.get("customization_module", None):
customization_module = self._conf["customization_module"]
bits = customization_module.split('.')
mod_name = bits.pop()
package = '.'.join(bits)
mod = import_module(package)
try:
before_import_registry = copy.copy(self._registry)
import_module('%s.%s' % (package, mod_name))
except Exception:
self._registry = before_import_registry
if module_has_submodule(mod, mod_name):
raise
# Compile the dynamic urlconf.
for dash in self._registry.values():
urlpatterns.append(url(r'^%s/' % dash.slug,
_wrapped_include(dash._decorated_urls)))
# Return the three arguments to django.conf.urls.include
return urlpatterns, self.namespace, self.slug |
def generate_random_nhs_number() -> int:
"""
Returns a random valid NHS number, as an ``int``.
"""
check_digit = 10 # NHS numbers with this check digit are all invalid
while check_digit == 10:
digits = [random.randint(1, 9)] # don't start with a zero
digits.extend([random.randint(0, 9) for _ in range(8)])
# ... length now 9
check_digit = nhs_check_digit(digits)
# noinspection PyUnboundLocalVariable
digits.append(check_digit)
return int("".join([str(d) for d in digits])) | Returns a random valid NHS number, as an ``int``. | Below is the the instruction that describes the task:
### Input:
Returns a random valid NHS number, as an ``int``.
### Response:
def generate_random_nhs_number() -> int:
"""
Returns a random valid NHS number, as an ``int``.
"""
check_digit = 10 # NHS numbers with this check digit are all invalid
while check_digit == 10:
digits = [random.randint(1, 9)] # don't start with a zero
digits.extend([random.randint(0, 9) for _ in range(8)])
# ... length now 9
check_digit = nhs_check_digit(digits)
# noinspection PyUnboundLocalVariable
digits.append(check_digit)
return int("".join([str(d) for d in digits])) |
def fetch_file(
download_url,
filename=None,
decompress=False,
subdir=None,
force=False,
timeout=None,
use_wget_if_available=False):
"""
Download a remote file and store it locally in a cache directory. Don't
download it again if it's already present (unless `force` is True.)
Parameters
----------
download_url : str
Remote URL of file to download.
filename : str, optional
Local filename, used as cache key. If omitted, then determine the local
filename from the URL.
decompress : bool, optional
By default any file whose remote extension is one of (".zip", ".gzip")
and whose local filename lacks this suffix is decompressed. If a local
filename wasn't provided but you still want to decompress the stored
data then set this option to True.
subdir : str, optional
Group downloads in a single subdirectory.
force : bool, optional
By default, a remote file is not downloaded if it's already present.
However, with this argument set to True, it will be overwritten.
timeout : float, optional
Timeout for download in seconds, default is None which uses
global timeout.
use_wget_if_available: bool, optional
If the `wget` command is available, use that for download instead
of Python libraries (default False)
Returns the full path of the local file.
"""
filename = build_local_filename(download_url, filename, decompress)
full_path = build_path(filename, subdir)
if not os.path.exists(full_path) or force:
logger.info("Fetching %s from URL %s", filename, download_url)
_download_and_decompress_if_necessary(
full_path=full_path,
download_url=download_url,
timeout=timeout,
use_wget_if_available=use_wget_if_available)
else:
logger.info("Cached file %s from URL %s", filename, download_url)
return full_path | Download a remote file and store it locally in a cache directory. Don't
download it again if it's already present (unless `force` is True.)
Parameters
----------
download_url : str
Remote URL of file to download.
filename : str, optional
Local filename, used as cache key. If omitted, then determine the local
filename from the URL.
decompress : bool, optional
By default any file whose remote extension is one of (".zip", ".gzip")
and whose local filename lacks this suffix is decompressed. If a local
filename wasn't provided but you still want to decompress the stored
data then set this option to True.
subdir : str, optional
Group downloads in a single subdirectory.
force : bool, optional
By default, a remote file is not downloaded if it's already present.
However, with this argument set to True, it will be overwritten.
timeout : float, optional
Timeout for download in seconds, default is None which uses
global timeout.
use_wget_if_available: bool, optional
If the `wget` command is available, use that for download instead
of Python libraries (default False)
Returns the full path of the local file. | Below is the the instruction that describes the task:
### Input:
Download a remote file and store it locally in a cache directory. Don't
download it again if it's already present (unless `force` is True.)
Parameters
----------
download_url : str
Remote URL of file to download.
filename : str, optional
Local filename, used as cache key. If omitted, then determine the local
filename from the URL.
decompress : bool, optional
By default any file whose remote extension is one of (".zip", ".gzip")
and whose local filename lacks this suffix is decompressed. If a local
filename wasn't provided but you still want to decompress the stored
data then set this option to True.
subdir : str, optional
Group downloads in a single subdirectory.
force : bool, optional
By default, a remote file is not downloaded if it's already present.
However, with this argument set to True, it will be overwritten.
timeout : float, optional
Timeout for download in seconds, default is None which uses
global timeout.
use_wget_if_available: bool, optional
If the `wget` command is available, use that for download instead
of Python libraries (default False)
Returns the full path of the local file.
### Response:
def fetch_file(
download_url,
filename=None,
decompress=False,
subdir=None,
force=False,
timeout=None,
use_wget_if_available=False):
"""
Download a remote file and store it locally in a cache directory. Don't
download it again if it's already present (unless `force` is True.)
Parameters
----------
download_url : str
Remote URL of file to download.
filename : str, optional
Local filename, used as cache key. If omitted, then determine the local
filename from the URL.
decompress : bool, optional
By default any file whose remote extension is one of (".zip", ".gzip")
and whose local filename lacks this suffix is decompressed. If a local
filename wasn't provided but you still want to decompress the stored
data then set this option to True.
subdir : str, optional
Group downloads in a single subdirectory.
force : bool, optional
By default, a remote file is not downloaded if it's already present.
However, with this argument set to True, it will be overwritten.
timeout : float, optional
Timeout for download in seconds, default is None which uses
global timeout.
use_wget_if_available: bool, optional
If the `wget` command is available, use that for download instead
of Python libraries (default False)
Returns the full path of the local file.
"""
filename = build_local_filename(download_url, filename, decompress)
full_path = build_path(filename, subdir)
if not os.path.exists(full_path) or force:
logger.info("Fetching %s from URL %s", filename, download_url)
_download_and_decompress_if_necessary(
full_path=full_path,
download_url=download_url,
timeout=timeout,
use_wget_if_available=use_wget_if_available)
else:
logger.info("Cached file %s from URL %s", filename, download_url)
return full_path |
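A hedged usage sketch (the URL and names are placeholders; the cache location comes from the surrounding module's build_path helper):
path = fetch_file(
    "https://example.org/data/annotations.csv",
    filename="annotations.csv",  # used as the cache key
    subdir="annotations",
)
print(path)  # full local path inside the cache directory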
def _save_assignment(self, node, name=None):
"""save assignement situation since node.parent is not available yet"""
if self._global_names and node.name in self._global_names[-1]:
node.root().set_local(node.name, node)
else:
node.parent.set_local(node.name, node) | save assignment situation since node.parent is not available yet | Below is the the instruction that describes the task:
### Input:
save assignment situation since node.parent is not available yet
### Response:
def _save_assignment(self, node, name=None):
"""save assignement situation since node.parent is not available yet"""
if self._global_names and node.name in self._global_names[-1]:
node.root().set_local(node.name, node)
else:
node.parent.set_local(node.name, node) |
def aggregate(d, y_size, x_size):
"""Average every 4 elements (2x2) in a 2D array"""
if d.ndim != 2:
# we can't guarantee what blocks we are getting and how
# it should be reshaped to do the averaging.
raise ValueError("Can't aggregrate (reduce) data arrays with "
"more than 2 dimensions.")
if not (x_size.is_integer() and y_size.is_integer()):
raise ValueError("Aggregation factors are not integers")
for agg_size, chunks in zip([y_size, x_size], d.chunks):
for chunk_size in chunks:
if chunk_size % agg_size != 0:
raise ValueError("Aggregation requires arrays with "
"shapes and chunks divisible by the "
"factor")
new_chunks = (tuple(int(x / y_size) for x in d.chunks[0]),
tuple(int(x / x_size) for x in d.chunks[1]))
return da.core.map_blocks(_mean, d, y_size, x_size, dtype=d.dtype, chunks=new_chunks) | Average every 4 elements (2x2) in a 2D array | Below is the the instruction that describes the task:
### Input:
Average every 4 elements (2x2) in a 2D array
### Response:
def aggregate(d, y_size, x_size):
"""Average every 4 elements (2x2) in a 2D array"""
if d.ndim != 2:
# we can't guarantee what blocks we are getting and how
# it should be reshaped to do the averaging.
raise ValueError("Can't aggregrate (reduce) data arrays with "
"more than 2 dimensions.")
if not (x_size.is_integer() and y_size.is_integer()):
raise ValueError("Aggregation factors are not integers")
for agg_size, chunks in zip([y_size, x_size], d.chunks):
for chunk_size in chunks:
if chunk_size % agg_size != 0:
raise ValueError("Aggregation requires arrays with "
"shapes and chunks divisible by the "
"factor")
new_chunks = (tuple(int(x / y_size) for x in d.chunks[0]),
tuple(int(x / x_size) for x in d.chunks[1]))
return da.core.map_blocks(_mean, d, y_size, x_size, dtype=d.dtype, chunks=new_chunks) |
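A minimal sketch of how this could be exercised (assumes dask and the module-level `_mean` helper used by map_blocks; the factors are floats because the function calls .is_integer() on them):
import numpy as np
import dask.array as da

arr = da.from_array(np.arange(16, dtype=float).reshape(4, 4), chunks=(2, 2))
small = aggregate(arr, 2.0, 2.0)  # 2x2 block means, result shape (2, 2)
print(small.compute())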
def _to_zip_product(sweep: Sweep) -> Product:
"""Converts sweep to a product of zips of single sweeps, if possible."""
if not isinstance(sweep, Product):
sweep = Product(sweep)
if not all(isinstance(f, Zip) for f in sweep.factors):
factors = [f if isinstance(f, Zip) else Zip(f) for f in sweep.factors]
sweep = Product(*factors)
for factor in sweep.factors:
for term in cast(Zip, factor).sweeps:
if not isinstance(term, SingleSweep):
raise ValueError('cannot convert to zip-product form: {}'
.format(sweep))
return sweep | Converts sweep to a product of zips of single sweeps, if possible. | Below is the the instruction that describes the task:
### Input:
Converts sweep to a product of zips of single sweeps, if possible.
### Response:
def _to_zip_product(sweep: Sweep) -> Product:
"""Converts sweep to a product of zips of single sweeps, if possible."""
if not isinstance(sweep, Product):
sweep = Product(sweep)
if not all(isinstance(f, Zip) for f in sweep.factors):
factors = [f if isinstance(f, Zip) else Zip(f) for f in sweep.factors]
sweep = Product(*factors)
for factor in sweep.factors:
for term in cast(Zip, factor).sweeps:
if not isinstance(term, SingleSweep):
raise ValueError('cannot convert to zip-product form: {}'
.format(sweep))
return sweep |
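A hedged illustration, assuming the Product, Zip and Points types from cirq's study module (Points is a SingleSweep):
sweep = Product(Zip(Points('a', [1, 2])), Points('b', [3, 4]))
normalized = _to_zip_product(sweep)  # the bare second factor gets wrapped in a Zip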
def patch_config(config, data):
"""recursively 'patch' `config` with `data`
:returns: `!True` if the `config` was changed"""
is_changed = False
for name, value in data.items():
if value is None:
if config.pop(name, None) is not None:
is_changed = True
elif name in config:
if isinstance(value, dict):
if isinstance(config[name], dict):
if patch_config(config[name], value):
is_changed = True
else:
config[name] = value
is_changed = True
elif str(config[name]) != str(value):
config[name] = value
is_changed = True
else:
config[name] = value
is_changed = True
return is_changed | recursively 'patch' `config` with `data`
:returns: `!True` if the `config` was changed | Below is the the instruction that describes the task:
### Input:
recursively 'patch' `config` with `data`
:returns: `!True` if the `config` was changed
### Response:
def patch_config(config, data):
"""recursively 'patch' `config` with `data`
:returns: `!True` if the `config` was changed"""
is_changed = False
for name, value in data.items():
if value is None:
if config.pop(name, None) is not None:
is_changed = True
elif name in config:
if isinstance(value, dict):
if isinstance(config[name], dict):
if patch_config(config[name], value):
is_changed = True
else:
config[name] = value
is_changed = True
elif str(config[name]) != str(value):
config[name] = value
is_changed = True
else:
config[name] = value
is_changed = True
return is_changed |
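A quick worked example (values are arbitrary): nested keys are merged recursively and a None value removes a key.
config = {'postgresql': {'listen': '127.0.0.1:5432', 'use_pg_rewind': True}}
patch = {'postgresql': {'listen': '0.0.0.0:5432', 'use_pg_rewind': None}}
changed = patch_config(config, patch)
# changed -> True
# config  -> {'postgresql': {'listen': '0.0.0.0:5432'}}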
def url_unescape( # noqa: F811
value: Union[str, bytes], encoding: Optional[str] = "utf-8", plus: bool = True
) -> Union[str, bytes]:
"""Decodes the given value from a URL.
The argument may be either a byte or unicode string.
If encoding is None, the result will be a byte string. Otherwise,
the result is a unicode string in the specified encoding.
If ``plus`` is true (the default), plus signs will be interpreted
as spaces (literal plus signs must be represented as "%2B"). This
is appropriate for query strings and form-encoded values but not
for the path component of a URL. Note that this default is the
reverse of Python's urllib module.
.. versionadded:: 3.1
The ``plus`` argument
"""
if encoding is None:
if plus:
# unquote_to_bytes doesn't have a _plus variant
value = to_basestring(value).replace("+", " ")
return urllib.parse.unquote_to_bytes(value)
else:
unquote = urllib.parse.unquote_plus if plus else urllib.parse.unquote
return unquote(to_basestring(value), encoding=encoding) | Decodes the given value from a URL.
The argument may be either a byte or unicode string.
If encoding is None, the result will be a byte string. Otherwise,
the result is a unicode string in the specified encoding.
If ``plus`` is true (the default), plus signs will be interpreted
as spaces (literal plus signs must be represented as "%2B"). This
is appropriate for query strings and form-encoded values but not
for the path component of a URL. Note that this default is the
reverse of Python's urllib module.
.. versionadded:: 3.1
The ``plus`` argument | Below is the the instruction that describes the task:
### Input:
Decodes the given value from a URL.
The argument may be either a byte or unicode string.
If encoding is None, the result will be a byte string. Otherwise,
the result is a unicode string in the specified encoding.
If ``plus`` is true (the default), plus signs will be interpreted
as spaces (literal plus signs must be represented as "%2B"). This
is appropriate for query strings and form-encoded values but not
for the path component of a URL. Note that this default is the
reverse of Python's urllib module.
.. versionadded:: 3.1
The ``plus`` argument
### Response:
def url_unescape( # noqa: F811
value: Union[str, bytes], encoding: Optional[str] = "utf-8", plus: bool = True
) -> Union[str, bytes]:
"""Decodes the given value from a URL.
The argument may be either a byte or unicode string.
If encoding is None, the result will be a byte string. Otherwise,
the result is a unicode string in the specified encoding.
If ``plus`` is true (the default), plus signs will be interpreted
as spaces (literal plus signs must be represented as "%2B"). This
is appropriate for query strings and form-encoded values but not
for the path component of a URL. Note that this default is the
reverse of Python's urllib module.
.. versionadded:: 3.1
The ``plus`` argument
"""
if encoding is None:
if plus:
# unquote_to_bytes doesn't have a _plus variant
value = to_basestring(value).replace("+", " ")
return urllib.parse.unquote_to_bytes(value)
else:
unquote = urllib.parse.unquote_plus if plus else urllib.parse.unquote
return unquote(to_basestring(value), encoding=encoding) |
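A few illustrative calls (behaviour as described in the docstring above):
url_unescape("a+b%2Bc")                # -> 'a b+c'  (plus treated as space)
url_unescape("a+b%2Bc", plus=False)    # -> 'a+b+c'  (plus kept literal)
url_unescape("%C3%A9", encoding=None)  # -> b'\xc3\xa9'  (raw bytes)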
def server_socket(self, config):
""" :meth:`.WNetworkNativeTransportProto.server_socket` method implementation
"""
if self.__server_socket is None:
self.__server_socket = self.create_server_socket(config)
self.__server_socket.bind(self.bind_socket(config).pair())
return self.__server_socket | :meth:`.WNetworkNativeTransportProto.server_socket` method implementation | Below is the the instruction that describes the task:
### Input:
:meth:`.WNetworkNativeTransportProto.server_socket` method implementation
### Response:
def server_socket(self, config):
""" :meth:`.WNetworkNativeTransportProto.server_socket` method implementation
"""
if self.__server_socket is None:
self.__server_socket = self.create_server_socket(config)
self.__server_socket.bind(self.bind_socket(config).pair())
return self.__server_socket |
def list_secties_by_afdeling(self, afdeling):
'''
List all `secties` in a `kadastrale afdeling`.
:param afdeling: The :class:`Afdeling` for which the `secties` are \
wanted. Can also be the id of an `afdeling`.
:rtype: A :class:`list` of `Sectie`.
'''
try:
aid = afdeling.id
gid = afdeling.gemeente.id
except AttributeError:
aid = afdeling
afdeling = self.get_kadastrale_afdeling_by_id(aid)
gid = afdeling.gemeente.id
afdeling.clear_gateway()
def creator():
url = self.base_url + '/municipality/%s/department/%s/section' % (gid, aid)
h = self.base_headers
res = capakey_rest_gateway_request(url, h).json()
return [
Sectie(
r['sectionCode'],
afdeling
) for r in res['sections']
]
if self.caches['long'].is_configured:
key = 'list_secties_by_afdeling_rest#%s' % aid
secties = self.caches['long'].get_or_create(key, creator)
else:
secties = creator()
for s in secties:
s.set_gateway(self)
return secties | List all `secties` in a `kadastrale afdeling`.
:param afdeling: The :class:`Afdeling` for which the `secties` are \
wanted. Can also be the id of an `afdeling`.
:rtype: A :class:`list` of `Sectie`. | Below is the the instruction that describes the task:
### Input:
List all `secties` in a `kadastrale afdeling`.
:param afdeling: The :class:`Afdeling` for which the `secties` are \
wanted. Can also be the id of an `afdeling`.
:rtype: A :class:`list` of `Sectie`.
### Response:
def list_secties_by_afdeling(self, afdeling):
'''
List all `secties` in a `kadastrale afdeling`.
:param afdeling: The :class:`Afdeling` for which the `secties` are \
wanted. Can also be the id of an `afdeling`.
:rtype: A :class:`list` of `Sectie`.
'''
try:
aid = afdeling.id
gid = afdeling.gemeente.id
except AttributeError:
aid = afdeling
afdeling = self.get_kadastrale_afdeling_by_id(aid)
gid = afdeling.gemeente.id
afdeling.clear_gateway()
def creator():
url = self.base_url + '/municipality/%s/department/%s/section' % (gid, aid)
h = self.base_headers
res = capakey_rest_gateway_request(url, h).json()
return [
Sectie(
r['sectionCode'],
afdeling
) for r in res['sections']
]
if self.caches['long'].is_configured:
key = 'list_secties_by_afdeling_rest#%s' % aid
secties = self.caches['long'].get_or_create(key, creator)
else:
secties = creator()
for s in secties:
s.set_gateway(self)
return secties |
def get_manifest_from_meta(metaurl, name):
"""
Extract manifest url from metadata url
:param metaurl: Url to metadata
:param name: Name of application to extract
:return:
"""
if 'http' in metaurl:
kit = yaml.safe_load(requests.get(url=metaurl).content)['kit']['applications']
else:
kit = yaml.safe_load(open(metaurl).read())['kit']['applications']
app_urls = [x['manifest'] for x in kit if x['name'] == name]
assert len(app_urls) == 1
return app_urls[0] | Extract manifest url from metadata url
:param metaurl: Url to metadata
:param name: Name of application to extract
:return: | Below is the the instruction that describes the task:
### Input:
Extract manifest url from metadata url
:param metaurl: Url to metadata
:param name: Name of application to extract
:return:
### Response:
def get_manifest_from_meta(metaurl, name):
"""
Extract manifest url from metadata url
:param metaurl: Url to metadata
:param name: Name of application to extract
:return:
"""
if 'http' in metaurl:
kit = yaml.safe_load(requests.get(url=metaurl).content)['kit']['applications']
else:
kit = yaml.safe_load(open(metaurl).read())['kit']['applications']
app_urls = [x['manifest'] for x in kit if x['name'] == name]
assert len(app_urls) == 1
return app_urls[0] |
def get_display(display):
"""dname, protocol, host, dno, screen = get_display(display)
Parse DISPLAY into its components. If DISPLAY is None, use
the default display. The return values are:
DNAME -- the full display name (string)
PROTOCOL -- the protocol to use (None if automatic)
HOST -- the host name (string, possibly empty)
DNO -- display number (integer)
SCREEN -- default screen number (integer)
"""
modname = _display_mods.get(platform, _default_display_mod)
mod = _relative_import(modname)
return mod.get_display(display) | dname, protocol, host, dno, screen = get_display(display)
Parse DISPLAY into its components. If DISPLAY is None, use
the default display. The return values are:
DNAME -- the full display name (string)
PROTOCOL -- the protocol to use (None if automatic)
HOST -- the host name (string, possibly empty)
DNO -- display number (integer)
SCREEN -- default screen number (integer) | Below is the the instruction that describes the task:
### Input:
dname, protocol, host, dno, screen = get_display(display)
Parse DISPLAY into its components. If DISPLAY is None, use
the default display. The return values are:
DNAME -- the full display name (string)
PROTOCOL -- the protocol to use (None if automatic)
HOST -- the host name (string, possibly empty)
DNO -- display number (integer)
SCREEN -- default screen number (integer)
### Response:
def get_display(display):
"""dname, protocol, host, dno, screen = get_display(display)
Parse DISPLAY into its components. If DISPLAY is None, use
the default display. The return values are:
DNAME -- the full display name (string)
PROTOCOL -- the protocol to use (None if automatic)
HOST -- the host name (string, possibly empty)
DNO -- display number (integer)
SCREEN -- default screen number (integer)
"""
modname = _display_mods.get(platform, _default_display_mod)
mod = _relative_import(modname)
return mod.get_display(display) |
def switch(self, *args):
"""
Method that attempts to change the switch to the opposite of its
current state. Calls either switch_on() or switch_off() to accomplish
this.
:param kwargs: a variable length dictionary of key-pair arguments
passed through to either switch_on() or switch_off()
:return: Boolean. Returns True if the switch changes state
"""
if self.is_switched():
return self.switch_off(*args)
else:
return self.switch_on(*args) | Method that attempts to change the switch to the opposite of its
current state. Calls either switch_on() or switch_off() to accomplish
this.
:param kwargs: a variable length dictionary of key-pair arguments
passed through to either switch_on() or switch_off()
:return: Boolean. Returns True if the switch changes state | Below is the the instruction that describes the task:
### Input:
Method that attempts to change the switch to the opposite of its
current state. Calls either switch_on() or switch_off() to accomplish
this.
:param kwargs: a variable length dictionary of key-pair arguments
passed through to either switch_on() or switch_off()
:return: Boolean. Returns True if the switch changes state
### Response:
def switch(self, *args):
"""
Method that attempts to change the switch to the opposite of its
current state. Calls either switch_on() or switch_off() to accomplish
this.
:param kwargs: a variable length dictionary of key-pair arguments
passed through to either switch_on() or switch_off()
:return: Boolean. Returns True if the switch changes state
"""
if self.is_switched():
return self.switch_off(*args)
else:
return self.switch_on(*args) |
def get_lb_conn(dd_driver=None):
'''
Return a load-balancer conn object
'''
vm_ = get_configured_provider()
region = config.get_cloud_config_value(
'region', vm_, __opts__
)
user_id = config.get_cloud_config_value(
'user_id', vm_, __opts__
)
key = config.get_cloud_config_value(
'key', vm_, __opts__
)
if not dd_driver:
raise SaltCloudSystemExit(
'Missing dimensiondata_driver for get_lb_conn method.'
)
return get_driver_lb(Provider_lb.DIMENSIONDATA)(user_id, key, region=region) | Return a load-balancer conn object | Below is the the instruction that describes the task:
### Input:
Return a load-balancer conn object
### Response:
def get_lb_conn(dd_driver=None):
'''
Return a load-balancer conn object
'''
vm_ = get_configured_provider()
region = config.get_cloud_config_value(
'region', vm_, __opts__
)
user_id = config.get_cloud_config_value(
'user_id', vm_, __opts__
)
key = config.get_cloud_config_value(
'key', vm_, __opts__
)
if not dd_driver:
raise SaltCloudSystemExit(
'Missing dimensiondata_driver for get_lb_conn method.'
)
return get_driver_lb(Provider_lb.DIMENSIONDATA)(user_id, key, region=region) |
def list_runners(*args):
'''
List the runners loaded on the minion
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' sys.list_runners
Runner names can be specified as globs.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' sys.list_runners 'm*'
'''
run_ = salt.runner.Runner(__opts__)
runners = set()
if not args:
for func in run_.functions:
runners.add(func.split('.')[0])
return sorted(runners)
for module in args:
if '*' in module:
for func in fnmatch.filter(run_.functions, module):
runners.add(func.split('.')[0])
else:
for func in run_.functions:
mod_test = func.split('.')[0]
if mod_test == module:
runners.add(mod_test)
return sorted(runners) | List the runners loaded on the minion
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' sys.list_runners
Runner names can be specified as globs.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' sys.list_runners 'm*' | Below is the the instruction that describes the task:
### Input:
List the runners loaded on the minion
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' sys.list_runners
Runner names can be specified as globs.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' sys.list_runners 'm*'
### Response:
def list_runners(*args):
'''
List the runners loaded on the minion
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' sys.list_runners
Runner names can be specified as globs.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' sys.list_runners 'm*'
'''
run_ = salt.runner.Runner(__opts__)
runners = set()
if not args:
for func in run_.functions:
runners.add(func.split('.')[0])
return sorted(runners)
for module in args:
if '*' in module:
for func in fnmatch.filter(run_.functions, module):
runners.add(func.split('.')[0])
else:
for func in run_.functions:
mod_test = func.split('.')[0]
if mod_test == module:
runners.add(mod_test)
return sorted(runners) |
def coalesce(self):
"""Merge contiguous elements of this list into single objects
This method implicitly sorts and potentially shortens this list.
"""
self.sort(key=lambda ts: ts.t0.value)
i = j = 0
N = len(self)
while j < N:
this = self[j]
j += 1
if j < N and this.is_contiguous(self[j]) == 1:
while j < N and this.is_contiguous(self[j]):
try:
this = self[i] = this.append(self[j])
except ValueError as exc:
if 'cannot resize this array' in str(exc):
this = this.copy()
this = self[i] = this.append(self[j])
else:
raise
j += 1
else:
self[i] = this
i += 1
del self[i:]
return self | Merge contiguous elements of this list into single objects
This method implicitly sorts and potentially shortens this list. | Below is the the instruction that describes the task:
### Input:
Merge contiguous elements of this list into single objects
This method implicitly sorts and potentially shortens this list.
### Response:
def coalesce(self):
"""Merge contiguous elements of this list into single objects
This method implicitly sorts and potentially shortens this list.
"""
self.sort(key=lambda ts: ts.t0.value)
i = j = 0
N = len(self)
while j < N:
this = self[j]
j += 1
if j < N and this.is_contiguous(self[j]) == 1:
while j < N and this.is_contiguous(self[j]):
try:
this = self[i] = this.append(self[j])
except ValueError as exc:
if 'cannot resize this array' in str(exc):
this = this.copy()
this = self[i] = this.append(self[j])
else:
raise
j += 1
else:
self[i] = this
i += 1
del self[i:]
return self |
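The same merge idea, reduced to plain (start, end) spans for illustration; the helper name and tuple representation are hypothetical, not part of the original API:
def _coalesce_spans(spans):
    # Sort by start, then merge spans whose start touches the previous end.
    spans = sorted(spans)
    merged = []
    for start, end in spans:
        if merged and start == merged[-1][1]:
            merged[-1] = (merged[-1][0], end)
        else:
            merged.append((start, end))
    return merged
print(_coalesce_spans([(4, 6), (0, 2), (2, 4)]))  # [(0, 6)]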
def base_elts(elt, cls=None, depth=None):
"""Get bases elements of the input elt.
- If elt is an instance, get class and all base classes.
- If elt is a method, get all base methods.
- If elt is a class, get all base classes.
- In other case, get an empty list.
:param elt: supposed inherited elt.
:param cls: cls from where find attributes equal to elt. If None,
it is found as much as possible. Required in python3 for function
classes.
:type cls: type or list
:param int depth: search depth. If None (default), depth is maximal.
:return: elt bases elements. If elt has no base elements, the result is empty.
:rtype: list
"""
result = []
elt_name = getattr(elt, '__name__', None)
if elt_name is not None:
cls = [] if cls is None else ensureiterable(cls)
elt_is_class = False
# if cls is None and elt is routine, it is possible to find the cls
if not cls and isroutine(elt):
if hasattr(elt, '__self__'): # from the instance
instance = get_method_self(elt) # get instance
if instance is None and PY2: # get base im_class if PY2
cls = list(elt.im_class.__bases__)
else: # use instance class
cls = [instance.__class__]
# cls is elt if elt is a class
elif isclass(elt):
elt_is_class = True
cls = list(elt.__bases__)
if cls: # if cls is not empty, find all base classes
index_of_found_classes = 0 # get last visited class index
visited_classes = set(cls) # cache for visited classes
len_classes = len(cls)
if depth is None: # if depth is None, get maximal value
depth = -1 # set negative value
while depth != 0 and index_of_found_classes != len_classes:
len_classes = len(cls)
for index in range(index_of_found_classes, len_classes):
_cls = cls[index]
for base_cls in _cls.__bases__:
if base_cls in visited_classes:
continue
else:
visited_classes.add(base_cls)
cls.append(base_cls)
index_of_found_classes = len_classes
depth -= 1
if elt_is_class:
# if cls is elt, result is classes minus first class
result = cls
elif isroutine(elt):
# get an elt to compare with found element
if ismethod(elt):
elt_to_compare = get_method_function(elt)
else:
elt_to_compare = elt
for _cls in cls: # for all classes
# get possible base elt
b_elt = getattr(_cls, elt_name, None)
if b_elt is not None:
# compare funcs
if ismethod(b_elt):
bec = get_method_function(b_elt)
else:
bec = b_elt
# if matching, add to result
if bec is elt_to_compare:
result.append(b_elt)
return result | Get bases elements of the input elt.
- If elt is an instance, get class and all base classes.
- If elt is a method, get all base methods.
- If elt is a class, get all base classes.
- In other case, get an empty list.
:param elt: supposed inherited elt.
:param cls: cls from where find attributes equal to elt. If None,
it is found as much as possible. Required in python3 for function
classes.
:type cls: type or list
:param int depth: search depth. If None (default), depth is maximal.
:return: elt bases elements. If elt has no base elements, the result is empty.
:rtype: list | Below is the the instruction that describes the task:
### Input:
Get bases elements of the input elt.
- If elt is an instance, get class and all base classes.
- If elt is a method, get all base methods.
- If elt is a class, get all base classes.
- In other case, get an empty list.
:param elt: supposed inherited elt.
:param cls: cls from where find attributes equal to elt. If None,
it is found as much as possible. Required in python3 for function
classes.
:type cls: type or list
:param int depth: search depth. If None (default), depth is maximal.
:return: elt bases elements. If elt has no base elements, the result is empty.
:rtype: list
### Response:
def base_elts(elt, cls=None, depth=None):
"""Get bases elements of the input elt.
- If elt is an instance, get class and all base classes.
- If elt is a method, get all base methods.
- If elt is a class, get all base classes.
- In other case, get an empty list.
:param elt: supposed inherited elt.
:param cls: cls from where find attributes equal to elt. If None,
it is found as much as possible. Required in python3 for function
classes.
:type cls: type or list
:param int depth: search depth. If None (default), depth is maximal.
:return: elt bases elements. If elt has no base elements, the result is empty.
:rtype: list
"""
result = []
elt_name = getattr(elt, '__name__', None)
if elt_name is not None:
cls = [] if cls is None else ensureiterable(cls)
elt_is_class = False
# if cls is None and elt is routine, it is possible to find the cls
if not cls and isroutine(elt):
if hasattr(elt, '__self__'): # from the instance
instance = get_method_self(elt) # get instance
if instance is None and PY2: # get base im_class if PY2
cls = list(elt.im_class.__bases__)
else: # use instance class
cls = [instance.__class__]
# cls is elt if elt is a class
elif isclass(elt):
elt_is_class = True
cls = list(elt.__bases__)
if cls: # if cls is not empty, find all base classes
index_of_found_classes = 0 # get last visited class index
visited_classes = set(cls) # cache for visited classes
len_classes = len(cls)
if depth is None: # if depth is None, get maximal value
depth = -1 # set negative value
while depth != 0 and index_of_found_classes != len_classes:
len_classes = len(cls)
for index in range(index_of_found_classes, len_classes):
_cls = cls[index]
for base_cls in _cls.__bases__:
if base_cls in visited_classes:
continue
else:
visited_classes.add(base_cls)
cls.append(base_cls)
index_of_found_classes = len_classes
depth -= 1
if elt_is_class:
# if cls is elt, result is classes minus first class
result = cls
elif isroutine(elt):
# get an elt to compare with found element
if ismethod(elt):
elt_to_compare = get_method_function(elt)
else:
elt_to_compare = elt
for _cls in cls: # for all classes
# get possible base elt
b_elt = getattr(_cls, elt_name, None)
if b_elt is not None:
# compare funcs
if ismethod(b_elt):
bec = get_method_function(b_elt)
else:
bec = b_elt
# if matching, add to result
if bec is elt_to_compare:
result.append(b_elt)
return result |
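A much-reduced sketch of the class-walking idea above, restricted to classes; the helper below is illustrative only and not the b3j0f.utils API:
def _base_attrs(cls, name):
    # Collect the attribute as defined on each base class, in MRO order.
    return [vars(base)[name] for base in cls.__mro__[1:] if name in vars(base)]
class _A(object):
    def greet(self):
        return 'A'
class _B(_A):
    pass
print(_base_attrs(_B, 'greet'))  # [<function _A.greet at ...>]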
def line_statuses(self, filename):
"""
Return a list of tuples `(lineno, status)` of all the lines found in
the Cobertura report for the given file `filename` where `lineno` is
the line number and `status` is coverage status of the line which can
be either `True` (line hit) or `False` (line miss).
"""
line_elements = self._get_lines_by_filename(filename)
lines_w_status = []
for line in line_elements:
lineno = int(line.attrib['number'])
status = line.attrib['hits'] != '0'
lines_w_status.append((lineno, status))
return lines_w_status | Return a list of tuples `(lineno, status)` of all the lines found in
the Cobertura report for the given file `filename` where `lineno` is
the line number and `status` is coverage status of the line which can
be either `True` (line hit) or `False` (line miss). | Below is the the instruction that describes the task:
### Input:
Return a list of tuples `(lineno, status)` of all the lines found in
the Cobertura report for the given file `filename` where `lineno` is
the line number and `status` is coverage status of the line which can
be either `True` (line hit) or `False` (line miss).
### Response:
def line_statuses(self, filename):
"""
Return a list of tuples `(lineno, status)` of all the lines found in
the Cobertura report for the given file `filename` where `lineno` is
the line number and `status` is coverage status of the line which can
be either `True` (line hit) or `False` (line miss).
"""
line_elements = self._get_lines_by_filename(filename)
lines_w_status = []
for line in line_elements:
lineno = int(line.attrib['number'])
status = line.attrib['hits'] != '0'
lines_w_status.append((lineno, status))
return lines_w_status |
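The same per-line status extraction, shown against a tiny inline Cobertura fragment using the standard library; the real class looks lines up per file inside a full report:
import xml.etree.ElementTree as ET
fragment = '<lines><line number="1" hits="3"/><line number="2" hits="0"/></lines>'
lines = ET.fromstring(fragment).findall('line')
print([(int(l.attrib['number']), l.attrib['hits'] != '0') for l in lines])
# [(1, True), (2, False)]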
def clear(self):
"""Clear the statement_group, citation, evidence, and annotations."""
self.statement_group = None
self.citation.clear()
self.evidence = None
self.annotations.clear() | Clear the statement_group, citation, evidence, and annotations. | Below is the the instruction that describes the task:
### Input:
Clear the statement_group, citation, evidence, and annotations.
### Response:
def clear(self):
"""Clear the statement_group, citation, evidence, and annotations."""
self.statement_group = None
self.citation.clear()
self.evidence = None
self.annotations.clear() |
def remove_scope_ip(hostid, auth, url):
"""
Function to remove an IP address allocation
:param hostid: Host id of the host to be deleted
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: HTTP response code. Should be 204 if successful
:rtype: int
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.termaccess import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> new_scope = add_ip_scope('10.50.0.1', '10.50.0.254', 'cyoung', 'test group', auth.creds, auth.url)
>>> add_host_to_segment('10.50.0.5', 'cyoung', 'New Test Host', '10.50.0.0/24', auth.creds, auth.url)
>>> host_id = get_host_id('10.50.0.5', '10.50.0.0/24', auth.creds, auth.url)
>>> rem_host = remove_scope_ip(host_id, auth.creds, auth.url)
>>> assert type(rem_host) is int
>>> assert rem_host == 204
"""
f_url = url + '/imcrs/res/access/assignedIpScope/ip/' + str(hostid)
response = requests.delete(f_url, auth=auth, headers=HEADERS, )
try:
if response.status_code == 204:
# print("Host Successfully Deleted")
return response.status_code
elif response.status_code == 409:
# print("IP Scope Already Exists")
return response.status_code
except requests.exceptions.RequestException as error:
return "Error:\n" + str(error) + " remove_scope_ip: An error has occurred" | Function to remove an IP address allocation
:param hostid: Host id of the host to be deleted
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: HTTP response code. Should be 204 if successful
:rtype: int
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.termaccess import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> new_scope = add_ip_scope('10.50.0.1', '10.50.0.254', 'cyoung', 'test group', auth.creds, auth.url)
>>> add_host_to_segment('10.50.0.5', 'cyoung', 'New Test Host', '10.50.0.0/24', auth.creds, auth.url)
>>> host_id = get_host_id('10.50.0.5', '10.50.0.0/24', auth.creds, auth.url)
>>> rem_host = remove_scope_ip(host_id, auth.creds, auth.url)
>>> assert type(rem_host) is int
>>> assert rem_host == 204 | Below is the the instruction that describes the task:
### Input:
Function to remove an IP address allocation
:param hostid: Host id of the host to be deleted
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: HTTP response code. Should be 204 if successful
:rtype: int
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.termaccess import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> new_scope = add_ip_scope('10.50.0.1', '10.50.0.254', 'cyoung', 'test group', auth.creds, auth.url)
>>> add_host_to_segment('10.50.0.5', 'cyoung', 'New Test Host', '10.50.0.0/24', auth.creds, auth.url)
>>> host_id = get_host_id('10.50.0.5', '10.50.0.0/24', auth.creds, auth.url)
>>> rem_host = remove_scope_ip(host_id, auth.creds, auth.url)
>>> assert type(rem_host) is int
>>> assert rem_host == 204
### Response:
def remove_scope_ip(hostid, auth, url):
"""
Function to remove an IP address allocation
:param hostid: Host id of the host to be deleted
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: HTTP response code. Should be 204 if successful
:rtype: int
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.termaccess import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> new_scope = add_ip_scope('10.50.0.1', '10.50.0.254', 'cyoung', 'test group', auth.creds, auth.url)
>>> add_host_to_segment('10.50.0.5', 'cyoung', 'New Test Host', '10.50.0.0/24', auth.creds, auth.url)
>>> host_id = get_host_id('10.50.0.5', '10.50.0.0/24', auth.creds, auth.url)
>>> rem_host = remove_scope_ip(host_id, auth.creds, auth.url)
>>> assert type(rem_host) is int
>>> assert rem_host == 204
"""
f_url = url + '/imcrs/res/access/assignedIpScope/ip/' + str(hostid)
response = requests.delete(f_url, auth=auth, headers=HEADERS, )
try:
if response.status_code == 204:
# print("Host Successfully Deleted")
return response.status_code
elif response.status_code == 409:
# print("IP Scope Already Exists")
return response.status_code
except requests.exceptions.RequestException as error:
return "Error:\n" + str(error) + " remove_scope_ip: An error has occurred"
def _draw_rect(ax:plt.Axes, b:Collection[int], color:str='white', text=None, text_size=14):
"Draw bounding box on `ax`."
patch = ax.add_patch(patches.Rectangle(b[:2], *b[-2:], fill=False, edgecolor=color, lw=2))
_draw_outline(patch, 4)
if text is not None:
patch = ax.text(*b[:2], text, verticalalignment='top', color=color, fontsize=text_size, weight='bold')
_draw_outline(patch,1) | Draw bounding box on `ax`. | Below is the the instruction that describes the task:
### Input:
Draw bounding box on `ax`.
### Response:
def _draw_rect(ax:plt.Axes, b:Collection[int], color:str='white', text=None, text_size=14):
"Draw bounding box on `ax`."
patch = ax.add_patch(patches.Rectangle(b[:2], *b[-2:], fill=False, edgecolor=color, lw=2))
_draw_outline(patch, 4)
if text is not None:
patch = ax.text(*b[:2], text, verticalalignment='top', color=color, fontsize=text_size, weight='bold')
_draw_outline(patch,1) |
def config_to_api_list(dic):
"""
Converts a python dictionary into a list containing the proper
ApiConfig encoding for configuration data.
@param dic: Key-value pairs to convert.
@return: JSON dictionary of an ApiConfig list (*not* an ApiList).
"""
config = [ ]
for k, v in dic.iteritems():
config.append({ 'name' : k, 'value': v })
return { ApiList.LIST_KEY : config } | Converts a python dictionary into a list containing the proper
ApiConfig encoding for configuration data.
@param dic: Key-value pairs to convert.
@return: JSON dictionary of an ApiConfig list (*not* an ApiList). | Below is the the instruction that describes the task:
### Input:
Converts a python dictionary into a list containing the proper
ApiConfig encoding for configuration data.
@param dic: Key-value pairs to convert.
@return: JSON dictionary of an ApiConfig list (*not* an ApiList).
### Response:
def config_to_api_list(dic):
"""
Converts a python dictionary into a list containing the proper
ApiConfig encoding for configuration data.
@param dic: Key-value pairs to convert.
@return: JSON dictionary of an ApiConfig list (*not* an ApiList).
"""
config = [ ]
for k, v in dic.iteritems():
config.append({ 'name' : k, 'value': v })
return { ApiList.LIST_KEY : config } |
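A Python 3 rendering of the same transformation, assuming ApiList.LIST_KEY is 'items' (that key name is an assumption here, taken from the cm_api convention):
LIST_KEY = 'items'  # assumed value of ApiList.LIST_KEY
def _config_to_api_list(dic):
    # Encode {'name': 'value'} pairs as an ApiConfig-style list.
    return {LIST_KEY: [{'name': k, 'value': v} for k, v in dic.items()]}
print(_config_to_api_list({'dfs_replication': '3'}))
# {'items': [{'name': 'dfs_replication', 'value': '3'}]}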
def invoke(proc, args=None, *, _instance = None, **kwargs):
"""
Executes the processor passing given arguments.
:param args: a list of parameters in --key=value format.
"""
if args is None:
args=[]
for kwargname in kwargs:
args.append('--'+kwargname)
args.append('{}'.format(kwargs[kwargname]))
parser = proc.invoke_parser(noexit=(_instance is not None))
opts = parser.parse_args(args)
kwargs0 = {}
def handle_set(opts, dataset, kwargs0, canMulti = False):
for elem in dataset:
elemname = elem.name
# ml-run-process passes values for not provided inputs, outputs and params as empty strings ('')
if hasattr(opts, elemname) and getattr(opts, elemname) not in [None, '']:
# value for element was given in the invocation
elemvalue = getattr(opts, elemname)
if canMulti and isinstance(elemvalue, list):
elemlist = elemvalue
else:
elemlist = [ elemvalue ]
for elemelem in elemlist:
for validator in elem.validators: validator(elemelem)
if hasattr(opts, elem.name):
prepared = elem.prepare(elemvalue) or elemvalue
kwargs0[elem.name] = prepared
elif elem.optional:
# value was not set but is optional so ignore it
kwargs0[elem.name] = None
else:
# value was not set and is mandatory -- error
raise AttributeError('Missing value for {} '.format(elemname))
try:
handle_set(opts, proc.INPUTS, kwargs0, True)
handle_set(opts, proc.OUTPUTS, kwargs0, True)
for param in proc.PARAMETERS:
if hasattr(opts, param.name) and getattr(opts, param.name) is not None and getattr(opts, param.name) != '':
value = getattr(opts, param.name)
# validate if needed
for validator in param.validators:
validator(value)
# if param is a tuple of choices, each choice is a tuple itself
# with first element of the input value and second element
# containing the value to be passed to the processor
if param.choices and isinstance(param.choices, tuple):
for choice in param.choices:
if choice[0] == value:
kwargs0[param.name] = choice[1]
break
else:
kwargs0[param.name] = value
elif param.optional:
kwargs0[param.name] = param.default
else:
raise AttributeError('Missing value for {} parameter'.format(param.name))
if not _instance:
_instance = proc(**kwargs0)
else:
_instance.apply(_instance, **kwargs0)
return _instance.run()
# todo: cleanup
except Exception as e:
print("Error:", e)
# traceback.print_exc()
raise | Executes the processor passing given arguments.
:param args: a list of parameters in --key=value format. | Below is the the instruction that describes the task:
### Input:
Executes the processor passing given arguments.
:param args: a list of parameters in --key=value format.
### Response:
def invoke(proc, args=None, *, _instance = None, **kwargs):
"""
Executes the processor passing given arguments.
:param args: a list of parameters in --key=value format.
"""
if args is None:
args=[]
for kwargname in kwargs:
args.append('--'+kwargname)
args.append('{}'.format(kwargs[kwargname]))
parser = proc.invoke_parser(noexit=(_instance is not None))
opts = parser.parse_args(args)
kwargs0 = {}
def handle_set(opts, dataset, kwargs0, canMulti = False):
for elem in dataset:
elemname = elem.name
# ml-run-process passes values for not provided inputs, outputs and params as empty strings ('')
if hasattr(opts, elemname) and getattr(opts, elemname) not in [None, '']:
# value for element was given in the invocation
elemvalue = getattr(opts, elemname)
if canMulti and isinstance(elemvalue, list):
elemlist = elemvalue
else:
elemlist = [ elemvalue ]
for elemelem in elemlist:
for validator in elem.validators: validator(elemelem)
if hasattr(opts, elem.name):
prepared = elem.prepare(elemvalue) or elemvalue
kwargs0[elem.name] = prepared
elif elem.optional:
# value was not set but is optional so ignore it
kwargs0[elem.name] = None
else:
# value was not set and is mandatory -- error
raise AttributeError('Missing value for {} '.format(elemname))
try:
handle_set(opts, proc.INPUTS, kwargs0, True)
handle_set(opts, proc.OUTPUTS, kwargs0, True)
for param in proc.PARAMETERS:
if hasattr(opts, param.name) and getattr(opts, param.name) is not None and getattr(opts, param.name) != '':
value = getattr(opts, param.name)
# validate if needed
for validator in param.validators:
validator(value)
# if param is a tuple of choices, each choice is a tuple itself
# with first element of the input value and second element
# containing the value to be passed to the processor
if param.choices and isinstance(param.choices, tuple):
for choice in param.choices:
if choice[0] == value:
kwargs0[param.name] = choice[1]
break
else:
kwargs0[param.name] = value
elif param.optional:
kwargs0[param.name] = param.default
else:
raise AttributeError('Missing value for {} parameter'.format(param.name))
if not _instance:
_instance = proc(**kwargs0)
else:
_instance.apply(_instance, **kwargs0)
return _instance.run()
# todo: cleanup
except Exception as e:
print("Error:", e)
# traceback.print_exc()
raise |
def read_losc_hdf5_state(f, path='quality/simple', start=None, end=None,
copy=False):
"""Read a `StateVector` from a LOSC-format HDF file.
Parameters
----------
f : `str`, `h5py.HLObject`
path of HDF5 file, or open `H5File`
path : `str`
path of HDF5 dataset to read.
start : `Time`, `~gwpy.time.LIGOTimeGPS`, optional
start GPS time of desired data
end : `Time`, `~gwpy.time.LIGOTimeGPS`, optional
end GPS time of desired data
copy : `bool`, default: `False`
create a fresh-memory copy of the underlying array
Returns
-------
data : `~gwpy.timeseries.TimeSeries`
a new `TimeSeries` containing the data read from disk
"""
# find data
dataset = io_hdf5.find_dataset(f, '%s/DQmask' % path)
maskset = io_hdf5.find_dataset(f, '%s/DQDescriptions' % path)
# read data
nddata = dataset[()]
bits = [bytes.decode(bytes(b), 'utf-8') for b in maskset[()]]
# read metadata
epoch = dataset.attrs['Xstart']
try:
dt = dataset.attrs['Xspacing']
except KeyError:
dt = Quantity(1, 's')
else:
xunit = parse_unit(dataset.attrs['Xunits'])
dt = Quantity(dt, xunit)
return StateVector(nddata, bits=bits, t0=epoch, name='Data quality',
dx=dt, copy=copy).crop(start=start, end=end) | Read a `StateVector` from a LOSC-format HDF file.
Parameters
----------
f : `str`, `h5py.HLObject`
path of HDF5 file, or open `H5File`
path : `str`
path of HDF5 dataset to read.
start : `Time`, `~gwpy.time.LIGOTimeGPS`, optional
start GPS time of desired data
end : `Time`, `~gwpy.time.LIGOTimeGPS`, optional
end GPS time of desired data
copy : `bool`, default: `False`
create a fresh-memory copy of the underlying array
Returns
-------
data : `~gwpy.timeseries.TimeSeries`
a new `TimeSeries` containing the data read from disk | Below is the the instruction that describes the task:
### Input:
Read a `StateVector` from a LOSC-format HDF file.
Parameters
----------
f : `str`, `h5py.HLObject`
path of HDF5 file, or open `H5File`
path : `str`
path of HDF5 dataset to read.
start : `Time`, `~gwpy.time.LIGOTimeGPS`, optional
start GPS time of desired data
end : `Time`, `~gwpy.time.LIGOTimeGPS`, optional
end GPS time of desired data
copy : `bool`, default: `False`
create a fresh-memory copy of the underlying array
Returns
-------
data : `~gwpy.timeseries.TimeSeries`
a new `TimeSeries` containing the data read from disk
### Response:
def read_losc_hdf5_state(f, path='quality/simple', start=None, end=None,
copy=False):
"""Read a `StateVector` from a LOSC-format HDF file.
Parameters
----------
f : `str`, `h5py.HLObject`
path of HDF5 file, or open `H5File`
path : `str`
path of HDF5 dataset to read.
start : `Time`, `~gwpy.time.LIGOTimeGPS`, optional
start GPS time of desired data
end : `Time`, `~gwpy.time.LIGOTimeGPS`, optional
end GPS time of desired data
copy : `bool`, default: `False`
create a fresh-memory copy of the underlying array
Returns
-------
data : `~gwpy.timeseries.TimeSeries`
a new `TimeSeries` containing the data read from disk
"""
# find data
dataset = io_hdf5.find_dataset(f, '%s/DQmask' % path)
maskset = io_hdf5.find_dataset(f, '%s/DQDescriptions' % path)
# read data
nddata = dataset[()]
bits = [bytes.decode(bytes(b), 'utf-8') for b in maskset[()]]
# read metadata
epoch = dataset.attrs['Xstart']
try:
dt = dataset.attrs['Xspacing']
except KeyError:
dt = Quantity(1, 's')
else:
xunit = parse_unit(dataset.attrs['Xunits'])
dt = Quantity(dt, xunit)
return StateVector(nddata, bits=bits, t0=epoch, name='Data quality',
dx=dt, copy=copy).crop(start=start, end=end) |
def getCredentials(self, request):
"""
Derive credentials from an HTTP request.
Override SessionWrapper.getCredentials to add the Host: header to the
credentials. This will make web-based virtual hosting work.
@type request: L{nevow.inevow.IRequest}
@param request: The request being handled.
@rtype: L{twisted.cred.credentials.ICredentials}
@return: Credentials derived from the HTTP request.
"""
username = usernameFromRequest(request)
password = request.args.get('password', [''])[0]
return credentials.UsernamePassword(username, password) | Derive credentials from an HTTP request.
Override SessionWrapper.getCredentials to add the Host: header to the
credentials. This will make web-based virtual hosting work.
@type request: L{nevow.inevow.IRequest}
@param request: The request being handled.
@rtype: L{twisted.cred.credentials.ICredentials}
@return: Credentials derived from the HTTP request. | Below is the the instruction that describes the task:
### Input:
Derive credentials from an HTTP request.
Override SessionWrapper.getCredentials to add the Host: header to the
credentials. This will make web-based virtual hosting work.
@type request: L{nevow.inevow.IRequest}
@param request: The request being handled.
@rtype: L{twisted.cred.credentials.ICredentials}
@return: Credentials derived from the HTTP request.
### Response:
def getCredentials(self, request):
"""
Derive credentials from an HTTP request.
Override SessionWrapper.getCredentials to add the Host: header to the
credentials. This will make web-based virtual hosting work.
@type request: L{nevow.inevow.IRequest}
@param request: The request being handled.
@rtype: L{twisted.cred.credentials.ICredentials}
@return: Credentials derived from the HTTP request.
"""
username = usernameFromRequest(request)
password = request.args.get('password', [''])[0]
return credentials.UsernamePassword(username, password) |
def setCurrentPlugin( self, plugin ):
"""
Sets the current plugin item to the inputted plugin.
:param plugin | <XConfigPlugin> || None
"""
if ( not plugin ):
self.uiPluginTREE.setCurrentItem(None)
return
for i in range(self.uiPluginTREE.topLevelItemCount()):
item = self.uiPluginTREE.topLevelItem(i)
for c in range(item.childCount()):
pitem = item.child(c)
if ( pitem.plugin() == plugin ):
self.uiPluginTREE.setCurrentItem(pitem) | Sets the current plugin item to the inputted plugin.
:param plugin | <XConfigPlugin> || None | Below is the the instruction that describes the task:
### Input:
Sets the current plugin item to the inputted plugin.
:param plugin | <XConfigPlugin> || None
### Response:
def setCurrentPlugin( self, plugin ):
"""
Sets the current plugin item to the inputted plugin.
:param plugin | <XConfigPlugin> || None
"""
if ( not plugin ):
self.uiPluginTREE.setCurrentItem(None)
return
for i in range(self.uiPluginTREE.topLevelItemCount()):
item = self.uiPluginTREE.topLevelItem(i)
for c in range(item.childCount()):
pitem = item.child(c)
if ( pitem.plugin() == plugin ):
self.uiPluginTREE.setCurrentItem(pitem) |
def update_todo_item(self, item_id, content, party_id=None, notify=False):
"""
Modifies an existing item. The values work much like the "create item"
operation, so you should refer to that for a more detailed explanation.
"""
path = '/todos/update_item/%u' % item_id
req = ET.Element('request')
item = ET.SubElement(req, 'item')
ET.SubElement(item, 'content').text = str(content)
if party_id is not None:
ET.SubElement(req, 'responsible-party').text = str(party_id)
ET.SubElement(req, 'notify').text = str(bool(notify)).lower()
return self._request(path, req) | Modifies an existing item. The values work much like the "create item"
operation, so you should refer to that for a more detailed explanation. | Below is the the instruction that describes the task:
### Input:
Modifies an existing item. The values work much like the "create item"
operation, so you should refer to that for a more detailed explanation.
### Response:
def update_todo_item(self, item_id, content, party_id=None, notify=False):
"""
Modifies an existing item. The values work much like the "create item"
operation, so you should refer to that for a more detailed explanation.
"""
path = '/todos/update_item/%u' % item_id
req = ET.Element('request')
item = ET.SubElement(req, 'item')
ET.SubElement(item, 'content').text = str(content)
if party_id is not None:
ET.SubElement(req, 'responsible-party').text = str(party_id)
ET.SubElement(req, 'notify').text = str(bool(notify)).lower()
return self._request(path, req) |
def _delete_empty_properties(self, properties):
"""
Delete empty properties before serialization to avoid
extra keys with empty values in the output json.
"""
if not self.parent_id:
del properties['parent_id']
if not self.subsegments:
del properties['subsegments']
if not self.aws:
del properties['aws']
if not self.http:
del properties['http']
if not self.cause:
del properties['cause']
if not self.annotations:
del properties['annotations']
if not self.metadata:
del properties['metadata']
properties.pop(ORIGIN_TRACE_HEADER_ATTR_KEY, None)
del properties['sampled'] | Delete empty properties before serialization to avoid
extra keys with empty values in the output json. | Below is the the instruction that describes the task:
### Input:
Delete empty properties before serialization to avoid
extra keys with empty values in the output json.
### Response:
def _delete_empty_properties(self, properties):
"""
Delete empty properties before serialization to avoid
extra keys with empty values in the output json.
"""
if not self.parent_id:
del properties['parent_id']
if not self.subsegments:
del properties['subsegments']
if not self.aws:
del properties['aws']
if not self.http:
del properties['http']
if not self.cause:
del properties['cause']
if not self.annotations:
del properties['annotations']
if not self.metadata:
del properties['metadata']
properties.pop(ORIGIN_TRACE_HEADER_ATTR_KEY, None)
del properties['sampled'] |
def pop(self, key, default=None):
""" Get item from the dict and remove it.
Return default if expired or does not exist. Never raise KeyError.
"""
with self.lock:
try:
item = OrderedDict.__getitem__(self, key)
del self[key]
return item[0]
except KeyError:
return default | Get item from the dict and remove it.
Return default if expired or does not exist. Never raise KeyError. | Below is the the instruction that describes the task:
### Input:
Get item from the dict and remove it.
Return default if expired or does not exist. Never raise KeyError.
### Response:
def pop(self, key, default=None):
""" Get item from the dict and remove it.
Return default if expired or does not exist. Never raise KeyError.
"""
with self.lock:
try:
item = OrderedDict.__getitem__(self, key)
del self[key]
return item[0]
except KeyError:
return default |
def check_folder_exists(project, path, folder_name):
'''
:param project: project id
:type project: string
:param path: path to where we should look for the folder in question
:type path: string
:param folder_name: name of the folder in question
:type folder_name: string
:returns: A boolean True or False whether the folder exists at the specified path
:type: boolean
:raises: :exc:'ResolutionError' if dxpy.api.container_list_folder raises an exception
This function returns a boolean value that indicates whether a folder of the
specified name exists at the specified path
Note: this function will NOT work on the root folder case, i.e. '/'
'''
if folder_name is None or path is None:
return False
try:
folder_list = dxpy.api.container_list_folder(project, {"folder": path, "only": "folders"})
except dxpy.exceptions.DXAPIError as e:
if e.name == 'ResourceNotFound':
raise ResolutionError(str(e.msg))
else:
raise e
target_folder = path + '/' + folder_name
# sanitize input if necessary
target_folder, _skip = clean_folder_path(target_folder, 'folder')
# Check that folder name exists in return from list folder API call
return target_folder in folder_list['folders'] | :param project: project id
:type project: string
:param path: path to where we should look for the folder in question
:type path: string
:param folder_name: name of the folder in question
:type folder_name: string
:returns: A boolean True or False whether the folder exists at the specified path
:type: boolean
:raises: :exc:'ResolutionError' if dxpy.api.container_list_folder raises an exception
This function returns a boolean value that indicates whether a folder of the
specified name exists at the specified path
Note: this function will NOT work on the root folder case, i.e. '/' | Below is the the instruction that describes the task:
### Input:
:param project: project id
:type project: string
:param path: path to where we should look for the folder in question
:type path: string
:param folder_name: name of the folder in question
:type folder_name: string
:returns: A boolean True or False whether the folder exists at the specified path
:type: boolean
:raises: :exc:'ResolutionError' if dxpy.api.container_list_folder raises an exception
This function returns a boolean value that indicates whether a folder of the
specified name exists at the specified path
Note: this function will NOT work on the root folder case, i.e. '/'
### Response:
def check_folder_exists(project, path, folder_name):
'''
:param project: project id
:type project: string
:param path: path to where we should look for the folder in question
:type path: string
:param folder_name: name of the folder in question
:type folder_name: string
:returns: A boolean True or False whether the folder exists at the specified path
:type: boolean
:raises: :exc:'ResolutionError' if dxpy.api.container_list_folder raises an exception
This function returns a boolean value that indicates whether a folder of the
specified name exists at the specified path
Note: this function will NOT work on the root folder case, i.e. '/'
'''
if folder_name is None or path is None:
return False
try:
folder_list = dxpy.api.container_list_folder(project, {"folder": path, "only": "folders"})
except dxpy.exceptions.DXAPIError as e:
if e.name == 'ResourceNotFound':
raise ResolutionError(str(e.msg))
else:
raise e
target_folder = path + '/' + folder_name
# sanitize input if necessary
target_folder, _skip = clean_folder_path(target_folder, 'folder')
# Check that folder name exists in return from list folder API call
return target_folder in folder_list['folders'] |
def _writeMapTable(self, session, fileObject, mapTable, replaceParamFile):
"""
Write Generic Map Table Method
This method writes a mapping table in the generic format to file. The method will handle
both empty and filled cases of generic formatted mapping tables.
session = SQLAlchemy session object for retrieving data from the database
fileObject = The file object to write to
mapTable = The GSSHAPY MapTable object to write
"""
# Write mapping name
fileObject.write('%s "%s"\n' % (mapTable.name, mapTable.indexMap.name))
# Write mapping table global variables
if mapTable.numIDs:
fileObject.write('NUM_IDS %s\n' % (mapTable.numIDs))
if mapTable.maxNumCells:
fileObject.write('MAX_NUMBER_CELLS %s\n' % (mapTable.maxNumCells))
if mapTable.numSed:
fileObject.write('NUM_SED %s\n' % (mapTable.numSed))
if mapTable.maxSoilID:
fileObject.write('MAX_SOIL_ID %s\n' % (mapTable.maxSoilID))
# Write value lines from the database
self._writeValues(session, fileObject, mapTable, None, replaceParamFile) | Write Generic Map Table Method
This method writes a mapping table in the generic format to file. The method will handle
both empty and filled cases of generic formatted mapping tables.
session = SQLAlchemy session object for retrieving data from the database
fileObject = The file object to write to
mapTable = The GSSHAPY MapTable object to write | Below is the the instruction that describes the task:
### Input:
Write Generic Map Table Method
This method writes a mapping table in the generic format to file. The method will handle
both empty and filled cases of generic formatted mapping tables.
session = SQLAlchemy session object for retrieving data from the database
fileObject = The file object to write to
mapTable = The GSSHAPY MapTable object to write
### Response:
def _writeMapTable(self, session, fileObject, mapTable, replaceParamFile):
"""
Write Generic Map Table Method
This method writes a mapping table in the generic format to file. The method will handle
both empty and filled cases of generic formatted mapping tables.
session = SQLAlchemy session object for retrieving data from the database
fileObject = The file object to write to
mapTable = The GSSHAPY MapTable object to write
"""
# Write mapping name
fileObject.write('%s "%s"\n' % (mapTable.name, mapTable.indexMap.name))
# Write mapping table global variables
if mapTable.numIDs:
fileObject.write('NUM_IDS %s\n' % (mapTable.numIDs))
if mapTable.maxNumCells:
fileObject.write('MAX_NUMBER_CELLS %s\n' % (mapTable.maxNumCells))
if mapTable.numSed:
fileObject.write('NUM_SED %s\n' % (mapTable.numSed))
if mapTable.maxSoilID:
fileObject.write('MAX_SOIL_ID %s\n' % (mapTable.maxSoilID))
# Write value lines from the database
self._writeValues(session, fileObject, mapTable, None, replaceParamFile) |
def get_user(self, id=None, name=None, email=None):
""" Get user object by email or id.
"""
log.info("Picking user: %s (%s) (%s)" % (name, email, id))
from qubell.api.private.user import User
if email:
user = User.get(self._router, organization=self, email=email)
else:
user = self.users[id or name]
return user | Get user object by email or id. | Below is the the instruction that describes the task:
### Input:
Get user object by email or id.
### Response:
def get_user(self, id=None, name=None, email=None):
""" Get user object by email or id.
"""
log.info("Picking user: %s (%s) (%s)" % (name, email, id))
from qubell.api.private.user import User
if email:
user = User.get(self._router, organization=self, email=email)
else:
user = self.users[id or name]
return user |
def _deserialize(self, value, environment=None):
"""A collection traverses over something to deserialize its value.
:param value: a ``dict`` which contains mapped values
"""
if not isinstance(value, MappingABC):
raise exc.Invalid(self)
# traverse items and match against validated struct
mapping = self._create_deserialize_type(value, environment)
invalids = []
for name, item in self:
# deserialize each item
try:
mapping[name] = item.deserialize(
value.get(name, values.Undefined), environment
)
except exc.IgnoreValue:
# just ignore this value
pass
except exc.Invalid as ex:
# append this to the list of invalids, so we can return a complete overview of errors
invalids.append(ex)
if invalids:
# on invalids this item is also ``Invalid``
raise exc.InvalidChildren(self, invalids)
return mapping | A collection traverses over something to deserialize its value.
:param value: a ``dict`` which contains mapped values | Below is the instruction that describes the task:
### Input:
A collection traverses over something to deserialize its value.
:param value: a ``dict`` which contains mapped values
### Response:
def _deserialize(self, value, environment=None):
"""A collection traverses over something to deserialize its value.
:param value: a ``dict`` which contains mapped values
"""
if not isinstance(value, MappingABC):
raise exc.Invalid(self)
# traverse items and match against validated struct
mapping = self._create_deserialize_type(value, environment)
invalids = []
for name, item in self:
# deserialize each item
try:
mapping[name] = item.deserialize(
value.get(name, values.Undefined), environment
)
except exc.IgnoreValue:
# just ignore this value
pass
except exc.Invalid as ex:
# append this to the list of invalids, so we can return a complete overview of errors
invalids.append(ex)
if invalids:
# on invalids this item is also ``Invalid``
raise exc.InvalidChildren(self, invalids)
return mapping |
def StateOfCharge(self):
""" % of Full Charge """
return (self.bus.read_byte_data(self.address, 0x02) + self.bus.read_byte_data(self.address, 0x03) * 256) | % of Full Charge | Below is the the instruction that describes the task:
### Input:
% of Full Charge
### Response:
def StateOfCharge(self):
""" % of Full Charge """
return (self.bus.read_byte_data(self.address, 0x02) + self.bus.read_byte_data(self.address, 0x03) * 256) |
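The register arithmetic above, standalone: the gauge reports state of charge as a little-endian 16-bit value split across registers 0x02 (low byte) and 0x03 (high byte); the raw bytes below are made up:
low_byte, high_byte = 0x2C, 0x00  # hypothetical reads from 0x02 and 0x03
print(low_byte + high_byte * 256)  # 44 (% of full charge)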
def insert_sequences_into_tree(aln, moltype, params={},
write_log=True):
"""Returns a tree from Alignment object aln.
aln: an xxx.Alignment object, or data that can be used to build one.
moltype: cogent.core.moltype.MolType object
params: dict of parameters to pass in to the pplacer app controller.
The result will be a tree object, or None if tree fails.
"""
# convert aln to phy since seq_names need fixed to run through pplacer
new_aln=get_align_for_phylip(StringIO(aln))
# convert aln to fasta in case it is not already a fasta file
aln2 = Alignment(new_aln)
seqs = aln2.toFasta()
ih = '_input_as_multiline_string'
pplacer_app = Pplacer(params=params,
InputHandler=ih,
WorkingDir=None,
SuppressStderr=False,
SuppressStdout=False)
pplacer_result = pplacer_app(seqs)
# write a log file
if write_log:
log_fp = join(params["--out-dir"],'log_pplacer_' + \
split(get_tmp_filename())[-1])
log_file=open(log_fp,'w')
log_file.write(pplacer_result['StdOut'].read())
log_file.close()
# use guppy to convert json file into a placement tree
guppy_params={'tog':None}
new_tree=build_tree_from_json_using_params(pplacer_result['json'].name, \
output_dir=params['--out-dir'], \
params=guppy_params)
pplacer_result.cleanUp()
return new_tree | Returns a tree from Alignment object aln.
aln: an xxx.Alignment object, or data that can be used to build one.
moltype: cogent.core.moltype.MolType object
params: dict of parameters to pass in to the pplacer app controller.
The result will be a tree object, or None if tree fails. | Below is the instruction that describes the task:
### Input:
Returns a tree from Alignment object aln.
aln: an xxx.Alignment object, or data that can be used to build one.
moltype: cogent.core.moltype.MolType object
params: dict of parameters to pass in to the pplacer app controller.
The result will be a tree object, or None if tree fails.
### Response:
def insert_sequences_into_tree(aln, moltype, params={},
write_log=True):
"""Returns a tree from Alignment object aln.
aln: an xxx.Alignment object, or data that can be used to build one.
moltype: cogent.core.moltype.MolType object
params: dict of parameters to pass in to the pplacer app controller.
The result will be a tree object, or None if tree fails.
"""
# convert aln to phy since seq_names need fixed to run through pplacer
new_aln=get_align_for_phylip(StringIO(aln))
# convert aln to fasta in case it is not already a fasta file
aln2 = Alignment(new_aln)
seqs = aln2.toFasta()
ih = '_input_as_multiline_string'
pplacer_app = Pplacer(params=params,
InputHandler=ih,
WorkingDir=None,
SuppressStderr=False,
SuppressStdout=False)
pplacer_result = pplacer_app(seqs)
# write a log file
if write_log:
log_fp = join(params["--out-dir"],'log_pplacer_' + \
split(get_tmp_filename())[-1])
log_file=open(log_fp,'w')
log_file.write(pplacer_result['StdOut'].read())
log_file.close()
# use guppy to convert json file into a placement tree
guppy_params={'tog':None}
new_tree=build_tree_from_json_using_params(pplacer_result['json'].name, \
output_dir=params['--out-dir'], \
params=guppy_params)
pplacer_result.cleanUp()
return new_tree |
def find_text(self, text, changed=True, forward=True, case=False,
words=False, regexp=False):
"""Find text"""
cursor = self.textCursor()
findflag = QTextDocument.FindFlag()
if not forward:
findflag = findflag | QTextDocument.FindBackward
if case:
findflag = findflag | QTextDocument.FindCaseSensitively
moves = [QTextCursor.NoMove]
if forward:
moves += [QTextCursor.NextWord, QTextCursor.Start]
if changed:
if to_text_string(cursor.selectedText()):
new_position = min([cursor.selectionStart(),
cursor.selectionEnd()])
cursor.setPosition(new_position)
else:
cursor.movePosition(QTextCursor.PreviousWord)
else:
moves += [QTextCursor.End]
if regexp:
text = to_text_string(text)
else:
text = re.escape(to_text_string(text))
if QT55_VERSION:
pattern = QRegularExpression(u"\\b{}\\b".format(text) if words else
text)
if not case:
pattern.setPatternOptions(
QRegularExpression.CaseInsensitiveOption)
else:
pattern = QRegExp(u"\\b{}\\b".format(text)
if words else text, Qt.CaseSensitive if case else
Qt.CaseInsensitive, QRegExp.RegExp2)
for move in moves:
cursor.movePosition(move)
if regexp and '\\n' in text:
# Multiline regular expression
found_cursor = self.find_multiline_pattern(pattern, cursor,
findflag)
else:
# Single line find: using the QTextDocument's find function,
# probably much more efficient than ours
found_cursor = self.document().find(pattern, cursor, findflag)
if found_cursor is not None and not found_cursor.isNull():
self.setTextCursor(found_cursor)
return True
return False | Find text | Below is the the instruction that describes the task:
### Input:
Find text
### Response:
def find_text(self, text, changed=True, forward=True, case=False,
words=False, regexp=False):
"""Find text"""
cursor = self.textCursor()
findflag = QTextDocument.FindFlag()
if not forward:
findflag = findflag | QTextDocument.FindBackward
if case:
findflag = findflag | QTextDocument.FindCaseSensitively
moves = [QTextCursor.NoMove]
if forward:
moves += [QTextCursor.NextWord, QTextCursor.Start]
if changed:
if to_text_string(cursor.selectedText()):
new_position = min([cursor.selectionStart(),
cursor.selectionEnd()])
cursor.setPosition(new_position)
else:
cursor.movePosition(QTextCursor.PreviousWord)
else:
moves += [QTextCursor.End]
if regexp:
text = to_text_string(text)
else:
text = re.escape(to_text_string(text))
if QT55_VERSION:
pattern = QRegularExpression(u"\\b{}\\b".format(text) if words else
text)
if not case:
pattern.setPatternOptions(
QRegularExpression.CaseInsensitiveOption)
else:
pattern = QRegExp(u"\\b{}\\b".format(text)
if words else text, Qt.CaseSensitive if case else
Qt.CaseInsensitive, QRegExp.RegExp2)
for move in moves:
cursor.movePosition(move)
if regexp and '\\n' in text:
# Multiline regular expression
found_cursor = self.find_multiline_pattern(pattern, cursor,
findflag)
else:
# Single line find: using the QTextDocument's find function,
# probably much more efficient than ours
found_cursor = self.document().find(pattern, cursor, findflag)
if found_cursor is not None and not found_cursor.isNull():
self.setTextCursor(found_cursor)
return True
return False |
def sample_dynamic_posterior(self, inputs, samples, static_sample=None):
"""Sample the dynamic latent posterior.
Args:
inputs: A batch of intermediate representations of image frames
across all timesteps, of shape [..., batch_size, timesteps,
hidden_size].
samples: Number of samples to draw from the latent distribution.
static_sample: A tensor sample of the static latent variable `f`
of shape [..., batch_size, latent_size]. Only used
for the full dynamic posterior formulation.
Returns:
A tuple of a sample tensor of shape [samples, batch_size, length
latent_size], and a MultivariateNormalDiag distribution from which
the tensor was sampled, with event shape [latent_size], and batch
shape [broadcasted_shape, batch_size, length], where
`broadcasted_shape` is the broadcasted sampled shape between the
inputs and static sample.
Raises:
ValueError: If the "full" latent posterior formulation is being
used, yet a static latent sample was not provided.
"""
if self.latent_posterior == "factorized":
dist = self.dynamic_encoder(inputs)
samples = dist.sample(samples) # (s, N, T, lat)
else: # full
if static_sample is None:
raise ValueError(
"The full dynamic posterior requires a static latent sample")
dist = self.dynamic_encoder((inputs, static_sample))
samples = dist.sample() # (samples, N, latent)
return samples, dist | Sample the dynamic latent posterior.
Args:
inputs: A batch of intermediate representations of image frames
across all timesteps, of shape [..., batch_size, timesteps,
hidden_size].
samples: Number of samples to draw from the latent distribution.
static_sample: A tensor sample of the static latent variable `f`
of shape [..., batch_size, latent_size]. Only used
for the full dynamic posterior formulation.
Returns:
A tuple of a sample tensor of shape [samples, batch_size, length
latent_size], and a MultivariateNormalDiag distribution from which
the tensor was sampled, with event shape [latent_size], and batch
shape [broadcasted_shape, batch_size, length], where
`broadcasted_shape` is the broadcasted sampled shape between the
inputs and static sample.
Raises:
ValueError: If the "full" latent posterior formulation is being
used, yet a static latent sample was not provided. | Below is the the instruction that describes the task:
### Input:
Sample the dynamic latent posterior.
Args:
inputs: A batch of intermediate representations of image frames
across all timesteps, of shape [..., batch_size, timesteps,
hidden_size].
samples: Number of samples to draw from the latent distribution.
static_sample: A tensor sample of the static latent variable `f`
of shape [..., batch_size, latent_size]. Only used
for the full dynamic posterior formulation.
Returns:
A tuple of a sample tensor of shape [samples, batch_size, length
latent_size], and a MultivariateNormalDiag distribution from which
the tensor was sampled, with event shape [latent_size], and batch
shape [broadcasted_shape, batch_size, length], where
`broadcasted_shape` is the broadcasted sampled shape between the
inputs and static sample.
Raises:
ValueError: If the "full" latent posterior formulation is being
used, yet a static latent sample was not provided.
### Response:
def sample_dynamic_posterior(self, inputs, samples, static_sample=None):
"""Sample the dynamic latent posterior.
Args:
inputs: A batch of intermediate representations of image frames
across all timesteps, of shape [..., batch_size, timesteps,
hidden_size].
samples: Number of samples to draw from the latent distribution.
static_sample: A tensor sample of the static latent variable `f`
of shape [..., batch_size, latent_size]. Only used
for the full dynamic posterior formulation.
Returns:
A tuple of a sample tensor of shape [samples, batch_size, length
latent_size], and a MultivariateNormalDiag distribution from which
the tensor was sampled, with event shape [latent_size], and batch
shape [broadcasted_shape, batch_size, length], where
`broadcasted_shape` is the broadcasted sampled shape between the
inputs and static sample.
Raises:
ValueError: If the "full" latent posterior formulation is being
used, yet a static latent sample was not provided.
"""
if self.latent_posterior == "factorized":
dist = self.dynamic_encoder(inputs)
samples = dist.sample(samples) # (s, N, T, lat)
else: # full
if static_sample is None:
raise ValueError(
"The full dynamic posterior requires a static latent sample")
dist = self.dynamic_encoder((inputs, static_sample))
samples = dist.sample() # (samples, N, latent)
return samples, dist |
def get_rendition_fit_size(spec, input_w, input_h, output_scale):
""" Determine the scaled size based on the provided spec """
width = input_w
height = input_h
scale = spec.get('scale')
if scale:
width = width / scale
height = height / scale
min_width = spec.get('scale_min_width')
if min_width and width < min_width:
height = height * min_width / width
width = min_width
min_height = spec.get('scale_min_height')
if min_height and height < min_height:
width = width * min_height / height
height = min_height
tgt_width, tgt_height = spec.get('width'), spec.get('height')
if tgt_width and width > tgt_width:
height = height * tgt_width / width
width = tgt_width
if tgt_height and height > tgt_height:
width = width * tgt_height / height
height = tgt_height
tgt_width, tgt_height = spec.get('max_width'), spec.get('max_height')
if tgt_width and width > tgt_width:
height = height * tgt_width / width
width = tgt_width
if tgt_height and height > tgt_height:
width = width * tgt_height / height
height = tgt_height
width = width * output_scale
height = height * output_scale
# Never scale to larger than the base rendition
width = min(round(width), input_w)
height = min(round(height), input_h)
return (width, height), None | Determine the scaled size based on the provided spec | Below is the the instruction that describes the task:
### Input:
Determine the scaled size based on the provided spec
### Response:
def get_rendition_fit_size(spec, input_w, input_h, output_scale):
""" Determine the scaled size based on the provided spec """
width = input_w
height = input_h
scale = spec.get('scale')
if scale:
width = width / scale
height = height / scale
min_width = spec.get('scale_min_width')
if min_width and width < min_width:
height = height * min_width / width
width = min_width
min_height = spec.get('scale_min_height')
if min_height and height < min_height:
width = width * min_height / height
height = min_height
tgt_width, tgt_height = spec.get('width'), spec.get('height')
if tgt_width and width > tgt_width:
height = height * tgt_width / width
width = tgt_width
if tgt_height and height > tgt_height:
width = width * tgt_height / height
height = tgt_height
tgt_width, tgt_height = spec.get('max_width'), spec.get('max_height')
if tgt_width and width > tgt_width:
height = height * tgt_width / width
width = tgt_width
if tgt_height and height > tgt_height:
width = width * tgt_height / height
height = tgt_height
width = width * output_scale
height = height * output_scale
# Never scale to larger than the base rendition
width = min(round(width), input_w)
height = min(round(height), input_h)
return (width, height), None |
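A worked example of the fit logic above, with hypothetical numbers: a 1600x1200 source constrained by {'max_width': 800} at output_scale 1 lands on 800x600.
input_w, input_h, max_width, output_scale = 1600, 1200, 800, 1
width, height = input_w, input_h
if max_width and width > max_width:
    height = height * max_width / width  # 1200 * 800 / 1600 = 600.0
    width = max_width
width, height = width * output_scale, height * output_scale
print((min(round(width), input_w), min(round(height), input_h)))  # (800, 600)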
def load_object(imp_path):
'''
Given a python import path, load the object
For dynamic imports in a program
>>> isdir = load_object('os.path.isdir')
>>> isdir('/tmp')
True
>>> num = load_object('numbers.Number')
>>> isinstance('x', num)
False
>>> isinstance(777, num)
True
'''
module_name, obj_name = imp_path.split('.', 1)
module = __import__(module_name)
obj = attrgetter(obj_name)(module)
return obj | Given a python import path, load the object
For dynamic imports in a program
>>> isdir = load_object('os.path.isdir')
>>> isdir('/tmp')
True
>>> num = load_object('numbers.Number')
>>> isinstance('x', num)
False
>>> isinstance(777, num)
True | Below is the the instruction that describes the task:
### Input:
Given a python import path, load the object
For dynamic imports in a program
>>> isdir = load_object('os.path.isdir')
>>> isdir('/tmp')
True
>>> num = load_object('numbers.Number')
>>> isinstance('x', num)
False
>>> isinstance(777, num)
True
### Response:
def load_object(imp_path):
'''
Given a python import path, load the object
For dynamic imports in a program
>>> isdir = load_object('os.path.isdir')
>>> isdir('/tmp')
True
>>> num = load_object('numbers.Number')
>>> isinstance('x', num)
False
>>> isinstance(777, num)
True
'''
module_name, obj_name = imp_path.split('.', 1)
module = __import__(module_name)
obj = attrgetter(obj_name)(module)
return obj |
def near(self, center, sphere=False, min=None, max=None):
"""Order results by their distance from the given point, optionally with range limits in meters.
Geospatial operator: {$near: {...}}
Documentation: https://docs.mongodb.com/manual/reference/operator/query/near/#op._S_near
{
$near: {
$geometry: <center; Point or (long, lat)>,
$minDistance: <min; distance in meters>,
$maxDistance: <max; distance in meters>
}
}
Geospatial operator: {$nearSphere: {...}}
Documentation: https://docs.mongodb.com/manual/reference/operator/query/nearSphere/#op._S_nearSphere
{
$nearSphere: {
$geometry: <center; Point or (long, lat)>,
$minDistance: <min; distance in meters>,
$maxDistance: <max; distance in meters>
}
}
"""
from marrow.mongo.geo import Point
near = {'$geometry': Point(*center)}
if min:
near['$minDistance'] = float(min)
if max:
near['$maxDistance'] = float(max)
return Filter({self._name: {'$nearSphere' if sphere else '$near': near}}) | Order results by their distance from the given point, optionally with range limits in meters.
Geospatial operator: {$near: {...}}
Documentation: https://docs.mongodb.com/manual/reference/operator/query/near/#op._S_near
{
$near: {
$geometry: <center; Point or (long, lat)>,
$minDistance: <min; distance in meters>,
$maxDistance: <max; distance in meters>
}
}
Geospatial operator: {$nearSphere: {...}}
Documentation: https://docs.mongodb.com/manual/reference/operator/query/nearSphere/#op._S_nearSphere
{
$nearSphere: {
$geometry: <center; Point or (long, lat)>,
$minDistance: <min; distance in meters>,
$maxDistance: <max; distance in meters>
}
} | Below is the the instruction that describes the task:
### Input:
Order results by their distance from the given point, optionally with range limits in meters.
Geospatial operator: {$near: {...}}
Documentation: https://docs.mongodb.com/manual/reference/operator/query/near/#op._S_near
{
$near: {
$geometry: <center; Point or (long, lat)>,
$minDistance: <min; distance in meters>,
$maxDistance: <max; distance in meters>
}
}
Geospatial operator: {$nearSphere: {...}}
Documentation: https://docs.mongodb.com/manual/reference/operator/query/nearSphere/#op._S_nearSphere
{
$nearSphere: {
$geometry: <center; Point or (long, lat)>,
$minDistance: <min; distance in meters>,
$maxDistance: <max; distance in meters>
}
}
### Response:
def near(self, center, sphere=False, min=None, max=None):
"""Order results by their distance from the given point, optionally with range limits in meters.
Geospatial operator: {$near: {...}}
Documentation: https://docs.mongodb.com/manual/reference/operator/query/near/#op._S_near
{
$near: {
$geometry: <center; Point or (long, lat)>,
$minDistance: <min; distance in meters>,
$maxDistance: <max; distance in meters>
}
}
Geospatial operator: {$nearSphere: {...}}
Documentation: https://docs.mongodb.com/manual/reference/operator/query/nearSphere/#op._S_nearSphere
{
$nearSphere: {
$geometry: <center; Point or (long, lat)>,
$minDistance: <min; distance in meters>,
$maxDistance: <max; distance in meters>
}
}
"""
from marrow.mongo.geo import Point
near = {'$geometry': Point(*center)}
if min:
near['$minDistance'] = float(min)
if max:
near['$maxDistance'] = float(max)
return Filter({self._name: {'$nearSphere' if sphere else '$near': near}}) |
def get_aoi(self, solar_zenith, solar_azimuth):
"""Get the angle of incidence on the system.
Parameters
----------
solar_zenith : float or Series.
Solar zenith angle.
solar_azimuth : float or Series.
Solar azimuth angle.
Returns
-------
aoi : Series
The angle of incidence
"""
aoi = irradiance.aoi(self.surface_tilt, self.surface_azimuth,
solar_zenith, solar_azimuth)
return aoi | Get the angle of incidence on the system.
Parameters
----------
solar_zenith : float or Series.
Solar zenith angle.
solar_azimuth : float or Series.
Solar azimuth angle.
Returns
-------
aoi : Series
The angle of incidence | Below is the the instruction that describes the task:
### Input:
Get the angle of incidence on the system.
Parameters
----------
solar_zenith : float or Series.
Solar zenith angle.
solar_azimuth : float or Series.
Solar azimuth angle.
Returns
-------
aoi : Series
The angle of incidence
### Response:
def get_aoi(self, solar_zenith, solar_azimuth):
"""Get the angle of incidence on the system.
Parameters
----------
solar_zenith : float or Series.
Solar zenith angle.
solar_azimuth : float or Series.
Solar azimuth angle.
Returns
-------
aoi : Series
The angle of incidence
"""
aoi = irradiance.aoi(self.surface_tilt, self.surface_azimuth,
solar_zenith, solar_azimuth)
return aoi |
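The get_aoi method above is a thin wrapper around pvlib's irradiance.aoi; below is a hedged sketch of the equivalent direct call, assuming pvlib is installed and using arbitrary example angles in degrees.
from pvlib import irradiance
# 30-degree tilt, south-facing surface; sun at 45 degrees zenith, due south.
aoi = irradiance.aoi(surface_tilt=30, surface_azimuth=180,
                     solar_zenith=45, solar_azimuth=180)
print(aoi)  # ~15 degrees for this geometry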
def run_process(self, process):
"""Runs a single action."""
message = u'#{bright}'
message += u'{} '.format(str(process)[:68]).ljust(69, '.')
stashed = False
if self.unstaged_changes and not self.include_unstaged_changes:
out, err, code = self.git.stash(keep_index=True, quiet=True)
stashed = code == 0
try:
result = process(files=self.files, cwd=self.cwd, fix=self.fix)
# Check for modified files
out, err, code = self.git.status(porcelain=True, untracked_files='no')
for line in out.splitlines():
file_status = Status(line)
# Make sure the file is one of the files that was processed
if file_status.path in self.files and file_status.is_modified:
mtime = os.path.getmtime(file_status.path) if os.path.exists(file_status.path) else 0
if mtime > self.file_mtimes.get(file_status.path, 0):
self.file_mtimes[file_status.path] = mtime
result.add_modified_file(file_status.path)
if self.stage_modified_files:
self.git.add(file_status.path)
except: # noqa: E722
raise
finally:
if stashed:
self.git.reset(hard=True, quiet=True)
self.git.stash.pop(index=True, quiet=True)
if result.is_success:
message += u' #{green}[SUCCESS]'
elif result.is_failure:
message += u' #{red}[FAILURE]'
elif result.is_skip:
message += u' #{cyan}[SKIPPED]'
elif result.is_error:
message += u' #{red}[ERROR!!]'
return result, message | Runs a single action. | Below is the the instruction that describes the task:
### Input:
Runs a single action.
### Response:
def run_process(self, process):
"""Runs a single action."""
message = u'#{bright}'
message += u'{} '.format(str(process)[:68]).ljust(69, '.')
stashed = False
if self.unstaged_changes and not self.include_unstaged_changes:
out, err, code = self.git.stash(keep_index=True, quiet=True)
stashed = code == 0
try:
result = process(files=self.files, cwd=self.cwd, fix=self.fix)
# Check for modified files
out, err, code = self.git.status(porcelain=True, untracked_files='no')
for line in out.splitlines():
file_status = Status(line)
# Make sure the file is one of the files that was processed
if file_status.path in self.files and file_status.is_modified:
mtime = os.path.getmtime(file_status.path) if os.path.exists(file_status.path) else 0
if mtime > self.file_mtimes.get(file_status.path, 0):
self.file_mtimes[file_status.path] = mtime
result.add_modified_file(file_status.path)
if self.stage_modified_files:
self.git.add(file_status.path)
except: # noqa: E722
raise
finally:
if stashed:
self.git.reset(hard=True, quiet=True)
self.git.stash.pop(index=True, quiet=True)
if result.is_success:
message += u' #{green}[SUCCESS]'
elif result.is_failure:
message += u' #{red}[FAILURE]'
elif result.is_skip:
message += u' #{cyan}[SKIPPED]'
elif result.is_error:
message += u' #{red}[ERROR!!]'
return result, message |
def add_typeattr(typeattr,**kwargs):
"""
    Add a typeattr to an existing type.
"""
tmpltype = get_templatetype(typeattr.type_id, user_id=kwargs.get('user_id'))
ta = _set_typeattr(typeattr)
tmpltype.typeattrs.append(ta)
db.DBSession.flush()
    return ta | Add a typeattr to an existing type. | Below is the the instruction that describes the task:
### Input:
Add a typeattr to an existing type.
### Response:
def add_typeattr(typeattr,**kwargs):
"""
    Add a typeattr to an existing type.
"""
tmpltype = get_templatetype(typeattr.type_id, user_id=kwargs.get('user_id'))
ta = _set_typeattr(typeattr)
tmpltype.typeattrs.append(ta)
db.DBSession.flush()
return ta |
def genderize(name, api_token=None):
"""Fetch gender from genderize.io"""
GENDERIZE_API_URL = "https://api.genderize.io/"
TOTAL_RETRIES = 10
MAX_RETRIES = 5
SLEEP_TIME = 0.25
STATUS_FORCELIST = [502]
params = {
'name': name
}
if api_token:
params['apikey'] = api_token
session = requests.Session()
retries = urllib3.util.Retry(total=TOTAL_RETRIES,
connect=MAX_RETRIES,
status=MAX_RETRIES,
status_forcelist=STATUS_FORCELIST,
backoff_factor=SLEEP_TIME,
raise_on_status=True)
session.mount('http://', requests.adapters.HTTPAdapter(max_retries=retries))
session.mount('https://', requests.adapters.HTTPAdapter(max_retries=retries))
r = session.get(GENDERIZE_API_URL, params=params)
r.raise_for_status()
result = r.json()
gender = result['gender']
prob = result.get('probability', None)
acc = int(prob * 100) if prob else None
return gender, acc | Fetch gender from genderize.io | Below is the the instruction that describes the task:
### Input:
Fetch gender from genderize.io
### Response:
def genderize(name, api_token=None):
"""Fetch gender from genderize.io"""
GENDERIZE_API_URL = "https://api.genderize.io/"
TOTAL_RETRIES = 10
MAX_RETRIES = 5
SLEEP_TIME = 0.25
STATUS_FORCELIST = [502]
params = {
'name': name
}
if api_token:
params['apikey'] = api_token
session = requests.Session()
retries = urllib3.util.Retry(total=TOTAL_RETRIES,
connect=MAX_RETRIES,
status=MAX_RETRIES,
status_forcelist=STATUS_FORCELIST,
backoff_factor=SLEEP_TIME,
raise_on_status=True)
session.mount('http://', requests.adapters.HTTPAdapter(max_retries=retries))
session.mount('https://', requests.adapters.HTTPAdapter(max_retries=retries))
r = session.get(GENDERIZE_API_URL, params=params)
r.raise_for_status()
result = r.json()
gender = result['gender']
prob = result.get('probability', None)
acc = int(prob * 100) if prob else None
return gender, acc |
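A usage sketch for the genderize function above; its body relies on requests and urllib3 being imported at module scope, and the returned values depend on the live genderize.io API, so the output shown is only indicative.
import requests   # used for Session and HTTPAdapter inside genderize()
import urllib3    # used for urllib3.util.Retry inside genderize()
gender, accuracy = genderize('alice')
print(gender, accuracy)   # e.g. 'female' 96 -- whatever the API reports at call time
# With a (hypothetical) paid API key:
gender, accuracy = genderize('alice', api_token='YOUR_API_KEY')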
def fill_archive(self, stream=None, kind='tgz', prefix=None,
subrepos=False):
"""
Fills up given stream.
:param stream: file like object.
:param kind: one of following: ``zip``, ``tgz`` or ``tbz2``.
Default: ``tgz``.
:param prefix: name of root directory in archive.
Default is repository name and changeset's raw_id joined with dash
(``repo-tip.<KIND>``).
:param subrepos: include subrepos in this archive.
:raise ImproperArchiveTypeError: If given kind is wrong.
:raise VcsError: If given stream is None
"""
allowed_kinds = settings.ARCHIVE_SPECS.keys()
if kind not in allowed_kinds:
raise ImproperArchiveTypeError('Archive kind not supported use one'
'of %s', allowed_kinds)
if prefix is None:
prefix = '%s-%s' % (self.repository.name, self.short_id)
elif prefix.startswith('/'):
raise VCSError("Prefix cannot start with leading slash")
elif prefix.strip() == '':
raise VCSError("Prefix cannot be empty")
if kind == 'zip':
frmt = 'zip'
else:
frmt = 'tar'
_git_path = settings.GIT_EXECUTABLE_PATH
cmd = '%s archive --format=%s --prefix=%s/ %s' % (_git_path,
frmt, prefix, self.raw_id)
if kind == 'tgz':
cmd += ' | gzip -9'
elif kind == 'tbz2':
cmd += ' | bzip2 -9'
if stream is None:
raise VCSError('You need to pass in a valid stream for filling'
' with archival data')
popen = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True,
cwd=self.repository.path)
buffer_size = 1024 * 8
chunk = popen.stdout.read(buffer_size)
while chunk:
stream.write(chunk)
chunk = popen.stdout.read(buffer_size)
# Make sure all descriptors would be read
popen.communicate() | Fills up given stream.
:param stream: file like object.
:param kind: one of following: ``zip``, ``tgz`` or ``tbz2``.
Default: ``tgz``.
:param prefix: name of root directory in archive.
Default is repository name and changeset's raw_id joined with dash
(``repo-tip.<KIND>``).
:param subrepos: include subrepos in this archive.
:raise ImproperArchiveTypeError: If given kind is wrong.
:raise VcsError: If given stream is None | Below is the the instruction that describes the task:
### Input:
Fills up given stream.
:param stream: file like object.
:param kind: one of following: ``zip``, ``tgz`` or ``tbz2``.
Default: ``tgz``.
:param prefix: name of root directory in archive.
Default is repository name and changeset's raw_id joined with dash
(``repo-tip.<KIND>``).
:param subrepos: include subrepos in this archive.
:raise ImproperArchiveTypeError: If given kind is wrong.
:raise VcsError: If given stream is None
### Response:
def fill_archive(self, stream=None, kind='tgz', prefix=None,
subrepos=False):
"""
Fills up given stream.
:param stream: file like object.
:param kind: one of following: ``zip``, ``tgz`` or ``tbz2``.
Default: ``tgz``.
:param prefix: name of root directory in archive.
Default is repository name and changeset's raw_id joined with dash
(``repo-tip.<KIND>``).
:param subrepos: include subrepos in this archive.
:raise ImproperArchiveTypeError: If given kind is wrong.
:raise VcsError: If given stream is None
"""
allowed_kinds = settings.ARCHIVE_SPECS.keys()
if kind not in allowed_kinds:
raise ImproperArchiveTypeError('Archive kind not supported use one'
'of %s', allowed_kinds)
if prefix is None:
prefix = '%s-%s' % (self.repository.name, self.short_id)
elif prefix.startswith('/'):
raise VCSError("Prefix cannot start with leading slash")
elif prefix.strip() == '':
raise VCSError("Prefix cannot be empty")
if kind == 'zip':
frmt = 'zip'
else:
frmt = 'tar'
_git_path = settings.GIT_EXECUTABLE_PATH
cmd = '%s archive --format=%s --prefix=%s/ %s' % (_git_path,
frmt, prefix, self.raw_id)
if kind == 'tgz':
cmd += ' | gzip -9'
elif kind == 'tbz2':
cmd += ' | bzip2 -9'
if stream is None:
raise VCSError('You need to pass in a valid stream for filling'
' with archival data')
popen = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True,
cwd=self.repository.path)
buffer_size = 1024 * 8
chunk = popen.stdout.read(buffer_size)
while chunk:
stream.write(chunk)
chunk = popen.stdout.read(buffer_size)
# Make sure all descriptors would be read
popen.communicate() |
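A hedged sketch of driving the fill_archive method above; `changeset` stands for an instance of the class that owns the method, and the file name and prefix are placeholders. The method shells out to git and gzip/bzip2, so those binaries must be on PATH.
# Stream a gzipped tarball of the changeset to disk.
with open('/tmp/myrepo-tip.tar.gz', 'wb') as out:
    changeset.fill_archive(stream=out, kind='tgz', prefix='myrepo-tip')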
def extract(self, text: str, get_attr=['PERSON', 'ORG', 'GPE']) -> List[Extraction]:
"""
Args:
text (str): the text to extract from.
get_attr (List[str]): The spaCy NER attributes we're interested in.
Returns:
List(Extraction): the list of extraction or the empty list if there are no matches.
"""
doc = self.__nlp(text)
attr_list = list()
for ent in doc.ents:
if ent.label_ in get_attr:
attr_list.append(Extraction(extractor_name=self.name,
start_char=int(ent.start_char),
end_char=int(ent.end_char),
value=ent.text,
tag=ent.label_,
start_token=ent.start,
end_token=ent.end))
return attr_list | Args:
text (str): the text to extract from.
get_attr (List[str]): The spaCy NER attributes we're interested in.
Returns:
List(Extraction): the list of extraction or the empty list if there are no matches. | Below is the the instruction that describes the task:
### Input:
Args:
text (str): the text to extract from.
get_attr (List[str]): The spaCy NER attributes we're interested in.
Returns:
List(Extraction): the list of extraction or the empty list if there are no matches.
### Response:
def extract(self, text: str, get_attr=['PERSON', 'ORG', 'GPE']) -> List[Extraction]:
"""
Args:
text (str): the text to extract from.
get_attr (List[str]): The spaCy NER attributes we're interested in.
Returns:
List(Extraction): the list of extraction or the empty list if there are no matches.
"""
doc = self.__nlp(text)
attr_list = list()
for ent in doc.ents:
if ent.label_ in get_attr:
attr_list.append(Extraction(extractor_name=self.name,
start_char=int(ent.start_char),
end_char=int(ent.end_char),
value=ent.text,
tag=ent.label_,
start_token=ent.start,
end_token=ent.end))
return attr_list |
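A usage sketch for the extract method above. The constructor shown is hypothetical (the record only shows that the instance wraps a loaded spaCy pipeline in self.__nlp), and it assumes each Extraction exposes the value and tag it was built with.
extractor = SpacyNerExtractor('spacy_ner_extractor')   # hypothetical constructor
extractions = extractor.extract('Tim Cook visited Berlin to meet Apple engineers.')
for e in extractions:
    print(e.value, e.tag)   # expected tags: PERSON, GPE, ORG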
def _read_data_type_2(self, length):
"""Read IPv6-Route Type 2 data.
Structure of IPv6-Route Type 2 data [RFC 6275]:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Next Header | Hdr Ext Len=2 | Routing Type=2|Segments Left=1|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Reserved |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+ +
| |
+ Home Address +
| |
+ +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 route.next Next Header
1 8 route.length Header Extensive Length
2 16 route.type Routing Type
3 24 route.seg_left Segments Left
4 32 - Reserved
8 64 route.ip Home Address
"""
if length != 20:
raise ProtocolError(f'{self.alias}: [Typeno 2] invalid format')
_resv = self._read_fileng(4)
_home = self._read_fileng(16)
data = dict(
ip=ipaddress.ip_address(_home),
)
return data | Read IPv6-Route Type 2 data.
Structure of IPv6-Route Type 2 data [RFC 6275]:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Next Header | Hdr Ext Len=2 | Routing Type=2|Segments Left=1|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Reserved |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+ +
| |
+ Home Address +
| |
+ +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 route.next Next Header
1 8 route.length Header Extensive Length
2 16 route.type Routing Type
3 24 route.seg_left Segments Left
4 32 - Reserved
8 64 route.ip Home Address | Below is the the instruction that describes the task:
### Input:
Read IPv6-Route Type 2 data.
Structure of IPv6-Route Type 2 data [RFC 6275]:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Next Header | Hdr Ext Len=2 | Routing Type=2|Segments Left=1|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Reserved |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+ +
| |
+ Home Address +
| |
+ +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 route.next Next Header
1 8 route.length Header Extensive Length
2 16 route.type Routing Type
3 24 route.seg_left Segments Left
4 32 - Reserved
8 64 route.ip Home Address
### Response:
def _read_data_type_2(self, length):
"""Read IPv6-Route Type 2 data.
Structure of IPv6-Route Type 2 data [RFC 6275]:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Next Header | Hdr Ext Len=2 | Routing Type=2|Segments Left=1|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Reserved |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+ +
| |
+ Home Address +
| |
+ +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 route.next Next Header
1 8 route.length Header Extensive Length
2 16 route.type Routing Type
3 24 route.seg_left Segments Left
4 32 - Reserved
8 64 route.ip Home Address
"""
if length != 20:
raise ProtocolError(f'{self.alias}: [Typeno 2] invalid format')
_resv = self._read_fileng(4)
_home = self._read_fileng(16)
data = dict(
ip=ipaddress.ip_address(_home),
)
return data |
def get_lonlatalts(self):
"""Obtain GCPs and construct latitude and longitude arrays.
Args:
band (gdal band): Measurement band which comes with GCP's
array_shape (tuple) : The size of the data array
Returns:
coordinates (tuple): A tuple with longitude and latitude arrays
"""
band = self.filehandle
(xpoints, ypoints), (gcp_lons, gcp_lats, gcp_alts), (gcps, crs) = self.get_gcps()
    # FIXME: do interpolation on cartesian coordinates if the area is
# problematic.
longitudes = interpolate_xarray(xpoints, ypoints, gcp_lons, band.shape)
latitudes = interpolate_xarray(xpoints, ypoints, gcp_lats, band.shape)
altitudes = interpolate_xarray(xpoints, ypoints, gcp_alts, band.shape)
longitudes.attrs['gcps'] = gcps
longitudes.attrs['crs'] = crs
latitudes.attrs['gcps'] = gcps
latitudes.attrs['crs'] = crs
altitudes.attrs['gcps'] = gcps
altitudes.attrs['crs'] = crs
return longitudes, latitudes, altitudes | Obtain GCPs and construct latitude and longitude arrays.
Args:
band (gdal band): Measurement band which comes with GCP's
array_shape (tuple) : The size of the data array
Returns:
coordinates (tuple): A tuple with longitude and latitude arrays | Below is the the instruction that describes the task:
### Input:
Obtain GCPs and construct latitude and longitude arrays.
Args:
band (gdal band): Measurement band which comes with GCP's
array_shape (tuple) : The size of the data array
Returns:
coordinates (tuple): A tuple with longitude and latitude arrays
### Response:
def get_lonlatalts(self):
"""Obtain GCPs and construct latitude and longitude arrays.
Args:
band (gdal band): Measurement band which comes with GCP's
array_shape (tuple) : The size of the data array
Returns:
coordinates (tuple): A tuple with longitude and latitude arrays
"""
band = self.filehandle
(xpoints, ypoints), (gcp_lons, gcp_lats, gcp_alts), (gcps, crs) = self.get_gcps()
    # FIXME: do interpolation on cartesian coordinates if the area is
# problematic.
longitudes = interpolate_xarray(xpoints, ypoints, gcp_lons, band.shape)
latitudes = interpolate_xarray(xpoints, ypoints, gcp_lats, band.shape)
altitudes = interpolate_xarray(xpoints, ypoints, gcp_alts, band.shape)
longitudes.attrs['gcps'] = gcps
longitudes.attrs['crs'] = crs
latitudes.attrs['gcps'] = gcps
latitudes.attrs['crs'] = crs
altitudes.attrs['gcps'] = gcps
altitudes.attrs['crs'] = crs
return longitudes, latitudes, altitudes |
def ping(self) -> None:
"""Pings a database connection, reconnecting if necessary."""
if self.db is None or self.db_pythonlib not in [PYTHONLIB_MYSQLDB,
PYTHONLIB_PYMYSQL]:
return
try:
self.db.ping(True) # test connection; reconnect upon failure
# ... should auto-reconnect; however, it seems to fail the first
# time, then work the next time.
# Exception (the first time) is:
# <class '_mysql_exceptions.OperationalError'>:
# (2006, 'MySQL server has gone away')
# http://mail.python.org/pipermail/python-list/2008-February/
# 474598.html
except mysql.OperationalError: # loss of connection
self.db = None
self.connect_to_database_mysql(
self._database, self._user, self._password, self._server,
self._port, self._charset, self._use_unicode) | Pings a database connection, reconnecting if necessary. | Below is the the instruction that describes the task:
### Input:
Pings a database connection, reconnecting if necessary.
### Response:
def ping(self) -> None:
"""Pings a database connection, reconnecting if necessary."""
if self.db is None or self.db_pythonlib not in [PYTHONLIB_MYSQLDB,
PYTHONLIB_PYMYSQL]:
return
try:
self.db.ping(True) # test connection; reconnect upon failure
# ... should auto-reconnect; however, it seems to fail the first
# time, then work the next time.
# Exception (the first time) is:
# <class '_mysql_exceptions.OperationalError'>:
# (2006, 'MySQL server has gone away')
# http://mail.python.org/pipermail/python-list/2008-February/
# 474598.html
except mysql.OperationalError: # loss of connection
self.db = None
self.connect_to_database_mysql(
self._database, self._user, self._password, self._server,
self._port, self._charset, self._use_unicode) |
def fix_variables(bqm, sampling_mode=True):
"""Determine assignments for some variables of a binary quadratic model.
Roof duality finds a lower bound for the minimum of a quadratic polynomial. It
can also find minimizing assignments for some of the polynomial's variables;
these fixed variables take the same values in all optimal solutions [BHT]_ [BH]_.
A quadratic pseudo-Boolean function can be represented as a network to find
the lower bound through network-flow computations. `fix_variables` uses maximum
flow in the implication network to correctly fix variables. Consequently, you can
find an assignment for the remaining variables that attains the optimal value.
Args:
bqm (:obj:`.BinaryQuadraticModel`)
A binary quadratic model.
sampling_mode (bool, optional, default=True):
In sampling mode, only roof-duality is used. When `sampling_mode` is false, strongly
connected components are used to fix more variables, but in some optimal solutions
these variables may take different values.
Returns:
dict: Variable assignments for some variables of the specified binary quadratic model.
Examples:
This example creates a binary quadratic model with a single ground state
and fixes the model's single variable to the minimizing assignment.
>>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
>>> bqm.add_variable('a', 1.0)
>>> dimod.fix_variables(bqm)
{'a': -1}
This example has two ground states, :math:`a=b=-1` and :math:`a=b=1`, with
no variable having a single value for all ground states, so neither variable
is fixed.
>>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
>>> bqm.add_interaction('a', 'b', -1.0)
>>> dimod.fix_variables(bqm) # doctest: +SKIP
{}
        This example turns sampling mode off, so variables are fixed to an assignment
that attains the ground state.
>>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
>>> bqm.add_interaction('a', 'b', -1.0)
>>> dimod.fix_variables(bqm, sampling_mode=False) # doctest: +SKIP
{'a': 1, 'b': 1}
.. [BHT] Boros, E., P.L. Hammer, G. Tavares. Preprocessing of Unconstraint Quadratic Binary
Optimization. Rutcor Research Report 10-2006, April, 2006.
.. [BH] Boros, E., P.L. Hammer. Pseudo-Boolean optimization. Discrete Applied Mathematics 123,
(2002), pp. 155-225
"""
try:
from dimod.roof_duality._fix_variables import fix_variables_wrapper
except ImportError:
raise ImportError("c++ extension roof_duality is not built")
if sampling_mode:
method = 2 # roof-duality only
else:
method = 1 # roof-duality and strongly connected components
linear = bqm.linear
if all(v in linear for v in range(len(bqm))):
# we can work with the binary form of the bqm directly
fixed = fix_variables_wrapper(bqm.binary, method)
else:
try:
inverse_mapping = dict(enumerate(sorted(linear)))
except TypeError:
# in python3 unlike types cannot be sorted
inverse_mapping = dict(enumerate(linear))
mapping = {v: i for i, v in inverse_mapping.items()}
fixed = fix_variables_wrapper(bqm.relabel_variables(mapping, inplace=False).binary, method)
fixed = {inverse_mapping[v]: val for v, val in fixed.items()}
if bqm.vartype is Vartype.SPIN:
return {v: 2*val - 1 for v, val in fixed.items()}
else:
return fixed | Determine assignments for some variables of a binary quadratic model.
Roof duality finds a lower bound for the minimum of a quadratic polynomial. It
can also find minimizing assignments for some of the polynomial's variables;
these fixed variables take the same values in all optimal solutions [BHT]_ [BH]_.
A quadratic pseudo-Boolean function can be represented as a network to find
the lower bound through network-flow computations. `fix_variables` uses maximum
flow in the implication network to correctly fix variables. Consequently, you can
find an assignment for the remaining variables that attains the optimal value.
Args:
bqm (:obj:`.BinaryQuadraticModel`)
A binary quadratic model.
sampling_mode (bool, optional, default=True):
In sampling mode, only roof-duality is used. When `sampling_mode` is false, strongly
connected components are used to fix more variables, but in some optimal solutions
these variables may take different values.
Returns:
dict: Variable assignments for some variables of the specified binary quadratic model.
Examples:
This example creates a binary quadratic model with a single ground state
and fixes the model's single variable to the minimizing assignment.
>>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
>>> bqm.add_variable('a', 1.0)
>>> dimod.fix_variables(bqm)
{'a': -1}
This example has two ground states, :math:`a=b=-1` and :math:`a=b=1`, with
no variable having a single value for all ground states, so neither variable
is fixed.
>>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
>>> bqm.add_interaction('a', 'b', -1.0)
>>> dimod.fix_variables(bqm) # doctest: +SKIP
{}
        This example turns sampling mode off, so variables are fixed to an assignment
that attains the ground state.
>>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
>>> bqm.add_interaction('a', 'b', -1.0)
>>> dimod.fix_variables(bqm, sampling_mode=False) # doctest: +SKIP
{'a': 1, 'b': 1}
.. [BHT] Boros, E., P.L. Hammer, G. Tavares. Preprocessing of Unconstraint Quadratic Binary
Optimization. Rutcor Research Report 10-2006, April, 2006.
.. [BH] Boros, E., P.L. Hammer. Pseudo-Boolean optimization. Discrete Applied Mathematics 123,
(2002), pp. 155-225 | Below is the the instruction that describes the task:
### Input:
Determine assignments for some variables of a binary quadratic model.
Roof duality finds a lower bound for the minimum of a quadratic polynomial. It
can also find minimizing assignments for some of the polynomial's variables;
these fixed variables take the same values in all optimal solutions [BHT]_ [BH]_.
A quadratic pseudo-Boolean function can be represented as a network to find
the lower bound through network-flow computations. `fix_variables` uses maximum
flow in the implication network to correctly fix variables. Consequently, you can
find an assignment for the remaining variables that attains the optimal value.
Args:
bqm (:obj:`.BinaryQuadraticModel`)
A binary quadratic model.
sampling_mode (bool, optional, default=True):
In sampling mode, only roof-duality is used. When `sampling_mode` is false, strongly
connected components are used to fix more variables, but in some optimal solutions
these variables may take different values.
Returns:
dict: Variable assignments for some variables of the specified binary quadratic model.
Examples:
This example creates a binary quadratic model with a single ground state
and fixes the model's single variable to the minimizing assignment.
>>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
>>> bqm.add_variable('a', 1.0)
>>> dimod.fix_variables(bqm)
{'a': -1}
This example has two ground states, :math:`a=b=-1` and :math:`a=b=1`, with
no variable having a single value for all ground states, so neither variable
is fixed.
>>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
>>> bqm.add_interaction('a', 'b', -1.0)
>>> dimod.fix_variables(bqm) # doctest: +SKIP
{}
        This example turns sampling mode off, so variables are fixed to an assignment
that attains the ground state.
>>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
>>> bqm.add_interaction('a', 'b', -1.0)
>>> dimod.fix_variables(bqm, sampling_mode=False) # doctest: +SKIP
{'a': 1, 'b': 1}
.. [BHT] Boros, E., P.L. Hammer, G. Tavares. Preprocessing of Unconstraint Quadratic Binary
Optimization. Rutcor Research Report 10-2006, April, 2006.
.. [BH] Boros, E., P.L. Hammer. Pseudo-Boolean optimization. Discrete Applied Mathematics 123,
(2002), pp. 155-225
### Response:
def fix_variables(bqm, sampling_mode=True):
"""Determine assignments for some variables of a binary quadratic model.
Roof duality finds a lower bound for the minimum of a quadratic polynomial. It
can also find minimizing assignments for some of the polynomial's variables;
these fixed variables take the same values in all optimal solutions [BHT]_ [BH]_.
A quadratic pseudo-Boolean function can be represented as a network to find
the lower bound through network-flow computations. `fix_variables` uses maximum
flow in the implication network to correctly fix variables. Consequently, you can
find an assignment for the remaining variables that attains the optimal value.
Args:
bqm (:obj:`.BinaryQuadraticModel`)
A binary quadratic model.
sampling_mode (bool, optional, default=True):
In sampling mode, only roof-duality is used. When `sampling_mode` is false, strongly
connected components are used to fix more variables, but in some optimal solutions
these variables may take different values.
Returns:
dict: Variable assignments for some variables of the specified binary quadratic model.
Examples:
This example creates a binary quadratic model with a single ground state
and fixes the model's single variable to the minimizing assignment.
>>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
>>> bqm.add_variable('a', 1.0)
>>> dimod.fix_variables(bqm)
{'a': -1}
This example has two ground states, :math:`a=b=-1` and :math:`a=b=1`, with
no variable having a single value for all ground states, so neither variable
is fixed.
>>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
>>> bqm.add_interaction('a', 'b', -1.0)
>>> dimod.fix_variables(bqm) # doctest: +SKIP
{}
        This example turns sampling mode off, so variables are fixed to an assignment
that attains the ground state.
>>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN)
>>> bqm.add_interaction('a', 'b', -1.0)
>>> dimod.fix_variables(bqm, sampling_mode=False) # doctest: +SKIP
{'a': 1, 'b': 1}
.. [BHT] Boros, E., P.L. Hammer, G. Tavares. Preprocessing of Unconstraint Quadratic Binary
Optimization. Rutcor Research Report 10-2006, April, 2006.
.. [BH] Boros, E., P.L. Hammer. Pseudo-Boolean optimization. Discrete Applied Mathematics 123,
(2002), pp. 155-225
"""
try:
from dimod.roof_duality._fix_variables import fix_variables_wrapper
except ImportError:
raise ImportError("c++ extension roof_duality is not built")
if sampling_mode:
method = 2 # roof-duality only
else:
method = 1 # roof-duality and strongly connected components
linear = bqm.linear
if all(v in linear for v in range(len(bqm))):
# we can work with the binary form of the bqm directly
fixed = fix_variables_wrapper(bqm.binary, method)
else:
try:
inverse_mapping = dict(enumerate(sorted(linear)))
except TypeError:
# in python3 unlike types cannot be sorted
inverse_mapping = dict(enumerate(linear))
mapping = {v: i for i, v in inverse_mapping.items()}
fixed = fix_variables_wrapper(bqm.relabel_variables(mapping, inplace=False).binary, method)
fixed = {inverse_mapping[v]: val for v, val in fixed.items()}
if bqm.vartype is Vartype.SPIN:
return {v: 2*val - 1 for v, val in fixed.items()}
else:
return fixed |
def _get_bandgap_doscar(filename):
"""Get the bandgap from the DOSCAR file"""
with open(filename) as fp:
for i in range(6):
l = fp.readline()
efermi = float(l.split()[3])
step1 = fp.readline().split()[0]
step2 = fp.readline().split()[0]
step_size = float(step2)-float(step1)
not_found = True
while not_found:
l = fp.readline().split()
e = float(l.pop(0))
dens = 0.0
for i in range(int(len(l)/2)):
dens += float(l[i])
if e < efermi and dens > 1e-3:
bot = e
elif e > efermi and dens > 1e-3:
top = e
not_found = False
if top - bot < step_size*2:
bandgap = 0.0
else:
bandgap = float(top - bot)
return bandgap | Get the bandgap from the DOSCAR file | Below is the the instruction that describes the task:
### Input:
Get the bandgap from the DOSCAR file
### Response:
def _get_bandgap_doscar(filename):
"""Get the bandgap from the DOSCAR file"""
with open(filename) as fp:
for i in range(6):
l = fp.readline()
efermi = float(l.split()[3])
step1 = fp.readline().split()[0]
step2 = fp.readline().split()[0]
step_size = float(step2)-float(step1)
not_found = True
while not_found:
l = fp.readline().split()
e = float(l.pop(0))
dens = 0.0
for i in range(int(len(l)/2)):
dens += float(l[i])
if e < efermi and dens > 1e-3:
bot = e
elif e > efermi and dens > 1e-3:
top = e
not_found = False
if top - bot < step_size*2:
bandgap = 0.0
else:
bandgap = float(top - bot)
return bandgap |
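A minimal sketch of calling the DOSCAR parser above; it assumes a VASP DOSCAR file is present in the working directory and laid out the way the parser expects (Fermi level in the fourth field of line 6).
gap = _get_bandgap_doscar('DOSCAR')
print('band gap: {:.3f} eV'.format(gap))   # 0.000 for metals under the 1e-3 density threshold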
def replace_row(self, line, ndx):
"""
replace a grids row at index 'ndx' with 'line'
"""
for col in range(len(line)):
self.set_tile(ndx, col, line[col]) | replace a grids row at index 'ndx' with 'line' | Below is the the instruction that describes the task:
### Input:
replace a grids row at index 'ndx' with 'line'
### Response:
def replace_row(self, line, ndx):
"""
replace a grids row at index 'ndx' with 'line'
"""
for col in range(len(line)):
self.set_tile(ndx, col, line[col]) |
def path(self, path):
"""
Defines a URL path to match.
Only call this method if the URL has no path already defined.
Arguments:
path (str): URL path value to match. E.g: ``/api/users``.
Returns:
self: current Mock instance.
"""
url = furl(self._request.rawurl)
url.path = path
self._request.url = url.url
self.add_matcher(matcher('PathMatcher', path)) | Defines a URL path to match.
Only call this method if the URL has no path already defined.
Arguments:
path (str): URL path value to match. E.g: ``/api/users``.
Returns:
self: current Mock instance. | Below is the the instruction that describes the task:
### Input:
Defines a URL path to match.
Only call this method if the URL has no path already defined.
Arguments:
path (str): URL path value to match. E.g: ``/api/users``.
Returns:
self: current Mock instance.
### Response:
def path(self, path):
"""
Defines a URL path to match.
Only call this method if the URL has no path already defined.
Arguments:
path (str): URL path value to match. E.g: ``/api/users``.
Returns:
self: current Mock instance.
"""
url = furl(self._request.rawurl)
url.path = path
self._request.url = url.url
self.add_matcher(matcher('PathMatcher', path)) |
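A hedged sketch of the builder-style flow the path method above belongs to (pook-style HTTP mocking; the exact chaining shown is an assumption based on the record).
import pook
pook.on()                                   # activate HTTP mocking
mock = pook.get('http://server.example')    # defines method and base URL
mock.path('/api/users')                     # narrows the match to this path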
def get_root_nodes(self, project, depth=None):
"""GetRootNodes.
[Preview API] Gets root classification nodes under the project.
:param str project: Project ID or project name
:param int depth: Depth of children to fetch.
:rtype: [WorkItemClassificationNode]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if depth is not None:
query_parameters['$depth'] = self._serialize.query('depth', depth, 'int')
response = self._send(http_method='GET',
location_id='a70579d1-f53a-48ee-a5be-7be8659023b9',
version='5.1-preview.2',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[WorkItemClassificationNode]', self._unwrap_collection(response)) | GetRootNodes.
[Preview API] Gets root classification nodes under the project.
:param str project: Project ID or project name
:param int depth: Depth of children to fetch.
:rtype: [WorkItemClassificationNode] | Below is the the instruction that describes the task:
### Input:
GetRootNodes.
[Preview API] Gets root classification nodes under the project.
:param str project: Project ID or project name
:param int depth: Depth of children to fetch.
:rtype: [WorkItemClassificationNode]
### Response:
def get_root_nodes(self, project, depth=None):
"""GetRootNodes.
[Preview API] Gets root classification nodes under the project.
:param str project: Project ID or project name
:param int depth: Depth of children to fetch.
:rtype: [WorkItemClassificationNode]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if depth is not None:
query_parameters['$depth'] = self._serialize.query('depth', depth, 'int')
response = self._send(http_method='GET',
location_id='a70579d1-f53a-48ee-a5be-7be8659023b9',
version='5.1-preview.2',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[WorkItemClassificationNode]', self._unwrap_collection(response)) |
async def certify(client: Client, certification_signed_raw: str) -> ClientResponse:
"""
POST certification raw document
:param client: Client to connect to the api
:param certification_signed_raw: Certification raw document
:return:
"""
return await client.post(MODULE + '/certify', {'cert': certification_signed_raw}, rtype=RESPONSE_AIOHTTP) | POST certification raw document
:param client: Client to connect to the api
:param certification_signed_raw: Certification raw document
:return: | Below is the the instruction that describes the task:
### Input:
POST certification raw document
:param client: Client to connect to the api
:param certification_signed_raw: Certification raw document
:return:
### Response:
async def certify(client: Client, certification_signed_raw: str) -> ClientResponse:
"""
POST certification raw document
:param client: Client to connect to the api
:param certification_signed_raw: Certification raw document
:return:
"""
return await client.post(MODULE + '/certify', {'cert': certification_signed_raw}, rtype=RESPONSE_AIOHTTP) |
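The certify coroutine above must be awaited; below is a hedged asyncio sketch. The endpoint string follows duniterpy conventions as an assumption, and the signed raw document is a placeholder built elsewhere.
import asyncio
certification_signed_raw = '...'   # a fully signed Certification raw document, built elsewhere
async def main():
    client = Client('BMAS g1.duniter.org 443')   # assumed endpoint format
    response = await certify(client, certification_signed_raw)
    print(response.status)
    await client.close()
asyncio.run(main())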
def fit(self, X, y):
"""
Fit CAIM
Parameters
----------
X : array-like, pandas dataframe, shape [n_samples, n_feature]
Input array can contain missing values
y: array-like, pandas dataframe, shape [n_samples]
Target variable. Must be categorical.
Returns
-------
self
"""
self.split_scheme = dict()
if isinstance(X, pd.DataFrame):
# self.indx = X.index
# self.columns = X.columns
if isinstance(self._features, list):
self.categorical = [X.columns.get_loc(label) for label in self._features]
X = X.values
y = y.values
if self._features == 'auto':
self.categorical = self.check_categorical(X, y)
categorical = self.categorical
print('Categorical', categorical)
min_splits = np.unique(y).shape[0]
for j in range(X.shape[1]):
if j in categorical:
continue
xj = X[:, j]
xj = xj[np.invert(np.isnan(xj))]
new_index = xj.argsort()
xj = xj[new_index]
yj = y[new_index]
allsplits = np.unique(xj)[1:-1].tolist() # potential split points
global_caim = -1
mainscheme = [xj[0], xj[-1]]
best_caim = 0
k = 1
while (k <= min_splits) or ((global_caim < best_caim) and (allsplits)):
split_points = np.random.permutation(allsplits).tolist()
best_scheme = None
best_point = None
best_caim = 0
k = k + 1
while split_points:
scheme = mainscheme[:]
sp = split_points.pop()
scheme.append(sp)
scheme.sort()
c = self.get_caim(scheme, xj, yj)
if c > best_caim:
best_caim = c
best_scheme = scheme
best_point = sp
if (k <= min_splits) or (best_caim > global_caim):
mainscheme = best_scheme
global_caim = best_caim
try:
allsplits.remove(best_point)
except ValueError:
raise NotEnoughPoints('The feature #' + str(j) + ' does not have' +
' enough unique values for discretization!' +
' Add it to categorical list!')
self.split_scheme[j] = mainscheme
print('#', j, ' GLOBAL CAIM ', global_caim)
return self | Fit CAIM
Parameters
----------
X : array-like, pandas dataframe, shape [n_samples, n_feature]
Input array can contain missing values
y: array-like, pandas dataframe, shape [n_samples]
Target variable. Must be categorical.
Returns
-------
self | Below is the the instruction that describes the task:
### Input:
Fit CAIM
Parameters
----------
X : array-like, pandas dataframe, shape [n_samples, n_feature]
Input array can contain missing values
y: array-like, pandas dataframe, shape [n_samples]
Target variable. Must be categorical.
Returns
-------
self
### Response:
def fit(self, X, y):
"""
Fit CAIM
Parameters
----------
X : array-like, pandas dataframe, shape [n_samples, n_feature]
Input array can contain missing values
y: array-like, pandas dataframe, shape [n_samples]
Target variable. Must be categorical.
Returns
-------
self
"""
self.split_scheme = dict()
if isinstance(X, pd.DataFrame):
# self.indx = X.index
# self.columns = X.columns
if isinstance(self._features, list):
self.categorical = [X.columns.get_loc(label) for label in self._features]
X = X.values
y = y.values
if self._features == 'auto':
self.categorical = self.check_categorical(X, y)
categorical = self.categorical
print('Categorical', categorical)
min_splits = np.unique(y).shape[0]
for j in range(X.shape[1]):
if j in categorical:
continue
xj = X[:, j]
xj = xj[np.invert(np.isnan(xj))]
new_index = xj.argsort()
xj = xj[new_index]
yj = y[new_index]
allsplits = np.unique(xj)[1:-1].tolist() # potential split points
global_caim = -1
mainscheme = [xj[0], xj[-1]]
best_caim = 0
k = 1
while (k <= min_splits) or ((global_caim < best_caim) and (allsplits)):
split_points = np.random.permutation(allsplits).tolist()
best_scheme = None
best_point = None
best_caim = 0
k = k + 1
while split_points:
scheme = mainscheme[:]
sp = split_points.pop()
scheme.append(sp)
scheme.sort()
c = self.get_caim(scheme, xj, yj)
if c > best_caim:
best_caim = c
best_scheme = scheme
best_point = sp
if (k <= min_splits) or (best_caim > global_caim):
mainscheme = best_scheme
global_caim = best_caim
try:
allsplits.remove(best_point)
except ValueError:
raise NotEnoughPoints('The feature #' + str(j) + ' does not have' +
' enough unique values for discretization!' +
' Add it to categorical list!')
self.split_scheme[j] = mainscheme
print('#', j, ' GLOBAL CAIM ', global_caim)
return self |
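A hedged sketch of fitting the CAIM discretizer above. The class name CAIMD and its constructor argument are assumptions; the record only shows that fit accepts arrays or DataFrames and stores the cut points in split_scheme.
import numpy as np
X = np.random.rand(200, 3)                   # three continuous features
y = np.random.randint(0, 2, 200)             # binary target
disc = CAIMD(categorical_features='auto')    # hypothetical constructor
disc.fit(X, y)
print(disc.split_scheme)                     # {feature_index: [sorted cut points], ...}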
def if_body_action(self, text, loc, arg):
"""Code executed after recognising if statement's body"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("IF_BODY:",arg)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
#generate conditional jump (based on last compare)
label = self.codegen.label("false{0}".format(self.false_label_number), True, False)
self.codegen.jump(self.relexp_code, True, label)
#generate 'true' label (executes if condition is satisfied)
self.codegen.newline_label("true{0}".format(self.label_number), True, True)
#save label numbers (needed for nested if/while statements)
self.label_stack.append(self.false_label_number)
self.label_stack.append(self.label_number) | Code executed after recognising if statement's body | Below is the the instruction that describes the task:
### Input:
Code executed after recognising if statement's body
### Response:
def if_body_action(self, text, loc, arg):
"""Code executed after recognising if statement's body"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("IF_BODY:",arg)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
#generate conditional jump (based on last compare)
label = self.codegen.label("false{0}".format(self.false_label_number), True, False)
self.codegen.jump(self.relexp_code, True, label)
#generate 'true' label (executes if condition is satisfied)
self.codegen.newline_label("true{0}".format(self.label_number), True, True)
#save label numbers (needed for nested if/while statements)
self.label_stack.append(self.false_label_number)
self.label_stack.append(self.label_number) |
def run_fn_atomically(self, request):
"""Execute a function, atomically and reply with the result."""
fn = serializer.loads_fn(request[Msgs.info])
args, kwargs = request[Msgs.args], request[Msgs.kwargs]
with self.mutate_safely():
self.reply(fn(self.state, *args, **kwargs)) | Execute a function, atomically and reply with the result. | Below is the the instruction that describes the task:
### Input:
Execute a function, atomically and reply with the result.
### Response:
def run_fn_atomically(self, request):
"""Execute a function, atomically and reply with the result."""
fn = serializer.loads_fn(request[Msgs.info])
args, kwargs = request[Msgs.args], request[Msgs.kwargs]
with self.mutate_safely():
self.reply(fn(self.state, *args, **kwargs)) |
def capture_role(self, service_name, deployment_name, role_name,
post_capture_action, target_image_name,
target_image_label, provisioning_configuration=None):
'''
The Capture Role operation captures a virtual machine image to your
image gallery. From the captured image, you can create additional
customized virtual machines.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
role_name:
The name of the role.
post_capture_action:
Specifies the action after capture operation completes. Possible
values are: Delete, Reprovision.
target_image_name:
Specifies the image name of the captured virtual machine.
target_image_label:
Specifies the friendly name of the captured virtual machine.
provisioning_configuration:
Use an instance of WindowsConfigurationSet or LinuxConfigurationSet.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('role_name', role_name)
_validate_not_none('post_capture_action', post_capture_action)
_validate_not_none('target_image_name', target_image_name)
_validate_not_none('target_image_label', target_image_label)
return self._perform_post(
self._get_role_instance_operations_path(
service_name, deployment_name, role_name),
_XmlSerializer.capture_role_to_xml(
post_capture_action,
target_image_name,
target_image_label,
provisioning_configuration),
as_async=True) | The Capture Role operation captures a virtual machine image to your
image gallery. From the captured image, you can create additional
customized virtual machines.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
role_name:
The name of the role.
post_capture_action:
Specifies the action after capture operation completes. Possible
values are: Delete, Reprovision.
target_image_name:
Specifies the image name of the captured virtual machine.
target_image_label:
Specifies the friendly name of the captured virtual machine.
provisioning_configuration:
Use an instance of WindowsConfigurationSet or LinuxConfigurationSet. | Below is the the instruction that describes the task:
### Input:
The Capture Role operation captures a virtual machine image to your
image gallery. From the captured image, you can create additional
customized virtual machines.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
role_name:
The name of the role.
post_capture_action:
Specifies the action after capture operation completes. Possible
values are: Delete, Reprovision.
target_image_name:
Specifies the image name of the captured virtual machine.
target_image_label:
Specifies the friendly name of the captured virtual machine.
provisioning_configuration:
Use an instance of WindowsConfigurationSet or LinuxConfigurationSet.
### Response:
def capture_role(self, service_name, deployment_name, role_name,
post_capture_action, target_image_name,
target_image_label, provisioning_configuration=None):
'''
The Capture Role operation captures a virtual machine image to your
image gallery. From the captured image, you can create additional
customized virtual machines.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
role_name:
The name of the role.
post_capture_action:
Specifies the action after capture operation completes. Possible
values are: Delete, Reprovision.
target_image_name:
Specifies the image name of the captured virtual machine.
target_image_label:
Specifies the friendly name of the captured virtual machine.
provisioning_configuration:
Use an instance of WindowsConfigurationSet or LinuxConfigurationSet.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('role_name', role_name)
_validate_not_none('post_capture_action', post_capture_action)
_validate_not_none('target_image_name', target_image_name)
_validate_not_none('target_image_label', target_image_label)
return self._perform_post(
self._get_role_instance_operations_path(
service_name, deployment_name, role_name),
_XmlSerializer.capture_role_to_xml(
post_capture_action,
target_image_name,
target_image_label,
provisioning_configuration),
as_async=True) |
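A hedged sketch of calling capture_role through the legacy Azure Service Management client; the service, deployment, and role names are placeholders, and the client constructor signature is assumed from that SDK.
sms = ServiceManagementService(subscription_id='...', cert_file='mycert.pem')
result = sms.capture_role(
    service_name='mysvc',
    deployment_name='mydeploy',
    role_name='myrole',
    post_capture_action='Delete',
    target_image_name='myrole-image',
    target_image_label='Captured web role')
# capture_role is asynchronous on the service side; `result` is an operation handle.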
def run_copy(self,
source_project_dataset_tables,
destination_project_dataset_table,
write_disposition='WRITE_EMPTY',
create_disposition='CREATE_IF_NEEDED',
labels=None):
"""
Executes a BigQuery copy command to copy data from one BigQuery table
to another. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
For more details about these parameters.
:param source_project_dataset_tables: One or more dotted
``(project:|project.)<dataset>.<table>``
BigQuery tables to use as the source data. Use a list if there are
multiple source tables.
If ``<project>`` is not included, project will be the project defined
in the connection json.
:type source_project_dataset_tables: list|string
:param destination_project_dataset_table: The destination BigQuery
table. Format is: ``(project:|project.)<dataset>.<table>``
:type destination_project_dataset_table: str
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: str
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: str
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
"""
source_project_dataset_tables = ([
source_project_dataset_tables
] if not isinstance(source_project_dataset_tables, list) else
source_project_dataset_tables)
source_project_dataset_tables_fixup = []
for source_project_dataset_table in source_project_dataset_tables:
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
source_project_dataset_tables_fixup.append({
'projectId':
source_project,
'datasetId':
source_dataset,
'tableId':
source_table
})
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id)
configuration = {
'copy': {
'createDisposition': create_disposition,
'writeDisposition': write_disposition,
'sourceTables': source_project_dataset_tables_fixup,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table
}
}
}
if labels:
configuration['labels'] = labels
return self.run_with_configuration(configuration) | Executes a BigQuery copy command to copy data from one BigQuery table
to another. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
For more details about these parameters.
:param source_project_dataset_tables: One or more dotted
``(project:|project.)<dataset>.<table>``
BigQuery tables to use as the source data. Use a list if there are
multiple source tables.
If ``<project>`` is not included, project will be the project defined
in the connection json.
:type source_project_dataset_tables: list|string
:param destination_project_dataset_table: The destination BigQuery
table. Format is: ``(project:|project.)<dataset>.<table>``
:type destination_project_dataset_table: str
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: str
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: str
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict | Below is the the instruction that describes the task:
### Input:
Executes a BigQuery copy command to copy data from one BigQuery table
to another. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
For more details about these parameters.
:param source_project_dataset_tables: One or more dotted
``(project:|project.)<dataset>.<table>``
BigQuery tables to use as the source data. Use a list if there are
multiple source tables.
If ``<project>`` is not included, project will be the project defined
in the connection json.
:type source_project_dataset_tables: list|string
:param destination_project_dataset_table: The destination BigQuery
table. Format is: ``(project:|project.)<dataset>.<table>``
:type destination_project_dataset_table: str
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: str
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: str
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
### Response:
def run_copy(self,
source_project_dataset_tables,
destination_project_dataset_table,
write_disposition='WRITE_EMPTY',
create_disposition='CREATE_IF_NEEDED',
labels=None):
"""
Executes a BigQuery copy command to copy data from one BigQuery table
to another. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
For more details about these parameters.
:param source_project_dataset_tables: One or more dotted
``(project:|project.)<dataset>.<table>``
BigQuery tables to use as the source data. Use a list if there are
multiple source tables.
If ``<project>`` is not included, project will be the project defined
in the connection json.
:type source_project_dataset_tables: list|string
:param destination_project_dataset_table: The destination BigQuery
table. Format is: ``(project:|project.)<dataset>.<table>``
:type destination_project_dataset_table: str
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: str
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: str
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
"""
source_project_dataset_tables = ([
source_project_dataset_tables
] if not isinstance(source_project_dataset_tables, list) else
source_project_dataset_tables)
source_project_dataset_tables_fixup = []
for source_project_dataset_table in source_project_dataset_tables:
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
source_project_dataset_tables_fixup.append({
'projectId':
source_project,
'datasetId':
source_dataset,
'tableId':
source_table
})
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id)
configuration = {
'copy': {
'createDisposition': create_disposition,
'writeDisposition': write_disposition,
'sourceTables': source_project_dataset_tables_fixup,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table
}
}
}
if labels:
configuration['labels'] = labels
return self.run_with_configuration(configuration) |
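A hedged sketch of issuing the copy above from an Airflow BigQuery hook; the import path, and whether run_copy hangs off the hook itself or off the cursor it returns, vary across Airflow versions, so treat both as assumptions.
from airflow.contrib.hooks.bigquery_hook import BigQueryHook   # path differs in Airflow 2.x providers
hook = BigQueryHook(bigquery_conn_id='bigquery_default')
hook.run_copy(
    source_project_dataset_tables=['my-project.staging.events_a',
                                   'my-project.staging.events_b'],
    destination_project_dataset_table='my-project.warehouse.events',
    write_disposition='WRITE_TRUNCATE',
    labels={'team': 'data-eng'})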
def getL2Representations(self):
"""
Returns the active representation in L2.
"""
return [set(L2.getSelf()._pooler.getActiveCells()) for L2 in self.L2Regions] | Returns the active representation in L2. | Below is the the instruction that describes the task:
### Input:
Returns the active representation in L2.
### Response:
def getL2Representations(self):
"""
Returns the active representation in L2.
"""
return [set(L2.getSelf()._pooler.getActiveCells()) for L2 in self.L2Regions] |